Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/numa.c4
-rw-r--r--drivers/acpi/processor_perflib.c2
-rw-r--r--drivers/char/agp/intel-agp.c123
-rw-r--r--drivers/char/cyclades.c16
-rw-r--r--drivers/char/hvc_console.c2
-rw-r--r--drivers/char/ip2/ip2main.c26
-rw-r--r--drivers/char/isicom.c54
-rw-r--r--drivers/char/moxa.c20
-rw-r--r--drivers/char/mxser.c3
-rw-r--r--drivers/char/nozomi.c157
-rw-r--r--drivers/char/serial167.c3
-rw-r--r--drivers/char/specialix.c2
-rw-r--r--drivers/char/synclink.c4
-rw-r--r--drivers/char/synclink_gt.c186
-rw-r--r--drivers/char/tty_buffer.c17
-rw-r--r--drivers/char/tty_ldisc.c50
-rw-r--r--drivers/char/vt_ioctl.c39
-rw-r--r--drivers/dma/dmaengine.c2
-rw-r--r--drivers/edac/amd64_edac.c39
-rw-r--r--drivers/edac/amd64_edac.h3
-rw-r--r--drivers/firewire/core-cdev.c368
-rw-r--r--drivers/firewire/core-device.c198
-rw-r--r--drivers/firewire/core-transaction.c17
-rw-r--r--drivers/firewire/core.h2
-rw-r--r--drivers/firewire/ohci.c364
-rw-r--r--drivers/firewire/sbp2.c5
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/drm_buffer.c184
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c6
-rw-r--r--drivers/gpu/drm/drm_drv.c44
-rw-r--r--drivers/gpu/drm/drm_edid.c30
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c26
-rw-r--r--drivers/gpu/drm/drm_gem.c70
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c253
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c326
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c27
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h69
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c430
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c169
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c313
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h170
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c10
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c3
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c14
-rw-r--r--drivers/gpu/drm/i915/intel_display.c216
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c6
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c4
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c41
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c29
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c23
-rw-r--r--drivers/gpu/drm/nouveau/Makefile2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c160
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c339
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h126
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_calc.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c39
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c167
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c108
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h53
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c508
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c40
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c49
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c54
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c74
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c2367
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c2
-rw-r--r--drivers/gpu/drm/radeon/Makefile9
-rw-r--r--drivers/gpu/drm/radeon/atom.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios.h7300
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c456
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c64
-rw-r--r--drivers/gpu/drm/radeon/avivod.h2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c767
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h176
-rw-r--r--drivers/gpu/drm/radeon/r100.c176
-rw-r--r--drivers/gpu/drm/radeon/r200.c46
-rw-r--r--drivers/gpu/drm/radeon/r300.c157
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c280
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r420.c49
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h100
-rw-r--r--drivers/gpu/drm/radeon/r520.c21
-rw-r--r--drivers/gpu/drm/radeon/r600.c190
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c21
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c17
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.c10
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c262
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c831
-rw-r--r--drivers/gpu/drm/radeon/r600d.h467
-rw-r--r--drivers/gpu/drm/radeon/radeon.h167
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h172
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c435
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c257
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c50
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c290
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c50
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c235
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c332
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h46
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c354
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c36
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c768
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h55
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c399
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h50
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c67
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c203
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c12
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r600837
-rw-r--r--drivers/gpu/drm/radeon/rs400.c39
-rw-r--r--drivers/gpu/drm/radeon/rs600.c56
-rw-r--r--drivers/gpu/drm/radeon/rs690.c41
-rw-r--r--drivers/gpu/drm/radeon/rv515.c21
-rw-r--r--drivers/gpu/drm/radeon/rv770.c259
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c18
-rw-r--r--drivers/gpu/vga/Kconfig11
-rw-r--r--drivers/gpu/vga/Makefile1
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c450
-rw-r--r--drivers/hid/usbhid/hiddev.c7
-rw-r--r--drivers/i2c/Kconfig10
-rw-r--r--drivers/i2c/Makefile1
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c2
-rw-r--r--drivers/i2c/busses/Kconfig7
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c2
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c2
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c2
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c2
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c7
-rw-r--r--drivers/i2c/busses/i2c-isch.c2
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c2
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c48
-rw-r--r--drivers/i2c/busses/i2c-parport.c43
-rw-r--r--drivers/i2c/busses/i2c-parport.h4
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c2
-rw-r--r--drivers/i2c/busses/i2c-sis630.c2
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c10
-rw-r--r--drivers/i2c/busses/i2c-via.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c2
-rw-r--r--drivers/i2c/i2c-core.c54
-rw-r--r--drivers/i2c/i2c-smbus.c263
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/ucm.c63
-rw-r--r--drivers/infiniband/core/ud_header.c14
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/core/user_mad.c173
-rw-r--r--drivers/infiniband/core/uverbs.h11
-rw-r--r--drivers/infiniband/core/uverbs_main.c234
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c15
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h4
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h17
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c80
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c9
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c5
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_sqp.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_pages.c3
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c2
-rw-r--r--drivers/infiniband/hw/nes/nes.c1
-rw-r--r--drivers/infiniband/hw/nes/nes.h9
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c11
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c484
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c61
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c10
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c47
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h97
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c506
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c64
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c281
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c91
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h6
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/md/raid5.h2
-rw-r--r--drivers/media/dvb/firewire/firedtv-fw.c39
-rw-r--r--drivers/media/video/dabusb.c8
-rw-r--r--drivers/mmc/card/sdio_uart.c93
-rw-r--r--drivers/net/cxgb3/adapter.h5
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c57
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.h5
-rw-r--r--drivers/net/cxgb3/regs.h16
-rw-r--r--drivers/net/cxgb3/sge.c10
-rw-r--r--drivers/net/cxgb3/t3_hw.c5
-rw-r--r--drivers/parport/parport_pc.c6
-rw-r--r--drivers/pci/hotplug/ibmphp_res.c14
-rw-r--r--drivers/pcmcia/Kconfig14
-rw-r--r--drivers/pcmcia/cardbus.c2
-rw-r--r--drivers/pcmcia/cistpl.c1908
-rw-r--r--drivers/pcmcia/db1xxx_ss.c19
-rw-r--r--drivers/pcmcia/pd6729.c18
-rw-r--r--drivers/pcmcia/rsrc_mgr.c3
-rw-r--r--drivers/pcmcia/xxs1500_ss.c16
-rw-r--r--drivers/pcmcia/yenta_socket.c8
-rw-r--r--drivers/platform/x86/Kconfig13
-rw-r--r--drivers/platform/x86/asus-laptop.c1741
-rw-r--r--drivers/platform/x86/asus_acpi.c3
-rw-r--r--drivers/platform/x86/classmate-laptop.c4
-rw-r--r--drivers/platform/x86/dell-laptop.c9
-rw-r--r--drivers/platform/x86/eeepc-laptop.c21
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c116
-rw-r--r--drivers/platform/x86/toshiba_acpi.c30
-rw-r--r--drivers/serial/68328serial.c8
-rw-r--r--drivers/serial/8250.c21
-rw-r--r--drivers/serial/8250_pci.c31
-rw-r--r--drivers/serial/Kconfig53
-rw-r--r--drivers/serial/atmel_serial.c22
-rw-r--r--drivers/serial/bcm63xx_uart.c7
-rw-r--r--drivers/serial/bfin_5xx.c22
-rw-r--r--drivers/serial/bfin_sport_uart.c701
-rw-r--r--drivers/serial/bfin_sport_uart.h38
-rw-r--r--drivers/serial/icom.c5
-rw-r--r--drivers/serial/imx.c6
-rw-r--r--drivers/serial/ioc3_serial.c3
-rw-r--r--drivers/serial/jsm/jsm_driver.c1
-rw-r--r--drivers/serial/jsm/jsm_tty.c9
-rw-r--r--drivers/serial/msm_serial.c6
-rw-r--r--drivers/serial/timbuart.c7
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c2
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/atm/cxacru.c192
-rw-r--r--drivers/usb/atm/usbatm.c3
-rw-r--r--drivers/usb/atm/usbatm.h15
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c8
-rw-r--r--drivers/usb/class/cdc-acm.c82
-rw-r--r--drivers/usb/class/cdc-acm.h2
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/class/usblp.c22
-rw-r--r--drivers/usb/class/usbtmc.c2
-rw-r--r--drivers/usb/core/Kconfig4
-rw-r--r--drivers/usb/core/devices.c32
-rw-r--r--drivers/usb/core/devio.c127
-rw-r--r--drivers/usb/core/driver.c918
-rw-r--r--drivers/usb/core/file.c2
-rw-r--r--drivers/usb/core/hcd.c27
-rw-r--r--drivers/usb/core/hcd.h13
-rw-r--r--drivers/usb/core/hub.c120
-rw-r--r--drivers/usb/core/message.c5
-rw-r--r--drivers/usb/core/quirks.c18
-rw-r--r--drivers/usb/core/sysfs.c85
-rw-r--r--drivers/usb/core/urb.c13
-rw-r--r--drivers/usb/core/usb.c38
-rw-r--r--drivers/usb/core/usb.h43
-rw-r--r--drivers/usb/early/ehci-dbgp.c68
-rw-r--r--drivers/usb/gadget/Kconfig10
-rw-r--r--drivers/usb/gadget/Makefile2
-rw-r--r--drivers/usb/gadget/at91_udc.c10
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c9
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.h1
-rw-r--r--drivers/usb/gadget/epautoconf.c24
-rw-r--r--drivers/usb/gadget/ether.c2
-rw-r--r--drivers/usb/gadget/f_acm.c8
-rw-r--r--drivers/usb/gadget/f_ecm.c7
-rw-r--r--drivers/usb/gadget/f_mass_storage.c50
-rw-r--r--drivers/usb/gadget/f_rndis.c4
-rw-r--r--drivers/usb/gadget/file_storage.c8
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/gadget_chips.h59
-rw-r--r--drivers/usb/gadget/gmidi.c5
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/inode.c39
-rw-r--r--drivers/usb/gadget/mass_storage.c8
-rw-r--r--drivers/usb/gadget/nokia.c259
-rw-r--r--drivers/usb/gadget/printer.c18
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c135
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h6
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c11
-rw-r--r--drivers/usb/gadget/u_ether.c5
-rw-r--r--drivers/usb/gadget/u_ether.h7
-rw-r--r--drivers/usb/gadget/zero.c6
-rw-r--r--drivers/usb/host/Kconfig11
-rw-r--r--drivers/usb/host/Makefile2
-rw-r--r--drivers/usb/host/ehci-atmel.c2
-rw-r--r--drivers/usb/host/ehci-au1xxx.c6
-rw-r--r--drivers/usb/host/ehci-fsl.c97
-rw-r--r--drivers/usb/host/ehci-mxc.c23
-rw-r--r--drivers/usb/host/ehci-omap.c47
-rw-r--r--drivers/usb/host/ehci-orion.c8
-rw-r--r--drivers/usb/host/ehci-ppc-of.c14
-rw-r--r--drivers/usb/host/ehci-sched.c12
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c8
-rw-r--r--drivers/usb/host/fhci-hcd.c4
-rw-r--r--drivers/usb/host/imx21-dbg.c527
-rw-r--r--drivers/usb/host/imx21-hcd.c1789
-rw-r--r--drivers/usb/host/imx21-hcd.h436
-rw-r--r--drivers/usb/host/isp1362-hcd.c15
-rw-r--r--drivers/usb/host/isp1760-hcd.c10
-rw-r--r--drivers/usb/host/isp1760-if.c2
-rw-r--r--drivers/usb/host/ohci-da8xx.c456
-rw-r--r--drivers/usb/host/ohci-dbg.c4
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-lh7a404.c11
-rw-r--r--drivers/usb/host/ohci-pnx4008.c6
-rw-r--r--drivers/usb/host/ohci-ppc-of.c10
-rw-r--r--drivers/usb/host/ohci-ppc-soc.c8
-rw-r--r--drivers/usb/host/ohci-sa1111.c8
-rw-r--r--drivers/usb/host/sl811-hcd.c5
-rw-r--r--drivers/usb/host/uhci-hcd.c1
-rw-r--r--drivers/usb/host/xhci-dbg.c19
-rw-r--r--drivers/usb/host/xhci-ext-caps.h7
-rw-r--r--drivers/usb/host/xhci-hcd.c150
-rw-r--r--drivers/usb/host/xhci-hub.c65
-rw-r--r--drivers/usb/host/xhci-mem.c47
-rw-r--r--drivers/usb/host/xhci-pci.c1
-rw-r--r--drivers/usb/host/xhci-ring.c41
-rw-r--r--drivers/usb/host/xhci.h11
-rw-r--r--drivers/usb/image/mdc800.c2
-rw-r--r--drivers/usb/image/microtek.c4
-rw-r--r--drivers/usb/misc/Kconfig25
-rw-r--r--drivers/usb/misc/Makefile2
-rw-r--r--drivers/usb/misc/adutux.c8
-rw-r--r--drivers/usb/misc/appledisplay.c5
-rw-r--r--drivers/usb/misc/berry_charge.c183
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c2
-rw-r--r--drivers/usb/misc/cytherm.c2
-rw-r--r--drivers/usb/misc/emi26.c2
-rw-r--r--drivers/usb/misc/emi62.c2
-rw-r--r--drivers/usb/misc/ftdi-elan.c11
-rw-r--r--drivers/usb/misc/idmouse.c2
-rw-r--r--drivers/usb/misc/iowarrior.c6
-rw-r--r--drivers/usb/misc/isight_firmware.c4
-rw-r--r--drivers/usb/misc/ldusb.c4
-rw-r--r--drivers/usb/misc/legousbtower.c13
-rw-r--r--drivers/usb/misc/rio500.c11
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c20
-rw-r--r--drivers/usb/misc/trancevibrator.c2
-rw-r--r--drivers/usb/misc/usblcd.c7
-rw-r--r--drivers/usb/misc/usbled.c2
-rw-r--r--drivers/usb/misc/usbsevseg.c2
-rw-r--r--drivers/usb/misc/usbtest.c6
-rw-r--r--drivers/usb/misc/uss720.c2
-rw-r--r--drivers/usb/misc/vstusb.c783
-rw-r--r--drivers/usb/mon/mon_bin.c7
-rw-r--r--drivers/usb/mon/mon_text.c6
-rw-r--r--drivers/usb/musb/blackfin.c28
-rw-r--r--drivers/usb/musb/cppi_dma.c33
-rw-r--r--drivers/usb/musb/musb_core.c562
-rw-r--r--drivers/usb/musb/musb_core.h72
-rw-r--r--drivers/usb/musb/musb_gadget.c20
-rw-r--r--drivers/usb/musb/musb_host.c34
-rw-r--r--drivers/usb/musb/musb_regs.h101
-rw-r--r--drivers/usb/musb/musbhsdma.c25
-rw-r--r--drivers/usb/musb/musbhsdma.h17
-rw-r--r--drivers/usb/musb/omap2430.c48
-rw-r--r--drivers/usb/musb/omap2430.h32
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/musb/tusb6010_omap.c2
-rw-r--r--drivers/usb/otg/twl4030-usb.c45
-rw-r--r--drivers/usb/serial/Kconfig19
-rw-r--r--drivers/usb/serial/Makefile2
-rw-r--r--drivers/usb/serial/aircable.c36
-rw-r--r--drivers/usb/serial/ark3116.c3
-rw-r--r--drivers/usb/serial/belkin_sa.c2
-rw-r--r--drivers/usb/serial/ch341.c27
-rw-r--r--drivers/usb/serial/cp210x.c7
-rw-r--r--drivers/usb/serial/cyberjack.c5
-rw-r--r--drivers/usb/serial/cypress_m8.c82
-rw-r--r--drivers/usb/serial/digi_acceleport.c38
-rw-r--r--drivers/usb/serial/empeg.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c195
-rw-r--r--drivers/usb/serial/ftdi_sio.h6
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h39
-rw-r--r--drivers/usb/serial/funsoft.c2
-rw-r--r--drivers/usb/serial/garmin_gps.c3
-rw-r--r--drivers/usb/serial/generic.c7
-rw-r--r--drivers/usb/serial/hp4x.c2
-rw-r--r--drivers/usb/serial/io_edgeport.c69
-rw-r--r--drivers/usb/serial/io_tables.h10
-rw-r--r--drivers/usb/serial/io_ti.c75
-rw-r--r--drivers/usb/serial/ipaq.c1
-rw-r--r--drivers/usb/serial/ipw.c3
-rw-r--r--drivers/usb/serial/ir-usb.c13
-rw-r--r--drivers/usb/serial/iuu_phoenix.c2
-rw-r--r--drivers/usb/serial/keyspan.c57
-rw-r--r--drivers/usb/serial/keyspan.h10
-rw-r--r--drivers/usb/serial/keyspan_pda.c60
-rw-r--r--drivers/usb/serial/kl5kusb105.c66
-rw-r--r--drivers/usb/serial/kobil_sct.c25
-rw-r--r--drivers/usb/serial/mct_u232.c57
-rw-r--r--drivers/usb/serial/mct_u232.h2
-rw-r--r--drivers/usb/serial/mos7720.c185
-rw-r--r--drivers/usb/serial/mos7840.c27
-rw-r--r--drivers/usb/serial/moto_modem.c2
-rw-r--r--drivers/usb/serial/navman.c3
-rw-r--r--drivers/usb/serial/omninet.c8
-rw-r--r--drivers/usb/serial/opticon.c17
-rw-r--r--drivers/usb/serial/option.c71
-rw-r--r--drivers/usb/serial/oti6858.c36
-rw-r--r--drivers/usb/serial/pl2303.c38
-rw-r--r--drivers/usb/serial/qcaux.c96
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/serial/siemens_mpi.c2
-rw-r--r--drivers/usb/serial/sierra.c59
-rw-r--r--drivers/usb/serial/spcp8x5.c27
-rw-r--r--drivers/usb/serial/symbolserial.c12
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c3
-rw-r--r--drivers/usb/serial/usb-serial.c15
-rw-r--r--drivers/usb/serial/usb_debug.c2
-rw-r--r--drivers/usb/serial/visor.c40
-rw-r--r--drivers/usb/serial/vivopay-serial.c76
-rw-r--r--drivers/usb/serial/whiteheat.c24
-rw-r--r--drivers/usb/storage/onetouch.c2
-rw-r--r--drivers/usb/storage/scsiglue.c10
-rw-r--r--drivers/usb/storage/shuttle_usbat.c15
-rw-r--r--drivers/usb/storage/transport.c6
-rw-r--r--drivers/usb/storage/unusual_devs.h88
-rw-r--r--drivers/usb/usb-skeleton.c2
-rw-r--r--drivers/usb/wusbcore/cbaf.c2
-rw-r--r--drivers/usb/wusbcore/devconnect.c2
-rw-r--r--drivers/usb/wusbcore/mmc.c2
-rw-r--r--drivers/video/console/fbcon.c18
-rw-r--r--drivers/video/fbmem.c1
-rw-r--r--drivers/xen/events.c8
462 files changed, 30028 insertions, 14042 deletions
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 7ad48dfc12db..b8725461d887 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -279,9 +279,9 @@ int __init acpi_numa_init(void)
 	/* SRAT: Static Resource Affinity Table */
 	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
-				      acpi_parse_x2apic_affinity, NR_CPUS);
+				      acpi_parse_x2apic_affinity, nr_cpu_ids);
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
-				      acpi_parse_processor_affinity, NR_CPUS);
+				      acpi_parse_processor_affinity, nr_cpu_ids);
 		ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
					    acpi_parse_memory_affinity,
					    NR_NODE_MEMBLKS);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index a959f6a07508..d648a9860b88 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -561,7 +561,7 @@ end:
 }
 
 int acpi_processor_preregister_performance(
-		struct acpi_processor_performance *performance)
+		struct acpi_processor_performance __percpu *performance)
 {
 	int count, count_target;
 	int retval = 0;
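For context, the __percpu annotation added above only marks the pointer for sparse type-checking; the usual access pattern goes through the per-cpu allocator helpers. A minimal sketch, not taken from this patch and using made-up names (struct demo_stats, demo_init):

#include <linux/percpu.h>
#include <linux/errno.h>

struct demo_stats {
	unsigned long hits;
};

static struct demo_stats __percpu *demo_stats;

static int demo_init(void)
{
	int cpu;

	demo_stats = alloc_percpu(struct demo_stats);	/* one instance per possible CPU */
	if (!demo_stats)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		per_cpu_ptr(demo_stats, cpu)->hits = 0;	/* per-CPU slot access */
	return 0;
}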
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 8a713f1e9653..919a28558d36 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -11,6 +11,9 @@
 #include <asm/smp.h>
 #include "agp.h"
 
+int intel_agp_enabled;
+EXPORT_SYMBOL(intel_agp_enabled);
+
 /*
  * If we have Intel graphics, we're not going to have anything other than
  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
@@ -65,6 +68,10 @@
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB	0x0062
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB	0x006a
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG	0x0046
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB	0x0100
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG	0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB	0x0104
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG	0x0106
 
 /* cover 915 and 945 variants */
 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -99,7 +106,9 @@
 	agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
 	agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
 	agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
-	agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB)
+	agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
+	agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
+	agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
 
 extern int agp_memory_reserved;
 
@@ -148,6 +157,25 @@ extern int agp_memory_reserved;
 #define INTEL_I7505_AGPCTRL	0x70
 #define INTEL_I7505_MCHCFG	0x50
 
+#define SNB_GMCH_CTRL		0x50
+#define SNB_GMCH_GMS_STOLEN_MASK	0xF8
+#define SNB_GMCH_GMS_STOLEN_32M		(1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M		(2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M		(3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M	(4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M	(5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M	(6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M	(7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M	(8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M	(9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M	(0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M	(0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M	(0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M	(0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M	(0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M	(0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M	(0x10 << 3)
+
 static const struct aper_size_info_fixed intel_i810_sizes[] =
 {
 	{64, 16384, 4},
@@ -294,6 +322,13 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
 				off_t pg_start, int mask_type)
 {
 	int i, j;
+	u32 cache_bits = 0;
+
+	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+	{
+		cache_bits = I830_PTE_SYSTEM_CACHED;
+	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -614,7 +649,7 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
 static void intel_i830_init_gtt_entries(void)
 {
 	u16 gmch_ctrl;
-	int gtt_entries;
+	int gtt_entries = 0;
 	u8 rdct;
 	int local = 0;
 	static const int ddt[4] = { 0, 16, 32, 64 };
@@ -706,6 +741,63 @@ static void intel_i830_init_gtt_entries(void)
 			gtt_entries = 0;
 			break;
 		}
+	} else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+		   agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+		/*
+		 * SandyBridge has new memory control reg at 0x50.w
+		 */
+		u16 snb_gmch_ctl;
+		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
+		case SNB_GMCH_GMS_STOLEN_32M:
+			gtt_entries = MB(32) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_64M:
+			gtt_entries = MB(64) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_96M:
+			gtt_entries = MB(96) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_128M:
+			gtt_entries = MB(128) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_160M:
+			gtt_entries = MB(160) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_192M:
+			gtt_entries = MB(192) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_224M:
+			gtt_entries = MB(224) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_256M:
+			gtt_entries = MB(256) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_288M:
+			gtt_entries = MB(288) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_320M:
+			gtt_entries = MB(320) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_352M:
+			gtt_entries = MB(352) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_384M:
+			gtt_entries = MB(384) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_416M:
+			gtt_entries = MB(416) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_448M:
+			gtt_entries = MB(448) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_480M:
+			gtt_entries = MB(480) - KB(size);
+			break;
+		case SNB_GMCH_GMS_STOLEN_512M:
+			gtt_entries = MB(512) - KB(size);
+			break;
+		}
 	} else {
 		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
 		case I855_GMCH_GMS_STOLEN_1M:
@@ -1357,6 +1449,8 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
 	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
 	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
 	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
+	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
+	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
 		*gtt_offset = *gtt_size = MB(2);
 		break;
 	default:
@@ -2338,9 +2432,9 @@ static const struct intel_driver_description {
 		NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
 		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150",
 		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150",
 		NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
 		"GM45", NULL, &intel_i965_driver },
@@ -2355,13 +2449,17 @@ static const struct intel_driver_description {
 	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
 		"G41", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
-		"Ironlake/D", NULL, &intel_i965_driver },
+		"HD Graphics", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
-		"Ironlake/M", NULL, &intel_i965_driver },
+		"HD Graphics", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
-		"Ironlake/MA", NULL, &intel_i965_driver },
+		"HD Graphics", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
-		"Ironlake/MC2", NULL, &intel_i965_driver },
+		"HD Graphics", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0,
+		"Sandybridge", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0,
+		"Sandybridge", NULL, &intel_i965_driver },
 	{ 0, 0, 0, NULL, NULL, NULL }
 };
 
@@ -2371,7 +2469,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 	struct agp_bridge_data *bridge;
 	u8 cap_ptr = 0;
 	struct resource *r;
-	int i;
+	int i, err;
 
 	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
 
@@ -2463,7 +2561,10 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 	}
 
 	pci_set_drvdata(pdev, bridge);
-	return agp_add_bridge(bridge);
+	err = agp_add_bridge(bridge);
+	if (!err)
+		intel_agp_enabled = 1;
+	return err;
 }
 
 static void __devexit agp_intel_remove(struct pci_dev *pdev)
@@ -2568,6 +2669,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
+	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
+	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
 	{ }
 };
 
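For reference, the SandyBridge stolen-memory field added above decodes linearly: values 1 through 0x10 in bits 7:3 of the GMCH control word select 32 MB through 512 MB in 32 MB steps, which is what the long switch in intel_i830_init_gtt_entries() spells out case by case. A hypothetical helper (snb_stolen_bytes is not part of the patch) showing the same decode:

static unsigned int snb_stolen_bytes(u16 snb_gmch_ctl)
{
	/* "graphics mode select" field, bits 7:3 of the SNB GMCH control word */
	unsigned int gms = (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) >> 3;

	if (gms == 0 || gms > 0x10)
		return 0;			/* no or unknown stolen memory */
	return gms * 32 * 1024 * 1024;		/* 32 MB granularity */
}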
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 4254457d3911..b861c08263a4 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -158,13 +158,11 @@ static unsigned int cy_isa_addresses[] = {
 
 #define NR_ISA_ADDRS ARRAY_SIZE(cy_isa_addresses)
 
-#ifdef MODULE
 static long maddr[NR_CARDS];
 static int irq[NR_CARDS];
 
 module_param_array(maddr, long, NULL, 0);
 module_param_array(irq, int, NULL, 0);
-#endif
 
 #endif				/* CONFIG_ISA */
 
@@ -598,12 +596,6 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
 	save_car = readb(base_addr + (CyCAR << index));
 	cy_writeb(base_addr + (CyCAR << index), save_xir);
 
-	/* validate the port# (as configured and open) */
-	if (channel + chip * 4 >= cinfo->nports) {
-		cy_writeb(base_addr + (CySRER << index),
-			readb(base_addr + (CySRER << index)) & ~CyTxRdy);
-		goto end;
-	}
 	info = &cinfo->ports[channel + chip * 4];
 	tty = tty_port_tty_get(&info->port);
 	if (tty == NULL) {
@@ -3316,13 +3308,10 @@ static int __init cy_detect_isa(void)
 	unsigned short cy_isa_irq, nboard;
 	void __iomem *cy_isa_address;
 	unsigned short i, j, cy_isa_nchan;
-#ifdef MODULE
 	int isparam = 0;
-#endif
 
 	nboard = 0;
 
-#ifdef MODULE
 	/* Check for module parameters */
 	for (i = 0; i < NR_CARDS; i++) {
 		if (maddr[i] || i) {
@@ -3332,7 +3321,6 @@ static int __init cy_detect_isa(void)
 			if (!maddr[i])
 				break;
 	}
-#endif
 
 	/* scan the address table probing for Cyclom-Y/ISA boards */
 	for (i = 0; i < NR_ISA_ADDRS; i++) {
@@ -3353,11 +3341,10 @@ static int __init cy_detect_isa(void)
 			iounmap(cy_isa_address);
 			continue;
 		}
-#ifdef MODULE
+
 		if (isparam && i < NR_CARDS && irq[i])
 			cy_isa_irq = irq[i];
 		else
-#endif
 			/* find out the board's irq by probing */
 			cy_isa_irq = detect_isa_irq(cy_isa_address);
 		if (cy_isa_irq == 0) {
@@ -4208,3 +4195,4 @@ module_exit(cy_cleanup_module);
 MODULE_LICENSE("GPL");
 MODULE_VERSION(CY_VERSION);
 MODULE_ALIAS_CHARDEV_MAJOR(CYCLADES_MAJOR);
+MODULE_FIRMWARE("cyzfirm.bin");
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 4c3b59be286a..465185fc0f52 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -146,7 +146,7 @@ static void hvc_console_print(struct console *co, const char *b,
 		return;
 
 	/* This console adapter was removed so it is not usable. */
-	if (vtermnos[index] < 0)
+	if (vtermnos[index] == -1)
 		return;
 
 	while (count > 0 || i > 0) {
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 517271c762e6..911e1da6def2 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -208,6 +208,7 @@ static int DumpFifoBuffer( char __user *, int);
 
 static void ip2_init_board(int, const struct firmware *);
 static unsigned short find_eisa_board(int);
+static int ip2_setup(char *str);
 
 /***************/
 /* Static Data */
@@ -263,7 +264,7 @@ static int tracewrap;
 /* Macros */
 /**********/
 
-#if defined(MODULE) && defined(IP2DEBUG_OPEN)
+#ifdef IP2DEBUG_OPEN
 #define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] ttyc=%d, modc=%x -> %s\n", \
 		tty->name,(pCh->flags), \
 		tty->count,/*GET_USE_COUNT(module)*/0,s)
@@ -285,7 +286,10 @@ MODULE_AUTHOR("Doug McNash");
 MODULE_DESCRIPTION("Computone IntelliPort Plus Driver");
 MODULE_LICENSE("GPL");
 
+#define MAX_CMD_STR	50
+
 static int poll_only;
+static char cmd[MAX_CMD_STR];
 
 static int Eisa_irq;
 static int Eisa_slot;
@@ -309,6 +313,8 @@ module_param_array(io, int, NULL, 0);
 MODULE_PARM_DESC(io, "I/O ports for IntelliPort Cards");
 module_param(poll_only, bool, 0);
 MODULE_PARM_DESC(poll_only, "Do not use card interrupts");
+module_param_string(ip2, cmd, MAX_CMD_STR, 0);
+MODULE_PARM_DESC(ip2, "Contains module parameter passed with 'ip2='");
 
 /* for sysfs class support */
 static struct class *ip2_class;
@@ -487,7 +493,6 @@ static const struct firmware *ip2_request_firmware(void)
 	return fw;
 }
 
-#ifndef MODULE
 /******************************************************************************
  * ip2_setup:
  *	str: kernel command line string
@@ -531,7 +536,6 @@ static int __init ip2_setup(char *str)
 	return 1;
 }
 __setup("ip2=", ip2_setup);
-#endif /* !MODULE */
 
 static int __init ip2_loadmain(void)
 {
@@ -539,14 +543,20 @@ static int __init ip2_loadmain(void)
 	int err = 0;
 	i2eBordStrPtr pB = NULL;
 	int rc = -1;
-	struct pci_dev *pdev = NULL;
 	const struct firmware *fw = NULL;
+	char *str;
+
+	str = cmd;
 
 	if (poll_only) {
 		/* Hard lock the interrupts to zero */
 		irq[0] = irq[1] = irq[2] = irq[3] = poll_only = 0;
 	}
 
+	/* Check module parameter with 'ip2=' has been passed or not */
+	if (!poll_only && (!strncmp(str, "ip2=", 4)))
+		ip2_setup(str);
+
 	ip2trace(ITRC_NO_PORT, ITRC_INIT, ITRC_ENTER, 0);
 
 	/* process command line arguments to modprobe or
@@ -612,6 +622,7 @@ static int __init ip2_loadmain(void)
 	case PCI:
 #ifdef CONFIG_PCI
 	{
+		struct pci_dev *pdev = NULL;
 		u32 addr;
 		int status;
 
@@ -626,7 +637,7 @@ static int __init ip2_loadmain(void)
 
 		if (pci_enable_device(pdev)) {
 			dev_err(&pdev->dev, "can't enable device\n");
-			break;
+			goto out;
 		}
 		ip2config.type[i] = PCI;
 		ip2config.pci_dev[i] = pci_dev_get(pdev);
@@ -638,6 +649,8 @@ static int __init ip2_loadmain(void)
 			dev_err(&pdev->dev, "I/O address error\n");
 
 		ip2config.irq[i] = pdev->irq;
+out:
+		pci_dev_put(pdev);
 	}
 #else
 		printk(KERN_ERR "IP2: PCI card specified but PCI "
@@ -656,7 +669,6 @@ static int __init ip2_loadmain(void)
 			break;
 		} /* switch */
 	} /* for */
-	pci_dev_put(pdev);
 
 	for (i = 0; i < IP2_MAX_BOARDS; ++i) {
 		if (ip2config.addr[i]) {
@@ -3197,3 +3209,5 @@ static struct pci_device_id ip2main_pci_tbl[] __devinitdata = {
 };
 
 MODULE_DEVICE_TABLE(pci, ip2main_pci_tbl);
+
+MODULE_FIRMWARE("intelliport2.bin");
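The ip2 change above keeps the __setup("ip2=", ...) hook for built-in use and additionally exposes the same option string to modprobe via module_param_string(), which ip2_loadmain() then feeds to the existing ip2_setup() parser. A minimal self-contained sketch of the module_param_string() mechanism, with made-up names (demo, demo_cmd), not taken from the patch:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#define DEMO_CMD_LEN 50
static char demo_cmd[DEMO_CMD_LEN];

/* copies a "demo=..." option into demo_cmd[] at module load time */
module_param_string(demo, demo_cmd, DEMO_CMD_LEN, 0444);
MODULE_PARM_DESC(demo, "Option string parsed at load time");

static int __init demo_init(void)
{
	if (demo_cmd[0])
		pr_info("demo parameter: %s\n", demo_cmd);
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");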
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 300d5bd6cd06..be2e8f9a27c3 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -113,6 +113,8 @@
  *	64-bit verification
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/firmware.h>
 #include <linux/kernel.h>
@@ -140,7 +142,6 @@
 #define InterruptTheCard(base) outw(0, (base) + 0xc)
 #define ClearInterrupt(base) inw((base) + 0x0a)
 
-#define pr_dbg(str...) pr_debug("ISICOM: " str)
 #ifdef DEBUG
 #define isicom_paranoia_check(a, b, c) __isicom_paranoia_check((a), (b), (c))
 #else
@@ -249,8 +250,7 @@ static int lock_card(struct isi_board *card)
 		spin_unlock_irqrestore(&card->card_lock, card->flags);
 		msleep(10);
 	}
-	printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n",
-		card->base);
+	pr_warning("Failed to lock Card (0x%lx)\n", card->base);
 
 	return 0;	/* Failed to acquire the card! */
 }
@@ -379,13 +379,13 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
 	char *name, const char *routine)
 {
 	if (!port) {
-		printk(KERN_WARNING "ISICOM: Warning: bad isicom magic for "
-			"dev %s in %s.\n", name, routine);
+		pr_warning("Warning: bad isicom magic for dev %s in %s.\n",
+			   name, routine);
 		return 1;
 	}
 	if (port->magic != ISICOM_MAGIC) {
-		printk(KERN_WARNING "ISICOM: Warning: NULL isicom port for "
-			"dev %s in %s.\n", name, routine);
+		pr_warning("Warning: NULL isicom port for dev %s in %s.\n",
+			   name, routine);
 		return 1;
 	}
 
@@ -450,8 +450,8 @@ static void isicom_tx(unsigned long _data)
 		if (!(inw(base + 0x02) & (1 << port->channel)))
 			continue;
 
-		pr_dbg("txing %d bytes, port%d.\n", txcount,
-			port->channel + 1);
+		pr_debug("txing %d bytes, port%d.\n",
+			 txcount, port->channel + 1);
 		outw((port->channel << isi_card[card].shift_count) | txcount,
 			base);
 		residue = NO;
@@ -547,8 +547,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
 	byte_count = header & 0xff;
 
 	if (channel + 1 > card->port_count) {
-		printk(KERN_WARNING "ISICOM: isicom_interrupt(0x%lx): "
-			"%d(channel) > port_count.\n", base, channel+1);
+		pr_warning("%s(0x%lx): %d(channel) > port_count.\n",
+			   __func__, base, channel+1);
 		outw(0x0000, base+0x04); /* enable interrupts */
 		spin_unlock(&card->card_lock);
 		return IRQ_HANDLED;
@@ -582,14 +582,15 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
 			if (port->status & ISI_DCD) {
 				if (!(header & ISI_DCD)) {
 					/* Carrier has been lost */
-					pr_dbg("interrupt: DCD->low.\n"
-					);
+					pr_debug("%s: DCD->low.\n",
+						 __func__);
 					port->status &= ~ISI_DCD;
 					tty_hangup(tty);
 				}
 			} else if (header & ISI_DCD) {
 				/* Carrier has been detected */
-				pr_dbg("interrupt: DCD->high.\n");
+				pr_debug("%s: DCD->high.\n",
+					 __func__);
 				port->status |= ISI_DCD;
 				wake_up_interruptible(&port->port.open_wait);
 			}
@@ -641,17 +642,19 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
 				break;
 
 			case 2:	/* Statistics */
-				pr_dbg("isicom_interrupt: stats!!!.\n");
+				pr_debug("%s: stats!!!\n", __func__);
 				break;
 
 			default:
-				pr_dbg("Intr: Unknown code in status packet.\n");
+				pr_debug("%s: Unknown code in status packet.\n",
+					 __func__);
 				break;
 			}
 		} else {				/* Data Packet */
 
 			count = tty_prepare_flip_string(tty, &rp, byte_count & ~1);
-			pr_dbg("Intr: Can rx %d of %d bytes.\n", count, byte_count);
+			pr_debug("%s: Can rx %d of %d bytes.\n",
+				 __func__, count, byte_count);
 			word_count = count >> 1;
 			insw(base, rp, word_count);
 			byte_count -= (word_count << 1);
@@ -661,8 +664,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
 				byte_count -= 2;
 			}
 			if (byte_count > 0) {
-				pr_dbg("Intr(0x%lx:%d): Flip buffer overflow! dropping "
-					"bytes...\n", base, channel + 1);
+				pr_debug("%s(0x%lx:%d): Flip buffer overflow! dropping bytes...\n",
+					 __func__, base, channel + 1);
 				/* drain out unread xtra data */
 				while (byte_count > 0) {
 					inw(base);
@@ -888,8 +891,8 @@ static void isicom_shutdown_port(struct isi_port *port)
 	struct isi_board *card = port->card;
 
 	if (--card->count < 0) {
-		pr_dbg("isicom_shutdown_port: bad board(0x%lx) count %d.\n",
-			card->base, card->count);
+		pr_debug("%s: bad board(0x%lx) count %d.\n",
+			 __func__, card->base, card->count);
 		card->count = 0;
 	}
 	/* last port was closed, shutdown that board too */
@@ -1681,13 +1684,13 @@ static int __init isicom_init(void)
 
 	retval = tty_register_driver(isicom_normal);
 	if (retval) {
-		pr_dbg("Couldn't register the dialin driver\n");
+		pr_debug("Couldn't register the dialin driver\n");
 		goto err_puttty;
 	}
 
 	retval = pci_register_driver(&isicom_driver);
 	if (retval < 0) {
-		printk(KERN_ERR "ISICOM: Unable to register pci driver.\n");
+		pr_err("Unable to register pci driver.\n");
 		goto err_unrtty;
 	}
 
@@ -1717,3 +1720,8 @@ module_exit(isicom_exit);
 MODULE_AUTHOR("MultiTech");
 MODULE_DESCRIPTION("Driver for the ISI series of cards by MultiTech");
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("isi608.bin");
+MODULE_FIRMWARE("isi608em.bin");
+MODULE_FIRMWARE("isi616em.bin");
+MODULE_FIRMWARE("isi4608.bin");
+MODULE_FIRMWARE("isi4616.bin");
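The isicom conversion above leans on the kernel's pr_fmt() convention: once pr_fmt() is defined before the first include, every pr_debug()/pr_warning()/pr_err() in the file is prefixed with the module name automatically, which is why the hand-rolled "ISICOM: " prefixes and the local pr_dbg() macro could be dropped. An illustrative sketch only (demo_warn is a made-up name, not from the patch):

/* must be defined before the first #include so printk.h picks it up */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static void demo_warn(unsigned long base)
{
	/* emitted as e.g. "isicom: Failed to lock Card (0xd000)" */
	pr_warning("Failed to lock Card (0x%lx)\n", base);
}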
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 63ee3bbc1ce4..166495d6a1d7 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -164,24 +164,25 @@ static unsigned int moxaFuncTout = HZ / 2;
 static unsigned int moxaLowWaterChk;
 static DEFINE_MUTEX(moxa_openlock);
 static DEFINE_SPINLOCK(moxa_lock);
-/* Variables for insmod */
-#ifdef MODULE
+
 static unsigned long baseaddr[MAX_BOARDS];
 static unsigned int type[MAX_BOARDS];
 static unsigned int numports[MAX_BOARDS];
-#endif
 
 MODULE_AUTHOR("William Chen");
 MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver");
 MODULE_LICENSE("GPL");
-#ifdef MODULE
+MODULE_FIRMWARE("c218tunx.cod");
+MODULE_FIRMWARE("cp204unx.cod");
+MODULE_FIRMWARE("c320tunx.cod");
+
 module_param_array(type, uint, NULL, 0);
 MODULE_PARM_DESC(type, "card type: C218=2, C320=4");
 module_param_array(baseaddr, ulong, NULL, 0);
 MODULE_PARM_DESC(baseaddr, "base address");
 module_param_array(numports, uint, NULL, 0);
 MODULE_PARM_DESC(numports, "numports (ignored for C218)");
-#endif
+
 module_param(ttymajor, int, 0);
 
 /*
@@ -1024,6 +1025,8 @@ static int __init moxa_init(void)
 {
 	unsigned int isabrds = 0;
 	int retval = 0;
+	struct moxa_board_conf *brd = moxa_boards;
+	unsigned int i;
 
 	printk(KERN_INFO "MOXA Intellio family driver version %s\n",
 			MOXA_VERSION);
@@ -1051,10 +1054,7 @@ static int __init moxa_init(void)
 	}
 
 	/* Find the boards defined from module args. */
-#ifdef MODULE
-	{
-		struct moxa_board_conf *brd = moxa_boards;
-		unsigned int i;
+
 	for (i = 0; i < MAX_BOARDS; i++) {
 		if (!baseaddr[i])
 			break;
@@ -1087,8 +1087,6 @@ static int __init moxa_init(void)
 			isabrds++;
 		}
 	}
-	}
-#endif
 
 #ifdef CONFIG_PCI
 	retval = pci_register_driver(&moxa_pci_driver);
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 3d923065d9a2..e0c5d2a69046 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -895,8 +895,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
 	if (inb(info->ioaddr + UART_LSR) == 0xff) {
 		spin_unlock_irqrestore(&info->slock, flags);
 		if (capable(CAP_SYS_ADMIN)) {
-			if (tty)
-				set_bit(TTY_IO_ERROR, &tty->flags);
+			set_bit(TTY_IO_ERROR, &tty->flags);
 			return 0;
 		} else
 			return -ENODEV;
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index 2ad7d37afbd0..a3f32a15fde4 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -136,10 +136,6 @@ static int debug;
136#define RECEIVE_BUF_MAX 4 136#define RECEIVE_BUF_MAX 4
137 137
138 138
139/* Define all types of vendors and devices to support */
140#define VENDOR1 0x1931 /* Vendor Option */
141#define DEVICE1 0x000c /* HSDPA card */
142
143#define R_IIR 0x0000 /* Interrupt Identity Register */ 139#define R_IIR 0x0000 /* Interrupt Identity Register */
144#define R_FCR 0x0000 /* Flow Control Register */ 140#define R_FCR 0x0000 /* Flow Control Register */
145#define R_IER 0x0004 /* Interrupt Enable Register */ 141#define R_IER 0x0004 /* Interrupt Enable Register */
@@ -371,6 +367,8 @@ struct port {
371 struct mutex tty_sem; 367 struct mutex tty_sem;
372 wait_queue_head_t tty_wait; 368 wait_queue_head_t tty_wait;
373 struct async_icount tty_icount; 369 struct async_icount tty_icount;
370
371 struct nozomi *dc;
374}; 372};
375 373
376/* Private data one for each card in the system */ 374/* Private data one for each card in the system */
@@ -405,7 +403,7 @@ struct buffer {
405 403
406/* Global variables */ 404/* Global variables */
407static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = { 405static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = {
408 {PCI_DEVICE(VENDOR1, DEVICE1)}, 406 {PCI_DEVICE(0x1931, 0x000c)}, /* Nozomi HSDPA */
409 {}, 407 {},
410}; 408};
411 409
@@ -414,6 +412,8 @@ MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl);
414static struct nozomi *ndevs[NOZOMI_MAX_CARDS]; 412static struct nozomi *ndevs[NOZOMI_MAX_CARDS];
415static struct tty_driver *ntty_driver; 413static struct tty_driver *ntty_driver;
416 414
415static const struct tty_port_operations noz_tty_port_ops;
416
417/* 417/*
418 * find card by tty_index 418 * find card by tty_index
419 */ 419 */
@@ -853,8 +853,6 @@ static int receive_data(enum port_type index, struct nozomi *dc)
853 goto put; 853 goto put;
854 } 854 }
855 855
856 tty_buffer_request_room(tty, size);
857
858 while (size > 0) { 856 while (size > 0) {
859 read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX); 857 read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX);
860 858
@@ -1473,9 +1471,11 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
1473 1471
1474 for (i = 0; i < MAX_PORT; i++) { 1472 for (i = 0; i < MAX_PORT; i++) {
1475 struct device *tty_dev; 1473 struct device *tty_dev;
1476 1474 struct port *port = &dc->port[i];
1477 mutex_init(&dc->port[i].tty_sem); 1475 port->dc = dc;
1478 tty_port_init(&dc->port[i].port); 1476 mutex_init(&port->tty_sem);
1477 tty_port_init(&port->port);
1478 port->port.ops = &noz_tty_port_ops;
1479 tty_dev = tty_register_device(ntty_driver, dc->index_start + i, 1479 tty_dev = tty_register_device(ntty_driver, dc->index_start + i,
1480 &pdev->dev); 1480 &pdev->dev);
1481 1481
@@ -1600,67 +1600,74 @@ static void set_dtr(const struct tty_struct *tty, int dtr)
1600 * ---------------------------------------------------------------------------- 1600 * ----------------------------------------------------------------------------
1601 */ 1601 */
1602 1602
1603/* Called when the userspace process opens the tty, /dev/noz*. */ 1603static int ntty_install(struct tty_driver *driver, struct tty_struct *tty)
1604static int ntty_open(struct tty_struct *tty, struct file *file)
1605{ 1604{
1606 struct port *port = get_port_by_tty(tty); 1605 struct port *port = get_port_by_tty(tty);
1607 struct nozomi *dc = get_dc_by_tty(tty); 1606 struct nozomi *dc = get_dc_by_tty(tty);
1608 unsigned long flags; 1607 int ret;
1609
1610 if (!port || !dc || dc->state != NOZOMI_STATE_READY) 1608 if (!port || !dc || dc->state != NOZOMI_STATE_READY)
1611 return -ENODEV; 1609 return -ENODEV;
1612 1610 ret = tty_init_termios(tty);
1613 if (mutex_lock_interruptible(&port->tty_sem)) 1611 if (ret == 0) {
1614 return -ERESTARTSYS; 1612 tty_driver_kref_get(driver);
1615 1613 driver->ttys[tty->index] = tty;
1616 port->port.count++;
1617 dc->open_ttys++;
1618
1619 /* Enable interrupt downlink for channel */
1620 if (port->port.count == 1) {
1621 tty->driver_data = port;
1622 tty_port_tty_set(&port->port, tty);
1623 DBG1("open: %d", port->token_dl);
1624 spin_lock_irqsave(&dc->spin_mutex, flags);
1625 dc->last_ier = dc->last_ier | port->token_dl;
1626 writew(dc->last_ier, dc->reg_ier);
1627 spin_unlock_irqrestore(&dc->spin_mutex, flags);
1628 } 1614 }
1629 mutex_unlock(&port->tty_sem); 1615 return ret;
1630 return 0;
1631} 1616}
1632 1617
1633/* Called when the userspace process close the tty, /dev/noz*. Also 1618static void ntty_cleanup(struct tty_struct *tty)
1634 called immediately if ntty_open fails in which case tty->driver_data 1619{
1635 will be NULL and we exit by the first return */ 1620 tty->driver_data = NULL;
1621}
1636 1622
1637static void ntty_close(struct tty_struct *tty, struct file *file) 1623static int ntty_activate(struct tty_port *tport, struct tty_struct *tty)
1638{ 1624{
1639 struct nozomi *dc = get_dc_by_tty(tty); 1625 struct port *port = container_of(tport, struct port, port);
1640 struct port *nport = tty->driver_data; 1626 struct nozomi *dc = port->dc;
1641 struct tty_port *port = &nport->port;
1642 unsigned long flags; 1627 unsigned long flags;
1643 1628
1644 if (!dc || !nport) 1629 DBG1("open: %d", port->token_dl);
1645 return; 1630 spin_lock_irqsave(&dc->spin_mutex, flags);
1631 dc->last_ier = dc->last_ier | port->token_dl;
1632 writew(dc->last_ier, dc->reg_ier);
1633 dc->open_ttys++;
1634 spin_unlock_irqrestore(&dc->spin_mutex, flags);
1635 printk("noz: activated %d: %p\n", tty->index, tport);
1636 return 0;
1637}
1646 1638
1647 /* Users cannot interrupt a close */ 1639static int ntty_open(struct tty_struct *tty, struct file *filp)
1648 mutex_lock(&nport->tty_sem); 1640{
1641 struct port *port = get_port_by_tty(tty);
1642 return tty_port_open(&port->port, tty, filp);
1643}
1649 1644
1650 WARN_ON(!port->count); 1645static void ntty_shutdown(struct tty_port *tport)
1646{
1647 struct port *port = container_of(tport, struct port, port);
1648 struct nozomi *dc = port->dc;
1649 unsigned long flags;
1651 1650
1651 DBG1("close: %d", port->token_dl);
1652 spin_lock_irqsave(&dc->spin_mutex, flags);
1653 dc->last_ier &= ~(port->token_dl);
1654 writew(dc->last_ier, dc->reg_ier);
1652 dc->open_ttys--; 1655 dc->open_ttys--;
1653 port->count--; 1656 spin_unlock_irqrestore(&dc->spin_mutex, flags);
1657 printk("noz: shutdown %p\n", tport);
1658}
1654 1659
1655 if (port->count == 0) { 1660static void ntty_close(struct tty_struct *tty, struct file *filp)
1656 DBG1("close: %d", nport->token_dl); 1661{
1657 tty_port_tty_set(port, NULL); 1662 struct port *port = tty->driver_data;
1658 spin_lock_irqsave(&dc->spin_mutex, flags); 1663 if (port)
1659 dc->last_ier &= ~(nport->token_dl); 1664 tty_port_close(&port->port, tty, filp);
1660 writew(dc->last_ier, dc->reg_ier); 1665}
1661 spin_unlock_irqrestore(&dc->spin_mutex, flags); 1666
1662 } 1667static void ntty_hangup(struct tty_struct *tty)
1663 mutex_unlock(&nport->tty_sem); 1668{
1669 struct port *port = tty->driver_data;
1670 tty_port_hangup(&port->port);
1664} 1671}
1665 1672
1666/* 1673/*
@@ -1680,15 +1687,7 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
1680 if (!dc || !port) 1687 if (!dc || !port)
1681 return -ENODEV; 1688 return -ENODEV;
1682 1689
1683 if (unlikely(!mutex_trylock(&port->tty_sem))) { 1690 mutex_lock(&port->tty_sem);
1684 /*
1685 * must test lock as tty layer wraps calls
1686 * to this function with BKL
1687 */
1688 dev_err(&dc->pdev->dev, "Would have deadlocked - "
1689 "return EAGAIN\n");
1690 return -EAGAIN;
1691 }
1692 1691
1693 if (unlikely(!port->port.count)) { 1692 if (unlikely(!port->port.count)) {
1694 DBG1(" "); 1693 DBG1(" ");
@@ -1728,25 +1727,23 @@ exit:
1728 * This method is called by the upper tty layer. 1727 * This method is called by the upper tty layer.
1729 * #according to sources N_TTY.c it expects a value >= 0 and 1728 * #according to sources N_TTY.c it expects a value >= 0 and
1730 * does not check for negative values. 1729 * does not check for negative values.
1730 *
1731 * If the port is unplugged report lots of room and let the bits
1732 * dribble away so we don't block anything.
1731 */ 1733 */
1732static int ntty_write_room(struct tty_struct *tty) 1734static int ntty_write_room(struct tty_struct *tty)
1733{ 1735{
1734 struct port *port = tty->driver_data; 1736 struct port *port = tty->driver_data;
1735 int room = 0; 1737 int room = 4096;
1736 const struct nozomi *dc = get_dc_by_tty(tty); 1738 const struct nozomi *dc = get_dc_by_tty(tty);
1737 1739
1738 if (!dc || !port) 1740 if (dc) {
1739 return 0; 1741 mutex_lock(&port->tty_sem);
1740 if (!mutex_trylock(&port->tty_sem)) 1742 if (port->port.count)
1741 return 0; 1743 room = port->fifo_ul.size -
1742 1744 kfifo_len(&port->fifo_ul);
1743 if (!port->port.count) 1745 mutex_unlock(&port->tty_sem);
1744 goto exit; 1746 }
1745
1746 room = port->fifo_ul.size - kfifo_len(&port->fifo_ul);
1747
1748exit:
1749 mutex_unlock(&port->tty_sem);
1750 return room; 1747 return room;
1751} 1748}
1752 1749
@@ -1906,10 +1903,16 @@ exit_in_buffer:
1906 return rval; 1903 return rval;
1907} 1904}
1908 1905
1906static const struct tty_port_operations noz_tty_port_ops = {
1907 .activate = ntty_activate,
1908 .shutdown = ntty_shutdown,
1909};
1910
1909static const struct tty_operations tty_ops = { 1911static const struct tty_operations tty_ops = {
1910 .ioctl = ntty_ioctl, 1912 .ioctl = ntty_ioctl,
1911 .open = ntty_open, 1913 .open = ntty_open,
1912 .close = ntty_close, 1914 .close = ntty_close,
1915 .hangup = ntty_hangup,
1913 .write = ntty_write, 1916 .write = ntty_write,
1914 .write_room = ntty_write_room, 1917 .write_room = ntty_write_room,
1915 .unthrottle = ntty_unthrottle, 1918 .unthrottle = ntty_unthrottle,
@@ -1917,6 +1920,8 @@ static const struct tty_operations tty_ops = {
1917 .chars_in_buffer = ntty_chars_in_buffer, 1920 .chars_in_buffer = ntty_chars_in_buffer,
1918 .tiocmget = ntty_tiocmget, 1921 .tiocmget = ntty_tiocmget,
1919 .tiocmset = ntty_tiocmset, 1922 .tiocmset = ntty_tiocmset,
1923 .install = ntty_install,
1924 .cleanup = ntty_cleanup,
1920}; 1925};
1921 1926
1922/* Module initialization */ 1927/* Module initialization */
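
The nozomi changes above follow the generic tty_port conversion: per-port open/close bookkeeping moves into the tty_port core, and the driver only supplies .activate/.shutdown callbacks that touch the hardware on first open and last close. A minimal sketch of that pattern for the tty_struct-based API of this kernel series, using hypothetical foo_* names rather than the nozomi code:

#include <linux/kernel.h>
#include <linux/tty.h>

#define FOO_NR_PORTS	4

struct foo_port {
	struct tty_port port;		/* embedded tty_port state */
	bool hw_enabled;		/* stands in for real register writes */
};

static struct foo_port foo_ports[FOO_NR_PORTS];

static int foo_activate(struct tty_port *tport, struct tty_struct *tty)
{
	struct foo_port *fp = container_of(tport, struct foo_port, port);

	/* called once, on the 0 -> 1 open transition: enable the hardware */
	fp->hw_enabled = true;
	return 0;
}

static void foo_shutdown(struct tty_port *tport)
{
	struct foo_port *fp = container_of(tport, struct foo_port, port);

	/* called once, after the last close: disable the hardware */
	fp->hw_enabled = false;
}

static const struct tty_port_operations foo_port_ops = {
	.activate = foo_activate,
	.shutdown = foo_shutdown,
};

/* at probe time: tty_port_init(&fp->port); fp->port.ops = &foo_port_ops; */

static int foo_open(struct tty_struct *tty, struct file *filp)
{
	struct foo_port *fp = &foo_ports[tty->index];	/* index < FOO_NR_PORTS */

	tty->driver_data = fp;
	/* tty_port_open() does the counting and calls .activate when needed */
	return tty_port_open(&fp->port, tty, filp);
}

static void foo_close(struct tty_struct *tty, struct file *filp)
{
	struct foo_port *fp = tty->driver_data;

	if (fp)
		tty_port_close(&fp->port, tty, filp);
}

With this split, the hangup path can simply call tty_port_hangup(), and the count/interrupt-enable logic that each driver used to open-code lives in one place.
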
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 452370af95de..986aa606a6b6 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -658,8 +658,7 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
658 info->mon.char_max = char_count; 658 info->mon.char_max = char_count;
659 info->mon.char_last = char_count; 659 info->mon.char_last = char_count;
660#endif 660#endif
661 len = tty_buffer_request_room(tty, char_count); 661 while (char_count--) {
662 while (len--) {
663 data = base_addr[CyRDR]; 662 data = base_addr[CyRDR];
664 tty_insert_flip_char(tty, data, TTY_NORMAL); 663 tty_insert_flip_char(tty, data, TTY_NORMAL);
665#ifdef CYCLOM_16Y_HACK 664#ifdef CYCLOM_16Y_HACK
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 268e17f9ec3f..07ac14d949ce 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -646,8 +646,6 @@ static void sx_receive(struct specialix_board *bp)
646 dprintk(SX_DEBUG_RX, "port: %p: count: %d\n", port, count); 646 dprintk(SX_DEBUG_RX, "port: %p: count: %d\n", port, count);
647 port->hits[count > 8 ? 9 : count]++; 647 port->hits[count > 8 ? 9 : count]++;
648 648
649 tty_buffer_request_room(tty, count);
650
651 while (count--) 649 while (count--)
652 tty_insert_flip_char(tty, sx_in(bp, CD186x_RDR), TTY_NORMAL); 650 tty_insert_flip_char(tty, sx_in(bp, CD186x_RDR), TTY_NORMAL);
653 tty_flip_buffer_push(tty); 651 tty_flip_buffer_push(tty);
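
Both hunks above drop the advance tty_buffer_request_room() call: tty_insert_flip_char() and the tty_insert_flip_string*() helpers already grow the flip buffer as needed, so pre-requesting the space only duplicated work. A small illustrative receive path under that assumption (hypothetical foo_* helper, tty_struct-based flip API of this series, with tty_insert_flip_string() kept as the usual TTY_NORMAL wrapper):

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void foo_push_rx(struct tty_struct *tty,
			const unsigned char *data, size_t count)
{
	/* queues as many bytes as the flip buffers can hold and
	 * returns how many were actually copied */
	int copied = tty_insert_flip_string(tty, data, count);

	if (copied)
		tty_flip_buffer_push(tty);
}
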
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 4846b73ef28d..0658fc548222 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -2031,7 +2031,7 @@ static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2031 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char")) 2031 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2032 return 0; 2032 return 0;
2033 2033
2034 if (!tty || !info->xmit_buf) 2034 if (!info->xmit_buf)
2035 return 0; 2035 return 0;
2036 2036
2037 spin_lock_irqsave(&info->irq_spinlock, flags); 2037 spin_lock_irqsave(&info->irq_spinlock, flags);
@@ -2121,7 +2121,7 @@ static int mgsl_write(struct tty_struct * tty,
2121 if (mgsl_paranoia_check(info, tty->name, "mgsl_write")) 2121 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2122 goto cleanup; 2122 goto cleanup;
2123 2123
2124 if (!tty || !info->xmit_buf) 2124 if (!info->xmit_buf)
2125 goto cleanup; 2125 goto cleanup;
2126 2126
2127 if ( info->params.mode == MGSL_MODE_HDLC || 2127 if ( info->params.mode == MGSL_MODE_HDLC ||
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 8678f0c8699d..4561ce2fba6d 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -468,7 +468,7 @@ static unsigned int free_tbuf_count(struct slgt_info *info);
468static unsigned int tbuf_bytes(struct slgt_info *info); 468static unsigned int tbuf_bytes(struct slgt_info *info);
469static void reset_tbufs(struct slgt_info *info); 469static void reset_tbufs(struct slgt_info *info);
470static void tdma_reset(struct slgt_info *info); 470static void tdma_reset(struct slgt_info *info);
471static void tx_load(struct slgt_info *info, const char *buf, unsigned int count); 471static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
472 472
473static void get_signals(struct slgt_info *info); 473static void get_signals(struct slgt_info *info);
474static void set_signals(struct slgt_info *info); 474static void set_signals(struct slgt_info *info);
@@ -813,59 +813,32 @@ static int write(struct tty_struct *tty,
813 int ret = 0; 813 int ret = 0;
814 struct slgt_info *info = tty->driver_data; 814 struct slgt_info *info = tty->driver_data;
815 unsigned long flags; 815 unsigned long flags;
816 unsigned int bufs_needed;
817 816
818 if (sanity_check(info, tty->name, "write")) 817 if (sanity_check(info, tty->name, "write"))
819 goto cleanup; 818 return -EIO;
819
820 DBGINFO(("%s write count=%d\n", info->device_name, count)); 820 DBGINFO(("%s write count=%d\n", info->device_name, count));
821 821
822 if (!info->tx_buf) 822 if (!info->tx_buf || (count > info->max_frame_size))
823 goto cleanup; 823 return -EIO;
824 824
825 if (count > info->max_frame_size) { 825 if (!count || tty->stopped || tty->hw_stopped)
826 ret = -EIO; 826 return 0;
827 goto cleanup;
828 }
829 827
830 if (!count) 828 spin_lock_irqsave(&info->lock, flags);
831 goto cleanup;
832 829
833 if (!info->tx_active && info->tx_count) { 830 if (info->tx_count) {
834 /* send accumulated data from send_char() */ 831 /* send accumulated data from send_char() */
835 tx_load(info, info->tx_buf, info->tx_count); 832 if (!tx_load(info, info->tx_buf, info->tx_count))
836 goto start; 833 goto cleanup;
834 info->tx_count = 0;
837 } 835 }
838 bufs_needed = (count/DMABUFSIZE);
839 if (count % DMABUFSIZE)
840 ++bufs_needed;
841 if (bufs_needed > free_tbuf_count(info))
842 goto cleanup;
843 836
844 ret = info->tx_count = count; 837 if (tx_load(info, buf, count))
845 tx_load(info, buf, count); 838 ret = count;
846 goto start;
847
848start:
849 if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
850 spin_lock_irqsave(&info->lock,flags);
851 if (!info->tx_active)
852 tx_start(info);
853 else if (!(rd_reg32(info, TDCSR) & BIT0)) {
854 /* transmit still active but transmit DMA stopped */
855 unsigned int i = info->tbuf_current;
856 if (!i)
857 i = info->tbuf_count;
858 i--;
859 /* if DMA buf unsent must try later after tx idle */
860 if (desc_count(info->tbufs[i]))
861 ret = 0;
862 }
863 if (ret > 0)
864 update_tx_timer(info);
865 spin_unlock_irqrestore(&info->lock,flags);
866 }
867 839
868cleanup: 840cleanup:
841 spin_unlock_irqrestore(&info->lock, flags);
869 DBGINFO(("%s write rc=%d\n", info->device_name, ret)); 842 DBGINFO(("%s write rc=%d\n", info->device_name, ret));
870 return ret; 843 return ret;
871} 844}
@@ -882,7 +855,7 @@ static int put_char(struct tty_struct *tty, unsigned char ch)
882 if (!info->tx_buf) 855 if (!info->tx_buf)
883 return 0; 856 return 0;
884 spin_lock_irqsave(&info->lock,flags); 857 spin_lock_irqsave(&info->lock,flags);
885 if (!info->tx_active && (info->tx_count < info->max_frame_size)) { 858 if (info->tx_count < info->max_frame_size) {
886 info->tx_buf[info->tx_count++] = ch; 859 info->tx_buf[info->tx_count++] = ch;
887 ret = 1; 860 ret = 1;
888 } 861 }
@@ -981,10 +954,8 @@ static void flush_chars(struct tty_struct *tty)
981 DBGINFO(("%s flush_chars start transmit\n", info->device_name)); 954 DBGINFO(("%s flush_chars start transmit\n", info->device_name));
982 955
983 spin_lock_irqsave(&info->lock,flags); 956 spin_lock_irqsave(&info->lock,flags);
984 if (!info->tx_active && info->tx_count) { 957 if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
985 tx_load(info, info->tx_buf,info->tx_count); 958 info->tx_count = 0;
986 tx_start(info);
987 }
988 spin_unlock_irqrestore(&info->lock,flags); 959 spin_unlock_irqrestore(&info->lock,flags);
989} 960}
990 961
@@ -997,10 +968,9 @@ static void flush_buffer(struct tty_struct *tty)
997 return; 968 return;
998 DBGINFO(("%s flush_buffer\n", info->device_name)); 969 DBGINFO(("%s flush_buffer\n", info->device_name));
999 970
1000 spin_lock_irqsave(&info->lock,flags); 971 spin_lock_irqsave(&info->lock, flags);
1001 if (!info->tx_active) 972 info->tx_count = 0;
1002 info->tx_count = 0; 973 spin_unlock_irqrestore(&info->lock, flags);
1003 spin_unlock_irqrestore(&info->lock,flags);
1004 974
1005 tty_wakeup(tty); 975 tty_wakeup(tty);
1006} 976}
@@ -1033,12 +1003,10 @@ static void tx_release(struct tty_struct *tty)
1033 if (sanity_check(info, tty->name, "tx_release")) 1003 if (sanity_check(info, tty->name, "tx_release"))
1034 return; 1004 return;
1035 DBGINFO(("%s tx_release\n", info->device_name)); 1005 DBGINFO(("%s tx_release\n", info->device_name));
1036 spin_lock_irqsave(&info->lock,flags); 1006 spin_lock_irqsave(&info->lock, flags);
1037 if (!info->tx_active && info->tx_count) { 1007 if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
1038 tx_load(info, info->tx_buf, info->tx_count); 1008 info->tx_count = 0;
1039 tx_start(info); 1009 spin_unlock_irqrestore(&info->lock, flags);
1040 }
1041 spin_unlock_irqrestore(&info->lock,flags);
1042} 1010}
1043 1011
1044/* 1012/*
@@ -1506,27 +1474,25 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
1506 1474
1507 DBGINFO(("%s hdlc_xmit\n", dev->name)); 1475 DBGINFO(("%s hdlc_xmit\n", dev->name));
1508 1476
1477 if (!skb->len)
1478 return NETDEV_TX_OK;
1479
1509 /* stop sending until this frame completes */ 1480 /* stop sending until this frame completes */
1510 netif_stop_queue(dev); 1481 netif_stop_queue(dev);
1511 1482
1512 /* copy data to device buffers */
1513 info->tx_count = skb->len;
1514 tx_load(info, skb->data, skb->len);
1515
1516 /* update network statistics */ 1483 /* update network statistics */
1517 dev->stats.tx_packets++; 1484 dev->stats.tx_packets++;
1518 dev->stats.tx_bytes += skb->len; 1485 dev->stats.tx_bytes += skb->len;
1519 1486
1520 /* done with socket buffer, so free it */
1521 dev_kfree_skb(skb);
1522
1523 /* save start time for transmit timeout detection */ 1487 /* save start time for transmit timeout detection */
1524 dev->trans_start = jiffies; 1488 dev->trans_start = jiffies;
1525 1489
1526 spin_lock_irqsave(&info->lock,flags); 1490 spin_lock_irqsave(&info->lock, flags);
1527 tx_start(info); 1491 tx_load(info, skb->data, skb->len);
1528 update_tx_timer(info); 1492 spin_unlock_irqrestore(&info->lock, flags);
1529 spin_unlock_irqrestore(&info->lock,flags); 1493
1494 /* done with socket buffer, so free it */
1495 dev_kfree_skb(skb);
1530 1496
1531 return NETDEV_TX_OK; 1497 return NETDEV_TX_OK;
1532} 1498}
@@ -2180,7 +2146,7 @@ static void isr_serial(struct slgt_info *info)
2180 2146
2181 if (info->params.mode == MGSL_MODE_ASYNC) { 2147 if (info->params.mode == MGSL_MODE_ASYNC) {
2182 if (status & IRQ_TXIDLE) { 2148 if (status & IRQ_TXIDLE) {
2183 if (info->tx_count) 2149 if (info->tx_active)
2184 isr_txeom(info, status); 2150 isr_txeom(info, status);
2185 } 2151 }
2186 if (info->rx_pio && (status & IRQ_RXDATA)) 2152 if (info->rx_pio && (status & IRQ_RXDATA))
@@ -2276,13 +2242,42 @@ static void isr_tdma(struct slgt_info *info)
2276 } 2242 }
2277} 2243}
2278 2244
2245/*
2246 * return true if there are unsent tx DMA buffers, otherwise false
2247 *
2248 * if there are unsent buffers then info->tbuf_start
2249 * is set to index of first unsent buffer
2250 */
2251static bool unsent_tbufs(struct slgt_info *info)
2252{
2253 unsigned int i = info->tbuf_current;
2254 bool rc = false;
2255
2256 /*
2257 * search backwards from last loaded buffer (precedes tbuf_current)
2258 * for first unsent buffer (desc_count > 0)
2259 */
2260
2261 do {
2262 if (i)
2263 i--;
2264 else
2265 i = info->tbuf_count - 1;
2266 if (!desc_count(info->tbufs[i]))
2267 break;
2268 info->tbuf_start = i;
2269 rc = true;
2270 } while (i != info->tbuf_current);
2271
2272 return rc;
2273}
2274
2279static void isr_txeom(struct slgt_info *info, unsigned short status) 2275static void isr_txeom(struct slgt_info *info, unsigned short status)
2280{ 2276{
2281 DBGISR(("%s txeom status=%04x\n", info->device_name, status)); 2277 DBGISR(("%s txeom status=%04x\n", info->device_name, status));
2282 2278
2283 slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER); 2279 slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
2284 tdma_reset(info); 2280 tdma_reset(info);
2285 reset_tbufs(info);
2286 if (status & IRQ_TXUNDER) { 2281 if (status & IRQ_TXUNDER) {
2287 unsigned short val = rd_reg16(info, TCR); 2282 unsigned short val = rd_reg16(info, TCR);
2288 wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */ 2283 wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
@@ -2297,8 +2292,12 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
2297 info->icount.txok++; 2292 info->icount.txok++;
2298 } 2293 }
2299 2294
2295 if (unsent_tbufs(info)) {
2296 tx_start(info);
2297 update_tx_timer(info);
2298 return;
2299 }
2300 info->tx_active = false; 2300 info->tx_active = false;
2301 info->tx_count = 0;
2302 2301
2303 del_timer(&info->tx_timer); 2302 del_timer(&info->tx_timer);
2304 2303
@@ -3949,7 +3948,7 @@ static void tx_start(struct slgt_info *info)
3949 info->tx_enabled = true; 3948 info->tx_enabled = true;
3950 } 3949 }
3951 3950
3952 if (info->tx_count) { 3951 if (desc_count(info->tbufs[info->tbuf_start])) {
3953 info->drop_rts_on_tx_done = false; 3952 info->drop_rts_on_tx_done = false;
3954 3953
3955 if (info->params.mode != MGSL_MODE_ASYNC) { 3954 if (info->params.mode != MGSL_MODE_ASYNC) {
@@ -4772,25 +4771,36 @@ static unsigned int tbuf_bytes(struct slgt_info *info)
4772} 4771}
4773 4772
4774/* 4773/*
4775 * load transmit DMA buffer(s) with data 4774 * load data into transmit DMA buffer ring and start transmitter if needed
4775 * return true if data accepted, otherwise false (buffers full)
4776 */ 4776 */
4777static void tx_load(struct slgt_info *info, const char *buf, unsigned int size) 4777static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size)
4778{ 4778{
4779 unsigned short count; 4779 unsigned short count;
4780 unsigned int i; 4780 unsigned int i;
4781 struct slgt_desc *d; 4781 struct slgt_desc *d;
4782 4782
4783 if (size == 0) 4783 /* check required buffer space */
4784 return; 4784 if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info))
4785 return false;
4785 4786
4786 DBGDATA(info, buf, size, "tx"); 4787 DBGDATA(info, buf, size, "tx");
4787 4788
4789 /*
4790 * copy data to one or more DMA buffers in circular ring
4791 * tbuf_start = first buffer for this data
4792 * tbuf_current = next free buffer
4793 *
4794 * Copy all data before making data visible to DMA controller by
4795 * setting descriptor count of the first buffer.
4796 * This prevents an active DMA controller from reading the first DMA
4797 * buffers of a frame and stopping before the final buffers are filled.
4798 */
4799
4788 info->tbuf_start = i = info->tbuf_current; 4800 info->tbuf_start = i = info->tbuf_current;
4789 4801
4790 while (size) { 4802 while (size) {
4791 d = &info->tbufs[i]; 4803 d = &info->tbufs[i];
4792 if (++i == info->tbuf_count)
4793 i = 0;
4794 4804
4795 count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size); 4805 count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
4796 memcpy(d->buf, buf, count); 4806 memcpy(d->buf, buf, count);
@@ -4808,11 +4818,27 @@ static void tx_load(struct slgt_info *info, const char *buf, unsigned int size)
4808 else 4818 else
4809 set_desc_eof(*d, 0); 4819 set_desc_eof(*d, 0);
4810 4820
4811 set_desc_count(*d, count); 4821 /* set descriptor count for all but first buffer */
4822 if (i != info->tbuf_start)
4823 set_desc_count(*d, count);
4812 d->buf_count = count; 4824 d->buf_count = count;
4825
4826 if (++i == info->tbuf_count)
4827 i = 0;
4813 } 4828 }
4814 4829
4815 info->tbuf_current = i; 4830 info->tbuf_current = i;
4831
4832 /* set first buffer count to make new data visible to DMA controller */
4833 d = &info->tbufs[info->tbuf_start];
4834 set_desc_count(*d, d->buf_count);
4835
4836 /* start transmitter if needed and update transmit timeout */
4837 if (!info->tx_active)
4838 tx_start(info);
4839 update_tx_timer(info);
4840
4841 return true;
4816} 4842}
4817 4843
4818static int register_test(struct slgt_info *info) 4844static int register_test(struct slgt_info *info)
@@ -4934,9 +4960,7 @@ static int loopback_test(struct slgt_info *info)
4934 spin_lock_irqsave(&info->lock,flags); 4960 spin_lock_irqsave(&info->lock,flags);
4935 async_mode(info); 4961 async_mode(info);
4936 rx_start(info); 4962 rx_start(info);
4937 info->tx_count = count;
4938 tx_load(info, buf, count); 4963 tx_load(info, buf, count);
4939 tx_start(info);
4940 spin_unlock_irqrestore(&info->lock, flags); 4964 spin_unlock_irqrestore(&info->lock, flags);
4941 4965
4942 /* wait for receive complete */ 4966 /* wait for receive complete */
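
The reworked tx_load() above fills every DMA buffer of a frame before it sets the first descriptor's count, so the controller can never start on a partially loaded frame; unsent_tbufs() then lets isr_txeom() resume any ring entries that were loaded while the transmitter was busy. The same "publish the first descriptor last" idea in isolation, with made-up demo_* types, no hardware barriers, and the free-space check assumed to have been done by the caller:

#include <linux/string.h>
#include <linux/types.h>

#define DEMO_BUF_SIZE	256

struct demo_desc {
	unsigned short count;		/* non-zero = owned by the DMA engine */
	unsigned char buf[DEMO_BUF_SIZE];
};

static void demo_ring_load(struct demo_desc *ring, unsigned int ring_size,
			   unsigned int *next, const unsigned char *data,
			   unsigned int len)
{
	unsigned int start = *next, i = start;
	unsigned short first_count = 0;

	if (!len)
		return;

	while (len) {
		unsigned short n = len > DEMO_BUF_SIZE ? DEMO_BUF_SIZE : len;

		memcpy(ring[i].buf, data, n);
		if (i == start)
			first_count = n;	/* defer: engine must not see it yet */
		else
			ring[i].count = n;

		data += n;
		len -= n;
		if (++i == ring_size)
			i = 0;
	}
	*next = i;

	/* all buffers filled; now make the whole frame visible at once */
	ring[start].count = first_count;
}
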
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index 66fa4e10d76b..af8d97715728 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -231,9 +231,10 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size)
231EXPORT_SYMBOL_GPL(tty_buffer_request_room); 231EXPORT_SYMBOL_GPL(tty_buffer_request_room);
232 232
233/** 233/**
234 * tty_insert_flip_string - Add characters to the tty buffer 234 * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
235 * @tty: tty structure 235 * @tty: tty structure
236 * @chars: characters 236 * @chars: characters
237 * @flag: flag value for each character
237 * @size: size 238 * @size: size
238 * 239 *
239 * Queue a series of bytes to the tty buffering. All the characters 240 * Queue a series of bytes to the tty buffering. All the characters
@@ -242,18 +243,19 @@ EXPORT_SYMBOL_GPL(tty_buffer_request_room);
242 * Locking: Called functions may take tty->buf.lock 243 * Locking: Called functions may take tty->buf.lock
243 */ 244 */
244 245
245int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, 246int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
246 size_t size) 247 const unsigned char *chars, char flag, size_t size)
247{ 248{
248 int copied = 0; 249 int copied = 0;
249 do { 250 do {
250 int space = tty_buffer_request_room(tty, size - copied); 251 int goal = min(size - copied, TTY_BUFFER_PAGE);
252 int space = tty_buffer_request_room(tty, goal);
251 struct tty_buffer *tb = tty->buf.tail; 253 struct tty_buffer *tb = tty->buf.tail;
252 /* If there is no space then tb may be NULL */ 254 /* If there is no space then tb may be NULL */
253 if (unlikely(space == 0)) 255 if (unlikely(space == 0))
254 break; 256 break;
255 memcpy(tb->char_buf_ptr + tb->used, chars, space); 257 memcpy(tb->char_buf_ptr + tb->used, chars, space);
256 memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space); 258 memset(tb->flag_buf_ptr + tb->used, flag, space);
257 tb->used += space; 259 tb->used += space;
258 copied += space; 260 copied += space;
259 chars += space; 261 chars += space;
@@ -262,7 +264,7 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
262 } while (unlikely(size > copied)); 264 } while (unlikely(size > copied));
263 return copied; 265 return copied;
264} 266}
265EXPORT_SYMBOL(tty_insert_flip_string); 267EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
266 268
267/** 269/**
268 * tty_insert_flip_string_flags - Add characters to the tty buffer 270 * tty_insert_flip_string_flags - Add characters to the tty buffer
@@ -283,7 +285,8 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
283{ 285{
284 int copied = 0; 286 int copied = 0;
285 do { 287 do {
286 int space = tty_buffer_request_room(tty, size - copied); 288 int goal = min(size - copied, TTY_BUFFER_PAGE);
289 int space = tty_buffer_request_room(tty, goal);
287 struct tty_buffer *tb = tty->buf.tail; 290 struct tty_buffer *tb = tty->buf.tail;
288 /* If there is no space then tb may be NULL */ 291 /* If there is no space then tb may be NULL */
289 if (unlikely(space == 0)) 292 if (unlikely(space == 0))
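
tty_insert_flip_string_fixed_flag() applies one flag value to a whole run of characters, and both copy loops now cap each tty_buffer_request_room() request at TTY_BUFFER_PAGE, so a large write is built from page-sized buffers instead of one oversized allocation. A usage sketch, assuming the matching declaration in tty_flip.h from this series (hypothetical foo_* helper):

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* report a run of bytes that all arrived with the same error condition */
static void foo_push_break_run(struct tty_struct *tty,
			       const unsigned char *data, size_t len)
{
	tty_insert_flip_string_fixed_flag(tty, data, TTY_BREAK, len);
	tty_flip_buffer_push(tty);
}
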
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index 3f653f7d849f..500e740ec5e4 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -706,12 +706,13 @@ static void tty_reset_termios(struct tty_struct *tty)
706/** 706/**
707 * tty_ldisc_reinit - reinitialise the tty ldisc 707 * tty_ldisc_reinit - reinitialise the tty ldisc
708 * @tty: tty to reinit 708 * @tty: tty to reinit
709 * @ldisc: line discipline to reinitialize
709 * 710 *
710 * Switch the tty back to N_TTY line discipline and leave the 711 * Switch the tty to a line discipline and leave the ldisc
711 * ldisc state closed 712 * state closed
712 */ 713 */
713 714
714static void tty_ldisc_reinit(struct tty_struct *tty) 715static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
715{ 716{
716 struct tty_ldisc *ld; 717 struct tty_ldisc *ld;
717 718
@@ -721,10 +722,10 @@ static void tty_ldisc_reinit(struct tty_struct *tty)
721 /* 722 /*
722 * Switch the line discipline back 723 * Switch the line discipline back
723 */ 724 */
724 ld = tty_ldisc_get(N_TTY); 725 ld = tty_ldisc_get(ldisc);
725 BUG_ON(IS_ERR(ld)); 726 BUG_ON(IS_ERR(ld));
726 tty_ldisc_assign(tty, ld); 727 tty_ldisc_assign(tty, ld);
727 tty_set_termios_ldisc(tty, N_TTY); 728 tty_set_termios_ldisc(tty, ldisc);
728} 729}
729 730
730/** 731/**
@@ -745,6 +746,8 @@ static void tty_ldisc_reinit(struct tty_struct *tty)
745void tty_ldisc_hangup(struct tty_struct *tty) 746void tty_ldisc_hangup(struct tty_struct *tty)
746{ 747{
747 struct tty_ldisc *ld; 748 struct tty_ldisc *ld;
749 int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
750 int err = 0;
748 751
749 /* 752 /*
750 * FIXME! What are the locking issues here? This may be overdoing 753
@@ -772,25 +775,32 @@ void tty_ldisc_hangup(struct tty_struct *tty)
772 wake_up_interruptible_poll(&tty->read_wait, POLLIN); 775 wake_up_interruptible_poll(&tty->read_wait, POLLIN);
773 /* 776 /*
774 * Shutdown the current line discipline, and reset it to 777 * Shutdown the current line discipline, and reset it to
775 * N_TTY. 778 * N_TTY if need be.
779 *
780 * Avoid racing set_ldisc or tty_ldisc_release
776 */ 781 */
777 if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) { 782 mutex_lock(&tty->ldisc_mutex);
778 /* Avoid racing set_ldisc or tty_ldisc_release */ 783 tty_ldisc_halt(tty);
779 mutex_lock(&tty->ldisc_mutex); 784 /* At this point we have a closed ldisc and we want to
780 tty_ldisc_halt(tty); 785 reopen it. We could defer this to the next open but
781 if (tty->ldisc) { /* Not yet closed */ 786 it means auditing a lot of other paths so this is
782 /* Switch back to N_TTY */ 787 a FIXME */
783 tty_ldisc_reinit(tty); 788 if (tty->ldisc) { /* Not yet closed */
784 /* At this point we have a closed ldisc and we want to 789 if (reset == 0) {
785 reopen it. We could defer this to the next open but 790 tty_ldisc_reinit(tty, tty->termios->c_line);
786 it means auditing a lot of other paths so this is 791 err = tty_ldisc_open(tty, tty->ldisc);
787 a FIXME */ 792 }
793 /* If the re-open fails or we reset then go to N_TTY. The
794 N_TTY open cannot fail */
795 if (reset || err) {
796 tty_ldisc_reinit(tty, N_TTY);
788 WARN_ON(tty_ldisc_open(tty, tty->ldisc)); 797 WARN_ON(tty_ldisc_open(tty, tty->ldisc));
789 tty_ldisc_enable(tty);
790 } 798 }
791 mutex_unlock(&tty->ldisc_mutex); 799 tty_ldisc_enable(tty);
792 tty_reset_termios(tty);
793 } 800 }
801 mutex_unlock(&tty->ldisc_mutex);
802 if (reset)
803 tty_reset_termios(tty);
794} 804}
795 805
796/** 806/**
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 6aa10284104a..87778dcf8727 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -888,7 +888,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
888 ret = -EFAULT; 888 ret = -EFAULT;
889 goto out; 889 goto out;
890 } 890 }
891 if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) { 891 if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS && tmp.mode != VT_PROCESS_AUTO) {
892 ret = -EINVAL; 892 ret = -EINVAL;
893 goto out; 893 goto out;
894 } 894 }
@@ -1622,7 +1622,7 @@ static void complete_change_console(struct vc_data *vc)
1622 * telling it that it has acquired. Also check if it has died and 1622 * telling it that it has acquired. Also check if it has died and
1623 * clean up (similar to logic employed in change_console()) 1623 * clean up (similar to logic employed in change_console())
1624 */ 1624 */
1625 if (vc->vt_mode.mode == VT_PROCESS) { 1625 if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) {
1626 /* 1626 /*
1627 * Send the signal as privileged - kill_pid() will 1627 * Send the signal as privileged - kill_pid() will
1628 * tell us if the process has gone or something else 1628 * tell us if the process has gone or something else
@@ -1682,7 +1682,7 @@ void change_console(struct vc_data *new_vc)
1682 * vt to auto control. 1682 * vt to auto control.
1683 */ 1683 */
1684 vc = vc_cons[fg_console].d; 1684 vc = vc_cons[fg_console].d;
1685 if (vc->vt_mode.mode == VT_PROCESS) { 1685 if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) {
1686 /* 1686 /*
1687 * Send the signal as privileged - kill_pid() will 1687 * Send the signal as privileged - kill_pid() will
1688 * tell us if the process has gone or something else 1688 * tell us if the process has gone or something else
@@ -1693,27 +1693,28 @@ void change_console(struct vc_data *new_vc)
1693 */ 1693 */
1694 vc->vt_newvt = new_vc->vc_num; 1694 vc->vt_newvt = new_vc->vc_num;
1695 if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) { 1695 if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
1696 if(vc->vt_mode.mode == VT_PROCESS)
1697 /*
1698 * It worked. Mark the vt to switch to and
1699 * return. The process needs to send us a
1700 * VT_RELDISP ioctl to complete the switch.
1701 */
1702 return;
1703 } else {
1696 /* 1704 /*
1697 * It worked. Mark the vt to switch to and 1705 * The controlling process has died, so we revert back to
1698 * return. The process needs to send us a 1706 * normal operation. In this case, we'll also change back
1699 * VT_RELDISP ioctl to complete the switch. 1707 * to KD_TEXT mode. I'm not sure if this is strictly correct
1708 * but it saves the agony when the X server dies and the screen
1709 * remains blanked due to KD_GRAPHICS! It would be nice to do
1710 * this outside of VT_PROCESS but there is no single process
1711 * to account for and tracking tty count may be undesirable.
1700 */ 1712 */
1701 return; 1713 reset_vc(vc);
1702 } 1714 }
1703 1715
1704 /* 1716 /*
1705 * The controlling process has died, so we revert back to 1717 * Fall through to normal (VT_AUTO and VT_PROCESS_AUTO) handling of the switch...
1706 * normal operation. In this case, we'll also change back
1707 * to KD_TEXT mode. I'm not sure if this is strictly correct
1708 * but it saves the agony when the X server dies and the screen
1709 * remains blanked due to KD_GRAPHICS! It would be nice to do
1710 * this outside of VT_PROCESS but there is no single process
1711 * to account for and tracking tty count may be undesirable.
1712 */
1713 reset_vc(vc);
1714
1715 /*
1716 * Fall through to normal (VT_AUTO) handling of the switch...
1717 */ 1718 */
1718 } 1719 }
1719 1720
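
With VT_PROCESS_AUTO a process still receives the release/acquire signals, but the kernel completes the console switch itself instead of waiting for a VT_RELDISP acknowledgement as plain VT_PROCESS does. A userspace sketch of requesting that mode, assuming the VT_PROCESS_AUTO constant from the matching linux/vt.h change in this series:

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vt.h>

/* returns an open fd on success; keep it open to go on receiving signals */
static int watch_vt_switches(const char *console_dev)
{
	struct vt_mode mode = {
		.mode   = VT_PROCESS_AUTO,	/* notify, but auto-complete the switch */
		.relsig = SIGUSR1,		/* delivered when the console is taken away */
		.acqsig = SIGUSR2,		/* delivered when the console comes back */
	};
	int fd = open(console_dev, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, VT_SETMODE, &mode) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
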
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e7a3230fb7d5..87399cafce37 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -284,7 +284,7 @@ struct dma_chan_tbl_ent {
284/** 284/**
285 * channel_table - percpu lookup table for memory-to-memory offload providers 285 * channel_table - percpu lookup table for memory-to-memory offload providers
286 */ 286 */
287static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END]; 287static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
288 288
289static int __init dma_channel_table_init(void) 289static int __init dma_channel_table_init(void)
290{ 290{
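
Marking the channel table (and the msrs pointer below) with __percpu is an annotation-only change: it tells sparse that the pointer stores a per-cpu offset from alloc_percpu() and must be translated with per_cpu_ptr()/get_cpu_ptr() before being dereferenced. A generic illustration with made-up demo_* names:

#include <linux/errno.h>
#include <linux/percpu.h>

struct demo_stats {
	unsigned long packets;
};

/* holds a per-cpu cookie from alloc_percpu(), not a plain kernel address */
static struct demo_stats __percpu *demo_stats;

static int demo_setup(void)
{
	demo_stats = alloc_percpu(struct demo_stats);
	return demo_stats ? 0 : -ENOMEM;
}

static void demo_count_packet(void)
{
	struct demo_stats *s = get_cpu_ptr(demo_stats);	/* disables preemption */

	s->packets++;
	put_cpu_ptr(demo_stats);
}
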
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 3391e6739d06..cf17dbb8014f 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -13,7 +13,7 @@ module_param(report_gart_errors, int, 0644);
13static int ecc_enable_override; 13static int ecc_enable_override;
14module_param(ecc_enable_override, int, 0644); 14module_param(ecc_enable_override, int, 0644);
15 15
16static struct msr *msrs; 16static struct msr __percpu *msrs;
17 17
18/* Lookup table for all possible MC control instances */ 18/* Lookup table for all possible MC control instances */
19struct amd64_pvt; 19struct amd64_pvt;
@@ -2553,14 +2553,14 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
2553 2553
2554 if (on) { 2554 if (on) {
2555 if (reg->l & K8_MSR_MCGCTL_NBE) 2555 if (reg->l & K8_MSR_MCGCTL_NBE)
2556 pvt->flags.ecc_report = 1; 2556 pvt->flags.nb_mce_enable = 1;
2557 2557
2558 reg->l |= K8_MSR_MCGCTL_NBE; 2558 reg->l |= K8_MSR_MCGCTL_NBE;
2559 } else { 2559 } else {
2560 /* 2560 /*
2561 * Turn off ECC reporting only when it was off before 2561 * Turn off NB MCE reporting only when it was off before
2562 */ 2562 */
2563 if (!pvt->flags.ecc_report) 2563 if (!pvt->flags.nb_mce_enable)
2564 reg->l &= ~K8_MSR_MCGCTL_NBE; 2564 reg->l &= ~K8_MSR_MCGCTL_NBE;
2565 } 2565 }
2566 } 2566 }
@@ -2571,22 +2571,11 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
2571 return 0; 2571 return 0;
2572} 2572}
2573 2573
2574/*
2575 * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
2576 * enable it.
2577 */
2578static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) 2574static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2579{ 2575{
2580 struct amd64_pvt *pvt = mci->pvt_info; 2576 struct amd64_pvt *pvt = mci->pvt_info;
2581 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; 2577 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2582 2578
2583 if (!ecc_enable_override)
2584 return;
2585
2586 amd64_printk(KERN_WARNING,
2587 "'ecc_enable_override' parameter is active, "
2588 "Enabling AMD ECC hardware now: CAUTION\n");
2589
2590 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); 2579 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
2591 2580
2592 /* turn on UECCn and CECCEn bits */ 2581 /* turn on UECCn and CECCEn bits */
@@ -2611,6 +2600,8 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2611 "This node reports that DRAM ECC is " 2600 "This node reports that DRAM ECC is "
2612 "currently Disabled; ENABLING now\n"); 2601 "currently Disabled; ENABLING now\n");
2613 2602
2603 pvt->flags.nb_ecc_prev = 0;
2604
2614 /* Attempt to turn on DRAM ECC Enable */ 2605 /* Attempt to turn on DRAM ECC Enable */
2615 value |= K8_NBCFG_ECC_ENABLE; 2606 value |= K8_NBCFG_ECC_ENABLE;
2616 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); 2607 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
@@ -2625,7 +2616,10 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2625 amd64_printk(KERN_DEBUG, 2616 amd64_printk(KERN_DEBUG,
2626 "Hardware accepted DRAM ECC Enable\n"); 2617 "Hardware accepted DRAM ECC Enable\n");
2627 } 2618 }
2619 } else {
2620 pvt->flags.nb_ecc_prev = 1;
2628 } 2621 }
2622
2629 debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, 2623 debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
2630 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", 2624 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2631 (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); 2625 (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
@@ -2644,12 +2638,18 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
2644 value &= ~mask; 2638 value &= ~mask;
2645 value |= pvt->old_nbctl; 2639 value |= pvt->old_nbctl;
2646 2640
2647 /* restore the NB Enable MCGCTL bit */
2648 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); 2641 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2649 2642
2643 /* restore previous BIOS DRAM ECC "off" setting which we force-enabled */
2644 if (!pvt->flags.nb_ecc_prev) {
2645 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
2646 value &= ~K8_NBCFG_ECC_ENABLE;
2647 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
2648 }
2649
2650 /* restore the NB Enable MCGCTL bit */
2650 if (amd64_toggle_ecc_err_reporting(pvt, OFF)) 2651 if (amd64_toggle_ecc_err_reporting(pvt, OFF))
2651 amd64_printk(KERN_WARNING, "Error restoring ECC reporting over " 2652 amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n");
2652 "MCGCTL!\n");
2653} 2653}
2654 2654
2655/* 2655/*
@@ -2690,8 +2690,9 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2690 if (!ecc_enable_override) { 2690 if (!ecc_enable_override) {
2691 amd64_printk(KERN_NOTICE, "%s", ecc_msg); 2691 amd64_printk(KERN_NOTICE, "%s", ecc_msg);
2692 return -ENODEV; 2692 return -ENODEV;
2693 } else {
2694 amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n");
2693 } 2695 }
2694 ecc_enable_override = 0;
2695 } 2696 }
2696 2697
2697 return 0; 2698 return 0;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 41bc561e5981..0d4bf5638243 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -487,7 +487,8 @@ struct amd64_pvt {
487 /* misc settings */ 487 /* misc settings */
488 struct flags { 488 struct flags {
489 unsigned long cf8_extcfg:1; 489 unsigned long cf8_extcfg:1;
490 unsigned long ecc_report:1; 490 unsigned long nb_mce_enable:1;
491 unsigned long nb_ecc_prev:1;
491 } flags; 492 } flags;
492}; 493};
493 494
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 4eeaed57e219..8be720b278b7 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -25,6 +25,7 @@
25#include <linux/firewire.h> 25#include <linux/firewire.h>
26#include <linux/firewire-cdev.h> 26#include <linux/firewire-cdev.h>
27#include <linux/idr.h> 27#include <linux/idr.h>
28#include <linux/irqflags.h>
28#include <linux/jiffies.h> 29#include <linux/jiffies.h>
29#include <linux/kernel.h> 30#include <linux/kernel.h>
30#include <linux/kref.h> 31#include <linux/kref.h>
@@ -32,7 +33,6 @@
32#include <linux/module.h> 33#include <linux/module.h>
33#include <linux/mutex.h> 34#include <linux/mutex.h>
34#include <linux/poll.h> 35#include <linux/poll.h>
35#include <linux/preempt.h>
36#include <linux/sched.h> 36#include <linux/sched.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/string.h> 38#include <linux/string.h>
@@ -368,39 +368,56 @@ void fw_device_cdev_remove(struct fw_device *device)
368 for_each_client(device, wake_up_client); 368 for_each_client(device, wake_up_client);
369} 369}
370 370
371static int ioctl_get_info(struct client *client, void *buffer) 371union ioctl_arg {
372 struct fw_cdev_get_info get_info;
373 struct fw_cdev_send_request send_request;
374 struct fw_cdev_allocate allocate;
375 struct fw_cdev_deallocate deallocate;
376 struct fw_cdev_send_response send_response;
377 struct fw_cdev_initiate_bus_reset initiate_bus_reset;
378 struct fw_cdev_add_descriptor add_descriptor;
379 struct fw_cdev_remove_descriptor remove_descriptor;
380 struct fw_cdev_create_iso_context create_iso_context;
381 struct fw_cdev_queue_iso queue_iso;
382 struct fw_cdev_start_iso start_iso;
383 struct fw_cdev_stop_iso stop_iso;
384 struct fw_cdev_get_cycle_timer get_cycle_timer;
385 struct fw_cdev_allocate_iso_resource allocate_iso_resource;
386 struct fw_cdev_send_stream_packet send_stream_packet;
387 struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
388};
389
390static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
372{ 391{
373 struct fw_cdev_get_info *get_info = buffer; 392 struct fw_cdev_get_info *a = &arg->get_info;
374 struct fw_cdev_event_bus_reset bus_reset; 393 struct fw_cdev_event_bus_reset bus_reset;
375 unsigned long ret = 0; 394 unsigned long ret = 0;
376 395
377 client->version = get_info->version; 396 client->version = a->version;
378 get_info->version = FW_CDEV_VERSION; 397 a->version = FW_CDEV_VERSION;
379 get_info->card = client->device->card->index; 398 a->card = client->device->card->index;
380 399
381 down_read(&fw_device_rwsem); 400 down_read(&fw_device_rwsem);
382 401
383 if (get_info->rom != 0) { 402 if (a->rom != 0) {
384 void __user *uptr = u64_to_uptr(get_info->rom); 403 size_t want = a->rom_length;
385 size_t want = get_info->rom_length;
386 size_t have = client->device->config_rom_length * 4; 404 size_t have = client->device->config_rom_length * 4;
387 405
388 ret = copy_to_user(uptr, client->device->config_rom, 406 ret = copy_to_user(u64_to_uptr(a->rom),
389 min(want, have)); 407 client->device->config_rom, min(want, have));
390 } 408 }
391 get_info->rom_length = client->device->config_rom_length * 4; 409 a->rom_length = client->device->config_rom_length * 4;
392 410
393 up_read(&fw_device_rwsem); 411 up_read(&fw_device_rwsem);
394 412
395 if (ret != 0) 413 if (ret != 0)
396 return -EFAULT; 414 return -EFAULT;
397 415
398 client->bus_reset_closure = get_info->bus_reset_closure; 416 client->bus_reset_closure = a->bus_reset_closure;
399 if (get_info->bus_reset != 0) { 417 if (a->bus_reset != 0) {
400 void __user *uptr = u64_to_uptr(get_info->bus_reset);
401
402 fill_bus_reset_event(&bus_reset, client); 418 fill_bus_reset_event(&bus_reset, client);
403 if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset))) 419 if (copy_to_user(u64_to_uptr(a->bus_reset),
420 &bus_reset, sizeof(bus_reset)))
404 return -EFAULT; 421 return -EFAULT;
405 } 422 }
406 423
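
Collapsing the per-command argument structs into one union lets a single dispatcher copy the user argument into a stack buffer sized for the largest command and hand every handler a type-safe pointer. The firewire dispatcher itself is not part of this hunk, so the version below is only an illustrative sketch of that pattern with demo_* names:

#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_client { int id; };
struct demo_cmd_get { __u32 version; };
struct demo_cmd_set { __u64 value; };

union demo_arg {
	struct demo_cmd_get get;
	struct demo_cmd_set set;
};

static int demo_get(struct demo_client *c, union demo_arg *arg)
{
	arg->get.version = 1;		/* result copied back to userspace */
	return 0;
}

static int demo_set(struct demo_client *c, union demo_arg *arg)
{
	return arg->set.value ? 0 : -EINVAL;
}

static int (* const demo_handlers[])(struct demo_client *, union demo_arg *) = {
	demo_get,
	demo_set,
};

static long demo_dispatch(struct demo_client *c, unsigned int cmd,
			  void __user *uarg)
{
	union demo_arg buffer;		/* one buffer covers every command */
	int ret;

	if (_IOC_NR(cmd) >= ARRAY_SIZE(demo_handlers) ||
	    _IOC_SIZE(cmd) > sizeof(buffer))
		return -EINVAL;

	if ((_IOC_DIR(cmd) & _IOC_WRITE) &&
	    copy_from_user(&buffer, uarg, _IOC_SIZE(cmd)))
		return -EFAULT;

	ret = demo_handlers[_IOC_NR(cmd)](c, &buffer);

	if (ret >= 0 && (_IOC_DIR(cmd) & _IOC_READ) &&
	    copy_to_user(uarg, &buffer, _IOC_SIZE(cmd)))
		return -EFAULT;

	return ret;
}
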
@@ -571,11 +588,9 @@ static int init_request(struct client *client,
571 return ret; 588 return ret;
572} 589}
573 590
574static int ioctl_send_request(struct client *client, void *buffer) 591static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
575{ 592{
576 struct fw_cdev_send_request *request = buffer; 593 switch (arg->send_request.tcode) {
577
578 switch (request->tcode) {
579 case TCODE_WRITE_QUADLET_REQUEST: 594 case TCODE_WRITE_QUADLET_REQUEST:
580 case TCODE_WRITE_BLOCK_REQUEST: 595 case TCODE_WRITE_BLOCK_REQUEST:
581 case TCODE_READ_QUADLET_REQUEST: 596 case TCODE_READ_QUADLET_REQUEST:
@@ -592,7 +607,7 @@ static int ioctl_send_request(struct client *client, void *buffer)
592 return -EINVAL; 607 return -EINVAL;
593 } 608 }
594 609
595 return init_request(client, request, client->device->node_id, 610 return init_request(client, &arg->send_request, client->device->node_id,
596 client->device->max_speed); 611 client->device->max_speed);
597} 612}
598 613
@@ -683,9 +698,9 @@ static void release_address_handler(struct client *client,
683 kfree(r); 698 kfree(r);
684} 699}
685 700
686static int ioctl_allocate(struct client *client, void *buffer) 701static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
687{ 702{
688 struct fw_cdev_allocate *request = buffer; 703 struct fw_cdev_allocate *a = &arg->allocate;
689 struct address_handler_resource *r; 704 struct address_handler_resource *r;
690 struct fw_address_region region; 705 struct fw_address_region region;
691 int ret; 706 int ret;
@@ -694,13 +709,13 @@ static int ioctl_allocate(struct client *client, void *buffer)
694 if (r == NULL) 709 if (r == NULL)
695 return -ENOMEM; 710 return -ENOMEM;
696 711
697 region.start = request->offset; 712 region.start = a->offset;
698 region.end = request->offset + request->length; 713 region.end = a->offset + a->length;
699 r->handler.length = request->length; 714 r->handler.length = a->length;
700 r->handler.address_callback = handle_request; 715 r->handler.address_callback = handle_request;
701 r->handler.callback_data = r; 716 r->handler.callback_data = r;
702 r->closure = request->closure; 717 r->closure = a->closure;
703 r->client = client; 718 r->client = client;
704 719
705 ret = fw_core_add_address_handler(&r->handler, &region); 720 ret = fw_core_add_address_handler(&r->handler, &region);
706 if (ret < 0) { 721 if (ret < 0) {
@@ -714,27 +729,25 @@ static int ioctl_allocate(struct client *client, void *buffer)
714 release_address_handler(client, &r->resource); 729 release_address_handler(client, &r->resource);
715 return ret; 730 return ret;
716 } 731 }
717 request->handle = r->resource.handle; 732 a->handle = r->resource.handle;
718 733
719 return 0; 734 return 0;
720} 735}
721 736
722static int ioctl_deallocate(struct client *client, void *buffer) 737static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
723{ 738{
724 struct fw_cdev_deallocate *request = buffer; 739 return release_client_resource(client, arg->deallocate.handle,
725
726 return release_client_resource(client, request->handle,
727 release_address_handler, NULL); 740 release_address_handler, NULL);
728} 741}
729 742
730static int ioctl_send_response(struct client *client, void *buffer) 743static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
731{ 744{
732 struct fw_cdev_send_response *request = buffer; 745 struct fw_cdev_send_response *a = &arg->send_response;
733 struct client_resource *resource; 746 struct client_resource *resource;
734 struct inbound_transaction_resource *r; 747 struct inbound_transaction_resource *r;
735 int ret = 0; 748 int ret = 0;
736 749
737 if (release_client_resource(client, request->handle, 750 if (release_client_resource(client, a->handle,
738 release_request, &resource) < 0) 751 release_request, &resource) < 0)
739 return -EINVAL; 752 return -EINVAL;
740 753
@@ -743,28 +756,24 @@ static int ioctl_send_response(struct client *client, void *buffer)
743 if (is_fcp_request(r->request)) 756 if (is_fcp_request(r->request))
744 goto out; 757 goto out;
745 758
746 if (request->length < r->length) 759 if (a->length < r->length)
747 r->length = request->length; 760 r->length = a->length;
748 if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) { 761 if (copy_from_user(r->data, u64_to_uptr(a->data), r->length)) {
749 ret = -EFAULT; 762 ret = -EFAULT;
750 kfree(r->request); 763 kfree(r->request);
751 goto out; 764 goto out;
752 } 765 }
753 fw_send_response(client->device->card, r->request, request->rcode); 766 fw_send_response(client->device->card, r->request, a->rcode);
754 out: 767 out:
755 kfree(r); 768 kfree(r);
756 769
757 return ret; 770 return ret;
758} 771}
759 772
760static int ioctl_initiate_bus_reset(struct client *client, void *buffer) 773static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
761{ 774{
762 struct fw_cdev_initiate_bus_reset *request = buffer; 775 return fw_core_initiate_bus_reset(client->device->card,
763 int short_reset; 776 arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
764
765 short_reset = (request->type == FW_CDEV_SHORT_RESET);
766
767 return fw_core_initiate_bus_reset(client->device->card, short_reset);
768} 777}
769 778
770static void release_descriptor(struct client *client, 779static void release_descriptor(struct client *client,
@@ -777,9 +786,9 @@ static void release_descriptor(struct client *client,
777 kfree(r); 786 kfree(r);
778} 787}
779 788
780static int ioctl_add_descriptor(struct client *client, void *buffer) 789static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
781{ 790{
782 struct fw_cdev_add_descriptor *request = buffer; 791 struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
783 struct descriptor_resource *r; 792 struct descriptor_resource *r;
784 int ret; 793 int ret;
785 794
@@ -787,22 +796,21 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
787 if (!client->device->is_local) 796 if (!client->device->is_local)
788 return -ENOSYS; 797 return -ENOSYS;
789 798
790 if (request->length > 256) 799 if (a->length > 256)
791 return -EINVAL; 800 return -EINVAL;
792 801
793 r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL); 802 r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
794 if (r == NULL) 803 if (r == NULL)
795 return -ENOMEM; 804 return -ENOMEM;
796 805
797 if (copy_from_user(r->data, 806 if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
798 u64_to_uptr(request->data), request->length * 4)) {
799 ret = -EFAULT; 807 ret = -EFAULT;
800 goto failed; 808 goto failed;
801 } 809 }
802 810
803 r->descriptor.length = request->length; 811 r->descriptor.length = a->length;
804 r->descriptor.immediate = request->immediate; 812 r->descriptor.immediate = a->immediate;
805 r->descriptor.key = request->key; 813 r->descriptor.key = a->key;
806 r->descriptor.data = r->data; 814 r->descriptor.data = r->data;
807 815
808 ret = fw_core_add_descriptor(&r->descriptor); 816 ret = fw_core_add_descriptor(&r->descriptor);
@@ -815,7 +823,7 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
815 fw_core_remove_descriptor(&r->descriptor); 823 fw_core_remove_descriptor(&r->descriptor);
816 goto failed; 824 goto failed;
817 } 825 }
818 request->handle = r->resource.handle; 826 a->handle = r->resource.handle;
819 827
820 return 0; 828 return 0;
821 failed: 829 failed:
@@ -824,11 +832,9 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
824 return ret; 832 return ret;
825} 833}
826 834
827static int ioctl_remove_descriptor(struct client *client, void *buffer) 835static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
828{ 836{
829 struct fw_cdev_remove_descriptor *request = buffer; 837 return release_client_resource(client, arg->remove_descriptor.handle,
830
831 return release_client_resource(client, request->handle,
832 release_descriptor, NULL); 838 release_descriptor, NULL);
833} 839}
834 840
@@ -851,49 +857,44 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
851 sizeof(e->interrupt) + header_length, NULL, 0); 857 sizeof(e->interrupt) + header_length, NULL, 0);
852} 858}
853 859
854static int ioctl_create_iso_context(struct client *client, void *buffer) 860static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
855{ 861{
856 struct fw_cdev_create_iso_context *request = buffer; 862 struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
857 struct fw_iso_context *context; 863 struct fw_iso_context *context;
858 864
859 /* We only support one context at this time. */ 865 /* We only support one context at this time. */
860 if (client->iso_context != NULL) 866 if (client->iso_context != NULL)
861 return -EBUSY; 867 return -EBUSY;
862 868
863 if (request->channel > 63) 869 if (a->channel > 63)
864 return -EINVAL; 870 return -EINVAL;
865 871
866 switch (request->type) { 872 switch (a->type) {
867 case FW_ISO_CONTEXT_RECEIVE: 873 case FW_ISO_CONTEXT_RECEIVE:
868 if (request->header_size < 4 || (request->header_size & 3)) 874 if (a->header_size < 4 || (a->header_size & 3))
869 return -EINVAL; 875 return -EINVAL;
870
871 break; 876 break;
872 877
873 case FW_ISO_CONTEXT_TRANSMIT: 878 case FW_ISO_CONTEXT_TRANSMIT:
874 if (request->speed > SCODE_3200) 879 if (a->speed > SCODE_3200)
875 return -EINVAL; 880 return -EINVAL;
876
877 break; 881 break;
878 882
879 default: 883 default:
880 return -EINVAL; 884 return -EINVAL;
881 } 885 }
882 886
883 context = fw_iso_context_create(client->device->card, 887 context = fw_iso_context_create(client->device->card, a->type,
884 request->type, 888 a->channel, a->speed, a->header_size,
885 request->channel, 889 iso_callback, client);
886 request->speed,
887 request->header_size,
888 iso_callback, client);
889 if (IS_ERR(context)) 890 if (IS_ERR(context))
890 return PTR_ERR(context); 891 return PTR_ERR(context);
891 892
892 client->iso_closure = request->closure; 893 client->iso_closure = a->closure;
893 client->iso_context = context; 894 client->iso_context = context;
894 895
895 /* We only support one context at this time. */ 896 /* We only support one context at this time. */
896 request->handle = 0; 897 a->handle = 0;
897 898
898 return 0; 899 return 0;
899} 900}
@@ -906,9 +907,9 @@ static int ioctl_create_iso_context(struct client *client, void *buffer)
906#define GET_SY(v) (((v) >> 20) & 0x0f) 907#define GET_SY(v) (((v) >> 20) & 0x0f)
907#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff) 908#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
908 909
909static int ioctl_queue_iso(struct client *client, void *buffer) 910static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
910{ 911{
911 struct fw_cdev_queue_iso *request = buffer; 912 struct fw_cdev_queue_iso *a = &arg->queue_iso;
912 struct fw_cdev_iso_packet __user *p, *end, *next; 913 struct fw_cdev_iso_packet __user *p, *end, *next;
913 struct fw_iso_context *ctx = client->iso_context; 914 struct fw_iso_context *ctx = client->iso_context;
914 unsigned long payload, buffer_end, header_length; 915 unsigned long payload, buffer_end, header_length;
@@ -919,7 +920,7 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
919 u8 header[256]; 920 u8 header[256];
920 } u; 921 } u;
921 922
922 if (ctx == NULL || request->handle != 0) 923 if (ctx == NULL || a->handle != 0)
923 return -EINVAL; 924 return -EINVAL;
924 925
925 /* 926 /*
@@ -929,23 +930,23 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
929 * set them both to 0, which will still let packets with 930 * set them both to 0, which will still let packets with
930 * payload_length == 0 through. In other words, if no packets 931 * payload_length == 0 through. In other words, if no packets
931 * use the indirect payload, the iso buffer need not be mapped 932 * use the indirect payload, the iso buffer need not be mapped
932 * and the request->data pointer is ignored. 933 * and the a->data pointer is ignored.
933 */ 934 */
934 935
935 payload = (unsigned long)request->data - client->vm_start; 936 payload = (unsigned long)a->data - client->vm_start;
936 buffer_end = client->buffer.page_count << PAGE_SHIFT; 937 buffer_end = client->buffer.page_count << PAGE_SHIFT;
937 if (request->data == 0 || client->buffer.pages == NULL || 938 if (a->data == 0 || client->buffer.pages == NULL ||
938 payload >= buffer_end) { 939 payload >= buffer_end) {
939 payload = 0; 940 payload = 0;
940 buffer_end = 0; 941 buffer_end = 0;
941 } 942 }
942 943
943 p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets); 944 p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
944 945
945 if (!access_ok(VERIFY_READ, p, request->size)) 946 if (!access_ok(VERIFY_READ, p, a->size))
946 return -EFAULT; 947 return -EFAULT;
947 948
948 end = (void __user *)p + request->size; 949 end = (void __user *)p + a->size;
949 count = 0; 950 count = 0;
950 while (p < end) { 951 while (p < end) {
951 if (get_user(control, &p->control)) 952 if (get_user(control, &p->control))
@@ -995,61 +996,78 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
995 count++; 996 count++;
996 } 997 }
997 998
998 request->size -= uptr_to_u64(p) - request->packets; 999 a->size -= uptr_to_u64(p) - a->packets;
999 request->packets = uptr_to_u64(p); 1000 a->packets = uptr_to_u64(p);
1000 request->data = client->vm_start + payload; 1001 a->data = client->vm_start + payload;
1001 1002
1002 return count; 1003 return count;
1003} 1004}
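The control word that the GET_* macros above take apart is assembled on the user side with the matching FW_CDEV_ISO_* macros. A rough sketch of queueing a single packet, assuming the payload buffer has already been mmap()ed from the same file descriptor and the context was created with a 4-byte header size:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Sketch: queue one 512-byte receive packet and request an interrupt
 * (i.e. an FW_CDEV_EVENT_ISO_INTERRUPT) when it has been filled. */
static int queue_one_packet(int fd, void *payload /* inside the mmap()ed buffer */)
{
        struct fw_cdev_iso_packet packet;
        struct fw_cdev_queue_iso req;

        memset(&packet, 0, sizeof(packet));
        packet.control = FW_CDEV_ISO_PAYLOAD_LENGTH(512) |
                         FW_CDEV_ISO_HEADER_LENGTH(4) |
                         FW_CDEV_ISO_INTERRUPT;

        memset(&req, 0, sizeof(req));
        req.packets = (__u64)(unsigned long)&packet;
        req.size    = sizeof(packet);
        req.data    = (__u64)(unsigned long)payload;
        req.handle  = 0;

        /* Returns the number of packets queued, or -1 with errno set. */
        return ioctl(fd, FW_CDEV_IOC_QUEUE_ISO, &req);
}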
1004 1005
1005static int ioctl_start_iso(struct client *client, void *buffer) 1006static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
1006{ 1007{
1007 struct fw_cdev_start_iso *request = buffer; 1008 struct fw_cdev_start_iso *a = &arg->start_iso;
1008 1009
1009 if (client->iso_context == NULL || request->handle != 0) 1010 if (client->iso_context == NULL || a->handle != 0)
1010 return -EINVAL; 1011 return -EINVAL;
1011 1012
1012 if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { 1013 if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
1013 if (request->tags == 0 || request->tags > 15) 1014 (a->tags == 0 || a->tags > 15 || a->sync > 15))
1014 return -EINVAL; 1015 return -EINVAL;
1015
1016 if (request->sync > 15)
1017 return -EINVAL;
1018 }
1019 1016
1020 return fw_iso_context_start(client->iso_context, request->cycle, 1017 return fw_iso_context_start(client->iso_context,
1021 request->sync, request->tags); 1018 a->cycle, a->sync, a->tags);
1022} 1019}
1023 1020
1024static int ioctl_stop_iso(struct client *client, void *buffer) 1021static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
1025{ 1022{
1026 struct fw_cdev_stop_iso *request = buffer; 1023 struct fw_cdev_stop_iso *a = &arg->stop_iso;
1027 1024
1028 if (client->iso_context == NULL || request->handle != 0) 1025 if (client->iso_context == NULL || a->handle != 0)
1029 return -EINVAL; 1026 return -EINVAL;
1030 1027
1031 return fw_iso_context_stop(client->iso_context); 1028 return fw_iso_context_stop(client->iso_context);
1032} 1029}
1033 1030
1034static int ioctl_get_cycle_timer(struct client *client, void *buffer) 1031static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
1035{ 1032{
1036 struct fw_cdev_get_cycle_timer *request = buffer; 1033 struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
1037 struct fw_card *card = client->device->card; 1034 struct fw_card *card = client->device->card;
1038 unsigned long long bus_time; 1035 struct timespec ts = {0, 0};
1039 struct timeval tv; 1036 u32 cycle_time;
1040 unsigned long flags; 1037 int ret = 0;
1038
1039 local_irq_disable();
1040
1041 cycle_time = card->driver->get_cycle_time(card);
1041 1042
1042 preempt_disable(); 1043 switch (a->clk_id) {
1043 local_irq_save(flags); 1044 case CLOCK_REALTIME: getnstimeofday(&ts); break;
1045 case CLOCK_MONOTONIC: do_posix_clock_monotonic_gettime(&ts); break;
1046 case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break;
1047 default:
1048 ret = -EINVAL;
1049 }
1044 1050
1045 bus_time = card->driver->get_bus_time(card); 1051 local_irq_enable();
1046 do_gettimeofday(&tv);
1047 1052
1048 local_irq_restore(flags); 1053 a->tv_sec = ts.tv_sec;
1049 preempt_enable(); 1054 a->tv_nsec = ts.tv_nsec;
1055 a->cycle_timer = cycle_time;
1056
1057 return ret;
1058}
1059
1060static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
1061{
1062 struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
1063 struct fw_cdev_get_cycle_timer2 ct2;
1064
1065 ct2.clk_id = CLOCK_REALTIME;
1066 ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
1067
1068 a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
1069 a->cycle_timer = ct2.cycle_timer;
1050 1070
1051 request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
1052 request->cycle_timer = bus_time & 0xffffffff;
1053 return 0; 1071 return 0;
1054} 1072}
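Both ioctls hand back the raw 32-bit CYCLE_TIME register value; its field layout (7-bit seconds, 13-bit cycle count, 12-bit cycle offset) is the same split that cycle_timer_ticks() in ohci.c relies on. A small user-side decoder, as a sketch:

#include <stdint.h>

/* Sketch: split a CYCLE_TIME register value into its three fields. */
struct cycle_time {
        unsigned int seconds;   /* bits 31-25, wraps every 128 s          */
        unsigned int count;     /* bits 24-12, 0..7999 cycles of 125 us   */
        unsigned int offset;    /* bits 11-0, 0..3071 ticks at 24.576 MHz */
};

static struct cycle_time decode_cycle_time(uint32_t cycle_timer)
{
        struct cycle_time ct;

        ct.seconds = cycle_timer >> 25;
        ct.count   = (cycle_timer >> 12) & 0x1fff;
        ct.offset  = cycle_timer & 0xfff;

        return ct;
}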
1055 1073
@@ -1220,33 +1238,32 @@ static int init_iso_resource(struct client *client,
1220 return ret; 1238 return ret;
1221} 1239}
1222 1240
1223static int ioctl_allocate_iso_resource(struct client *client, void *buffer) 1241static int ioctl_allocate_iso_resource(struct client *client,
1242 union ioctl_arg *arg)
1224{ 1243{
1225 struct fw_cdev_allocate_iso_resource *request = buffer; 1244 return init_iso_resource(client,
1226 1245 &arg->allocate_iso_resource, ISO_RES_ALLOC);
1227 return init_iso_resource(client, request, ISO_RES_ALLOC);
1228} 1246}
1229 1247
1230static int ioctl_deallocate_iso_resource(struct client *client, void *buffer) 1248static int ioctl_deallocate_iso_resource(struct client *client,
1249 union ioctl_arg *arg)
1231{ 1250{
1232 struct fw_cdev_deallocate *request = buffer; 1251 return release_client_resource(client,
1233 1252 arg->deallocate.handle, release_iso_resource, NULL);
1234 return release_client_resource(client, request->handle,
1235 release_iso_resource, NULL);
1236} 1253}
1237 1254
1238static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer) 1255static int ioctl_allocate_iso_resource_once(struct client *client,
1256 union ioctl_arg *arg)
1239{ 1257{
1240 struct fw_cdev_allocate_iso_resource *request = buffer; 1258 return init_iso_resource(client,
1241 1259 &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
1242 return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
1243} 1260}
1244 1261
1245static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer) 1262static int ioctl_deallocate_iso_resource_once(struct client *client,
1263 union ioctl_arg *arg)
1246{ 1264{
1247 struct fw_cdev_allocate_iso_resource *request = buffer; 1265 return init_iso_resource(client,
1248 1266 &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
1249 return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
1250} 1267}
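All four handlers funnel into init_iso_resource() or release_client_resource(); from userspace, the once-only allocation looks roughly like this (the channel mask and bandwidth figure are illustrative):

#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Sketch: claim channel 10 plus some bandwidth at the IRM. The claim is
 * given back with FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE or lost at the
 * next bus reset. */
static int allocate_channel_once(int fd)
{
        struct fw_cdev_allocate_iso_resource req = {
                .channels  = 1ULL << 10,   /* bitmask of acceptable channels */
                .bandwidth = 2400,         /* in bandwidth allocation units */
        };

        return ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE, &req);
}

The outcome is then reported asynchronously through an FW_CDEV_EVENT_ISO_RESOURCE_* event rather than in the ioctl return value.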
1251 1268
1252/* 1269/*
@@ -1254,16 +1271,17 @@ static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffe
1254 * limited by the device's link speed, the local node's link speed, 1271 * limited by the device's link speed, the local node's link speed,
1255 * and all PHY port speeds between the two links. 1272 * and all PHY port speeds between the two links.
1256 */ 1273 */
1257static int ioctl_get_speed(struct client *client, void *buffer) 1274static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
1258{ 1275{
1259 return client->device->max_speed; 1276 return client->device->max_speed;
1260} 1277}
1261 1278
1262static int ioctl_send_broadcast_request(struct client *client, void *buffer) 1279static int ioctl_send_broadcast_request(struct client *client,
1280 union ioctl_arg *arg)
1263{ 1281{
1264 struct fw_cdev_send_request *request = buffer; 1282 struct fw_cdev_send_request *a = &arg->send_request;
1265 1283
1266 switch (request->tcode) { 1284 switch (a->tcode) {
1267 case TCODE_WRITE_QUADLET_REQUEST: 1285 case TCODE_WRITE_QUADLET_REQUEST:
1268 case TCODE_WRITE_BLOCK_REQUEST: 1286 case TCODE_WRITE_BLOCK_REQUEST:
1269 break; 1287 break;
@@ -1272,36 +1290,36 @@ static int ioctl_send_broadcast_request(struct client *client, void *buffer)
1272 } 1290 }
1273 1291
1274 /* Security policy: Only allow accesses to Units Space. */ 1292 /* Security policy: Only allow accesses to Units Space. */
1275 if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END) 1293 if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1276 return -EACCES; 1294 return -EACCES;
1277 1295
1278 return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100); 1296 return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
1279} 1297}
1280 1298
1281static int ioctl_send_stream_packet(struct client *client, void *buffer) 1299static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
1282{ 1300{
1283 struct fw_cdev_send_stream_packet *p = buffer; 1301 struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
1284 struct fw_cdev_send_request request; 1302 struct fw_cdev_send_request request;
1285 int dest; 1303 int dest;
1286 1304
1287 if (p->speed > client->device->card->link_speed || 1305 if (a->speed > client->device->card->link_speed ||
1288 p->length > 1024 << p->speed) 1306 a->length > 1024 << a->speed)
1289 return -EIO; 1307 return -EIO;
1290 1308
1291 if (p->tag > 3 || p->channel > 63 || p->sy > 15) 1309 if (a->tag > 3 || a->channel > 63 || a->sy > 15)
1292 return -EINVAL; 1310 return -EINVAL;
1293 1311
1294 dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy); 1312 dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
1295 request.tcode = TCODE_STREAM_DATA; 1313 request.tcode = TCODE_STREAM_DATA;
1296 request.length = p->length; 1314 request.length = a->length;
1297 request.closure = p->closure; 1315 request.closure = a->closure;
1298 request.data = p->data; 1316 request.data = a->data;
1299 request.generation = p->generation; 1317 request.generation = a->generation;
1300 1318
1301 return init_request(client, &request, dest, p->speed); 1319 return init_request(client, &request, dest, a->speed);
1302} 1320}
1303 1321
1304static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { 1322static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
1305 ioctl_get_info, 1323 ioctl_get_info,
1306 ioctl_send_request, 1324 ioctl_send_request,
1307 ioctl_allocate, 1325 ioctl_allocate,
@@ -1322,47 +1340,35 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
1322 ioctl_get_speed, 1340 ioctl_get_speed,
1323 ioctl_send_broadcast_request, 1341 ioctl_send_broadcast_request,
1324 ioctl_send_stream_packet, 1342 ioctl_send_stream_packet,
1343 ioctl_get_cycle_timer2,
1325}; 1344};
1326 1345
1327static int dispatch_ioctl(struct client *client, 1346static int dispatch_ioctl(struct client *client,
1328 unsigned int cmd, void __user *arg) 1347 unsigned int cmd, void __user *arg)
1329{ 1348{
1330 char buffer[sizeof(union { 1349 union ioctl_arg buffer;
1331 struct fw_cdev_get_info _00;
1332 struct fw_cdev_send_request _01;
1333 struct fw_cdev_allocate _02;
1334 struct fw_cdev_deallocate _03;
1335 struct fw_cdev_send_response _04;
1336 struct fw_cdev_initiate_bus_reset _05;
1337 struct fw_cdev_add_descriptor _06;
1338 struct fw_cdev_remove_descriptor _07;
1339 struct fw_cdev_create_iso_context _08;
1340 struct fw_cdev_queue_iso _09;
1341 struct fw_cdev_start_iso _0a;
1342 struct fw_cdev_stop_iso _0b;
1343 struct fw_cdev_get_cycle_timer _0c;
1344 struct fw_cdev_allocate_iso_resource _0d;
1345 struct fw_cdev_send_stream_packet _13;
1346 })];
1347 int ret; 1350 int ret;
1348 1351
1352 if (fw_device_is_shutdown(client->device))
1353 return -ENODEV;
1354
1349 if (_IOC_TYPE(cmd) != '#' || 1355 if (_IOC_TYPE(cmd) != '#' ||
1350 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) 1356 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
1351 return -EINVAL; 1357 return -EINVAL;
1352 1358
1353 if (_IOC_DIR(cmd) & _IOC_WRITE) { 1359 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1354 if (_IOC_SIZE(cmd) > sizeof(buffer) || 1360 if (_IOC_SIZE(cmd) > sizeof(buffer) ||
1355 copy_from_user(buffer, arg, _IOC_SIZE(cmd))) 1361 copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
1356 return -EFAULT; 1362 return -EFAULT;
1357 } 1363 }
1358 1364
1359 ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer); 1365 ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
1360 if (ret < 0) 1366 if (ret < 0)
1361 return ret; 1367 return ret;
1362 1368
1363 if (_IOC_DIR(cmd) & _IOC_READ) { 1369 if (_IOC_DIR(cmd) & _IOC_READ) {
1364 if (_IOC_SIZE(cmd) > sizeof(buffer) || 1370 if (_IOC_SIZE(cmd) > sizeof(buffer) ||
1365 copy_to_user(arg, buffer, _IOC_SIZE(cmd))) 1371 copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
1366 return -EFAULT; 1372 return -EFAULT;
1367 } 1373 }
1368 1374
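The union ioctl_arg type that replaces the old on-stack char array is defined earlier in the file, outside this hunk; presumably it simply folds the per-command request structs into one union along these lines (member names inferred from the arg-> accesses in this patch):

union ioctl_arg {
        struct fw_cdev_get_info                 get_info;
        struct fw_cdev_send_request             send_request;
        struct fw_cdev_allocate                 allocate;
        struct fw_cdev_deallocate               deallocate;
        struct fw_cdev_create_iso_context       create_iso_context;
        struct fw_cdev_queue_iso                queue_iso;
        struct fw_cdev_start_iso                start_iso;
        struct fw_cdev_stop_iso                 stop_iso;
        struct fw_cdev_get_cycle_timer          get_cycle_timer;
        struct fw_cdev_get_cycle_timer2         get_cycle_timer2;
        struct fw_cdev_allocate_iso_resource    allocate_iso_resource;
        struct fw_cdev_send_stream_packet       send_stream_packet;
        /* ... one member per fw_cdev request struct ... */
};

sizeof(buffer) is then the size of the largest request struct, so the two _IOC_SIZE(cmd) > sizeof(buffer) checks bound both copy directions without the per-handler casts that the old char array needed.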
@@ -1372,24 +1378,14 @@ static int dispatch_ioctl(struct client *client,
1372static long fw_device_op_ioctl(struct file *file, 1378static long fw_device_op_ioctl(struct file *file,
1373 unsigned int cmd, unsigned long arg) 1379 unsigned int cmd, unsigned long arg)
1374{ 1380{
1375 struct client *client = file->private_data; 1381 return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
1376
1377 if (fw_device_is_shutdown(client->device))
1378 return -ENODEV;
1379
1380 return dispatch_ioctl(client, cmd, (void __user *) arg);
1381} 1382}
1382 1383
1383#ifdef CONFIG_COMPAT 1384#ifdef CONFIG_COMPAT
1384static long fw_device_op_compat_ioctl(struct file *file, 1385static long fw_device_op_compat_ioctl(struct file *file,
1385 unsigned int cmd, unsigned long arg) 1386 unsigned int cmd, unsigned long arg)
1386{ 1387{
1387 struct client *client = file->private_data; 1388 return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
1388
1389 if (fw_device_is_shutdown(client->device))
1390 return -ENODEV;
1391
1392 return dispatch_ioctl(client, cmd, compat_ptr(arg));
1393} 1389}
1394#endif 1390#endif
1395 1391
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 9d0dfcbe2c1c..014cabd3afda 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -18,6 +18,7 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/bug.h>
21#include <linux/ctype.h> 22#include <linux/ctype.h>
22#include <linux/delay.h> 23#include <linux/delay.h>
23#include <linux/device.h> 24#include <linux/device.h>
@@ -43,7 +44,7 @@
43 44
44#include "core.h" 45#include "core.h"
45 46
46void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p) 47void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
47{ 48{
48 ci->p = p + 1; 49 ci->p = p + 1;
49 ci->end = ci->p + (p[0] >> 16); 50 ci->end = ci->p + (p[0] >> 16);
@@ -59,9 +60,76 @@ int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
59} 60}
60EXPORT_SYMBOL(fw_csr_iterator_next); 61EXPORT_SYMBOL(fw_csr_iterator_next);
61 62
63static const u32 *search_leaf(const u32 *directory, int search_key)
64{
65 struct fw_csr_iterator ci;
66 int last_key = 0, key, value;
67
68 fw_csr_iterator_init(&ci, directory);
69 while (fw_csr_iterator_next(&ci, &key, &value)) {
70 if (last_key == search_key &&
71 key == (CSR_DESCRIPTOR | CSR_LEAF))
72 return ci.p - 1 + value;
73
74 last_key = key;
75 }
76
77 return NULL;
78}
79
80static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
81{
82 unsigned int quadlets, i;
83 char c;
84
85 if (!size || !buf)
86 return -EINVAL;
87
88 quadlets = min(block[0] >> 16, 256U);
89 if (quadlets < 2)
90 return -ENODATA;
91
92 if (block[1] != 0 || block[2] != 0)
93 /* unknown language/character set */
94 return -ENODATA;
95
96 block += 3;
97 quadlets -= 2;
98 for (i = 0; i < quadlets * 4 && i < size - 1; i++) {
99 c = block[i / 4] >> (24 - 8 * (i % 4));
100 if (c == '\0')
101 break;
102 buf[i] = c;
103 }
104 buf[i] = '\0';
105
106 return i;
107}
108
109/**
110 * fw_csr_string - reads a string from the configuration ROM
111 * @directory: e.g. root directory or unit directory
112 * @key: the key of the preceding directory entry
113 * @buf: where to put the string
114 * @size: size of @buf, in bytes
115 *
116 * The string is taken from a minimal ASCII text descriptor leaf after
117 * the immediate entry with @key. The string is zero-terminated.
118 * Returns strlen(buf) or a negative error code.
119 */
120int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
121{
122 const u32 *leaf = search_leaf(directory, key);
123 if (!leaf)
124 return -ENOENT;
125
126 return textual_leaf_to_string(leaf, buf, size);
127}
128EXPORT_SYMBOL(fw_csr_string);
129
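A minimal sketch of a driver-side caller of the new helper, reading the textual descriptor that follows a CSR_MODEL entry (the buffer size and the use of unit->directory are illustrative; the usual <linux/firewire.h> definitions are assumed):

static void print_model_name(struct fw_unit *unit)
{
        char name[64];
        int ret;

        ret = fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name));
        if (ret >= 0)
                printk(KERN_INFO "model name: %s\n", name);
        else
                printk(KERN_INFO "no model name leaf (%d)\n", ret);
}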
62static bool is_fw_unit(struct device *dev); 130static bool is_fw_unit(struct device *dev);
63 131
64static int match_unit_directory(u32 *directory, u32 match_flags, 132static int match_unit_directory(const u32 *directory, u32 match_flags,
65 const struct ieee1394_device_id *id) 133 const struct ieee1394_device_id *id)
66{ 134{
67 struct fw_csr_iterator ci; 135 struct fw_csr_iterator ci;
@@ -195,7 +263,7 @@ static ssize_t show_immediate(struct device *dev,
195 struct config_rom_attribute *attr = 263 struct config_rom_attribute *attr =
196 container_of(dattr, struct config_rom_attribute, attr); 264 container_of(dattr, struct config_rom_attribute, attr);
197 struct fw_csr_iterator ci; 265 struct fw_csr_iterator ci;
198 u32 *dir; 266 const u32 *dir;
199 int key, value, ret = -ENOENT; 267 int key, value, ret = -ENOENT;
200 268
201 down_read(&fw_device_rwsem); 269 down_read(&fw_device_rwsem);
@@ -226,10 +294,10 @@ static ssize_t show_text_leaf(struct device *dev,
226{ 294{
227 struct config_rom_attribute *attr = 295 struct config_rom_attribute *attr =
228 container_of(dattr, struct config_rom_attribute, attr); 296 container_of(dattr, struct config_rom_attribute, attr);
229 struct fw_csr_iterator ci; 297 const u32 *dir;
230 u32 *dir, *block = NULL, *p, *end; 298 size_t bufsize;
231 int length, key, value, last_key = 0, ret = -ENOENT; 299 char dummy_buf[2];
232 char *b; 300 int ret;
233 301
234 down_read(&fw_device_rwsem); 302 down_read(&fw_device_rwsem);
235 303
@@ -238,40 +306,23 @@ static ssize_t show_text_leaf(struct device *dev,
238 else 306 else
239 dir = fw_device(dev)->config_rom + 5; 307 dir = fw_device(dev)->config_rom + 5;
240 308
241 fw_csr_iterator_init(&ci, dir); 309 if (buf) {
242 while (fw_csr_iterator_next(&ci, &key, &value)) { 310 bufsize = PAGE_SIZE - 1;
243 if (attr->key == last_key && 311 } else {
244 key == (CSR_DESCRIPTOR | CSR_LEAF)) 312 buf = dummy_buf;
245 block = ci.p - 1 + value; 313 bufsize = 1;
246 last_key = key;
247 } 314 }
248 315
249 if (block == NULL) 316 ret = fw_csr_string(dir, attr->key, buf, bufsize);
250 goto out;
251
252 length = min(block[0] >> 16, 256U);
253 if (length < 3)
254 goto out;
255
256 if (block[1] != 0 || block[2] != 0)
257 /* Unknown encoding. */
258 goto out;
259 317
260 if (buf == NULL) { 318 if (ret >= 0) {
261 ret = length * 4; 319 /* Strip trailing whitespace and add newline. */
262 goto out; 320 while (ret > 0 && isspace(buf[ret - 1]))
321 ret--;
322 strcpy(buf + ret, "\n");
323 ret++;
263 } 324 }
264 325
265 b = buf;
266 end = &block[length + 1];
267 for (p = &block[3]; p < end; p++, b += 4)
268 * (u32 *) b = (__force u32) __cpu_to_be32(*p);
269
270 /* Strip trailing whitespace and add newline. */
271 while (b--, (isspace(*b) || *b == '\0') && b > buf);
272 strcpy(b + 1, "\n");
273 ret = b + 2 - buf;
274 out:
275 up_read(&fw_device_rwsem); 326 up_read(&fw_device_rwsem);
276 327
277 return ret; 328 return ret;
@@ -371,7 +422,7 @@ static ssize_t guid_show(struct device *dev,
371 return ret; 422 return ret;
372} 423}
373 424
374static int units_sprintf(char *buf, u32 *directory) 425static int units_sprintf(char *buf, const u32 *directory)
375{ 426{
376 struct fw_csr_iterator ci; 427 struct fw_csr_iterator ci;
377 int key, value; 428 int key, value;
@@ -441,28 +492,29 @@ static int read_rom(struct fw_device *device,
441 return rcode; 492 return rcode;
442} 493}
443 494
444#define READ_BIB_ROM_SIZE 256 495#define MAX_CONFIG_ROM_SIZE 256
445#define READ_BIB_STACK_SIZE 16
446 496
447/* 497/*
448 * Read the bus info block, perform a speed probe, and read all of the rest of 498 * Read the bus info block, perform a speed probe, and read all of the rest of
449 * the config ROM. We do all this with a cached bus generation. If the bus 499 * the config ROM. We do all this with a cached bus generation. If the bus
450 * generation changes under us, read_bus_info_block will fail and get retried. 500 * generation changes under us, read_config_rom will fail and get retried.
451 * It's better to start all over in this case because the node from which we 501 * It's better to start all over in this case because the node from which we
452 * are reading the ROM may have changed the ROM during the reset. 502 * are reading the ROM may have changed the ROM during the reset.
453 */ 503 */
454static int read_bus_info_block(struct fw_device *device, int generation) 504static int read_config_rom(struct fw_device *device, int generation)
455{ 505{
456 u32 *rom, *stack, *old_rom, *new_rom; 506 const u32 *old_rom, *new_rom;
507 u32 *rom, *stack;
457 u32 sp, key; 508 u32 sp, key;
458 int i, end, length, ret = -1; 509 int i, end, length, ret = -1;
459 510
460 rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE + 511 rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
461 sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL); 512 sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
462 if (rom == NULL) 513 if (rom == NULL)
463 return -ENOMEM; 514 return -ENOMEM;
464 515
465 stack = &rom[READ_BIB_ROM_SIZE]; 516 stack = &rom[MAX_CONFIG_ROM_SIZE];
517 memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE);
466 518
467 device->max_speed = SCODE_100; 519 device->max_speed = SCODE_100;
468 520
@@ -529,40 +581,54 @@ static int read_bus_info_block(struct fw_device *device, int generation)
529 */ 581 */
530 key = stack[--sp]; 582 key = stack[--sp];
531 i = key & 0xffffff; 583 i = key & 0xffffff;
532 if (i >= READ_BIB_ROM_SIZE) 584 if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
533 /*
534 * The reference points outside the standard
535 * config rom area, something's fishy.
536 */
537 goto out; 585 goto out;
538 586
539 /* Read header quadlet for the block to get the length. */ 587 /* Read header quadlet for the block to get the length. */
540 if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE) 588 if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
541 goto out; 589 goto out;
542 end = i + (rom[i] >> 16) + 1; 590 end = i + (rom[i] >> 16) + 1;
543 i++; 591 if (end > MAX_CONFIG_ROM_SIZE) {
544 if (end > READ_BIB_ROM_SIZE)
545 /* 592 /*
546 * This block extends outside standard config 593 * This block extends outside the config ROM which is
547 * area (and the array we're reading it 594 * a firmware bug. Ignore this whole block, i.e.
548 * into). That's broken, so ignore this 595 * simply set a fake block length of 0.
549 * device.
550 */ 596 */
551 goto out; 597 fw_error("skipped invalid ROM block %x at %llx\n",
598 rom[i],
599 i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
600 rom[i] = 0;
601 end = i;
602 }
603 i++;
552 604
553 /* 605 /*
554 * Now read in the block. If this is a directory 606 * Now read in the block. If this is a directory
555 * block, check the entries as we read them to see if 607 * block, check the entries as we read them to see if
556 * it references another block, and push it in that case. 608 * it references another block, and push it in that case.
557 */ 609 */
558 while (i < end) { 610 for (; i < end; i++) {
559 if (read_rom(device, generation, i, &rom[i]) != 611 if (read_rom(device, generation, i, &rom[i]) !=
560 RCODE_COMPLETE) 612 RCODE_COMPLETE)
561 goto out; 613 goto out;
562 if ((key >> 30) == 3 && (rom[i] >> 30) > 1 && 614
563 sp < READ_BIB_STACK_SIZE) 615 if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
564 stack[sp++] = i + rom[i]; 616 continue;
565 i++; 617 /*
618 * Offset points outside the ROM. May be a firmware
619 * bug or an Extended ROM entry (IEEE 1212-2001 clause
620 * 7.7.18). Simply overwrite this pointer here by a
621 * fake immediate entry so that later iterators over
622 * the ROM don't have to check offsets all the time.
623 */
624 if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) {
625 fw_error("skipped unsupported ROM entry %x at %llx\n",
626 rom[i],
627 i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
628 rom[i] = 0;
629 continue;
630 }
631 stack[sp++] = i + rom[i];
566 } 632 }
567 if (length < i) 633 if (length < i)
568 length = i; 634 length = i;
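The bit tests in this loop follow the IEEE 1212 directory entry layout: each quadlet carries a 2-bit key type, a 6-bit key id and a 24-bit value, and only leaf (type 2) and directory (type 3) entries reference another block, as a quadlet offset relative to the entry itself. A small decoder spelling that out, purely for reference:

/* Sketch: pick apart one config ROM directory entry quadlet. */
struct csr_entry {
        unsigned int key_type;  /* 0 immediate, 1 CSR offset, 2 leaf, 3 directory */
        unsigned int key_id;
        unsigned int value;     /* immediate value, or offset to the referenced block */
};

static struct csr_entry decode_csr_entry(u32 quadlet)
{
        struct csr_entry e;

        e.key_type = quadlet >> 30;
        e.key_id   = (quadlet >> 24) & 0x3f;
        e.value    = quadlet & 0xffffff;

        return e;
}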
@@ -905,7 +971,7 @@ static void fw_device_init(struct work_struct *work)
905 * device. 971 * device.
906 */ 972 */
907 973
908 if (read_bus_info_block(device, device->generation) < 0) { 974 if (read_config_rom(device, device->generation) < 0) {
909 if (device->config_rom_retries < MAX_RETRIES && 975 if (device->config_rom_retries < MAX_RETRIES &&
910 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { 976 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
911 device->config_rom_retries++; 977 device->config_rom_retries++;
@@ -1022,7 +1088,7 @@ enum {
1022}; 1088};
1023 1089
1024/* Reread and compare bus info block and header of root directory */ 1090/* Reread and compare bus info block and header of root directory */
1025static int reread_bus_info_block(struct fw_device *device, int generation) 1091static int reread_config_rom(struct fw_device *device, int generation)
1026{ 1092{
1027 u32 q; 1093 u32 q;
1028 int i; 1094 int i;
@@ -1048,7 +1114,7 @@ static void fw_device_refresh(struct work_struct *work)
1048 struct fw_card *card = device->card; 1114 struct fw_card *card = device->card;
1049 int node_id = device->node_id; 1115 int node_id = device->node_id;
1050 1116
1051 switch (reread_bus_info_block(device, device->generation)) { 1117 switch (reread_config_rom(device, device->generation)) {
1052 case REREAD_BIB_ERROR: 1118 case REREAD_BIB_ERROR:
1053 if (device->config_rom_retries < MAX_RETRIES / 2 && 1119 if (device->config_rom_retries < MAX_RETRIES / 2 &&
1054 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { 1120 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
@@ -1082,7 +1148,7 @@ static void fw_device_refresh(struct work_struct *work)
1082 */ 1148 */
1083 device_for_each_child(&device->device, NULL, shutdown_unit); 1149 device_for_each_child(&device->device, NULL, shutdown_unit);
1084 1150
1085 if (read_bus_info_block(device, device->generation) < 0) { 1151 if (read_config_rom(device, device->generation) < 0) {
1086 if (device->config_rom_retries < MAX_RETRIES && 1152 if (device->config_rom_retries < MAX_RETRIES &&
1087 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { 1153 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
1088 device->config_rom_retries++; 1154 device->config_rom_retries++;
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 495849eb13cc..673b03f8b4ec 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -921,23 +921,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
921 void *payload, size_t length, void *callback_data) 921 void *payload, size_t length, void *callback_data)
922{ 922{
923 int reg = offset & ~CSR_REGISTER_BASE; 923 int reg = offset & ~CSR_REGISTER_BASE;
924 unsigned long long bus_time;
925 __be32 *data = payload; 924 __be32 *data = payload;
926 int rcode = RCODE_COMPLETE; 925 int rcode = RCODE_COMPLETE;
927 926
928 switch (reg) { 927 switch (reg) {
929 case CSR_CYCLE_TIME: 928 case CSR_CYCLE_TIME:
930 case CSR_BUS_TIME: 929 if (TCODE_IS_READ_REQUEST(tcode) && length == 4)
931 if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) { 930 *data = cpu_to_be32(card->driver->get_cycle_time(card));
932 rcode = RCODE_TYPE_ERROR;
933 break;
934 }
935
936 bus_time = card->driver->get_bus_time(card);
937 if (reg == CSR_CYCLE_TIME)
938 *data = cpu_to_be32(bus_time);
939 else 931 else
940 *data = cpu_to_be32(bus_time >> 25); 932 rcode = RCODE_TYPE_ERROR;
941 break; 933 break;
942 934
943 case CSR_BROADCAST_CHANNEL: 935 case CSR_BROADCAST_CHANNEL:
@@ -968,6 +960,9 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
968 case CSR_BUSY_TIMEOUT: 960 case CSR_BUSY_TIMEOUT:
969 /* FIXME: Implement this. */ 961 /* FIXME: Implement this. */
970 962
963 case CSR_BUS_TIME:
964 /* Useless without initialization by the bus manager. */
965
971 default: 966 default:
972 rcode = RCODE_ADDRESS_ERROR; 967 rcode = RCODE_ADDRESS_ERROR;
973 break; 968 break;
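After this change a quadlet read of CYCLE_TIME is still served, while BUS_TIME now falls through to RCODE_ADDRESS_ERROR. For reference, the kind of request this handler answers can be generated from userspace roughly as follows (a sketch; the CSR offset is written out numerically since CSR_REGISTER_BASE/CSR_CYCLE_TIME may not be available in the exported headers):

#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>
#include <linux/firewire-constants.h>

/* Sketch: read a node's CYCLE_TIME register through the character device.
 * The register value arrives later in an FW_CDEV_EVENT_RESPONSE event. */
static int read_cycle_time(int fd, __u32 generation)
{
        struct fw_cdev_send_request req = {
                .tcode      = TCODE_READ_QUADLET_REQUEST,
                .length     = 4,
                .offset     = 0xfffff0000000ULL + 0x200, /* CSR_REGISTER_BASE + CSR_CYCLE_TIME */
                .generation = generation,  /* from a prior bus reset event */
        };

        return ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &req);
}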
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index ed3b1a765c00..fb0321300cce 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -70,7 +70,7 @@ struct fw_card_driver {
70 int (*enable_phys_dma)(struct fw_card *card, 70 int (*enable_phys_dma)(struct fw_card *card,
71 int node_id, int generation); 71 int node_id, int generation);
72 72
73 u64 (*get_bus_time)(struct fw_card *card); 73 u32 (*get_cycle_time)(struct fw_card *card);
74 74
75 struct fw_iso_context * 75 struct fw_iso_context *
76 (*allocate_iso_context)(struct fw_card *card, 76 (*allocate_iso_context)(struct fw_card *card,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 43ebf337b131..75dc6988cffd 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -38,7 +38,6 @@
38#include <linux/spinlock.h> 38#include <linux/spinlock.h>
39#include <linux/string.h> 39#include <linux/string.h>
40 40
41#include <asm/atomic.h>
42#include <asm/byteorder.h> 41#include <asm/byteorder.h>
43#include <asm/page.h> 42#include <asm/page.h>
44#include <asm/system.h> 43#include <asm/system.h>
@@ -73,20 +72,6 @@ struct descriptor {
73 __le16 transfer_status; 72 __le16 transfer_status;
74} __attribute__((aligned(16))); 73} __attribute__((aligned(16)));
75 74
76struct db_descriptor {
77 __le16 first_size;
78 __le16 control;
79 __le16 second_req_count;
80 __le16 first_req_count;
81 __le32 branch_address;
82 __le16 second_res_count;
83 __le16 first_res_count;
84 __le32 reserved0;
85 __le32 first_buffer;
86 __le32 second_buffer;
87 __le32 reserved1;
88} __attribute__((aligned(16)));
89
90#define CONTROL_SET(regs) (regs) 75#define CONTROL_SET(regs) (regs)
91#define CONTROL_CLEAR(regs) ((regs) + 4) 76#define CONTROL_CLEAR(regs) ((regs) + 4)
92#define COMMAND_PTR(regs) ((regs) + 12) 77#define COMMAND_PTR(regs) ((regs) + 12)
@@ -181,31 +166,16 @@ struct fw_ohci {
181 struct fw_card card; 166 struct fw_card card;
182 167
183 __iomem char *registers; 168 __iomem char *registers;
184 dma_addr_t self_id_bus;
185 __le32 *self_id_cpu;
186 struct tasklet_struct bus_reset_tasklet;
187 int node_id; 169 int node_id;
188 int generation; 170 int generation;
189 int request_generation; /* for timestamping incoming requests */ 171 int request_generation; /* for timestamping incoming requests */
190 atomic_t bus_seconds; 172 unsigned quirks;
191
192 bool use_dualbuffer;
193 bool old_uninorth;
194 bool bus_reset_packet_quirk;
195 173
196 /* 174 /*
197 * Spinlock for accessing fw_ohci data. Never call out of 175 * Spinlock for accessing fw_ohci data. Never call out of
198 * this driver with this lock held. 176 * this driver with this lock held.
199 */ 177 */
200 spinlock_t lock; 178 spinlock_t lock;
201 u32 self_id_buffer[512];
202
203 /* Config rom buffers */
204 __be32 *config_rom;
205 dma_addr_t config_rom_bus;
206 __be32 *next_config_rom;
207 dma_addr_t next_config_rom_bus;
208 __be32 next_header;
209 179
210 struct ar_context ar_request_ctx; 180 struct ar_context ar_request_ctx;
211 struct ar_context ar_response_ctx; 181 struct ar_context ar_response_ctx;
@@ -217,6 +187,18 @@ struct fw_ohci {
217 u64 ir_context_channels; 187 u64 ir_context_channels;
218 u32 ir_context_mask; 188 u32 ir_context_mask;
219 struct iso_context *ir_context_list; 189 struct iso_context *ir_context_list;
190
191 __be32 *config_rom;
192 dma_addr_t config_rom_bus;
193 __be32 *next_config_rom;
194 dma_addr_t next_config_rom_bus;
195 __be32 next_header;
196
197 __le32 *self_id_cpu;
198 dma_addr_t self_id_bus;
199 struct tasklet_struct bus_reset_tasklet;
200
201 u32 self_id_buffer[512];
220}; 202};
221 203
222static inline struct fw_ohci *fw_ohci(struct fw_card *card) 204static inline struct fw_ohci *fw_ohci(struct fw_card *card)
@@ -249,6 +231,30 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
249 231
250static char ohci_driver_name[] = KBUILD_MODNAME; 232static char ohci_driver_name[] = KBUILD_MODNAME;
251 233
234#define QUIRK_CYCLE_TIMER 1
235#define QUIRK_RESET_PACKET 2
236#define QUIRK_BE_HEADERS 4
237
238/* In case of multiple matches in ohci_quirks[], only the first one is used. */
239static const struct {
240 unsigned short vendor, device, flags;
241} ohci_quirks[] = {
242 {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
243 {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
244 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
245 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
246 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
247};
248
249/* This overrides anything that was found in ohci_quirks[]. */
250static int param_quirks;
251module_param_named(quirks, param_quirks, int, 0644);
252MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
253 ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
254 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
255 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
256 ")");
257
252#ifdef CONFIG_FIREWIRE_OHCI_DEBUG 258#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
253 259
254#define OHCI_PARAM_DEBUG_AT_AR 1 260#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -275,7 +281,7 @@ static void log_irqs(u32 evt)
275 !(evt & OHCI1394_busReset)) 281 !(evt & OHCI1394_busReset))
276 return; 282 return;
277 283
278 fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, 284 fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
279 evt & OHCI1394_selfIDComplete ? " selfID" : "", 285 evt & OHCI1394_selfIDComplete ? " selfID" : "",
280 evt & OHCI1394_RQPkt ? " AR_req" : "", 286 evt & OHCI1394_RQPkt ? " AR_req" : "",
281 evt & OHCI1394_RSPkt ? " AR_resp" : "", 287 evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -285,7 +291,6 @@ static void log_irqs(u32 evt)
285 evt & OHCI1394_isochTx ? " IT" : "", 291 evt & OHCI1394_isochTx ? " IT" : "",
286 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", 292 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
287 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", 293 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
288 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
289 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", 294 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
290 evt & OHCI1394_regAccessFail ? " regAccessFail" : "", 295 evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
291 evt & OHCI1394_busReset ? " busReset" : "", 296 evt & OHCI1394_busReset ? " busReset" : "",
@@ -293,8 +298,7 @@ static void log_irqs(u32 evt)
293 OHCI1394_RSPkt | OHCI1394_reqTxComplete | 298 OHCI1394_RSPkt | OHCI1394_reqTxComplete |
294 OHCI1394_respTxComplete | OHCI1394_isochRx | 299 OHCI1394_respTxComplete | OHCI1394_isochRx |
295 OHCI1394_isochTx | OHCI1394_postedWriteErr | 300 OHCI1394_isochTx | OHCI1394_postedWriteErr |
296 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | 301 OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent |
297 OHCI1394_cycleInconsistent |
298 OHCI1394_regAccessFail | OHCI1394_busReset) 302 OHCI1394_regAccessFail | OHCI1394_busReset)
299 ? " ?" : ""); 303 ? " ?" : "");
300} 304}
@@ -524,7 +528,7 @@ static void ar_context_release(struct ar_context *ctx)
524 528
525#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) 529#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
526#define cond_le32_to_cpu(v) \ 530#define cond_le32_to_cpu(v) \
527 (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v)) 531 (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
528#else 532#else
529#define cond_le32_to_cpu(v) le32_to_cpu(v) 533#define cond_le32_to_cpu(v) le32_to_cpu(v)
530#endif 534#endif
@@ -605,7 +609,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
605 * at a slightly incorrect time (in bus_reset_tasklet). 609 * at a slightly incorrect time (in bus_reset_tasklet).
606 */ 610 */
607 if (evt == OHCI1394_evt_bus_reset) { 611 if (evt == OHCI1394_evt_bus_reset) {
608 if (!ohci->bus_reset_packet_quirk) 612 if (!(ohci->quirks & QUIRK_RESET_PACKET))
609 ohci->request_generation = (p.header[2] >> 16) & 0xff; 613 ohci->request_generation = (p.header[2] >> 16) & 0xff;
610 } else if (ctx == &ohci->ar_request_ctx) { 614 } else if (ctx == &ohci->ar_request_ctx) {
611 fw_core_handle_request(&ohci->card, &p); 615 fw_core_handle_request(&ohci->card, &p);
@@ -1329,7 +1333,7 @@ static void bus_reset_tasklet(unsigned long data)
1329 context_stop(&ohci->at_response_ctx); 1333 context_stop(&ohci->at_response_ctx);
1330 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); 1334 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1331 1335
1332 if (ohci->bus_reset_packet_quirk) 1336 if (ohci->quirks & QUIRK_RESET_PACKET)
1333 ohci->request_generation = generation; 1337 ohci->request_generation = generation;
1334 1338
1335 /* 1339 /*
@@ -1384,7 +1388,7 @@ static void bus_reset_tasklet(unsigned long data)
1384static irqreturn_t irq_handler(int irq, void *data) 1388static irqreturn_t irq_handler(int irq, void *data)
1385{ 1389{
1386 struct fw_ohci *ohci = data; 1390 struct fw_ohci *ohci = data;
1387 u32 event, iso_event, cycle_time; 1391 u32 event, iso_event;
1388 int i; 1392 int i;
1389 1393
1390 event = reg_read(ohci, OHCI1394_IntEventClear); 1394 event = reg_read(ohci, OHCI1394_IntEventClear);
@@ -1454,12 +1458,6 @@ static irqreturn_t irq_handler(int irq, void *data)
1454 fw_notify("isochronous cycle inconsistent\n"); 1458 fw_notify("isochronous cycle inconsistent\n");
1455 } 1459 }
1456 1460
1457 if (event & OHCI1394_cycle64Seconds) {
1458 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1459 if ((cycle_time & 0x80000000) == 0)
1460 atomic_inc(&ohci->bus_seconds);
1461 }
1462
1463 return IRQ_HANDLED; 1461 return IRQ_HANDLED;
1464} 1462}
1465 1463
@@ -1553,8 +1551,7 @@ static int ohci_enable(struct fw_card *card,
1553 OHCI1394_reqTxComplete | OHCI1394_respTxComplete | 1551 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1554 OHCI1394_isochRx | OHCI1394_isochTx | 1552 OHCI1394_isochRx | OHCI1394_isochTx |
1555 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong | 1553 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
1556 OHCI1394_cycleInconsistent | 1554 OHCI1394_cycleInconsistent | OHCI1394_regAccessFail |
1557 OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
1558 OHCI1394_masterIntEnable); 1555 OHCI1394_masterIntEnable);
1559 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 1556 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
1560 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); 1557 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
@@ -1794,16 +1791,61 @@ static int ohci_enable_phys_dma(struct fw_card *card,
1794#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ 1791#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
1795} 1792}
1796 1793
1797static u64 ohci_get_bus_time(struct fw_card *card) 1794static u32 cycle_timer_ticks(u32 cycle_timer)
1798{ 1795{
1799 struct fw_ohci *ohci = fw_ohci(card); 1796 u32 ticks;
1800 u32 cycle_time;
1801 u64 bus_time;
1802 1797
1803 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1798 ticks = cycle_timer & 0xfff;
1804 bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time; 1799 ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
1800 ticks += (3072 * 8000) * (cycle_timer >> 25);
1801
1802 return ticks;
1803}
1804
1805/*
1806 * Some controllers exhibit one or more of the following bugs when updating the
1807 * iso cycle timer register:
1808 * - When the lowest six bits are wrapping around to zero, a read that happens
1809 * at the same time will return garbage in the lowest ten bits.
1810 * - When the cycleOffset field wraps around to zero, the cycleCount field is
1811 * not incremented for about 60 ns.
1812 * - Occasionally, the entire register reads zero.
1813 *
1814 * To catch these, we read the register three times and ensure that the
1815 * difference between each two consecutive reads is approximately the same, i.e.
1816 * less than twice the other. Furthermore, any negative difference indicates an
1817 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
1818 * execute, so we have enough precision to compute the ratio of the differences.)
1819 */
1820static u32 ohci_get_cycle_time(struct fw_card *card)
1821{
1822 struct fw_ohci *ohci = fw_ohci(card);
1823 u32 c0, c1, c2;
1824 u32 t0, t1, t2;
1825 s32 diff01, diff12;
1826 int i;
1805 1827
1806 return bus_time; 1828 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1829
1830 if (ohci->quirks & QUIRK_CYCLE_TIMER) {
1831 i = 0;
1832 c1 = c2;
1833 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1834 do {
1835 c0 = c1;
1836 c1 = c2;
1837 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1838 t0 = cycle_timer_ticks(c0);
1839 t1 = cycle_timer_ticks(c1);
1840 t2 = cycle_timer_ticks(c2);
1841 diff01 = t1 - t0;
1842 diff12 = t2 - t1;
1843 } while ((diff01 <= 0 || diff12 <= 0 ||
1844 diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
1845 && i++ < 20);
1846 }
1847
1848 return c2;
1807} 1849}
1808 1850
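To make the acceptance condition of that do-while loop explicit: with three back-to-back tick counts, both deltas must be positive and within a factor of two of each other, so e.g. readings of 100, 120, 141 ticks pass, while a glitched middle read of 0 is rejected through the negative delta. The predicate, factored out purely as an illustration (not part of the driver):

/* Sketch: the consistency test applied to two consecutive tick deltas. */
static bool cycle_reads_consistent(s32 diff01, s32 diff12)
{
        return diff01 > 0 && diff12 > 0 &&
               diff01 / diff12 < 2 && diff12 / diff01 < 2;
}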
1809static void copy_iso_headers(struct iso_context *ctx, void *p) 1851static void copy_iso_headers(struct iso_context *ctx, void *p)
@@ -1828,52 +1870,6 @@ static void copy_iso_headers(struct iso_context *ctx, void *p)
1828 ctx->header_length += ctx->base.header_size; 1870 ctx->header_length += ctx->base.header_size;
1829} 1871}
1830 1872
1831static int handle_ir_dualbuffer_packet(struct context *context,
1832 struct descriptor *d,
1833 struct descriptor *last)
1834{
1835 struct iso_context *ctx =
1836 container_of(context, struct iso_context, context);
1837 struct db_descriptor *db = (struct db_descriptor *) d;
1838 __le32 *ir_header;
1839 size_t header_length;
1840 void *p, *end;
1841
1842 if (db->first_res_count != 0 && db->second_res_count != 0) {
1843 if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
1844 /* This descriptor isn't done yet, stop iteration. */
1845 return 0;
1846 }
1847 ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
1848 }
1849
1850 header_length = le16_to_cpu(db->first_req_count) -
1851 le16_to_cpu(db->first_res_count);
1852
1853 p = db + 1;
1854 end = p + header_length;
1855 while (p < end) {
1856 copy_iso_headers(ctx, p);
1857 ctx->excess_bytes +=
1858 (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
1859 p += max(ctx->base.header_size, (size_t)8);
1860 }
1861
1862 ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
1863 le16_to_cpu(db->second_res_count);
1864
1865 if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
1866 ir_header = (__le32 *) (db + 1);
1867 ctx->base.callback(&ctx->base,
1868 le32_to_cpu(ir_header[0]) & 0xffff,
1869 ctx->header_length, ctx->header,
1870 ctx->base.callback_data);
1871 ctx->header_length = 0;
1872 }
1873
1874 return 1;
1875}
1876
1877static int handle_ir_packet_per_buffer(struct context *context, 1873static int handle_ir_packet_per_buffer(struct context *context,
1878 struct descriptor *d, 1874 struct descriptor *d,
1879 struct descriptor *last) 1875 struct descriptor *last)
@@ -1960,10 +1956,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
1960 channels = &ohci->ir_context_channels; 1956 channels = &ohci->ir_context_channels;
1961 mask = &ohci->ir_context_mask; 1957 mask = &ohci->ir_context_mask;
1962 list = ohci->ir_context_list; 1958 list = ohci->ir_context_list;
1963 if (ohci->use_dualbuffer) 1959 callback = handle_ir_packet_per_buffer;
1964 callback = handle_ir_dualbuffer_packet;
1965 else
1966 callback = handle_ir_packet_per_buffer;
1967 } 1960 }
1968 1961
1969 spin_lock_irqsave(&ohci->lock, flags); 1962 spin_lock_irqsave(&ohci->lock, flags);
@@ -2026,8 +2019,6 @@ static int ohci_start_iso(struct fw_iso_context *base,
2026 } else { 2019 } else {
2027 index = ctx - ohci->ir_context_list; 2020 index = ctx - ohci->ir_context_list;
2028 control = IR_CONTEXT_ISOCH_HEADER; 2021 control = IR_CONTEXT_ISOCH_HEADER;
2029 if (ohci->use_dualbuffer)
2030 control |= IR_CONTEXT_DUAL_BUFFER_MODE;
2031 match = (tags << 28) | (sync << 8) | ctx->base.channel; 2022 match = (tags << 28) | (sync << 8) | ctx->base.channel;
2032 if (cycle >= 0) { 2023 if (cycle >= 0) {
2033 match |= (cycle & 0x07fff) << 12; 2024 match |= (cycle & 0x07fff) << 12;
@@ -2188,92 +2179,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
2188 return 0; 2179 return 0;
2189} 2180}
2190 2181
2191static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2192 struct fw_iso_packet *packet,
2193 struct fw_iso_buffer *buffer,
2194 unsigned long payload)
2195{
2196 struct iso_context *ctx = container_of(base, struct iso_context, base);
2197 struct db_descriptor *db = NULL;
2198 struct descriptor *d;
2199 struct fw_iso_packet *p;
2200 dma_addr_t d_bus, page_bus;
2201 u32 z, header_z, length, rest;
2202 int page, offset, packet_count, header_size;
2203
2204 /*
2205 * FIXME: Cycle lost behavior should be configurable: lose
2206 * packet, retransmit or terminate..
2207 */
2208
2209 p = packet;
2210 z = 2;
2211
2212 /*
2213 * The OHCI controller puts the isochronous header and trailer in the
2214 * buffer, so we need at least 8 bytes.
2215 */
2216 packet_count = p->header_length / ctx->base.header_size;
2217 header_size = packet_count * max(ctx->base.header_size, (size_t)8);
2218
2219 /* Get header size in number of descriptors. */
2220 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
2221 page = payload >> PAGE_SHIFT;
2222 offset = payload & ~PAGE_MASK;
2223 rest = p->payload_length;
2224 /*
2225 * The controllers I've tested have not worked correctly when
2226 * second_req_count is zero. Rather than do something we know won't
2227 * work, return an error
2228 */
2229 if (rest == 0)
2230 return -EINVAL;
2231
2232 while (rest > 0) {
2233 d = context_get_descriptors(&ctx->context,
2234 z + header_z, &d_bus);
2235 if (d == NULL)
2236 return -ENOMEM;
2237
2238 db = (struct db_descriptor *) d;
2239 db->control = cpu_to_le16(DESCRIPTOR_STATUS |
2240 DESCRIPTOR_BRANCH_ALWAYS);
2241 db->first_size =
2242 cpu_to_le16(max(ctx->base.header_size, (size_t)8));
2243 if (p->skip && rest == p->payload_length) {
2244 db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
2245 db->first_req_count = db->first_size;
2246 } else {
2247 db->first_req_count = cpu_to_le16(header_size);
2248 }
2249 db->first_res_count = db->first_req_count;
2250 db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
2251
2252 if (p->skip && rest == p->payload_length)
2253 length = 4;
2254 else if (offset + rest < PAGE_SIZE)
2255 length = rest;
2256 else
2257 length = PAGE_SIZE - offset;
2258
2259 db->second_req_count = cpu_to_le16(length);
2260 db->second_res_count = db->second_req_count;
2261 page_bus = page_private(buffer->pages[page]);
2262 db->second_buffer = cpu_to_le32(page_bus + offset);
2263
2264 if (p->interrupt && length == rest)
2265 db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
2266
2267 context_append(&ctx->context, d, z, header_z);
2268 offset = (offset + length) & ~PAGE_MASK;
2269 rest -= length;
2270 if (offset == 0)
2271 page++;
2272 }
2273
2274 return 0;
2275}
2276
2277static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, 2182static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2278 struct fw_iso_packet *packet, 2183 struct fw_iso_packet *packet,
2279 struct fw_iso_buffer *buffer, 2184 struct fw_iso_buffer *buffer,
@@ -2364,9 +2269,6 @@ static int ohci_queue_iso(struct fw_iso_context *base,
2364 spin_lock_irqsave(&ctx->context.ohci->lock, flags); 2269 spin_lock_irqsave(&ctx->context.ohci->lock, flags);
2365 if (base->type == FW_ISO_CONTEXT_TRANSMIT) 2270 if (base->type == FW_ISO_CONTEXT_TRANSMIT)
2366 ret = ohci_queue_iso_transmit(base, packet, buffer, payload); 2271 ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
2367 else if (ctx->context.ohci->use_dualbuffer)
2368 ret = ohci_queue_iso_receive_dualbuffer(base, packet,
2369 buffer, payload);
2370 else 2272 else
2371 ret = ohci_queue_iso_receive_packet_per_buffer(base, packet, 2273 ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
2372 buffer, payload); 2274 buffer, payload);
@@ -2383,7 +2285,7 @@ static const struct fw_card_driver ohci_driver = {
2383 .send_response = ohci_send_response, 2285 .send_response = ohci_send_response,
2384 .cancel_packet = ohci_cancel_packet, 2286 .cancel_packet = ohci_cancel_packet,
2385 .enable_phys_dma = ohci_enable_phys_dma, 2287 .enable_phys_dma = ohci_enable_phys_dma,
2386 .get_bus_time = ohci_get_bus_time, 2288 .get_cycle_time = ohci_get_cycle_time,
2387 2289
2388 .allocate_iso_context = ohci_allocate_iso_context, 2290 .allocate_iso_context = ohci_allocate_iso_context,
2389 .free_iso_context = ohci_free_iso_context, 2291 .free_iso_context = ohci_free_iso_context,
@@ -2421,17 +2323,13 @@ static void ohci_pmac_off(struct pci_dev *dev)
2421#define ohci_pmac_off(dev) 2323#define ohci_pmac_off(dev)
2422#endif /* CONFIG_PPC_PMAC */ 2324#endif /* CONFIG_PPC_PMAC */
2423 2325
2424#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
2425#define PCI_DEVICE_ID_AGERE_FW643 0x5901
2426#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024
2427
2428static int __devinit pci_probe(struct pci_dev *dev, 2326static int __devinit pci_probe(struct pci_dev *dev,
2429 const struct pci_device_id *ent) 2327 const struct pci_device_id *ent)
2430{ 2328{
2431 struct fw_ohci *ohci; 2329 struct fw_ohci *ohci;
2432 u32 bus_options, max_receive, link_speed, version; 2330 u32 bus_options, max_receive, link_speed, version;
2433 u64 guid; 2331 u64 guid;
2434 int err; 2332 int i, err, n_ir, n_it;
2435 size_t size; 2333 size_t size;
2436 2334
2437 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); 2335 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
@@ -2472,36 +2370,15 @@ static int __devinit pci_probe(struct pci_dev *dev,
2472 goto fail_iomem; 2370 goto fail_iomem;
2473 } 2371 }
2474 2372
2475 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 2373 for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
2476#if 0 2374 if (ohci_quirks[i].vendor == dev->vendor &&
2477 /* FIXME: make it a context option or remove dual-buffer mode */ 2375 (ohci_quirks[i].device == dev->device ||
2478 ohci->use_dualbuffer = version >= OHCI_VERSION_1_1; 2376 ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) {
2479#endif 2377 ohci->quirks = ohci_quirks[i].flags;
2480 2378 break;
2481 /* dual-buffer mode is broken if more than one IR context is active */ 2379 }
2482 if (dev->vendor == PCI_VENDOR_ID_AGERE && 2380 if (param_quirks)
2483 dev->device == PCI_DEVICE_ID_AGERE_FW643) 2381 ohci->quirks = param_quirks;
2484 ohci->use_dualbuffer = false;
2485
2486 /* dual-buffer mode is broken */
2487 if (dev->vendor == PCI_VENDOR_ID_RICOH &&
2488 dev->device == PCI_DEVICE_ID_RICOH_R5C832)
2489 ohci->use_dualbuffer = false;
2490
2491/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
2492#if !defined(CONFIG_X86_32)
2493 /* dual-buffer mode is broken with descriptor addresses above 2G */
2494 if (dev->vendor == PCI_VENDOR_ID_TI &&
2495 (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
2496 dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
2497 ohci->use_dualbuffer = false;
2498#endif
2499
2500#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
2501 ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
2502 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
2503#endif
2504 ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
2505 2382
2506 ar_context_init(&ohci->ar_request_ctx, ohci, 2383 ar_context_init(&ohci->ar_request_ctx, ohci,
2507 OHCI1394_AsReqRcvContextControlSet); 2384 OHCI1394_AsReqRcvContextControlSet);
@@ -2516,17 +2393,19 @@ static int __devinit pci_probe(struct pci_dev *dev,
2516 OHCI1394_AsRspTrContextControlSet, handle_at_packet); 2393 OHCI1394_AsRspTrContextControlSet, handle_at_packet);
2517 2394
2518 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); 2395 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
2519 ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); 2396 ohci->ir_context_channels = ~0ULL;
2397 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
2520 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); 2398 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
2521 size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask); 2399 n_ir = hweight32(ohci->ir_context_mask);
2522 ohci->it_context_list = kzalloc(size, GFP_KERNEL); 2400 size = sizeof(struct iso_context) * n_ir;
2401 ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
2523 2402
2524 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); 2403 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
2525 ohci->ir_context_channels = ~0ULL; 2404 ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
2526 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
2527 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); 2405 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
2528 size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); 2406 n_it = hweight32(ohci->it_context_mask);
2529 ohci->ir_context_list = kzalloc(size, GFP_KERNEL); 2407 size = sizeof(struct iso_context) * n_it;
2408 ohci->it_context_list = kzalloc(size, GFP_KERNEL);
2530 2409
2531 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { 2410 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
2532 err = -ENOMEM; 2411 err = -ENOMEM;
@@ -2553,8 +2432,11 @@ static int __devinit pci_probe(struct pci_dev *dev,
2553 if (err) 2432 if (err)
2554 goto fail_self_id; 2433 goto fail_self_id;
2555 2434
2556 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", 2435 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2557 dev_name(&dev->dev), version >> 16, version & 0xff); 2436 fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
2437 "%d IR + %d IT contexts, quirks 0x%x\n",
2438 dev_name(&dev->dev), version >> 16, version & 0xff,
2439 n_ir, n_it, ohci->quirks);
2558 2440
2559 return 0; 2441 return 0;
2560 2442
@@ -2662,7 +2544,7 @@ static int pci_resume(struct pci_dev *dev)
2662} 2544}
2663#endif 2545#endif
2664 2546
2665static struct pci_device_id pci_table[] = { 2547static const struct pci_device_id pci_table[] = {
2666 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, 2548 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
2667 { } 2549 { }
2668}; 2550};
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 70fef40cd22f..ca264f2fdf0c 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1014,7 +1014,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
1014 return 0; 1014 return 0;
1015} 1015}
1016 1016
1017static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory) 1017static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt,
1018 const u32 *directory)
1018{ 1019{
1019 struct fw_csr_iterator ci; 1020 struct fw_csr_iterator ci;
1020 int key, value; 1021 int key, value;
@@ -1027,7 +1028,7 @@ static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
1027 return 0; 1028 return 0;
1028} 1029}
1029 1030
1030static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, 1031static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory,
1031 u32 *model, u32 *firmware_revision) 1032 u32 *model, u32 *firmware_revision)
1032{ 1033{
1033 struct fw_csr_iterator ci; 1034 struct fw_csr_iterator ci;
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 39c5aa75b8f1..abe3f446ca48 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,7 +4,7 @@
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6 6
7drm-y := drm_auth.o drm_bufs.o drm_cache.o \ 7drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
8 drm_context.o drm_dma.o drm_drawable.o \ 8 drm_context.o drm_dma.o drm_drawable.o \
9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ 9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
new file mode 100644
index 000000000000..55d03ed05000
--- /dev/null
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -0,0 +1,184 @@
1/**************************************************************************
2 *
3 * Copyright 2010 Pauli Nieminen.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
 29 * Multipart buffer for copying data which is larger than the page size.
30 *
31 * Authors:
32 * Pauli Nieminen <suokkos-at-gmail-dot-com>
33 */
34
35#include "drm_buffer.h"
36
37/**
38 * Allocate the drm buffer object.
39 *
40 * buf: Pointer to a pointer where the object is stored.
41 * size: The number of bytes to allocate.
42 */
43int drm_buffer_alloc(struct drm_buffer **buf, int size)
44{
45 int nr_pages = size / PAGE_SIZE + 1;
46 int idx;
47
 48 /* Allocating the pointer table at the end of the structure makes
 49 * drm_buffer variable sized. */
50 *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
51 GFP_KERNEL);
52
53 if (*buf == NULL) {
54 DRM_ERROR("Failed to allocate drm buffer object to hold"
55 " %d bytes in %d pages.\n",
56 size, nr_pages);
57 return -ENOMEM;
58 }
59
60 (*buf)->size = size;
61
62 for (idx = 0; idx < nr_pages; ++idx) {
63
64 (*buf)->data[idx] =
65 kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
66 GFP_KERNEL);
67
68
69 if ((*buf)->data[idx] == NULL) {
70 DRM_ERROR("Failed to allocate %dth page for drm"
71 " buffer with %d bytes and %d pages.\n",
72 idx + 1, size, nr_pages);
73 goto error_out;
74 }
75
76 }
77
78 return 0;
79
80error_out:
81
 82 /* Only the last element can be a null pointer, so check it first. */
83 if ((*buf)->data[idx])
84 kfree((*buf)->data[idx]);
85
86 for (--idx; idx >= 0; --idx)
87 kfree((*buf)->data[idx]);
88
89 kfree(*buf);
90 return -ENOMEM;
91}
92EXPORT_SYMBOL(drm_buffer_alloc);
93
94/**
 95 * Copy the user data to the beginning of the buffer and reset the
 96 * processing iterator.
 97 *
 98 * user_data: A pointer to the data that is copied to the buffer.
 99 * size: The number of bytes to copy.
100 */
101extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
102 void __user *user_data, int size)
103{
104 int nr_pages = size / PAGE_SIZE + 1;
105 int idx;
106
107 if (size > buf->size) {
108 DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
109 " %d bytes space\n",
110 size, buf->size);
111 return -EFAULT;
112 }
113
114 for (idx = 0; idx < nr_pages; ++idx) {
115
116 if (DRM_COPY_FROM_USER(buf->data[idx],
117 user_data + idx * PAGE_SIZE,
118 min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
119 DRM_ERROR("Failed to copy user data (%p) to drm buffer"
120 " (%p) %dth page.\n",
121 user_data, buf, idx);
122 return -EFAULT;
123
124 }
125 }
126 buf->iterator = 0;
127 return 0;
128}
129EXPORT_SYMBOL(drm_buffer_copy_from_user);
130
131/**
132 * Free the drm buffer object
133 */
134void drm_buffer_free(struct drm_buffer *buf)
135{
136
137 if (buf != NULL) {
138
139 int nr_pages = buf->size / PAGE_SIZE + 1;
140 int idx;
141 for (idx = 0; idx < nr_pages; ++idx)
142 kfree(buf->data[idx]);
143
144 kfree(buf);
145 }
146}
147EXPORT_SYMBOL(drm_buffer_free);
148
149/**
 150 * Read an object from the buffer that may be split into multiple parts.
 151 * If the object is not split, the function simply returns a pointer to it
 152 * inside the buffer. If the object is split, its data is copied into the
 153 * stack object supplied by the caller.
154 *
155 * The processing location of the buffer is also advanced to the next byte
156 * after the object.
157 *
 158 * objsize: The size of the object in bytes.
159 * stack_obj: A pointer to a memory location where object can be copied.
160 */
161void *drm_buffer_read_object(struct drm_buffer *buf,
162 int objsize, void *stack_obj)
163{
164 int idx = drm_buffer_index(buf);
165 int page = drm_buffer_page(buf);
166 void *obj = 0;
167
168 if (idx + objsize <= PAGE_SIZE) {
169 obj = &buf->data[page][idx];
170 } else {
171 /* The object is split which forces copy to temporary object.*/
172 int beginsz = PAGE_SIZE - idx;
173 memcpy(stack_obj, &buf->data[page][idx], beginsz);
174
175 memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
176 objsize - beginsz);
177
178 obj = stack_obj;
179 }
180
181 drm_buffer_advance(buf, objsize);
182 return obj;
183}
184EXPORT_SYMBOL(drm_buffer_read_object);
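Taken together, the four exported helpers form the whole API of this new file: allocate a paged buffer, fill it from user space, walk it object by object, free it. A minimal caller sketch follows (not part of the patch); struct my_cmd, my_parse_cmds() and the fixed-size walk are hypothetical, and only functions visible in this file plus its drm_buffer.h header are used:

#include <linux/types.h>
#include "drm_buffer.h"

/* Hypothetical command header; a real caller defines its own layout. */
struct my_cmd {
	u32 opcode;
	u32 length;
};

static int my_parse_cmds(void __user *user_cmds, int size)
{
	struct drm_buffer *buf;
	struct my_cmd stack_cmd, *cmd;
	int i, count, ret;

	ret = drm_buffer_alloc(&buf, size);
	if (ret)
		return ret;

	ret = drm_buffer_copy_from_user(buf, user_cmds, size);
	if (ret)
		goto out;

	count = size / sizeof(*cmd);
	for (i = 0; i < count; i++) {
		/* Returns a pointer into the buffer, or into stack_cmd when
		 * the header straddles a page boundary. */
		cmd = drm_buffer_read_object(buf, sizeof(*cmd), &stack_cmd);

		/* ... validate cmd->opcode and cmd->length here ... */
	}
out:
	drm_buffer_free(buf);
	return ret;
}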
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7d0f00a935fa..f2aaf39be398 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -836,11 +836,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
836 mode_changed = true; 836 mode_changed = true;
837 } else if (set->fb == NULL) { 837 } else if (set->fb == NULL) {
838 mode_changed = true; 838 mode_changed = true;
839 } else if ((set->fb->bits_per_pixel != 839 } else
840 set->crtc->fb->bits_per_pixel) ||
841 set->fb->depth != set->crtc->fb->depth)
842 fb_changed = true;
843 else
844 fb_changed = true; 840 fb_changed = true;
845 } 841 }
846 842
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 766c46875a20..f3c58e2bd75c 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -125,28 +125,28 @@ static struct drm_ioctl_desc drm_ioctls[] = {
125 125
126 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 126 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
127 127
128 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), 128 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
129 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), 129 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
130 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), 130 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
131 131
132 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW), 132 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
133 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), 133 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
134 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), 134 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
135 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 135 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
136 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER), 136 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
137 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER), 137 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
138 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW), 138 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
139 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW), 139 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
140 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 140 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
141 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 141 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
142 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW), 142 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
143 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 143 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
144 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 144 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
145 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW), 145 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
146 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), 146 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
147 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), 147 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
148 DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 148 DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
149 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW) 149 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
150}; 150};
151 151
152#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 152#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ab6c97330412..f97e7c42ac8e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -60,8 +60,7 @@
60#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5) 60#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
61/* use +hsync +vsync for detailed mode */ 61/* use +hsync +vsync for detailed mode */
62#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) 62#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
63/* define the number of Extension EDID block */ 63
64#define MAX_EDID_EXT_NUM 4
65 64
66#define LEVEL_DMT 0 65#define LEVEL_DMT 0
67#define LEVEL_GTF 1 66#define LEVEL_GTF 1
@@ -114,14 +113,14 @@ static const u8 edid_header[] = {
114}; 113};
115 114
116/** 115/**
117 * edid_is_valid - sanity check EDID data 116 * drm_edid_is_valid - sanity check EDID data
118 * @edid: EDID data 117 * @edid: EDID data
119 * 118 *
120 * Sanity check the EDID block by looking at the header, the version number 119 * Sanity check the EDID block by looking at the header, the version number
121 * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's 120 * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
122 * valid. 121 * valid.
123 */ 122 */
124static bool edid_is_valid(struct edid *edid) 123bool drm_edid_is_valid(struct edid *edid)
125{ 124{
126 int i, score = 0; 125 int i, score = 0;
127 u8 csum = 0; 126 u8 csum = 0;
@@ -163,6 +162,7 @@ bad:
163 } 162 }
164 return 0; 163 return 0;
165} 164}
165EXPORT_SYMBOL(drm_edid_is_valid);
166 166
167/** 167/**
168 * edid_vendor - match a string against EDID's obfuscated vendor field 168 * edid_vendor - match a string against EDID's obfuscated vendor field
@@ -1112,8 +1112,8 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
1112 } 1112 }
1113 1113
 1114 /* Choose real EDID extension number */ 1114 /* Choose real EDID extension number */
1115 edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ? 1115 edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
1116 MAX_EDID_EXT_NUM : edid->extensions; 1116 DRM_MAX_EDID_EXT_NUM : edid->extensions;
1117 1117
1118 /* Find CEA extension */ 1118 /* Find CEA extension */
1119 for (i = 0; i < edid_ext_num; i++) { 1119 for (i = 0; i < edid_ext_num; i++) {
@@ -1195,7 +1195,7 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
1195 for (i = 0; i < 4; i++) { 1195 for (i = 0; i < 4; i++) {
1196 if (drm_do_probe_ddc_edid(adapter, buf, len)) 1196 if (drm_do_probe_ddc_edid(adapter, buf, len))
1197 return -1; 1197 return -1;
1198 if (edid_is_valid((struct edid *)buf)) 1198 if (drm_edid_is_valid((struct edid *)buf))
1199 return 0; 1199 return 0;
1200 } 1200 }
1201 1201
@@ -1220,7 +1220,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
1220 int ret; 1220 int ret;
1221 struct edid *edid; 1221 struct edid *edid;
1222 1222
1223 edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1), 1223 edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
1224 GFP_KERNEL); 1224 GFP_KERNEL);
1225 if (edid == NULL) { 1225 if (edid == NULL) {
1226 dev_warn(&connector->dev->pdev->dev, 1226 dev_warn(&connector->dev->pdev->dev,
@@ -1238,14 +1238,14 @@ struct edid *drm_get_edid(struct drm_connector *connector,
1238 if (edid->extensions != 0) { 1238 if (edid->extensions != 0) {
1239 int edid_ext_num = edid->extensions; 1239 int edid_ext_num = edid->extensions;
1240 1240
1241 if (edid_ext_num > MAX_EDID_EXT_NUM) { 1241 if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
1242 dev_warn(&connector->dev->pdev->dev, 1242 dev_warn(&connector->dev->pdev->dev,
1243 "The number of extension(%d) is " 1243 "The number of extension(%d) is "
1244 "over max (%d), actually read number (%d)\n", 1244 "over max (%d), actually read number (%d)\n",
1245 edid_ext_num, MAX_EDID_EXT_NUM, 1245 edid_ext_num, DRM_MAX_EDID_EXT_NUM,
1246 MAX_EDID_EXT_NUM); 1246 DRM_MAX_EDID_EXT_NUM);
1247 /* Reset EDID extension number to be read */ 1247 /* Reset EDID extension number to be read */
1248 edid_ext_num = MAX_EDID_EXT_NUM; 1248 edid_ext_num = DRM_MAX_EDID_EXT_NUM;
1249 } 1249 }
1250 /* Read EDID including extensions too */ 1250 /* Read EDID including extensions too */
1251 ret = drm_ddc_read_edid(connector, adapter, (char *)edid, 1251 ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
@@ -1288,8 +1288,8 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
1288 goto end; 1288 goto end;
1289 1289
 1290 /* Choose real EDID extension number */ 1290 /* Choose real EDID extension number */
1291 edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ? 1291 edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
1292 MAX_EDID_EXT_NUM : edid->extensions; 1292 DRM_MAX_EDID_EXT_NUM : edid->extensions;
1293 1293
1294 /* Find CEA extension */ 1294 /* Find CEA extension */
1295 for (i = 0; i < edid_ext_num; i++) { 1295 for (i = 0; i < edid_ext_num; i++) {
@@ -1346,7 +1346,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
1346 if (edid == NULL) { 1346 if (edid == NULL) {
1347 return 0; 1347 return 0;
1348 } 1348 }
1349 if (!edid_is_valid(edid)) { 1349 if (!drm_edid_is_valid(edid)) {
1350 dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", 1350 dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
1351 drm_get_connector_name(connector)); 1351 drm_get_connector_name(connector));
1352 return 0; 1352 return 0;
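With drm_edid_is_valid() exported, a driver that obtains EDID bytes through its own channel (for example from a BIOS table) can validate them before handing them to drm_add_edid_modes(). A short sketch; raw_edid and connector are assumed to come from the caller, and the includes mirror what drm_edid.c itself uses:

#include "drmP.h"
#include "drm_edid.h"

static int my_add_modes_from_blob(struct drm_connector *connector, u8 *raw_edid)
{
	struct edid *edid = (struct edid *)raw_edid;

	if (!drm_edid_is_valid(edid))
		return 0;	/* corrupt or non-EDID data: add no modes */

	return drm_add_edid_modes(connector, edid);
}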
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0f9e90552dc4..50549703584f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,7 @@
27 * Dave Airlie <airlied@linux.ie> 27 * Dave Airlie <airlied@linux.ie>
28 * Jesse Barnes <jesse.barnes@intel.com> 28 * Jesse Barnes <jesse.barnes@intel.com>
29 */ 29 */
30#include <linux/kernel.h>
30#include <linux/sysrq.h> 31#include <linux/sysrq.h>
31#include <linux/fb.h> 32#include <linux/fb.h>
32#include "drmP.h" 33#include "drmP.h"
@@ -50,21 +51,6 @@ int drm_fb_helper_add_connector(struct drm_connector *connector)
50} 51}
51EXPORT_SYMBOL(drm_fb_helper_add_connector); 52EXPORT_SYMBOL(drm_fb_helper_add_connector);
52 53
53static int my_atoi(const char *name)
54{
55 int val = 0;
56
57 for (;; name++) {
58 switch (*name) {
59 case '0' ... '9':
60 val = 10*val+(*name-'0');
61 break;
62 default:
63 return val;
64 }
65 }
66}
67
68/** 54/**
69 * drm_fb_helper_connector_parse_command_line - parse command line for connector 55 * drm_fb_helper_connector_parse_command_line - parse command line for connector
70 * @connector - connector to parse line for 56 * @connector - connector to parse line for
@@ -111,7 +97,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
111 namelen = i; 97 namelen = i;
112 if (!refresh_specified && !bpp_specified && 98 if (!refresh_specified && !bpp_specified &&
113 !yres_specified) { 99 !yres_specified) {
114 refresh = my_atoi(&name[i+1]); 100 refresh = simple_strtol(&name[i+1], NULL, 10);
115 refresh_specified = 1; 101 refresh_specified = 1;
116 if (cvt || rb) 102 if (cvt || rb)
117 cvt = 0; 103 cvt = 0;
@@ -121,7 +107,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
121 case '-': 107 case '-':
122 namelen = i; 108 namelen = i;
123 if (!bpp_specified && !yres_specified) { 109 if (!bpp_specified && !yres_specified) {
124 bpp = my_atoi(&name[i+1]); 110 bpp = simple_strtol(&name[i+1], NULL, 10);
125 bpp_specified = 1; 111 bpp_specified = 1;
126 if (cvt || rb) 112 if (cvt || rb)
127 cvt = 0; 113 cvt = 0;
@@ -130,7 +116,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
130 break; 116 break;
131 case 'x': 117 case 'x':
132 if (!yres_specified) { 118 if (!yres_specified) {
133 yres = my_atoi(&name[i+1]); 119 yres = simple_strtol(&name[i+1], NULL, 10);
134 yres_specified = 1; 120 yres_specified = 1;
135 } else 121 } else
136 goto done; 122 goto done;
@@ -170,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
170 } 156 }
171 } 157 }
172 if (i < 0 && yres_specified) { 158 if (i < 0 && yres_specified) {
173 xres = my_atoi(name); 159 xres = simple_strtol(name, NULL, 10);
174 res_specified = 1; 160 res_specified = 1;
175 } 161 }
176done: 162done:
@@ -694,7 +680,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
694 int i; 680 int i;
695 681
696 if (var->pixclock != 0) { 682 if (var->pixclock != 0) {
697 DRM_ERROR("PIXEL CLCOK SET\n"); 683 DRM_ERROR("PIXEL CLOCK SET\n");
698 return -EINVAL; 684 return -EINVAL;
699 } 685 }
700 686
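The conversion uses the kernel's stock simple_strtol() from <linux/kernel.h> in place of the removed my_atoi(); a tiny sketch of what it computes for the fragments parsed above, with literal strings standing in for &name[i+1]:

#include <linux/kernel.h>

static void my_mode_parse_example(void)
{
	/* e.g. the bpp and refresh parts of a "1024x768-24@60" mode string */
	int bpp = simple_strtol("24", NULL, 10);	/* -> 24 */
	int refresh = simple_strtol("60", NULL, 10);	/* -> 60 */

	(void)bpp;
	(void)refresh;
}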
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8bf3770f294e..aa89d4b0b4c4 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -192,9 +192,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
192 idr_remove(&filp->object_idr, handle); 192 idr_remove(&filp->object_idr, handle);
193 spin_unlock(&filp->table_lock); 193 spin_unlock(&filp->table_lock);
194 194
195 mutex_lock(&dev->struct_mutex); 195 drm_gem_object_handle_unreference_unlocked(obj);
196 drm_gem_object_handle_unreference(obj);
197 mutex_unlock(&dev->struct_mutex);
198 196
199 return 0; 197 return 0;
200} 198}
@@ -325,9 +323,7 @@ again:
325 } 323 }
326 324
327err: 325err:
328 mutex_lock(&dev->struct_mutex); 326 drm_gem_object_unreference_unlocked(obj);
329 drm_gem_object_unreference(obj);
330 mutex_unlock(&dev->struct_mutex);
331 return ret; 327 return ret;
332} 328}
333 329
@@ -358,9 +354,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
358 return -ENOENT; 354 return -ENOENT;
359 355
360 ret = drm_gem_handle_create(file_priv, obj, &handle); 356 ret = drm_gem_handle_create(file_priv, obj, &handle);
361 mutex_lock(&dev->struct_mutex); 357 drm_gem_object_unreference_unlocked(obj);
362 drm_gem_object_unreference(obj);
363 mutex_unlock(&dev->struct_mutex);
364 if (ret) 358 if (ret)
365 return ret; 359 return ret;
366 360
@@ -390,7 +384,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
390{ 384{
391 struct drm_gem_object *obj = ptr; 385 struct drm_gem_object *obj = ptr;
392 386
393 drm_gem_object_handle_unreference(obj); 387 drm_gem_object_handle_unreference_unlocked(obj);
394 388
395 return 0; 389 return 0;
396} 390}
@@ -403,16 +397,25 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
403void 397void
404drm_gem_release(struct drm_device *dev, struct drm_file *file_private) 398drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
405{ 399{
406 mutex_lock(&dev->struct_mutex);
407 idr_for_each(&file_private->object_idr, 400 idr_for_each(&file_private->object_idr,
408 &drm_gem_object_release_handle, NULL); 401 &drm_gem_object_release_handle, NULL);
409 402
410 idr_destroy(&file_private->object_idr); 403 idr_destroy(&file_private->object_idr);
411 mutex_unlock(&dev->struct_mutex); 404}
405
406static void
407drm_gem_object_free_common(struct drm_gem_object *obj)
408{
409 struct drm_device *dev = obj->dev;
410 fput(obj->filp);
411 atomic_dec(&dev->object_count);
412 atomic_sub(obj->size, &dev->object_memory);
413 kfree(obj);
412} 414}
413 415
414/** 416/**
415 * Called after the last reference to the object has been lost. 417 * Called after the last reference to the object has been lost.
 418 * Must be called holding struct_mutex
416 * 419 *
417 * Frees the object 420 * Frees the object
418 */ 421 */
@@ -427,14 +430,40 @@ drm_gem_object_free(struct kref *kref)
427 if (dev->driver->gem_free_object != NULL) 430 if (dev->driver->gem_free_object != NULL)
428 dev->driver->gem_free_object(obj); 431 dev->driver->gem_free_object(obj);
429 432
430 fput(obj->filp); 433 drm_gem_object_free_common(obj);
431 atomic_dec(&dev->object_count);
432 atomic_sub(obj->size, &dev->object_memory);
433 kfree(obj);
434} 434}
435EXPORT_SYMBOL(drm_gem_object_free); 435EXPORT_SYMBOL(drm_gem_object_free);
436 436
437/** 437/**
438 * Called after the last reference to the object has been lost.
439 * Must be called without holding struct_mutex
440 *
441 * Frees the object
442 */
443void
444drm_gem_object_free_unlocked(struct kref *kref)
445{
446 struct drm_gem_object *obj = (struct drm_gem_object *) kref;
447 struct drm_device *dev = obj->dev;
448
449 if (dev->driver->gem_free_object_unlocked != NULL)
450 dev->driver->gem_free_object_unlocked(obj);
451 else if (dev->driver->gem_free_object != NULL) {
452 mutex_lock(&dev->struct_mutex);
453 dev->driver->gem_free_object(obj);
454 mutex_unlock(&dev->struct_mutex);
455 }
456
457 drm_gem_object_free_common(obj);
458}
459EXPORT_SYMBOL(drm_gem_object_free_unlocked);
460
461static void drm_gem_object_ref_bug(struct kref *list_kref)
462{
463 BUG();
464}
465
466/**
438 * Called after the last handle to the object has been closed 467 * Called after the last handle to the object has been closed
439 * 468 *
440 * Removes any name for the object. Note that this must be 469 * Removes any name for the object. Note that this must be
@@ -458,8 +487,10 @@ drm_gem_object_handle_free(struct kref *kref)
458 /* 487 /*
459 * The object name held a reference to this object, drop 488 * The object name held a reference to this object, drop
460 * that now. 489 * that now.
490 *
491 * This cannot be the last reference, since the handle holds one too.
461 */ 492 */
462 drm_gem_object_unreference(obj); 493 kref_put(&obj->refcount, drm_gem_object_ref_bug);
463 } else 494 } else
464 spin_unlock(&dev->object_name_lock); 495 spin_unlock(&dev->object_name_lock);
465 496
@@ -477,11 +508,8 @@ EXPORT_SYMBOL(drm_gem_vm_open);
477void drm_gem_vm_close(struct vm_area_struct *vma) 508void drm_gem_vm_close(struct vm_area_struct *vma)
478{ 509{
479 struct drm_gem_object *obj = vma->vm_private_data; 510 struct drm_gem_object *obj = vma->vm_private_data;
480 struct drm_device *dev = obj->dev;
481 511
482 mutex_lock(&dev->struct_mutex); 512 drm_gem_object_unreference_unlocked(obj);
483 drm_gem_object_unreference(obj);
484 mutex_unlock(&dev->struct_mutex);
485} 513}
486EXPORT_SYMBOL(drm_gem_vm_close); 514EXPORT_SYMBOL(drm_gem_vm_close);
487 515
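For drivers following this change, the call-site pattern shifts from wrapping the reference drop in struct_mutex to calling the unlocked helper, exactly as the hunks above do for drm_gem_handle_delete(), drm_gem_open_ioctl() and drm_gem_vm_close(). A small sketch of the two patterns (the my_put_obj* names are illustrative):

#include "drmP.h"

/* Old call-site pattern, as removed above: the caller takes struct_mutex. */
static void my_put_obj_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}

/* New call-site pattern: the unlocked variant only takes struct_mutex
 * internally when the final reference is dropped and the driver has no
 * gem_free_object_unlocked() hook (see drm_gem_object_free_unlocked()). */
static void my_put_obj(struct drm_gem_object *obj)
{
	drm_gem_object_unreference_unlocked(obj);
}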
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a894ade03093..1376dfe44c95 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -162,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
162 struct drm_device *dev = node->minor->dev; 162 struct drm_device *dev = node->minor->dev;
163 drm_i915_private_t *dev_priv = dev->dev_private; 163 drm_i915_private_t *dev_priv = dev->dev_private;
164 164
165 if (!IS_IRONLAKE(dev)) { 165 if (!HAS_PCH_SPLIT(dev)) {
166 seq_printf(m, "Interrupt enable: %08x\n", 166 seq_printf(m, "Interrupt enable: %08x\n",
167 I915_READ(IER)); 167 I915_READ(IER));
168 seq_printf(m, "Interrupt identity: %08x\n", 168 seq_printf(m, "Interrupt identity: %08x\n",
@@ -350,6 +350,36 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
350 return 0; 350 return 0;
351} 351}
352 352
353static const char *pin_flag(int pinned)
354{
355 if (pinned > 0)
356 return " P";
357 else if (pinned < 0)
358 return " p";
359 else
360 return "";
361}
362
363static const char *tiling_flag(int tiling)
364{
365 switch (tiling) {
366 default:
367 case I915_TILING_NONE: return "";
368 case I915_TILING_X: return " X";
369 case I915_TILING_Y: return " Y";
370 }
371}
372
373static const char *dirty_flag(int dirty)
374{
375 return dirty ? " dirty" : "";
376}
377
378static const char *purgeable_flag(int purgeable)
379{
380 return purgeable ? " purgeable" : "";
381}
382
353static int i915_error_state(struct seq_file *m, void *unused) 383static int i915_error_state(struct seq_file *m, void *unused)
354{ 384{
355 struct drm_info_node *node = (struct drm_info_node *) m->private; 385 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -357,6 +387,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
357 drm_i915_private_t *dev_priv = dev->dev_private; 387 drm_i915_private_t *dev_priv = dev->dev_private;
358 struct drm_i915_error_state *error; 388 struct drm_i915_error_state *error;
359 unsigned long flags; 389 unsigned long flags;
390 int i, page, offset, elt;
360 391
361 spin_lock_irqsave(&dev_priv->error_lock, flags); 392 spin_lock_irqsave(&dev_priv->error_lock, flags);
362 if (!dev_priv->first_error) { 393 if (!dev_priv->first_error) {
@@ -368,6 +399,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
368 399
369 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 400 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
370 error->time.tv_usec); 401 error->time.tv_usec);
402 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
371 seq_printf(m, "EIR: 0x%08x\n", error->eir); 403 seq_printf(m, "EIR: 0x%08x\n", error->eir);
372 seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); 404 seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
373 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); 405 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
@@ -379,6 +411,59 @@ static int i915_error_state(struct seq_file *m, void *unused)
379 seq_printf(m, " INSTPS: 0x%08x\n", error->instps); 411 seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
380 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); 412 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
381 } 413 }
414 seq_printf(m, "seqno: 0x%08x\n", error->seqno);
415
416 if (error->active_bo_count) {
417 seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
418
419 for (i = 0; i < error->active_bo_count; i++) {
420 seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
421 error->active_bo[i].gtt_offset,
422 error->active_bo[i].size,
423 error->active_bo[i].read_domains,
424 error->active_bo[i].write_domain,
425 error->active_bo[i].seqno,
426 pin_flag(error->active_bo[i].pinned),
427 tiling_flag(error->active_bo[i].tiling),
428 dirty_flag(error->active_bo[i].dirty),
429 purgeable_flag(error->active_bo[i].purgeable));
430
431 if (error->active_bo[i].name)
432 seq_printf(m, " (name: %d)", error->active_bo[i].name);
433 if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
434 seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
435
436 seq_printf(m, "\n");
437 }
438 }
439
440 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
441 if (error->batchbuffer[i]) {
442 struct drm_i915_error_object *obj = error->batchbuffer[i];
443
444 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
445 offset = 0;
446 for (page = 0; page < obj->page_count; page++) {
447 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
448 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
449 offset += 4;
450 }
451 }
452 }
453 }
454
455 if (error->ringbuffer) {
456 struct drm_i915_error_object *obj = error->ringbuffer;
457
458 seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
459 offset = 0;
460 for (page = 0; page < obj->page_count; page++) {
461 for (elt = 0; elt < PAGE_SIZE/4; elt++) {
462 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
463 offset += 4;
464 }
465 }
466 }
382 467
383out: 468out:
384 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 469 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -386,6 +471,165 @@ out:
386 return 0; 471 return 0;
387} 472}
388 473
474static int i915_rstdby_delays(struct seq_file *m, void *unused)
475{
476 struct drm_info_node *node = (struct drm_info_node *) m->private;
477 struct drm_device *dev = node->minor->dev;
478 drm_i915_private_t *dev_priv = dev->dev_private;
479 u16 crstanddelay = I915_READ16(CRSTANDVID);
480
481 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
482
483 return 0;
484}
485
486static int i915_cur_delayinfo(struct seq_file *m, void *unused)
487{
488 struct drm_info_node *node = (struct drm_info_node *) m->private;
489 struct drm_device *dev = node->minor->dev;
490 drm_i915_private_t *dev_priv = dev->dev_private;
491 u16 rgvswctl = I915_READ16(MEMSWCTL);
492
493 seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
494 seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
495 seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
496 rgvswctl & 0x3f);
497
498 return 0;
499}
500
501static int i915_delayfreq_table(struct seq_file *m, void *unused)
502{
503 struct drm_info_node *node = (struct drm_info_node *) m->private;
504 struct drm_device *dev = node->minor->dev;
505 drm_i915_private_t *dev_priv = dev->dev_private;
506 u32 delayfreq;
507 int i;
508
509 for (i = 0; i < 16; i++) {
510 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
511 seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
512 }
513
514 return 0;
515}
516
517static inline int MAP_TO_MV(int map)
518{
519 return 1250 - (map * 25);
520}
521
522static int i915_inttoext_table(struct seq_file *m, void *unused)
523{
524 struct drm_info_node *node = (struct drm_info_node *) m->private;
525 struct drm_device *dev = node->minor->dev;
526 drm_i915_private_t *dev_priv = dev->dev_private;
527 u32 inttoext;
528 int i;
529
530 for (i = 1; i <= 32; i++) {
531 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
532 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
533 }
534
535 return 0;
536}
537
538static int i915_drpc_info(struct seq_file *m, void *unused)
539{
540 struct drm_info_node *node = (struct drm_info_node *) m->private;
541 struct drm_device *dev = node->minor->dev;
542 drm_i915_private_t *dev_priv = dev->dev_private;
543 u32 rgvmodectl = I915_READ(MEMMODECTL);
544
545 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
546 "yes" : "no");
547 seq_printf(m, "Boost freq: %d\n",
548 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
549 MEMMODE_BOOST_FREQ_SHIFT);
550 seq_printf(m, "HW control enabled: %s\n",
551 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
552 seq_printf(m, "SW control enabled: %s\n",
553 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
554 seq_printf(m, "Gated voltage change: %s\n",
555 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
556 seq_printf(m, "Starting frequency: P%d\n",
557 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
558 seq_printf(m, "Max frequency: P%d\n",
559 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
560 seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
561
562 return 0;
563}
564
565static int i915_fbc_status(struct seq_file *m, void *unused)
566{
567 struct drm_info_node *node = (struct drm_info_node *) m->private;
568 struct drm_device *dev = node->minor->dev;
569 struct drm_crtc *crtc;
570 drm_i915_private_t *dev_priv = dev->dev_private;
571 bool fbc_enabled = false;
572
573 if (!dev_priv->display.fbc_enabled) {
574 seq_printf(m, "FBC unsupported on this chipset\n");
575 return 0;
576 }
577
578 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
579 if (!crtc->enabled)
580 continue;
581 if (dev_priv->display.fbc_enabled(crtc))
582 fbc_enabled = true;
583 }
584
585 if (fbc_enabled) {
586 seq_printf(m, "FBC enabled\n");
587 } else {
588 seq_printf(m, "FBC disabled: ");
589 switch (dev_priv->no_fbc_reason) {
590 case FBC_STOLEN_TOO_SMALL:
591 seq_printf(m, "not enough stolen memory");
592 break;
593 case FBC_UNSUPPORTED_MODE:
594 seq_printf(m, "mode not supported");
595 break;
596 case FBC_MODE_TOO_LARGE:
597 seq_printf(m, "mode too large");
598 break;
599 case FBC_BAD_PLANE:
600 seq_printf(m, "FBC unsupported on plane");
601 break;
602 case FBC_NOT_TILED:
603 seq_printf(m, "scanout buffer not tiled");
604 break;
605 default:
606 seq_printf(m, "unknown reason");
607 }
608 seq_printf(m, "\n");
609 }
610 return 0;
611}
612
613static int i915_sr_status(struct seq_file *m, void *unused)
614{
615 struct drm_info_node *node = (struct drm_info_node *) m->private;
616 struct drm_device *dev = node->minor->dev;
617 drm_i915_private_t *dev_priv = dev->dev_private;
618 bool sr_enabled = false;
619
620 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev))
621 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
622 else if (IS_I915GM(dev))
623 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
624 else if (IS_PINEVIEW(dev))
625 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
626
627 seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
628 "disabled");
629
630 return 0;
631}
632
389static int 633static int
390i915_wedged_open(struct inode *inode, 634i915_wedged_open(struct inode *inode,
391 struct file *filp) 635 struct file *filp)
@@ -503,6 +747,13 @@ static struct drm_info_list i915_debugfs_list[] = {
503 {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, 747 {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
504 {"i915_batchbuffers", i915_batchbuffer_info, 0}, 748 {"i915_batchbuffers", i915_batchbuffer_info, 0},
505 {"i915_error_state", i915_error_state, 0}, 749 {"i915_error_state", i915_error_state, 0},
750 {"i915_rstdby_delays", i915_rstdby_delays, 0},
751 {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
752 {"i915_delayfreq_table", i915_delayfreq_table, 0},
753 {"i915_inttoext_table", i915_inttoext_table, 0},
754 {"i915_drpc_info", i915_drpc_info, 0},
755 {"i915_fbc_status", i915_fbc_status, 0},
756 {"i915_sr_status", i915_sr_status, 0},
506}; 757};
507#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 758#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
508 759
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2307f98349f7..8bfc0bbf13e6 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -35,6 +35,9 @@
35#include "i915_drv.h" 35#include "i915_drv.h"
36#include "i915_trace.h" 36#include "i915_trace.h"
37#include <linux/vgaarb.h> 37#include <linux/vgaarb.h>
38#include <linux/acpi.h>
39#include <linux/pnp.h>
40#include <linux/vga_switcheroo.h>
38 41
39/* Really want an OS-independent resettable timer. Would like to have 42/* Really want an OS-independent resettable timer. Would like to have
40 * this loop run for (eg) 3 sec, but have the timer reset every time 43 * this loop run for (eg) 3 sec, but have the timer reset every time
@@ -933,6 +936,120 @@ static int i915_get_bridge_dev(struct drm_device *dev)
933 return 0; 936 return 0;
934} 937}
935 938
939#define MCHBAR_I915 0x44
940#define MCHBAR_I965 0x48
941#define MCHBAR_SIZE (4*4096)
942
943#define DEVEN_REG 0x54
944#define DEVEN_MCHBAR_EN (1 << 28)
945
946/* Allocate space for the MCH regs if needed, return nonzero on error */
947static int
948intel_alloc_mchbar_resource(struct drm_device *dev)
949{
950 drm_i915_private_t *dev_priv = dev->dev_private;
951 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
952 u32 temp_lo, temp_hi = 0;
953 u64 mchbar_addr;
954 int ret = 0;
955
956 if (IS_I965G(dev))
957 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
958 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
959 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
960
961 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
962#ifdef CONFIG_PNP
963 if (mchbar_addr &&
964 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
965 ret = 0;
966 goto out;
967 }
968#endif
969
970 /* Get some space for it */
971 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
972 MCHBAR_SIZE, MCHBAR_SIZE,
973 PCIBIOS_MIN_MEM,
974 0, pcibios_align_resource,
975 dev_priv->bridge_dev);
976 if (ret) {
977 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
978 dev_priv->mch_res.start = 0;
979 goto out;
980 }
981
982 if (IS_I965G(dev))
983 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
984 upper_32_bits(dev_priv->mch_res.start));
985
986 pci_write_config_dword(dev_priv->bridge_dev, reg,
987 lower_32_bits(dev_priv->mch_res.start));
988out:
989 return ret;
990}
991
 992/* Set up MCHBAR if possible; record whether we need to disable it again at teardown */
993static void
994intel_setup_mchbar(struct drm_device *dev)
995{
996 drm_i915_private_t *dev_priv = dev->dev_private;
997 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
998 u32 temp;
999 bool enabled;
1000
1001 dev_priv->mchbar_need_disable = false;
1002
1003 if (IS_I915G(dev) || IS_I915GM(dev)) {
1004 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
1005 enabled = !!(temp & DEVEN_MCHBAR_EN);
1006 } else {
1007 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1008 enabled = temp & 1;
1009 }
1010
1011 /* If it's already enabled, don't have to do anything */
1012 if (enabled)
1013 return;
1014
1015 if (intel_alloc_mchbar_resource(dev))
1016 return;
1017
1018 dev_priv->mchbar_need_disable = true;
1019
1020 /* Space is allocated or reserved, so enable it. */
1021 if (IS_I915G(dev) || IS_I915GM(dev)) {
1022 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
1023 temp | DEVEN_MCHBAR_EN);
1024 } else {
1025 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1026 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
1027 }
1028}
1029
1030static void
1031intel_teardown_mchbar(struct drm_device *dev)
1032{
1033 drm_i915_private_t *dev_priv = dev->dev_private;
1034 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
1035 u32 temp;
1036
1037 if (dev_priv->mchbar_need_disable) {
1038 if (IS_I915G(dev) || IS_I915GM(dev)) {
1039 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
1040 temp &= ~DEVEN_MCHBAR_EN;
1041 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
1042 } else {
1043 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
1044 temp &= ~1;
1045 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
1046 }
1047 }
1048
1049 if (dev_priv->mch_res.start)
1050 release_resource(&dev_priv->mch_res);
1051}
1052
936/** 1053/**
937 * i915_probe_agp - get AGP bootup configuration 1054 * i915_probe_agp - get AGP bootup configuration
938 * @pdev: PCI device 1055 * @pdev: PCI device
@@ -978,59 +1095,123 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
978 * Some of the preallocated space is taken by the GTT 1095 * Some of the preallocated space is taken by the GTT
979 * and popup. GTT is 1K per MB of aperture size, and popup is 4K. 1096 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
980 */ 1097 */
981 if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev)) 1098 if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
982 overhead = 4096; 1099 overhead = 4096;
983 else 1100 else
984 overhead = (*aperture_size / 1024) + 4096; 1101 overhead = (*aperture_size / 1024) + 4096;
985 1102
986 switch (tmp & INTEL_GMCH_GMS_MASK) { 1103 if (IS_GEN6(dev)) {
987 case INTEL_855_GMCH_GMS_DISABLED: 1104 /* SNB has memory control reg at 0x50.w */
988 DRM_ERROR("video memory is disabled\n"); 1105 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
989 return -1; 1106
990 case INTEL_855_GMCH_GMS_STOLEN_1M: 1107 switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
991 stolen = 1 * 1024 * 1024; 1108 case INTEL_855_GMCH_GMS_DISABLED:
992 break; 1109 DRM_ERROR("video memory is disabled\n");
993 case INTEL_855_GMCH_GMS_STOLEN_4M: 1110 return -1;
994 stolen = 4 * 1024 * 1024; 1111 case SNB_GMCH_GMS_STOLEN_32M:
995 break; 1112 stolen = 32 * 1024 * 1024;
996 case INTEL_855_GMCH_GMS_STOLEN_8M: 1113 break;
997 stolen = 8 * 1024 * 1024; 1114 case SNB_GMCH_GMS_STOLEN_64M:
998 break; 1115 stolen = 64 * 1024 * 1024;
999 case INTEL_855_GMCH_GMS_STOLEN_16M: 1116 break;
1000 stolen = 16 * 1024 * 1024; 1117 case SNB_GMCH_GMS_STOLEN_96M:
1001 break; 1118 stolen = 96 * 1024 * 1024;
1002 case INTEL_855_GMCH_GMS_STOLEN_32M: 1119 break;
1003 stolen = 32 * 1024 * 1024; 1120 case SNB_GMCH_GMS_STOLEN_128M:
1004 break; 1121 stolen = 128 * 1024 * 1024;
1005 case INTEL_915G_GMCH_GMS_STOLEN_48M: 1122 break;
1006 stolen = 48 * 1024 * 1024; 1123 case SNB_GMCH_GMS_STOLEN_160M:
1007 break; 1124 stolen = 160 * 1024 * 1024;
1008 case INTEL_915G_GMCH_GMS_STOLEN_64M: 1125 break;
1009 stolen = 64 * 1024 * 1024; 1126 case SNB_GMCH_GMS_STOLEN_192M:
1010 break; 1127 stolen = 192 * 1024 * 1024;
1011 case INTEL_GMCH_GMS_STOLEN_128M: 1128 break;
1012 stolen = 128 * 1024 * 1024; 1129 case SNB_GMCH_GMS_STOLEN_224M:
1013 break; 1130 stolen = 224 * 1024 * 1024;
1014 case INTEL_GMCH_GMS_STOLEN_256M: 1131 break;
1015 stolen = 256 * 1024 * 1024; 1132 case SNB_GMCH_GMS_STOLEN_256M:
1016 break; 1133 stolen = 256 * 1024 * 1024;
1017 case INTEL_GMCH_GMS_STOLEN_96M: 1134 break;
1018 stolen = 96 * 1024 * 1024; 1135 case SNB_GMCH_GMS_STOLEN_288M:
1019 break; 1136 stolen = 288 * 1024 * 1024;
1020 case INTEL_GMCH_GMS_STOLEN_160M: 1137 break;
1021 stolen = 160 * 1024 * 1024; 1138 case SNB_GMCH_GMS_STOLEN_320M:
1022 break; 1139 stolen = 320 * 1024 * 1024;
1023 case INTEL_GMCH_GMS_STOLEN_224M: 1140 break;
1024 stolen = 224 * 1024 * 1024; 1141 case SNB_GMCH_GMS_STOLEN_352M:
1025 break; 1142 stolen = 352 * 1024 * 1024;
1026 case INTEL_GMCH_GMS_STOLEN_352M: 1143 break;
1027 stolen = 352 * 1024 * 1024; 1144 case SNB_GMCH_GMS_STOLEN_384M:
1028 break; 1145 stolen = 384 * 1024 * 1024;
1029 default: 1146 break;
1030 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", 1147 case SNB_GMCH_GMS_STOLEN_416M:
1031 tmp & INTEL_GMCH_GMS_MASK); 1148 stolen = 416 * 1024 * 1024;
1032 return -1; 1149 break;
1150 case SNB_GMCH_GMS_STOLEN_448M:
1151 stolen = 448 * 1024 * 1024;
1152 break;
1153 case SNB_GMCH_GMS_STOLEN_480M:
1154 stolen = 480 * 1024 * 1024;
1155 break;
1156 case SNB_GMCH_GMS_STOLEN_512M:
1157 stolen = 512 * 1024 * 1024;
1158 break;
1159 default:
1160 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
1161 tmp & SNB_GMCH_GMS_STOLEN_MASK);
1162 return -1;
1163 }
1164 } else {
1165 switch (tmp & INTEL_GMCH_GMS_MASK) {
1166 case INTEL_855_GMCH_GMS_DISABLED:
1167 DRM_ERROR("video memory is disabled\n");
1168 return -1;
1169 case INTEL_855_GMCH_GMS_STOLEN_1M:
1170 stolen = 1 * 1024 * 1024;
1171 break;
1172 case INTEL_855_GMCH_GMS_STOLEN_4M:
1173 stolen = 4 * 1024 * 1024;
1174 break;
1175 case INTEL_855_GMCH_GMS_STOLEN_8M:
1176 stolen = 8 * 1024 * 1024;
1177 break;
1178 case INTEL_855_GMCH_GMS_STOLEN_16M:
1179 stolen = 16 * 1024 * 1024;
1180 break;
1181 case INTEL_855_GMCH_GMS_STOLEN_32M:
1182 stolen = 32 * 1024 * 1024;
1183 break;
1184 case INTEL_915G_GMCH_GMS_STOLEN_48M:
1185 stolen = 48 * 1024 * 1024;
1186 break;
1187 case INTEL_915G_GMCH_GMS_STOLEN_64M:
1188 stolen = 64 * 1024 * 1024;
1189 break;
1190 case INTEL_GMCH_GMS_STOLEN_128M:
1191 stolen = 128 * 1024 * 1024;
1192 break;
1193 case INTEL_GMCH_GMS_STOLEN_256M:
1194 stolen = 256 * 1024 * 1024;
1195 break;
1196 case INTEL_GMCH_GMS_STOLEN_96M:
1197 stolen = 96 * 1024 * 1024;
1198 break;
1199 case INTEL_GMCH_GMS_STOLEN_160M:
1200 stolen = 160 * 1024 * 1024;
1201 break;
1202 case INTEL_GMCH_GMS_STOLEN_224M:
1203 stolen = 224 * 1024 * 1024;
1204 break;
1205 case INTEL_GMCH_GMS_STOLEN_352M:
1206 stolen = 352 * 1024 * 1024;
1207 break;
1208 default:
1209 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
1210 tmp & INTEL_GMCH_GMS_MASK);
1211 return -1;
1212 }
1033 } 1213 }
1214
1034 *preallocated_size = stolen - overhead; 1215 *preallocated_size = stolen - overhead;
1035 *start = overhead; 1216 *start = overhead;
1036 1217
@@ -1064,7 +1245,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
1064 int gtt_offset, gtt_size; 1245 int gtt_offset, gtt_size;
1065 1246
1066 if (IS_I965G(dev)) { 1247 if (IS_I965G(dev)) {
1067 if (IS_G4X(dev) || IS_IRONLAKE(dev)) { 1248 if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
1068 gtt_offset = 2*1024*1024; 1249 gtt_offset = 2*1024*1024;
1069 gtt_size = 2*1024*1024; 1250 gtt_size = 2*1024*1024;
1070 } else { 1251 } else {
@@ -1133,6 +1314,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1133 /* Leave 1M for line length buffer & misc. */ 1314 /* Leave 1M for line length buffer & misc. */
1134 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); 1315 compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
1135 if (!compressed_fb) { 1316 if (!compressed_fb) {
1317 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1136 i915_warn_stolen(dev); 1318 i915_warn_stolen(dev);
1137 return; 1319 return;
1138 } 1320 }
@@ -1140,6 +1322,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1140 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); 1322 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
1141 if (!compressed_fb) { 1323 if (!compressed_fb) {
1142 i915_warn_stolen(dev); 1324 i915_warn_stolen(dev);
1325 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1143 return; 1326 return;
1144 } 1327 }
1145 1328
@@ -1199,6 +1382,32 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
1199 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 1382 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1200} 1383}
1201 1384
1385static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1386{
1387 struct drm_device *dev = pci_get_drvdata(pdev);
1388 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1389 if (state == VGA_SWITCHEROO_ON) {
 1390 printk(KERN_INFO "i915: switched on\n");
1391 /* i915 resume handler doesn't set to D0 */
1392 pci_set_power_state(dev->pdev, PCI_D0);
1393 i915_resume(dev);
1394 } else {
1395 printk(KERN_ERR "i915: switched off\n");
1396 i915_suspend(dev, pmm);
1397 }
1398}
1399
1400static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1401{
1402 struct drm_device *dev = pci_get_drvdata(pdev);
1403 bool can_switch;
1404
1405 spin_lock(&dev->count_lock);
1406 can_switch = (dev->open_count == 0);
1407 spin_unlock(&dev->count_lock);
1408 return can_switch;
1409}
1410
1202static int i915_load_modeset_init(struct drm_device *dev, 1411static int i915_load_modeset_init(struct drm_device *dev,
1203 unsigned long prealloc_start, 1412 unsigned long prealloc_start,
1204 unsigned long prealloc_size, 1413 unsigned long prealloc_size,
@@ -1260,6 +1469,12 @@ static int i915_load_modeset_init(struct drm_device *dev,
1260 if (ret) 1469 if (ret)
1261 goto destroy_ringbuffer; 1470 goto destroy_ringbuffer;
1262 1471
1472 ret = vga_switcheroo_register_client(dev->pdev,
1473 i915_switcheroo_set_state,
1474 i915_switcheroo_can_switch);
1475 if (ret)
1476 goto destroy_ringbuffer;
1477
1263 intel_modeset_init(dev); 1478 intel_modeset_init(dev);
1264 1479
1265 ret = drm_irq_install(dev); 1480 ret = drm_irq_install(dev);
@@ -1281,7 +1496,9 @@ static int i915_load_modeset_init(struct drm_device *dev,
1281 return 0; 1496 return 0;
1282 1497
1283destroy_ringbuffer: 1498destroy_ringbuffer:
1499 mutex_lock(&dev->struct_mutex);
1284 i915_gem_cleanup_ringbuffer(dev); 1500 i915_gem_cleanup_ringbuffer(dev);
1501 mutex_unlock(&dev->struct_mutex);
1285out: 1502out:
1286 return ret; 1503 return ret;
1287} 1504}
@@ -1445,11 +1662,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1445 1662
1446 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1663 dev->driver->get_vblank_counter = i915_get_vblank_counter;
1447 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 1664 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
1448 if (IS_G4X(dev) || IS_IRONLAKE(dev)) { 1665 if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
1449 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 1666 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
1450 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 1667 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
1451 } 1668 }
1452 1669
1670 /* Try to make sure MCHBAR is enabled before poking at it */
1671 intel_setup_mchbar(dev);
1672
1453 i915_gem_load(dev); 1673 i915_gem_load(dev);
1454 1674
1455 /* Init HWS */ 1675 /* Init HWS */
@@ -1523,6 +1743,8 @@ int i915_driver_unload(struct drm_device *dev)
1523{ 1743{
1524 struct drm_i915_private *dev_priv = dev->dev_private; 1744 struct drm_i915_private *dev_priv = dev->dev_private;
1525 1745
1746 i915_destroy_error_state(dev);
1747
1526 destroy_workqueue(dev_priv->wq); 1748 destroy_workqueue(dev_priv->wq);
1527 del_timer_sync(&dev_priv->hangcheck_timer); 1749 del_timer_sync(&dev_priv->hangcheck_timer);
1528 1750
@@ -1544,6 +1766,7 @@ int i915_driver_unload(struct drm_device *dev)
1544 dev_priv->child_dev_num = 0; 1766 dev_priv->child_dev_num = 0;
1545 } 1767 }
1546 drm_irq_uninstall(dev); 1768 drm_irq_uninstall(dev);
1769 vga_switcheroo_unregister_client(dev->pdev);
1547 vga_client_register(dev->pdev, NULL, NULL, NULL); 1770 vga_client_register(dev->pdev, NULL, NULL, NULL);
1548 } 1771 }
1549 1772
@@ -1569,6 +1792,8 @@ int i915_driver_unload(struct drm_device *dev)
1569 intel_cleanup_overlay(dev); 1792 intel_cleanup_overlay(dev);
1570 } 1793 }
1571 1794
1795 intel_teardown_mchbar(dev);
1796
1572 pci_dev_put(dev_priv->bridge_dev); 1797 pci_dev_put(dev_priv->bridge_dev);
1573 kfree(dev->dev_private); 1798 kfree(dev->dev_private);
1574 1799
@@ -1611,6 +1836,7 @@ void i915_driver_lastclose(struct drm_device * dev)
1611 1836
1612 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { 1837 if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
1613 drm_fb_helper_restore(); 1838 drm_fb_helper_restore();
1839 vga_switcheroo_process_delayed_switch();
1614 return; 1840 return;
1615 } 1841 }
1616 1842
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cf4cb3e9a0c2..1b2e95455c05 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,7 @@ unsigned int i915_lvds_downclock = 0;
49module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 49module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
50 50
51static struct drm_driver driver; 51static struct drm_driver driver;
52extern int intel_agp_enabled;
52 53
53#define INTEL_VGA_DEVICE(id, info) { \ 54#define INTEL_VGA_DEVICE(id, info) { \
54 .class = PCI_CLASS_DISPLAY_VGA << 8, \ 55 .class = PCI_CLASS_DISPLAY_VGA << 8, \
@@ -136,6 +137,16 @@ const static struct intel_device_info intel_ironlake_m_info = {
136 .has_hotplug = 1, 137 .has_hotplug = 1,
137}; 138};
138 139
140const static struct intel_device_info intel_sandybridge_d_info = {
141 .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
142 .has_hotplug = 1,
143};
144
145const static struct intel_device_info intel_sandybridge_m_info = {
146 .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
147 .has_hotplug = 1,
148};
149
139const static struct pci_device_id pciidlist[] = { 150const static struct pci_device_id pciidlist[] = {
140 INTEL_VGA_DEVICE(0x3577, &intel_i830_info), 151 INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
141 INTEL_VGA_DEVICE(0x2562, &intel_845g_info), 152 INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
@@ -167,6 +178,8 @@ const static struct pci_device_id pciidlist[] = {
167 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), 178 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
168 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), 179 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
169 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), 180 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
181 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
182 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
170 {0, 0, 0} 183 {0, 0, 0}
171}; 184};
172 185
@@ -201,7 +214,7 @@ static int i915_drm_freeze(struct drm_device *dev)
201 return 0; 214 return 0;
202} 215}
203 216
204static int i915_suspend(struct drm_device *dev, pm_message_t state) 217int i915_suspend(struct drm_device *dev, pm_message_t state)
205{ 218{
206 int error; 219 int error;
207 220
@@ -255,7 +268,7 @@ static int i915_drm_thaw(struct drm_device *dev)
255 return error; 268 return error;
256} 269}
257 270
258static int i915_resume(struct drm_device *dev) 271int i915_resume(struct drm_device *dev)
259{ 272{
260 if (pci_enable_device(dev->pdev)) 273 if (pci_enable_device(dev->pdev))
261 return -EIO; 274 return -EIO;
@@ -546,6 +559,11 @@ static struct drm_driver driver = {
546 559
547static int __init i915_init(void) 560static int __init i915_init(void)
548{ 561{
562 if (!intel_agp_enabled) {
563 DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
564 return -ENODEV;
565 }
566
549 driver.num_ioctls = i915_max_ioctl; 567 driver.num_ioctls = i915_max_ioctl;
550 568
551 i915_gem_shrinker_init(); 569 i915_gem_shrinker_init();
@@ -571,6 +589,11 @@ static int __init i915_init(void)
571 driver.driver_features &= ~DRIVER_MODESET; 589 driver.driver_features &= ~DRIVER_MODESET;
572#endif 590#endif
573 591
592 if (!(driver.driver_features & DRIVER_MODESET)) {
593 driver.suspend = i915_suspend;
594 driver.resume = i915_resume;
595 }
596
574 return drm_init(&driver); 597 return drm_init(&driver);
575} 598}
576 599
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b99b6a841d95..979439cfb827 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -150,7 +150,27 @@ struct drm_i915_error_state {
150 u32 instps; 150 u32 instps;
151 u32 instdone1; 151 u32 instdone1;
152 u32 seqno; 152 u32 seqno;
153 u64 bbaddr;
153 struct timeval time; 154 struct timeval time;
155 struct drm_i915_error_object {
156 int page_count;
157 u32 gtt_offset;
158 u32 *pages[0];
159 } *ringbuffer, *batchbuffer[2];
160 struct drm_i915_error_buffer {
161 size_t size;
162 u32 name;
163 u32 seqno;
164 u32 gtt_offset;
165 u32 read_domains;
166 u32 write_domain;
167 u32 fence_reg;
168 s32 pinned:2;
169 u32 tiling:2;
170 u32 dirty:1;
171 u32 purgeable:1;
172 } *active_bo;
173 u32 active_bo_count;
154}; 174};
155 175
156struct drm_i915_display_funcs { 176struct drm_i915_display_funcs {
@@ -192,6 +212,14 @@ struct intel_device_info {
192 u8 cursor_needs_physical : 1; 212 u8 cursor_needs_physical : 1;
193}; 213};
194 214
215enum no_fbc_reason {
216 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
217 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
218 FBC_MODE_TOO_LARGE, /* mode too large for compression */
219 FBC_BAD_PLANE, /* fbc not supported on plane */
220 FBC_NOT_TILED, /* buffer not tiled */
221};
222
195typedef struct drm_i915_private { 223typedef struct drm_i915_private {
196 struct drm_device *dev; 224 struct drm_device *dev;
197 225
@@ -452,6 +480,7 @@ typedef struct drm_i915_private {
452 u32 savePIPEB_DATA_N1; 480 u32 savePIPEB_DATA_N1;
453 u32 savePIPEB_LINK_M1; 481 u32 savePIPEB_LINK_M1;
454 u32 savePIPEB_LINK_N1; 482 u32 savePIPEB_LINK_N1;
483 u32 saveMCHBAR_RENDER_STANDBY;
455 484
456 struct { 485 struct {
457 struct drm_mm gtt_space; 486 struct drm_mm gtt_space;
@@ -590,6 +619,14 @@ typedef struct drm_i915_private {
590 int child_dev_num; 619 int child_dev_num;
591 struct child_device_config *child_dev; 620 struct child_device_config *child_dev;
592 struct drm_connector *int_lvds_connector; 621 struct drm_connector *int_lvds_connector;
622
623 bool mchbar_need_disable;
624
625 u8 cur_delay;
626 u8 min_delay;
627 u8 max_delay;
628
629 enum no_fbc_reason no_fbc_reason;
593} drm_i915_private_t; 630} drm_i915_private_t;
594 631
595/** driver private structure attached to each drm_gem_object */ 632/** driver private structure attached to each drm_gem_object */
@@ -736,6 +773,8 @@ extern unsigned int i915_fbpercrtc;
736extern unsigned int i915_powersave; 773extern unsigned int i915_powersave;
737extern unsigned int i915_lvds_downclock; 774extern unsigned int i915_lvds_downclock;
738 775
776extern int i915_suspend(struct drm_device *dev, pm_message_t state);
777extern int i915_resume(struct drm_device *dev);
739extern void i915_save_display(struct drm_device *dev); 778extern void i915_save_display(struct drm_device *dev);
740extern void i915_restore_display(struct drm_device *dev); 779extern void i915_restore_display(struct drm_device *dev);
741extern int i915_master_create(struct drm_device *dev, struct drm_master *master); 780extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
@@ -761,6 +800,7 @@ extern int i965_reset(struct drm_device *dev, u8 flags);
761 800
762/* i915_irq.c */ 801/* i915_irq.c */
763void i915_hangcheck_elapsed(unsigned long data); 802void i915_hangcheck_elapsed(unsigned long data);
803void i915_destroy_error_state(struct drm_device *dev);
764extern int i915_irq_emit(struct drm_device *dev, void *data, 804extern int i915_irq_emit(struct drm_device *dev, void *data,
765 struct drm_file *file_priv); 805 struct drm_file *file_priv);
766extern int i915_irq_wait(struct drm_device *dev, void *data, 806extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -897,7 +937,8 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
897void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); 937void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
898bool i915_tiling_ok(struct drm_device *dev, int stride, int size, 938bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
899 int tiling_mode); 939 int tiling_mode);
900bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj); 940bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
941 int tiling_mode);
901 942
902/* i915_gem_debug.c */ 943/* i915_gem_debug.c */
903void i915_gem_dump_object(struct drm_gem_object *obj, int len, 944void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -1026,7 +1067,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1026#define IS_845G(dev) ((dev)->pci_device == 0x2562) 1067#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1027#define IS_I85X(dev) ((dev)->pci_device == 0x3582) 1068#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
1028#define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1069#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1029#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx) 1070#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
1030#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 1071#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1031#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1072#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1032#define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1073#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
@@ -1045,8 +1086,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1045#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) 1086#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
1046#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1087#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1047 1088
1089#define IS_GEN3(dev) (IS_I915G(dev) || \
1090 IS_I915GM(dev) || \
1091 IS_I945G(dev) || \
1092 IS_I945GM(dev) || \
1093 IS_G33(dev) || \
1094 IS_PINEVIEW(dev))
1095#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
1096 (dev)->pci_device == 0x2982 || \
1097 (dev)->pci_device == 0x2992 || \
1098 (dev)->pci_device == 0x29A2 || \
1099 (dev)->pci_device == 0x2A02 || \
1100 (dev)->pci_device == 0x2A12 || \
1101 (dev)->pci_device == 0x2E02 || \
1102 (dev)->pci_device == 0x2E12 || \
1103 (dev)->pci_device == 0x2E22 || \
1104 (dev)->pci_device == 0x2E32 || \
1105 (dev)->pci_device == 0x2A42 || \
1106 (dev)->pci_device == 0x2E42)
1107
1048#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1108#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1049 1109
1110#define IS_GEN6(dev) ((dev)->pci_device == 0x0102)
1111
1050/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1112/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1051 * rows, which changed the alignment requirements and fence programming. 1113 * rows, which changed the alignment requirements and fence programming.
1052 */ 1114 */
@@ -1067,6 +1129,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1067#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1129#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1068#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) 1130#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
1069 1131
1132#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
1133 IS_GEN6(dev))
1134
1070#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1135#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1071 1136
1072#endif 1137#endif
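The IS_GEN3/IS_GEN4/IS_GEN6 and HAS_PCH_SPLIT macros above classify hardware either by flags in intel_device_info or by bare PCI device ID. A compressed sketch of the same idea over a small lookup table (the table lists only a few of the IDs from the macros above and is not the driver's full device list):

    #include <stdint.h>
    #include <stdio.h>

    struct pci_id_gen { uint16_t device; int gen; };

    /* a handful of IDs from the macros above; illustrative, not exhaustive */
    static const struct pci_id_gen table[] = {
            { 0x2972, 4 }, { 0x2982, 4 }, { 0x2A02, 4 }, { 0x2E22, 4 },
            { 0x0102, 6 },          /* Sandybridge */
    };

    static int intel_gen(uint16_t device)
    {
            for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                    if (table[i].device == device)
                            return table[i].gen;
            return 0;               /* unknown to this sketch */
    }

    /* Ironlake and Sandybridge both hang the display outputs off the PCH */
    static int has_pch_split(uint16_t device, int is_ironlake)
    {
            return is_ironlake || intel_gen(device) == 6;
    }

    int main(void)
    {
            printf("0x0102: gen %d, pch split %d\n",
                   intel_gen(0x0102), has_pch_split(0x0102, 0));
            return 0;
    }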
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ec8a0d7ffa39..fba37e9f775d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -128,9 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
128 return -ENOMEM; 128 return -ENOMEM;
129 129
130 ret = drm_gem_handle_create(file_priv, obj, &handle); 130 ret = drm_gem_handle_create(file_priv, obj, &handle);
131 mutex_lock(&dev->struct_mutex); 131 drm_gem_object_handle_unreference_unlocked(obj);
132 drm_gem_object_handle_unreference(obj);
133 mutex_unlock(&dev->struct_mutex);
134 132
135 if (ret) 133 if (ret)
136 return ret; 134 return ret;
@@ -488,7 +486,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
488 */ 486 */
489 if (args->offset > obj->size || args->size > obj->size || 487 if (args->offset > obj->size || args->size > obj->size ||
490 args->offset + args->size > obj->size) { 488 args->offset + args->size > obj->size) {
491 drm_gem_object_unreference(obj); 489 drm_gem_object_unreference_unlocked(obj);
492 return -EINVAL; 490 return -EINVAL;
493 } 491 }
494 492
@@ -501,7 +499,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
501 file_priv); 499 file_priv);
502 } 500 }
503 501
504 drm_gem_object_unreference(obj); 502 drm_gem_object_unreference_unlocked(obj);
505 503
506 return ret; 504 return ret;
507} 505}
@@ -961,7 +959,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
961 */ 959 */
962 if (args->offset > obj->size || args->size > obj->size || 960 if (args->offset > obj->size || args->size > obj->size ||
963 args->offset + args->size > obj->size) { 961 args->offset + args->size > obj->size) {
964 drm_gem_object_unreference(obj); 962 drm_gem_object_unreference_unlocked(obj);
965 return -EINVAL; 963 return -EINVAL;
966 } 964 }
967 965
@@ -995,7 +993,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
995 DRM_INFO("pwrite failed %d\n", ret); 993 DRM_INFO("pwrite failed %d\n", ret);
996#endif 994#endif
997 995
998 drm_gem_object_unreference(obj); 996 drm_gem_object_unreference_unlocked(obj);
999 997
1000 return ret; 998 return ret;
1001} 999}
@@ -1138,9 +1136,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1138 PROT_READ | PROT_WRITE, MAP_SHARED, 1136 PROT_READ | PROT_WRITE, MAP_SHARED,
1139 args->offset); 1137 args->offset);
1140 up_write(&current->mm->mmap_sem); 1138 up_write(&current->mm->mmap_sem);
1141 mutex_lock(&dev->struct_mutex); 1139 drm_gem_object_unreference_unlocked(obj);
1142 drm_gem_object_unreference(obj);
1143 mutex_unlock(&dev->struct_mutex);
1144 if (IS_ERR((void *)addr)) 1140 if (IS_ERR((void *)addr))
1145 return addr; 1141 return addr;
1146 1142
@@ -1562,6 +1558,38 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1562 i915_verify_inactive(dev, __FILE__, __LINE__); 1558 i915_verify_inactive(dev, __FILE__, __LINE__);
1563} 1559}
1564 1560
1561static void
1562i915_gem_process_flushing_list(struct drm_device *dev,
1563 uint32_t flush_domains, uint32_t seqno)
1564{
1565 drm_i915_private_t *dev_priv = dev->dev_private;
1566 struct drm_i915_gem_object *obj_priv, *next;
1567
1568 list_for_each_entry_safe(obj_priv, next,
1569 &dev_priv->mm.gpu_write_list,
1570 gpu_write_list) {
1571 struct drm_gem_object *obj = obj_priv->obj;
1572
1573 if ((obj->write_domain & flush_domains) ==
1574 obj->write_domain) {
1575 uint32_t old_write_domain = obj->write_domain;
1576
1577 obj->write_domain = 0;
1578 list_del_init(&obj_priv->gpu_write_list);
1579 i915_gem_object_move_to_active(obj, seqno);
1580
1581 /* update the fence lru list */
1582 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1583 list_move_tail(&obj_priv->fence_list,
1584 &dev_priv->mm.fence_list);
1585
1586 trace_i915_gem_object_change_domain(obj,
1587 obj->read_domains,
1588 old_write_domain);
1589 }
1590 }
1591}
1592
1565/** 1593/**
1566 * Creates a new sequence number, emitting a write of it to the status page 1594 * Creates a new sequence number, emitting a write of it to the status page
1567 * plus an interrupt, which will trigger i915_user_interrupt_handler. 1595 * plus an interrupt, which will trigger i915_user_interrupt_handler.
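The test in the new i915_gem_process_flushing_list(), (obj->write_domain & flush_domains) == obj->write_domain, asks whether every bit of the object's write domain is covered by the domains that were just flushed. A two-case demonstration of that subset check on plain bitmasks (the domain names here are placeholders, not the real I915_GEM_DOMAIN_* values):

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative domain bits only */
    #define DOMAIN_RENDER  (1u << 1)
    #define DOMAIN_SAMPLER (1u << 2)

    static int covered_by_flush(uint32_t write_domain, uint32_t flush_domains)
    {
            /* true when write_domain is a subset of flush_domains */
            return (write_domain & flush_domains) == write_domain;
    }

    int main(void)
    {
            printf("%d\n", covered_by_flush(DOMAIN_RENDER,
                                            DOMAIN_RENDER | DOMAIN_SAMPLER)); /* 1 */
            printf("%d\n", covered_by_flush(DOMAIN_RENDER | DOMAIN_SAMPLER,
                                            DOMAIN_RENDER));                  /* 0 */
            return 0;
    }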
@@ -1620,29 +1648,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1620 /* Associate any objects on the flushing list matching the write 1648 /* Associate any objects on the flushing list matching the write
1621 * domain we're flushing with our flush. 1649 * domain we're flushing with our flush.
1622 */ 1650 */
1623 if (flush_domains != 0) { 1651 if (flush_domains != 0)
1624 struct drm_i915_gem_object *obj_priv, *next; 1652 i915_gem_process_flushing_list(dev, flush_domains, seqno);
1625
1626 list_for_each_entry_safe(obj_priv, next,
1627 &dev_priv->mm.gpu_write_list,
1628 gpu_write_list) {
1629 struct drm_gem_object *obj = obj_priv->obj;
1630
1631 if ((obj->write_domain & flush_domains) ==
1632 obj->write_domain) {
1633 uint32_t old_write_domain = obj->write_domain;
1634
1635 obj->write_domain = 0;
1636 list_del_init(&obj_priv->gpu_write_list);
1637 i915_gem_object_move_to_active(obj, seqno);
1638
1639 trace_i915_gem_object_change_domain(obj,
1640 obj->read_domains,
1641 old_write_domain);
1642 }
1643 }
1644
1645 }
1646 1653
1647 if (!dev_priv->mm.suspended) { 1654 if (!dev_priv->mm.suspended) {
1648 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 1655 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1822,7 +1829,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1822 return -EIO; 1829 return -EIO;
1823 1830
1824 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1831 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1825 if (IS_IRONLAKE(dev)) 1832 if (HAS_PCH_SPLIT(dev))
1826 ier = I915_READ(DEIER) | I915_READ(GTIER); 1833 ier = I915_READ(DEIER) | I915_READ(GTIER);
1827 else 1834 else
1828 ier = I915_READ(IER); 1835 ier = I915_READ(IER);
@@ -1991,6 +1998,7 @@ int
1991i915_gem_object_unbind(struct drm_gem_object *obj) 1998i915_gem_object_unbind(struct drm_gem_object *obj)
1992{ 1999{
1993 struct drm_device *dev = obj->dev; 2000 struct drm_device *dev = obj->dev;
2001 drm_i915_private_t *dev_priv = dev->dev_private;
1994 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2002 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1995 int ret = 0; 2003 int ret = 0;
1996 2004
@@ -2046,8 +2054,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2046 } 2054 }
2047 2055
2048 /* Remove ourselves from the LRU list if present. */ 2056 /* Remove ourselves from the LRU list if present. */
2057 spin_lock(&dev_priv->mm.active_list_lock);
2049 if (!list_empty(&obj_priv->list)) 2058 if (!list_empty(&obj_priv->list))
2050 list_del_init(&obj_priv->list); 2059 list_del_init(&obj_priv->list);
2060 spin_unlock(&dev_priv->mm.active_list_lock);
2051 2061
2052 if (i915_gem_object_is_purgeable(obj_priv)) 2062 if (i915_gem_object_is_purgeable(obj_priv))
2053 i915_gem_object_truncate(obj); 2063 i915_gem_object_truncate(obj);
@@ -2085,11 +2095,34 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
2085} 2095}
2086 2096
2087static int 2097static int
2098i915_gpu_idle(struct drm_device *dev)
2099{
2100 drm_i915_private_t *dev_priv = dev->dev_private;
2101 bool lists_empty;
2102 uint32_t seqno;
2103
2104 spin_lock(&dev_priv->mm.active_list_lock);
2105 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
2106 list_empty(&dev_priv->mm.active_list);
2107 spin_unlock(&dev_priv->mm.active_list_lock);
2108
2109 if (lists_empty)
2110 return 0;
2111
2112 /* Flush everything onto the inactive list. */
2113 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2114 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
2115 if (seqno == 0)
2116 return -ENOMEM;
2117
2118 return i915_wait_request(dev, seqno);
2119}
2120
2121static int
2088i915_gem_evict_everything(struct drm_device *dev) 2122i915_gem_evict_everything(struct drm_device *dev)
2089{ 2123{
2090 drm_i915_private_t *dev_priv = dev->dev_private; 2124 drm_i915_private_t *dev_priv = dev->dev_private;
2091 int ret; 2125 int ret;
2092 uint32_t seqno;
2093 bool lists_empty; 2126 bool lists_empty;
2094 2127
2095 spin_lock(&dev_priv->mm.active_list_lock); 2128 spin_lock(&dev_priv->mm.active_list_lock);
@@ -2102,12 +2135,7 @@ i915_gem_evict_everything(struct drm_device *dev)
2102 return -ENOSPC; 2135 return -ENOSPC;
2103 2136
2104 /* Flush everything (on to the inactive lists) and evict */ 2137 /* Flush everything (on to the inactive lists) and evict */
2105 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 2138 ret = i915_gpu_idle(dev);
2106 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
2107 if (seqno == 0)
2108 return -ENOMEM;
2109
2110 ret = i915_wait_request(dev, seqno);
2111 if (ret) 2139 if (ret)
2112 return ret; 2140 return ret;
2113 2141
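The new i915_gpu_idle() helper factors out the flush / add-request / wait sequence so that eviction and suspend can share it, as the simplified i915_gem_evict_everything() above now shows. A minimal sketch of that control flow with stubbed-out steps (the stubs are placeholders, not the real GEM calls):

    #include <stdio.h>

    /* placeholder stubs for the real GEM operations */
    static int lists_empty(void)          { return 0; }
    static void flush_gpu_domains(void)   { puts("flush"); }
    static unsigned add_request(void)     { puts("emit request"); return 42; }
    static int wait_request(unsigned s)   { printf("wait for %u\n", s); return 0; }

    static int gpu_idle(void)
    {
            if (lists_empty())
                    return 0;       /* nothing outstanding */

            flush_gpu_domains();
            unsigned seqno = add_request();
            if (seqno == 0)
                    return -1;      /* -ENOMEM in the driver */

            return wait_request(seqno);
    }

    int main(void)
    {
            return gpu_idle();
    }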
@@ -2265,6 +2293,28 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
2265 return 0; 2293 return 0;
2266} 2294}
2267 2295
2296static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2297{
2298 struct drm_gem_object *obj = reg->obj;
2299 struct drm_device *dev = obj->dev;
2300 drm_i915_private_t *dev_priv = dev->dev_private;
2301 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2302 int regnum = obj_priv->fence_reg;
2303 uint64_t val;
2304
2305 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2306 0xfffff000) << 32;
2307 val |= obj_priv->gtt_offset & 0xfffff000;
2308 val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2309 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2310
2311 if (obj_priv->tiling_mode == I915_TILING_Y)
2312 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2313 val |= I965_FENCE_REG_VALID;
2314
2315 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2316}
2317
2268static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) 2318static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2269{ 2319{
2270 struct drm_gem_object *obj = reg->obj; 2320 struct drm_gem_object *obj = reg->obj;
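sandybridge_write_fence_reg() packs the fence range, pitch and tiling flag into a single 64-bit register write. The arithmetic can be checked in isolation; the sketch below reproduces the formula above with the field positions assumed to be 32 (pitch) and 1 (Y-tiling bit), which is an assumption made for the sketch rather than a register reference:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* assumed field positions, mirroring the code above */
    #define FENCE_PITCH_SHIFT   32
    #define FENCE_TILING_Y_BIT  (1ull << 1)
    #define FENCE_VALID_BIT     (1ull << 0)

    static uint64_t snb_fence_value(uint32_t gtt_offset, uint32_t size,
                                    uint32_t stride, int tiling_y)
    {
            uint64_t val;

            /* upper bound: last 4K page of the object; lower bound: first page */
            val  = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
            val |= gtt_offset & 0xfffff000;
            val |= (uint64_t)((stride / 128) - 1) << FENCE_PITCH_SHIFT;
            if (tiling_y)
                    val |= FENCE_TILING_Y_BIT;
            val |= FENCE_VALID_BIT;
            return val;
    }

    int main(void)
    {
            printf("0x%016" PRIx64 "\n",
                   snb_fence_value(0x00100000, 0x00200000, 4096, 0));
            return 0;
    }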
@@ -2361,6 +2411,58 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2361 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); 2411 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2362} 2412}
2363 2413
2414static int i915_find_fence_reg(struct drm_device *dev)
2415{
2416 struct drm_i915_fence_reg *reg = NULL;
2417 struct drm_i915_gem_object *obj_priv = NULL;
2418 struct drm_i915_private *dev_priv = dev->dev_private;
2419 struct drm_gem_object *obj = NULL;
2420 int i, avail, ret;
2421
2422 /* First try to find a free reg */
2423 avail = 0;
2424 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2425 reg = &dev_priv->fence_regs[i];
2426 if (!reg->obj)
2427 return i;
2428
2429 obj_priv = reg->obj->driver_private;
2430 if (!obj_priv->pin_count)
2431 avail++;
2432 }
2433
2434 if (avail == 0)
2435 return -ENOSPC;
2436
2437 /* None available, try to steal one or wait for a user to finish */
2438 i = I915_FENCE_REG_NONE;
2439 list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
2440 fence_list) {
2441 obj = obj_priv->obj;
2442
2443 if (obj_priv->pin_count)
2444 continue;
2445
2446 /* found one! */
2447 i = obj_priv->fence_reg;
2448 break;
2449 }
2450
2451 BUG_ON(i == I915_FENCE_REG_NONE);
2452
2453 /* We only have a reference on obj from the active list. put_fence_reg
2454 * might drop that one, causing a use-after-free in it. So hold a
2455 * private reference to obj like the other callers of put_fence_reg
2456 * (set_tiling ioctl) do. */
2457 drm_gem_object_reference(obj);
2458 ret = i915_gem_object_put_fence_reg(obj);
2459 drm_gem_object_unreference(obj);
2460 if (ret != 0)
2461 return ret;
2462
2463 return i;
2464}
2465
2364/** 2466/**
2365 * i915_gem_object_get_fence_reg - set up a fence reg for an object 2467 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2366 * @obj: object to map through a fence reg 2468 * @obj: object to map through a fence reg
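i915_find_fence_reg() splits allocation into two passes: hand out the first unused register, otherwise steal the least-recently-used one that is not pinned. A standalone sketch of that policy over a plain array (LRU order is simplified to array order here, whereas the driver walks mm.fence_list):

    #include <stdio.h>

    #define NUM_REGS 16

    struct fence_slot {
            int in_use;
            int pinned;
    };

    /* returns a register index, or -1 when every candidate is pinned */
    static int find_fence_reg(struct fence_slot *regs, int start)
    {
            int avail = 0;

            /* first pass: a completely free register wins immediately */
            for (int i = start; i < NUM_REGS; i++) {
                    if (!regs[i].in_use)
                            return i;
                    if (!regs[i].pinned)
                            avail++;
            }
            if (avail == 0)
                    return -1;      /* -ENOSPC in the driver */

            /* second pass: steal the first (oldest) unpinned register */
            for (int i = start; i < NUM_REGS; i++)
                    if (regs[i].in_use && !regs[i].pinned)
                            return i;
            return -1;
    }

    int main(void)
    {
            struct fence_slot regs[NUM_REGS] = { 0 };
            regs[3].in_use = 1;
            printf("got reg %d\n", find_fence_reg(regs, 3));  /* prints 4 */
            return 0;
    }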
@@ -2381,8 +2483,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2381 struct drm_i915_private *dev_priv = dev->dev_private; 2483 struct drm_i915_private *dev_priv = dev->dev_private;
2382 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2484 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2383 struct drm_i915_fence_reg *reg = NULL; 2485 struct drm_i915_fence_reg *reg = NULL;
2384 struct drm_i915_gem_object *old_obj_priv = NULL; 2486 int ret;
2385 int i, ret, avail;
2386 2487
2387 /* Just update our place in the LRU if our fence is getting used. */ 2488 /* Just update our place in the LRU if our fence is getting used. */
2388 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { 2489 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -2410,86 +2511,27 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2410 break; 2511 break;
2411 } 2512 }
2412 2513
2413 /* First try to find a free reg */ 2514 ret = i915_find_fence_reg(dev);
2414 avail = 0; 2515 if (ret < 0)
2415 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { 2516 return ret;
2416 reg = &dev_priv->fence_regs[i];
2417 if (!reg->obj)
2418 break;
2419
2420 old_obj_priv = reg->obj->driver_private;
2421 if (!old_obj_priv->pin_count)
2422 avail++;
2423 }
2424
2425 /* None available, try to steal one or wait for a user to finish */
2426 if (i == dev_priv->num_fence_regs) {
2427 struct drm_gem_object *old_obj = NULL;
2428
2429 if (avail == 0)
2430 return -ENOSPC;
2431
2432 list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
2433 fence_list) {
2434 old_obj = old_obj_priv->obj;
2435
2436 if (old_obj_priv->pin_count)
2437 continue;
2438
2439 /* Take a reference, as otherwise the wait_rendering
2440 * below may cause the object to get freed out from
2441 * under us.
2442 */
2443 drm_gem_object_reference(old_obj);
2444
2445 /* i915 uses fences for GPU access to tiled buffers */
2446 if (IS_I965G(dev) || !old_obj_priv->active)
2447 break;
2448
2449 /* This brings the object to the head of the LRU if it
2450 * had been written to. The only way this should
2451 * result in us waiting longer than the expected
2452 * optimal amount of time is if there was a
2453 * fence-using buffer later that was read-only.
2454 */
2455 i915_gem_object_flush_gpu_write_domain(old_obj);
2456 ret = i915_gem_object_wait_rendering(old_obj);
2457 if (ret != 0) {
2458 drm_gem_object_unreference(old_obj);
2459 return ret;
2460 }
2461
2462 break;
2463 }
2464
2465 /*
2466 * Zap this virtual mapping so we can set up a fence again
2467 * for this object next time we need it.
2468 */
2469 i915_gem_release_mmap(old_obj);
2470
2471 i = old_obj_priv->fence_reg;
2472 reg = &dev_priv->fence_regs[i];
2473
2474 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2475 list_del_init(&old_obj_priv->fence_list);
2476
2477 drm_gem_object_unreference(old_obj);
2478 }
2479 2517
2480 obj_priv->fence_reg = i; 2518 obj_priv->fence_reg = ret;
2519 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2481 list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); 2520 list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2482 2521
2483 reg->obj = obj; 2522 reg->obj = obj;
2484 2523
2485 if (IS_I965G(dev)) 2524 if (IS_GEN6(dev))
2525 sandybridge_write_fence_reg(reg);
2526 else if (IS_I965G(dev))
2486 i965_write_fence_reg(reg); 2527 i965_write_fence_reg(reg);
2487 else if (IS_I9XX(dev)) 2528 else if (IS_I9XX(dev))
2488 i915_write_fence_reg(reg); 2529 i915_write_fence_reg(reg);
2489 else 2530 else
2490 i830_write_fence_reg(reg); 2531 i830_write_fence_reg(reg);
2491 2532
2492 trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode); 2533 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2534 obj_priv->tiling_mode);
2493 2535
2494 return 0; 2536 return 0;
2495} 2537}
@@ -2508,9 +2550,12 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2508 drm_i915_private_t *dev_priv = dev->dev_private; 2550 drm_i915_private_t *dev_priv = dev->dev_private;
2509 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2551 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2510 2552
2511 if (IS_I965G(dev)) 2553 if (IS_GEN6(dev)) {
2554 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2555 (obj_priv->fence_reg * 8), 0);
2556 } else if (IS_I965G(dev)) {
2512 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 2557 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2513 else { 2558 } else {
2514 uint32_t fence_reg; 2559 uint32_t fence_reg;
2515 2560
2516 if (obj_priv->fence_reg < 8) 2561 if (obj_priv->fence_reg < 8)
@@ -2544,6 +2589,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2544 if (obj_priv->fence_reg == I915_FENCE_REG_NONE) 2589 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2545 return 0; 2590 return 0;
2546 2591
2592 /* If we've changed tiling, GTT-mappings of the object
2593 * need to re-fault to ensure that the correct fence register
2594 * setup is in place.
2595 */
2596 i915_gem_release_mmap(obj);
2597
2547 /* On the i915, GPU access to tiled buffers is via a fence, 2598 /* On the i915, GPU access to tiled buffers is via a fence,
2548 * therefore we must wait for any outstanding access to complete 2599 * therefore we must wait for any outstanding access to complete
2549 * before clearing the fence. 2600 * before clearing the fence.
@@ -2552,12 +2603,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2552 int ret; 2603 int ret;
2553 2604
2554 i915_gem_object_flush_gpu_write_domain(obj); 2605 i915_gem_object_flush_gpu_write_domain(obj);
2555 i915_gem_object_flush_gtt_write_domain(obj);
2556 ret = i915_gem_object_wait_rendering(obj); 2606 ret = i915_gem_object_wait_rendering(obj);
2557 if (ret != 0) 2607 if (ret != 0)
2558 return ret; 2608 return ret;
2559 } 2609 }
2560 2610
2611 i915_gem_object_flush_gtt_write_domain(obj);
2561 i915_gem_clear_fence_reg (obj); 2612 i915_gem_clear_fence_reg (obj);
2562 2613
2563 return 0; 2614 return 0;
@@ -2697,7 +2748,6 @@ static void
2697i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) 2748i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2698{ 2749{
2699 struct drm_device *dev = obj->dev; 2750 struct drm_device *dev = obj->dev;
2700 uint32_t seqno;
2701 uint32_t old_write_domain; 2751 uint32_t old_write_domain;
2702 2752
2703 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) 2753 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
@@ -2706,9 +2756,8 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2706 /* Queue the GPU write cache flushing we need. */ 2756 /* Queue the GPU write cache flushing we need. */
2707 old_write_domain = obj->write_domain; 2757 old_write_domain = obj->write_domain;
2708 i915_gem_flush(dev, 0, obj->write_domain); 2758 i915_gem_flush(dev, 0, obj->write_domain);
2709 seqno = i915_add_request(dev, NULL, obj->write_domain); 2759 (void) i915_add_request(dev, NULL, obj->write_domain);
2710 BUG_ON(obj->write_domain); 2760 BUG_ON(obj->write_domain);
2711 i915_gem_object_move_to_active(obj, seqno);
2712 2761
2713 trace_i915_gem_object_change_domain(obj, 2762 trace_i915_gem_object_change_domain(obj,
2714 obj->read_domains, 2763 obj->read_domains,
@@ -3247,7 +3296,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3247 obj_priv->tiling_mode != I915_TILING_NONE; 3296 obj_priv->tiling_mode != I915_TILING_NONE;
3248 3297
3249 /* Check fence reg constraints and rebind if necessary */ 3298 /* Check fence reg constraints and rebind if necessary */
3250 if (need_fence && !i915_obj_fenceable(dev, obj)) 3299 if (need_fence && !i915_gem_object_fence_offset_ok(obj,
3300 obj_priv->tiling_mode))
3251 i915_gem_object_unbind(obj); 3301 i915_gem_object_unbind(obj);
3252 3302
3253 /* Choose the GTT offset for our buffer and put it there. */ 3303 /* Choose the GTT offset for our buffer and put it there. */
@@ -3317,6 +3367,16 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3317 } 3367 }
3318 3368
3319 /* Validate that the target is in a valid r/w GPU domain */ 3369 /* Validate that the target is in a valid r/w GPU domain */
3370 if (reloc->write_domain & (reloc->write_domain - 1)) {
3371 DRM_ERROR("reloc with multiple write domains: "
3372 "obj %p target %d offset %d "
3373 "read %08x write %08x",
3374 obj, reloc->target_handle,
3375 (int) reloc->offset,
3376 reloc->read_domains,
3377 reloc->write_domain);
3378 return -EINVAL;
3379 }
3320 if (reloc->write_domain & I915_GEM_DOMAIN_CPU || 3380 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3321 reloc->read_domains & I915_GEM_DOMAIN_CPU) { 3381 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3322 DRM_ERROR("reloc with read/write CPU domains: " 3382 DRM_ERROR("reloc with read/write CPU domains: "
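The new relocation check above uses the classic x & (x - 1) trick: the expression is non-zero exactly when more than one write-domain bit is set, while zero (no write domain at all) passes. A quick standalone check of that property:

    #include <stdint.h>
    #include <stdio.h>

    /* non-zero iff more than one bit of x is set */
    static int multiple_bits_set(uint32_t x)
    {
            return (x & (x - 1)) != 0;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   multiple_bits_set(0),                    /* 0: no write domain */
                   multiple_bits_set(1u << 4),              /* 0: single domain   */
                   multiple_bits_set((1u << 4) | (1u << 5))); /* 1: rejected      */
            return 0;
    }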
@@ -4445,8 +4505,7 @@ int
4445i915_gem_idle(struct drm_device *dev) 4505i915_gem_idle(struct drm_device *dev)
4446{ 4506{
4447 drm_i915_private_t *dev_priv = dev->dev_private; 4507 drm_i915_private_t *dev_priv = dev->dev_private;
4448 uint32_t seqno, cur_seqno, last_seqno; 4508 int ret;
4449 int stuck, ret;
4450 4509
4451 mutex_lock(&dev->struct_mutex); 4510 mutex_lock(&dev->struct_mutex);
4452 4511
@@ -4455,115 +4514,36 @@ i915_gem_idle(struct drm_device *dev)
4455 return 0; 4514 return 0;
4456 } 4515 }
4457 4516
4458 /* Hack! Don't let anybody do execbuf while we don't control the chip. 4517 ret = i915_gpu_idle(dev);
4459 * We need to replace this with a semaphore, or something. 4518 if (ret) {
4460 */
4461 dev_priv->mm.suspended = 1;
4462 del_timer(&dev_priv->hangcheck_timer);
4463
4464 /* Cancel the retire work handler, wait for it to finish if running
4465 */
4466 mutex_unlock(&dev->struct_mutex);
4467 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4468 mutex_lock(&dev->struct_mutex);
4469
4470 i915_kernel_lost_context(dev);
4471
4472 /* Flush the GPU along with all non-CPU write domains
4473 */
4474 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
4475 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
4476
4477 if (seqno == 0) {
4478 mutex_unlock(&dev->struct_mutex); 4519 mutex_unlock(&dev->struct_mutex);
4479 return -ENOMEM; 4520 return ret;
4480 } 4521 }
4481 4522
4482 dev_priv->mm.waiting_gem_seqno = seqno; 4523 /* Under UMS, be paranoid and evict. */
4483 last_seqno = 0; 4524 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4484 stuck = 0; 4525 ret = i915_gem_evict_from_inactive_list(dev);
4485 for (;;) { 4526 if (ret) {
4486 cur_seqno = i915_get_gem_seqno(dev); 4527 mutex_unlock(&dev->struct_mutex);
4487 if (i915_seqno_passed(cur_seqno, seqno)) 4528 return ret;
4488 break;
4489 if (last_seqno == cur_seqno) {
4490 if (stuck++ > 100) {
4491 DRM_ERROR("hardware wedged\n");
4492 atomic_set(&dev_priv->mm.wedged, 1);
4493 DRM_WAKEUP(&dev_priv->irq_queue);
4494 break;
4495 }
4496 } 4529 }
4497 msleep(10);
4498 last_seqno = cur_seqno;
4499 }
4500 dev_priv->mm.waiting_gem_seqno = 0;
4501
4502 i915_gem_retire_requests(dev);
4503
4504 spin_lock(&dev_priv->mm.active_list_lock);
4505 if (!atomic_read(&dev_priv->mm.wedged)) {
4506 /* Active and flushing should now be empty as we've
4507 * waited for a sequence higher than any pending execbuffer
4508 */
4509 WARN_ON(!list_empty(&dev_priv->mm.active_list));
4510 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
4511 /* Request should now be empty as we've also waited
4512 * for the last request in the list
4513 */
4514 WARN_ON(!list_empty(&dev_priv->mm.request_list));
4515 } 4530 }
4516 4531
4517 /* Empty the active and flushing lists to inactive. If there's 4532 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4518 * anything left at this point, it means that we're wedged and 4533 * We need to replace this with a semaphore, or something.
4519 * nothing good's going to happen by leaving them there. So strip 4534 * And not confound mm.suspended!
4520 * the GPU domains and just stuff them onto inactive.
4521 */ 4535 */
4522 while (!list_empty(&dev_priv->mm.active_list)) { 4536 dev_priv->mm.suspended = 1;
4523 struct drm_gem_object *obj; 4537 del_timer(&dev_priv->hangcheck_timer);
4524 uint32_t old_write_domain;
4525
4526 obj = list_first_entry(&dev_priv->mm.active_list,
4527 struct drm_i915_gem_object,
4528 list)->obj;
4529 old_write_domain = obj->write_domain;
4530 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4531 i915_gem_object_move_to_inactive(obj);
4532
4533 trace_i915_gem_object_change_domain(obj,
4534 obj->read_domains,
4535 old_write_domain);
4536 }
4537 spin_unlock(&dev_priv->mm.active_list_lock);
4538
4539 while (!list_empty(&dev_priv->mm.flushing_list)) {
4540 struct drm_gem_object *obj;
4541 uint32_t old_write_domain;
4542
4543 obj = list_first_entry(&dev_priv->mm.flushing_list,
4544 struct drm_i915_gem_object,
4545 list)->obj;
4546 old_write_domain = obj->write_domain;
4547 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4548 i915_gem_object_move_to_inactive(obj);
4549
4550 trace_i915_gem_object_change_domain(obj,
4551 obj->read_domains,
4552 old_write_domain);
4553 }
4554
4555
4556 /* Move all inactive buffers out of the GTT. */
4557 ret = i915_gem_evict_from_inactive_list(dev);
4558 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
4559 if (ret) {
4560 mutex_unlock(&dev->struct_mutex);
4561 return ret;
4562 }
4563 4538
4539 i915_kernel_lost_context(dev);
4564 i915_gem_cleanup_ringbuffer(dev); 4540 i915_gem_cleanup_ringbuffer(dev);
4541
4565 mutex_unlock(&dev->struct_mutex); 4542 mutex_unlock(&dev->struct_mutex);
4566 4543
4544 /* Cancel the retire work handler, which should be idle now. */
4545 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4546
4567 return 0; 4547 return 0;
4568} 4548}
4569 4549
@@ -4607,8 +4587,13 @@ i915_gem_init_hws(struct drm_device *dev)
4607 } 4587 }
4608 dev_priv->hws_obj = obj; 4588 dev_priv->hws_obj = obj;
4609 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 4589 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4610 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 4590 if (IS_GEN6(dev)) {
4611 I915_READ(HWS_PGA); /* posting read */ 4591 I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
4592 I915_READ(HWS_PGA_GEN6); /* posting read */
4593 } else {
4594 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4595 I915_READ(HWS_PGA); /* posting read */
4596 }
4612 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); 4597 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4613 4598
4614 return 0; 4599 return 0;
@@ -4850,7 +4835,8 @@ i915_gem_load(struct drm_device *dev)
4850 spin_unlock(&shrink_list_lock); 4835 spin_unlock(&shrink_list_lock);
4851 4836
4852 /* Old X drivers will take 0-2 for front, back, depth buffers */ 4837 /* Old X drivers will take 0-2 for front, back, depth buffers */
4853 dev_priv->fence_reg_start = 3; 4838 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4839 dev_priv->fence_reg_start = 3;
4854 4840
4855 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 4841 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4856 dev_priv->num_fence_regs = 16; 4842 dev_priv->num_fence_regs = 16;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index df278b2685bf..b5c55d88ff76 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,8 +25,6 @@
25 * 25 *
26 */ 26 */
27 27
28#include <linux/acpi.h>
29#include <linux/pnp.h>
30#include "linux/string.h" 28#include "linux/string.h"
31#include "linux/bitops.h" 29#include "linux/bitops.h"
32#include "drmP.h" 30#include "drmP.h"
@@ -83,120 +81,6 @@
83 * to match what the GPU expects. 81 * to match what the GPU expects.
84 */ 82 */
85 83
86#define MCHBAR_I915 0x44
87#define MCHBAR_I965 0x48
88#define MCHBAR_SIZE (4*4096)
89
90#define DEVEN_REG 0x54
91#define DEVEN_MCHBAR_EN (1 << 28)
92
93/* Allocate space for the MCH regs if needed, return nonzero on error */
94static int
95intel_alloc_mchbar_resource(struct drm_device *dev)
96{
97 drm_i915_private_t *dev_priv = dev->dev_private;
98 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
99 u32 temp_lo, temp_hi = 0;
100 u64 mchbar_addr;
101 int ret = 0;
102
103 if (IS_I965G(dev))
104 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
105 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
106 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
107
108 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
109#ifdef CONFIG_PNP
110 if (mchbar_addr &&
111 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
112 ret = 0;
113 goto out;
114 }
115#endif
116
117 /* Get some space for it */
118 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
119 MCHBAR_SIZE, MCHBAR_SIZE,
120 PCIBIOS_MIN_MEM,
121 0, pcibios_align_resource,
122 dev_priv->bridge_dev);
123 if (ret) {
124 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
125 dev_priv->mch_res.start = 0;
126 goto out;
127 }
128
129 if (IS_I965G(dev))
130 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
131 upper_32_bits(dev_priv->mch_res.start));
132
133 pci_write_config_dword(dev_priv->bridge_dev, reg,
134 lower_32_bits(dev_priv->mch_res.start));
135out:
136 return ret;
137}
138
139/* Setup MCHBAR if possible, return true if we should disable it again */
140static bool
141intel_setup_mchbar(struct drm_device *dev)
142{
143 drm_i915_private_t *dev_priv = dev->dev_private;
144 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
145 u32 temp;
146 bool need_disable = false, enabled;
147
148 if (IS_I915G(dev) || IS_I915GM(dev)) {
149 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
150 enabled = !!(temp & DEVEN_MCHBAR_EN);
151 } else {
152 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
153 enabled = temp & 1;
154 }
155
156 /* If it's already enabled, don't have to do anything */
157 if (enabled)
158 goto out;
159
160 if (intel_alloc_mchbar_resource(dev))
161 goto out;
162
163 need_disable = true;
164
165 /* Space is allocated or reserved, so enable it. */
166 if (IS_I915G(dev) || IS_I915GM(dev)) {
167 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
168 temp | DEVEN_MCHBAR_EN);
169 } else {
170 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
171 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
172 }
173out:
174 return need_disable;
175}
176
177static void
178intel_teardown_mchbar(struct drm_device *dev, bool disable)
179{
180 drm_i915_private_t *dev_priv = dev->dev_private;
181 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
182 u32 temp;
183
184 if (disable) {
185 if (IS_I915G(dev) || IS_I915GM(dev)) {
186 pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
187 temp &= ~DEVEN_MCHBAR_EN;
188 pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
189 } else {
190 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
191 temp &= ~1;
192 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
193 }
194 }
195
196 if (dev_priv->mch_res.start)
197 release_resource(&dev_priv->mch_res);
198}
199
200/** 84/**
201 * Detects bit 6 swizzling of address lookup between IGD access and CPU 85 * Detects bit 6 swizzling of address lookup between IGD access and CPU
202 * access through main memory. 86 * access through main memory.
@@ -207,9 +91,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
207 drm_i915_private_t *dev_priv = dev->dev_private; 91 drm_i915_private_t *dev_priv = dev->dev_private;
208 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
209 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
210 bool need_disable;
211 94
212 if (IS_IRONLAKE(dev)) { 95 if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
213 /* On Ironlake whatever DRAM config, GPU always do 96 /* On Ironlake whatever DRAM config, GPU always do
214 * same swizzling setup. 97 * same swizzling setup.
215 */ 98 */
@@ -224,9 +107,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
224 } else if (IS_MOBILE(dev)) { 107 } else if (IS_MOBILE(dev)) {
225 uint32_t dcc; 108 uint32_t dcc;
226 109
227 /* Try to make sure MCHBAR is enabled before poking at it */
228 need_disable = intel_setup_mchbar(dev);
229
230 /* On mobile 9xx chipsets, channel interleave by the CPU is 110 /* On mobile 9xx chipsets, channel interleave by the CPU is
231 * determined by DCC. For single-channel, neither the CPU 111 * determined by DCC. For single-channel, neither the CPU
232 * nor the GPU do swizzling. For dual channel interleaved, 112 * nor the GPU do swizzling. For dual channel interleaved,
@@ -266,8 +146,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
266 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 146 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
267 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 147 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
268 } 148 }
269
270 intel_teardown_mchbar(dev, need_disable);
271 } else { 149 } else {
272 /* The 965, G33, and newer, have a very flexible memory 150 /* The 965, G33, and newer, have a very flexible memory
273 * configuration. It will enable dual-channel mode 151 * configuration. It will enable dual-channel mode
@@ -302,39 +180,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
302 dev_priv->mm.bit_6_swizzle_y = swizzle_y; 180 dev_priv->mm.bit_6_swizzle_y = swizzle_y;
303} 181}
304 182
305
306/**
307 * Returns whether an object is currently fenceable. If not, it may need
308 * to be unbound and have its pitch adjusted.
309 */
310bool
311i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
312{
313 struct drm_i915_gem_object *obj_priv = obj->driver_private;
314
315 if (IS_I965G(dev)) {
316 /* The 965 can have fences at any page boundary. */
317 if (obj->size & 4095)
318 return false;
319 return true;
320 } else if (IS_I9XX(dev)) {
321 if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
322 return false;
323 } else {
324 if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
325 return false;
326 }
327
328 /* Power of two sized... */
329 if (obj->size & (obj->size - 1))
330 return false;
331
332 /* Objects must be size aligned as well */
333 if (obj_priv->gtt_offset & (obj->size - 1))
334 return false;
335 return true;
336}
337
338/* Check pitch constriants for all chips & tiling formats */ 183/* Check pitch constriants for all chips & tiling formats */
339bool 184bool
340i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
@@ -391,7 +236,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
391 return true; 236 return true;
392} 237}
393 238
394static bool 239bool
395i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) 240i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
396{ 241{
397 struct drm_device *dev = obj->dev; 242 struct drm_device *dev = obj->dev;
@@ -438,9 +283,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
438 obj_priv = obj->driver_private; 283 obj_priv = obj->driver_private;
439 284
440 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { 285 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
441 mutex_lock(&dev->struct_mutex); 286 drm_gem_object_unreference_unlocked(obj);
442 drm_gem_object_unreference(obj);
443 mutex_unlock(&dev->struct_mutex);
444 return -EINVAL; 287 return -EINVAL;
445 } 288 }
446 289
@@ -493,12 +336,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
493 goto err; 336 goto err;
494 } 337 }
495 338
496 /* If we've changed tiling, GTT-mappings of the object
497 * need to re-fault to ensure that the correct fence register
498 * setup is in place.
499 */
500 i915_gem_release_mmap(obj);
501
502 obj_priv->tiling_mode = args->tiling_mode; 339 obj_priv->tiling_mode = args->tiling_mode;
503 obj_priv->stride = args->stride; 340 obj_priv->stride = args->stride;
504 } 341 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a17d6bdfe63e..5388354da0d1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -166,7 +166,7 @@ void intel_enable_asle (struct drm_device *dev)
166{ 166{
167 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 167 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
168 168
169 if (IS_IRONLAKE(dev)) 169 if (HAS_PCH_SPLIT(dev))
170 ironlake_enable_display_irq(dev_priv, DE_GSE); 170 ironlake_enable_display_irq(dev_priv, DE_GSE);
171 else 171 else
172 i915_enable_pipestat(dev_priv, 1, 172 i915_enable_pipestat(dev_priv, 1,
@@ -269,6 +269,57 @@ static void i915_hotplug_work_func(struct work_struct *work)
269 drm_sysfs_hotplug_event(dev); 269 drm_sysfs_hotplug_event(dev);
270} 270}
271 271
272static void i915_handle_rps_change(struct drm_device *dev)
273{
274 drm_i915_private_t *dev_priv = dev->dev_private;
275 u32 busy_up, busy_down, max_avg, min_avg;
276 u16 rgvswctl;
277 u8 new_delay = dev_priv->cur_delay;
278
279 I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
280 busy_up = I915_READ(RCPREVBSYTUPAVG);
281 busy_down = I915_READ(RCPREVBSYTDNAVG);
282 max_avg = I915_READ(RCBMAXAVG);
283 min_avg = I915_READ(RCBMINAVG);
284
285 /* Handle RCS change request from hw */
286 if (busy_up > max_avg) {
287 if (dev_priv->cur_delay != dev_priv->max_delay)
288 new_delay = dev_priv->cur_delay - 1;
289 if (new_delay < dev_priv->max_delay)
290 new_delay = dev_priv->max_delay;
291 } else if (busy_down < min_avg) {
292 if (dev_priv->cur_delay != dev_priv->min_delay)
293 new_delay = dev_priv->cur_delay + 1;
294 if (new_delay > dev_priv->min_delay)
295 new_delay = dev_priv->min_delay;
296 }
297
298 DRM_DEBUG("rps change requested: %d -> %d\n",
299 dev_priv->cur_delay, new_delay);
300
301 rgvswctl = I915_READ(MEMSWCTL);
302 if (rgvswctl & MEMCTL_CMD_STS) {
303 DRM_ERROR("gpu busy, RCS change rejected\n");
304 return; /* still busy with another command */
305 }
306
307 /* Program the new state */
308 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
309 (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
310 I915_WRITE(MEMSWCTL, rgvswctl);
311 POSTING_READ(MEMSWCTL);
312
313 rgvswctl |= MEMCTL_CMD_STS;
314 I915_WRITE(MEMSWCTL, rgvswctl);
315
316 dev_priv->cur_delay = new_delay;
317
318 DRM_DEBUG("rps changed\n");
319
320 return;
321}
322
272irqreturn_t ironlake_irq_handler(struct drm_device *dev) 323irqreturn_t ironlake_irq_handler(struct drm_device *dev)
273{ 324{
274 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 325 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
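In i915_handle_rps_change() the delay value runs opposite to what the names suggest: a smaller delay means a higher frequency, so max_delay is the numerically smallest value the step-up branch may reach and min_delay the largest. A standalone sketch of just the step-and-clamp logic, leaving out the MEMSWCTL register handshake:

    #include <stdio.h>

    struct rps {
            unsigned char cur_delay;
            unsigned char min_delay;        /* largest value  = lowest frequency  */
            unsigned char max_delay;        /* smallest value = highest frequency */
    };

    static unsigned char rps_next_delay(const struct rps *r, int busier, int idler)
    {
            unsigned char new_delay = r->cur_delay;

            if (busier) {                   /* step toward max performance */
                    if (r->cur_delay != r->max_delay)
                            new_delay = r->cur_delay - 1;
                    if (new_delay < r->max_delay)
                            new_delay = r->max_delay;
            } else if (idler) {             /* step toward min performance */
                    if (r->cur_delay != r->min_delay)
                            new_delay = r->cur_delay + 1;
                    if (new_delay > r->min_delay)
                            new_delay = r->min_delay;
            }
            return new_delay;
    }

    int main(void)
    {
            struct rps r = { .cur_delay = 5, .min_delay = 8, .max_delay = 2 };
            printf("%u\n", (unsigned)rps_next_delay(&r, 1, 0)); /* 4: one step faster */
            return 0;
    }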
@@ -331,6 +382,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
331 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 382 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
332 } 383 }
333 384
385 if (de_iir & DE_PCU_EVENT) {
386 I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
387 i915_handle_rps_change(dev);
388 }
389
334 /* should clear PCH hotplug event before clear CPU irq */ 390 /* should clear PCH hotplug event before clear CPU irq */
335 I915_WRITE(SDEIIR, pch_iir); 391 I915_WRITE(SDEIIR, pch_iir);
336 I915_WRITE(GTIIR, gt_iir); 392 I915_WRITE(GTIIR, gt_iir);
@@ -376,6 +432,121 @@ static void i915_error_work_func(struct work_struct *work)
376 } 432 }
377} 433}
378 434
435static struct drm_i915_error_object *
436i915_error_object_create(struct drm_device *dev,
437 struct drm_gem_object *src)
438{
439 struct drm_i915_error_object *dst;
440 struct drm_i915_gem_object *src_priv;
441 int page, page_count;
442
443 if (src == NULL)
444 return NULL;
445
446 src_priv = src->driver_private;
447 if (src_priv->pages == NULL)
448 return NULL;
449
450 page_count = src->size / PAGE_SIZE;
451
452 dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
453 if (dst == NULL)
454 return NULL;
455
456 for (page = 0; page < page_count; page++) {
457 void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
458 if (d == NULL)
459 goto unwind;
460 s = kmap_atomic(src_priv->pages[page], KM_USER0);
461 memcpy(d, s, PAGE_SIZE);
462 kunmap_atomic(s, KM_USER0);
463 dst->pages[page] = d;
464 }
465 dst->page_count = page_count;
466 dst->gtt_offset = src_priv->gtt_offset;
467
468 return dst;
469
470unwind:
471 while (page--)
472 kfree(dst->pages[page]);
473 kfree(dst);
474 return NULL;
475}
476
477static void
478i915_error_object_free(struct drm_i915_error_object *obj)
479{
480 int page;
481
482 if (obj == NULL)
483 return;
484
485 for (page = 0; page < obj->page_count; page++)
486 kfree(obj->pages[page]);
487
488 kfree(obj);
489}
490
491static void
492i915_error_state_free(struct drm_device *dev,
493 struct drm_i915_error_state *error)
494{
495 i915_error_object_free(error->batchbuffer[0]);
496 i915_error_object_free(error->batchbuffer[1]);
497 i915_error_object_free(error->ringbuffer);
498 kfree(error->active_bo);
499 kfree(error);
500}
501
502static u32
503i915_get_bbaddr(struct drm_device *dev, u32 *ring)
504{
505 u32 cmd;
506
507 if (IS_I830(dev) || IS_845G(dev))
508 cmd = MI_BATCH_BUFFER;
509 else if (IS_I965G(dev))
510 cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
511 MI_BATCH_NON_SECURE_I965);
512 else
513 cmd = (MI_BATCH_BUFFER_START | (2 << 6));
514
515 return ring[0] == cmd ? ring[1] : 0;
516}
517
518static u32
519i915_ringbuffer_last_batch(struct drm_device *dev)
520{
521 struct drm_i915_private *dev_priv = dev->dev_private;
522 u32 head, bbaddr;
523 u32 *ring;
524
525 /* Locate the current position in the ringbuffer and walk back
526 * to find the most recently dispatched batch buffer.
527 */
528 bbaddr = 0;
529 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
530 ring = (u32 *)(dev_priv->ring.virtual_start + head);
531
532 while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
533 bbaddr = i915_get_bbaddr(dev, ring);
534 if (bbaddr)
535 break;
536 }
537
538 if (bbaddr == 0) {
539 ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
540 while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
541 bbaddr = i915_get_bbaddr(dev, ring);
542 if (bbaddr)
543 break;
544 }
545 }
546
547 return bbaddr;
548}
549
379/** 550/**
380 * i915_capture_error_state - capture an error record for later analysis 551 * i915_capture_error_state - capture an error record for later analysis
381 * @dev: drm device 552 * @dev: drm device
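i915_ringbuffer_last_batch() walks backwards from the hardware head pointer until it finds a batch-buffer-start command, whose following dword is the batch address, and wraps once from the end of the ring if nothing turns up. A simplified sketch of that backward scan over a plain array (a single made-up command value stands in for the per-generation MI_BATCH_BUFFER variants, and the wrap pass is omitted):

    #include <stdint.h>
    #include <stdio.h>

    #define CMD_BATCH_START 0x18800000u     /* illustrative opcode only */

    /* returns the dword following the most recent batch-start before 'head',
     * or 0 when none is found in [0, head) */
    static uint32_t last_batch_addr(const uint32_t *ring, size_t head)
    {
            for (size_t i = head; i-- > 0; )
                    if (ring[i] == CMD_BATCH_START && i + 1 < head)
                            return ring[i + 1];
            return 0;
    }

    int main(void)
    {
            uint32_t ring[8] = { 0, CMD_BATCH_START, 0x1000, 0,
                                 CMD_BATCH_START, 0x2000, 0, 0 };
            printf("0x%x\n", last_batch_addr(ring, 7));     /* 0x2000 */
            return 0;
    }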
@@ -388,19 +559,26 @@ static void i915_error_work_func(struct work_struct *work)
388static void i915_capture_error_state(struct drm_device *dev) 559static void i915_capture_error_state(struct drm_device *dev)
389{ 560{
390 struct drm_i915_private *dev_priv = dev->dev_private; 561 struct drm_i915_private *dev_priv = dev->dev_private;
562 struct drm_i915_gem_object *obj_priv;
391 struct drm_i915_error_state *error; 563 struct drm_i915_error_state *error;
564 struct drm_gem_object *batchbuffer[2];
392 unsigned long flags; 565 unsigned long flags;
566 u32 bbaddr;
567 int count;
393 568
394 spin_lock_irqsave(&dev_priv->error_lock, flags); 569 spin_lock_irqsave(&dev_priv->error_lock, flags);
395 if (dev_priv->first_error) 570 error = dev_priv->first_error;
396 goto out; 571 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
572 if (error)
573 return;
397 574
398 error = kmalloc(sizeof(*error), GFP_ATOMIC); 575 error = kmalloc(sizeof(*error), GFP_ATOMIC);
399 if (!error) { 576 if (!error) {
400 DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n"); 577 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
401 goto out; 578 return;
402 } 579 }
403 580
581 error->seqno = i915_get_gem_seqno(dev);
404 error->eir = I915_READ(EIR); 582 error->eir = I915_READ(EIR);
405 error->pgtbl_er = I915_READ(PGTBL_ER); 583 error->pgtbl_er = I915_READ(PGTBL_ER);
406 error->pipeastat = I915_READ(PIPEASTAT); 584 error->pipeastat = I915_READ(PIPEASTAT);
@@ -411,6 +589,7 @@ static void i915_capture_error_state(struct drm_device *dev)
411 error->ipehr = I915_READ(IPEHR); 589 error->ipehr = I915_READ(IPEHR);
412 error->instdone = I915_READ(INSTDONE); 590 error->instdone = I915_READ(INSTDONE);
413 error->acthd = I915_READ(ACTHD); 591 error->acthd = I915_READ(ACTHD);
592 error->bbaddr = 0;
414 } else { 593 } else {
415 error->ipeir = I915_READ(IPEIR_I965); 594 error->ipeir = I915_READ(IPEIR_I965);
416 error->ipehr = I915_READ(IPEHR_I965); 595 error->ipehr = I915_READ(IPEHR_I965);
@@ -418,14 +597,101 @@ static void i915_capture_error_state(struct drm_device *dev)
418 error->instps = I915_READ(INSTPS); 597 error->instps = I915_READ(INSTPS);
419 error->instdone1 = I915_READ(INSTDONE1); 598 error->instdone1 = I915_READ(INSTDONE1);
420 error->acthd = I915_READ(ACTHD_I965); 599 error->acthd = I915_READ(ACTHD_I965);
600 error->bbaddr = I915_READ64(BB_ADDR);
421 } 601 }
422 602
423 do_gettimeofday(&error->time); 603 bbaddr = i915_ringbuffer_last_batch(dev);
604
605 /* Grab the current batchbuffer, most likely to have crashed. */
606 batchbuffer[0] = NULL;
607 batchbuffer[1] = NULL;
608 count = 0;
609 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
610 struct drm_gem_object *obj = obj_priv->obj;
611
612 if (batchbuffer[0] == NULL &&
613 bbaddr >= obj_priv->gtt_offset &&
614 bbaddr < obj_priv->gtt_offset + obj->size)
615 batchbuffer[0] = obj;
616
617 if (batchbuffer[1] == NULL &&
618 error->acthd >= obj_priv->gtt_offset &&
619 error->acthd < obj_priv->gtt_offset + obj->size &&
620 batchbuffer[0] != obj)
621 batchbuffer[1] = obj;
622
623 count++;
624 }
424 625
425 dev_priv->first_error = error; 626 /* We need to copy these to an anonymous buffer as the simplest
627 * method to avoid being overwritten by userpace.
628 */
629 error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
630 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
631
632 /* Record the ringbuffer */
633 error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
634
635 /* Record buffers on the active list. */
636 error->active_bo = NULL;
637 error->active_bo_count = 0;
638
639 if (count)
640 error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
641 GFP_ATOMIC);
642
643 if (error->active_bo) {
644 int i = 0;
645 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
646 struct drm_gem_object *obj = obj_priv->obj;
647
648 error->active_bo[i].size = obj->size;
649 error->active_bo[i].name = obj->name;
650 error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
651 error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
652 error->active_bo[i].read_domains = obj->read_domains;
653 error->active_bo[i].write_domain = obj->write_domain;
654 error->active_bo[i].fence_reg = obj_priv->fence_reg;
655 error->active_bo[i].pinned = 0;
656 if (obj_priv->pin_count > 0)
657 error->active_bo[i].pinned = 1;
658 if (obj_priv->user_pin_count > 0)
659 error->active_bo[i].pinned = -1;
660 error->active_bo[i].tiling = obj_priv->tiling_mode;
661 error->active_bo[i].dirty = obj_priv->dirty;
662 error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
663
664 if (++i == count)
665 break;
666 }
667 error->active_bo_count = i;
668 }
669
670 do_gettimeofday(&error->time);
426 671
427out: 672 spin_lock_irqsave(&dev_priv->error_lock, flags);
673 if (dev_priv->first_error == NULL) {
674 dev_priv->first_error = error;
675 error = NULL;
676 }
428 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 677 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
678
679 if (error)
680 i915_error_state_free(dev, error);
681}
682
683void i915_destroy_error_state(struct drm_device *dev)
684{
685 struct drm_i915_private *dev_priv = dev->dev_private;
686 struct drm_i915_error_state *error;
687
688 spin_lock(&dev_priv->error_lock);
689 error = dev_priv->first_error;
690 dev_priv->first_error = NULL;
691 spin_unlock(&dev_priv->error_lock);
692
693 if (error)
694 i915_error_state_free(dev, error);
429} 695}
430 696
431/** 697/**
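The reworked capture path above no longer builds the error record while holding error_lock: it checks for an existing record, captures everything outside the lock, then re-takes the lock only to publish, freeing its own copy if another CPU won the race. A pthread-based sketch of that publish-or-discard pattern, with an int standing in for the error state (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t error_lock = PTHREAD_MUTEX_INITIALIZER;
    static int *first_error;                /* stand-in for dev_priv->first_error */

    static void capture_error(int value)
    {
            pthread_mutex_lock(&error_lock);
            int already = first_error != NULL;
            pthread_mutex_unlock(&error_lock);
            if (already)
                    return;                 /* keep only the first record */

            /* "capture" outside the lock: may be slow, may allocate */
            int *error = malloc(sizeof(*error));
            if (!error)
                    return;
            *error = value;

            pthread_mutex_lock(&error_lock);
            if (first_error == NULL) {
                    first_error = error;    /* publish */
                    error = NULL;
            }
            pthread_mutex_unlock(&error_lock);

            free(error);                    /* lost the race: discard our copy */
    }

    int main(void)
    {
            capture_error(1);
            capture_error(2);
            printf("first error: %d\n", *first_error);      /* 1 */
            return 0;
    }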
@@ -576,7 +842,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
576 842
577 atomic_inc(&dev_priv->irq_received); 843 atomic_inc(&dev_priv->irq_received);
578 844
579 if (IS_IRONLAKE(dev)) 845 if (HAS_PCH_SPLIT(dev))
580 return ironlake_irq_handler(dev); 846 return ironlake_irq_handler(dev);
581 847
582 iir = I915_READ(IIR); 848 iir = I915_READ(IIR);
@@ -737,7 +1003,7 @@ void i915_user_irq_get(struct drm_device *dev)
737 1003
738 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1004 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
739 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { 1005 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
740 if (IS_IRONLAKE(dev)) 1006 if (HAS_PCH_SPLIT(dev))
741 ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1007 ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
742 else 1008 else
743 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 1009 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -753,7 +1019,7 @@ void i915_user_irq_put(struct drm_device *dev)
753 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1019 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
754 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 1020 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
755 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { 1021 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
756 if (IS_IRONLAKE(dev)) 1022 if (HAS_PCH_SPLIT(dev))
757 ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1023 ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
758 else 1024 else
759 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 1025 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -861,7 +1127,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
861 return -EINVAL; 1127 return -EINVAL;
862 1128
863 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1129 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
864 if (IS_IRONLAKE(dev)) 1130 if (HAS_PCH_SPLIT(dev))
865 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1131 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
866 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1132 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
867 else if (IS_I965G(dev)) 1133 else if (IS_I965G(dev))
@@ -883,7 +1149,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
883 unsigned long irqflags; 1149 unsigned long irqflags;
884 1150
885 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1151 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
886 if (IS_IRONLAKE(dev)) 1152 if (HAS_PCH_SPLIT(dev))
887 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1153 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
888 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1154 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
889 else 1155 else
@@ -897,7 +1163,7 @@ void i915_enable_interrupt (struct drm_device *dev)
897{ 1163{
898 struct drm_i915_private *dev_priv = dev->dev_private; 1164 struct drm_i915_private *dev_priv = dev->dev_private;
899 1165
900 if (!IS_IRONLAKE(dev)) 1166 if (!HAS_PCH_SPLIT(dev))
901 opregion_enable_asle(dev); 1167 opregion_enable_asle(dev);
902 dev_priv->irq_enabled = 1; 1168 dev_priv->irq_enabled = 1;
903} 1169}
@@ -973,7 +1239,11 @@ void i915_hangcheck_elapsed(unsigned long data)
973 struct drm_device *dev = (struct drm_device *)data; 1239 struct drm_device *dev = (struct drm_device *)data;
974 drm_i915_private_t *dev_priv = dev->dev_private; 1240 drm_i915_private_t *dev_priv = dev->dev_private;
975 uint32_t acthd; 1241 uint32_t acthd;
976 1242
1243 /* No reset support on this chip yet. */
1244 if (IS_GEN6(dev))
1245 return;
1246
977 if (!IS_I965G(dev)) 1247 if (!IS_I965G(dev))
978 acthd = I915_READ(ACTHD); 1248 acthd = I915_READ(ACTHD);
979 else 1249 else
@@ -1064,6 +1334,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1064 I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); 1334 I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
1065 (void) I915_READ(SDEIER); 1335 (void) I915_READ(SDEIER);
1066 1336
1337 if (IS_IRONLAKE_M(dev)) {
1338 /* Clear & enable PCU event interrupts */
1339 I915_WRITE(DEIIR, DE_PCU_EVENT);
1340 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1341 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1342 }
1343
1067 return 0; 1344 return 0;
1068} 1345}
1069 1346
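Note on the IRONLAKE_M block above: DE_PCU_EVENT is the display-engine interrupt the PCU raises when a DRPS frequency/voltage evaluation completes, and ironlake_enable_drps(), added to intel_display.c later in this patch, arms the matching MEMINTREN bits and relies on this unmask. The consumer side lives earlier in i915_irq.c and is not part of this hunk; a heavily hedged sketch of its shape, where de_iir is an assumed local holding the DEIIR value and the re-evaluation helper is assumed rather than quoted:

    if (de_iir & DE_PCU_EVENT) {
            /* ack the memory-controller status bits, then let the DRPS code
             * pick a new frequency point (sketch only, not from this hunk) */
            I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
            /* e.g. i915_handle_rps_change(dev); assumed, not shown here */
    }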
@@ -1076,7 +1353,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1076 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 1353 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1077 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 1354 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1078 1355
1079 if (IS_IRONLAKE(dev)) { 1356 if (HAS_PCH_SPLIT(dev)) {
1080 ironlake_irq_preinstall(dev); 1357 ironlake_irq_preinstall(dev);
1081 return; 1358 return;
1082 } 1359 }
@@ -1108,7 +1385,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1108 1385
1109 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1386 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1110 1387
1111 if (IS_IRONLAKE(dev)) 1388 if (HAS_PCH_SPLIT(dev))
1112 return ironlake_irq_postinstall(dev); 1389 return ironlake_irq_postinstall(dev);
1113 1390
1114 /* Unmask the interrupts that we always want on. */ 1391 /* Unmask the interrupts that we always want on. */
@@ -1196,7 +1473,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
1196 1473
1197 dev_priv->vblank_pipe = 0; 1474 dev_priv->vblank_pipe = 0;
1198 1475
1199 if (IS_IRONLAKE(dev)) { 1476 if (HAS_PCH_SPLIT(dev)) {
1200 ironlake_irq_uninstall(dev); 1477 ironlake_irq_uninstall(dev);
1201 return; 1478 return;
1202 } 1479 }
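The i915_irq.c hunks above convert Ironlake-only checks to HAS_PCH_SPLIT(), so the PCH-style interrupt paths (DEIER/SDEIER and the ironlake_*_irq helpers) are shared by Ironlake and the new Gen6/Sandybridge parts. The macro itself comes from the i915_drv.h change elsewhere in this patch; a minimal sketch of the intended shape, assuming that definition:

    /* sketch, assuming the i915_drv.h addition in this same series */
    #define HAS_PCH_SPLIT(dev)  (IS_IRONLAKE(dev) || IS_GEN6(dev))

Callers then take the PCH register path whenever the macro evaluates true, instead of keying on a single chip generation.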
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ab1bd2d3d3b6..3d59862c7ccd 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -53,6 +53,25 @@
53#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) 53#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
54#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) 54#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
55 55
56#define SNB_GMCH_CTRL 0x50
57#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
58#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
59#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
60#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
61#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
62#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
63#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
64#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
65#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
66#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
67#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
68#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
69#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
70#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
71#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
72#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
73#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
74
56/* PCI config space */ 75/* PCI config space */
57 76
58#define HPLLCC 0xc0 /* 855 only */ 77#define HPLLCC 0xc0 /* 855 only */
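The SNB_GMCH_* additions describe the Sandybridge stolen-memory encoding: bits 7:3 of the GMCH control word (presumably read from the bridge's PCI config space like the INTEL_GMCH_GMS_* values above it) select the size in 32 MB steps, from 32M (1) up to 512M (0x10). A hedged decode sketch; the helper name is illustrative and not part of the patch:

    /* illustrative helper, assuming snb_gmch_ctrl was read from offset SNB_GMCH_CTRL */
    static unsigned int snb_stolen_size_mb(u16 snb_gmch_ctrl)
    {
            unsigned int gms = (snb_gmch_ctrl & SNB_GMCH_GMS_STOLEN_MASK) >> 3;

            return gms * 32;        /* 0 means no stolen memory reserved */
    }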
@@ -61,6 +80,7 @@
61#define GC_CLOCK_100_200 (1 << 0) 80#define GC_CLOCK_100_200 (1 << 0)
62#define GC_CLOCK_100_133 (2 << 0) 81#define GC_CLOCK_100_133 (2 << 0)
63#define GC_CLOCK_166_250 (3 << 0) 82#define GC_CLOCK_166_250 (3 << 0)
83#define GCFGC2 0xda
64#define GCFGC 0xf0 /* 915+ only */ 84#define GCFGC 0xf0 /* 915+ only */
65#define GC_LOW_FREQUENCY_ENABLE (1 << 7) 85#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
66#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) 86#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
@@ -234,6 +254,9 @@
234#define I965_FENCE_REG_VALID (1<<0) 254#define I965_FENCE_REG_VALID (1<<0)
235#define I965_FENCE_MAX_PITCH_VAL 0x0400 255#define I965_FENCE_MAX_PITCH_VAL 0x0400
236 256
257#define FENCE_REG_SANDYBRIDGE_0 0x100000
258#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
259
237/* 260/*
238 * Instruction and interrupt control regs 261 * Instruction and interrupt control regs
239 */ 262 */
@@ -265,6 +288,7 @@
265#define INSTDONE1 0x0207c /* 965+ only */ 288#define INSTDONE1 0x0207c /* 965+ only */
266#define ACTHD_I965 0x02074 289#define ACTHD_I965 0x02074
267#define HWS_PGA 0x02080 290#define HWS_PGA 0x02080
291#define HWS_PGA_GEN6 0x04080
268#define HWS_ADDRESS_MASK 0xfffff000 292#define HWS_ADDRESS_MASK 0xfffff000
269#define HWS_START_ADDRESS_SHIFT 4 293#define HWS_START_ADDRESS_SHIFT 4
270#define PWRCTXA 0x2088 /* 965GM+ only */ 294#define PWRCTXA 0x2088 /* 965GM+ only */
@@ -282,7 +306,7 @@
282#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 306#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
283#define I915_DISPLAY_PORT_INTERRUPT (1<<17) 307#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
284#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) 308#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
285#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) 309#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
286#define I915_HWB_OOM_INTERRUPT (1<<13) 310#define I915_HWB_OOM_INTERRUPT (1<<13)
287#define I915_SYNC_STATUS_INTERRUPT (1<<12) 311#define I915_SYNC_STATUS_INTERRUPT (1<<12)
288#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) 312#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
@@ -306,11 +330,14 @@
306#define I915_ERROR_MEMORY_REFRESH (1<<1) 330#define I915_ERROR_MEMORY_REFRESH (1<<1)
307#define I915_ERROR_INSTRUCTION (1<<0) 331#define I915_ERROR_INSTRUCTION (1<<0)
308#define INSTPM 0x020c0 332#define INSTPM 0x020c0
333#define INSTPM_SELF_EN (1<<12) /* 915GM only */
309#define ACTHD 0x020c8 334#define ACTHD 0x020c8
310#define FW_BLC 0x020d8 335#define FW_BLC 0x020d8
311#define FW_BLC2 0x020dc 336#define FW_BLC2 0x020dc
312#define FW_BLC_SELF 0x020e0 /* 915+ only */ 337#define FW_BLC_SELF 0x020e0 /* 915+ only */
313#define FW_BLC_SELF_EN (1<<15) 338#define FW_BLC_SELF_EN_MASK (1<<31)
339#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
340#define FW_BLC_SELF_EN (1<<15) /* 945 only */
314#define MM_BURST_LENGTH 0x00700000 341#define MM_BURST_LENGTH 0x00700000
315#define MM_FIFO_WATERMARK 0x0001F000 342#define MM_FIFO_WATERMARK 0x0001F000
316#define LM_BURST_LENGTH 0x00000700 343#define LM_BURST_LENGTH 0x00000700
@@ -324,6 +351,7 @@
324#define CM0_COLOR_EVICT_DISABLE (1<<3) 351#define CM0_COLOR_EVICT_DISABLE (1<<3)
325#define CM0_DEPTH_WRITE_DISABLE (1<<1) 352#define CM0_DEPTH_WRITE_DISABLE (1<<1)
326#define CM0_RC_OP_FLUSH_DISABLE (1<<0) 353#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
354#define BB_ADDR 0x02140 /* 8 bytes */
327#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 355#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
328 356
329 357
@@ -784,10 +812,144 @@
784#define CLKCFG_MEM_800 (3 << 4) 812#define CLKCFG_MEM_800 (3 << 4)
785#define CLKCFG_MEM_MASK (7 << 4) 813#define CLKCFG_MEM_MASK (7 << 4)
786 814
787/** GM965 GM45 render standby register */ 815#define CRSTANDVID 0x11100
788#define MCHBAR_RENDER_STANDBY 0x111B8 816#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
817#define PXVFREQ_PX_MASK 0x7f000000
818#define PXVFREQ_PX_SHIFT 24
819#define VIDFREQ_BASE 0x11110
820#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
821#define VIDFREQ2 0x11114
822#define VIDFREQ3 0x11118
823#define VIDFREQ4 0x1111c
824#define VIDFREQ_P0_MASK 0x1f000000
825#define VIDFREQ_P0_SHIFT 24
826#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
827#define VIDFREQ_P0_CSCLK_SHIFT 20
828#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
829#define VIDFREQ_P0_CRCLK_SHIFT 16
830#define VIDFREQ_P1_MASK 0x00001f00
831#define VIDFREQ_P1_SHIFT 8
832#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
833#define VIDFREQ_P1_CSCLK_SHIFT 4
834#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
835#define INTTOEXT_BASE_ILK 0x11300
836#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */
837#define INTTOEXT_MAP3_SHIFT 24
838#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
839#define INTTOEXT_MAP2_SHIFT 16
840#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
841#define INTTOEXT_MAP1_SHIFT 8
842#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
843#define INTTOEXT_MAP0_SHIFT 0
844#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
845#define MEMSWCTL 0x11170 /* Ironlake only */
846#define MEMCTL_CMD_MASK 0xe000
847#define MEMCTL_CMD_SHIFT 13
848#define MEMCTL_CMD_RCLK_OFF 0
849#define MEMCTL_CMD_RCLK_ON 1
850#define MEMCTL_CMD_CHFREQ 2
851#define MEMCTL_CMD_CHVID 3
852#define MEMCTL_CMD_VMMOFF 4
853#define MEMCTL_CMD_VMMON 5
854#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
855 when command complete */
856#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
857#define MEMCTL_FREQ_SHIFT 8
858#define MEMCTL_SFCAVM (1<<7)
859#define MEMCTL_TGT_VID_MASK 0x007f
860#define MEMIHYST 0x1117c
861#define MEMINTREN 0x11180 /* 16 bits */
862#define MEMINT_RSEXIT_EN (1<<8)
863#define MEMINT_CX_SUPR_EN (1<<7)
864#define MEMINT_CONT_BUSY_EN (1<<6)
865#define MEMINT_AVG_BUSY_EN (1<<5)
866#define MEMINT_EVAL_CHG_EN (1<<4)
867#define MEMINT_MON_IDLE_EN (1<<3)
868#define MEMINT_UP_EVAL_EN (1<<2)
869#define MEMINT_DOWN_EVAL_EN (1<<1)
870#define MEMINT_SW_CMD_EN (1<<0)
871#define MEMINTRSTR 0x11182 /* 16 bits */
872#define MEM_RSEXIT_MASK 0xc000
873#define MEM_RSEXIT_SHIFT 14
874#define MEM_CONT_BUSY_MASK 0x3000
875#define MEM_CONT_BUSY_SHIFT 12
876#define MEM_AVG_BUSY_MASK 0x0c00
877#define MEM_AVG_BUSY_SHIFT 10
878#define MEM_EVAL_CHG_MASK 0x0300
879#define MEM_EVAL_BUSY_SHIFT 8
880#define MEM_MON_IDLE_MASK 0x00c0
881#define MEM_MON_IDLE_SHIFT 6
882#define MEM_UP_EVAL_MASK 0x0030
883#define MEM_UP_EVAL_SHIFT 4
884#define MEM_DOWN_EVAL_MASK 0x000c
885#define MEM_DOWN_EVAL_SHIFT 2
886#define MEM_SW_CMD_MASK 0x0003
887#define MEM_INT_STEER_GFX 0
888#define MEM_INT_STEER_CMR 1
889#define MEM_INT_STEER_SMI 2
890#define MEM_INT_STEER_SCI 3
891#define MEMINTRSTS 0x11184
892#define MEMINT_RSEXIT (1<<7)
893#define MEMINT_CONT_BUSY (1<<6)
894#define MEMINT_AVG_BUSY (1<<5)
895#define MEMINT_EVAL_CHG (1<<4)
896#define MEMINT_MON_IDLE (1<<3)
897#define MEMINT_UP_EVAL (1<<2)
898#define MEMINT_DOWN_EVAL (1<<1)
899#define MEMINT_SW_CMD (1<<0)
900#define MEMMODECTL 0x11190
901#define MEMMODE_BOOST_EN (1<<31)
902#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
903#define MEMMODE_BOOST_FREQ_SHIFT 24
904#define MEMMODE_IDLE_MODE_MASK 0x00030000
905#define MEMMODE_IDLE_MODE_SHIFT 16
906#define MEMMODE_IDLE_MODE_EVAL 0
907#define MEMMODE_IDLE_MODE_CONT 1
908#define MEMMODE_HWIDLE_EN (1<<15)
909#define MEMMODE_SWMODE_EN (1<<14)
910#define MEMMODE_RCLK_GATE (1<<13)
911#define MEMMODE_HW_UPDATE (1<<12)
912#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
913#define MEMMODE_FSTART_SHIFT 8
914#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
915#define MEMMODE_FMAX_SHIFT 4
916#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
917#define RCBMAXAVG 0x1119c
918#define MEMSWCTL2 0x1119e /* Cantiga only */
919#define SWMEMCMD_RENDER_OFF (0 << 13)
920#define SWMEMCMD_RENDER_ON (1 << 13)
921#define SWMEMCMD_SWFREQ (2 << 13)
922#define SWMEMCMD_TARVID (3 << 13)
923#define SWMEMCMD_VRM_OFF (4 << 13)
924#define SWMEMCMD_VRM_ON (5 << 13)
925#define CMDSTS (1<<12)
926#define SFCAVM (1<<11)
927#define SWFREQ_MASK 0x0380 /* P0-7 */
928#define SWFREQ_SHIFT 7
929#define TARVID_MASK 0x001f
930#define MEMSTAT_CTG 0x111a0
931#define RCBMINAVG 0x111a0
932#define RCUPEI 0x111b0
933#define RCDNEI 0x111b4
934#define MCHBAR_RENDER_STANDBY 0x111b8
789#define RCX_SW_EXIT (1<<23) 935#define RCX_SW_EXIT (1<<23)
790#define RSX_STATUS_MASK 0x00700000 936#define RSX_STATUS_MASK 0x00700000
937#define VIDCTL 0x111c0
938#define VIDSTS 0x111c8
939#define VIDSTART 0x111cc /* 8 bits */
940#define MEMSTAT_ILK 0x111f8
941#define MEMSTAT_VID_MASK 0x7f00
942#define MEMSTAT_VID_SHIFT 8
943#define MEMSTAT_PSTATE_MASK 0x00f8
944#define MEMSTAT_PSTATE_SHIFT 3
945#define MEMSTAT_MON_ACTV (1<<2)
946#define MEMSTAT_SRC_CTL_MASK 0x0003
947#define MEMSTAT_SRC_CTL_CORE 0
948#define MEMSTAT_SRC_CTL_TRB 1
949#define MEMSTAT_SRC_CTL_THM 2
950#define MEMSTAT_SRC_CTL_STDBY 3
951#define RCPREVBSYTUPAVG 0x113b8
952#define RCPREVBSYTDNAVG 0x113bc
791#define PEG_BAND_GAP_DATA 0x14d68 953#define PEG_BAND_GAP_DATA 0x14d68
792 954
793/* 955/*
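MEMSWCTL is the Ironlake software memory-frequency command register added above: bits 15:13 select the command, MEMCTL_CMD_STS (bit 12) is written as 1 to trigger the command and reads back 0 once the hardware has finished, and bits 11:8 carry the target frequency point. The DRPS code added to intel_display.c later in this patch uses exactly this sequence; a condensed sketch (the helper name is illustrative):

    /* condensed from ironlake_enable_drps()/ironlake_disable_drps() below */
    static void ironlake_request_freq(struct drm_i915_private *dev_priv, u8 fpoint)
    {
            u32 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
                           (fpoint << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;

            I915_WRITE(MEMSWCTL, rgvswctl);
            POSTING_READ(MEMSWCTL);

            rgvswctl |= MEMCTL_CMD_STS;     /* writing 1 kicks off the command */
            I915_WRITE(MEMSWCTL, rgvswctl);
    }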
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a3b90c9561dc..ac0d1a73ac22 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -682,6 +682,8 @@ void i915_restore_display(struct drm_device *dev)
682 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); 682 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
683 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); 683 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
684 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); 684 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
685 I915_WRITE(MCHBAR_RENDER_STANDBY,
686 dev_priv->saveMCHBAR_RENDER_STANDBY);
685 } else { 687 } else {
686 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); 688 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
687 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); 689 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
@@ -745,11 +747,16 @@ int i915_save_state(struct drm_device *dev)
745 dev_priv->saveGTIMR = I915_READ(GTIMR); 747 dev_priv->saveGTIMR = I915_READ(GTIMR);
746 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); 748 dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
747 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); 749 dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
750 dev_priv->saveMCHBAR_RENDER_STANDBY =
751 I915_READ(MCHBAR_RENDER_STANDBY);
748 } else { 752 } else {
749 dev_priv->saveIER = I915_READ(IER); 753 dev_priv->saveIER = I915_READ(IER);
750 dev_priv->saveIMR = I915_READ(IMR); 754 dev_priv->saveIMR = I915_READ(IMR);
751 } 755 }
752 756
757 if (IS_IRONLAKE_M(dev))
758 ironlake_disable_drps(dev);
759
753 /* Cache mode state */ 760 /* Cache mode state */
754 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 761 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
755 762
@@ -820,6 +827,9 @@ int i915_restore_state(struct drm_device *dev)
820 /* Clock gating state */ 827 /* Clock gating state */
821 intel_init_clock_gating(dev); 828 intel_init_clock_gating(dev);
822 829
830 if (IS_IRONLAKE_M(dev))
831 ironlake_enable_drps(dev);
832
823 /* Cache mode state */ 833 /* Cache mode state */
824 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 834 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
825 835
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 15fbc1b5a83e..70c9d4ba7042 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -247,6 +247,7 @@ static void
247parse_general_features(struct drm_i915_private *dev_priv, 247parse_general_features(struct drm_i915_private *dev_priv,
248 struct bdb_header *bdb) 248 struct bdb_header *bdb)
249{ 249{
250 struct drm_device *dev = dev_priv->dev;
250 struct bdb_general_features *general; 251 struct bdb_general_features *general;
251 252
252 /* Set sensible defaults in case we can't find the general block */ 253 /* Set sensible defaults in case we can't find the general block */
@@ -263,7 +264,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
263 if (IS_I85X(dev_priv->dev)) 264 if (IS_I85X(dev_priv->dev))
264 dev_priv->lvds_ssc_freq = 265 dev_priv->lvds_ssc_freq =
265 general->ssc_freq ? 66 : 48; 266 general->ssc_freq ? 66 : 48;
266 else if (IS_IRONLAKE(dev_priv->dev)) 267 else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
267 dev_priv->lvds_ssc_freq = 268 dev_priv->lvds_ssc_freq =
268 general->ssc_freq ? 100 : 120; 269 general->ssc_freq ? 100 : 120;
269 else 270 else
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 79dd4026586f..fccf07470c8f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
39 struct drm_i915_private *dev_priv = dev->dev_private; 39 struct drm_i915_private *dev_priv = dev->dev_private;
40 u32 temp, reg; 40 u32 temp, reg;
41 41
42 if (IS_IRONLAKE(dev)) 42 if (HAS_PCH_SPLIT(dev))
43 reg = PCH_ADPA; 43 reg = PCH_ADPA;
44 else 44 else
45 reg = ADPA; 45 reg = ADPA;
@@ -113,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
113 else 113 else
114 dpll_md_reg = DPLL_B_MD; 114 dpll_md_reg = DPLL_B_MD;
115 115
116 if (IS_IRONLAKE(dev)) 116 if (HAS_PCH_SPLIT(dev))
117 adpa_reg = PCH_ADPA; 117 adpa_reg = PCH_ADPA;
118 else 118 else
119 adpa_reg = ADPA; 119 adpa_reg = ADPA;
@@ -122,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
122 * Disable separate mode multiplier used when cloning SDVO to CRT 122 * Disable separate mode multiplier used when cloning SDVO to CRT
123 * XXX this needs to be adjusted when we really are cloning 123 * XXX this needs to be adjusted when we really are cloning
124 */ 124 */
125 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { 125 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
126 dpll_md = I915_READ(dpll_md_reg); 126 dpll_md = I915_READ(dpll_md_reg);
127 I915_WRITE(dpll_md_reg, 127 I915_WRITE(dpll_md_reg,
128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); 128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -136,11 +136,11 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
136 136
137 if (intel_crtc->pipe == 0) { 137 if (intel_crtc->pipe == 0) {
138 adpa |= ADPA_PIPE_A_SELECT; 138 adpa |= ADPA_PIPE_A_SELECT;
139 if (!IS_IRONLAKE(dev)) 139 if (!HAS_PCH_SPLIT(dev))
140 I915_WRITE(BCLRPAT_A, 0); 140 I915_WRITE(BCLRPAT_A, 0);
141 } else { 141 } else {
142 adpa |= ADPA_PIPE_B_SELECT; 142 adpa |= ADPA_PIPE_B_SELECT;
143 if (!IS_IRONLAKE(dev)) 143 if (!HAS_PCH_SPLIT(dev))
144 I915_WRITE(BCLRPAT_B, 0); 144 I915_WRITE(BCLRPAT_B, 0);
145 } 145 }
146 146
@@ -202,7 +202,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
202 u32 hotplug_en; 202 u32 hotplug_en;
203 int i, tries = 0; 203 int i, tries = 0;
204 204
205 if (IS_IRONLAKE(dev)) 205 if (HAS_PCH_SPLIT(dev))
206 return intel_ironlake_crt_detect_hotplug(connector); 206 return intel_ironlake_crt_detect_hotplug(connector);
207 207
208 /* 208 /*
@@ -524,7 +524,7 @@ void intel_crt_init(struct drm_device *dev)
524 &intel_output->enc); 524 &intel_output->enc);
525 525
526 /* Set up the DDC bus. */ 526 /* Set up the DDC bus. */
527 if (IS_IRONLAKE(dev)) 527 if (HAS_PCH_SPLIT(dev))
528 i2c_reg = PCH_GPIOA; 528 i2c_reg = PCH_GPIOA;
529 else { 529 else {
530 i2c_reg = GPIOA; 530 i2c_reg = GPIOA;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b27202d23ebc..9cd6de5f9906 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -232,7 +232,7 @@ struct intel_limit {
232#define G4X_P2_DISPLAY_PORT_FAST 10 232#define G4X_P2_DISPLAY_PORT_FAST 10
233#define G4X_P2_DISPLAY_PORT_LIMIT 0 233#define G4X_P2_DISPLAY_PORT_LIMIT 0
234 234
235/* Ironlake */ 235/* Ironlake / Sandybridge */
236/* as we calculate clock using (register_value + 2) for 236/* as we calculate clock using (register_value + 2) for
237 N/M1/M2, so here the range value for them is (actual_value-2). 237 N/M1/M2, so here the range value for them is (actual_value-2).
238 */ 238 */
@@ -690,7 +690,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
690 struct drm_device *dev = crtc->dev; 690 struct drm_device *dev = crtc->dev;
691 const intel_limit_t *limit; 691 const intel_limit_t *limit;
692 692
693 if (IS_IRONLAKE(dev)) 693 if (HAS_PCH_SPLIT(dev))
694 limit = intel_ironlake_limit(crtc); 694 limit = intel_ironlake_limit(crtc);
695 else if (IS_G4X(dev)) { 695 else if (IS_G4X(dev)) {
696 limit = intel_g4x_limit(crtc); 696 limit = intel_g4x_limit(crtc);
@@ -886,7 +886,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
886 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 886 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
887 int lvds_reg; 887 int lvds_reg;
888 888
889 if (IS_IRONLAKE(dev)) 889 if (HAS_PCH_SPLIT(dev))
890 lvds_reg = PCH_LVDS; 890 lvds_reg = PCH_LVDS;
891 else 891 else
892 lvds_reg = LVDS; 892 lvds_reg = LVDS;
@@ -1188,25 +1188,30 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1188 if (intel_fb->obj->size > dev_priv->cfb_size) { 1188 if (intel_fb->obj->size > dev_priv->cfb_size) {
1189 DRM_DEBUG_KMS("framebuffer too large, disabling " 1189 DRM_DEBUG_KMS("framebuffer too large, disabling "
1190 "compression\n"); 1190 "compression\n");
1191 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1191 goto out_disable; 1192 goto out_disable;
1192 } 1193 }
1193 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 1194 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
1194 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { 1195 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
1195 DRM_DEBUG_KMS("mode incompatible with compression, " 1196 DRM_DEBUG_KMS("mode incompatible with compression, "
1196 "disabling\n"); 1197 "disabling\n");
1198 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1197 goto out_disable; 1199 goto out_disable;
1198 } 1200 }
1199 if ((mode->hdisplay > 2048) || 1201 if ((mode->hdisplay > 2048) ||
1200 (mode->vdisplay > 1536)) { 1202 (mode->vdisplay > 1536)) {
1201 DRM_DEBUG_KMS("mode too large for compression, disabling\n"); 1203 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1204 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1202 goto out_disable; 1205 goto out_disable;
1203 } 1206 }
1204 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { 1207 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
1205 DRM_DEBUG_KMS("plane not 0, disabling compression\n"); 1208 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1209 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1206 goto out_disable; 1210 goto out_disable;
1207 } 1211 }
1208 if (obj_priv->tiling_mode != I915_TILING_X) { 1212 if (obj_priv->tiling_mode != I915_TILING_X) {
1209 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); 1213 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
1214 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1210 goto out_disable; 1215 goto out_disable;
1211 } 1216 }
1212 1217
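intel_update_fbc() now records why framebuffer compression was rejected in dev_priv->no_fbc_reason instead of only printing a debug message. The FBC_* codes are assumed here to come from an enum added to i915_drv.h in this same patch so the reason can be reported later (for example through debugfs); a hedged sketch of such a mapping, not taken from this hunk:

    /* illustrative only; assumes an enum (here called no_fbc_reason) in i915_drv.h */
    static const char *fbc_reason_str(enum no_fbc_reason reason)
    {
            switch (reason) {
            case FBC_STOLEN_TOO_SMALL: return "compressed buffer too small for fb";
            case FBC_UNSUPPORTED_MODE: return "interlaced or doublescan mode";
            case FBC_MODE_TOO_LARGE:   return "mode exceeds 2048x1536";
            case FBC_BAD_PLANE:        return "FBC limited to plane A on this chip";
            case FBC_NOT_TILED:        return "framebuffer is not X-tiled";
            default:                   return "unknown";
            }
    }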
@@ -1366,7 +1371,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1366 dspcntr &= ~DISPPLANE_TILED; 1371 dspcntr &= ~DISPPLANE_TILED;
1367 } 1372 }
1368 1373
1369 if (IS_IRONLAKE(dev)) 1374 if (HAS_PCH_SPLIT(dev))
1370 /* must disable */ 1375 /* must disable */
1371 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 1376 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1372 1377
@@ -1427,7 +1432,7 @@ static void i915_disable_vga (struct drm_device *dev)
1427 u8 sr1; 1432 u8 sr1;
1428 u32 vga_reg; 1433 u32 vga_reg;
1429 1434
1430 if (IS_IRONLAKE(dev)) 1435 if (HAS_PCH_SPLIT(dev))
1431 vga_reg = CPU_VGACNTRL; 1436 vga_reg = CPU_VGACNTRL;
1432 else 1437 else
1433 vga_reg = VGACNTRL; 1438 vga_reg = VGACNTRL;
@@ -2111,7 +2116,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2111 struct drm_display_mode *adjusted_mode) 2116 struct drm_display_mode *adjusted_mode)
2112{ 2117{
2113 struct drm_device *dev = crtc->dev; 2118 struct drm_device *dev = crtc->dev;
2114 if (IS_IRONLAKE(dev)) { 2119 if (HAS_PCH_SPLIT(dev)) {
2115 /* FDI link clock is fixed at 2.7G */ 2120 /* FDI link clock is fixed at 2.7G */
2116 if (mode->clock * 3 > 27000 * 4) 2121 if (mode->clock * 3 > 27000 * 4)
2117 return MODE_CLOCK_HIGH; 2122 return MODE_CLOCK_HIGH;
@@ -2757,11 +2762,22 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2757 srwm = total_size - sr_entries; 2762 srwm = total_size - sr_entries;
2758 if (srwm < 0) 2763 if (srwm < 0)
2759 srwm = 1; 2764 srwm = 1;
2760 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2765
2766 if (IS_I945G(dev) || IS_I945GM(dev))
2767 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2768 else if (IS_I915GM(dev)) {
2769 /* 915M has a smaller SRWM field */
2770 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
2771 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
2772 }
2761 } else { 2773 } else {
2762 /* Turn off self refresh if both pipes are enabled */ 2774 /* Turn off self refresh if both pipes are enabled */
2763 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 2775 if (IS_I945G(dev) || IS_I945GM(dev)) {
2764 & ~FW_BLC_SELF_EN); 2776 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2777 & ~FW_BLC_SELF_EN);
2778 } else if (IS_I915GM(dev)) {
2779 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
2780 }
2765 } 2781 }
2766 2782
2767 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2783 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -2967,7 +2983,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2967 refclk / 1000); 2983 refclk / 1000);
2968 } else if (IS_I9XX(dev)) { 2984 } else if (IS_I9XX(dev)) {
2969 refclk = 96000; 2985 refclk = 96000;
2970 if (IS_IRONLAKE(dev)) 2986 if (HAS_PCH_SPLIT(dev))
2971 refclk = 120000; /* 120Mhz refclk */ 2987 refclk = 120000; /* 120Mhz refclk */
2972 } else { 2988 } else {
2973 refclk = 48000; 2989 refclk = 48000;
@@ -3025,7 +3041,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3025 } 3041 }
3026 3042
3027 /* FDI link */ 3043 /* FDI link */
3028 if (IS_IRONLAKE(dev)) { 3044 if (HAS_PCH_SPLIT(dev)) {
3029 int lane, link_bw, bpp; 3045 int lane, link_bw, bpp;
3030 /* eDP doesn't require FDI link, so just set DP M/N 3046 /* eDP doesn't require FDI link, so just set DP M/N
3031 according to current link config */ 3047 according to current link config */
@@ -3102,7 +3118,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3102 * PCH B stepping, previous chipset stepping should be 3118 * PCH B stepping, previous chipset stepping should be
3103 * ignoring this setting. 3119 * ignoring this setting.
3104 */ 3120 */
3105 if (IS_IRONLAKE(dev)) { 3121 if (HAS_PCH_SPLIT(dev)) {
3106 temp = I915_READ(PCH_DREF_CONTROL); 3122 temp = I915_READ(PCH_DREF_CONTROL);
3107 /* Always enable nonspread source */ 3123 /* Always enable nonspread source */
3108 temp &= ~DREF_NONSPREAD_SOURCE_MASK; 3124 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -3149,7 +3165,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3149 reduced_clock.m2; 3165 reduced_clock.m2;
3150 } 3166 }
3151 3167
3152 if (!IS_IRONLAKE(dev)) 3168 if (!HAS_PCH_SPLIT(dev))
3153 dpll = DPLL_VGA_MODE_DIS; 3169 dpll = DPLL_VGA_MODE_DIS;
3154 3170
3155 if (IS_I9XX(dev)) { 3171 if (IS_I9XX(dev)) {
@@ -3162,7 +3178,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3162 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3178 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3163 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 3179 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3164 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 3180 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
3165 else if (IS_IRONLAKE(dev)) 3181 else if (HAS_PCH_SPLIT(dev))
3166 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 3182 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
3167 } 3183 }
3168 if (is_dp) 3184 if (is_dp)
@@ -3174,7 +3190,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3174 else { 3190 else {
3175 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 3191 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3176 /* also FPA1 */ 3192 /* also FPA1 */
3177 if (IS_IRONLAKE(dev)) 3193 if (HAS_PCH_SPLIT(dev))
3178 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3194 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
3179 if (IS_G4X(dev) && has_reduced_clock) 3195 if (IS_G4X(dev) && has_reduced_clock)
3180 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3196 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -3193,7 +3209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3193 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 3209 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
3194 break; 3210 break;
3195 } 3211 }
3196 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) 3212 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
3197 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 3213 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
3198 } else { 3214 } else {
3199 if (is_lvds) { 3215 if (is_lvds) {
@@ -3227,7 +3243,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3227 3243
3228 /* Ironlake's plane is forced to pipe, bit 24 is to 3244 /* Ironlake's plane is forced to pipe, bit 24 is to
3229 enable color space conversion */ 3245 enable color space conversion */
3230 if (!IS_IRONLAKE(dev)) { 3246 if (!HAS_PCH_SPLIT(dev)) {
3231 if (pipe == 0) 3247 if (pipe == 0)
3232 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 3248 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
3233 else 3249 else
@@ -3254,14 +3270,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3254 3270
3255 3271
3256 /* Disable the panel fitter if it was on our pipe */ 3272 /* Disable the panel fitter if it was on our pipe */
3257 if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe) 3273 if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
3258 I915_WRITE(PFIT_CONTROL, 0); 3274 I915_WRITE(PFIT_CONTROL, 0);
3259 3275
3260 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 3276 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
3261 drm_mode_debug_printmodeline(mode); 3277 drm_mode_debug_printmodeline(mode);
3262 3278
3263 /* assign to Ironlake registers */ 3279 /* assign to Ironlake registers */
3264 if (IS_IRONLAKE(dev)) { 3280 if (HAS_PCH_SPLIT(dev)) {
3265 fp_reg = pch_fp_reg; 3281 fp_reg = pch_fp_reg;
3266 dpll_reg = pch_dpll_reg; 3282 dpll_reg = pch_dpll_reg;
3267 } 3283 }
@@ -3282,7 +3298,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3282 if (is_lvds) { 3298 if (is_lvds) {
3283 u32 lvds; 3299 u32 lvds;
3284 3300
3285 if (IS_IRONLAKE(dev)) 3301 if (HAS_PCH_SPLIT(dev))
3286 lvds_reg = PCH_LVDS; 3302 lvds_reg = PCH_LVDS;
3287 3303
3288 lvds = I915_READ(lvds_reg); 3304 lvds = I915_READ(lvds_reg);
@@ -3304,12 +3320,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3304 /* set the dithering flag */ 3320 /* set the dithering flag */
3305 if (IS_I965G(dev)) { 3321 if (IS_I965G(dev)) {
3306 if (dev_priv->lvds_dither) { 3322 if (dev_priv->lvds_dither) {
3307 if (IS_IRONLAKE(dev)) 3323 if (HAS_PCH_SPLIT(dev))
3308 pipeconf |= PIPE_ENABLE_DITHER; 3324 pipeconf |= PIPE_ENABLE_DITHER;
3309 else 3325 else
3310 lvds |= LVDS_ENABLE_DITHER; 3326 lvds |= LVDS_ENABLE_DITHER;
3311 } else { 3327 } else {
3312 if (IS_IRONLAKE(dev)) 3328 if (HAS_PCH_SPLIT(dev))
3313 pipeconf &= ~PIPE_ENABLE_DITHER; 3329 pipeconf &= ~PIPE_ENABLE_DITHER;
3314 else 3330 else
3315 lvds &= ~LVDS_ENABLE_DITHER; 3331 lvds &= ~LVDS_ENABLE_DITHER;
@@ -3328,7 +3344,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3328 /* Wait for the clocks to stabilize. */ 3344 /* Wait for the clocks to stabilize. */
3329 udelay(150); 3345 udelay(150);
3330 3346
3331 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { 3347 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
3332 if (is_sdvo) { 3348 if (is_sdvo) {
3333 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3349 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3334 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 3350 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3375,14 +3391,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3375 /* pipesrc and dspsize control the size that is scaled from, which should 3391 /* pipesrc and dspsize control the size that is scaled from, which should
3376 * always be the user's requested size. 3392 * always be the user's requested size.
3377 */ 3393 */
3378 if (!IS_IRONLAKE(dev)) { 3394 if (!HAS_PCH_SPLIT(dev)) {
3379 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | 3395 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
3380 (mode->hdisplay - 1)); 3396 (mode->hdisplay - 1));
3381 I915_WRITE(dsppos_reg, 0); 3397 I915_WRITE(dsppos_reg, 0);
3382 } 3398 }
3383 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 3399 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
3384 3400
3385 if (IS_IRONLAKE(dev)) { 3401 if (HAS_PCH_SPLIT(dev)) {
3386 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); 3402 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
3387 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); 3403 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
3388 I915_WRITE(link_m1_reg, m_n.link_m); 3404 I915_WRITE(link_m1_reg, m_n.link_m);
@@ -3438,7 +3454,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
3438 return; 3454 return;
3439 3455
3440 /* use legacy palette for Ironlake */ 3456 /* use legacy palette for Ironlake */
3441 if (IS_IRONLAKE(dev)) 3457 if (HAS_PCH_SPLIT(dev))
3442 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : 3458 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
3443 LGC_PALETTE_B; 3459 LGC_PALETTE_B;
3444 3460
@@ -3553,11 +3569,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3553 intel_crtc->cursor_bo = bo; 3569 intel_crtc->cursor_bo = bo;
3554 3570
3555 return 0; 3571 return 0;
3556fail:
3557 mutex_lock(&dev->struct_mutex);
3558fail_locked: 3572fail_locked:
3559 drm_gem_object_unreference(bo);
3560 mutex_unlock(&dev->struct_mutex); 3573 mutex_unlock(&dev->struct_mutex);
3574fail:
3575 drm_gem_object_unreference_unlocked(bo);
3561 return ret; 3576 return ret;
3562} 3577}
3563 3578
@@ -3922,7 +3937,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
3922 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3937 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3923 int dpll = I915_READ(dpll_reg); 3938 int dpll = I915_READ(dpll_reg);
3924 3939
3925 if (IS_IRONLAKE(dev)) 3940 if (HAS_PCH_SPLIT(dev))
3926 return; 3941 return;
3927 3942
3928 if (!dev_priv->lvds_downclock_avail) 3943 if (!dev_priv->lvds_downclock_avail)
@@ -3961,7 +3976,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3961 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3976 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3962 int dpll = I915_READ(dpll_reg); 3977 int dpll = I915_READ(dpll_reg);
3963 3978
3964 if (IS_IRONLAKE(dev)) 3979 if (HAS_PCH_SPLIT(dev))
3965 return; 3980 return;
3966 3981
3967 if (!dev_priv->lvds_downclock_avail) 3982 if (!dev_priv->lvds_downclock_avail)
@@ -4011,6 +4026,11 @@ static void intel_idle_update(struct work_struct *work)
4011 4026
4012 mutex_lock(&dev->struct_mutex); 4027 mutex_lock(&dev->struct_mutex);
4013 4028
4029 if (IS_I945G(dev) || IS_I945GM(dev)) {
4030 DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
4031 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4032 }
4033
4014 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4034 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4015 /* Skip inactive CRTCs */ 4035 /* Skip inactive CRTCs */
4016 if (!crtc->fb) 4036 if (!crtc->fb)
@@ -4044,9 +4064,17 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4044 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4064 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4045 return; 4065 return;
4046 4066
4047 if (!dev_priv->busy) 4067 if (!dev_priv->busy) {
4068 if (IS_I945G(dev) || IS_I945GM(dev)) {
4069 u32 fw_blc_self;
4070
4071 DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
4072 fw_blc_self = I915_READ(FW_BLC_SELF);
4073 fw_blc_self &= ~FW_BLC_SELF_EN;
4074 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
4075 }
4048 dev_priv->busy = true; 4076 dev_priv->busy = true;
4049 else 4077 } else
4050 mod_timer(&dev_priv->idle_timer, jiffies + 4078 mod_timer(&dev_priv->idle_timer, jiffies +
4051 msecs_to_jiffies(GPU_IDLE_TIMEOUT)); 4079 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
4052 4080
@@ -4058,6 +4086,14 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
4058 intel_fb = to_intel_framebuffer(crtc->fb); 4086 intel_fb = to_intel_framebuffer(crtc->fb);
4059 if (intel_fb->obj == obj) { 4087 if (intel_fb->obj == obj) {
4060 if (!intel_crtc->busy) { 4088 if (!intel_crtc->busy) {
4089 if (IS_I945G(dev) || IS_I945GM(dev)) {
4090 u32 fw_blc_self;
4091
4092 DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
4093 fw_blc_self = I915_READ(FW_BLC_SELF);
4094 fw_blc_self &= ~FW_BLC_SELF_EN;
4095 I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
4096 }
4061 /* Non-busy -> busy, upclock */ 4097 /* Non-busy -> busy, upclock */
4062 intel_increase_pllclock(crtc, true); 4098 intel_increase_pllclock(crtc, true);
4063 intel_crtc->busy = true; 4099 intel_crtc->busy = true;
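Taken together with the i9xx_update_wm() change above, the 945 idle/busy hooks turn memory self-refresh on once the GPU has gone idle and back off as soon as new work arrives, while i9xx_update_wm() handles 915GM separately through INSTPM_SELF_EN. Reading the new FW_BLC_SELF defines, bit 31 appears to act as a write-enable for the enable bit below it (an assumption, not stated in the hunk). A condensed sketch of the 945 toggle:

    /* sketch; the bit-31 write-enable behaviour on 945 is an assumption */
    if (IS_I945G(dev) || IS_I945GM(dev)) {
            if (gpu_idle)   /* idle timer fired: allow memory self refresh */
                    I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
            else            /* new rendering arrived: force self refresh off */
                    I915_WRITE(FW_BLC_SELF,
                               (I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN) |
                               FW_BLC_SELF_EN_MASK);
    }

Here gpu_idle stands in for the driver's own busy tracking (dev_priv->busy plus the idle timer used in the hunks above).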
@@ -4382,7 +4418,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4382 if (IS_MOBILE(dev) && !IS_I830(dev)) 4418 if (IS_MOBILE(dev) && !IS_I830(dev))
4383 intel_lvds_init(dev); 4419 intel_lvds_init(dev);
4384 4420
4385 if (IS_IRONLAKE(dev)) { 4421 if (HAS_PCH_SPLIT(dev)) {
4386 int found; 4422 int found;
4387 4423
4388 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) 4424 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4451,7 +4487,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4451 DRM_DEBUG_KMS("probing DP_D\n"); 4487 DRM_DEBUG_KMS("probing DP_D\n");
4452 intel_dp_init(dev, DP_D); 4488 intel_dp_init(dev, DP_D);
4453 } 4489 }
4454 } else if (IS_I8XX(dev)) 4490 } else if (IS_GEN2(dev))
4455 intel_dvo_init(dev); 4491 intel_dvo_init(dev);
4456 4492
4457 if (SUPPORTS_TV(dev)) 4493 if (SUPPORTS_TV(dev))
@@ -4476,9 +4512,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
4476 intelfb_remove(dev, fb); 4512 intelfb_remove(dev, fb);
4477 4513
4478 drm_framebuffer_cleanup(fb); 4514 drm_framebuffer_cleanup(fb);
4479 mutex_lock(&dev->struct_mutex); 4515 drm_gem_object_unreference_unlocked(intel_fb->obj);
4480 drm_gem_object_unreference(intel_fb->obj);
4481 mutex_unlock(&dev->struct_mutex);
4482 4516
4483 kfree(intel_fb); 4517 kfree(intel_fb);
4484} 4518}
@@ -4541,9 +4575,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
4541 4575
4542 ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); 4576 ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
4543 if (ret) { 4577 if (ret) {
4544 mutex_lock(&dev->struct_mutex); 4578 drm_gem_object_unreference_unlocked(obj);
4545 drm_gem_object_unreference(obj);
4546 mutex_unlock(&dev->struct_mutex);
4547 return NULL; 4579 return NULL;
4548 } 4580 }
4549 4581
@@ -4591,6 +4623,91 @@ err_unref:
4591 return NULL; 4623 return NULL;
4592} 4624}
4593 4625
4626void ironlake_enable_drps(struct drm_device *dev)
4627{
4628 struct drm_i915_private *dev_priv = dev->dev_private;
4629 u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
4630 u8 fmax, fmin, fstart, vstart;
4631 int i = 0;
4632
4633 /* 100ms RC evaluation intervals */
4634 I915_WRITE(RCUPEI, 100000);
4635 I915_WRITE(RCDNEI, 100000);
4636
4637 /* Set max/min thresholds to 90ms and 80ms respectively */
4638 I915_WRITE(RCBMAXAVG, 90000);
4639 I915_WRITE(RCBMINAVG, 80000);
4640
4641 I915_WRITE(MEMIHYST, 1);
4642
4643 /* Set up min, max, and cur for interrupt handling */
4644 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4645 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4646 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4647 MEMMODE_FSTART_SHIFT;
4648 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
4649 PXVFREQ_PX_SHIFT;
4650
4651 dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
4652 dev_priv->min_delay = fmin;
4653 dev_priv->cur_delay = fstart;
4654
4655 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4656
4657 /*
4658 * Interrupts will be enabled in ironlake_irq_postinstall
4659 */
4660
4661 I915_WRITE(VIDSTART, vstart);
4662 POSTING_READ(VIDSTART);
4663
4664 rgvmodectl |= MEMMODE_SWMODE_EN;
4665 I915_WRITE(MEMMODECTL, rgvmodectl);
4666
4667 while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
4668 if (i++ > 100) {
4669 DRM_ERROR("stuck trying to change perf mode\n");
4670 break;
4671 }
4672 msleep(1);
4673 }
4674 msleep(1);
4675
4676 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4677 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4678 I915_WRITE(MEMSWCTL, rgvswctl);
4679 POSTING_READ(MEMSWCTL);
4680
4681 rgvswctl |= MEMCTL_CMD_STS;
4682 I915_WRITE(MEMSWCTL, rgvswctl);
4683}
4684
4685void ironlake_disable_drps(struct drm_device *dev)
4686{
4687 struct drm_i915_private *dev_priv = dev->dev_private;
4688 u32 rgvswctl;
4689 u8 fstart;
4690
4691 /* Ack interrupts, disable EFC interrupt */
4692 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4693 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4694 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4695 I915_WRITE(DEIIR, DE_PCU_EVENT);
4696 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4697
4698 /* Go back to the starting frequency */
4699 fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
4700 MEMMODE_FSTART_SHIFT;
4701 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4702 (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4703 I915_WRITE(MEMSWCTL, rgvswctl);
4704 msleep(1);
4705 rgvswctl |= MEMCTL_CMD_STS;
4706 I915_WRITE(MEMSWCTL, rgvswctl);
4707 msleep(1);
4708
4709}
4710
4594void intel_init_clock_gating(struct drm_device *dev) 4711void intel_init_clock_gating(struct drm_device *dev)
4595{ 4712{
4596 struct drm_i915_private *dev_priv = dev->dev_private; 4713 struct drm_i915_private *dev_priv = dev->dev_private;
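ironlake_enable_drps() above programs the allowed range (fmin/fstart, with fmax reserved for IPS) and the starting voltage, then leaves frequency selection to the PCU event path. The current operating point can be read back through MEMSTAT_ILK using the fields added to i915_reg.h earlier in this patch; a minimal sketch, with local names chosen for illustration:

    u32 memstat = I915_READ(MEMSTAT_ILK);
    u8 cur_pstate = (memstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT;
    u8 cur_vid    = (memstat & MEMSTAT_VID_MASK) >> MEMSTAT_VID_SHIFT;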
@@ -4599,7 +4716,7 @@ void intel_init_clock_gating(struct drm_device *dev)
4599 * Disable clock gating reported to work incorrectly according to the 4716 * Disable clock gating reported to work incorrectly according to the
4600 * specs, but enable as much else as we can. 4717 * specs, but enable as much else as we can.
4601 */ 4718 */
4602 if (IS_IRONLAKE(dev)) { 4719 if (HAS_PCH_SPLIT(dev)) {
4603 return; 4720 return;
4604 } else if (IS_G4X(dev)) { 4721 } else if (IS_G4X(dev)) {
4605 uint32_t dspclk_gate; 4722 uint32_t dspclk_gate;
@@ -4672,7 +4789,7 @@ static void intel_init_display(struct drm_device *dev)
4672 struct drm_i915_private *dev_priv = dev->dev_private; 4789 struct drm_i915_private *dev_priv = dev->dev_private;
4673 4790
4674 /* We always want a DPMS function */ 4791 /* We always want a DPMS function */
4675 if (IS_IRONLAKE(dev)) 4792 if (HAS_PCH_SPLIT(dev))
4676 dev_priv->display.dpms = ironlake_crtc_dpms; 4793 dev_priv->display.dpms = ironlake_crtc_dpms;
4677 else 4794 else
4678 dev_priv->display.dpms = i9xx_crtc_dpms; 4795 dev_priv->display.dpms = i9xx_crtc_dpms;
@@ -4715,7 +4832,7 @@ static void intel_init_display(struct drm_device *dev)
4715 i830_get_display_clock_speed; 4832 i830_get_display_clock_speed;
4716 4833
4717 /* For FIFO watermark updates */ 4834 /* For FIFO watermark updates */
4718 if (IS_IRONLAKE(dev)) 4835 if (HAS_PCH_SPLIT(dev))
4719 dev_priv->display.update_wm = NULL; 4836 dev_priv->display.update_wm = NULL;
4720 else if (IS_G4X(dev)) 4837 else if (IS_G4X(dev))
4721 dev_priv->display.update_wm = g4x_update_wm; 4838 dev_priv->display.update_wm = g4x_update_wm;
@@ -4774,11 +4891,6 @@ void intel_modeset_init(struct drm_device *dev)
4774 DRM_DEBUG_KMS("%d display pipe%s available.\n", 4891 DRM_DEBUG_KMS("%d display pipe%s available.\n",
4775 num_pipe, num_pipe > 1 ? "s" : ""); 4892 num_pipe, num_pipe > 1 ? "s" : "");
4776 4893
4777 if (IS_I85X(dev))
4778 pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
4779 else if (IS_I9XX(dev) || IS_G4X(dev))
4780 pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
4781
4782 for (i = 0; i < num_pipe; i++) { 4894 for (i = 0; i < num_pipe; i++) {
4783 intel_crtc_init(dev, i); 4895 intel_crtc_init(dev, i);
4784 } 4896 }
@@ -4787,6 +4899,9 @@ void intel_modeset_init(struct drm_device *dev)
4787 4899
4788 intel_init_clock_gating(dev); 4900 intel_init_clock_gating(dev);
4789 4901
4902 if (IS_IRONLAKE_M(dev))
4903 ironlake_enable_drps(dev);
4904
4790 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 4905 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
4791 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 4906 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
4792 (unsigned long)dev); 4907 (unsigned long)dev);
@@ -4834,6 +4949,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
4834 drm_gem_object_unreference(dev_priv->pwrctx); 4949 drm_gem_object_unreference(dev_priv->pwrctx);
4835 } 4950 }
4836 4951
4952 if (IS_IRONLAKE_M(dev))
4953 ironlake_disable_drps(dev);
4954
4837 mutex_unlock(&dev->struct_mutex); 4955 mutex_unlock(&dev->struct_mutex);
4838 4956
4839 drm_mode_config_cleanup(dev); 4957 drm_mode_config_cleanup(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 439506cefc14..3ef3a0d0edd0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -231,7 +231,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
231 */ 231 */
232 if (IS_eDP(intel_output)) 232 if (IS_eDP(intel_output))
233 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 233 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
234 else if (IS_IRONLAKE(dev)) 234 else if (HAS_PCH_SPLIT(dev))
235 aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ 235 aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
236 else 236 else
237 aux_clock_divider = intel_hrawclk(dev) / 2; 237 aux_clock_divider = intel_hrawclk(dev) / 2;
@@ -584,7 +584,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
584 intel_dp_compute_m_n(3, lane_count, 584 intel_dp_compute_m_n(3, lane_count,
585 mode->clock, adjusted_mode->clock, &m_n); 585 mode->clock, adjusted_mode->clock, &m_n);
586 586
587 if (IS_IRONLAKE(dev)) { 587 if (HAS_PCH_SPLIT(dev)) {
588 if (intel_crtc->pipe == 0) { 588 if (intel_crtc->pipe == 0) {
589 I915_WRITE(TRANSA_DATA_M1, 589 I915_WRITE(TRANSA_DATA_M1,
590 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 590 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -1176,7 +1176,7 @@ intel_dp_detect(struct drm_connector *connector)
1176 1176
1177 dp_priv->has_audio = false; 1177 dp_priv->has_audio = false;
1178 1178
1179 if (IS_IRONLAKE(dev)) 1179 if (HAS_PCH_SPLIT(dev))
1180 return ironlake_dp_detect(connector); 1180 return ironlake_dp_detect(connector);
1181 1181
1182 temp = I915_READ(PORT_HOTPLUG_EN); 1182 temp = I915_READ(PORT_HOTPLUG_EN);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a51573da1ff6..3a467ca57857 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,8 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
209extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 209extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
210 u16 *blue, int regno); 210 u16 *blue, int regno);
211extern void intel_init_clock_gating(struct drm_device *dev); 211extern void intel_init_clock_gating(struct drm_device *dev);
212extern void ironlake_enable_drps(struct drm_device *dev);
213extern void ironlake_disable_drps(struct drm_device *dev);
212 214
213extern int intel_framebuffer_create(struct drm_device *dev, 215extern int intel_framebuffer_create(struct drm_device *dev,
214 struct drm_mode_fb_cmd *mode_cmd, 216 struct drm_mode_fb_cmd *mode_cmd,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index aaabbcbe5905..8cd791dc5b29 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/fb.h> 36#include <linux/fb.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/vga_switcheroo.h>
38 39
39#include "drmP.h" 40#include "drmP.h"
40#include "drm.h" 41#include "drm.h"
@@ -235,6 +236,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
235 obj_priv->gtt_offset, fbo); 236 obj_priv->gtt_offset, fbo);
236 237
237 mutex_unlock(&dev->struct_mutex); 238 mutex_unlock(&dev->struct_mutex);
239 vga_switcheroo_client_fb_set(dev->pdev, info);
238 return 0; 240 return 0;
239 241
240out_unpin: 242out_unpin:
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0e268deed761..a30f8bfc1985 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
82 /* HW workaround, need to toggle enable bit off and on for 12bpc, but 82 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
83 * we do this anyway which shows more stable in testing. 83 * we do this anyway which shows more stable in testing.
84 */ 84 */
85 if (IS_IRONLAKE(dev)) { 85 if (HAS_PCH_SPLIT(dev)) {
86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); 86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
87 POSTING_READ(hdmi_priv->sdvox_reg); 87 POSTING_READ(hdmi_priv->sdvox_reg);
88 } 88 }
@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
99 /* HW workaround, need to write this twice for issue that may result 99 /* HW workaround, need to write this twice for issue that may result
100 * in first write getting masked. 100 * in first write getting masked.
101 */ 101 */
102 if (IS_IRONLAKE(dev)) { 102 if (HAS_PCH_SPLIT(dev)) {
103 I915_WRITE(hdmi_priv->sdvox_reg, temp); 103 I915_WRITE(hdmi_priv->sdvox_reg, temp);
104 POSTING_READ(hdmi_priv->sdvox_reg); 104 POSTING_READ(hdmi_priv->sdvox_reg);
105 } 105 }
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8673c735b8ab..fcc753ca5d94 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -128,7 +128,7 @@ intel_i2c_reset_gmbus(struct drm_device *dev)
128{ 128{
129 struct drm_i915_private *dev_priv = dev->dev_private; 129 struct drm_i915_private *dev_priv = dev->dev_private;
130 130
131 if (IS_IRONLAKE(dev)) { 131 if (HAS_PCH_SPLIT(dev)) {
132 I915_WRITE(PCH_GMBUS0, 0); 132 I915_WRITE(PCH_GMBUS0, 0);
133 } else { 133 } else {
134 I915_WRITE(GMBUS0, 0); 134 I915_WRITE(GMBUS0, 0);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c2e8a45780d5..14e516fdc2dd 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
56 struct drm_i915_private *dev_priv = dev->dev_private; 56 struct drm_i915_private *dev_priv = dev->dev_private;
57 u32 blc_pwm_ctl, reg; 57 u32 blc_pwm_ctl, reg;
58 58
59 if (IS_IRONLAKE(dev)) 59 if (HAS_PCH_SPLIT(dev))
60 reg = BLC_PWM_CPU_CTL; 60 reg = BLC_PWM_CPU_CTL;
61 else 61 else
62 reg = BLC_PWM_CTL; 62 reg = BLC_PWM_CTL;
@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
74 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
75 u32 reg; 75 u32 reg;
76 76
77 if (IS_IRONLAKE(dev)) 77 if (HAS_PCH_SPLIT(dev))
78 reg = BLC_PWM_PCH_CTL2; 78 reg = BLC_PWM_PCH_CTL2;
79 else 79 else
80 reg = BLC_PWM_CTL; 80 reg = BLC_PWM_CTL;
@@ -89,17 +89,22 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
89static void intel_lvds_set_power(struct drm_device *dev, bool on) 89static void intel_lvds_set_power(struct drm_device *dev, bool on)
90{ 90{
91 struct drm_i915_private *dev_priv = dev->dev_private; 91 struct drm_i915_private *dev_priv = dev->dev_private;
92 u32 pp_status, ctl_reg, status_reg; 92 u32 pp_status, ctl_reg, status_reg, lvds_reg;
93 93
94 if (IS_IRONLAKE(dev)) { 94 if (HAS_PCH_SPLIT(dev)) {
95 ctl_reg = PCH_PP_CONTROL; 95 ctl_reg = PCH_PP_CONTROL;
96 status_reg = PCH_PP_STATUS; 96 status_reg = PCH_PP_STATUS;
97 lvds_reg = PCH_LVDS;
97 } else { 98 } else {
98 ctl_reg = PP_CONTROL; 99 ctl_reg = PP_CONTROL;
99 status_reg = PP_STATUS; 100 status_reg = PP_STATUS;
101 lvds_reg = LVDS;
100 } 102 }
101 103
102 if (on) { 104 if (on) {
105 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
106 POSTING_READ(lvds_reg);
107
103 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | 108 I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
104 POWER_TARGET_ON); 109 POWER_TARGET_ON);
105 do { 110 do {
@@ -115,6 +120,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
115 do { 120 do {
116 pp_status = I915_READ(status_reg); 121 pp_status = I915_READ(status_reg);
117 } while (pp_status & PP_ON); 122 } while (pp_status & PP_ON);
123
124 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
125 POSTING_READ(lvds_reg);
118 } 126 }
119} 127}
120 128
@@ -137,7 +145,7 @@ static void intel_lvds_save(struct drm_connector *connector)
137 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 145 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
138 u32 pwm_ctl_reg; 146 u32 pwm_ctl_reg;
139 147
140 if (IS_IRONLAKE(dev)) { 148 if (HAS_PCH_SPLIT(dev)) {
141 pp_on_reg = PCH_PP_ON_DELAYS; 149 pp_on_reg = PCH_PP_ON_DELAYS;
142 pp_off_reg = PCH_PP_OFF_DELAYS; 150 pp_off_reg = PCH_PP_OFF_DELAYS;
143 pp_ctl_reg = PCH_PP_CONTROL; 151 pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +182,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
174 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 182 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
175 u32 pwm_ctl_reg; 183 u32 pwm_ctl_reg;
176 184
177 if (IS_IRONLAKE(dev)) { 185 if (HAS_PCH_SPLIT(dev)) {
178 pp_on_reg = PCH_PP_ON_DELAYS; 186 pp_on_reg = PCH_PP_ON_DELAYS;
179 pp_off_reg = PCH_PP_OFF_DELAYS; 187 pp_off_reg = PCH_PP_OFF_DELAYS;
180 pp_ctl_reg = PCH_PP_CONTROL; 188 pp_ctl_reg = PCH_PP_CONTROL;
@@ -297,7 +305,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
297 } 305 }
298 306
299 /* full screen scale for now */ 307 /* full screen scale for now */
300 if (IS_IRONLAKE(dev)) 308 if (HAS_PCH_SPLIT(dev))
301 goto out; 309 goto out;
302 310
303 /* 965+ wants fuzzy fitting */ 311 /* 965+ wants fuzzy fitting */
@@ -327,7 +335,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
327 * to register description and PRM. 335 * to register description and PRM.
328 * Change the value here to see the borders for debugging 336 * Change the value here to see the borders for debugging
329 */ 337 */
330 if (!IS_IRONLAKE(dev)) { 338 if (!HAS_PCH_SPLIT(dev)) {
331 I915_WRITE(BCLRPAT_A, 0); 339 I915_WRITE(BCLRPAT_A, 0);
332 I915_WRITE(BCLRPAT_B, 0); 340 I915_WRITE(BCLRPAT_B, 0);
333 } 341 }
@@ -548,7 +556,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
548 struct drm_i915_private *dev_priv = dev->dev_private; 556 struct drm_i915_private *dev_priv = dev->dev_private;
549 u32 reg; 557 u32 reg;
550 558
551 if (IS_IRONLAKE(dev)) 559 if (HAS_PCH_SPLIT(dev))
552 reg = BLC_PWM_CPU_CTL; 560 reg = BLC_PWM_CPU_CTL;
553 else 561 else
554 reg = BLC_PWM_CTL; 562 reg = BLC_PWM_CTL;
@@ -587,7 +595,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
587 * settings. 595 * settings.
588 */ 596 */
589 597
590 if (IS_IRONLAKE(dev)) 598 if (HAS_PCH_SPLIT(dev))
591 return; 599 return;
592 600
593 /* 601 /*
@@ -655,8 +663,15 @@ static const struct dmi_system_id bad_lid_status[] = {
655 */ 663 */
656static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) 664static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
657{ 665{
666 struct drm_device *dev = connector->dev;
658 enum drm_connector_status status = connector_status_connected; 667 enum drm_connector_status status = connector_status_connected;
659 668
669 /* ACPI lid methods were generally unreliable in this generation, so
670 * don't even bother.
671 */
672 if (IS_GEN2(dev))
673 return connector_status_connected;
674
660 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) 675 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
661 status = connector_status_disconnected; 676 status = connector_status_disconnected;
662 677
@@ -1020,7 +1035,7 @@ void intel_lvds_init(struct drm_device *dev)
1020 return; 1035 return;
1021 } 1036 }
1022 1037
1023 if (IS_IRONLAKE(dev)) { 1038 if (HAS_PCH_SPLIT(dev)) {
1024 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) 1039 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
1025 return; 1040 return;
1026 if (dev_priv->edp_support) { 1041 if (dev_priv->edp_support) {
@@ -1123,7 +1138,7 @@ void intel_lvds_init(struct drm_device *dev)
1123 */ 1138 */
1124 1139
1125 /* Ironlake: FIXME if still fail, not try pipe mode now */ 1140 /* Ironlake: FIXME if still fail, not try pipe mode now */
1126 if (IS_IRONLAKE(dev)) 1141 if (HAS_PCH_SPLIT(dev))
1127 goto failed; 1142 goto failed;
1128 1143
1129 lvds = I915_READ(LVDS); 1144 lvds = I915_READ(LVDS);
@@ -1144,7 +1159,7 @@ void intel_lvds_init(struct drm_device *dev)
1144 goto failed; 1159 goto failed;
1145 1160
1146out: 1161out:
1147 if (IS_IRONLAKE(dev)) { 1162 if (HAS_PCH_SPLIT(dev)) {
1148 u32 pwm; 1163 u32 pwm;
1149 /* make sure PWM is enabled */ 1164 /* make sure PWM is enabled */
1150 pwm = I915_READ(BLC_PWM_CPU_CTL2); 1165 pwm = I915_READ(BLC_PWM_CPU_CTL2);
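
The hunks above replace per-chip IS_IRONLAKE() tests with the broader HAS_PCH_SPLIT() capability check, so every PCH-attached LVDS path automatically covers later generations as well. A minimal sketch of what such a capability macro could look like is below; the real definition lives in i915_drv.h, is not part of this diff, and may differ.

/* Hypothetical sketch only: a capability macro grouping all chips whose
 * display block sits behind a PCH, so callers test the feature rather
 * than individual generations. The actual definition is in i915_drv.h. */
#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
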
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 2639591c72e9..d355d1d527e7 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -172,7 +172,7 @@ struct overlay_registers {
172#define OFC_UPDATE 0x1 172#define OFC_UPDATE 0x1
173 173
174#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) 174#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
175#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev)) 175#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
176 176
177 177
178static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 178static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
@@ -199,16 +199,11 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
199 199
200static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) 200static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
201{ 201{
202 struct drm_device *dev = overlay->dev;
203 drm_i915_private_t *dev_priv = dev->dev_private;
204
205 if (OVERLAY_NONPHYSICAL(overlay->dev)) 202 if (OVERLAY_NONPHYSICAL(overlay->dev))
206 io_mapping_unmap_atomic(overlay->virt_addr); 203 io_mapping_unmap_atomic(overlay->virt_addr);
207 204
208 overlay->virt_addr = NULL; 205 overlay->virt_addr = NULL;
209 206
 210 I915_READ(OVADD); /* flush wc caches */
211
212 return; 207 return;
213} 208}
214 209
@@ -225,9 +220,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
225 overlay->active = 1; 220 overlay->active = 1;
226 overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; 221 overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
227 222
228 BEGIN_LP_RING(6); 223 BEGIN_LP_RING(4);
229 OUT_RING(MI_FLUSH);
230 OUT_RING(MI_NOOP);
231 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); 224 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
232 OUT_RING(overlay->flip_addr | OFC_UPDATE); 225 OUT_RING(overlay->flip_addr | OFC_UPDATE);
233 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 226 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -267,9 +260,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
267 if (tmp & (1 << 17)) 260 if (tmp & (1 << 17))
268 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); 261 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
269 262
270 BEGIN_LP_RING(4); 263 BEGIN_LP_RING(2);
271 OUT_RING(MI_FLUSH);
272 OUT_RING(MI_NOOP);
273 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 264 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
274 OUT_RING(flip_addr); 265 OUT_RING(flip_addr);
275 ADVANCE_LP_RING(); 266 ADVANCE_LP_RING();
@@ -338,9 +329,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
338 /* wait for overlay to go idle */ 329 /* wait for overlay to go idle */
339 overlay->hw_wedged = SWITCH_OFF_STAGE_1; 330 overlay->hw_wedged = SWITCH_OFF_STAGE_1;
340 331
341 BEGIN_LP_RING(6); 332 BEGIN_LP_RING(4);
342 OUT_RING(MI_FLUSH);
343 OUT_RING(MI_NOOP);
344 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); 333 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
345 OUT_RING(flip_addr); 334 OUT_RING(flip_addr);
346 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 335 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -358,9 +347,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
358 /* turn overlay off */ 347 /* turn overlay off */
359 overlay->hw_wedged = SWITCH_OFF_STAGE_2; 348 overlay->hw_wedged = SWITCH_OFF_STAGE_2;
360 349
361 BEGIN_LP_RING(6); 350 BEGIN_LP_RING(4);
362 OUT_RING(MI_FLUSH);
363 OUT_RING(MI_NOOP);
364 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); 351 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
365 OUT_RING(flip_addr); 352 OUT_RING(flip_addr);
366 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 353 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -435,9 +422,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
435 422
436 overlay->hw_wedged = SWITCH_OFF_STAGE_2; 423 overlay->hw_wedged = SWITCH_OFF_STAGE_2;
437 424
438 BEGIN_LP_RING(6); 425 BEGIN_LP_RING(4);
439 OUT_RING(MI_FLUSH);
440 OUT_RING(MI_NOOP);
441 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); 426 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
442 OUT_RING(flip_addr); 427 OUT_RING(flip_addr);
443 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 428 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -1179,7 +1164,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1179out_unlock: 1164out_unlock:
1180 mutex_unlock(&dev->struct_mutex); 1165 mutex_unlock(&dev->struct_mutex);
1181 mutex_unlock(&dev->mode_config.mutex); 1166 mutex_unlock(&dev->mode_config.mutex);
1182 drm_gem_object_unreference(new_bo); 1167 drm_gem_object_unreference_unlocked(new_bo);
1183 kfree(params); 1168 kfree(params);
1184 1169
1185 return ret; 1170 return ret;
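
For reference, the trimmed ring emission in the continue path can be reconstructed from the hunk above (the on/off paths shrink the same way, from six dwords to four):

/* Overlay continue after this change: two dwords instead of four,
 * with the explicit MI_FLUSH/MI_NOOP pair gone. */
BEGIN_LP_RING(2);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
ADVANCE_LP_RING();
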
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 82678d30ab06..48daee5c9c63 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -35,6 +35,7 @@
35#include "i915_drm.h" 35#include "i915_drm.h"
36#include "i915_drv.h" 36#include "i915_drv.h"
37#include "intel_sdvo_regs.h" 37#include "intel_sdvo_regs.h"
38#include <linux/dmi.h>
38 39
39static char *tv_format_names[] = { 40static char *tv_format_names[] = {
40 "NTSC_M" , "NTSC_J" , "NTSC_443", 41 "NTSC_M" , "NTSC_J" , "NTSC_443",
@@ -2283,6 +2284,25 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
2283 return 0x72; 2284 return 0x72;
2284} 2285}
2285 2286
2287static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
2288{
2289 DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
2290 return 1;
2291}
2292
2293static struct dmi_system_id intel_sdvo_bad_tv[] = {
2294 {
2295 .callback = intel_sdvo_bad_tv_callback,
2296 .ident = "IntelG45/ICH10R/DME1737",
2297 .matches = {
2298 DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
2299 DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
2300 },
2301 },
2302
2303 { } /* terminating entry */
2304};
2305
2286static bool 2306static bool
2287intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) 2307intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2288{ 2308{
@@ -2323,7 +2343,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2323 (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2343 (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2324 (1 << INTEL_ANALOG_CLONE_BIT); 2344 (1 << INTEL_ANALOG_CLONE_BIT);
2325 } 2345 }
2326 } else if (flags & SDVO_OUTPUT_SVID0) { 2346 } else if ((flags & SDVO_OUTPUT_SVID0) &&
2347 !dmi_check_system(intel_sdvo_bad_tv)) {
2327 2348
2328 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; 2349 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
2329 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; 2350 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
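
The quirk above relies on the usual dmi_check_system() semantics: the table is walked, the callback runs for each matching board, and the number of matches is returned, so a non-zero result suppresses S-Video setup on the listed machine. A hedged sketch of extending the table with a further, purely hypothetical entry:

/* Hypothetical additional quirk entry; vendor/product strings are
 * placeholders, not real identifiers. New entries go before the
 * terminating { }. */
{
	.callback = intel_sdvo_bad_tv_callback,
	.ident = "Example broken SDVO TV board",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE VENDOR"),
		DMI_MATCH(DMI_PRODUCT_NAME, "EXAMPLE PRODUCT"),
	},
},
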
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 48c290b5da8c..32db806f3b5a 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -16,7 +16,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
16 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ 16 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
17 nv04_graph.o nv10_graph.o nv20_graph.o \ 17 nv04_graph.o nv10_graph.o nv20_graph.o \
18 nv40_graph.o nv50_graph.o \ 18 nv40_graph.o nv50_graph.o \
19 nv40_grctx.o \ 19 nv40_grctx.o nv50_grctx.o \
20 nv04_instmem.o nv50_instmem.o \ 20 nv04_instmem.o nv50_instmem.o \
21 nv50_crtc.o nv50_dac.o nv50_sor.o \ 21 nv50_crtc.o nv50_dac.o nv50_sor.o \
22 nv50_cursor.o nv50_display.o nv50_fbcon.o \ 22 nv50_cursor.o nv50_display.o nv50_fbcon.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 48227e744753..0e0730a53137 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -11,6 +11,8 @@
11#include "nouveau_drm.h" 11#include "nouveau_drm.h"
12#include "nv50_display.h" 12#include "nv50_display.h"
13 13
14#include <linux/vga_switcheroo.h>
15
14#define NOUVEAU_DSM_SUPPORTED 0x00 16#define NOUVEAU_DSM_SUPPORTED 0x00
15#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00 17#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
16 18
@@ -28,31 +30,30 @@
28#define NOUVEAU_DSM_POWER_SPEED 0x01 30#define NOUVEAU_DSM_POWER_SPEED 0x01
29#define NOUVEAU_DSM_POWER_STAMINA 0x02 31#define NOUVEAU_DSM_POWER_STAMINA 0x02
30 32
31static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result) 33static struct nouveau_dsm_priv {
32{ 34 bool dsm_detected;
33 static char muid[] = { 35 acpi_handle dhandle;
34 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, 36 acpi_handle dsm_handle;
35 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, 37} nouveau_dsm_priv;
36 }; 38
39static const char nouveau_dsm_muid[] = {
40 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
41 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
42};
37 43
38 struct pci_dev *pdev = dev->pdev; 44static int nouveau_dsm(acpi_handle handle, int func, int arg, int *result)
39 struct acpi_handle *handle; 45{
40 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 46 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
41 struct acpi_object_list input; 47 struct acpi_object_list input;
42 union acpi_object params[4]; 48 union acpi_object params[4];
43 union acpi_object *obj; 49 union acpi_object *obj;
44 int err; 50 int err;
45 51
46 handle = DEVICE_ACPI_HANDLE(&pdev->dev);
47
48 if (!handle)
49 return -ENODEV;
50
51 input.count = 4; 52 input.count = 4;
52 input.pointer = params; 53 input.pointer = params;
53 params[0].type = ACPI_TYPE_BUFFER; 54 params[0].type = ACPI_TYPE_BUFFER;
54 params[0].buffer.length = sizeof(muid); 55 params[0].buffer.length = sizeof(nouveau_dsm_muid);
55 params[0].buffer.pointer = (char *)muid; 56 params[0].buffer.pointer = (char *)nouveau_dsm_muid;
56 params[1].type = ACPI_TYPE_INTEGER; 57 params[1].type = ACPI_TYPE_INTEGER;
57 params[1].integer.value = 0x00000102; 58 params[1].integer.value = 0x00000102;
58 params[2].type = ACPI_TYPE_INTEGER; 59 params[2].type = ACPI_TYPE_INTEGER;
@@ -62,7 +63,7 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
62 63
63 err = acpi_evaluate_object(handle, "_DSM", &input, &output); 64 err = acpi_evaluate_object(handle, "_DSM", &input, &output);
64 if (err) { 65 if (err) {
65 NV_INFO(dev, "failed to evaluate _DSM: %d\n", err); 66 printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
66 return err; 67 return err;
67 } 68 }
68 69
@@ -86,40 +87,119 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
86 return 0; 87 return 0;
87} 88}
88 89
89int nouveau_hybrid_setup(struct drm_device *dev) 90static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
90{ 91{
91 int result; 92 return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
92 93}
93 if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE, 94
94 &result)) 95static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state)
95 return -ENODEV; 96{
96 97 int arg;
97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); 98 if (state == VGA_SWITCHEROO_ON)
98 99 arg = NOUVEAU_DSM_POWER_SPEED;
99 if (result) { /* Ensure that the external GPU is enabled */ 100 else
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL); 101 arg = NOUVEAU_DSM_POWER_STAMINA;
101 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED, 102 nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL);
102 NULL); 103 return 0;
103 } else { /* Stamina mode - disable the external GPU */ 104}
104 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, 105
105 NULL); 106static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, 107{
107 NULL); 108 if (id == VGA_SWITCHEROO_IGD)
108 } 109 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA);
110 else
111 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED);
112}
109 113
114static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
115 enum vga_switcheroo_state state)
116{
117 if (id == VGA_SWITCHEROO_IGD)
118 return 0;
119
120 return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state);
121}
122
123static int nouveau_dsm_init(void)
124{
110 return 0; 125 return 0;
111} 126}
112 127
113bool nouveau_dsm_probe(struct drm_device *dev) 128static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
114{ 129{
115 int support = 0; 130 if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
131 return VGA_SWITCHEROO_IGD;
132 else
133 return VGA_SWITCHEROO_DIS;
134}
135
136static struct vga_switcheroo_handler nouveau_dsm_handler = {
137 .switchto = nouveau_dsm_switchto,
138 .power_state = nouveau_dsm_power_state,
139 .init = nouveau_dsm_init,
140 .get_client_id = nouveau_dsm_get_client_id,
141};
116 142
117 if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED, 143static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
118 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support)) 144{
145 acpi_handle dhandle, nvidia_handle;
146 acpi_status status;
147 int ret;
148 uint32_t result;
149
150 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
151 if (!dhandle)
152 return false;
153 status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
154 if (ACPI_FAILURE(status)) {
119 return false; 155 return false;
156 }
120 157
 121 if (!support) 158 ret = nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED,
159 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
160 if (ret < 0)
122 return false; 161 return false;
123 162
163 nouveau_dsm_priv.dhandle = dhandle;
164 nouveau_dsm_priv.dsm_handle = nvidia_handle;
124 return true; 165 return true;
125} 166}
167
168static bool nouveau_dsm_detect(void)
169{
170 char acpi_method_name[255] = { 0 };
171 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
172 struct pci_dev *pdev = NULL;
173 int has_dsm = 0;
174 int vga_count = 0;
175 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
176 vga_count++;
177
178 has_dsm |= (nouveau_dsm_pci_probe(pdev) == true);
179 }
180
181 if (vga_count == 2 && has_dsm) {
182 acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer);
183 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
184 acpi_method_name);
185 nouveau_dsm_priv.dsm_detected = true;
186 return true;
187 }
188 return false;
189}
190
191void nouveau_register_dsm_handler(void)
192{
193 bool r;
194
195 r = nouveau_dsm_detect();
196 if (!r)
197 return;
198
199 vga_switcheroo_register_handler(&nouveau_dsm_handler);
200}
201
202void nouveau_unregister_dsm_handler(void)
203{
204 vga_switcheroo_unregister_handler();
205}
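
nouveau_register_dsm_handler() and nouveau_unregister_dsm_handler() are the only entry points the rest of the driver needs. A hedged sketch of the expected call sites follows; the actual hook-up in nouveau_drv.c is not part of this hunk and may differ.

/* Hypothetical call sites, assuming registration happens once at module
 * load and is dropped on unload; harmless when no DSM mux is detected. */
static int __init nouveau_init(void)
{
	nouveau_register_dsm_handler();
	return drm_init(&driver);
}

static void __exit nouveau_exit(void)
{
	drm_exit(&driver);
	nouveau_unregister_dsm_handler();
}
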
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0e9cd1d49130..71247da17da5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -311,11 +311,11 @@ valid_reg(struct nvbios *bios, uint32_t reg)
311 311
312 /* C51 has misaligned regs on purpose. Marvellous */ 312 /* C51 has misaligned regs on purpose. Marvellous */
313 if (reg & 0x2 || 313 if (reg & 0x2 ||
314 (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) 314 (reg & 0x1 && dev_priv->vbios.chip_version != 0x51))
315 NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg); 315 NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
316 316
317 /* warn on C51 regs that haven't been verified accessible in tracing */ 317 /* warn on C51 regs that haven't been verified accessible in tracing */
318 if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 && 318 if (reg & 0x1 && dev_priv->vbios.chip_version == 0x51 &&
319 reg != 0x130d && reg != 0x1311 && reg != 0x60081d) 319 reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
320 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n", 320 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
321 reg); 321 reg);
@@ -420,7 +420,7 @@ bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
420 LOG_OLD_VALUE(bios_rd32(bios, reg)); 420 LOG_OLD_VALUE(bios_rd32(bios, reg));
421 BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data); 421 BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
422 422
423 if (dev_priv->VBIOS.execute) { 423 if (dev_priv->vbios.execute) {
424 still_alive(); 424 still_alive();
425 nv_wr32(bios->dev, reg, data); 425 nv_wr32(bios->dev, reg, data);
426 } 426 }
@@ -647,7 +647,7 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
647 reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16); 647 reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
648 reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1; 648 reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
649 649
650 if (dev_priv->VBIOS.execute) { 650 if (dev_priv->vbios.execute) {
651 still_alive(); 651 still_alive();
652 nv_wr32(dev, reg + 4, reg1); 652 nv_wr32(dev, reg + 4, reg1);
653 nv_wr32(dev, reg + 0, reg0); 653 nv_wr32(dev, reg + 0, reg0);
@@ -689,7 +689,7 @@ setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
689static int dcb_entry_idx_from_crtchead(struct drm_device *dev) 689static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
690{ 690{
691 struct drm_nouveau_private *dev_priv = dev->dev_private; 691 struct drm_nouveau_private *dev_priv = dev->dev_private;
692 struct nvbios *bios = &dev_priv->VBIOS; 692 struct nvbios *bios = &dev_priv->vbios;
693 693
694 /* 694 /*
695 * For the results of this function to be correct, CR44 must have been 695 * For the results of this function to be correct, CR44 must have been
@@ -700,7 +700,7 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
700 700
701 uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0); 701 uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
702 702
703 if (dcb_entry > bios->bdcb.dcb.entries) { 703 if (dcb_entry > bios->dcb.entries) {
704 NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently " 704 NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
705 "(%02X)\n", dcb_entry); 705 "(%02X)\n", dcb_entry);
706 dcb_entry = 0x7f; /* unused / invalid marker */ 706 dcb_entry = 0x7f; /* unused / invalid marker */
@@ -713,25 +713,26 @@ static struct nouveau_i2c_chan *
713init_i2c_device_find(struct drm_device *dev, int i2c_index) 713init_i2c_device_find(struct drm_device *dev, int i2c_index)
714{ 714{
715 struct drm_nouveau_private *dev_priv = dev->dev_private; 715 struct drm_nouveau_private *dev_priv = dev->dev_private;
716 struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb; 716 struct dcb_table *dcb = &dev_priv->vbios.dcb;
717 717
718 if (i2c_index == 0xff) { 718 if (i2c_index == 0xff) {
719 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */ 719 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
720 int idx = dcb_entry_idx_from_crtchead(dev), shift = 0; 720 int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
721 int default_indices = bdcb->i2c_default_indices; 721 int default_indices = dcb->i2c_default_indices;
722 722
723 if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default) 723 if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
724 shift = 4; 724 shift = 4;
725 725
726 i2c_index = (default_indices >> shift) & 0xf; 726 i2c_index = (default_indices >> shift) & 0xf;
727 } 727 }
728 if (i2c_index == 0x80) /* g80+ */ 728 if (i2c_index == 0x80) /* g80+ */
729 i2c_index = bdcb->i2c_default_indices & 0xf; 729 i2c_index = dcb->i2c_default_indices & 0xf;
730 730
731 return nouveau_i2c_find(dev, i2c_index); 731 return nouveau_i2c_find(dev, i2c_index);
732} 732}
733 733
734static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv) 734static uint32_t
735get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
735{ 736{
736 /* 737 /*
737 * For mlv < 0x80, it is an index into a table of TMDS base addresses. 738 * For mlv < 0x80, it is an index into a table of TMDS base addresses.
@@ -744,6 +745,7 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
744 */ 745 */
745 746
746 struct drm_nouveau_private *dev_priv = dev->dev_private; 747 struct drm_nouveau_private *dev_priv = dev->dev_private;
748 struct nvbios *bios = &dev_priv->vbios;
747 const int pramdac_offset[13] = { 749 const int pramdac_offset[13] = {
748 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 }; 750 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
749 const uint32_t pramdac_table[4] = { 751 const uint32_t pramdac_table[4] = {
@@ -756,13 +758,12 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
756 dcb_entry = dcb_entry_idx_from_crtchead(dev); 758 dcb_entry = dcb_entry_idx_from_crtchead(dev);
757 if (dcb_entry == 0x7f) 759 if (dcb_entry == 0x7f)
758 return 0; 760 return 0;
759 dacoffset = pramdac_offset[ 761 dacoffset = pramdac_offset[bios->dcb.entry[dcb_entry].or];
760 dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or];
761 if (mlv == 0x81) 762 if (mlv == 0x81)
762 dacoffset ^= 8; 763 dacoffset ^= 8;
763 return 0x6808b0 + dacoffset; 764 return 0x6808b0 + dacoffset;
764 } else { 765 } else {
765 if (mlv > ARRAY_SIZE(pramdac_table)) { 766 if (mlv >= ARRAY_SIZE(pramdac_table)) {
766 NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n", 767 NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
767 mlv); 768 mlv);
768 return 0; 769 return 0;
@@ -2574,19 +2575,19 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2574 2575
2575 const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 2576 const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
2576 const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; 2577 const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
2577 const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr]; 2578 const uint8_t *gpio_table = &bios->data[bios->dcb.gpio_table_ptr];
2578 const uint8_t *gpio_entry; 2579 const uint8_t *gpio_entry;
2579 int i; 2580 int i;
2580 2581
2581 if (!iexec->execute) 2582 if (!iexec->execute)
2582 return 1; 2583 return 1;
2583 2584
2584 if (bios->bdcb.version != 0x40) { 2585 if (bios->dcb.version != 0x40) {
2585 NV_ERROR(bios->dev, "DCB table not version 4.0\n"); 2586 NV_ERROR(bios->dev, "DCB table not version 4.0\n");
2586 return 0; 2587 return 0;
2587 } 2588 }
2588 2589
2589 if (!bios->bdcb.gpio_table_ptr) { 2590 if (!bios->dcb.gpio_table_ptr) {
2590 NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n"); 2591 NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
2591 return 0; 2592 return 0;
2592 } 2593 }
@@ -3123,7 +3124,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
3123 struct dcb_entry *dcbent, int head, bool dl) 3124 struct dcb_entry *dcbent, int head, bool dl)
3124{ 3125{
3125 struct drm_nouveau_private *dev_priv = dev->dev_private; 3126 struct drm_nouveau_private *dev_priv = dev->dev_private;
3126 struct nvbios *bios = &dev_priv->VBIOS; 3127 struct nvbios *bios = &dev_priv->vbios;
3127 struct init_exec iexec = {true, false}; 3128 struct init_exec iexec = {true, false};
3128 3129
3129 NV_TRACE(dev, "0x%04X: Parsing digital output script table\n", 3130 NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
@@ -3140,7 +3141,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
3140static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script) 3141static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
3141{ 3142{
3142 struct drm_nouveau_private *dev_priv = dev->dev_private; 3143 struct drm_nouveau_private *dev_priv = dev->dev_private;
3143 struct nvbios *bios = &dev_priv->VBIOS; 3144 struct nvbios *bios = &dev_priv->vbios;
3144 uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0); 3145 uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
3145 uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]); 3146 uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
3146 3147
@@ -3194,7 +3195,7 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
3194 * of a list of pxclks and script pointers. 3195 * of a list of pxclks and script pointers.
3195 */ 3196 */
3196 struct drm_nouveau_private *dev_priv = dev->dev_private; 3197 struct drm_nouveau_private *dev_priv = dev->dev_private;
3197 struct nvbios *bios = &dev_priv->VBIOS; 3198 struct nvbios *bios = &dev_priv->vbios;
3198 unsigned int outputset = (dcbent->or == 4) ? 1 : 0; 3199 unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
3199 uint16_t scriptptr = 0, clktable; 3200 uint16_t scriptptr = 0, clktable;
3200 uint8_t clktableptr = 0; 3201 uint8_t clktableptr = 0;
@@ -3261,7 +3262,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
3261 */ 3262 */
3262 3263
3263 struct drm_nouveau_private *dev_priv = dev->dev_private; 3264 struct drm_nouveau_private *dev_priv = dev->dev_private;
3264 struct nvbios *bios = &dev_priv->VBIOS; 3265 struct nvbios *bios = &dev_priv->vbios;
3265 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; 3266 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
3266 uint32_t sel_clk_binding, sel_clk; 3267 uint32_t sel_clk_binding, sel_clk;
3267 int ret; 3268 int ret;
@@ -3395,7 +3396,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
3395#ifndef __powerpc__ 3396#ifndef __powerpc__
3396 NV_ERROR(dev, "Pointer to flat panel table invalid\n"); 3397 NV_ERROR(dev, "Pointer to flat panel table invalid\n");
3397#endif 3398#endif
3398 bios->pub.digital_min_front_porch = 0x4b; 3399 bios->digital_min_front_porch = 0x4b;
3399 return 0; 3400 return 0;
3400 } 3401 }
3401 3402
@@ -3428,7 +3429,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
3428 * fptable[4] is the minimum 3429 * fptable[4] is the minimum
3429 * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap 3430 * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
3430 */ 3431 */
3431 bios->pub.digital_min_front_porch = fptable[4]; 3432 bios->digital_min_front_porch = fptable[4];
3432 ofs = -7; 3433 ofs = -7;
3433 break; 3434 break;
3434 default: 3435 default:
@@ -3467,7 +3468,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
3467 3468
3468 /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */ 3469 /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
3469 if (lth.lvds_ver > 0x10) 3470 if (lth.lvds_ver > 0x10)
3470 bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf; 3471 bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
3471 3472
3472 /* 3473 /*
3473 * If either the strap or xlated fpindex value are 0xf there is no 3474 * If either the strap or xlated fpindex value are 0xf there is no
@@ -3491,7 +3492,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
3491bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode) 3492bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
3492{ 3493{
3493 struct drm_nouveau_private *dev_priv = dev->dev_private; 3494 struct drm_nouveau_private *dev_priv = dev->dev_private;
3494 struct nvbios *bios = &dev_priv->VBIOS; 3495 struct nvbios *bios = &dev_priv->vbios;
3495 uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr]; 3496 uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
3496 3497
3497 if (!mode) /* just checking whether we can produce a mode */ 3498 if (!mode) /* just checking whether we can produce a mode */
@@ -3562,11 +3563,11 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
3562 * until later, when this function should be called with non-zero pxclk 3563 * until later, when this function should be called with non-zero pxclk
3563 */ 3564 */
3564 struct drm_nouveau_private *dev_priv = dev->dev_private; 3565 struct drm_nouveau_private *dev_priv = dev->dev_private;
3565 struct nvbios *bios = &dev_priv->VBIOS; 3566 struct nvbios *bios = &dev_priv->vbios;
3566 int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0; 3567 int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
3567 struct lvdstableheader lth; 3568 struct lvdstableheader lth;
3568 uint16_t lvdsofs; 3569 uint16_t lvdsofs;
3569 int ret, chip_version = bios->pub.chip_version; 3570 int ret, chip_version = bios->chip_version;
3570 3571
3571 ret = parse_lvds_manufacturer_table_header(dev, bios, &lth); 3572 ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
3572 if (ret) 3573 if (ret)
@@ -3682,7 +3683,7 @@ bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
3682 uint16_t record, int record_len, int record_nr) 3683 uint16_t record, int record_len, int record_nr)
3683{ 3684{
3684 struct drm_nouveau_private *dev_priv = dev->dev_private; 3685 struct drm_nouveau_private *dev_priv = dev->dev_private;
3685 struct nvbios *bios = &dev_priv->VBIOS; 3686 struct nvbios *bios = &dev_priv->vbios;
3686 uint32_t entry; 3687 uint32_t entry;
3687 uint16_t table; 3688 uint16_t table;
3688 int i, v; 3689 int i, v;
@@ -3716,7 +3717,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
3716 int *length) 3717 int *length)
3717{ 3718{
3718 struct drm_nouveau_private *dev_priv = dev->dev_private; 3719 struct drm_nouveau_private *dev_priv = dev->dev_private;
3719 struct nvbios *bios = &dev_priv->VBIOS; 3720 struct nvbios *bios = &dev_priv->vbios;
3720 uint8_t *table; 3721 uint8_t *table;
3721 3722
3722 if (!bios->display.dp_table_ptr) { 3723 if (!bios->display.dp_table_ptr) {
@@ -3725,7 +3726,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
3725 } 3726 }
3726 table = &bios->data[bios->display.dp_table_ptr]; 3727 table = &bios->data[bios->display.dp_table_ptr];
3727 3728
3728 if (table[0] != 0x21) { 3729 if (table[0] != 0x20 && table[0] != 0x21) {
3729 NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n", 3730 NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
3730 table[0]); 3731 table[0]);
3731 return NULL; 3732 return NULL;
@@ -3765,7 +3766,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3765 */ 3766 */
3766 3767
3767 struct drm_nouveau_private *dev_priv = dev->dev_private; 3768 struct drm_nouveau_private *dev_priv = dev->dev_private;
3768 struct nvbios *bios = &dev_priv->VBIOS; 3769 struct nvbios *bios = &dev_priv->vbios;
3769 uint8_t *table = &bios->data[bios->display.script_table_ptr]; 3770 uint8_t *table = &bios->data[bios->display.script_table_ptr];
3770 uint8_t *otable = NULL; 3771 uint8_t *otable = NULL;
3771 uint16_t script; 3772 uint16_t script;
@@ -3918,8 +3919,8 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
3918 */ 3919 */
3919 3920
3920 struct drm_nouveau_private *dev_priv = dev->dev_private; 3921 struct drm_nouveau_private *dev_priv = dev->dev_private;
3921 struct nvbios *bios = &dev_priv->VBIOS; 3922 struct nvbios *bios = &dev_priv->vbios;
3922 int cv = bios->pub.chip_version; 3923 int cv = bios->chip_version;
3923 uint16_t clktable = 0, scriptptr; 3924 uint16_t clktable = 0, scriptptr;
3924 uint32_t sel_clk_binding, sel_clk; 3925 uint32_t sel_clk_binding, sel_clk;
3925 3926
@@ -3978,8 +3979,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
3978 */ 3979 */
3979 3980
3980 struct drm_nouveau_private *dev_priv = dev->dev_private; 3981 struct drm_nouveau_private *dev_priv = dev->dev_private;
3981 struct nvbios *bios = &dev_priv->VBIOS; 3982 struct nvbios *bios = &dev_priv->vbios;
3982 int cv = bios->pub.chip_version, pllindex = 0; 3983 int cv = bios->chip_version, pllindex = 0;
3983 uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0; 3984 uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
3984 uint32_t crystal_strap_mask, crystal_straps; 3985 uint32_t crystal_strap_mask, crystal_straps;
3985 3986
@@ -4332,7 +4333,7 @@ static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint
4332 */ 4333 */
4333 4334
4334 bios->major_version = bios->data[offset + 3]; 4335 bios->major_version = bios->data[offset + 3];
4335 bios->pub.chip_version = bios->data[offset + 2]; 4336 bios->chip_version = bios->data[offset + 2];
4336 NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n", 4337 NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
4337 bios->data[offset + 3], bios->data[offset + 2], 4338 bios->data[offset + 3], bios->data[offset + 2],
4338 bios->data[offset + 1], bios->data[offset]); 4339 bios->data[offset + 1], bios->data[offset]);
@@ -4402,7 +4403,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
4402 } 4403 }
4403 4404
4404 /* First entry is normal dac, 2nd tv-out perhaps? */ 4405 /* First entry is normal dac, 2nd tv-out perhaps? */
4405 bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff; 4406 bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
4406 4407
4407 return 0; 4408 return 0;
4408} 4409}
@@ -4526,8 +4527,8 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
4526 return -ENOSYS; 4527 return -ENOSYS;
4527 } 4528 }
4528 4529
4529 bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]); 4530 bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
4530 bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]); 4531 bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
4531 4532
4532 return 0; 4533 return 0;
4533} 4534}
@@ -4796,11 +4797,11 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
4796 uint16_t legacy_scripts_offset, legacy_i2c_offset; 4797 uint16_t legacy_scripts_offset, legacy_i2c_offset;
4797 4798
4798 /* load needed defaults in case we can't parse this info */ 4799 /* load needed defaults in case we can't parse this info */
4799 bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX; 4800 bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
4800 bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX; 4801 bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
4801 bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX; 4802 bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
4802 bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX; 4803 bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
4803 bios->pub.digital_min_front_porch = 0x4b; 4804 bios->digital_min_front_porch = 0x4b;
4804 bios->fmaxvco = 256000; 4805 bios->fmaxvco = 256000;
4805 bios->fminvco = 128000; 4806 bios->fminvco = 128000;
4806 bios->fp.duallink_transition_clk = 90000; 4807 bios->fp.duallink_transition_clk = 90000;
@@ -4907,10 +4908,10 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
4907 bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset]; 4908 bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
4908 bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1]; 4909 bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
4909 bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2]; 4910 bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
4910 bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4]; 4911 bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
4911 bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5]; 4912 bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
4912 bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6]; 4913 bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
4913 bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7]; 4914 bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
4914 4915
4915 if (bmplength > 74) { 4916 if (bmplength > 74) {
4916 bios->fmaxvco = ROM32(bmp[67]); 4917 bios->fmaxvco = ROM32(bmp[67]);
@@ -4984,7 +4985,8 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
4984 else 4985 else
4985 NV_WARN(dev, 4986 NV_WARN(dev,
4986 "DCB I2C table has more entries than indexable " 4987 "DCB I2C table has more entries than indexable "
4987 "(%d entries, max index 15)\n", i2ctable[2]); 4988 "(%d entries, max %d)\n", i2ctable[2],
4989 DCB_MAX_NUM_I2C_ENTRIES);
4988 entry_len = i2ctable[3]; 4990 entry_len = i2ctable[3];
4989 /* [4] is i2c_default_indices, read in parse_dcb_table() */ 4991 /* [4] is i2c_default_indices, read in parse_dcb_table() */
4990 } 4992 }
@@ -5000,8 +5002,8 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
5000 5002
5001 if (index == 0xf) 5003 if (index == 0xf)
5002 return 0; 5004 return 0;
5003 if (index > i2c_entries) { 5005 if (index >= i2c_entries) {
5004 NV_ERROR(dev, "DCB I2C index too big (%d > %d)\n", 5006 NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
5005 index, i2ctable[2]); 5007 index, i2ctable[2]);
5006 return -ENOENT; 5008 return -ENOENT;
5007 } 5009 }
@@ -5036,7 +5038,7 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
5036static struct dcb_gpio_entry * 5038static struct dcb_gpio_entry *
5037new_gpio_entry(struct nvbios *bios) 5039new_gpio_entry(struct nvbios *bios)
5038{ 5040{
5039 struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio; 5041 struct dcb_gpio_table *gpio = &bios->dcb.gpio;
5040 5042
5041 return &gpio->entry[gpio->entries++]; 5043 return &gpio->entry[gpio->entries++];
5042} 5044}
@@ -5045,14 +5047,14 @@ struct dcb_gpio_entry *
5045nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag) 5047nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
5046{ 5048{
5047 struct drm_nouveau_private *dev_priv = dev->dev_private; 5049 struct drm_nouveau_private *dev_priv = dev->dev_private;
5048 struct nvbios *bios = &dev_priv->VBIOS; 5050 struct nvbios *bios = &dev_priv->vbios;
5049 int i; 5051 int i;
5050 5052
5051 for (i = 0; i < bios->bdcb.gpio.entries; i++) { 5053 for (i = 0; i < bios->dcb.gpio.entries; i++) {
5052 if (bios->bdcb.gpio.entry[i].tag != tag) 5054 if (bios->dcb.gpio.entry[i].tag != tag)
5053 continue; 5055 continue;
5054 5056
5055 return &bios->bdcb.gpio.entry[i]; 5057 return &bios->dcb.gpio.entry[i];
5056 } 5058 }
5057 5059
5058 return NULL; 5060 return NULL;
@@ -5100,7 +5102,7 @@ static void
5100parse_dcb_gpio_table(struct nvbios *bios) 5102parse_dcb_gpio_table(struct nvbios *bios)
5101{ 5103{
5102 struct drm_device *dev = bios->dev; 5104 struct drm_device *dev = bios->dev;
5103 uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr; 5105 uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
5104 uint8_t *gpio_table = &bios->data[gpio_table_ptr]; 5106 uint8_t *gpio_table = &bios->data[gpio_table_ptr];
5105 int header_len = gpio_table[1], 5107 int header_len = gpio_table[1],
5106 entries = gpio_table[2], 5108 entries = gpio_table[2],
@@ -5108,7 +5110,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
5108 void (*parse_entry)(struct nvbios *, uint16_t) = NULL; 5110 void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
5109 int i; 5111 int i;
5110 5112
5111 if (bios->bdcb.version >= 0x40) { 5113 if (bios->dcb.version >= 0x40) {
5112 if (gpio_table_ptr && entry_len != 4) { 5114 if (gpio_table_ptr && entry_len != 4) {
5113 NV_WARN(dev, "Invalid DCB GPIO table entry length.\n"); 5115 NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
5114 return; 5116 return;
@@ -5116,7 +5118,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
5116 5118
5117 parse_entry = parse_dcb40_gpio_entry; 5119 parse_entry = parse_dcb40_gpio_entry;
5118 5120
5119 } else if (bios->bdcb.version >= 0x30) { 5121 } else if (bios->dcb.version >= 0x30) {
5120 if (gpio_table_ptr && entry_len != 2) { 5122 if (gpio_table_ptr && entry_len != 2) {
5121 NV_WARN(dev, "Invalid DCB GPIO table entry length.\n"); 5123 NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
5122 return; 5124 return;
@@ -5124,7 +5126,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
5124 5126
5125 parse_entry = parse_dcb30_gpio_entry; 5127 parse_entry = parse_dcb30_gpio_entry;
5126 5128
5127 } else if (bios->bdcb.version >= 0x22) { 5129 } else if (bios->dcb.version >= 0x22) {
5128 /* 5130 /*
5129 * DCBs older than v3.0 don't really have a GPIO 5131 * DCBs older than v3.0 don't really have a GPIO
5130 * table, instead they keep some GPIO info at fixed 5132 * table, instead they keep some GPIO info at fixed
@@ -5158,30 +5160,67 @@ struct dcb_connector_table_entry *
5158nouveau_bios_connector_entry(struct drm_device *dev, int index) 5160nouveau_bios_connector_entry(struct drm_device *dev, int index)
5159{ 5161{
5160 struct drm_nouveau_private *dev_priv = dev->dev_private; 5162 struct drm_nouveau_private *dev_priv = dev->dev_private;
5161 struct nvbios *bios = &dev_priv->VBIOS; 5163 struct nvbios *bios = &dev_priv->vbios;
5162 struct dcb_connector_table_entry *cte; 5164 struct dcb_connector_table_entry *cte;
5163 5165
5164 if (index >= bios->bdcb.connector.entries) 5166 if (index >= bios->dcb.connector.entries)
5165 return NULL; 5167 return NULL;
5166 5168
5167 cte = &bios->bdcb.connector.entry[index]; 5169 cte = &bios->dcb.connector.entry[index];
5168 if (cte->type == 0xff) 5170 if (cte->type == 0xff)
5169 return NULL; 5171 return NULL;
5170 5172
5171 return cte; 5173 return cte;
5172} 5174}
5173 5175
5176static enum dcb_connector_type
5177divine_connector_type(struct nvbios *bios, int index)
5178{
5179 struct dcb_table *dcb = &bios->dcb;
5180 unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
5181 int i;
5182
5183 for (i = 0; i < dcb->entries; i++) {
5184 if (dcb->entry[i].connector == index)
5185 encoders |= (1 << dcb->entry[i].type);
5186 }
5187
5188 if (encoders & (1 << OUTPUT_DP)) {
5189 if (encoders & (1 << OUTPUT_TMDS))
5190 type = DCB_CONNECTOR_DP;
5191 else
5192 type = DCB_CONNECTOR_eDP;
5193 } else
5194 if (encoders & (1 << OUTPUT_TMDS)) {
5195 if (encoders & (1 << OUTPUT_ANALOG))
5196 type = DCB_CONNECTOR_DVI_I;
5197 else
5198 type = DCB_CONNECTOR_DVI_D;
5199 } else
5200 if (encoders & (1 << OUTPUT_ANALOG)) {
5201 type = DCB_CONNECTOR_VGA;
5202 } else
5203 if (encoders & (1 << OUTPUT_LVDS)) {
5204 type = DCB_CONNECTOR_LVDS;
5205 } else
5206 if (encoders & (1 << OUTPUT_TV)) {
5207 type = DCB_CONNECTOR_TV_0;
5208 }
5209
5210 return type;
5211}
5212
5174static void 5213static void
5175parse_dcb_connector_table(struct nvbios *bios) 5214parse_dcb_connector_table(struct nvbios *bios)
5176{ 5215{
5177 struct drm_device *dev = bios->dev; 5216 struct drm_device *dev = bios->dev;
5178 struct dcb_connector_table *ct = &bios->bdcb.connector; 5217 struct dcb_connector_table *ct = &bios->dcb.connector;
5179 struct dcb_connector_table_entry *cte; 5218 struct dcb_connector_table_entry *cte;
5180 uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr]; 5219 uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
5181 uint8_t *entry; 5220 uint8_t *entry;
5182 int i; 5221 int i;
5183 5222
5184 if (!bios->bdcb.connector_table_ptr) { 5223 if (!bios->dcb.connector_table_ptr) {
5185 NV_DEBUG_KMS(dev, "No DCB connector table present\n"); 5224 NV_DEBUG_KMS(dev, "No DCB connector table present\n");
5186 return; 5225 return;
5187 } 5226 }
@@ -5203,6 +5242,7 @@ parse_dcb_connector_table(struct nvbios *bios)
5203 cte->entry = ROM16(entry[0]); 5242 cte->entry = ROM16(entry[0]);
5204 else 5243 else
5205 cte->entry = ROM32(entry[0]); 5244 cte->entry = ROM32(entry[0]);
5245
5206 cte->type = (cte->entry & 0x000000ff) >> 0; 5246 cte->type = (cte->entry & 0x000000ff) >> 0;
5207 cte->index = (cte->entry & 0x00000f00) >> 8; 5247 cte->index = (cte->entry & 0x00000f00) >> 8;
5208 switch (cte->entry & 0x00033000) { 5248 switch (cte->entry & 0x00033000) {
@@ -5228,10 +5268,33 @@ parse_dcb_connector_table(struct nvbios *bios)
5228 5268
5229 NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n", 5269 NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
5230 i, cte->entry, cte->type, cte->index, cte->gpio_tag); 5270 i, cte->entry, cte->type, cte->index, cte->gpio_tag);
5271
5272 /* check for known types, fallback to guessing the type
5273 * from attached encoders if we hit an unknown.
5274 */
5275 switch (cte->type) {
5276 case DCB_CONNECTOR_VGA:
5277 case DCB_CONNECTOR_TV_0:
5278 case DCB_CONNECTOR_TV_1:
5279 case DCB_CONNECTOR_TV_3:
5280 case DCB_CONNECTOR_DVI_I:
5281 case DCB_CONNECTOR_DVI_D:
5282 case DCB_CONNECTOR_LVDS:
5283 case DCB_CONNECTOR_DP:
5284 case DCB_CONNECTOR_eDP:
5285 case DCB_CONNECTOR_HDMI_0:
5286 case DCB_CONNECTOR_HDMI_1:
5287 break;
5288 default:
5289 cte->type = divine_connector_type(bios, cte->index);
5290 NV_WARN(dev, "unknown type, using 0x%02x", cte->type);
5291 break;
5292 }
5293
5231 } 5294 }
5232} 5295}
5233 5296
5234static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb) 5297static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
5235{ 5298{
5236 struct dcb_entry *entry = &dcb->entry[dcb->entries]; 5299 struct dcb_entry *entry = &dcb->entry[dcb->entries];
5237 5300
@@ -5241,7 +5304,7 @@ static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
5241 return entry; 5304 return entry;
5242} 5305}
5243 5306
5244static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads) 5307static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
5245{ 5308{
5246 struct dcb_entry *entry = new_dcb_entry(dcb); 5309 struct dcb_entry *entry = new_dcb_entry(dcb);
5247 5310
@@ -5252,7 +5315,7 @@ static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
5252 /* "or" mostly unused in early gen crt modesetting, 0 is fine */ 5315 /* "or" mostly unused in early gen crt modesetting, 0 is fine */
5253} 5316}
5254 5317
5255static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads) 5318static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
5256{ 5319{
5257 struct dcb_entry *entry = new_dcb_entry(dcb); 5320 struct dcb_entry *entry = new_dcb_entry(dcb);
5258 5321
@@ -5279,7 +5342,7 @@ static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
5279#endif 5342#endif
5280} 5343}
5281 5344
5282static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads) 5345static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
5283{ 5346{
5284 struct dcb_entry *entry = new_dcb_entry(dcb); 5347 struct dcb_entry *entry = new_dcb_entry(dcb);
5285 5348
@@ -5290,13 +5353,13 @@ static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
5290} 5353}
5291 5354
5292static bool 5355static bool
5293parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, 5356parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
5294 uint32_t conn, uint32_t conf, struct dcb_entry *entry) 5357 uint32_t conn, uint32_t conf, struct dcb_entry *entry)
5295{ 5358{
5296 entry->type = conn & 0xf; 5359 entry->type = conn & 0xf;
5297 entry->i2c_index = (conn >> 4) & 0xf; 5360 entry->i2c_index = (conn >> 4) & 0xf;
5298 entry->heads = (conn >> 8) & 0xf; 5361 entry->heads = (conn >> 8) & 0xf;
5299 if (bdcb->version >= 0x40) 5362 if (dcb->version >= 0x40)
5300 entry->connector = (conn >> 12) & 0xf; 5363 entry->connector = (conn >> 12) & 0xf;
5301 entry->bus = (conn >> 16) & 0xf; 5364 entry->bus = (conn >> 16) & 0xf;
5302 entry->location = (conn >> 20) & 0x3; 5365 entry->location = (conn >> 20) & 0x3;
@@ -5314,7 +5377,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5314 * Although the rest of a CRT conf dword is usually 5377 * Although the rest of a CRT conf dword is usually
5315 * zeros, mac biosen have stuff there so we must mask 5378 * zeros, mac biosen have stuff there so we must mask
5316 */ 5379 */
5317 entry->crtconf.maxfreq = (bdcb->version < 0x30) ? 5380 entry->crtconf.maxfreq = (dcb->version < 0x30) ?
5318 (conf & 0xffff) * 10 : 5381 (conf & 0xffff) * 10 :
5319 (conf & 0xff) * 10000; 5382 (conf & 0xff) * 10000;
5320 break; 5383 break;
@@ -5323,7 +5386,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5323 uint32_t mask; 5386 uint32_t mask;
5324 if (conf & 0x1) 5387 if (conf & 0x1)
5325 entry->lvdsconf.use_straps_for_mode = true; 5388 entry->lvdsconf.use_straps_for_mode = true;
5326 if (bdcb->version < 0x22) { 5389 if (dcb->version < 0x22) {
5327 mask = ~0xd; 5390 mask = ~0xd;
5328 /* 5391 /*
5329 * The laptop in bug 14567 lies and claims to not use 5392 * The laptop in bug 14567 lies and claims to not use
@@ -5347,7 +5410,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5347 * Until we even try to use these on G8x, it's 5410 * Until we even try to use these on G8x, it's
5348 * useless reporting unknown bits. They all are. 5411 * useless reporting unknown bits. They all are.
5349 */ 5412 */
5350 if (bdcb->version >= 0x40) 5413 if (dcb->version >= 0x40)
5351 break; 5414 break;
5352 5415
5353 NV_ERROR(dev, "Unknown LVDS configuration bits, " 5416 NV_ERROR(dev, "Unknown LVDS configuration bits, "
@@ -5357,7 +5420,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5357 } 5420 }
5358 case OUTPUT_TV: 5421 case OUTPUT_TV:
5359 { 5422 {
5360 if (bdcb->version >= 0x30) 5423 if (dcb->version >= 0x30)
5361 entry->tvconf.has_component_output = conf & (0x8 << 4); 5424 entry->tvconf.has_component_output = conf & (0x8 << 4);
5362 else 5425 else
5363 entry->tvconf.has_component_output = false; 5426 entry->tvconf.has_component_output = false;
@@ -5384,8 +5447,10 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5384 break; 5447 break;
5385 case 0xe: 5448 case 0xe:
5386 /* weird g80 mobile type that "nv" treats as a terminator */ 5449 /* weird g80 mobile type that "nv" treats as a terminator */
5387 bdcb->dcb.entries--; 5450 dcb->entries--;
5388 return false; 5451 return false;
5452 default:
5453 break;
5389 } 5454 }
5390 5455
5391 /* unsure what DCB version introduces this, 3.0? */ 5456 /* unsure what DCB version introduces this, 3.0? */
@@ -5396,7 +5461,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5396} 5461}
5397 5462
5398static bool 5463static bool
5399parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, 5464parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
5400 uint32_t conn, uint32_t conf, struct dcb_entry *entry) 5465 uint32_t conn, uint32_t conf, struct dcb_entry *entry)
5401{ 5466{
5402 switch (conn & 0x0000000f) { 5467 switch (conn & 0x0000000f) {
@@ -5462,27 +5527,27 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
5462 return true; 5527 return true;
5463} 5528}
5464 5529
5465static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, 5530static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
5466 uint32_t conn, uint32_t conf) 5531 uint32_t conn, uint32_t conf)
5467{ 5532{
5468 struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb); 5533 struct dcb_entry *entry = new_dcb_entry(dcb);
5469 bool ret; 5534 bool ret;
5470 5535
5471 if (bdcb->version >= 0x20) 5536 if (dcb->version >= 0x20)
5472 ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry); 5537 ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
5473 else 5538 else
5474 ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry); 5539 ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
5475 if (!ret) 5540 if (!ret)
5476 return ret; 5541 return ret;
5477 5542
5478 read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table, 5543 read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
5479 entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]); 5544 entry->i2c_index, &dcb->i2c[entry->i2c_index]);
5480 5545
5481 return true; 5546 return true;
5482} 5547}
5483 5548
5484static 5549static
5485void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb) 5550void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
5486{ 5551{
5487 /* 5552 /*
5488 * DCB v2.0 lists each output combination separately. 5553 * DCB v2.0 lists each output combination separately.
@@ -5534,8 +5599,7 @@ static int
5534parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) 5599parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5535{ 5600{
5536 struct drm_nouveau_private *dev_priv = dev->dev_private; 5601 struct drm_nouveau_private *dev_priv = dev->dev_private;
5537 struct bios_parsed_dcb *bdcb = &bios->bdcb; 5602 struct dcb_table *dcb = &bios->dcb;
5538 struct parsed_dcb *dcb;
5539 uint16_t dcbptr = 0, i2ctabptr = 0; 5603 uint16_t dcbptr = 0, i2ctabptr = 0;
5540 uint8_t *dcbtable; 5604 uint8_t *dcbtable;
5541 uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES; 5605 uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
@@ -5543,9 +5607,6 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5543 int recordlength = 8, confofs = 4; 5607 int recordlength = 8, confofs = 4;
5544 int i; 5608 int i;
5545 5609
5546 dcb = bios->pub.dcb = &bdcb->dcb;
5547 dcb->entries = 0;
5548
5549 /* get the offset from 0x36 */ 5610 /* get the offset from 0x36 */
5550 if (dev_priv->card_type > NV_04) { 5611 if (dev_priv->card_type > NV_04) {
5551 dcbptr = ROM16(bios->data[0x36]); 5612 dcbptr = ROM16(bios->data[0x36]);
@@ -5567,21 +5628,21 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5567 dcbtable = &bios->data[dcbptr]; 5628 dcbtable = &bios->data[dcbptr];
5568 5629
5569 /* get DCB version */ 5630 /* get DCB version */
5570 bdcb->version = dcbtable[0]; 5631 dcb->version = dcbtable[0];
5571 NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n", 5632 NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
5572 bdcb->version >> 4, bdcb->version & 0xf); 5633 dcb->version >> 4, dcb->version & 0xf);
5573 5634
5574 if (bdcb->version >= 0x20) { /* NV17+ */ 5635 if (dcb->version >= 0x20) { /* NV17+ */
5575 uint32_t sig; 5636 uint32_t sig;
5576 5637
5577 if (bdcb->version >= 0x30) { /* NV40+ */ 5638 if (dcb->version >= 0x30) { /* NV40+ */
5578 headerlen = dcbtable[1]; 5639 headerlen = dcbtable[1];
5579 entries = dcbtable[2]; 5640 entries = dcbtable[2];
5580 recordlength = dcbtable[3]; 5641 recordlength = dcbtable[3];
5581 i2ctabptr = ROM16(dcbtable[4]); 5642 i2ctabptr = ROM16(dcbtable[4]);
5582 sig = ROM32(dcbtable[6]); 5643 sig = ROM32(dcbtable[6]);
5583 bdcb->gpio_table_ptr = ROM16(dcbtable[10]); 5644 dcb->gpio_table_ptr = ROM16(dcbtable[10]);
5584 bdcb->connector_table_ptr = ROM16(dcbtable[20]); 5645 dcb->connector_table_ptr = ROM16(dcbtable[20]);
5585 } else { 5646 } else {
5586 i2ctabptr = ROM16(dcbtable[2]); 5647 i2ctabptr = ROM16(dcbtable[2]);
5587 sig = ROM32(dcbtable[4]); 5648 sig = ROM32(dcbtable[4]);
@@ -5593,7 +5654,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5593 "signature (%08X)\n", sig); 5654 "signature (%08X)\n", sig);
5594 return -EINVAL; 5655 return -EINVAL;
5595 } 5656 }
5596 } else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */ 5657 } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
5597 char sig[8] = { 0 }; 5658 char sig[8] = { 0 };
5598 5659
5599 strncpy(sig, (char *)&dcbtable[-7], 7); 5660 strncpy(sig, (char *)&dcbtable[-7], 7);
@@ -5641,14 +5702,11 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5641 if (!i2ctabptr) 5702 if (!i2ctabptr)
5642 NV_WARN(dev, "No pointer to DCB I2C port table\n"); 5703 NV_WARN(dev, "No pointer to DCB I2C port table\n");
5643 else { 5704 else {
5644 bdcb->i2c_table = &bios->data[i2ctabptr]; 5705 dcb->i2c_table = &bios->data[i2ctabptr];
5645 if (bdcb->version >= 0x30) 5706 if (dcb->version >= 0x30)
5646 bdcb->i2c_default_indices = bdcb->i2c_table[4]; 5707 dcb->i2c_default_indices = dcb->i2c_table[4];
5647 } 5708 }
5648 5709
5649 parse_dcb_gpio_table(bios);
5650 parse_dcb_connector_table(bios);
5651
5652 if (entries > DCB_MAX_NUM_ENTRIES) 5710 if (entries > DCB_MAX_NUM_ENTRIES)
5653 entries = DCB_MAX_NUM_ENTRIES; 5711 entries = DCB_MAX_NUM_ENTRIES;
5654 5712
@@ -5673,7 +5731,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5673 NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n", 5731 NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
5674 dcb->entries, connection, config); 5732 dcb->entries, connection, config);
5675 5733
5676 if (!parse_dcb_entry(dev, bdcb, connection, config)) 5734 if (!parse_dcb_entry(dev, dcb, connection, config))
5677 break; 5735 break;
5678 } 5736 }
5679 5737
@@ -5681,18 +5739,22 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5681 * apart for v2.1+ not being known for requiring merging, this 5739 * apart for v2.1+ not being known for requiring merging, this
5682 * guarantees dcbent->index is the index of the entry in the rom image 5740 * guarantees dcbent->index is the index of the entry in the rom image
5683 */ 5741 */
5684 if (bdcb->version < 0x21) 5742 if (dcb->version < 0x21)
5685 merge_like_dcb_entries(dev, dcb); 5743 merge_like_dcb_entries(dev, dcb);
5686 5744
5687 return dcb->entries ? 0 : -ENXIO; 5745 if (!dcb->entries)
5746 return -ENXIO;
5747
5748 parse_dcb_gpio_table(bios);
5749 parse_dcb_connector_table(bios);
5750 return 0;
5688} 5751}
5689 5752
5690static void 5753static void
5691fixup_legacy_connector(struct nvbios *bios) 5754fixup_legacy_connector(struct nvbios *bios)
5692{ 5755{
5693 struct bios_parsed_dcb *bdcb = &bios->bdcb; 5756 struct dcb_table *dcb = &bios->dcb;
5694 struct parsed_dcb *dcb = &bdcb->dcb; 5757 int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
5695 int high = 0, i;
5696 5758
5697 /* 5759 /*
5698 * DCB 3.0 also has the table in most cases, but there are some cards 5760 * DCB 3.0 also has the table in most cases, but there are some cards
@@ -5700,9 +5762,11 @@ fixup_legacy_connector(struct nvbios *bios)
5700 * indices are all 0. We don't need the connector indices on pre-G80 5762 * indices are all 0. We don't need the connector indices on pre-G80
5701 * chips (yet?) so limit the use to DCB 4.0 and above. 5763 * chips (yet?) so limit the use to DCB 4.0 and above.
5702 */ 5764 */
5703 if (bdcb->version >= 0x40) 5765 if (dcb->version >= 0x40)
5704 return; 5766 return;
5705 5767
5768 dcb->connector.entries = 0;
5769
5706 /* 5770 /*
5707 * No known connector info before v3.0, so make it up. the rule here 5771 * No known connector info before v3.0, so make it up. the rule here
5708 * is: anything on the same i2c bus is considered to be on the same 5772 * is: anything on the same i2c bus is considered to be on the same
@@ -5710,37 +5774,38 @@ fixup_legacy_connector(struct nvbios *bios)
5710 * its own unique connector index. 5774 * its own unique connector index.
5711 */ 5775 */
5712 for (i = 0; i < dcb->entries; i++) { 5776 for (i = 0; i < dcb->entries; i++) {
5713 if (dcb->entry[i].i2c_index == 0xf)
5714 continue;
5715
5716 /* 5777 /*
5717 * Ignore the I2C index for on-chip TV-out, as there 5778 * Ignore the I2C index for on-chip TV-out, as there
5718 * are cards with bogus values (nv31m in bug 23212), 5779 * are cards with bogus values (nv31m in bug 23212),
5719 * and it's otherwise useless. 5780 * and it's otherwise useless.
5720 */ 5781 */
5721 if (dcb->entry[i].type == OUTPUT_TV && 5782 if (dcb->entry[i].type == OUTPUT_TV &&
5722 dcb->entry[i].location == DCB_LOC_ON_CHIP) { 5783 dcb->entry[i].location == DCB_LOC_ON_CHIP)
5723 dcb->entry[i].i2c_index = 0xf; 5784 dcb->entry[i].i2c_index = 0xf;
5785 i2c = dcb->entry[i].i2c_index;
5786
5787 if (i2c_conn[i2c]) {
5788 dcb->entry[i].connector = i2c_conn[i2c] - 1;
5724 continue; 5789 continue;
5725 } 5790 }
5726 5791
5727 dcb->entry[i].connector = dcb->entry[i].i2c_index; 5792 dcb->entry[i].connector = dcb->connector.entries++;
5728 if (dcb->entry[i].connector > high) 5793 if (i2c != 0xf)
5729 high = dcb->entry[i].connector; 5794 i2c_conn[i2c] = dcb->connector.entries;
5730 } 5795 }
5731 5796
5732 for (i = 0; i < dcb->entries; i++) { 5797 /* Fake the connector table as well as just connector indices */
5733 if (dcb->entry[i].i2c_index != 0xf) 5798 for (i = 0; i < dcb->connector.entries; i++) {
5734 continue; 5799 dcb->connector.entry[i].index = i;
5735 5800 dcb->connector.entry[i].type = divine_connector_type(bios, i);
5736 dcb->entry[i].connector = ++high; 5801 dcb->connector.entry[i].gpio_tag = 0xff;
5737 } 5802 }
5738} 5803}
5739 5804
5740static void 5805static void
5741fixup_legacy_i2c(struct nvbios *bios) 5806fixup_legacy_i2c(struct nvbios *bios)
5742{ 5807{
5743 struct parsed_dcb *dcb = &bios->bdcb.dcb; 5808 struct dcb_table *dcb = &bios->dcb;
5744 int i; 5809 int i;
5745 5810
5746 for (i = 0; i < dcb->entries; i++) { 5811 for (i = 0; i < dcb->entries; i++) {
@@ -5826,7 +5891,7 @@ static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
5826uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) 5891uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
5827{ 5892{
5828 struct drm_nouveau_private *dev_priv = dev->dev_private; 5893 struct drm_nouveau_private *dev_priv = dev->dev_private;
5829 struct nvbios *bios = &dev_priv->VBIOS; 5894 struct nvbios *bios = &dev_priv->vbios;
5830 const uint8_t edid_sig[] = { 5895 const uint8_t edid_sig[] = {
5831 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; 5896 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
5832 uint16_t offset = 0; 5897 uint16_t offset = 0;
@@ -5859,7 +5924,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5859 struct dcb_entry *dcbent) 5924 struct dcb_entry *dcbent)
5860{ 5925{
5861 struct drm_nouveau_private *dev_priv = dev->dev_private; 5926 struct drm_nouveau_private *dev_priv = dev->dev_private;
5862 struct nvbios *bios = &dev_priv->VBIOS; 5927 struct nvbios *bios = &dev_priv->vbios;
5863 struct init_exec iexec = { true, false }; 5928 struct init_exec iexec = { true, false };
5864 5929
5865 mutex_lock(&bios->lock); 5930 mutex_lock(&bios->lock);
@@ -5872,7 +5937,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5872static bool NVInitVBIOS(struct drm_device *dev) 5937static bool NVInitVBIOS(struct drm_device *dev)
5873{ 5938{
5874 struct drm_nouveau_private *dev_priv = dev->dev_private; 5939 struct drm_nouveau_private *dev_priv = dev->dev_private;
5875 struct nvbios *bios = &dev_priv->VBIOS; 5940 struct nvbios *bios = &dev_priv->vbios;
5876 5941
5877 memset(bios, 0, sizeof(struct nvbios)); 5942 memset(bios, 0, sizeof(struct nvbios));
5878 mutex_init(&bios->lock); 5943 mutex_init(&bios->lock);
@@ -5888,7 +5953,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
5888static int nouveau_parse_vbios_struct(struct drm_device *dev) 5953static int nouveau_parse_vbios_struct(struct drm_device *dev)
5889{ 5954{
5890 struct drm_nouveau_private *dev_priv = dev->dev_private; 5955 struct drm_nouveau_private *dev_priv = dev->dev_private;
5891 struct nvbios *bios = &dev_priv->VBIOS; 5956 struct nvbios *bios = &dev_priv->vbios;
5892 const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' }; 5957 const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
5893 const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 }; 5958 const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
5894 int offset; 5959 int offset;
@@ -5915,7 +5980,7 @@ int
5915nouveau_run_vbios_init(struct drm_device *dev) 5980nouveau_run_vbios_init(struct drm_device *dev)
5916{ 5981{
5917 struct drm_nouveau_private *dev_priv = dev->dev_private; 5982 struct drm_nouveau_private *dev_priv = dev->dev_private;
5918 struct nvbios *bios = &dev_priv->VBIOS; 5983 struct nvbios *bios = &dev_priv->vbios;
5919 int i, ret = 0; 5984 int i, ret = 0;
5920 5985
5921 NVLockVgaCrtcs(dev, false); 5986 NVLockVgaCrtcs(dev, false);
@@ -5946,9 +6011,9 @@ nouveau_run_vbios_init(struct drm_device *dev)
5946 } 6011 }
5947 6012
5948 if (dev_priv->card_type >= NV_50) { 6013 if (dev_priv->card_type >= NV_50) {
5949 for (i = 0; i < bios->bdcb.dcb.entries; i++) { 6014 for (i = 0; i < bios->dcb.entries; i++) {
5950 nouveau_bios_run_display_table(dev, 6015 nouveau_bios_run_display_table(dev,
5951 &bios->bdcb.dcb.entry[i], 6016 &bios->dcb.entry[i],
5952 0, 0); 6017 0, 0);
5953 } 6018 }
5954 } 6019 }
@@ -5962,11 +6027,11 @@ static void
5962nouveau_bios_i2c_devices_takedown(struct drm_device *dev) 6027nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
5963{ 6028{
5964 struct drm_nouveau_private *dev_priv = dev->dev_private; 6029 struct drm_nouveau_private *dev_priv = dev->dev_private;
5965 struct nvbios *bios = &dev_priv->VBIOS; 6030 struct nvbios *bios = &dev_priv->vbios;
5966 struct dcb_i2c_entry *entry; 6031 struct dcb_i2c_entry *entry;
5967 int i; 6032 int i;
5968 6033
5969 entry = &bios->bdcb.dcb.i2c[0]; 6034 entry = &bios->dcb.i2c[0];
5970 for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++) 6035 for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
5971 nouveau_i2c_fini(dev, entry); 6036 nouveau_i2c_fini(dev, entry);
5972} 6037}
@@ -5975,13 +6040,11 @@ int
5975nouveau_bios_init(struct drm_device *dev) 6040nouveau_bios_init(struct drm_device *dev)
5976{ 6041{
5977 struct drm_nouveau_private *dev_priv = dev->dev_private; 6042 struct drm_nouveau_private *dev_priv = dev->dev_private;
5978 struct nvbios *bios = &dev_priv->VBIOS; 6043 struct nvbios *bios = &dev_priv->vbios;
5979 uint32_t saved_nv_pextdev_boot_0; 6044 uint32_t saved_nv_pextdev_boot_0;
5980 bool was_locked; 6045 bool was_locked;
5981 int ret; 6046 int ret;
5982 6047
5983 dev_priv->vbios = &bios->pub;
5984
5985 if (!NVInitVBIOS(dev)) 6048 if (!NVInitVBIOS(dev))
5986 return -ENODEV; 6049 return -ENODEV;
5987 6050
@@ -6023,10 +6086,8 @@ nouveau_bios_init(struct drm_device *dev)
6023 bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0); 6086 bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
6024 6087
6025 ret = nouveau_run_vbios_init(dev); 6088 ret = nouveau_run_vbios_init(dev);
6026 if (ret) { 6089 if (ret)
6027 dev_priv->vbios = NULL;
6028 return ret; 6090 return ret;
6029 }
6030 6091
6031 /* feature_byte on BMP is poor, but init always sets CR4B */ 6092 /* feature_byte on BMP is poor, but init always sets CR4B */
6032 was_locked = NVLockVgaCrtcs(dev, false); 6093 was_locked = NVLockVgaCrtcs(dev, false);
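
The reworked fixup_legacy_connector() above fabricates connector entries for pre-DCB-3.0 boards by bucketing outputs on their I2C bus: outputs that share a DDC bus share a connector, and anything without a bus (i2c_index 0xf, e.g. on-chip TV-out) gets its own entry. The stand-alone sketch below mirrors that loop; struct output, i2c_conn and the example board layout are simplified stand-ins for the driver's dcb_table, not real nouveau types.

/* Stand-alone sketch of the i2c-bus -> connector bucketing performed by
 * the reworked fixup_legacy_connector() above. Simplified stand-in types. */
#include <stdio.h>

#define NUM_I2C 16
#define NO_I2C  0xf   /* outputs with no DDC bus, e.g. on-chip TV-out */

struct output {
	int i2c_index;    /* DDC bus feeding this output, or NO_I2C */
	int connector;    /* fabricated connector index, filled in below */
};

int main(void)
{
	/* Example board: DVI-I (TMDS + analog sharing bus 0), VGA on bus 1,
	 * and an on-chip TV encoder with no bus. */
	struct output out[] = { { 0, -1 }, { 0, -1 }, { 1, -1 }, { NO_I2C, -1 } };
	int n = sizeof(out) / sizeof(out[0]);
	int i2c_conn[NUM_I2C] = { 0 };   /* bus -> connector index + 1, 0 = unseen */
	int entries = 0;

	for (int i = 0; i < n; i++) {
		int i2c = out[i].i2c_index;

		if (i2c_conn[i2c]) {             /* bus seen before: share connector */
			out[i].connector = i2c_conn[i2c] - 1;
			continue;
		}

		out[i].connector = entries++;    /* otherwise fabricate a new entry */
		if (i2c != NO_I2C)               /* busless outputs never share */
			i2c_conn[i2c] = entries;
	}

	for (int i = 0; i < n; i++)
		printf("output %d (i2c 0x%x) -> connector %d\n",
		       i, out[i].i2c_index, out[i].connector);
	printf("%d connector entries fabricated\n", entries);
	return 0;
}

With the example inputs the two DVI-I outputs land on connector 0, the VGA output on connector 1 and the busless TV encoder on connector 2, which is exactly the grouping the patch then backs with divine_connector_type() entries.
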
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index fd94bd6dc264..9f688aa9a655 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,9 +34,67 @@
34 34
35#define DCB_LOC_ON_CHIP 0 35#define DCB_LOC_ON_CHIP 0
36 36
37struct dcb_i2c_entry {
38 uint8_t port_type;
39 uint8_t read, write;
40 struct nouveau_i2c_chan *chan;
41};
42
43enum dcb_gpio_tag {
44 DCB_GPIO_TVDAC0 = 0xc,
45 DCB_GPIO_TVDAC1 = 0x2d,
46};
47
48struct dcb_gpio_entry {
49 enum dcb_gpio_tag tag;
50 int line;
51 bool invert;
52};
53
54struct dcb_gpio_table {
55 int entries;
56 struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
57};
58
59enum dcb_connector_type {
60 DCB_CONNECTOR_VGA = 0x00,
61 DCB_CONNECTOR_TV_0 = 0x10,
62 DCB_CONNECTOR_TV_1 = 0x11,
63 DCB_CONNECTOR_TV_3 = 0x13,
64 DCB_CONNECTOR_DVI_I = 0x30,
65 DCB_CONNECTOR_DVI_D = 0x31,
66 DCB_CONNECTOR_LVDS = 0x40,
67 DCB_CONNECTOR_DP = 0x46,
68 DCB_CONNECTOR_eDP = 0x47,
69 DCB_CONNECTOR_HDMI_0 = 0x60,
70 DCB_CONNECTOR_HDMI_1 = 0x61,
71 DCB_CONNECTOR_NONE = 0xff
72};
73
74struct dcb_connector_table_entry {
75 uint32_t entry;
76 enum dcb_connector_type type;
77 uint8_t index;
78 uint8_t gpio_tag;
79};
80
81struct dcb_connector_table {
82 int entries;
83 struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
84};
85
86enum dcb_type {
87 OUTPUT_ANALOG = 0,
88 OUTPUT_TV = 1,
89 OUTPUT_TMDS = 2,
90 OUTPUT_LVDS = 3,
91 OUTPUT_DP = 6,
92 OUTPUT_ANY = -1
93};
94
37struct dcb_entry { 95struct dcb_entry {
38 int index; /* may not be raw dcb index if merging has happened */ 96 int index; /* may not be raw dcb index if merging has happened */
39 uint8_t type; 97 enum dcb_type type;
40 uint8_t i2c_index; 98 uint8_t i2c_index;
41 uint8_t heads; 99 uint8_t heads;
42 uint8_t connector; 100 uint8_t connector;
@@ -71,69 +129,22 @@ struct dcb_entry {
71 bool i2c_upper_default; 129 bool i2c_upper_default;
72}; 130};
73 131
74struct dcb_i2c_entry { 132struct dcb_table {
75 uint8_t port_type; 133 uint8_t version;
76 uint8_t read, write;
77 struct nouveau_i2c_chan *chan;
78};
79 134
80struct parsed_dcb {
81 int entries; 135 int entries;
82 struct dcb_entry entry[DCB_MAX_NUM_ENTRIES]; 136 struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
83 struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
84};
85
86enum dcb_gpio_tag {
87 DCB_GPIO_TVDAC0 = 0xc,
88 DCB_GPIO_TVDAC1 = 0x2d,
89};
90
91struct dcb_gpio_entry {
92 enum dcb_gpio_tag tag;
93 int line;
94 bool invert;
95};
96
97struct parsed_dcb_gpio {
98 int entries;
99 struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
100};
101
102struct dcb_connector_table_entry {
103 uint32_t entry;
104 uint8_t type;
105 uint8_t index;
106 uint8_t gpio_tag;
107};
108
109struct dcb_connector_table {
110 int entries;
111 struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
112};
113
114struct bios_parsed_dcb {
115 uint8_t version;
116
117 struct parsed_dcb dcb;
118 137
119 uint8_t *i2c_table; 138 uint8_t *i2c_table;
120 uint8_t i2c_default_indices; 139 uint8_t i2c_default_indices;
140 struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
121 141
122 uint16_t gpio_table_ptr; 142 uint16_t gpio_table_ptr;
123 struct parsed_dcb_gpio gpio; 143 struct dcb_gpio_table gpio;
124 uint16_t connector_table_ptr; 144 uint16_t connector_table_ptr;
125 struct dcb_connector_table connector; 145 struct dcb_connector_table connector;
126}; 146};
127 147
128enum nouveau_encoder_type {
129 OUTPUT_ANALOG = 0,
130 OUTPUT_TV = 1,
131 OUTPUT_TMDS = 2,
132 OUTPUT_LVDS = 3,
133 OUTPUT_DP = 6,
134 OUTPUT_ANY = -1
135};
136
137enum nouveau_or { 148enum nouveau_or {
138 OUTPUT_A = (1 << 0), 149 OUTPUT_A = (1 << 0),
139 OUTPUT_B = (1 << 1), 150 OUTPUT_B = (1 << 1),
@@ -190,8 +201,8 @@ struct pll_lims {
190 int refclk; 201 int refclk;
191}; 202};
192 203
193struct nouveau_bios_info { 204struct nvbios {
194 struct parsed_dcb *dcb; 205 struct drm_device *dev;
195 206
196 uint8_t chip_version; 207 uint8_t chip_version;
197 208
@@ -199,11 +210,6 @@ struct nouveau_bios_info {
199 uint32_t tvdactestval; 210 uint32_t tvdactestval;
200 uint8_t digital_min_front_porch; 211 uint8_t digital_min_front_porch;
201 bool fp_no_ddc; 212 bool fp_no_ddc;
202};
203
204struct nvbios {
205 struct drm_device *dev;
206 struct nouveau_bios_info pub;
207 213
208 struct mutex lock; 214 struct mutex lock;
209 215
@@ -234,7 +240,7 @@ struct nvbios {
234 uint16_t some_script_ptr; /* BIT I + 14 */ 240 uint16_t some_script_ptr; /* BIT I + 14 */
235 uint16_t init96_tbl_ptr; /* BIT I + 16 */ 241 uint16_t init96_tbl_ptr; /* BIT I + 16 */
236 242
237 struct bios_parsed_dcb bdcb; 243 struct dcb_table dcb;
238 244
239 struct { 245 struct {
240 int crtchead; 246 int crtchead;
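
The gpio_table_ptr and connector_table_ptr members of the new dcb_table are filled by parse_dcb_table() from fixed offsets in the DCB 3.0+ header, using the driver's little-endian ROM16()/ROM32() accessors (headerlen at byte 1, entry count at 2, record length at 3, I2C table pointer at 4, signature at 6, GPIO pointer at 10, connector pointer at 20, as in the hunk earlier). The sketch below decodes a fabricated header with local rom16()/rom32() helpers; the byte values are invented purely for illustration.

/* Sketch of the little-endian table reads behind parse_dcb_table()'s
 * DCB 3.0+ header handling. rom16()/rom32() are local stand-ins for the
 * driver's ROM16()/ROM32(); the header bytes are made up. */
#include <stdint.h>
#include <stdio.h>

static uint16_t rom16(const uint8_t *p) { return p[0] | (uint16_t)p[1] << 8; }
static uint32_t rom32(const uint8_t *p)
{
	return p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	uint8_t dcbtable[22] = {
		0x30,                   /* [0]  version 3.0 */
		0x16,                   /* [1]  header length */
		0x08,                   /* [2]  number of entries */
		0x08,                   /* [3]  bytes per entry */
		0x34, 0x12,             /* [4]  I2C table pointer */
		0xcb, 0xbd, 0xdc, 0x4e, /* [6]  signature */
		0x78, 0x56,             /* [10] GPIO table pointer */
	};
	dcbtable[20] = 0xbc;            /* [20] connector table pointer */
	dcbtable[21] = 0x9a;

	printf("version    %d.%d\n", dcbtable[0] >> 4, dcbtable[0] & 0xf);
	printf("entries    %d x %d bytes after a %d-byte header\n",
	       dcbtable[2], dcbtable[3], dcbtable[1]);
	printf("i2ctabptr  0x%04x\n", (unsigned)rom16(&dcbtable[4]));
	printf("signature  0x%08x\n", (unsigned)rom32(&dcbtable[6]));
	printf("gpio ptr   0x%04x\n", (unsigned)rom16(&dcbtable[10]));
	printf("connector  0x%04x\n", (unsigned)rom16(&dcbtable[20]));
	return 0;
}
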
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index ee2b84504d05..88f9bc0941eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -274,7 +274,7 @@ getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
274 * returns calculated clock 274 * returns calculated clock
275 */ 275 */
276 struct drm_nouveau_private *dev_priv = dev->dev_private; 276 struct drm_nouveau_private *dev_priv = dev->dev_private;
277 int cv = dev_priv->vbios->chip_version; 277 int cv = dev_priv->vbios.chip_version;
278 int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq; 278 int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
279 int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m; 279 int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
280 int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n; 280 int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
@@ -373,7 +373,7 @@ getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
373 * returns calculated clock 373 * returns calculated clock
374 */ 374 */
375 struct drm_nouveau_private *dev_priv = dev->dev_private; 375 struct drm_nouveau_private *dev_priv = dev->dev_private;
376 int chip_version = dev_priv->vbios->chip_version; 376 int chip_version = dev_priv->vbios.chip_version;
377 int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq; 377 int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
378 int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq; 378 int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
379 int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq; 379 int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 2281f99da7fc..6dfb425cbae9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -35,22 +35,27 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
35 struct drm_nouveau_private *dev_priv = dev->dev_private; 35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_bo *pb = chan->pushbuf_bo; 36 struct nouveau_bo *pb = chan->pushbuf_bo;
37 struct nouveau_gpuobj *pushbuf = NULL; 37 struct nouveau_gpuobj *pushbuf = NULL;
38 uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
39 int ret; 38 int ret;
40 39
40 if (dev_priv->card_type >= NV_50) {
41 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
42 dev_priv->vm_end, NV_DMA_ACCESS_RO,
43 NV_DMA_TARGET_AGP, &pushbuf);
44 chan->pushbuf_base = pb->bo.offset;
45 } else
41 if (pb->bo.mem.mem_type == TTM_PL_TT) { 46 if (pb->bo.mem.mem_type == TTM_PL_TT) {
42 ret = nouveau_gpuobj_gart_dma_new(chan, 0, 47 ret = nouveau_gpuobj_gart_dma_new(chan, 0,
43 dev_priv->gart_info.aper_size, 48 dev_priv->gart_info.aper_size,
44 NV_DMA_ACCESS_RO, &pushbuf, 49 NV_DMA_ACCESS_RO, &pushbuf,
45 NULL); 50 NULL);
46 chan->pushbuf_base = start; 51 chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
47 } else 52 } else
48 if (dev_priv->card_type != NV_04) { 53 if (dev_priv->card_type != NV_04) {
49 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, 54 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
50 dev_priv->fb_available_size, 55 dev_priv->fb_available_size,
51 NV_DMA_ACCESS_RO, 56 NV_DMA_ACCESS_RO,
52 NV_DMA_TARGET_VIDMEM, &pushbuf); 57 NV_DMA_TARGET_VIDMEM, &pushbuf);
53 chan->pushbuf_base = start; 58 chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
54 } else { 59 } else {
55 /* NV04 cmdbuf hack, from original ddx.. not sure of it's 60 /* NV04 cmdbuf hack, from original ddx.. not sure of it's
56 * exact reason for existing :) PCI access to cmdbuf in 61 * exact reason for existing :) PCI access to cmdbuf in
@@ -61,7 +66,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
61 dev_priv->fb_available_size, 66 dev_priv->fb_available_size,
62 NV_DMA_ACCESS_RO, 67 NV_DMA_ACCESS_RO,
63 NV_DMA_TARGET_PCI, &pushbuf); 68 NV_DMA_TARGET_PCI, &pushbuf);
64 chan->pushbuf_base = start; 69 chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
65 } 70 }
66 71
67 ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf); 72 ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
@@ -275,9 +280,18 @@ nouveau_channel_free(struct nouveau_channel *chan)
275 */ 280 */
276 nouveau_fence_fini(chan); 281 nouveau_fence_fini(chan);
277 282
278 /* Ensure the channel is no longer active on the GPU */ 283 /* This will prevent pfifo from switching channels. */
279 pfifo->reassign(dev, false); 284 pfifo->reassign(dev, false);
280 285
286 /* We want to give pgraph a chance to idle and get rid of all potential
287 * errors. We need to do this before the lock, otherwise the irq handler
288 * is unable to process them.
289 */
290 if (pgraph->channel(dev) == chan)
291 nouveau_wait_for_idle(dev);
292
293 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
294
281 pgraph->fifo_access(dev, false); 295 pgraph->fifo_access(dev, false);
282 if (pgraph->channel(dev) == chan) 296 if (pgraph->channel(dev) == chan)
283 pgraph->unload_context(dev); 297 pgraph->unload_context(dev);
@@ -293,6 +307,8 @@ nouveau_channel_free(struct nouveau_channel *chan)
293 307
294 pfifo->reassign(dev, true); 308 pfifo->reassign(dev, true);
295 309
310 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
311
296 /* Release the channel's resources */ 312 /* Release the channel's resources */
297 nouveau_gpuobj_ref_del(dev, &chan->pushbuf); 313 nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
298 if (chan->pushbuf_bo) { 314 if (chan->pushbuf_bo) {
@@ -369,6 +385,14 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
369 return ret; 385 return ret;
370 init->channel = chan->id; 386 init->channel = chan->id;
371 387
388 if (chan->dma.ib_max)
389 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
390 NOUVEAU_GEM_DOMAIN_GART;
391 else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
392 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
393 else
394 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
395
372 init->subchan[0].handle = NvM2MF; 396 init->subchan[0].handle = NvM2MF;
373 if (dev_priv->card_type < NV_50) 397 if (dev_priv->card_type < NV_50)
374 init->subchan[0].grclass = 0x0039; 398 init->subchan[0].grclass = 0x0039;
@@ -408,7 +432,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
408 ***********************************/ 432 ***********************************/
409 433
410struct drm_ioctl_desc nouveau_ioctls[] = { 434struct drm_ioctl_desc nouveau_ioctls[] = {
411 DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
412 DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), 435 DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
413 DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 436 DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
414 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), 437 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
@@ -418,13 +441,9 @@ struct drm_ioctl_desc nouveau_ioctls[] = {
418 DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), 441 DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
419 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), 442 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
420 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), 443 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
421 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
422 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
423 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
424 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), 444 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
425 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), 445 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
426 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), 446 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
427 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
428}; 447};
429 448
430int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); 449int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
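
The reordered nouveau_channel_free() above follows a specific shutdown sequence: disable PFIFO channel switching, let PGRAPH idle while interrupts can still be serviced (the new comment explains the IRQ handler must be able to drain pending errors), and only then take context_switch_lock around the context unload. The condensed, user-space sketch below shows that ordering; the engine callbacks are printf stubs and a pthread mutex stands in for the kernel's irqsave spinlock, so only the sequence and lock placement are meaningful.

/* Condensed user-space model of the teardown ordering in
 * nouveau_channel_free() above. Stubs only; not driver code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t context_switch_lock = PTHREAD_MUTEX_INITIALIZER;

static void pfifo_reassign(int enable)  { printf("pfifo reassign %d\n", enable); }
static void pgraph_fifo_access(int on)  { printf("pgraph access  %d\n", on); }
static void wait_for_idle(void)         { printf("wait for idle (IRQs still serviced)\n"); }
static void unload_contexts(void)       { printf("unload pfifo/pgraph contexts\n"); }

static void channel_free(int channel_is_current)
{
	/* 1. Stop PFIFO from switching to another channel. */
	pfifo_reassign(0);

	/* 2. Let PGRAPH idle *before* taking the lock, so the interrupt
	 *    handler can still run and flush any pending errors. */
	if (channel_is_current)
		wait_for_idle();

	/* 3. Only now exclude the IRQ path and tear the contexts down. */
	pthread_mutex_lock(&context_switch_lock);
	pgraph_fifo_access(0);
	unload_contexts();
	pgraph_fifo_access(1);
	pfifo_reassign(1);
	pthread_mutex_unlock(&context_switch_lock);
}

int main(void)
{
	channel_free(1);
	return 0;
}
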
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d2f63353ea97..24327f468c4b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -218,7 +218,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
218 connector->interlace_allowed = true; 218 connector->interlace_allowed = true;
219 } 219 }
220 220
221 if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { 221 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
222 drm_connector_property_set_value(connector, 222 drm_connector_property_set_value(connector,
223 dev->mode_config.dvi_i_subconnector_property, 223 dev->mode_config.dvi_i_subconnector_property,
224 nv_encoder->dcb->type == OUTPUT_TMDS ? 224 nv_encoder->dcb->type == OUTPUT_TMDS ?
@@ -236,15 +236,17 @@ nouveau_connector_detect(struct drm_connector *connector)
236 struct nouveau_i2c_chan *i2c; 236 struct nouveau_i2c_chan *i2c;
237 int type, flags; 237 int type, flags;
238 238
239 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) 239 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS)
240 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); 240 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
241 if (nv_encoder && nv_connector->native_mode) { 241 if (nv_encoder && nv_connector->native_mode) {
242 unsigned status = connector_status_connected;
243
242#ifdef CONFIG_ACPI 244#ifdef CONFIG_ACPI
243 if (!nouveau_ignorelid && !acpi_lid_open()) 245 if (!nouveau_ignorelid && !acpi_lid_open())
244 return connector_status_disconnected; 246 status = connector_status_unknown;
245#endif 247#endif
246 nouveau_connector_set_encoder(connector, nv_encoder); 248 nouveau_connector_set_encoder(connector, nv_encoder);
247 return connector_status_connected; 249 return status;
248 } 250 }
249 251
250 /* Cleanup the previous EDID block. */ 252 /* Cleanup the previous EDID block. */
@@ -279,7 +281,7 @@ nouveau_connector_detect(struct drm_connector *connector)
279 * same i2c channel so the value returned from ddc_detect 281 * same i2c channel so the value returned from ddc_detect
280 * isn't necessarily correct. 282 * isn't necessarily correct.
281 */ 283 */
282 if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { 284 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
283 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL) 285 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
284 type = OUTPUT_TMDS; 286 type = OUTPUT_TMDS;
285 else 287 else
@@ -321,11 +323,11 @@ detect_analog:
321static void 323static void
322nouveau_connector_force(struct drm_connector *connector) 324nouveau_connector_force(struct drm_connector *connector)
323{ 325{
324 struct drm_device *dev = connector->dev; 326 struct nouveau_connector *nv_connector = nouveau_connector(connector);
325 struct nouveau_encoder *nv_encoder; 327 struct nouveau_encoder *nv_encoder;
326 int type; 328 int type;
327 329
328 if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { 330 if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
329 if (connector->force == DRM_FORCE_ON_DIGITAL) 331 if (connector->force == DRM_FORCE_ON_DIGITAL)
330 type = OUTPUT_TMDS; 332 type = OUTPUT_TMDS;
331 else 333 else
@@ -335,7 +337,7 @@ nouveau_connector_force(struct drm_connector *connector)
335 337
336 nv_encoder = find_encoder_by_type(connector, type); 338 nv_encoder = find_encoder_by_type(connector, type);
337 if (!nv_encoder) { 339 if (!nv_encoder) {
338 NV_ERROR(dev, "can't find encoder to force %s on!\n", 340 NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
339 drm_get_connector_name(connector)); 341 drm_get_connector_name(connector));
340 connector->status = connector_status_disconnected; 342 connector->status = connector_status_disconnected;
341 return; 343 return;
@@ -369,7 +371,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
369 } 371 }
370 372
371 /* LVDS always needs gpu scaling */ 373 /* LVDS always needs gpu scaling */
372 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && 374 if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS &&
373 value == DRM_MODE_SCALE_NONE) 375 value == DRM_MODE_SCALE_NONE)
374 return -EINVAL; 376 return -EINVAL;
375 377
@@ -535,7 +537,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
535 /* If we're not LVDS, destroy the previous native mode, the attached 537 /* If we're not LVDS, destroy the previous native mode, the attached
536 * monitor could have changed. 538 * monitor could have changed.
537 */ 539 */
538 if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && 540 if (nv_connector->dcb->type != DCB_CONNECTOR_LVDS &&
539 nv_connector->native_mode) { 541 nv_connector->native_mode) {
540 drm_mode_destroy(dev, nv_connector->native_mode); 542 drm_mode_destroy(dev, nv_connector->native_mode);
541 nv_connector->native_mode = NULL; 543 nv_connector->native_mode = NULL;
@@ -563,7 +565,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
563 ret = get_slave_funcs(nv_encoder)-> 565 ret = get_slave_funcs(nv_encoder)->
564 get_modes(to_drm_encoder(nv_encoder), connector); 566 get_modes(to_drm_encoder(nv_encoder), connector);
565 567
566 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) 568 if (nv_encoder->dcb->type == OUTPUT_LVDS)
567 ret += nouveau_connector_scaler_modes_add(connector); 569 ret += nouveau_connector_scaler_modes_add(connector);
568 570
569 return ret; 571 return ret;
@@ -613,6 +615,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
613 615
614 clock *= 3; 616 clock *= 3;
615 break; 617 break;
618 default:
619 BUG_ON(1);
620 return MODE_BAD;
616 } 621 }
617 622
618 if (clock < min_clock) 623 if (clock < min_clock)
@@ -680,7 +685,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
680 /* Firstly try getting EDID over DDC, if allowed and I2C channel 685 /* Firstly try getting EDID over DDC, if allowed and I2C channel
681 * is available. 686 * is available.
682 */ 687 */
683 if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf) 688 if (!dev_priv->vbios.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
684 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); 689 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
685 690
686 if (i2c) { 691 if (i2c) {
@@ -695,7 +700,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
695 */ 700 */
696 if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) && 701 if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
697 (nv_encoder->dcb->lvdsconf.use_straps_for_mode || 702 (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
698 dev_priv->VBIOS.pub.fp_no_ddc)) { 703 dev_priv->vbios.fp_no_ddc)) {
699 nv_connector->native_mode = drm_mode_duplicate(dev, &native); 704 nv_connector->native_mode = drm_mode_duplicate(dev, &native);
700 goto out; 705 goto out;
701 } 706 }
@@ -704,7 +709,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
704 * stored for the panel stored in them. 709 * stored for the panel stored in them.
705 */ 710 */
706 if (!nv_connector->edid && !nv_connector->native_mode && 711 if (!nv_connector->edid && !nv_connector->native_mode &&
707 !dev_priv->VBIOS.pub.fp_no_ddc) { 712 !dev_priv->vbios.fp_no_ddc) {
708 struct edid *edid = 713 struct edid *edid =
709 (struct edid *)nouveau_bios_embedded_edid(dev); 714 (struct edid *)nouveau_bios_embedded_edid(dev);
710 if (edid) { 715 if (edid) {
@@ -739,46 +744,66 @@ out:
739} 744}
740 745
741int 746int
742nouveau_connector_create(struct drm_device *dev, int index, int type) 747nouveau_connector_create(struct drm_device *dev,
748 struct dcb_connector_table_entry *dcb)
743{ 749{
744 struct drm_nouveau_private *dev_priv = dev->dev_private; 750 struct drm_nouveau_private *dev_priv = dev->dev_private;
745 struct nouveau_connector *nv_connector = NULL; 751 struct nouveau_connector *nv_connector = NULL;
746 struct drm_connector *connector; 752 struct drm_connector *connector;
747 struct drm_encoder *encoder; 753 struct drm_encoder *encoder;
748 int ret; 754 int ret, type;
749 755
750 NV_DEBUG_KMS(dev, "\n"); 756 NV_DEBUG_KMS(dev, "\n");
751 757
752 nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); 758 switch (dcb->type) {
753 if (!nv_connector) 759 case DCB_CONNECTOR_NONE:
754 return -ENOMEM; 760 return 0;
755 nv_connector->dcb = nouveau_bios_connector_entry(dev, index); 761 case DCB_CONNECTOR_VGA:
756 connector = &nv_connector->base;
757
758 switch (type) {
759 case DRM_MODE_CONNECTOR_VGA:
760 NV_INFO(dev, "Detected a VGA connector\n"); 762 NV_INFO(dev, "Detected a VGA connector\n");
763 type = DRM_MODE_CONNECTOR_VGA;
761 break; 764 break;
762 case DRM_MODE_CONNECTOR_DVID: 765 case DCB_CONNECTOR_TV_0:
763 NV_INFO(dev, "Detected a DVI-D connector\n"); 766 case DCB_CONNECTOR_TV_1:
767 case DCB_CONNECTOR_TV_3:
768 NV_INFO(dev, "Detected a TV connector\n");
769 type = DRM_MODE_CONNECTOR_TV;
764 break; 770 break;
765 case DRM_MODE_CONNECTOR_DVII: 771 case DCB_CONNECTOR_DVI_I:
766 NV_INFO(dev, "Detected a DVI-I connector\n"); 772 NV_INFO(dev, "Detected a DVI-I connector\n");
773 type = DRM_MODE_CONNECTOR_DVII;
767 break; 774 break;
768 case DRM_MODE_CONNECTOR_LVDS: 775 case DCB_CONNECTOR_DVI_D:
769 NV_INFO(dev, "Detected a LVDS connector\n"); 776 NV_INFO(dev, "Detected a DVI-D connector\n");
777 type = DRM_MODE_CONNECTOR_DVID;
770 break; 778 break;
771 case DRM_MODE_CONNECTOR_TV: 779 case DCB_CONNECTOR_HDMI_0:
772 NV_INFO(dev, "Detected a TV connector\n"); 780 case DCB_CONNECTOR_HDMI_1:
781 NV_INFO(dev, "Detected a HDMI connector\n");
782 type = DRM_MODE_CONNECTOR_HDMIA;
783 break;
784 case DCB_CONNECTOR_LVDS:
785 NV_INFO(dev, "Detected a LVDS connector\n");
786 type = DRM_MODE_CONNECTOR_LVDS;
773 break; 787 break;
774 case DRM_MODE_CONNECTOR_DisplayPort: 788 case DCB_CONNECTOR_DP:
775 NV_INFO(dev, "Detected a DisplayPort connector\n"); 789 NV_INFO(dev, "Detected a DisplayPort connector\n");
790 type = DRM_MODE_CONNECTOR_DisplayPort;
776 break; 791 break;
777 default: 792 case DCB_CONNECTOR_eDP:
778 NV_ERROR(dev, "Unknown connector, this is not good.\n"); 793 NV_INFO(dev, "Detected an eDP connector\n");
794 type = DRM_MODE_CONNECTOR_eDP;
779 break; 795 break;
796 default:
797 NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
798 return -EINVAL;
780 } 799 }
781 800
801 nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
802 if (!nv_connector)
803 return -ENOMEM;
804 nv_connector->dcb = dcb;
805 connector = &nv_connector->base;
806
782 /* defaults, will get overridden in detect() */ 807 /* defaults, will get overridden in detect() */
783 connector->interlace_allowed = false; 808 connector->interlace_allowed = false;
784 connector->doublescan_allowed = false; 809 connector->doublescan_allowed = false;
@@ -786,55 +811,65 @@ nouveau_connector_create(struct drm_device *dev, int index, int type)
786 drm_connector_init(dev, connector, &nouveau_connector_funcs, type); 811 drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
787 drm_connector_helper_add(connector, &nouveau_connector_helper_funcs); 812 drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
788 813
814 /* attach encoders */
815 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
816 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
817
818 if (nv_encoder->dcb->connector != dcb->index)
819 continue;
820
821 if (get_slave_funcs(nv_encoder))
822 get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
823
824 drm_mode_connector_attach_encoder(connector, encoder);
825 }
826
827 if (!connector->encoder_ids[0]) {
828 NV_WARN(dev, " no encoders, ignoring\n");
829 drm_connector_cleanup(connector);
830 kfree(connector);
831 return 0;
832 }
833
789 /* Init DVI-I specific properties */ 834 /* Init DVI-I specific properties */
790 if (type == DRM_MODE_CONNECTOR_DVII) { 835 if (dcb->type == DCB_CONNECTOR_DVI_I) {
791 drm_mode_create_dvi_i_properties(dev); 836 drm_mode_create_dvi_i_properties(dev);
792 drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0); 837 drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
793 drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0); 838 drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
794 } 839 }
795 840
796 if (type != DRM_MODE_CONNECTOR_LVDS) 841 if (dcb->type != DCB_CONNECTOR_LVDS)
797 nv_connector->use_dithering = false; 842 nv_connector->use_dithering = false;
798 843
799 if (type == DRM_MODE_CONNECTOR_DVID || 844 switch (dcb->type) {
800 type == DRM_MODE_CONNECTOR_DVII || 845 case DCB_CONNECTOR_VGA:
801 type == DRM_MODE_CONNECTOR_LVDS || 846 if (dev_priv->card_type >= NV_50) {
802 type == DRM_MODE_CONNECTOR_DisplayPort) {
803 nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
804
805 drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property,
806 nv_connector->scaling_mode);
807 drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property,
808 nv_connector->use_dithering ? DRM_MODE_DITHERING_ON
809 : DRM_MODE_DITHERING_OFF);
810
811 } else {
812 nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
813
814 if (type == DRM_MODE_CONNECTOR_VGA &&
815 dev_priv->card_type >= NV_50) {
816 drm_connector_attach_property(connector, 847 drm_connector_attach_property(connector,
817 dev->mode_config.scaling_mode_property, 848 dev->mode_config.scaling_mode_property,
818 nv_connector->scaling_mode); 849 nv_connector->scaling_mode);
819 } 850 }
820 } 851 /* fall-through */
821 852 case DCB_CONNECTOR_TV_0:
822 /* attach encoders */ 853 case DCB_CONNECTOR_TV_1:
823 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 854 case DCB_CONNECTOR_TV_3:
824 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 855 nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
825 856 break;
826 if (nv_encoder->dcb->connector != index) 857 default:
827 continue; 858 nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
828
829 if (get_slave_funcs(nv_encoder))
830 get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
831 859
832 drm_mode_connector_attach_encoder(connector, encoder); 860 drm_connector_attach_property(connector,
861 dev->mode_config.scaling_mode_property,
862 nv_connector->scaling_mode);
863 drm_connector_attach_property(connector,
864 dev->mode_config.dithering_mode_property,
865 nv_connector->use_dithering ?
866 DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
867 break;
833 } 868 }
834 869
835 drm_sysfs_connector_add(connector); 870 drm_sysfs_connector_add(connector);
836 871
837 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { 872 if (dcb->type == DCB_CONNECTOR_LVDS) {
838 ret = nouveau_connector_create_lvds(dev, connector); 873 ret = nouveau_connector_create_lvds(dev, connector);
839 if (ret) { 874 if (ret) {
840 connector->funcs->destroy(connector); 875 connector->funcs->destroy(connector);
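
nouveau_connector_create() now keys off the DCB connector type from the VBIOS table instead of a pre-translated DRM type; the switch above does the translation. The table-driven summary below restates that mapping using the DCB_CONNECTOR_* values declared in nouveau_bios.h; the strings stand in for the corresponding DRM_MODE_CONNECTOR_* constants and the map[] array is local to the sketch.

/* Stand-alone summary of the DCB -> DRM connector type translation done
 * in nouveau_connector_create() above. */
#include <stdio.h>

struct dcb_to_drm {
	unsigned char dcb_type;   /* DCB_CONNECTOR_* value from nouveau_bios.h */
	const char *drm_type;     /* DRM connector type it is created as */
};

static const struct dcb_to_drm map[] = {
	{ 0x00, "VGA"         },  /* DCB_CONNECTOR_VGA    */
	{ 0x10, "TV"          },  /* DCB_CONNECTOR_TV_0   */
	{ 0x11, "TV"          },  /* DCB_CONNECTOR_TV_1   */
	{ 0x13, "TV"          },  /* DCB_CONNECTOR_TV_3   */
	{ 0x30, "DVI-I"       },  /* DCB_CONNECTOR_DVI_I  */
	{ 0x31, "DVI-D"       },  /* DCB_CONNECTOR_DVI_D  */
	{ 0x40, "LVDS"        },  /* DCB_CONNECTOR_LVDS   */
	{ 0x46, "DisplayPort" },  /* DCB_CONNECTOR_DP     */
	{ 0x47, "eDP"         },  /* DCB_CONNECTOR_eDP    */
	{ 0x60, "HDMI-A"      },  /* DCB_CONNECTOR_HDMI_0 */
	{ 0x61, "HDMI-A"      },  /* DCB_CONNECTOR_HDMI_1 */
};

int main(void)
{
	/* 0xff (DCB_CONNECTOR_NONE) is silently skipped and any other
	 * unknown code makes nouveau_connector_create() return -EINVAL. */
	for (unsigned i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		printf("0x%02x -> %s\n", map[i].dcb_type, map[i].drm_type);
	return 0;
}

VGA and the TV types also default to DRM_MODE_SCALE_NONE in the new code, while everything else keeps GPU scaling enabled and gets the scaling/dithering properties attached.
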
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 728b8090e5ff..4ef38abc2d9c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -49,6 +49,7 @@ static inline struct nouveau_connector *nouveau_connector(
49 return container_of(con, struct nouveau_connector, base); 49 return container_of(con, struct nouveau_connector, base);
50} 50}
51 51
52int nouveau_connector_create(struct drm_device *dev, int i2c_index, int type); 52int nouveau_connector_create(struct drm_device *,
53 struct dcb_connector_table_entry *);
53 54
54#endif /* __NOUVEAU_CONNECTOR_H__ */ 55#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index d79db3698f16..8ff9ef5d4b47 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -47,12 +47,23 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
47 seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2); 47 seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
48 seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2); 48 seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
49 seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2); 49 seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
50 if (chan->dma.ib_max) {
51 seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max);
52 seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put);
53 seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free);
54 }
50 55
51 seq_printf(m, "gpu fifo state:\n"); 56 seq_printf(m, "gpu fifo state:\n");
52 seq_printf(m, " get: 0x%08x\n", 57 seq_printf(m, " get: 0x%08x\n",
53 nvchan_rd32(chan, chan->user_get)); 58 nvchan_rd32(chan, chan->user_get));
54 seq_printf(m, " put: 0x%08x\n", 59 seq_printf(m, " put: 0x%08x\n",
55 nvchan_rd32(chan, chan->user_put)); 60 nvchan_rd32(chan, chan->user_put));
61 if (chan->dma.ib_max) {
62 seq_printf(m, " ib get: 0x%08x\n",
63 nvchan_rd32(chan, 0x88));
64 seq_printf(m, " ib put: 0x%08x\n",
65 nvchan_rd32(chan, 0x8c));
66 }
56 67
57 seq_printf(m, "last fence : %d\n", chan->fence.sequence); 68 seq_printf(m, "last fence : %d\n", chan->fence.sequence);
58 seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack); 69 seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
@@ -133,9 +144,22 @@ nouveau_debugfs_memory_info(struct seq_file *m, void *data)
133 return 0; 144 return 0;
134} 145}
135 146
147static int
148nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
149{
150 struct drm_info_node *node = (struct drm_info_node *) m->private;
151 struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
152 int i;
153
154 for (i = 0; i < dev_priv->vbios.length; i++)
155 seq_printf(m, "%c", dev_priv->vbios.data[i]);
156 return 0;
157}
158
136static struct drm_info_list nouveau_debugfs_list[] = { 159static struct drm_info_list nouveau_debugfs_list[] = {
137 { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, 160 { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
138 { "memory", nouveau_debugfs_memory_info, 0, NULL }, 161 { "memory", nouveau_debugfs_memory_info, 0, NULL },
162 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
139}; 163};
140#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) 164#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
141 165
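
The new vbios.rom debugfs node streams the cached BIOS image one byte at a time through seq_printf(). Once it exists, the image can be pulled from user space like any other file; the minimal reader below assumes debugfs is mounted at /sys/kernel/debug and that the card is DRM minor 0 (the usual DRM debugfs location), so adjust the path for your setup.

/* Minimal user-space reader for the vbios.rom debugfs node added above.
 * Path and minor number are assumptions, not guaranteed by the patch. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/vbios.rom";
	unsigned char buf[4096];
	size_t n, total = 0;
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		total += n;
	fclose(f);

	printf("read %zu bytes of VBIOS image\n", total);
	return EXIT_SUCCESS;
}
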
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index dfc94391d71e..cf1c5c0a0abe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -39,11 +39,8 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
39 if (drm_fb->fbdev) 39 if (drm_fb->fbdev)
40 nouveau_fbcon_remove(dev, drm_fb); 40 nouveau_fbcon_remove(dev, drm_fb);
41 41
42 if (fb->nvbo) { 42 if (fb->nvbo)
43 mutex_lock(&dev->struct_mutex); 43 drm_gem_object_unreference_unlocked(fb->nvbo->gem);
44 drm_gem_object_unreference(fb->nvbo->gem);
45 mutex_unlock(&dev->struct_mutex);
46 }
47 44
48 drm_framebuffer_cleanup(drm_fb); 45 drm_framebuffer_cleanup(drm_fb);
49 kfree(fb); 46 kfree(fb);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 50d9e67745af..c8482a108a78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -32,7 +32,22 @@
32void 32void
33nouveau_dma_pre_init(struct nouveau_channel *chan) 33nouveau_dma_pre_init(struct nouveau_channel *chan)
34{ 34{
35 chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2; 35 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
36 struct nouveau_bo *pushbuf = chan->pushbuf_bo;
37
38 if (dev_priv->card_type == NV_50) {
39 const int ib_size = pushbuf->bo.mem.size / 2;
40
41 chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
42 chan->dma.ib_max = (ib_size / 8) - 1;
43 chan->dma.ib_put = 0;
44 chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
45
46 chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
47 } else {
48 chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
49 }
50
36 chan->dma.put = 0; 51 chan->dma.put = 0;
37 chan->dma.cur = chan->dma.put; 52 chan->dma.cur = chan->dma.put;
38 chan->dma.free = chan->dma.max - chan->dma.cur; 53 chan->dma.free = chan->dma.max - chan->dma.cur;
@@ -162,12 +177,101 @@ READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
162 return (val - chan->pushbuf_base) >> 2; 177 return (val - chan->pushbuf_base) >> 2;
163} 178}
164 179
180void
181nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
182 int delta, int length)
183{
184 struct nouveau_bo *pb = chan->pushbuf_bo;
185 uint64_t offset = bo->bo.offset + delta;
186 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
187
188 BUG_ON(chan->dma.ib_free < 1);
189 nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
190 nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
191
192 chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
193 nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
194 chan->dma.ib_free--;
195}
196
197static int
198nv50_dma_push_wait(struct nouveau_channel *chan, int count)
199{
200 uint32_t cnt = 0, prev_get = 0;
201
202 while (chan->dma.ib_free < count) {
203 uint32_t get = nvchan_rd32(chan, 0x88);
204 if (get != prev_get) {
205 prev_get = get;
206 cnt = 0;
207 }
208
209 if ((++cnt & 0xff) == 0) {
210 DRM_UDELAY(1);
211 if (cnt > 100000)
212 return -EBUSY;
213 }
214
215 chan->dma.ib_free = get - chan->dma.ib_put;
216 if (chan->dma.ib_free <= 0)
217 chan->dma.ib_free += chan->dma.ib_max + 1;
218 }
219
220 return 0;
221}
222
223static int
224nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
225{
226 uint32_t cnt = 0, prev_get = 0;
227 int ret;
228
229 ret = nv50_dma_push_wait(chan, slots + 1);
230 if (unlikely(ret))
231 return ret;
232
233 while (chan->dma.free < count) {
234 int get = READ_GET(chan, &prev_get, &cnt);
235 if (unlikely(get < 0)) {
236 if (get == -EINVAL)
237 continue;
238
239 return get;
240 }
241
242 if (get <= chan->dma.cur) {
243 chan->dma.free = chan->dma.max - chan->dma.cur;
244 if (chan->dma.free >= count)
245 break;
246
247 FIRE_RING(chan);
248 do {
249 get = READ_GET(chan, &prev_get, &cnt);
250 if (unlikely(get < 0)) {
251 if (get == -EINVAL)
252 continue;
253 return get;
254 }
255 } while (get == 0);
256 chan->dma.cur = 0;
257 chan->dma.put = 0;
258 }
259
260 chan->dma.free = get - chan->dma.cur - 1;
261 }
262
263 return 0;
264}
265
165int 266int
166nouveau_dma_wait(struct nouveau_channel *chan, int size) 267nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
167{ 268{
168 uint32_t prev_get = 0, cnt = 0; 269 uint32_t prev_get = 0, cnt = 0;
169 int get; 270 int get;
170 271
272 if (chan->dma.ib_max)
273 return nv50_dma_wait(chan, slots, size);
274
171 while (chan->dma.free < size) { 275 while (chan->dma.free < size) {
172 get = READ_GET(chan, &prev_get, &cnt); 276 get = READ_GET(chan, &prev_get, &cnt);
173 if (unlikely(get == -EBUSY)) 277 if (unlikely(get == -EBUSY))
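
The new NV50 path splits the pushbuf into a data half and an indirect-buffer (IB) half: nouveau_dma_pre_init() reserves half the buffer for IB slots (ib_max = ib_size / 8 - 1), and nv50_dma_push() packs each submission as two 32-bit words, the low address followed by the high address bits OR'd with length << 8, wrapping PUT with & ib_max. The small ring model below mirrors that encoding and the GET/PUT free-slot arithmetic from nv50_dma_push_wait(); IB_MAX, ib_push() and ib_update_free() are names local to the sketch.

/* Minimal user-space model of the NV50 indirect-buffer ring introduced
 * above: two 32-bit words per slot, power-of-two wrap, free space derived
 * from GET - PUT modulo the ring size. */
#include <stdint.h>
#include <stdio.h>

#define IB_MAX 7                       /* 8-slot ring -> ib_max = 7 */

static uint32_t ib[(IB_MAX + 1) * 2];  /* two words per slot */
static int ib_put, ib_free = IB_MAX;

static void ib_push(uint64_t offset, int length)
{
	int ip = ib_put * 2;

	/* Same packing as nv50_dma_push(): low word, then high bits | len << 8. */
	ib[ip + 0] = (uint32_t)offset;
	ib[ip + 1] = (uint32_t)(offset >> 32) | (uint32_t)length << 8;

	ib_put = (ib_put + 1) & IB_MAX;    /* power-of-two wrap */
	ib_free--;
}

static void ib_update_free(int get)
{
	/* Mirror of nv50_dma_push_wait(): free slots between GET and PUT. */
	ib_free = get - ib_put;
	if (ib_free <= 0)
		ib_free += IB_MAX + 1;
}

int main(void)
{
	ib_push(0x20001000ull, 0x400);     /* pretend buffer at 0x20001000, 1 KiB */
	ib_push(0x120002000ull, 0x80);
	printf("slot 0: %08x %08x\n", (unsigned)ib[0], (unsigned)ib[1]);
	printf("slot 1: %08x %08x\n", (unsigned)ib[2], (unsigned)ib[3]);

	ib_update_free(0);                 /* pretend the GPU GET is still at slot 0 */
	printf("put=%d free=%d\n", ib_put, ib_free);
	return 0;
}

In the driver the new PUT value is then written to channel register 0x8c, and 0x88 is polled as GET, which is what the debugfs "ib get/put" output added earlier reports.
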
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index dabfd655f93e..8b05c15866d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -31,6 +31,9 @@
31#define NOUVEAU_DMA_DEBUG 0 31#define NOUVEAU_DMA_DEBUG 0
32#endif 32#endif
33 33
34void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
35 int delta, int length);
36
34/* 37/*
35 * There's a hw race condition where you can't jump to your PUT offset, 38 * There's a hw race condition where you can't jump to your PUT offset,
36 * to avoid this we jump to offset + SKIPS and fill the difference with 39 * to avoid this we jump to offset + SKIPS and fill the difference with
@@ -96,13 +99,11 @@ enum {
96static __must_check inline int 99static __must_check inline int
97RING_SPACE(struct nouveau_channel *chan, int size) 100RING_SPACE(struct nouveau_channel *chan, int size)
98{ 101{
99 if (chan->dma.free < size) { 102 int ret;
100 int ret;
101 103
102 ret = nouveau_dma_wait(chan, size); 104 ret = nouveau_dma_wait(chan, 1, size);
103 if (ret) 105 if (ret)
104 return ret; 106 return ret;
105 }
106 107
107 chan->dma.free -= size; 108 chan->dma.free -= size;
108 return 0; 109 return 0;
@@ -146,7 +147,13 @@ FIRE_RING(struct nouveau_channel *chan)
146 return; 147 return;
147 chan->accel_done = true; 148 chan->accel_done = true;
148 149
149 WRITE_PUT(chan->dma.cur); 150 if (chan->dma.ib_max) {
151 nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2,
152 (chan->dma.cur - chan->dma.put) << 2);
153 } else {
154 WRITE_PUT(chan->dma.cur);
155 }
156
150 chan->dma.put = chan->dma.cur; 157 chan->dma.put = chan->dma.cur;
151} 158}
152 159
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index da3b93b84502..30cc09e8a709 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -75,11 +75,11 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
75int nouveau_ignorelid = 0; 75int nouveau_ignorelid = 0;
76module_param_named(ignorelid, nouveau_ignorelid, int, 0400); 76module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
77 77
78MODULE_PARM_DESC(noagp, "Disable all acceleration"); 78MODULE_PARM_DESC(noaccel, "Disable all acceleration");
79int nouveau_noaccel = 0; 79int nouveau_noaccel = 0;
80module_param_named(noaccel, nouveau_noaccel, int, 0400); 80module_param_named(noaccel, nouveau_noaccel, int, 0400);
81 81
82MODULE_PARM_DESC(noagp, "Disable fbcon acceleration"); 82MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
83int nouveau_nofbaccel = 0; 83int nouveau_nofbaccel = 0;
84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); 84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
85 85
@@ -135,7 +135,7 @@ nouveau_pci_remove(struct pci_dev *pdev)
135 drm_put_dev(dev); 135 drm_put_dev(dev);
136} 136}
137 137
138static int 138int
139nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) 139nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
140{ 140{
141 struct drm_device *dev = pci_get_drvdata(pdev); 141 struct drm_device *dev = pci_get_drvdata(pdev);
@@ -233,7 +233,7 @@ out_abort:
233 return ret; 233 return ret;
234} 234}
235 235
236static int 236int
237nouveau_pci_resume(struct pci_dev *pdev) 237nouveau_pci_resume(struct pci_dev *pdev)
238{ 238{
239 struct drm_device *dev = pci_get_drvdata(pdev); 239 struct drm_device *dev = pci_get_drvdata(pdev);
@@ -402,8 +402,10 @@ static int __init nouveau_init(void)
402 nouveau_modeset = 1; 402 nouveau_modeset = 1;
403 } 403 }
404 404
405 if (nouveau_modeset == 1) 405 if (nouveau_modeset == 1) {
406 driver.driver_features |= DRIVER_MODESET; 406 driver.driver_features |= DRIVER_MODESET;
407 nouveau_register_dsm_handler();
408 }
407 409
408 return drm_init(&driver); 410 return drm_init(&driver);
409} 411}
@@ -411,6 +413,7 @@ static int __init nouveau_init(void)
411static void __exit nouveau_exit(void) 413static void __exit nouveau_exit(void)
412{ 414{
413 drm_exit(&driver); 415 drm_exit(&driver);
416 nouveau_unregister_dsm_handler();
414} 417}
415 418
416module_init(nouveau_init); 419module_init(nouveau_init);
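
Two of the MODULE_PARM_DESC() lines above had been copy-pasted with the wrong parameter name ("noagp"); the fix matters because modinfo keys each description to the name passed to MODULE_PARM_DESC(), independently of module_param_named(). The stripped-down sketch below shows the pairing with a hypothetical "mydrv" module and options, not nouveau's real option set.

/* Sketch of the module_param_named()/MODULE_PARM_DESC() pairing fixed
 * above. Hypothetical "mydrv" module, shown only to illustrate that the
 * description must use the exposed parameter name. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int mydrv_noaccel;
module_param_named(noaccel, mydrv_noaccel, int, 0400);
MODULE_PARM_DESC(noaccel, "Disable all acceleration");

static int mydrv_nofbaccel;
module_param_named(nofbaccel, mydrv_nofbaccel, int, 0400);
MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");

static int __init mydrv_init(void)
{
	pr_info("mydrv: noaccel=%d nofbaccel=%d\n", mydrv_noaccel, mydrv_nofbaccel);
	return 0;
}

static void __exit mydrv_exit(void)
{
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");
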
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c15ef37b71c..5f8d987af363 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -34,7 +34,7 @@
34 34
35#define DRIVER_MAJOR 0 35#define DRIVER_MAJOR 0
36#define DRIVER_MINOR 0 36#define DRIVER_MINOR 0
37#define DRIVER_PATCHLEVEL 15 37#define DRIVER_PATCHLEVEL 16
38 38
39#define NOUVEAU_FAMILY 0x0000FFFF 39#define NOUVEAU_FAMILY 0x0000FFFF
40#define NOUVEAU_FLAGS 0xFFFF0000 40#define NOUVEAU_FLAGS 0xFFFF0000
@@ -83,6 +83,7 @@ struct nouveau_bo {
83 struct drm_file *reserved_by; 83 struct drm_file *reserved_by;
84 struct list_head entry; 84 struct list_head entry;
85 int pbbo_index; 85 int pbbo_index;
86 bool validate_mapped;
86 87
87 struct nouveau_channel *channel; 88 struct nouveau_channel *channel;
88 89
@@ -239,6 +240,11 @@ struct nouveau_channel {
239 int cur; 240 int cur;
240 int put; 241 int put;
241 /* access via pushbuf_bo */ 242 /* access via pushbuf_bo */
243
244 int ib_base;
245 int ib_max;
246 int ib_free;
247 int ib_put;
242 } dma; 248 } dma;
243 249
244 uint32_t sw_subchannel[8]; 250 uint32_t sw_subchannel[8];
@@ -533,6 +539,9 @@ struct drm_nouveau_private {
533 struct nouveau_engine engine; 539 struct nouveau_engine engine;
534 struct nouveau_channel *channel; 540 struct nouveau_channel *channel;
535 541
542 /* For PFIFO and PGRAPH. */
543 spinlock_t context_switch_lock;
544
536 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ 545 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
537 struct nouveau_gpuobj *ramht; 546 struct nouveau_gpuobj *ramht;
538 uint32_t ramin_rsvd_vram; 547 uint32_t ramin_rsvd_vram;
@@ -596,8 +605,7 @@ struct drm_nouveau_private {
596 605
597 struct list_head gpuobj_list; 606 struct list_head gpuobj_list;
598 607
599 struct nvbios VBIOS; 608 struct nvbios vbios;
600 struct nouveau_bios_info *vbios;
601 609
602 struct nv04_mode_state mode_reg; 610 struct nv04_mode_state mode_reg;
603 struct nv04_mode_state saved_reg; 611 struct nv04_mode_state saved_reg;
@@ -614,7 +622,6 @@ struct drm_nouveau_private {
614 } susres; 622 } susres;
615 623
616 struct backlight_device *backlight; 624 struct backlight_device *backlight;
617 bool acpi_dsm;
618 625
619 struct nouveau_channel *evo; 626 struct nouveau_channel *evo;
620 627
@@ -682,6 +689,9 @@ extern int nouveau_ignorelid;
682extern int nouveau_nofbaccel; 689extern int nouveau_nofbaccel;
683extern int nouveau_noaccel; 690extern int nouveau_noaccel;
684 691
692extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
693extern int nouveau_pci_resume(struct pci_dev *pdev);
694
685/* nouveau_state.c */ 695/* nouveau_state.c */
686extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); 696extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
687extern int nouveau_load(struct drm_device *, unsigned long flags); 697extern int nouveau_load(struct drm_device *, unsigned long flags);
@@ -696,12 +706,6 @@ extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
696 uint32_t reg, uint32_t mask, uint32_t val); 706 uint32_t reg, uint32_t mask, uint32_t val);
697extern bool nouveau_wait_for_idle(struct drm_device *); 707extern bool nouveau_wait_for_idle(struct drm_device *);
698extern int nouveau_card_init(struct drm_device *); 708extern int nouveau_card_init(struct drm_device *);
699extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
700 struct drm_file *);
701extern int nouveau_ioctl_suspend(struct drm_device *, void *data,
702 struct drm_file *);
703extern int nouveau_ioctl_resume(struct drm_device *, void *data,
704 struct drm_file *);
705 709
706/* nouveau_mem.c */ 710/* nouveau_mem.c */
707extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, 711extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
@@ -845,21 +849,15 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
845/* nouveau_dma.c */ 849/* nouveau_dma.c */
846extern void nouveau_dma_pre_init(struct nouveau_channel *); 850extern void nouveau_dma_pre_init(struct nouveau_channel *);
847extern int nouveau_dma_init(struct nouveau_channel *); 851extern int nouveau_dma_init(struct nouveau_channel *);
848extern int nouveau_dma_wait(struct nouveau_channel *, int size); 852extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
849 853
850/* nouveau_acpi.c */ 854/* nouveau_acpi.c */
851#ifdef CONFIG_ACPI 855#if defined(CONFIG_ACPI)
852extern int nouveau_hybrid_setup(struct drm_device *dev); 856void nouveau_register_dsm_handler(void);
853extern bool nouveau_dsm_probe(struct drm_device *dev); 857void nouveau_unregister_dsm_handler(void);
854#else 858#else
855static inline int nouveau_hybrid_setup(struct drm_device *dev) 859static inline void nouveau_register_dsm_handler(void) {}
856{ 860static inline void nouveau_unregister_dsm_handler(void) {}
857 return 0;
858}
859static inline bool nouveau_dsm_probe(struct drm_device *dev)
860{
861 return false;
862}
863#endif 861#endif
864 862
865/* nouveau_backlight.c */ 863/* nouveau_backlight.c */
@@ -1027,6 +1025,7 @@ extern void nv50_graph_destroy_context(struct nouveau_channel *);
1027extern int nv50_graph_load_context(struct nouveau_channel *); 1025extern int nv50_graph_load_context(struct nouveau_channel *);
1028extern int nv50_graph_unload_context(struct drm_device *); 1026extern int nv50_graph_unload_context(struct drm_device *);
1029extern void nv50_graph_context_switch(struct drm_device *); 1027extern void nv50_graph_context_switch(struct drm_device *);
1028extern int nv50_grctx_init(struct nouveau_grctx *);
1030 1029
1031/* nouveau_grctx.c */ 1030/* nouveau_grctx.c */
1032extern int nouveau_grctx_prog_load(struct drm_device *); 1031extern int nouveau_grctx_prog_load(struct drm_device *);
@@ -1152,16 +1151,6 @@ extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
1152 struct drm_file *); 1151 struct drm_file *);
1153extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *, 1152extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
1154 struct drm_file *); 1153 struct drm_file *);
1155extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
1156 struct drm_file *);
1157extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
1158 struct drm_file *);
1159extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
1160 struct drm_file *);
1161extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
1162 struct drm_file *);
1163extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
1164 struct drm_file *);
1165extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *, 1154extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
1166 struct drm_file *); 1155 struct drm_file *);
1167extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *, 1156extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index ea879a2efef3..68cedd9194fe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -36,6 +36,7 @@
36#include <linux/fb.h> 36#include <linux/fb.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/screen_info.h> 38#include <linux/screen_info.h>
39#include <linux/vga_switcheroo.h>
39 40
40#include "drmP.h" 41#include "drmP.h"
41#include "drm.h" 42#include "drm.h"
@@ -370,6 +371,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
370 nvbo->bo.offset, nvbo); 371 nvbo->bo.offset, nvbo);
371 372
372 mutex_unlock(&dev->struct_mutex); 373 mutex_unlock(&dev->struct_mutex);
374 vga_switcheroo_client_fb_set(dev->pdev, info);
373 return 0; 375 return 0;
374 376
375out_unref: 377out_unref:
@@ -401,10 +403,8 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
401 403
402 unregister_framebuffer(info); 404 unregister_framebuffer(info);
403 nouveau_bo_unmap(nouveau_fb->nvbo); 405 nouveau_bo_unmap(nouveau_fb->nvbo);
404 mutex_lock(&dev->struct_mutex); 406 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
405 drm_gem_object_unreference(nouveau_fb->nvbo->gem);
406 nouveau_fb->nvbo = NULL; 407 nouveau_fb->nvbo = NULL;
407 mutex_unlock(&dev->struct_mutex);
408 if (par) 408 if (par)
409 drm_fb_helper_free(&par->helper); 409 drm_fb_helper_free(&par->helper);
410 framebuffer_release(info); 410 framebuffer_release(info);
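The teardown path above now drops its GEM reference with drm_gem_object_unreference_unlocked() instead of wrapping drm_gem_object_unreference() in dev->struct_mutex by hand, and the freshly created framebuffer is reported to vga_switcheroo. A rough sketch of what the _unlocked variant is assumed to do; the real helper in the DRM core may take a cheaper path when the object is not actually being freed:

    /* Simplified model of the helper, for reading the hunks that follow;
     * not the actual DRM implementation. */
    static inline void
    sketch_gem_unreference_unlocked(struct drm_gem_object *obj)
    {
            struct drm_device *dev = obj->dev;

            mutex_lock(&dev->struct_mutex);
            drm_gem_object_unreference(obj);
            mutex_unlock(&dev->struct_mutex);
    }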
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 70cc30803e3b..0d22f66f1c79 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -167,12 +167,10 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
167 167
168 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); 168 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
169out: 169out:
170 mutex_lock(&dev->struct_mutex); 170 drm_gem_object_handle_unreference_unlocked(nvbo->gem);
171 drm_gem_object_handle_unreference(nvbo->gem);
172 mutex_unlock(&dev->struct_mutex);
173 171
174 if (ret) 172 if (ret)
175 drm_gem_object_unreference(nvbo->gem); 173 drm_gem_object_unreference_unlocked(nvbo->gem);
176 return ret; 174 return ret;
177} 175}
178 176
@@ -243,6 +241,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
243 nouveau_fence_unref((void *)&prev_fence); 241 nouveau_fence_unref((void *)&prev_fence);
244 } 242 }
245 243
244 if (unlikely(nvbo->validate_mapped)) {
245 ttm_bo_kunmap(&nvbo->kmap);
246 nvbo->validate_mapped = false;
247 }
248
246 list_del(&nvbo->entry); 249 list_del(&nvbo->entry);
247 nvbo->reserved_by = NULL; 250 nvbo->reserved_by = NULL;
248 ttm_bo_unreserve(&nvbo->bo); 251 ttm_bo_unreserve(&nvbo->bo);
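validate_fini_list() now also unmaps any buffer object the relocation path kmapped, keyed off the new nvbo->validate_mapped flag, so a map created lazily during relocation is always balanced at fini. The pairing, condensed from this file's hunks:

    /* During relocation (later hunk): map lazily and remember it. */
    if (!nvbo->kmap.virtual) {
            ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
            if (ret)
                    return ret;
            nvbo->validate_mapped = true;
    }

    /* In validate_fini_list() (hunk above): undo it exactly once. */
    if (unlikely(nvbo->validate_mapped)) {
            ttm_bo_kunmap(&nvbo->kmap);
            nvbo->validate_mapped = false;
    }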
@@ -302,11 +305,14 @@ retry:
302 if (ret == -EAGAIN) 305 if (ret == -EAGAIN)
303 ret = ttm_bo_wait_unreserved(&nvbo->bo, false); 306 ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
304 drm_gem_object_unreference(gem); 307 drm_gem_object_unreference(gem);
305 if (ret) 308 if (ret) {
309 NV_ERROR(dev, "fail reserve\n");
306 return ret; 310 return ret;
311 }
307 goto retry; 312 goto retry;
308 } 313 }
309 314
315 b->user_priv = (uint64_t)(unsigned long)nvbo;
310 nvbo->reserved_by = file_priv; 316 nvbo->reserved_by = file_priv;
311 nvbo->pbbo_index = i; 317 nvbo->pbbo_index = i;
312 if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && 318 if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -336,8 +342,10 @@ retry:
336 } 342 }
337 343
338 ret = ttm_bo_wait_cpu(&nvbo->bo, false); 344 ret = ttm_bo_wait_cpu(&nvbo->bo, false);
339 if (ret) 345 if (ret) {
346 NV_ERROR(dev, "fail wait_cpu\n");
340 return ret; 347 return ret;
348 }
341 goto retry; 349 goto retry;
342 } 350 }
343 } 351 }
@@ -351,6 +359,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
351{ 359{
352 struct drm_nouveau_gem_pushbuf_bo __user *upbbo = 360 struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
353 (void __force __user *)(uintptr_t)user_pbbo_ptr; 361 (void __force __user *)(uintptr_t)user_pbbo_ptr;
362 struct drm_device *dev = chan->dev;
354 struct nouveau_bo *nvbo; 363 struct nouveau_bo *nvbo;
355 int ret, relocs = 0; 364 int ret, relocs = 0;
356 365
@@ -362,39 +371,46 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
362 spin_lock(&nvbo->bo.lock); 371 spin_lock(&nvbo->bo.lock);
363 ret = ttm_bo_wait(&nvbo->bo, false, false, false); 372 ret = ttm_bo_wait(&nvbo->bo, false, false, false);
364 spin_unlock(&nvbo->bo.lock); 373 spin_unlock(&nvbo->bo.lock);
365 if (unlikely(ret)) 374 if (unlikely(ret)) {
375 NV_ERROR(dev, "fail wait other chan\n");
366 return ret; 376 return ret;
377 }
367 } 378 }
368 379
369 ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, 380 ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
370 b->write_domains, 381 b->write_domains,
371 b->valid_domains); 382 b->valid_domains);
372 if (unlikely(ret)) 383 if (unlikely(ret)) {
384 NV_ERROR(dev, "fail set_domain\n");
373 return ret; 385 return ret;
386 }
374 387
375 nvbo->channel = chan; 388 nvbo->channel = chan;
376 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, 389 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
377 false, false); 390 false, false);
378 nvbo->channel = NULL; 391 nvbo->channel = NULL;
379 if (unlikely(ret)) 392 if (unlikely(ret)) {
393 NV_ERROR(dev, "fail ttm_validate\n");
380 return ret; 394 return ret;
395 }
381 396
382 if (nvbo->bo.offset == b->presumed_offset && 397 if (nvbo->bo.offset == b->presumed.offset &&
383 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && 398 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
384 b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) || 399 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
385 (nvbo->bo.mem.mem_type == TTM_PL_TT && 400 (nvbo->bo.mem.mem_type == TTM_PL_TT &&
386 b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART))) 401 b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
387 continue; 402 continue;
388 403
389 if (nvbo->bo.mem.mem_type == TTM_PL_TT) 404 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
390 b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART; 405 b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
391 else 406 else
392 b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM; 407 b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
393 b->presumed_offset = nvbo->bo.offset; 408 b->presumed.offset = nvbo->bo.offset;
394 b->presumed_ok = 0; 409 b->presumed.valid = 0;
395 relocs++; 410 relocs++;
396 411
397 if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b))) 412 if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
413 &b->presumed, sizeof(b->presumed)))
398 return -EFAULT; 414 return -EFAULT;
399 } 415 }
400 416
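The per-buffer presumed state (offset, domain, valid flag) has moved into a sub-struct of drm_nouveau_gem_pushbuf_bo, and when a buffer actually moved only that sub-struct is copied back to userspace rather than the whole entry. A sketch of the layout this implies; the field order here is an assumption, the authoritative definition is in the nouveau UAPI header:

    /* Assumed shape, inferred from the field accesses in the hunk above. */
    struct drm_nouveau_gem_pushbuf_bo_presumed {
            uint32_t valid;
            uint32_t domain;
            uint64_t offset;
    };

    struct drm_nouveau_gem_pushbuf_bo {
            uint64_t user_priv;
            uint32_t handle;
            uint32_t read_domains;
            uint32_t write_domains;
            uint32_t valid_domains;
            struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
    };

    /* Only the stale part is written back: */
    if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
                         &b->presumed, sizeof(b->presumed)))
            return -EFAULT;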
@@ -408,6 +424,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
408 uint64_t user_buffers, int nr_buffers, 424 uint64_t user_buffers, int nr_buffers,
409 struct validate_op *op, int *apply_relocs) 425 struct validate_op *op, int *apply_relocs)
410{ 426{
427 struct drm_device *dev = chan->dev;
411 int ret, relocs = 0; 428 int ret, relocs = 0;
412 429
413 INIT_LIST_HEAD(&op->vram_list); 430 INIT_LIST_HEAD(&op->vram_list);
@@ -418,11 +435,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
418 return 0; 435 return 0;
419 436
420 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); 437 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
421 if (unlikely(ret)) 438 if (unlikely(ret)) {
439 NV_ERROR(dev, "validate_init\n");
422 return ret; 440 return ret;
441 }
423 442
424 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); 443 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
425 if (unlikely(ret < 0)) { 444 if (unlikely(ret < 0)) {
445 NV_ERROR(dev, "validate vram_list\n");
426 validate_fini(op, NULL); 446 validate_fini(op, NULL);
427 return ret; 447 return ret;
428 } 448 }
@@ -430,6 +450,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
430 450
431 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); 451 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
432 if (unlikely(ret < 0)) { 452 if (unlikely(ret < 0)) {
453 NV_ERROR(dev, "validate gart_list\n");
433 validate_fini(op, NULL); 454 validate_fini(op, NULL);
434 return ret; 455 return ret;
435 } 456 }
@@ -437,6 +458,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
437 458
438 ret = validate_list(chan, &op->both_list, pbbo, user_buffers); 459 ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
439 if (unlikely(ret < 0)) { 460 if (unlikely(ret < 0)) {
461 NV_ERROR(dev, "validate both_list\n");
440 validate_fini(op, NULL); 462 validate_fini(op, NULL);
441 return ret; 463 return ret;
442 } 464 }
@@ -465,59 +487,82 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
465} 487}
466 488
467static int 489static int
468nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo, 490nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
469 struct drm_nouveau_gem_pushbuf_bo *bo, 491 struct drm_nouveau_gem_pushbuf *req,
470 unsigned nr_relocs, uint64_t ptr_relocs, 492 struct drm_nouveau_gem_pushbuf_bo *bo)
471 unsigned nr_dwords, unsigned first_dword,
472 uint32_t *pushbuf, bool is_iomem)
473{ 493{
474 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; 494 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
475 struct drm_device *dev = chan->dev;
476 int ret = 0; 495 int ret = 0;
477 unsigned i; 496 unsigned i;
478 497
479 reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc)); 498 reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
480 if (IS_ERR(reloc)) 499 if (IS_ERR(reloc))
481 return PTR_ERR(reloc); 500 return PTR_ERR(reloc);
482 501
483 for (i = 0; i < nr_relocs; i++) { 502 for (i = 0; i < req->nr_relocs; i++) {
484 struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i]; 503 struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
485 struct drm_nouveau_gem_pushbuf_bo *b; 504 struct drm_nouveau_gem_pushbuf_bo *b;
505 struct nouveau_bo *nvbo;
486 uint32_t data; 506 uint32_t data;
487 507
488 if (r->bo_index >= nr_bo || r->reloc_index < first_dword || 508 if (unlikely(r->bo_index > req->nr_buffers)) {
489 r->reloc_index >= first_dword + nr_dwords) { 509 NV_ERROR(dev, "reloc bo index invalid\n");
490 NV_ERROR(dev, "Bad relocation %d\n", i);
491 NV_ERROR(dev, " bo: %d max %d\n", r->bo_index, nr_bo);
492 NV_ERROR(dev, " id: %d max %d\n", r->reloc_index, nr_dwords);
493 ret = -EINVAL; 510 ret = -EINVAL;
494 break; 511 break;
495 } 512 }
496 513
497 b = &bo[r->bo_index]; 514 b = &bo[r->bo_index];
498 if (b->presumed_ok) 515 if (b->presumed.valid)
499 continue; 516 continue;
500 517
518 if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
519 NV_ERROR(dev, "reloc container bo index invalid\n");
520 ret = -EINVAL;
521 break;
522 }
523 nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
524
525 if (unlikely(r->reloc_bo_offset + 4 >
526 nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
527 NV_ERROR(dev, "reloc outside of bo\n");
528 ret = -EINVAL;
529 break;
530 }
531
532 if (!nvbo->kmap.virtual) {
533 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
534 &nvbo->kmap);
535 if (ret) {
536 NV_ERROR(dev, "failed kmap for reloc\n");
537 break;
538 }
539 nvbo->validate_mapped = true;
540 }
541
501 if (r->flags & NOUVEAU_GEM_RELOC_LOW) 542 if (r->flags & NOUVEAU_GEM_RELOC_LOW)
502 data = b->presumed_offset + r->data; 543 data = b->presumed.offset + r->data;
503 else 544 else
504 if (r->flags & NOUVEAU_GEM_RELOC_HIGH) 545 if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
505 data = (b->presumed_offset + r->data) >> 32; 546 data = (b->presumed.offset + r->data) >> 32;
506 else 547 else
507 data = r->data; 548 data = r->data;
508 549
509 if (r->flags & NOUVEAU_GEM_RELOC_OR) { 550 if (r->flags & NOUVEAU_GEM_RELOC_OR) {
510 if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART) 551 if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
511 data |= r->tor; 552 data |= r->tor;
512 else 553 else
513 data |= r->vor; 554 data |= r->vor;
514 } 555 }
515 556
516 if (is_iomem) 557 spin_lock(&nvbo->bo.lock);
517 iowrite32_native(data, (void __force __iomem *) 558 ret = ttm_bo_wait(&nvbo->bo, false, false, false);
518 &pushbuf[r->reloc_index]); 559 spin_unlock(&nvbo->bo.lock);
519 else 560 if (ret) {
520 pushbuf[r->reloc_index] = data; 561 NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
562 break;
563 }
564
565 nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
521 } 566 }
522 567
523 kfree(reloc); 568 kfree(reloc);
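Relocations are now applied directly to the buffer object that contains them instead of to a kernel copy of the push buffer: each entry is bounds-checked against the buffer list and the target BO's size, the target is kmapped on demand (and flagged for unmap in validate_fini_list()), waited idle, and patched with nouveau_bo_wr32(). The per-reloc arithmetic, condensed from the hunk above with error handling trimmed:

    b    = &bo[r->bo_index];                      /* supplies presumed offset/domain */
    nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;  /* patch target */

    if (r->flags & NOUVEAU_GEM_RELOC_LOW)
            data = b->presumed.offset + r->data;
    else if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
            data = (b->presumed.offset + r->data) >> 32;
    else
            data = r->data;

    if (r->flags & NOUVEAU_GEM_RELOC_OR)
            data |= (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART) ? r->tor : r->vor;

    nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);  /* after wait-idle */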
@@ -528,127 +573,50 @@ int
528nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, 573nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
529 struct drm_file *file_priv) 574 struct drm_file *file_priv)
530{ 575{
576 struct drm_nouveau_private *dev_priv = dev->dev_private;
531 struct drm_nouveau_gem_pushbuf *req = data; 577 struct drm_nouveau_gem_pushbuf *req = data;
532 struct drm_nouveau_gem_pushbuf_bo *bo = NULL; 578 struct drm_nouveau_gem_pushbuf_push *push;
579 struct drm_nouveau_gem_pushbuf_bo *bo;
533 struct nouveau_channel *chan; 580 struct nouveau_channel *chan;
534 struct validate_op op; 581 struct validate_op op;
535 struct nouveau_fence* fence = 0; 582 struct nouveau_fence *fence = 0;
536 uint32_t *pushbuf = NULL; 583 int i, j, ret = 0, do_reloc = 0;
537 int ret = 0, do_reloc = 0, i;
538 584
539 NOUVEAU_CHECK_INITIALISED_WITH_RETURN; 585 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
540 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); 586 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
541 587
542 if (req->nr_dwords >= chan->dma.max || 588 req->vram_available = dev_priv->fb_aper_free;
543 req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS || 589 req->gart_available = dev_priv->gart_info.aper_free;
544 req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) { 590 if (unlikely(req->nr_push == 0))
545 NV_ERROR(dev, "Pushbuf config exceeds limits:\n"); 591 goto out_next;
546 NV_ERROR(dev, " dwords : %d max %d\n", req->nr_dwords,
547 chan->dma.max - 1);
548 NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
549 NOUVEAU_GEM_MAX_BUFFERS);
550 NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
551 NOUVEAU_GEM_MAX_RELOCS);
552 return -EINVAL;
553 }
554
555 pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
556 if (IS_ERR(pushbuf))
557 return PTR_ERR(pushbuf);
558
559 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
560 if (IS_ERR(bo)) {
561 kfree(pushbuf);
562 return PTR_ERR(bo);
563 }
564
565 mutex_lock(&dev->struct_mutex);
566
567 /* Validate buffer list */
568 ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
569 req->nr_buffers, &op, &do_reloc);
570 if (ret)
571 goto out;
572
573 /* Apply any relocations that are required */
574 if (do_reloc) {
575 ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
576 bo, req->nr_relocs,
577 req->relocs,
578 req->nr_dwords, 0,
579 pushbuf, false);
580 if (ret)
581 goto out;
582 }
583
584 /* Emit push buffer to the hw
585 */
586 ret = RING_SPACE(chan, req->nr_dwords);
587 if (ret)
588 goto out;
589
590 OUT_RINGp(chan, pushbuf, req->nr_dwords);
591 592
592 ret = nouveau_fence_new(chan, &fence, true); 593 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
593 if (ret) { 594 NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
594 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); 595 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
595 WIND_RING(chan); 596 return -EINVAL;
596 goto out;
597 } 597 }
598 598
599 if (nouveau_gem_pushbuf_sync(chan)) { 599 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
600 ret = nouveau_fence_wait(fence, NULL, false, false); 600 NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
601 if (ret) { 601 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
602 for (i = 0; i < req->nr_dwords; i++) 602 return -EINVAL;
603 NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
604 NV_ERROR(dev, "^^ above push buffer is fail :(\n");
605 }
606 } 603 }
607 604
608out: 605 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
609 validate_fini(&op, fence); 606 NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
610 nouveau_fence_unref((void**)&fence); 607 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
611 mutex_unlock(&dev->struct_mutex);
612 kfree(pushbuf);
613 kfree(bo);
614 return ret;
615}
616
617#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
618
619int
620nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
621 struct drm_file *file_priv)
622{
623 struct drm_nouveau_private *dev_priv = dev->dev_private;
624 struct drm_nouveau_gem_pushbuf_call *req = data;
625 struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
626 struct nouveau_channel *chan;
627 struct drm_gem_object *gem;
628 struct nouveau_bo *pbbo;
629 struct validate_op op;
630 struct nouveau_fence* fence = 0;
631 int i, ret = 0, do_reloc = 0;
632
633 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
634 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
635
636 if (unlikely(req->handle == 0))
637 goto out_next;
638
639 if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
640 req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
641 NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
642 NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
643 NOUVEAU_GEM_MAX_BUFFERS);
644 NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
645 NOUVEAU_GEM_MAX_RELOCS);
646 return -EINVAL; 608 return -EINVAL;
647 } 609 }
648 610
611 push = u_memcpya(req->push, req->nr_push, sizeof(*push));
612 if (IS_ERR(push))
613 return PTR_ERR(push);
614
649 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); 615 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
650 if (IS_ERR(bo)) 616 if (IS_ERR(bo)) {
617 kfree(push);
651 return PTR_ERR(bo); 618 return PTR_ERR(bo);
619 }
652 620
653 mutex_lock(&dev->struct_mutex); 621 mutex_lock(&dev->struct_mutex);
654 622
@@ -660,122 +628,84 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
660 goto out; 628 goto out;
661 } 629 }
662 630
663 /* Validate DMA push buffer */
664 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
665 if (!gem) {
666 NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
667 ret = -EINVAL;
668 goto out;
669 }
670 pbbo = nouveau_gem_object(gem);
671
672 if ((req->offset & 3) || req->nr_dwords < 2 ||
673 (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
674 (unsigned long)req->nr_dwords >
675 ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
676 NV_ERROR(dev, "pb call misaligned or out of bounds: "
677 "%d + %d * 4 > %ld\n",
678 req->offset, req->nr_dwords, pbbo->bo.mem.size);
679 ret = -EINVAL;
680 drm_gem_object_unreference(gem);
681 goto out;
682 }
683
684 ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
685 chan->fence.sequence);
686 if (ret) {
687 NV_ERROR(dev, "resv pb: %d\n", ret);
688 drm_gem_object_unreference(gem);
689 goto out;
690 }
691
692 nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
693 ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
694 if (ret) {
695 NV_ERROR(dev, "validate pb: %d\n", ret);
696 ttm_bo_unreserve(&pbbo->bo);
697 drm_gem_object_unreference(gem);
698 goto out;
699 }
700
701 list_add_tail(&pbbo->entry, &op.both_list);
702
703 /* If presumed return address doesn't match, we need to map the
704 * push buffer and fix it..
705 */
706 if (!PUSHBUF_CAL) {
707 uint32_t retaddy;
708
709 if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
710 ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
711 if (ret) {
712 NV_ERROR(dev, "jmp_space: %d\n", ret);
713 goto out;
714 }
715 }
716
717 retaddy = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
718 retaddy |= 0x20000000;
719 if (retaddy != req->suffix0) {
720 req->suffix0 = retaddy;
721 do_reloc = 1;
722 }
723 }
724
725 /* Apply any relocations that are required */ 631 /* Apply any relocations that are required */
726 if (do_reloc) { 632 if (do_reloc) {
727 void *pbvirt; 633 ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
728 bool is_iomem;
729 ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
730 &pbbo->kmap);
731 if (ret) { 634 if (ret) {
732 NV_ERROR(dev, "kmap pb: %d\n", ret); 635 NV_ERROR(dev, "reloc apply: %d\n", ret);
733 goto out; 636 goto out;
734 } 637 }
638 }
735 639
736 pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem); 640 if (chan->dma.ib_max) {
737 ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo, 641 ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
738 req->nr_relocs,
739 req->relocs,
740 req->nr_dwords,
741 req->offset / 4,
742 pbvirt, is_iomem);
743
744 if (!PUSHBUF_CAL) {
745 nouveau_bo_wr32(pbbo,
746 req->offset / 4 + req->nr_dwords - 2,
747 req->suffix0);
748 }
749
750 ttm_bo_kunmap(&pbbo->kmap);
751 if (ret) { 642 if (ret) {
752 NV_ERROR(dev, "reloc apply: %d\n", ret); 643 NV_INFO(dev, "nv50cal_space: %d\n", ret);
753 goto out; 644 goto out;
754 } 645 }
755 }
756 646
757 if (PUSHBUF_CAL) { 647 for (i = 0; i < req->nr_push; i++) {
758 ret = RING_SPACE(chan, 2); 648 struct nouveau_bo *nvbo = (void *)(unsigned long)
649 bo[push[i].bo_index].user_priv;
650
651 nv50_dma_push(chan, nvbo, push[i].offset,
652 push[i].length);
653 }
654 } else
655 if (dev_priv->card_type >= NV_20) {
656 ret = RING_SPACE(chan, req->nr_push * 2);
759 if (ret) { 657 if (ret) {
760 NV_ERROR(dev, "cal_space: %d\n", ret); 658 NV_ERROR(dev, "cal_space: %d\n", ret);
761 goto out; 659 goto out;
762 } 660 }
763 OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) + 661
764 req->offset) | 2); 662 for (i = 0; i < req->nr_push; i++) {
765 OUT_RING(chan, 0); 663 struct nouveau_bo *nvbo = (void *)(unsigned long)
664 bo[push[i].bo_index].user_priv;
665 struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
666
667 OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
668 push[i].offset) | 2);
669 OUT_RING(chan, 0);
670 }
766 } else { 671 } else {
767 ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS); 672 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
768 if (ret) { 673 if (ret) {
769 NV_ERROR(dev, "jmp_space: %d\n", ret); 674 NV_ERROR(dev, "jmp_space: %d\n", ret);
770 goto out; 675 goto out;
771 } 676 }
772 OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
773 req->offset) | 0x20000000);
774 OUT_RING(chan, 0);
775 677
776 /* Space the jumps apart with NOPs. */ 678 for (i = 0; i < req->nr_push; i++) {
777 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) 679 struct nouveau_bo *nvbo = (void *)(unsigned long)
680 bo[push[i].bo_index].user_priv;
681 struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
682 uint32_t cmd;
683
684 cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
685 cmd |= 0x20000000;
686 if (unlikely(cmd != req->suffix0)) {
687 if (!nvbo->kmap.virtual) {
688 ret = ttm_bo_kmap(&nvbo->bo, 0,
689 nvbo->bo.mem.
690 num_pages,
691 &nvbo->kmap);
692 if (ret) {
693 WIND_RING(chan);
694 goto out;
695 }
696 nvbo->validate_mapped = true;
697 }
698
699 nouveau_bo_wr32(nvbo, (push[i].offset +
700 push[i].length - 8) / 4, cmd);
701 }
702
703 OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
704 push[i].offset) | 0x20000000);
778 OUT_RING(chan, 0); 705 OUT_RING(chan, 0);
706 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
707 OUT_RING(chan, 0);
708 }
779 } 709 }
780 710
781 ret = nouveau_fence_new(chan, &fence, true); 711 ret = nouveau_fence_new(chan, &fence, true);
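Submission now picks one of three paths per channel. Channels with an indirect-buffer ring (chan->dma.ib_max, NV50-style) hand each push request to nv50_dma_push(); NV20 and newer parts without an IB ring emit a CALL entry per request; older parts fall back to a JUMP, patching the expected return jump into the user's buffer if the ring position moved and padding with NOUVEAU_DMA_SKIPS NOPs. Control-flow skeleton of the hunk above, bodies elided:

    if (chan->dma.ib_max) {
            /* IB ring: nouveau_dma_wait(chan, nr_push + 1, 6), then one
             * nv50_dma_push(chan, nvbo, offset, length) per request. */
    } else if (dev_priv->card_type >= NV_20) {
            /* CALL: OUT_RING(addr | 2); OUT_RING(0); per request. */
    } else {
            /* JUMP: fix up the return jump inside the buffer when stale,
             * then OUT_RING(addr | 0x20000000) plus NOP padding. */
    }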
@@ -790,9 +720,14 @@ out:
790 nouveau_fence_unref((void**)&fence); 720 nouveau_fence_unref((void**)&fence);
791 mutex_unlock(&dev->struct_mutex); 721 mutex_unlock(&dev->struct_mutex);
792 kfree(bo); 722 kfree(bo);
723 kfree(push);
793 724
794out_next: 725out_next:
795 if (PUSHBUF_CAL) { 726 if (chan->dma.ib_max) {
727 req->suffix0 = 0x00000000;
728 req->suffix1 = 0x00000000;
729 } else
730 if (dev_priv->card_type >= NV_20) {
796 req->suffix0 = 0x00020000; 731 req->suffix0 = 0x00020000;
797 req->suffix1 = 0x00000000; 732 req->suffix1 = 0x00000000;
798 } else { 733 } else {
@@ -804,19 +739,6 @@ out_next:
804 return ret; 739 return ret;
805} 740}
806 741
807int
808nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
809 struct drm_file *file_priv)
810{
811 struct drm_nouveau_private *dev_priv = dev->dev_private;
812 struct drm_nouveau_gem_pushbuf_call *req = data;
813
814 req->vram_available = dev_priv->fb_aper_free;
815 req->gart_available = dev_priv->gart_info.aper_free;
816
817 return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
818}
819
820static inline uint32_t 742static inline uint32_t
821domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain) 743domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
822{ 744{
@@ -831,74 +753,6 @@ domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
831} 753}
832 754
833int 755int
834nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
835 struct drm_file *file_priv)
836{
837 struct drm_nouveau_gem_pin *req = data;
838 struct drm_gem_object *gem;
839 struct nouveau_bo *nvbo;
840 int ret = 0;
841
842 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
843
844 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
845 NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
846 return -EINVAL;
847 }
848
849 if (!DRM_SUSER(DRM_CURPROC))
850 return -EPERM;
851
852 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
853 if (!gem)
854 return -EINVAL;
855 nvbo = nouveau_gem_object(gem);
856
857 ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
858 if (ret)
859 goto out;
860
861 req->offset = nvbo->bo.offset;
862 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
863 req->domain = NOUVEAU_GEM_DOMAIN_GART;
864 else
865 req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
866
867out:
868 mutex_lock(&dev->struct_mutex);
869 drm_gem_object_unreference(gem);
870 mutex_unlock(&dev->struct_mutex);
871
872 return ret;
873}
874
875int
876nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
877 struct drm_file *file_priv)
878{
879 struct drm_nouveau_gem_pin *req = data;
880 struct drm_gem_object *gem;
881 int ret;
882
883 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
884
885 if (drm_core_check_feature(dev, DRIVER_MODESET))
886 return -EINVAL;
887
888 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
889 if (!gem)
890 return -EINVAL;
891
892 ret = nouveau_bo_unpin(nouveau_gem_object(gem));
893
894 mutex_lock(&dev->struct_mutex);
895 drm_gem_object_unreference(gem);
896 mutex_unlock(&dev->struct_mutex);
897
898 return ret;
899}
900
901int
902nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, 756nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
903 struct drm_file *file_priv) 757 struct drm_file *file_priv)
904{ 758{
@@ -935,9 +789,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
935 } 789 }
936 790
937out: 791out:
938 mutex_lock(&dev->struct_mutex); 792 drm_gem_object_unreference_unlocked(gem);
939 drm_gem_object_unreference(gem);
940 mutex_unlock(&dev->struct_mutex);
941 return ret; 793 return ret;
942} 794}
943 795
@@ -965,9 +817,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
965 ret = 0; 817 ret = 0;
966 818
967out: 819out:
968 mutex_lock(&dev->struct_mutex); 820 drm_gem_object_unreference_unlocked(gem);
969 drm_gem_object_unreference(gem);
970 mutex_unlock(&dev->struct_mutex);
971 return ret; 821 return ret;
972} 822}
973 823
@@ -986,9 +836,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
986 return -EINVAL; 836 return -EINVAL;
987 837
988 ret = nouveau_gem_info(gem, req); 838 ret = nouveau_gem_info(gem, req);
989 mutex_lock(&dev->struct_mutex); 839 drm_gem_object_unreference_unlocked(gem);
990 drm_gem_object_unreference(gem);
991 mutex_unlock(&dev->struct_mutex);
992 return ret; 840 return ret;
993} 841}
994 842
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index dc46792a5c96..7855b35effc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -160,7 +160,7 @@ static void
160setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv) 160setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
161{ 161{
162 struct drm_nouveau_private *dev_priv = dev->dev_private; 162 struct drm_nouveau_private *dev_priv = dev->dev_private;
163 int chip_version = dev_priv->vbios->chip_version; 163 int chip_version = dev_priv->vbios.chip_version;
164 uint32_t oldpll = NVReadRAMDAC(dev, 0, reg); 164 uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
165 int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff; 165 int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
166 uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1; 166 uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
@@ -216,7 +216,7 @@ setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
216 struct nouveau_pll_vals *pv) 216 struct nouveau_pll_vals *pv)
217{ 217{
218 struct drm_nouveau_private *dev_priv = dev->dev_private; 218 struct drm_nouveau_private *dev_priv = dev->dev_private;
219 int chip_version = dev_priv->vbios->chip_version; 219 int chip_version = dev_priv->vbios.chip_version;
220 bool nv3035 = chip_version == 0x30 || chip_version == 0x35; 220 bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
221 uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70); 221 uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
222 uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1); 222 uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
@@ -374,7 +374,7 @@ nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
374 struct nouveau_pll_vals *pv) 374 struct nouveau_pll_vals *pv)
375{ 375{
376 struct drm_nouveau_private *dev_priv = dev->dev_private; 376 struct drm_nouveau_private *dev_priv = dev->dev_private;
377 int cv = dev_priv->vbios->chip_version; 377 int cv = dev_priv->vbios.chip_version;
378 378
379 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 || 379 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
380 cv >= 0x40) { 380 cv >= 0x40) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 70e994d28122..88583e7bf651 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -254,16 +254,16 @@ struct nouveau_i2c_chan *
254nouveau_i2c_find(struct drm_device *dev, int index) 254nouveau_i2c_find(struct drm_device *dev, int index)
255{ 255{
256 struct drm_nouveau_private *dev_priv = dev->dev_private; 256 struct drm_nouveau_private *dev_priv = dev->dev_private;
257 struct nvbios *bios = &dev_priv->VBIOS; 257 struct nvbios *bios = &dev_priv->vbios;
258 258
259 if (index > DCB_MAX_NUM_I2C_ENTRIES) 259 if (index >= DCB_MAX_NUM_I2C_ENTRIES)
260 return NULL; 260 return NULL;
261 261
262 if (!bios->bdcb.dcb.i2c[index].chan) { 262 if (!bios->dcb.i2c[index].chan) {
263 if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index)) 263 if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index))
264 return NULL; 264 return NULL;
265 } 265 }
266 266
267 return bios->bdcb.dcb.i2c[index].chan; 267 return bios->dcb.i2c[index].chan;
268} 268}
269 269
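Besides following the vbios/dcb restructuring, this hunk fixes an off-by-one: for an array sized DCB_MAX_NUM_I2C_ENTRIES the last valid index is DCB_MAX_NUM_I2C_ENTRIES - 1, so the reject test must be >=, not >. A generic illustration with a stand-in size:

    #define MAX_ENTRIES 16                 /* stand-in, not the real constant */
    static struct nouveau_i2c_chan *table[MAX_ENTRIES];

    static struct nouveau_i2c_chan *lookup(int index)
    {
            if (index >= MAX_ENTRIES)      /* 'index > MAX_ENTRIES' would let
                                            * index == MAX_ENTRIES slip through */
                    return NULL;
            return table[index];
    }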
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 447f9f69d6b1..95220ddebb45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -691,11 +691,14 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
691 struct drm_device *dev = (struct drm_device *)arg; 691 struct drm_device *dev = (struct drm_device *)arg;
692 struct drm_nouveau_private *dev_priv = dev->dev_private; 692 struct drm_nouveau_private *dev_priv = dev->dev_private;
693 uint32_t status, fbdev_flags = 0; 693 uint32_t status, fbdev_flags = 0;
694 unsigned long flags;
694 695
695 status = nv_rd32(dev, NV03_PMC_INTR_0); 696 status = nv_rd32(dev, NV03_PMC_INTR_0);
696 if (!status) 697 if (!status)
697 return IRQ_NONE; 698 return IRQ_NONE;
698 699
700 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
701
699 if (dev_priv->fbdev_info) { 702 if (dev_priv->fbdev_info) {
700 fbdev_flags = dev_priv->fbdev_info->flags; 703 fbdev_flags = dev_priv->fbdev_info->flags;
701 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; 704 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
@@ -733,5 +736,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
733 if (dev_priv->fbdev_info) 736 if (dev_priv->fbdev_info)
734 dev_priv->fbdev_info->flags = fbdev_flags; 737 dev_priv->fbdev_info->flags = fbdev_flags;
735 738
739 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
740
736 return IRQ_HANDLED; 741 return IRQ_HANDLED;
737} 742}
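The interrupt handler now runs with dev_priv->context_switch_lock held, taken with spin_lock_irqsave(); the FIFO context-creation paths later in this diff take the same lock while programming RAMFC, so channel setup can no longer race the IRQ path. The generic shape of that pattern, not nouveau-specific:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct dev_state {                      /* illustrative only */
            spinlock_t lock;
            /* ... state also touched from process context ... */
    };

    static irqreturn_t sketch_irq_handler(int irq, void *arg)
    {
            struct dev_state *st = arg;
            unsigned long flags;

            spin_lock_irqsave(&st->lock, flags);
            /* touch shared state safely against process-context holders */
            spin_unlock_irqrestore(&st->lock, flags);
            return IRQ_HANDLED;
    }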
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index d99dc087f9b1..9537f3e30115 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -61,11 +61,8 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
61 61
62 chan->notifier_bo = ntfy; 62 chan->notifier_bo = ntfy;
63out_err: 63out_err:
64 if (ret) { 64 if (ret)
65 mutex_lock(&dev->struct_mutex); 65 drm_gem_object_unreference_unlocked(ntfy->gem);
66 drm_gem_object_unreference(ntfy->gem);
67 mutex_unlock(&dev->struct_mutex);
68 }
69 66
70 return ret; 67 return ret;
71} 68}
@@ -81,8 +78,8 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
81 nouveau_bo_unmap(chan->notifier_bo); 78 nouveau_bo_unmap(chan->notifier_bo);
82 mutex_lock(&dev->struct_mutex); 79 mutex_lock(&dev->struct_mutex);
83 nouveau_bo_unpin(chan->notifier_bo); 80 nouveau_bo_unpin(chan->notifier_bo);
84 drm_gem_object_unreference(chan->notifier_bo->gem);
85 mutex_unlock(&dev->struct_mutex); 81 mutex_unlock(&dev->struct_mutex);
82 drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
86 nouveau_mem_takedown(&chan->notifier_heap); 83 nouveau_mem_takedown(&chan->notifier_heap);
87} 84}
88 85
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a4851af5b05e..eb8f084d5f53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -29,6 +29,7 @@
29#include "drm_sarea.h" 29#include "drm_sarea.h"
30#include "drm_crtc_helper.h" 30#include "drm_crtc_helper.h"
31#include <linux/vgaarb.h> 31#include <linux/vgaarb.h>
32#include <linux/vga_switcheroo.h>
32 33
33#include "nouveau_drv.h" 34#include "nouveau_drv.h"
34#include "nouveau_drm.h" 35#include "nouveau_drm.h"
@@ -371,6 +372,30 @@ out_err:
371 return ret; 372 return ret;
372} 373}
373 374
375static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
376 enum vga_switcheroo_state state)
377{
378 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
379 if (state == VGA_SWITCHEROO_ON) {
380 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
381 nouveau_pci_resume(pdev);
382 } else {
383 printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
384 nouveau_pci_suspend(pdev, pmm);
385 }
386}
387
388static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
389{
390 struct drm_device *dev = pci_get_drvdata(pdev);
391 bool can_switch;
392
393 spin_lock(&dev->count_lock);
394 can_switch = (dev->open_count == 0);
395 spin_unlock(&dev->count_lock);
396 return can_switch;
397}
398
374int 399int
375nouveau_card_init(struct drm_device *dev) 400nouveau_card_init(struct drm_device *dev)
376{ 401{
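nouveau registers itself as a vga_switcheroo client: set_state routes switch requests through the existing PCI suspend/resume hooks, and can_switch only allows a switch while nothing holds the DRM device open (dev->open_count sampled under dev->count_lock). The registration happens in nouveau_card_init() in the next hunk; teardown would mirror it, roughly:

    /* Registration as added below in nouveau_card_init(): */
    vga_switcheroo_register_client(dev->pdev,
                                   nouveau_switcheroo_set_state,
                                   nouveau_switcheroo_can_switch);

    /* On unload the client would be dropped again; the exact call
     * (vga_switcheroo_unregister_client(dev->pdev)) is assumed here and
     * is not part of this excerpt. */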
@@ -384,6 +409,8 @@ nouveau_card_init(struct drm_device *dev)
384 return 0; 409 return 0;
385 410
386 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); 411 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
412 vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
413 nouveau_switcheroo_can_switch);
387 414
388 /* Initialise internal driver API hooks */ 415 /* Initialise internal driver API hooks */
389 ret = nouveau_init_engine_ptrs(dev); 416 ret = nouveau_init_engine_ptrs(dev);
@@ -391,6 +418,7 @@ nouveau_card_init(struct drm_device *dev)
391 goto out; 418 goto out;
392 engine = &dev_priv->engine; 419 engine = &dev_priv->engine;
393 dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; 420 dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
421 spin_lock_init(&dev_priv->context_switch_lock);
394 422
395 /* Parse BIOS tables / Run init tables if card not POSTed */ 423 /* Parse BIOS tables / Run init tables if card not POSTed */
396 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 424 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -617,11 +645,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
617 NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n", 645 NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
618 dev->pci_vendor, dev->pci_device, dev->pdev->class); 646 dev->pci_vendor, dev->pci_device, dev->pdev->class);
619 647
620 dev_priv->acpi_dsm = nouveau_dsm_probe(dev);
621
622 if (dev_priv->acpi_dsm)
623 nouveau_hybrid_setup(dev);
624
625 dev_priv->wq = create_workqueue("nouveau"); 648 dev_priv->wq = create_workqueue("nouveau");
626 if (!dev_priv->wq) 649 if (!dev_priv->wq)
627 return -EINVAL; 650 return -EINVAL;
@@ -776,13 +799,6 @@ int nouveau_unload(struct drm_device *dev)
776 return 0; 799 return 0;
777} 800}
778 801
779int
780nouveau_ioctl_card_init(struct drm_device *dev, void *data,
781 struct drm_file *file_priv)
782{
783 return nouveau_card_init(dev);
784}
785
786int nouveau_ioctl_getparam(struct drm_device *dev, void *data, 802int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
787 struct drm_file *file_priv) 803 struct drm_file *file_priv)
788{ 804{
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index d2f143ed97c1..a1d1ebb073d9 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -926,9 +926,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
926 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); 926 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
927 nv_crtc->cursor.show(nv_crtc, true); 927 nv_crtc->cursor.show(nv_crtc, true);
928out: 928out:
929 mutex_lock(&dev->struct_mutex); 929 drm_gem_object_unreference_unlocked(gem);
930 drm_gem_object_unreference(gem);
931 mutex_unlock(&dev->struct_mutex);
932 return ret; 930 return ret;
933} 931}
934 932
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 1d73b15d70da..1cb19e3acb55 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -230,13 +230,13 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
230 if (dcb->type == OUTPUT_TV) { 230 if (dcb->type == OUTPUT_TV) {
231 testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0); 231 testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
232 232
233 if (dev_priv->vbios->tvdactestval) 233 if (dev_priv->vbios.tvdactestval)
234 testval = dev_priv->vbios->tvdactestval; 234 testval = dev_priv->vbios.tvdactestval;
235 } else { 235 } else {
236 testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */ 236 testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
237 237
238 if (dev_priv->vbios->dactestval) 238 if (dev_priv->vbios.dactestval)
239 testval = dev_priv->vbios->dactestval; 239 testval = dev_priv->vbios.dactestval;
240 } 240 }
241 241
242 saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); 242 saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 483f875bdb6a..41634d4752fe 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -269,10 +269,10 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
269 regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1; 269 regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
270 if (!nv_gf4_disp_arch(dev) || 270 if (!nv_gf4_disp_arch(dev) ||
271 (output_mode->hsync_start - output_mode->hdisplay) >= 271 (output_mode->hsync_start - output_mode->hdisplay) >=
272 dev_priv->vbios->digital_min_front_porch) 272 dev_priv->vbios.digital_min_front_porch)
273 regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay; 273 regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
274 else 274 else
275 regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1; 275 regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios.digital_min_front_porch - 1;
276 regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1; 276 regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
277 regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1; 277 regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
278 regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew; 278 regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index ef77215fa5b9..c7898b4f6dfb 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -93,10 +93,9 @@ int
93nv04_display_create(struct drm_device *dev) 93nv04_display_create(struct drm_device *dev)
94{ 94{
95 struct drm_nouveau_private *dev_priv = dev->dev_private; 95 struct drm_nouveau_private *dev_priv = dev->dev_private;
96 struct parsed_dcb *dcb = dev_priv->vbios->dcb; 96 struct dcb_table *dcb = &dev_priv->vbios.dcb;
97 struct drm_encoder *encoder; 97 struct drm_encoder *encoder;
98 struct drm_crtc *crtc; 98 struct drm_crtc *crtc;
99 uint16_t connector[16] = { 0 };
100 int i, ret; 99 int i, ret;
101 100
102 NV_DEBUG_KMS(dev, "\n"); 101 NV_DEBUG_KMS(dev, "\n");
@@ -154,52 +153,10 @@ nv04_display_create(struct drm_device *dev)
154 153
155 if (ret) 154 if (ret)
156 continue; 155 continue;
157
158 connector[dcbent->connector] |= (1 << dcbent->type);
159 } 156 }
160 157
161 for (i = 0; i < dcb->entries; i++) { 158 for (i = 0; i < dcb->connector.entries; i++)
162 struct dcb_entry *dcbent = &dcb->entry[i]; 159 nouveau_connector_create(dev, &dcb->connector.entry[i]);
163 uint16_t encoders;
164 int type;
165
166 encoders = connector[dcbent->connector];
167 if (!(encoders & (1 << dcbent->type)))
168 continue;
169 connector[dcbent->connector] = 0;
170
171 switch (dcbent->type) {
172 case OUTPUT_ANALOG:
173 if (!MULTIPLE_ENCODERS(encoders))
174 type = DRM_MODE_CONNECTOR_VGA;
175 else
176 type = DRM_MODE_CONNECTOR_DVII;
177 break;
178 case OUTPUT_TMDS:
179 if (!MULTIPLE_ENCODERS(encoders))
180 type = DRM_MODE_CONNECTOR_DVID;
181 else
182 type = DRM_MODE_CONNECTOR_DVII;
183 break;
184 case OUTPUT_LVDS:
185 type = DRM_MODE_CONNECTOR_LVDS;
186#if 0
187 /* don't create i2c adapter when lvds ddc not allowed */
188 if (dcbent->lvdsconf.use_straps_for_mode ||
189 dev_priv->vbios->fp_no_ddc)
190 i2c_index = 0xf;
191#endif
192 break;
193 case OUTPUT_TV:
194 type = DRM_MODE_CONNECTOR_TV;
195 break;
196 default:
197 type = DRM_MODE_CONNECTOR_Unknown;
198 continue;
199 }
200
201 nouveau_connector_create(dev, dcbent->connector, type);
202 }
203 160
204 /* Save previous state */ 161 /* Save previous state */
205 NVLockVgaCrtcs(dev, false); 162 NVLockVgaCrtcs(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index fd01caabd5c3..3da90c2c4e63 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -118,7 +118,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
118 return; 118 return;
119 } 119 }
120 120
121 width = (image->width + 31) & ~31; 121 width = ALIGN(image->width, 32);
122 dsize = (width * image->height) >> 5; 122 dsize = (width * image->height) >> 5;
123 123
124 if (info->fix.visual == FB_VISUAL_TRUECOLOR || 124 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
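The open-coded round-up is replaced with the kernel's ALIGN() helper from linux/kernel.h; for a power-of-two alignment the two forms are identical, the macro just states the intent:

    /* For power-of-two a: ALIGN(x, a) == (x + a - 1) & ~(a - 1) */
    u32 w_old = (image->width + 31) & ~31;
    u32 w_new = ALIGN(image->width, 32);   /* same value, clearer intent */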
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index f31347b8c9b0..66fe55983b6e 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -117,6 +117,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
117{ 117{
118 struct drm_device *dev = chan->dev; 118 struct drm_device *dev = chan->dev;
119 struct drm_nouveau_private *dev_priv = dev->dev_private; 119 struct drm_nouveau_private *dev_priv = dev->dev_private;
120 unsigned long flags;
120 int ret; 121 int ret;
121 122
122 ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0, 123 ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
@@ -127,6 +128,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
127 if (ret) 128 if (ret)
128 return ret; 129 return ret;
129 130
131 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
132
130 /* Setup initial state */ 133 /* Setup initial state */
131 dev_priv->engine.instmem.prepare_access(dev, true); 134 dev_priv->engine.instmem.prepare_access(dev, true);
132 RAMFC_WR(DMA_PUT, chan->pushbuf_base); 135 RAMFC_WR(DMA_PUT, chan->pushbuf_base);
@@ -144,6 +147,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
144 /* enable the fifo dma operation */ 147 /* enable the fifo dma operation */
145 nv_wr32(dev, NV04_PFIFO_MODE, 148 nv_wr32(dev, NV04_PFIFO_MODE,
146 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); 149 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
150
151 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
147 return 0; 152 return 0;
148} 153}
149 154
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 9c63099e9c42..c4e3404337d4 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -262,7 +262,7 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
262 nv_encoder->or = ffs(entry->or) - 1; 262 nv_encoder->or = ffs(entry->or) - 1;
263 263
264 /* Run the slave-specific initialization */ 264 /* Run the slave-specific initialization */
265 adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter; 265 adap = &dev_priv->vbios.dcb.i2c[i2c_index].chan->adapter;
266 266
267 was_locked = NVLockVgaCrtcs(dev, false); 267 was_locked = NVLockVgaCrtcs(dev, false);
268 268
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 21ac6e49b6ee..74c880374fb9 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -45,8 +45,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
45 45
46#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) 46#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
47 testval = RGB_TEST_DATA(0x82, 0xeb, 0x82); 47 testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
48 if (dev_priv->vbios->tvdactestval) 48 if (dev_priv->vbios.tvdactestval)
49 testval = dev_priv->vbios->tvdactestval; 49 testval = dev_priv->vbios.tvdactestval;
50 50
51 dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); 51 dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
52 head = (dacclk & 0x100) >> 8; 52 head = (dacclk & 0x100) >> 8;
@@ -367,7 +367,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
367 !enc->crtc && 367 !enc->crtc &&
368 nv04_dfp_get_bound_head(dev, dcb) == head) { 368 nv04_dfp_get_bound_head(dev, dcb) == head) {
369 nv04_dfp_bind_head(dev, dcb, head ^ 1, 369 nv04_dfp_bind_head(dev, dcb, head ^ 1,
370 dev_priv->VBIOS.fp.dual_link); 370 dev_priv->vbios.fp.dual_link);
371 } 371 }
372 } 372 }
373 373
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index b4f19ccb8b41..6b2ef4a9fce1 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -37,6 +37,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
37 struct drm_device *dev = chan->dev; 37 struct drm_device *dev = chan->dev;
38 struct drm_nouveau_private *dev_priv = dev->dev_private; 38 struct drm_nouveau_private *dev_priv = dev->dev_private;
39 uint32_t fc = NV40_RAMFC(chan->id); 39 uint32_t fc = NV40_RAMFC(chan->id);
40 unsigned long flags;
40 int ret; 41 int ret;
41 42
42 ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, 43 ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
@@ -45,6 +46,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
45 if (ret) 46 if (ret)
46 return ret; 47 return ret;
47 48
49 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
50
48 dev_priv->engine.instmem.prepare_access(dev, true); 51 dev_priv->engine.instmem.prepare_access(dev, true);
49 nv_wi32(dev, fc + 0, chan->pushbuf_base); 52 nv_wi32(dev, fc + 0, chan->pushbuf_base);
50 nv_wi32(dev, fc + 4, chan->pushbuf_base); 53 nv_wi32(dev, fc + 4, chan->pushbuf_base);
@@ -63,6 +66,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
63 /* enable the fifo dma operation */ 66 /* enable the fifo dma operation */
64 nv_wr32(dev, NV04_PFIFO_MODE, 67 nv_wr32(dev, NV04_PFIFO_MODE,
65 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); 68 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
69
70 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
66 return 0; 71 return 0;
67} 72}
68 73
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index d1a651e3400c..cfabeb974a56 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -358,9 +358,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
358 nv_crtc->cursor.show(nv_crtc, true); 358 nv_crtc->cursor.show(nv_crtc, true);
359 359
360out: 360out:
361 mutex_lock(&dev->struct_mutex); 361 drm_gem_object_unreference_unlocked(gem);
362 drm_gem_object_unreference(gem);
363 mutex_unlock(&dev->struct_mutex);
364 return ret; 362 return ret;
365} 363}
366 364
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index f08f042a8e10..1fd9537beff6 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -79,8 +79,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
79 } 79 }
80 80
81 /* Use bios provided value if possible. */ 81 /* Use bios provided value if possible. */
82 if (dev_priv->vbios->dactestval) { 82 if (dev_priv->vbios.dactestval) {
83 load_pattern = dev_priv->vbios->dactestval; 83 load_pattern = dev_priv->vbios.dactestval;
84 NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n", 84 NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
85 load_pattern); 85 load_pattern);
86 } else { 86 } else {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 90f0bf59fbcd..61a89f2dc553 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -370,9 +370,7 @@ nv50_display_init(struct drm_device *dev)
370 struct nouveau_connector *conn = nouveau_connector(connector); 370 struct nouveau_connector *conn = nouveau_connector(connector);
371 struct dcb_gpio_entry *gpio; 371 struct dcb_gpio_entry *gpio;
372 372
373 if (connector->connector_type != DRM_MODE_CONNECTOR_DVII && 373 if (conn->dcb->gpio_tag == 0xff)
374 connector->connector_type != DRM_MODE_CONNECTOR_DVID &&
375 connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
376 continue; 374 continue;
377 375
378 gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag); 376 gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
@@ -465,8 +463,7 @@ static int nv50_display_disable(struct drm_device *dev)
465int nv50_display_create(struct drm_device *dev) 463int nv50_display_create(struct drm_device *dev)
466{ 464{
467 struct drm_nouveau_private *dev_priv = dev->dev_private; 465 struct drm_nouveau_private *dev_priv = dev->dev_private;
468 struct parsed_dcb *dcb = dev_priv->vbios->dcb; 466 struct dcb_table *dcb = &dev_priv->vbios.dcb;
469 uint32_t connector[16] = {};
470 int ret, i; 467 int ret, i;
471 468
472 NV_DEBUG_KMS(dev, "\n"); 469 NV_DEBUG_KMS(dev, "\n");
@@ -522,44 +519,13 @@ int nv50_display_create(struct drm_device *dev)
522 NV_WARN(dev, "DCB encoder %d unknown\n", entry->type); 519 NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
523 continue; 520 continue;
524 } 521 }
525
526 connector[entry->connector] |= (1 << entry->type);
527 } 522 }
528 523
529 /* It appears that DCB 3.0+ VBIOS has a connector table, however, 524 for (i = 0 ; i < dcb->connector.entries; i++) {
530 * I'm not 100% certain how to decode it correctly yet so just 525 if (i != 0 && dcb->connector.entry[i].index ==
531 * look at what encoders are present on each connector index and 526 dcb->connector.entry[i - 1].index)
532 * attempt to derive the connector type from that.
533 */
534 for (i = 0 ; i < dcb->entries; i++) {
535 struct dcb_entry *entry = &dcb->entry[i];
536 uint16_t encoders;
537 int type;
538
539 encoders = connector[entry->connector];
540 if (!(encoders & (1 << entry->type)))
541 continue; 527 continue;
542 connector[entry->connector] = 0; 528 nouveau_connector_create(dev, &dcb->connector.entry[i]);
543
544 if (encoders & (1 << OUTPUT_DP)) {
545 type = DRM_MODE_CONNECTOR_DisplayPort;
546 } else if (encoders & (1 << OUTPUT_TMDS)) {
547 if (encoders & (1 << OUTPUT_ANALOG))
548 type = DRM_MODE_CONNECTOR_DVII;
549 else
550 type = DRM_MODE_CONNECTOR_DVID;
551 } else if (encoders & (1 << OUTPUT_ANALOG)) {
552 type = DRM_MODE_CONNECTOR_VGA;
553 } else if (encoders & (1 << OUTPUT_LVDS)) {
554 type = DRM_MODE_CONNECTOR_LVDS;
555 } else {
556 type = DRM_MODE_CONNECTOR_Unknown;
557 }
558
559 if (type == DRM_MODE_CONNECTOR_Unknown)
560 continue;
561
562 nouveau_connector_create(dev, entry->connector, type);
563 } 529 }
564 530
565 ret = nv50_display_init(dev); 531 ret = nv50_display_init(dev);
@@ -667,8 +633,8 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
667 return -1; 633 return -1;
668 } 634 }
669 635
670 for (i = 0; i < dev_priv->vbios->dcb->entries; i++) { 636 for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
671 struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i]; 637 struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i];
672 638
673 if (dcbent->type != type) 639 if (dcbent->type != type)
674 continue; 640 continue;
@@ -692,7 +658,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
692 struct drm_nouveau_private *dev_priv = dev->dev_private; 658 struct drm_nouveau_private *dev_priv = dev->dev_private;
693 struct nouveau_connector *nv_connector = NULL; 659 struct nouveau_connector *nv_connector = NULL;
694 struct drm_encoder *encoder; 660 struct drm_encoder *encoder;
695 struct nvbios *bios = &dev_priv->VBIOS; 661 struct nvbios *bios = &dev_priv->vbios;
696 uint32_t mc, script = 0, or; 662 uint32_t mc, script = 0, or;
697 663
698 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 664 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -710,7 +676,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
710 switch (dcbent->type) { 676 switch (dcbent->type) {
711 case OUTPUT_LVDS: 677 case OUTPUT_LVDS:
712 script = (mc >> 8) & 0xf; 678 script = (mc >> 8) & 0xf;
713 if (bios->pub.fp_no_ddc) { 679 if (bios->fp_no_ddc) {
714 if (bios->fp.dual_link) 680 if (bios->fp.dual_link)
715 script |= 0x0100; 681 script |= 0x0100;
716 if (bios->fp.if_is_24bit) 682 if (bios->fp.if_is_24bit)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 0f57cdf7ccb2..993c7126fbde 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -109,7 +109,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
109 return; 109 return;
110 } 110 }
111 111
112 width = (image->width + 31) & ~31; 112 width = ALIGN(image->width, 32);
113 dwords = (width * image->height) >> 5; 113 dwords = (width * image->height) >> 5;
114 114
115 BEGIN_RING(chan, NvSub2D, 0x0814, 2); 115 BEGIN_RING(chan, NvSub2D, 0x0814, 2);
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 204a79ff10f4..e20c0e2474f3 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -243,6 +243,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
243 struct drm_device *dev = chan->dev; 243 struct drm_device *dev = chan->dev;
244 struct drm_nouveau_private *dev_priv = dev->dev_private; 244 struct drm_nouveau_private *dev_priv = dev->dev_private;
245 struct nouveau_gpuobj *ramfc = NULL; 245 struct nouveau_gpuobj *ramfc = NULL;
246 unsigned long flags;
246 int ret; 247 int ret;
247 248
248 NV_DEBUG(dev, "ch%d\n", chan->id); 249 NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -278,19 +279,21 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
278 return ret; 279 return ret;
279 } 280 }
280 281
282 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
283
281 dev_priv->engine.instmem.prepare_access(dev, true); 284 dev_priv->engine.instmem.prepare_access(dev, true);
282 285
283 nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
284 nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
285 nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4); 286 nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
286 nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4)); 287 nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
287 nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
288 nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff); 288 nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
289 nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff); 289 nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
290 nv_wo32(dev, ramfc, 0x40/4, 0x00000000); 290 nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
291 nv_wo32(dev, ramfc, 0x7c/4, 0x30000001); 291 nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
292 nv_wo32(dev, ramfc, 0x78/4, 0x00000000); 292 nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
293 nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff); 293 nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078);
294 nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base +
295 chan->dma.ib_base * 4);
296 nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);
294 297
295 if (!IS_G80) { 298 if (!IS_G80) {
296 nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id); 299 nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
@@ -306,10 +309,12 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
306 ret = nv50_fifo_channel_enable(dev, chan->id, false); 309 ret = nv50_fifo_channel_enable(dev, chan->id, false);
307 if (ret) { 310 if (ret) {
308 NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret); 311 NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
312 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
309 nouveau_gpuobj_ref_del(dev, &chan->ramfc); 313 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
310 return ret; 314 return ret;
311 } 315 }
312 316
317 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
313 return 0; 318 return 0;
314} 319}
315 320
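The reworked RAMFC setup above puts the channel into indirect-buffer (IB) mode: offset 0x50 now holds the GPU address of the IB ring (pushbuf_base + ib_base * 4), and 0x54 holds the ring size encoded as a power-of-two exponent in bits 16 and up, via drm_order(ib_max + 1). A hedged sketch of that size field, with a local helper standing in for drm_order() (assumed here to return the smallest n such that 1 << n covers the requested size):

#include <stdint.h>

/* Hypothetical stand-in for drm_order(): smallest n with (1 << n) >= size. */
static uint32_t order_of(uint32_t size)
{
	uint32_t n = 0;

	while ((1u << n) < size)
		n++;
	return n;
}

/* Value written to RAMFC + 0x54 for an IB ring holding ib_max + 1 entries. */
static uint32_t ramfc_ib_cfg(uint32_t ib_max)
{
	return order_of(ib_max + 1) << 16;
}
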
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 6d504801b514..857a09671a39 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -28,30 +28,7 @@
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30 30
31MODULE_FIRMWARE("nouveau/nv50.ctxprog"); 31#include "nouveau_grctx.h"
32MODULE_FIRMWARE("nouveau/nv50.ctxvals");
33MODULE_FIRMWARE("nouveau/nv84.ctxprog");
34MODULE_FIRMWARE("nouveau/nv84.ctxvals");
35MODULE_FIRMWARE("nouveau/nv86.ctxprog");
36MODULE_FIRMWARE("nouveau/nv86.ctxvals");
37MODULE_FIRMWARE("nouveau/nv92.ctxprog");
38MODULE_FIRMWARE("nouveau/nv92.ctxvals");
39MODULE_FIRMWARE("nouveau/nv94.ctxprog");
40MODULE_FIRMWARE("nouveau/nv94.ctxvals");
41MODULE_FIRMWARE("nouveau/nv96.ctxprog");
42MODULE_FIRMWARE("nouveau/nv96.ctxvals");
43MODULE_FIRMWARE("nouveau/nv98.ctxprog");
44MODULE_FIRMWARE("nouveau/nv98.ctxvals");
45MODULE_FIRMWARE("nouveau/nva0.ctxprog");
46MODULE_FIRMWARE("nouveau/nva0.ctxvals");
47MODULE_FIRMWARE("nouveau/nva5.ctxprog");
48MODULE_FIRMWARE("nouveau/nva5.ctxvals");
49MODULE_FIRMWARE("nouveau/nva8.ctxprog");
50MODULE_FIRMWARE("nouveau/nva8.ctxvals");
51MODULE_FIRMWARE("nouveau/nvaa.ctxprog");
52MODULE_FIRMWARE("nouveau/nvaa.ctxvals");
53MODULE_FIRMWARE("nouveau/nvac.ctxprog");
54MODULE_FIRMWARE("nouveau/nvac.ctxvals");
55 32
56#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) 33#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
57 34
@@ -111,9 +88,34 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
111 88
112 NV_DEBUG(dev, "\n"); 89 NV_DEBUG(dev, "\n");
113 90
114 nouveau_grctx_prog_load(dev); 91 if (nouveau_ctxfw) {
115 if (!dev_priv->engine.graph.ctxprog) 92 nouveau_grctx_prog_load(dev);
116 dev_priv->engine.graph.accel_blocked = true; 93 dev_priv->engine.graph.grctx_size = 0x70000;
94 }
95 if (!dev_priv->engine.graph.ctxprog) {
96 struct nouveau_grctx ctx = {};
97 uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL);
98 int i;
99 if (!cp) {
100 NV_ERROR(dev, "Couldn't alloc ctxprog! Disabling acceleration.\n");
101 dev_priv->engine.graph.accel_blocked = true;
102 return 0;
103 }
104 ctx.dev = dev;
105 ctx.mode = NOUVEAU_GRCTX_PROG;
106 ctx.data = cp;
107 ctx.ctxprog_max = 512;
108 if (!nv50_grctx_init(&ctx)) {
109 dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
110
111 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
112 for (i = 0; i < ctx.ctxprog_len; i++)
113 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
114 } else {
115 dev_priv->engine.graph.accel_blocked = true;
116 }
117 kfree(cp);
118 }
117 119
118 nv_wr32(dev, 0x400320, 4); 120 nv_wr32(dev, 0x400320, 4);
119 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0); 121 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
@@ -193,13 +195,13 @@ nv50_graph_create_context(struct nouveau_channel *chan)
193 struct drm_nouveau_private *dev_priv = dev->dev_private; 195 struct drm_nouveau_private *dev_priv = dev->dev_private;
194 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; 196 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
195 struct nouveau_gpuobj *ctx; 197 struct nouveau_gpuobj *ctx;
196 uint32_t grctx_size = 0x70000; 198 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
197 int hdr, ret; 199 int hdr, ret;
198 200
199 NV_DEBUG(dev, "ch%d\n", chan->id); 201 NV_DEBUG(dev, "ch%d\n", chan->id);
200 202
201 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000, 203 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
202 NVOBJ_FLAG_ZERO_ALLOC | 204 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
203 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); 205 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
204 if (ret) 206 if (ret)
205 return ret; 207 return ret;
@@ -209,7 +211,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
209 dev_priv->engine.instmem.prepare_access(dev, true); 211 dev_priv->engine.instmem.prepare_access(dev, true);
210 nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002); 212 nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
211 nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + 213 nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
212 grctx_size - 1); 214 pgraph->grctx_size - 1);
213 nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance); 215 nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
214 nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0); 216 nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
215 nv_wo32(dev, ramin, (hdr + 0x10)/4, 0); 217 nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
@@ -217,7 +219,15 @@ nv50_graph_create_context(struct nouveau_channel *chan)
217 dev_priv->engine.instmem.finish_access(dev); 219 dev_priv->engine.instmem.finish_access(dev);
218 220
219 dev_priv->engine.instmem.prepare_access(dev, true); 221 dev_priv->engine.instmem.prepare_access(dev, true);
220 nouveau_grctx_vals_load(dev, ctx); 222 if (!pgraph->ctxprog) {
223 struct nouveau_grctx ctx = {};
224 ctx.dev = chan->dev;
225 ctx.mode = NOUVEAU_GRCTX_VALS;
226 ctx.data = chan->ramin_grctx->gpuobj;
227 nv50_grctx_init(&ctx);
228 } else {
229 nouveau_grctx_vals_load(dev, ctx);
230 }
221 nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); 231 nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
222 if ((dev_priv->chipset & 0xf0) == 0xa0) 232 if ((dev_priv->chipset & 0xf0) == 0xa0)
223 nv_wo32(dev, ctx, 0x00004/4, 0x00000000); 233 nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
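Together with the nv50_grctx.c file added below, the two hunks above form a two-pass use of the generator: nv50_graph_init_ctxctl() runs it once in NOUVEAU_GRCTX_PROG mode to emit the ctxprog (and to learn grctx_size from ctxvals_pos), and nv50_graph_create_context() runs it again in NOUVEAU_GRCTX_VALS mode to fill a channel's grctx object with default values. A condensed, hedged restatement of that flow (error handling, locking and the UCODE upload loop omitted):

/* Pass 1, at PGRAPH init: build the microcode into a scratch buffer. */
struct nouveau_grctx ctx = {};
uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL);

ctx.dev = dev;
ctx.mode = NOUVEAU_GRCTX_PROG;
ctx.data = cp;
ctx.ctxprog_max = 512;
nv50_grctx_init(&ctx);                /* fills cp[], sets ctxprog_len/ctxvals_pos */
pgraph->grctx_size = ctx.ctxvals_pos * 4;
/* ... write cp[0..ctxprog_len) to NV40_PGRAPH_CTXCTL_UCODE_{INDEX,DATA} ... */
kfree(cp);

/* Pass 2, per channel: write default ctxvals straight into the grctx object. */
struct nouveau_grctx vals = {};

vals.dev = chan->dev;
vals.mode = NOUVEAU_GRCTX_VALS;
vals.data = chan->ramin_grctx->gpuobj;
nv50_grctx_init(&vals);
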
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
new file mode 100644
index 000000000000..d105fcd42ca0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -0,0 +1,2367 @@
1/*
2 * Copyright 2009 Marcin Kościelnicki
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#define CP_FLAG_CLEAR 0
24#define CP_FLAG_SET 1
25#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
26#define CP_FLAG_SWAP_DIRECTION_LOAD 0
27#define CP_FLAG_SWAP_DIRECTION_SAVE 1
28#define CP_FLAG_UNK01 ((0 * 32) + 1)
29#define CP_FLAG_UNK01_CLEAR 0
30#define CP_FLAG_UNK01_SET 1
31#define CP_FLAG_UNK03 ((0 * 32) + 3)
32#define CP_FLAG_UNK03_CLEAR 0
33#define CP_FLAG_UNK03_SET 1
34#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
35#define CP_FLAG_USER_SAVE_NOT_PENDING 0
36#define CP_FLAG_USER_SAVE_PENDING 1
37#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
38#define CP_FLAG_USER_LOAD_NOT_PENDING 0
39#define CP_FLAG_USER_LOAD_PENDING 1
40#define CP_FLAG_UNK0B ((0 * 32) + 0xb)
41#define CP_FLAG_UNK0B_CLEAR 0
42#define CP_FLAG_UNK0B_SET 1
43#define CP_FLAG_UNK1D ((0 * 32) + 0x1d)
44#define CP_FLAG_UNK1D_CLEAR 0
45#define CP_FLAG_UNK1D_SET 1
46#define CP_FLAG_UNK20 ((1 * 32) + 0)
47#define CP_FLAG_UNK20_CLEAR 0
48#define CP_FLAG_UNK20_SET 1
49#define CP_FLAG_STATUS ((2 * 32) + 0)
50#define CP_FLAG_STATUS_BUSY 0
51#define CP_FLAG_STATUS_IDLE 1
52#define CP_FLAG_AUTO_SAVE ((2 * 32) + 4)
53#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
54#define CP_FLAG_AUTO_SAVE_PENDING 1
55#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5)
56#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
57#define CP_FLAG_AUTO_LOAD_PENDING 1
58#define CP_FLAG_XFER ((2 * 32) + 11)
59#define CP_FLAG_XFER_IDLE 0
60#define CP_FLAG_XFER_BUSY 1
61#define CP_FLAG_NEWCTX ((2 * 32) + 12)
62#define CP_FLAG_NEWCTX_BUSY 0
63#define CP_FLAG_NEWCTX_DONE 1
64#define CP_FLAG_ALWAYS ((2 * 32) + 13)
65#define CP_FLAG_ALWAYS_FALSE 0
66#define CP_FLAG_ALWAYS_TRUE 1
67
68#define CP_CTX 0x00100000
69#define CP_CTX_COUNT 0x000f0000
70#define CP_CTX_COUNT_SHIFT 16
71#define CP_CTX_REG 0x00003fff
72#define CP_LOAD_SR 0x00200000
73#define CP_LOAD_SR_VALUE 0x000fffff
74#define CP_BRA 0x00400000
75#define CP_BRA_IP 0x0001ff00
76#define CP_BRA_IP_SHIFT 8
77#define CP_BRA_IF_CLEAR 0x00000080
78#define CP_BRA_FLAG 0x0000007f
79#define CP_WAIT 0x00500000
80#define CP_WAIT_SET 0x00000080
81#define CP_WAIT_FLAG 0x0000007f
82#define CP_SET 0x00700000
83#define CP_SET_1 0x00000080
84#define CP_SET_FLAG 0x0000007f
85#define CP_NEWCTX 0x00600004
86#define CP_NEXT_TO_SWAP 0x00600005
87#define CP_SET_CONTEXT_POINTER 0x00600006
88#define CP_SET_XFER_POINTER 0x00600007
89#define CP_ENABLE 0x00600009
90#define CP_END 0x0060000c
91#define CP_NEXT_TO_CURRENT 0x0060000d
92#define CP_DISABLE1 0x0090ffff
93#define CP_DISABLE2 0x0091ffff
94#define CP_XFER_1 0x008000ff
95#define CP_XFER_2 0x008800ff
96#define CP_SEEK_1 0x00c000ff
97#define CP_SEEK_2 0x00c800ff
98
99#include "drmP.h"
100#include "nouveau_drv.h"
101#include "nouveau_grctx.h"
102
103/*
104 * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
105 * the GPU itself that does context-switching, but it needs a special
106 * microcode to do it. And it's the driver's task to supply this microcode,
107 * further known as ctxprog, as well as the initial context values, known
108 * as ctxvals.
109 *
110 * Without ctxprog, you cannot switch contexts. Not even in software, since
111 * the majority of context [xfer strands] isn't accessible directly. You're
112 * stuck with a single channel, and you also suffer all the problems resulting
113 * from missing ctxvals, since you cannot load them.
114 *
115 * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to
116 * run 2d operations, but trying to utilise 3d or CUDA will just lock you up,
117 * since you don't have... some sort of needed setup.
118 *
119 * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since
120 * it's too much hassle to handle no-ctxprog as a special case.
121 */
122
123/*
124 * How ctxprogs work.
125 *
 126 * The ctxprog is written in its own kind of microcode, with a very small and
 127 * crappy set of available commands. You upload it to a small [512 insns]
 128 * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to
 129 * switch channels, or when the driver explicitly requests it. Stuff visible
130 * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands,
131 * the per-channel context save area in VRAM [known as ctxvals or grctx],
132 * 4 flags registers, a scratch register, two grctx pointers, plus many
133 * random poorly-understood details.
134 *
135 * When ctxprog runs, it's supposed to check what operations are asked of it,
 136 * save the old context if requested, optionally reset PGRAPH and switch to the
 137 * new channel, and load the new context. The context consists of three major
 138 * parts: a subset of MMIO registers and two "xfer areas".
139 */
140
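For illustration only: the bitfield defines above suggest how a single ctxprog instruction word is packed. The real emission is done by the cp_bra()/cp_out()/cp_wait() helpers from nouveau_grctx.h used below; the standalone encoder here merely assumes the fields compose exactly as the masks indicate.

#include <stdint.h>
#include <stdio.h>

#define CP_BRA            0x00400000
#define CP_BRA_IP_SHIFT   8
#define CP_BRA_IF_CLEAR   0x00000080
#define CP_FLAG_AUTO_SAVE ((2 * 32) + 4)

/* Illustrative packing of a conditional branch, per the masks above. */
static uint32_t encode_bra(uint32_t target_ip, int if_clear, uint32_t flag)
{
	return CP_BRA | (target_ip << CP_BRA_IP_SHIFT) |
	       (if_clear ? CP_BRA_IF_CLEAR : 0) | flag;
}

int main(void)
{
	/* "branch to insn 0x20 if AUTO_SAVE is pending", i.e. the flag is set. */
	printf("0x%08x\n", encode_bra(0x20, 0, CP_FLAG_AUTO_SAVE));
	return 0;
}
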
141/* TODO:
142 * - document unimplemented bits compared to nvidia
143 * - NVAx: make a TP subroutine, use it.
144 * - use 0x4008fc instead of 0x1540?
145 */
146
147enum cp_label {
148 cp_check_load = 1,
149 cp_setup_auto_load,
150 cp_setup_load,
151 cp_setup_save,
152 cp_swap_state,
153 cp_prepare_exit,
154 cp_exit,
155};
156
157static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx);
158static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx);
159static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
160
161/* Main function: construct the ctxprog skeleton, call the other functions. */
162
163int
164nv50_grctx_init(struct nouveau_grctx *ctx)
165{
166 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
167
168 switch (dev_priv->chipset) {
169 case 0x50:
170 case 0x84:
171 case 0x86:
172 case 0x92:
173 case 0x94:
174 case 0x96:
175 case 0x98:
176 case 0xa0:
177 case 0xa5:
178 case 0xa8:
179 case 0xaa:
180 case 0xac:
181 break;
182 default:
183 NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
184 "your NV%x card.\n", dev_priv->chipset);
185 NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
186 "the devs.\n");
187 return -ENOSYS;
188 }
189 /* decide whether we're loading/unloading the context */
190 cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
191 cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
192
193 cp_name(ctx, cp_check_load);
194 cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
195 cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
196 cp_bra (ctx, ALWAYS, TRUE, cp_exit);
197
198 /* setup for context load */
199 cp_name(ctx, cp_setup_auto_load);
200 cp_out (ctx, CP_DISABLE1);
201 cp_out (ctx, CP_DISABLE2);
202 cp_out (ctx, CP_ENABLE);
203 cp_out (ctx, CP_NEXT_TO_SWAP);
204 cp_set (ctx, UNK01, SET);
205 cp_name(ctx, cp_setup_load);
206 cp_out (ctx, CP_NEWCTX);
207 cp_wait(ctx, NEWCTX, BUSY);
208 cp_set (ctx, UNK1D, CLEAR);
209 cp_set (ctx, SWAP_DIRECTION, LOAD);
210 cp_bra (ctx, UNK0B, SET, cp_prepare_exit);
211 cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
212
213 /* setup for context save */
214 cp_name(ctx, cp_setup_save);
215 cp_set (ctx, UNK1D, SET);
216 cp_wait(ctx, STATUS, BUSY);
217 cp_set (ctx, UNK01, SET);
218 cp_set (ctx, SWAP_DIRECTION, SAVE);
219
220 /* general PGRAPH state */
221 cp_name(ctx, cp_swap_state);
222 cp_set (ctx, UNK03, SET);
223 cp_pos (ctx, 0x00004/4);
224 cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */
225 cp_pos (ctx, 0x00100/4);
226 nv50_graph_construct_mmio(ctx);
227 nv50_graph_construct_xfer1(ctx);
228 nv50_graph_construct_xfer2(ctx);
229
230 cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
231
232 cp_set (ctx, UNK20, SET);
233 cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */
234 cp_lsr (ctx, ctx->ctxvals_base);
235 cp_out (ctx, CP_SET_XFER_POINTER);
236 cp_lsr (ctx, 4);
237 cp_out (ctx, CP_SEEK_1);
238 cp_out (ctx, CP_XFER_1);
239 cp_wait(ctx, XFER, BUSY);
240
241 /* pre-exit state updates */
242 cp_name(ctx, cp_prepare_exit);
243 cp_set (ctx, UNK01, CLEAR);
244 cp_set (ctx, UNK03, CLEAR);
245 cp_set (ctx, UNK1D, CLEAR);
246
247 cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
248 cp_out (ctx, CP_NEXT_TO_CURRENT);
249
250 cp_name(ctx, cp_exit);
251 cp_set (ctx, USER_SAVE, NOT_PENDING);
252 cp_set (ctx, USER_LOAD, NOT_PENDING);
253 cp_out (ctx, CP_END);
254 ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */
255
256 return 0;
257}
258
259/*
260 * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
261 * registers to save/restore and the default values for them.
262 */
263
264static void
265nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
266{
267 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
268 int i, j;
269 int offset, base;
270 uint32_t units = nv_rd32 (ctx->dev, 0x1540);
271
272 /* 0800 */
273 cp_ctx(ctx, 0x400808, 7);
274 gr_def(ctx, 0x400814, 0x00000030);
275 cp_ctx(ctx, 0x400834, 0x32);
276 if (dev_priv->chipset == 0x50) {
277 gr_def(ctx, 0x400834, 0xff400040);
278 gr_def(ctx, 0x400838, 0xfff00080);
279 gr_def(ctx, 0x40083c, 0xfff70090);
280 gr_def(ctx, 0x400840, 0xffe806a8);
281 }
282 gr_def(ctx, 0x400844, 0x00000002);
283 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
284 gr_def(ctx, 0x400894, 0x00001000);
285 gr_def(ctx, 0x4008e8, 0x00000003);
286 gr_def(ctx, 0x4008ec, 0x00001000);
287 if (dev_priv->chipset == 0x50)
288 cp_ctx(ctx, 0x400908, 0xb);
289 else if (dev_priv->chipset < 0xa0)
290 cp_ctx(ctx, 0x400908, 0xc);
291 else
292 cp_ctx(ctx, 0x400908, 0xe);
293
294 if (dev_priv->chipset >= 0xa0)
295 cp_ctx(ctx, 0x400b00, 0x1);
296 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
297 cp_ctx(ctx, 0x400b10, 0x1);
298 gr_def(ctx, 0x400b10, 0x0001629d);
299 cp_ctx(ctx, 0x400b20, 0x1);
300 gr_def(ctx, 0x400b20, 0x0001629d);
301 }
302
303 /* 0C00 */
304 cp_ctx(ctx, 0x400c08, 0x2);
305 gr_def(ctx, 0x400c08, 0x0000fe0c);
306
307 /* 1000 */
308 if (dev_priv->chipset < 0xa0) {
309 cp_ctx(ctx, 0x401008, 0x4);
310 gr_def(ctx, 0x401014, 0x00001000);
311 } else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) {
312 cp_ctx(ctx, 0x401008, 0x5);
313 gr_def(ctx, 0x401018, 0x00001000);
314 } else {
315 cp_ctx(ctx, 0x401008, 0x5);
316 gr_def(ctx, 0x401018, 0x00004000);
317 }
318
319 /* 1400 */
320 cp_ctx(ctx, 0x401400, 0x8);
321 cp_ctx(ctx, 0x401424, 0x3);
322 if (dev_priv->chipset == 0x50)
323 gr_def(ctx, 0x40142c, 0x0001fd87);
324 else
325 gr_def(ctx, 0x40142c, 0x00000187);
326 cp_ctx(ctx, 0x401540, 0x5);
327 gr_def(ctx, 0x401550, 0x00001018);
328
329 /* 1800 */
330 cp_ctx(ctx, 0x401814, 0x1);
331 gr_def(ctx, 0x401814, 0x000000ff);
332 if (dev_priv->chipset == 0x50) {
333 cp_ctx(ctx, 0x40181c, 0xe);
334 gr_def(ctx, 0x401850, 0x00000004);
335 } else if (dev_priv->chipset < 0xa0) {
336 cp_ctx(ctx, 0x40181c, 0xf);
337 gr_def(ctx, 0x401854, 0x00000004);
338 } else {
339 cp_ctx(ctx, 0x40181c, 0x13);
340 gr_def(ctx, 0x401864, 0x00000004);
341 }
342
343 /* 1C00 */
344 cp_ctx(ctx, 0x401c00, 0x1);
345 switch (dev_priv->chipset) {
346 case 0x50:
347 gr_def(ctx, 0x401c00, 0x0001005f);
348 break;
349 case 0x84:
350 case 0x86:
351 case 0x94:
352 gr_def(ctx, 0x401c00, 0x044d00df);
353 break;
354 case 0x92:
355 case 0x96:
356 case 0x98:
357 case 0xa0:
358 case 0xaa:
359 case 0xac:
360 gr_def(ctx, 0x401c00, 0x042500df);
361 break;
362 case 0xa5:
363 case 0xa8:
364 gr_def(ctx, 0x401c00, 0x142500df);
365 break;
366 }
367
368 /* 2400 */
369 cp_ctx(ctx, 0x402400, 0x1);
370 if (dev_priv->chipset == 0x50)
371 cp_ctx(ctx, 0x402408, 0x1);
372 else
373 cp_ctx(ctx, 0x402408, 0x2);
374 gr_def(ctx, 0x402408, 0x00000600);
375
376 /* 2800 */
377 cp_ctx(ctx, 0x402800, 0x1);
378 if (dev_priv->chipset == 0x50)
379 gr_def(ctx, 0x402800, 0x00000006);
380
381 /* 2C00 */
382 cp_ctx(ctx, 0x402c08, 0x6);
383 if (dev_priv->chipset != 0x50)
384 gr_def(ctx, 0x402c14, 0x01000000);
385 gr_def(ctx, 0x402c18, 0x000000ff);
386 if (dev_priv->chipset == 0x50)
387 cp_ctx(ctx, 0x402ca0, 0x1);
388 else
389 cp_ctx(ctx, 0x402ca0, 0x2);
390 if (dev_priv->chipset < 0xa0)
391 gr_def(ctx, 0x402ca0, 0x00000400);
392 else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
393 gr_def(ctx, 0x402ca0, 0x00000800);
394 else
395 gr_def(ctx, 0x402ca0, 0x00000400);
396 cp_ctx(ctx, 0x402cac, 0x4);
397
398 /* 3000 */
399 cp_ctx(ctx, 0x403004, 0x1);
400 gr_def(ctx, 0x403004, 0x00000001);
401
402 /* 3404 */
403 if (dev_priv->chipset >= 0xa0) {
404 cp_ctx(ctx, 0x403404, 0x1);
405 gr_def(ctx, 0x403404, 0x00000001);
406 }
407
408 /* 5000 */
409 cp_ctx(ctx, 0x405000, 0x1);
410 switch (dev_priv->chipset) {
411 case 0x50:
412 gr_def(ctx, 0x405000, 0x00300080);
413 break;
414 case 0x84:
415 case 0xa0:
416 case 0xa5:
417 case 0xa8:
418 case 0xaa:
419 case 0xac:
420 gr_def(ctx, 0x405000, 0x000e0080);
421 break;
422 case 0x86:
423 case 0x92:
424 case 0x94:
425 case 0x96:
426 case 0x98:
427 gr_def(ctx, 0x405000, 0x00000080);
428 break;
429 }
430 cp_ctx(ctx, 0x405014, 0x1);
431 gr_def(ctx, 0x405014, 0x00000004);
432 cp_ctx(ctx, 0x40501c, 0x1);
433 cp_ctx(ctx, 0x405024, 0x1);
434 cp_ctx(ctx, 0x40502c, 0x1);
435
436 /* 5400 or maybe 4800 */
437 if (dev_priv->chipset == 0x50) {
438 offset = 0x405400;
439 cp_ctx(ctx, 0x405400, 0xea);
440 } else if (dev_priv->chipset < 0x94) {
441 offset = 0x405400;
442 cp_ctx(ctx, 0x405400, 0xcb);
443 } else if (dev_priv->chipset < 0xa0) {
444 offset = 0x405400;
445 cp_ctx(ctx, 0x405400, 0xcc);
446 } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
447 offset = 0x404800;
448 cp_ctx(ctx, 0x404800, 0xda);
449 } else {
450 offset = 0x405400;
451 cp_ctx(ctx, 0x405400, 0xd4);
452 }
453 gr_def(ctx, offset + 0x0c, 0x00000002);
454 gr_def(ctx, offset + 0x10, 0x00000001);
455 if (dev_priv->chipset >= 0x94)
456 offset += 4;
457 gr_def(ctx, offset + 0x1c, 0x00000001);
458 gr_def(ctx, offset + 0x20, 0x00000100);
459 gr_def(ctx, offset + 0x38, 0x00000002);
460 gr_def(ctx, offset + 0x3c, 0x00000001);
461 gr_def(ctx, offset + 0x40, 0x00000001);
462 gr_def(ctx, offset + 0x50, 0x00000001);
463 gr_def(ctx, offset + 0x54, 0x003fffff);
464 gr_def(ctx, offset + 0x58, 0x00001fff);
465 gr_def(ctx, offset + 0x60, 0x00000001);
466 gr_def(ctx, offset + 0x64, 0x00000001);
467 gr_def(ctx, offset + 0x6c, 0x00000001);
468 gr_def(ctx, offset + 0x70, 0x00000001);
469 gr_def(ctx, offset + 0x74, 0x00000001);
470 gr_def(ctx, offset + 0x78, 0x00000004);
471 gr_def(ctx, offset + 0x7c, 0x00000001);
472 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
473 offset += 4;
474 gr_def(ctx, offset + 0x80, 0x00000001);
475 gr_def(ctx, offset + 0x84, 0x00000001);
476 gr_def(ctx, offset + 0x88, 0x00000007);
477 gr_def(ctx, offset + 0x8c, 0x00000001);
478 gr_def(ctx, offset + 0x90, 0x00000007);
479 gr_def(ctx, offset + 0x94, 0x00000001);
480 gr_def(ctx, offset + 0x98, 0x00000001);
481 gr_def(ctx, offset + 0x9c, 0x00000001);
482 if (dev_priv->chipset == 0x50) {
483 gr_def(ctx, offset + 0xb0, 0x00000001);
484 gr_def(ctx, offset + 0xb4, 0x00000001);
485 gr_def(ctx, offset + 0xbc, 0x00000001);
486 gr_def(ctx, offset + 0xc0, 0x0000000a);
487 gr_def(ctx, offset + 0xd0, 0x00000040);
488 gr_def(ctx, offset + 0xd8, 0x00000002);
489 gr_def(ctx, offset + 0xdc, 0x00000100);
490 gr_def(ctx, offset + 0xe0, 0x00000001);
491 gr_def(ctx, offset + 0xe4, 0x00000100);
492 gr_def(ctx, offset + 0x100, 0x00000001);
493 gr_def(ctx, offset + 0x124, 0x00000004);
494 gr_def(ctx, offset + 0x13c, 0x00000001);
495 gr_def(ctx, offset + 0x140, 0x00000100);
496 gr_def(ctx, offset + 0x148, 0x00000001);
497 gr_def(ctx, offset + 0x154, 0x00000100);
498 gr_def(ctx, offset + 0x158, 0x00000001);
499 gr_def(ctx, offset + 0x15c, 0x00000100);
500 gr_def(ctx, offset + 0x164, 0x00000001);
501 gr_def(ctx, offset + 0x170, 0x00000100);
502 gr_def(ctx, offset + 0x174, 0x00000001);
503 gr_def(ctx, offset + 0x17c, 0x00000001);
504 gr_def(ctx, offset + 0x188, 0x00000002);
505 gr_def(ctx, offset + 0x190, 0x00000001);
506 gr_def(ctx, offset + 0x198, 0x00000001);
507 gr_def(ctx, offset + 0x1ac, 0x00000003);
508 offset += 0xd0;
509 } else {
510 gr_def(ctx, offset + 0xb0, 0x00000001);
511 gr_def(ctx, offset + 0xb4, 0x00000100);
512 gr_def(ctx, offset + 0xbc, 0x00000001);
513 gr_def(ctx, offset + 0xc8, 0x00000100);
514 gr_def(ctx, offset + 0xcc, 0x00000001);
515 gr_def(ctx, offset + 0xd0, 0x00000100);
516 gr_def(ctx, offset + 0xd8, 0x00000001);
517 gr_def(ctx, offset + 0xe4, 0x00000100);
518 }
519 gr_def(ctx, offset + 0xf8, 0x00000004);
520 gr_def(ctx, offset + 0xfc, 0x00000070);
521 gr_def(ctx, offset + 0x100, 0x00000080);
522 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
523 offset += 4;
524 gr_def(ctx, offset + 0x114, 0x0000000c);
525 if (dev_priv->chipset == 0x50)
526 offset -= 4;
527 gr_def(ctx, offset + 0x11c, 0x00000008);
528 gr_def(ctx, offset + 0x120, 0x00000014);
529 if (dev_priv->chipset == 0x50) {
530 gr_def(ctx, offset + 0x124, 0x00000026);
531 offset -= 0x18;
532 } else {
533 gr_def(ctx, offset + 0x128, 0x00000029);
534 gr_def(ctx, offset + 0x12c, 0x00000027);
535 gr_def(ctx, offset + 0x130, 0x00000026);
536 gr_def(ctx, offset + 0x134, 0x00000008);
537 gr_def(ctx, offset + 0x138, 0x00000004);
538 gr_def(ctx, offset + 0x13c, 0x00000027);
539 }
540 gr_def(ctx, offset + 0x148, 0x00000001);
541 gr_def(ctx, offset + 0x14c, 0x00000002);
542 gr_def(ctx, offset + 0x150, 0x00000003);
543 gr_def(ctx, offset + 0x154, 0x00000004);
544 gr_def(ctx, offset + 0x158, 0x00000005);
545 gr_def(ctx, offset + 0x15c, 0x00000006);
546 gr_def(ctx, offset + 0x160, 0x00000007);
547 gr_def(ctx, offset + 0x164, 0x00000001);
548 gr_def(ctx, offset + 0x1a8, 0x000000cf);
549 if (dev_priv->chipset == 0x50)
550 offset -= 4;
551 gr_def(ctx, offset + 0x1d8, 0x00000080);
552 gr_def(ctx, offset + 0x1dc, 0x00000004);
553 gr_def(ctx, offset + 0x1e0, 0x00000004);
554 if (dev_priv->chipset == 0x50)
555 offset -= 4;
556 else
557 gr_def(ctx, offset + 0x1e4, 0x00000003);
558 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
559 gr_def(ctx, offset + 0x1ec, 0x00000003);
560 offset += 8;
561 }
562 gr_def(ctx, offset + 0x1e8, 0x00000001);
563 if (dev_priv->chipset == 0x50)
564 offset -= 4;
565 gr_def(ctx, offset + 0x1f4, 0x00000012);
566 gr_def(ctx, offset + 0x1f8, 0x00000010);
567 gr_def(ctx, offset + 0x1fc, 0x0000000c);
568 gr_def(ctx, offset + 0x200, 0x00000001);
569 gr_def(ctx, offset + 0x210, 0x00000004);
570 gr_def(ctx, offset + 0x214, 0x00000002);
571 gr_def(ctx, offset + 0x218, 0x00000004);
572 if (dev_priv->chipset >= 0xa0)
573 offset += 4;
574 gr_def(ctx, offset + 0x224, 0x003fffff);
575 gr_def(ctx, offset + 0x228, 0x00001fff);
576 if (dev_priv->chipset == 0x50)
577 offset -= 0x20;
578 else if (dev_priv->chipset >= 0xa0) {
579 gr_def(ctx, offset + 0x250, 0x00000001);
580 gr_def(ctx, offset + 0x254, 0x00000001);
581 gr_def(ctx, offset + 0x258, 0x00000002);
582 offset += 0x10;
583 }
584 gr_def(ctx, offset + 0x250, 0x00000004);
585 gr_def(ctx, offset + 0x254, 0x00000014);
586 gr_def(ctx, offset + 0x258, 0x00000001);
587 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
588 offset += 4;
589 gr_def(ctx, offset + 0x264, 0x00000002);
590 if (dev_priv->chipset >= 0xa0)
591 offset += 8;
592 gr_def(ctx, offset + 0x270, 0x00000001);
593 gr_def(ctx, offset + 0x278, 0x00000002);
594 gr_def(ctx, offset + 0x27c, 0x00001000);
595 if (dev_priv->chipset == 0x50)
596 offset -= 0xc;
597 else {
598 gr_def(ctx, offset + 0x280, 0x00000e00);
599 gr_def(ctx, offset + 0x284, 0x00001000);
600 gr_def(ctx, offset + 0x288, 0x00001e00);
601 }
602 gr_def(ctx, offset + 0x290, 0x00000001);
603 gr_def(ctx, offset + 0x294, 0x00000001);
604 gr_def(ctx, offset + 0x298, 0x00000001);
605 gr_def(ctx, offset + 0x29c, 0x00000001);
606 gr_def(ctx, offset + 0x2a0, 0x00000001);
607 gr_def(ctx, offset + 0x2b0, 0x00000200);
608 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
609 gr_def(ctx, offset + 0x2b4, 0x00000200);
610 offset += 4;
611 }
612 if (dev_priv->chipset < 0xa0) {
613 gr_def(ctx, offset + 0x2b8, 0x00000001);
614 gr_def(ctx, offset + 0x2bc, 0x00000070);
615 gr_def(ctx, offset + 0x2c0, 0x00000080);
616 gr_def(ctx, offset + 0x2cc, 0x00000001);
617 gr_def(ctx, offset + 0x2d0, 0x00000070);
618 gr_def(ctx, offset + 0x2d4, 0x00000080);
619 } else {
620 gr_def(ctx, offset + 0x2b8, 0x00000001);
621 gr_def(ctx, offset + 0x2bc, 0x000000f0);
622 gr_def(ctx, offset + 0x2c0, 0x000000ff);
623 gr_def(ctx, offset + 0x2cc, 0x00000001);
624 gr_def(ctx, offset + 0x2d0, 0x000000f0);
625 gr_def(ctx, offset + 0x2d4, 0x000000ff);
626 gr_def(ctx, offset + 0x2dc, 0x00000009);
627 offset += 4;
628 }
629 gr_def(ctx, offset + 0x2e4, 0x00000001);
630 gr_def(ctx, offset + 0x2e8, 0x000000cf);
631 gr_def(ctx, offset + 0x2f0, 0x00000001);
632 gr_def(ctx, offset + 0x300, 0x000000cf);
633 gr_def(ctx, offset + 0x308, 0x00000002);
634 gr_def(ctx, offset + 0x310, 0x00000001);
635 gr_def(ctx, offset + 0x318, 0x00000001);
636 gr_def(ctx, offset + 0x320, 0x000000cf);
637 gr_def(ctx, offset + 0x324, 0x000000cf);
638 gr_def(ctx, offset + 0x328, 0x00000001);
639
640 /* 6000? */
641 if (dev_priv->chipset == 0x50)
642 cp_ctx(ctx, 0x4063e0, 0x1);
643
644 /* 6800 */
645 if (dev_priv->chipset < 0x90) {
646 cp_ctx(ctx, 0x406814, 0x2b);
647 gr_def(ctx, 0x406818, 0x00000f80);
648 gr_def(ctx, 0x406860, 0x007f0080);
649 gr_def(ctx, 0x40689c, 0x007f0080);
650 } else {
651 cp_ctx(ctx, 0x406814, 0x4);
652 if (dev_priv->chipset == 0x98)
653 gr_def(ctx, 0x406818, 0x00000f80);
654 else
655 gr_def(ctx, 0x406818, 0x00001f80);
656 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
657 gr_def(ctx, 0x40681c, 0x00000030);
658 cp_ctx(ctx, 0x406830, 0x3);
659 }
660
661 /* 7000: per-ROP group state */
662 for (i = 0; i < 8; i++) {
663 if (units & (1<<(i+16))) {
664 cp_ctx(ctx, 0x407000 + (i<<8), 3);
665 if (dev_priv->chipset == 0x50)
666 gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
667 else if (dev_priv->chipset != 0xa5)
668 gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
669 else
670 gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
671 gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
672
673 if (dev_priv->chipset == 0x50) {
674 cp_ctx(ctx, 0x407010 + (i<<8), 1);
675 } else if (dev_priv->chipset < 0xa0) {
676 cp_ctx(ctx, 0x407010 + (i<<8), 2);
677 gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
678 gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
679 } else {
680 cp_ctx(ctx, 0x407010 + (i<<8), 3);
681 gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
682 if (dev_priv->chipset != 0xa5)
683 gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
684 else
685 gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
686 }
687
688 cp_ctx(ctx, 0x407080 + (i<<8), 4);
689 if (dev_priv->chipset != 0xa5)
690 gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
691 else
692 gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
693 if (dev_priv->chipset == 0x50)
694 gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
695 else
696 gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
697 gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
698
699 if (dev_priv->chipset < 0xa0)
700 cp_ctx(ctx, 0x407094 + (i<<8), 1);
701 else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
702 cp_ctx(ctx, 0x407094 + (i<<8), 3);
703 else {
704 cp_ctx(ctx, 0x407094 + (i<<8), 4);
705 gr_def(ctx, 0x4070a0 + (i<<8), 1);
706 }
707 }
708 }
709
710 cp_ctx(ctx, 0x407c00, 0x3);
711 if (dev_priv->chipset < 0x90)
712 gr_def(ctx, 0x407c00, 0x00010040);
713 else if (dev_priv->chipset < 0xa0)
714 gr_def(ctx, 0x407c00, 0x00390040);
715 else
716 gr_def(ctx, 0x407c00, 0x003d0040);
717 gr_def(ctx, 0x407c08, 0x00000022);
718 if (dev_priv->chipset >= 0xa0) {
719 cp_ctx(ctx, 0x407c10, 0x3);
720 cp_ctx(ctx, 0x407c20, 0x1);
721 cp_ctx(ctx, 0x407c2c, 0x1);
722 }
723
724 if (dev_priv->chipset < 0xa0) {
725 cp_ctx(ctx, 0x407d00, 0x9);
726 } else {
727 cp_ctx(ctx, 0x407d00, 0x15);
728 }
729 if (dev_priv->chipset == 0x98)
730 gr_def(ctx, 0x407d08, 0x00380040);
731 else {
732 if (dev_priv->chipset < 0x90)
733 gr_def(ctx, 0x407d08, 0x00010040);
734 else if (dev_priv->chipset < 0xa0)
735 gr_def(ctx, 0x407d08, 0x00390040);
736 else
737 gr_def(ctx, 0x407d08, 0x003d0040);
738 gr_def(ctx, 0x407d0c, 0x00000022);
739 }
740
741 /* 8000+: per-TP state */
742 for (i = 0; i < 10; i++) {
743 if (units & (1<<i)) {
744 if (dev_priv->chipset < 0xa0)
745 base = 0x408000 + (i<<12);
746 else
747 base = 0x408000 + (i<<11);
748 if (dev_priv->chipset < 0xa0)
749 offset = base + 0xc00;
750 else
751 offset = base + 0x80;
752 cp_ctx(ctx, offset + 0x00, 1);
753 gr_def(ctx, offset + 0x00, 0x0000ff0a);
754 cp_ctx(ctx, offset + 0x08, 1);
755
756 /* per-MP state */
757 for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
758 if (!(units & (1 << (j+24)))) continue;
759 if (dev_priv->chipset < 0xa0)
760 offset = base + 0x200 + (j<<7);
761 else
762 offset = base + 0x100 + (j<<7);
763 cp_ctx(ctx, offset, 0x20);
764 gr_def(ctx, offset + 0x00, 0x01800000);
765 gr_def(ctx, offset + 0x04, 0x00160000);
766 gr_def(ctx, offset + 0x08, 0x01800000);
767 gr_def(ctx, offset + 0x18, 0x0003ffff);
768 switch (dev_priv->chipset) {
769 case 0x50:
770 gr_def(ctx, offset + 0x1c, 0x00080000);
771 break;
772 case 0x84:
773 gr_def(ctx, offset + 0x1c, 0x00880000);
774 break;
775 case 0x86:
776 gr_def(ctx, offset + 0x1c, 0x008c0000);
777 break;
778 case 0x92:
779 case 0x96:
780 case 0x98:
781 gr_def(ctx, offset + 0x1c, 0x118c0000);
782 break;
783 case 0x94:
784 gr_def(ctx, offset + 0x1c, 0x10880000);
785 break;
786 case 0xa0:
787 case 0xa5:
788 gr_def(ctx, offset + 0x1c, 0x310c0000);
789 break;
790 case 0xa8:
791 case 0xaa:
792 case 0xac:
793 gr_def(ctx, offset + 0x1c, 0x300c0000);
794 break;
795 }
796 gr_def(ctx, offset + 0x40, 0x00010401);
797 if (dev_priv->chipset == 0x50)
798 gr_def(ctx, offset + 0x48, 0x00000040);
799 else
800 gr_def(ctx, offset + 0x48, 0x00000078);
801 gr_def(ctx, offset + 0x50, 0x000000bf);
802 gr_def(ctx, offset + 0x58, 0x00001210);
803 if (dev_priv->chipset == 0x50)
804 gr_def(ctx, offset + 0x5c, 0x00000080);
805 else
806 gr_def(ctx, offset + 0x5c, 0x08000080);
807 if (dev_priv->chipset >= 0xa0)
808 gr_def(ctx, offset + 0x68, 0x0000003e);
809 }
810
811 if (dev_priv->chipset < 0xa0)
812 cp_ctx(ctx, base + 0x300, 0x4);
813 else
814 cp_ctx(ctx, base + 0x300, 0x5);
815 if (dev_priv->chipset == 0x50)
816 gr_def(ctx, base + 0x304, 0x00007070);
817 else if (dev_priv->chipset < 0xa0)
818 gr_def(ctx, base + 0x304, 0x00027070);
819 else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
820 gr_def(ctx, base + 0x304, 0x01127070);
821 else
822 gr_def(ctx, base + 0x304, 0x05127070);
823
824 if (dev_priv->chipset < 0xa0)
825 cp_ctx(ctx, base + 0x318, 1);
826 else
827 cp_ctx(ctx, base + 0x320, 1);
828 if (dev_priv->chipset == 0x50)
829 gr_def(ctx, base + 0x318, 0x0003ffff);
830 else if (dev_priv->chipset < 0xa0)
831 gr_def(ctx, base + 0x318, 0x03ffffff);
832 else
833 gr_def(ctx, base + 0x320, 0x07ffffff);
834
835 if (dev_priv->chipset < 0xa0)
836 cp_ctx(ctx, base + 0x324, 5);
837 else
838 cp_ctx(ctx, base + 0x328, 4);
839
840 if (dev_priv->chipset < 0xa0) {
841 cp_ctx(ctx, base + 0x340, 9);
842 offset = base + 0x340;
843 } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
844 cp_ctx(ctx, base + 0x33c, 0xb);
845 offset = base + 0x344;
846 } else {
847 cp_ctx(ctx, base + 0x33c, 0xd);
848 offset = base + 0x344;
849 }
850 gr_def(ctx, offset + 0x0, 0x00120407);
851 gr_def(ctx, offset + 0x4, 0x05091507);
852 if (dev_priv->chipset == 0x84)
853 gr_def(ctx, offset + 0x8, 0x05100202);
854 else
855 gr_def(ctx, offset + 0x8, 0x05010202);
856 gr_def(ctx, offset + 0xc, 0x00030201);
857
858 cp_ctx(ctx, base + 0x400, 2);
859 gr_def(ctx, base + 0x404, 0x00000040);
860 cp_ctx(ctx, base + 0x40c, 2);
861 gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
862 gr_def(ctx, base + 0x410, 0x00141210);
863
864 if (dev_priv->chipset < 0xa0)
865 offset = base + 0x800;
866 else
867 offset = base + 0x500;
868 cp_ctx(ctx, offset, 6);
869 gr_def(ctx, offset + 0x0, 0x000001f0);
870 gr_def(ctx, offset + 0x4, 0x00000001);
871 gr_def(ctx, offset + 0x8, 0x00000003);
872 if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa)
873 gr_def(ctx, offset + 0xc, 0x00008000);
874 gr_def(ctx, offset + 0x14, 0x00039e00);
875 cp_ctx(ctx, offset + 0x1c, 2);
876 if (dev_priv->chipset == 0x50)
877 gr_def(ctx, offset + 0x1c, 0x00000040);
878 else
879 gr_def(ctx, offset + 0x1c, 0x00000100);
880 gr_def(ctx, offset + 0x20, 0x00003800);
881
882 if (dev_priv->chipset >= 0xa0) {
883 cp_ctx(ctx, base + 0x54c, 2);
884 if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
885 gr_def(ctx, base + 0x54c, 0x003fe006);
886 else
887 gr_def(ctx, base + 0x54c, 0x003fe007);
888 gr_def(ctx, base + 0x550, 0x003fe000);
889 }
890
891 if (dev_priv->chipset < 0xa0)
892 offset = base + 0xa00;
893 else
894 offset = base + 0x680;
895 cp_ctx(ctx, offset, 1);
896 gr_def(ctx, offset, 0x00404040);
897
898 if (dev_priv->chipset < 0xa0)
899 offset = base + 0xe00;
900 else
901 offset = base + 0x700;
902 cp_ctx(ctx, offset, 2);
903 if (dev_priv->chipset < 0xa0)
904 gr_def(ctx, offset, 0x0077f005);
905 else if (dev_priv->chipset == 0xa5)
906 gr_def(ctx, offset, 0x6cf7f007);
907 else if (dev_priv->chipset == 0xa8)
908 gr_def(ctx, offset, 0x6cfff007);
909 else if (dev_priv->chipset == 0xac)
910 gr_def(ctx, offset, 0x0cfff007);
911 else
912 gr_def(ctx, offset, 0x0cf7f007);
913 if (dev_priv->chipset == 0x50)
914 gr_def(ctx, offset + 0x4, 0x00007fff);
915 else if (dev_priv->chipset < 0xa0)
916 gr_def(ctx, offset + 0x4, 0x003f7fff);
917 else
918 gr_def(ctx, offset + 0x4, 0x02bf7fff);
919 cp_ctx(ctx, offset + 0x2c, 1);
920 if (dev_priv->chipset == 0x50) {
921 cp_ctx(ctx, offset + 0x50, 9);
922 gr_def(ctx, offset + 0x54, 0x000003ff);
923 gr_def(ctx, offset + 0x58, 0x00000003);
924 gr_def(ctx, offset + 0x5c, 0x00000003);
925 gr_def(ctx, offset + 0x60, 0x000001ff);
926 gr_def(ctx, offset + 0x64, 0x0000001f);
927 gr_def(ctx, offset + 0x68, 0x0000000f);
928 gr_def(ctx, offset + 0x6c, 0x0000000f);
 929 } else if (dev_priv->chipset < 0xa0) {
930 cp_ctx(ctx, offset + 0x50, 1);
931 cp_ctx(ctx, offset + 0x70, 1);
932 } else {
933 cp_ctx(ctx, offset + 0x50, 1);
934 cp_ctx(ctx, offset + 0x60, 5);
935 }
936 }
937 }
938}
939
940/*
941 * xfer areas. These are a pain.
942 *
943 * There are 2 xfer areas: the first one is big and contains all sorts of
944 * stuff, the second is small and contains some per-TP context.
945 *
946 * Each area is split into 8 "strands". The areas, when saved to grctx,
947 * are made of 8-word blocks. Each block contains a single word from
948 * each strand. The strands are independent of each other, their
949 * addresses are unrelated to each other, and data in them is closely
950 * packed together. The strand layout varies a bit between cards: here
951 * and there, a single word is thrown out in the middle and the whole
 952 * strand is offset by a bit from the corresponding one on another chipset.
 953 * For this reason, addresses of stuff in strands are almost useless.
 954 * Knowing the sequence of stuff and the size of gaps between them is much more
 955 * useful, and that's how we build the strands in our generator.
956 *
957 * NVA0 takes this mess to a whole new level by cutting the old strands
958 * into a few dozen pieces [known as genes], rearranging them randomly,
959 * and putting them back together to make new strands. Hopefully these
960 * genes correspond more or less directly to the same PGRAPH subunits
961 * as in 400040 register.
962 *
963 * The most common value in default context is 0, and when the genes
 964 * are separated by 0's, gene boundaries are quite speculative...
 965 * some of them can be clearly deduced, others can be guessed, and yet
 966 * others won't be resolved without figuring out the real meaning of
 967 * a given ctxval. For the same reason, the ending point of each strand
 968 * is unknown, except for strand 0, which is the longest strand and
 969 * whose end corresponds to the end of the whole xfer.
970 *
971 * An unsolved mystery is the seek instruction: it takes an argument
972 * in bits 8-18, and that argument is clearly the place in strands to
973 * seek to... but the offsets don't seem to correspond to offsets as
974 * seen in grctx. Perhaps there's another, real, not randomly-changing
975 * addressing in strands, and the xfer insn just happens to skip over
976 * the unused bits? NV10-NV30 PIPE comes to mind...
977 *
978 * As far as I know, there's no way to access the xfer areas directly
979 * without the help of ctxprog.
980 */
981
982static inline void
983xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
984 int i;
985 if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
986 for (i = 0; i < num; i++)
987 nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val);
988 ctx->ctxvals_pos += num << 3;
989}
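Two properties of xf_emit() above are worth spelling out. First, each emitted word advances ctxvals_pos by 8 because the eight strands are interleaved into 8-word blocks when saved to grctx, so word w of strand s lands at dword base + w*8 + s; that is why nv50_graph_construct_xfer1() below starts each strand at offset + 0 .. offset + 7. Second, when ctx->mode is NOUVEAU_GRCTX_PROG nothing is written at all and only the position advances, which lets the very same gene functions double as size calculators for the ctxprog pass. A small standalone sketch of the layout rule (grctx_dword() is illustrative, not a driver helper):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: dword index of word 'w' of strand 's' in the saved grctx. */
static uint32_t grctx_dword(uint32_t base, uint32_t s, uint32_t w)
{
	return base + w * 8 + s;
}

int main(void)
{
	/* Mirrors ctxvals_pos = offset + s and the (i << 3) step in xf_emit(). */
	printf("%u\n", grctx_dword(0, 3, 2)); /* strand 3, word 2 -> dword 19 */
	return 0;
}
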
990
991/* Gene declarations... */
992
993static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
994static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx);
995static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx);
996static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx);
997static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx);
998static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx);
999static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx);
1000static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx);
1001static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx);
1002static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx);
1003static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx);
1004static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
1005static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
1006
1007static void
1008nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1009{
1010 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1011 int i;
1012 int offset;
1013 int size = 0;
1014 uint32_t units = nv_rd32 (ctx->dev, 0x1540);
1015
1016 offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
1017 ctx->ctxvals_base = offset;
1018
1019 if (dev_priv->chipset < 0xa0) {
1020 /* Strand 0 */
1021 ctx->ctxvals_pos = offset;
1022 switch (dev_priv->chipset) {
1023 case 0x50:
1024 xf_emit(ctx, 0x99, 0);
1025 break;
1026 case 0x84:
1027 case 0x86:
1028 xf_emit(ctx, 0x384, 0);
1029 break;
1030 case 0x92:
1031 case 0x94:
1032 case 0x96:
1033 case 0x98:
1034 xf_emit(ctx, 0x380, 0);
1035 break;
1036 }
1037 nv50_graph_construct_gene_m2mf (ctx);
1038 switch (dev_priv->chipset) {
1039 case 0x50:
1040 case 0x84:
1041 case 0x86:
1042 case 0x98:
1043 xf_emit(ctx, 0x4c4, 0);
1044 break;
1045 case 0x92:
1046 case 0x94:
1047 case 0x96:
1048 xf_emit(ctx, 0x984, 0);
1049 break;
1050 }
1051 nv50_graph_construct_gene_unk5(ctx);
1052 if (dev_priv->chipset == 0x50)
1053 xf_emit(ctx, 0xa, 0);
1054 else
1055 xf_emit(ctx, 0xb, 0);
1056 nv50_graph_construct_gene_unk4(ctx);
1057 nv50_graph_construct_gene_unk3(ctx);
1058 if ((ctx->ctxvals_pos-offset)/8 > size)
1059 size = (ctx->ctxvals_pos-offset)/8;
1060
1061 /* Strand 1 */
1062 ctx->ctxvals_pos = offset + 0x1;
1063 nv50_graph_construct_gene_unk6(ctx);
1064 nv50_graph_construct_gene_unk7(ctx);
1065 nv50_graph_construct_gene_unk8(ctx);
1066 switch (dev_priv->chipset) {
1067 case 0x50:
1068 case 0x92:
1069 xf_emit(ctx, 0xfb, 0);
1070 break;
1071 case 0x84:
1072 xf_emit(ctx, 0xd3, 0);
1073 break;
1074 case 0x94:
1075 case 0x96:
1076 xf_emit(ctx, 0xab, 0);
1077 break;
1078 case 0x86:
1079 case 0x98:
1080 xf_emit(ctx, 0x6b, 0);
1081 break;
1082 }
1083 xf_emit(ctx, 2, 0x4e3bfdf);
1084 xf_emit(ctx, 4, 0);
1085 xf_emit(ctx, 1, 0x0fac6881);
1086 xf_emit(ctx, 0xb, 0);
1087 xf_emit(ctx, 2, 0x4e3bfdf);
1088 if ((ctx->ctxvals_pos-offset)/8 > size)
1089 size = (ctx->ctxvals_pos-offset)/8;
1090
1091 /* Strand 2 */
1092 ctx->ctxvals_pos = offset + 0x2;
1093 switch (dev_priv->chipset) {
1094 case 0x50:
1095 case 0x92:
1096 xf_emit(ctx, 0xa80, 0);
1097 break;
1098 case 0x84:
1099 xf_emit(ctx, 0xa7e, 0);
1100 break;
1101 case 0x94:
1102 case 0x96:
1103 xf_emit(ctx, 0xa7c, 0);
1104 break;
1105 case 0x86:
1106 case 0x98:
1107 xf_emit(ctx, 0xa7a, 0);
1108 break;
1109 }
1110 xf_emit(ctx, 1, 0x3fffff);
1111 xf_emit(ctx, 2, 0);
1112 xf_emit(ctx, 1, 0x1fff);
1113 xf_emit(ctx, 0xe, 0);
1114 nv50_graph_construct_gene_unk9(ctx);
1115 nv50_graph_construct_gene_unk2(ctx);
1116 nv50_graph_construct_gene_unk1(ctx);
1117 nv50_graph_construct_gene_unk10(ctx);
1118 if ((ctx->ctxvals_pos-offset)/8 > size)
1119 size = (ctx->ctxvals_pos-offset)/8;
1120
1121 /* Strand 3: per-ROP group state */
1122 ctx->ctxvals_pos = offset + 3;
1123 for (i = 0; i < 6; i++)
1124 if (units & (1 << (i + 16)))
1125 nv50_graph_construct_gene_ropc(ctx);
1126 if ((ctx->ctxvals_pos-offset)/8 > size)
1127 size = (ctx->ctxvals_pos-offset)/8;
1128
1129 /* Strands 4-7: per-TP state */
1130 for (i = 0; i < 4; i++) {
1131 ctx->ctxvals_pos = offset + 4 + i;
1132 if (units & (1 << (2 * i)))
1133 nv50_graph_construct_xfer_tp(ctx);
1134 if (units & (1 << (2 * i + 1)))
1135 nv50_graph_construct_xfer_tp(ctx);
1136 if ((ctx->ctxvals_pos-offset)/8 > size)
1137 size = (ctx->ctxvals_pos-offset)/8;
1138 }
1139 } else {
1140 /* Strand 0 */
1141 ctx->ctxvals_pos = offset;
1142 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1143 xf_emit(ctx, 0x385, 0);
1144 else
1145 xf_emit(ctx, 0x384, 0);
1146 nv50_graph_construct_gene_m2mf(ctx);
1147 xf_emit(ctx, 0x950, 0);
1148 nv50_graph_construct_gene_unk10(ctx);
1149 xf_emit(ctx, 1, 0x0fac6881);
1150 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1151 xf_emit(ctx, 1, 1);
1152 xf_emit(ctx, 3, 0);
1153 }
1154 nv50_graph_construct_gene_unk8(ctx);
1155 if (dev_priv->chipset == 0xa0)
1156 xf_emit(ctx, 0x189, 0);
1157 else if (dev_priv->chipset < 0xa8)
1158 xf_emit(ctx, 0x99, 0);
1159 else if (dev_priv->chipset == 0xaa)
1160 xf_emit(ctx, 0x65, 0);
1161 else
1162 xf_emit(ctx, 0x6d, 0);
1163 nv50_graph_construct_gene_unk9(ctx);
1164 if ((ctx->ctxvals_pos-offset)/8 > size)
1165 size = (ctx->ctxvals_pos-offset)/8;
1166
1167 /* Strand 1 */
1168 ctx->ctxvals_pos = offset + 1;
1169 nv50_graph_construct_gene_unk1(ctx);
1170 if ((ctx->ctxvals_pos-offset)/8 > size)
1171 size = (ctx->ctxvals_pos-offset)/8;
1172
1173 /* Strand 2 */
1174 ctx->ctxvals_pos = offset + 2;
1175 if (dev_priv->chipset == 0xa0) {
1176 nv50_graph_construct_gene_unk2(ctx);
1177 }
1178 xf_emit(ctx, 0x36, 0);
1179 nv50_graph_construct_gene_unk5(ctx);
1180 if ((ctx->ctxvals_pos-offset)/8 > size)
1181 size = (ctx->ctxvals_pos-offset)/8;
1182
1183 /* Strand 3 */
1184 ctx->ctxvals_pos = offset + 3;
1185 xf_emit(ctx, 1, 0);
1186 xf_emit(ctx, 1, 1);
1187 nv50_graph_construct_gene_unk6(ctx);
1188 if ((ctx->ctxvals_pos-offset)/8 > size)
1189 size = (ctx->ctxvals_pos-offset)/8;
1190
1191 /* Strand 4 */
1192 ctx->ctxvals_pos = offset + 4;
1193 if (dev_priv->chipset == 0xa0)
1194 xf_emit(ctx, 0xa80, 0);
1195 else
1196 xf_emit(ctx, 0xa7a, 0);
1197 xf_emit(ctx, 1, 0x3fffff);
1198 xf_emit(ctx, 2, 0);
1199 xf_emit(ctx, 1, 0x1fff);
1200 if ((ctx->ctxvals_pos-offset)/8 > size)
1201 size = (ctx->ctxvals_pos-offset)/8;
1202
1203 /* Strand 5 */
1204 ctx->ctxvals_pos = offset + 5;
1205 xf_emit(ctx, 1, 0);
1206 xf_emit(ctx, 1, 0x0fac6881);
1207 xf_emit(ctx, 0xb, 0);
1208 xf_emit(ctx, 2, 0x4e3bfdf);
1209 xf_emit(ctx, 3, 0);
1210 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1211 xf_emit(ctx, 1, 0x11);
1212 xf_emit(ctx, 1, 0);
1213 xf_emit(ctx, 2, 0x4e3bfdf);
1214 xf_emit(ctx, 2, 0);
1215 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1216 xf_emit(ctx, 1, 0x11);
1217 xf_emit(ctx, 1, 0);
1218 for (i = 0; i < 8; i++)
1219 if (units & (1<<(i+16)))
1220 nv50_graph_construct_gene_ropc(ctx);
1221 if ((ctx->ctxvals_pos-offset)/8 > size)
1222 size = (ctx->ctxvals_pos-offset)/8;
1223
1224 /* Strand 6 */
1225 ctx->ctxvals_pos = offset + 6;
1226 nv50_graph_construct_gene_unk3(ctx);
1227 xf_emit(ctx, 0xb, 0);
1228 nv50_graph_construct_gene_unk4(ctx);
1229 nv50_graph_construct_gene_unk7(ctx);
1230 if (units & (1 << 0))
1231 nv50_graph_construct_xfer_tp(ctx);
1232 if (units & (1 << 1))
1233 nv50_graph_construct_xfer_tp(ctx);
1234 if (units & (1 << 2))
1235 nv50_graph_construct_xfer_tp(ctx);
1236 if (units & (1 << 3))
1237 nv50_graph_construct_xfer_tp(ctx);
1238 if ((ctx->ctxvals_pos-offset)/8 > size)
1239 size = (ctx->ctxvals_pos-offset)/8;
1240
1241 /* Strand 7 */
1242 ctx->ctxvals_pos = offset + 7;
1243 if (dev_priv->chipset == 0xa0) {
1244 if (units & (1 << 4))
1245 nv50_graph_construct_xfer_tp(ctx);
1246 if (units & (1 << 5))
1247 nv50_graph_construct_xfer_tp(ctx);
1248 if (units & (1 << 6))
1249 nv50_graph_construct_xfer_tp(ctx);
1250 if (units & (1 << 7))
1251 nv50_graph_construct_xfer_tp(ctx);
1252 if (units & (1 << 8))
1253 nv50_graph_construct_xfer_tp(ctx);
1254 if (units & (1 << 9))
1255 nv50_graph_construct_xfer_tp(ctx);
1256 } else {
1257 nv50_graph_construct_gene_unk2(ctx);
1258 }
1259 if ((ctx->ctxvals_pos-offset)/8 > size)
1260 size = (ctx->ctxvals_pos-offset)/8;
1261 }
1262
1263 ctx->ctxvals_pos = offset + size * 8;
1264 ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
1265 cp_lsr (ctx, offset);
1266 cp_out (ctx, CP_SET_XFER_POINTER);
1267 cp_lsr (ctx, size);
1268 cp_out (ctx, CP_SEEK_1);
1269 cp_out (ctx, CP_XFER_1);
1270 cp_wait(ctx, XFER, BUSY);
1271}
1272
1273/*
1274 * non-trivial demagiced parts of ctx init go here
1275 */
1276
1277static void
1278nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
1279{
1280 /* m2mf state */
1281 xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
1282 xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */
1283 xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */
1284 xf_emit (ctx, 1, 0); /* OFFSET_IN */
1285 xf_emit (ctx, 1, 0); /* OFFSET_OUT */
1286 xf_emit (ctx, 1, 0); /* PITCH_IN */
1287 xf_emit (ctx, 1, 0); /* PITCH_OUT */
1288 xf_emit (ctx, 1, 0); /* LINE_LENGTH */
1289 xf_emit (ctx, 1, 0); /* LINE_COUNT */
1290 xf_emit (ctx, 1, 0x21); /* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */
1291 xf_emit (ctx, 1, 1); /* LINEAR_IN */
1292 xf_emit (ctx, 1, 0x2); /* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */
1293 xf_emit (ctx, 1, 0x100); /* TILING_PITCH_IN */
1294 xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_IN */
1295 xf_emit (ctx, 1, 1); /* TILING_DEPTH_IN */
1296 xf_emit (ctx, 1, 0); /* TILING_POSITION_IN_Z */
1297 xf_emit (ctx, 1, 0); /* TILING_POSITION_IN */
1298 xf_emit (ctx, 1, 1); /* LINEAR_OUT */
1299 xf_emit (ctx, 1, 0x2); /* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */
1300 xf_emit (ctx, 1, 0x100); /* TILING_PITCH_OUT */
1301 xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_OUT */
1302 xf_emit (ctx, 1, 1); /* TILING_DEPTH_OUT */
1303 xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT_Z */
1304 xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */
1305 xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */
1306 xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */
1307}
1308
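The FORMAT value 0x21 emitted above decodes, per its comment, to INPUT_INC = 1 (bits 0-4) and OUTPUT_INC = 1 (bits 5-9). A trivial standalone decoder of that field layout:

#include <stdint.h>
#include <stdio.h>

/* Decode M2MF FORMAT per the layout noted above: bits 0-4 INPUT_INC, 5-9 OUTPUT_INC. */
int main(void)
{
	uint32_t format = 0x21;

	printf("input_inc=%u output_inc=%u\n",
	       format & 0x1f, (format >> 5) & 0x1f);
	return 0;
}
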
1309static void
1310nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx)
1311{
1312 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1313 /* end of area 2 on pre-NVA0, area 1 on NVAx */
1314 xf_emit(ctx, 2, 4);
1315 xf_emit(ctx, 1, 0);
1316 xf_emit(ctx, 1, 0x80);
1317 xf_emit(ctx, 1, 4);
1318 xf_emit(ctx, 1, 0x80c14);
1319 xf_emit(ctx, 1, 0);
1320 if (dev_priv->chipset == 0x50)
1321 xf_emit(ctx, 1, 0x3ff);
1322 else
1323 xf_emit(ctx, 1, 0x7ff);
1324 switch (dev_priv->chipset) {
1325 case 0x50:
1326 case 0x86:
1327 case 0x98:
1328 case 0xaa:
1329 case 0xac:
1330 xf_emit(ctx, 0x542, 0);
1331 break;
1332 case 0x84:
1333 case 0x92:
1334 case 0x94:
1335 case 0x96:
1336 xf_emit(ctx, 0x942, 0);
1337 break;
1338 case 0xa0:
1339 xf_emit(ctx, 0x2042, 0);
1340 break;
1341 case 0xa5:
1342 case 0xa8:
1343 xf_emit(ctx, 0x842, 0);
1344 break;
1345 }
1346 xf_emit(ctx, 2, 4);
1347 xf_emit(ctx, 1, 0);
1348 xf_emit(ctx, 1, 0x80);
1349 xf_emit(ctx, 1, 4);
1350 xf_emit(ctx, 1, 1);
1351 xf_emit(ctx, 1, 0);
1352 xf_emit(ctx, 1, 0x27);
1353 xf_emit(ctx, 1, 0);
1354 xf_emit(ctx, 1, 0x26);
1355 xf_emit(ctx, 3, 0);
1356}
1357
1358static void
1359nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx)
1360{
1361 /* end of area 2 on pre-NVA0, area 1 on NVAx */
1362 xf_emit(ctx, 0x10, 0x04000000);
1363 xf_emit(ctx, 0x24, 0);
1364 xf_emit(ctx, 2, 0x04e3bfdf);
1365 xf_emit(ctx, 2, 0);
1366 xf_emit(ctx, 1, 0x1fe21);
1367}
1368
1369static void
1370nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx)
1371{
1372 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1373 /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
1374 if (dev_priv->chipset != 0x50) {
1375 xf_emit(ctx, 5, 0);
1376 xf_emit(ctx, 1, 0x80c14);
1377 xf_emit(ctx, 2, 0);
1378 xf_emit(ctx, 1, 0x804);
1379 xf_emit(ctx, 1, 0);
1380 xf_emit(ctx, 2, 4);
1381 xf_emit(ctx, 1, 0x8100c12);
1382 }
1383 xf_emit(ctx, 1, 0);
1384 xf_emit(ctx, 2, 4);
1385 xf_emit(ctx, 1, 0);
1386 xf_emit(ctx, 1, 0x10);
1387 if (dev_priv->chipset == 0x50)
1388 xf_emit(ctx, 3, 0);
1389 else
1390 xf_emit(ctx, 4, 0);
1391 xf_emit(ctx, 1, 0x804);
1392 xf_emit(ctx, 1, 1);
1393 xf_emit(ctx, 1, 0x1a);
1394 if (dev_priv->chipset != 0x50)
1395 xf_emit(ctx, 1, 0x7f);
1396 xf_emit(ctx, 1, 0);
1397 xf_emit(ctx, 1, 1);
1398 xf_emit(ctx, 1, 0x80c14);
1399 xf_emit(ctx, 1, 0);
1400 xf_emit(ctx, 1, 0x8100c12);
1401 xf_emit(ctx, 2, 4);
1402 xf_emit(ctx, 1, 0);
1403 xf_emit(ctx, 1, 0x10);
1404 xf_emit(ctx, 3, 0);
1405 xf_emit(ctx, 1, 1);
1406 xf_emit(ctx, 1, 0x8100c12);
1407 xf_emit(ctx, 6, 0);
1408 if (dev_priv->chipset == 0x50)
1409 xf_emit(ctx, 1, 0x3ff);
1410 else
1411 xf_emit(ctx, 1, 0x7ff);
1412 xf_emit(ctx, 1, 0x80c14);
1413 xf_emit(ctx, 0x38, 0);
1414 xf_emit(ctx, 1, 1);
1415 xf_emit(ctx, 2, 0);
1416 xf_emit(ctx, 1, 0x10);
1417 xf_emit(ctx, 0x38, 0);
1418 xf_emit(ctx, 2, 0x88);
1419 xf_emit(ctx, 2, 0);
1420 xf_emit(ctx, 1, 4);
1421 xf_emit(ctx, 0x16, 0);
1422 xf_emit(ctx, 1, 0x26);
1423 xf_emit(ctx, 2, 0);
1424 xf_emit(ctx, 1, 0x3f800000);
1425 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1426 xf_emit(ctx, 4, 0);
1427 else
1428 xf_emit(ctx, 3, 0);
1429 xf_emit(ctx, 1, 0x1a);
1430 xf_emit(ctx, 1, 0x10);
1431 if (dev_priv->chipset != 0x50)
1432 xf_emit(ctx, 0x28, 0);
1433 else
1434 xf_emit(ctx, 0x25, 0);
1435 xf_emit(ctx, 1, 0x52);
1436 xf_emit(ctx, 1, 0);
1437 xf_emit(ctx, 1, 0x26);
1438 xf_emit(ctx, 1, 0);
1439 xf_emit(ctx, 2, 4);
1440 xf_emit(ctx, 1, 0);
1441 xf_emit(ctx, 1, 0x1a);
1442 xf_emit(ctx, 2, 0);
1443 xf_emit(ctx, 1, 0x00ffff00);
1444 xf_emit(ctx, 1, 0);
1445}
1446
1447static void
1448nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx)
1449{
1450 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1451 /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */
1452 xf_emit(ctx, 1, 0x3f);
1453 xf_emit(ctx, 0xa, 0);
1454 xf_emit(ctx, 1, 2);
1455 xf_emit(ctx, 2, 0x04000000);
1456 xf_emit(ctx, 8, 0);
1457 xf_emit(ctx, 1, 4);
1458 xf_emit(ctx, 3, 0);
1459 xf_emit(ctx, 1, 4);
1460 if (dev_priv->chipset == 0x50)
1461 xf_emit(ctx, 0x10, 0);
1462 else
1463 xf_emit(ctx, 0x11, 0);
1464 xf_emit(ctx, 1, 1);
1465 xf_emit(ctx, 1, 0x1001);
1466 xf_emit(ctx, 4, 0xffff);
1467 xf_emit(ctx, 0x20, 0);
1468 xf_emit(ctx, 0x10, 0x3f800000);
1469 xf_emit(ctx, 1, 0x10);
1470 if (dev_priv->chipset == 0x50)
1471 xf_emit(ctx, 1, 0);
1472 else
1473 xf_emit(ctx, 2, 0);
1474 xf_emit(ctx, 1, 3);
1475 xf_emit(ctx, 2, 0);
1476}
1477
1478static void
1479nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx)
1480{
1481 /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */
1482 xf_emit(ctx, 2, 0x04000000);
1483 xf_emit(ctx, 1, 0);
1484 xf_emit(ctx, 1, 0x80);
1485 xf_emit(ctx, 3, 0);
1486 xf_emit(ctx, 1, 0x80);
1487 xf_emit(ctx, 1, 0);
1488}
1489
1490static void
1491nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx)
1492{
1493 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1494 /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */
1495 xf_emit(ctx, 2, 4);
1496 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1497 xf_emit(ctx, 0x1c4d, 0);
1498 else
1499 xf_emit(ctx, 0x1c4b, 0);
1500 xf_emit(ctx, 2, 4);
1501 xf_emit(ctx, 1, 0x8100c12);
1502 if (dev_priv->chipset != 0x50)
1503 xf_emit(ctx, 1, 3);
1504 xf_emit(ctx, 1, 0);
1505 xf_emit(ctx, 1, 0x8100c12);
1506 xf_emit(ctx, 1, 0);
1507 xf_emit(ctx, 1, 0x80c14);
1508 xf_emit(ctx, 1, 1);
1509 if (dev_priv->chipset >= 0xa0)
1510 xf_emit(ctx, 2, 4);
1511 xf_emit(ctx, 1, 0x80c14);
1512 xf_emit(ctx, 2, 0);
1513 xf_emit(ctx, 1, 0x8100c12);
1514 xf_emit(ctx, 1, 0x27);
1515 xf_emit(ctx, 2, 0);
1516 xf_emit(ctx, 1, 1);
1517 xf_emit(ctx, 0x3c1, 0);
1518 xf_emit(ctx, 1, 1);
1519 xf_emit(ctx, 0x16, 0);
1520 xf_emit(ctx, 1, 0x8100c12);
1521 xf_emit(ctx, 1, 0);
1522}
1523
1524static void
1525nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx)
1526{
1527 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1528 /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */
1529 xf_emit(ctx, 4, 0);
1530 xf_emit(ctx, 1, 0xf);
1531 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1532 xf_emit(ctx, 8, 0);
1533 else
1534 xf_emit(ctx, 4, 0);
1535 xf_emit(ctx, 1, 0x20);
1536 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1537 xf_emit(ctx, 0x11, 0);
1538 else if (dev_priv->chipset >= 0xa0)
1539 xf_emit(ctx, 0xf, 0);
1540 else
1541 xf_emit(ctx, 0xe, 0);
1542 xf_emit(ctx, 1, 0x1a);
1543 xf_emit(ctx, 0xd, 0);
1544 xf_emit(ctx, 2, 4);
1545 xf_emit(ctx, 1, 0);
1546 xf_emit(ctx, 1, 4);
1547 xf_emit(ctx, 1, 8);
1548 xf_emit(ctx, 1, 0);
1549 if (dev_priv->chipset == 0x50)
1550 xf_emit(ctx, 1, 0x3ff);
1551 else
1552 xf_emit(ctx, 1, 0x7ff);
1553 if (dev_priv->chipset == 0xa8)
1554 xf_emit(ctx, 1, 0x1e00);
1555 xf_emit(ctx, 0xc, 0);
1556 xf_emit(ctx, 1, 0xf);
1557 if (dev_priv->chipset == 0x50)
1558 xf_emit(ctx, 0x125, 0);
1559 else if (dev_priv->chipset < 0xa0)
1560 xf_emit(ctx, 0x126, 0);
1561 else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
1562 xf_emit(ctx, 0x124, 0);
1563 else
1564 xf_emit(ctx, 0x1f7, 0);
1565 xf_emit(ctx, 1, 0xf);
1566 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1567 xf_emit(ctx, 3, 0);
1568 else
1569 xf_emit(ctx, 1, 0);
1570 xf_emit(ctx, 1, 1);
1571 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1572 xf_emit(ctx, 0xa1, 0);
1573 else
1574 xf_emit(ctx, 0x5a, 0);
1575 xf_emit(ctx, 1, 0xf);
1576 if (dev_priv->chipset < 0xa0)
1577 xf_emit(ctx, 0x834, 0);
1578 else if (dev_priv->chipset == 0xa0)
1579 xf_emit(ctx, 0x1873, 0);
1580 else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1581 xf_emit(ctx, 0x8ba, 0);
1582 else
1583 xf_emit(ctx, 0x833, 0);
1584 xf_emit(ctx, 1, 0xf);
1585 xf_emit(ctx, 0xf, 0);
1586}
1587
1588static void
1589nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx)
1590{
1591 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1592 /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */
1593 xf_emit(ctx, 2, 0);
1594 if (dev_priv->chipset == 0x50)
1595 xf_emit(ctx, 2, 1);
1596 else
1597 xf_emit(ctx, 2, 0);
1598 xf_emit(ctx, 1, 0);
1599 xf_emit(ctx, 1, 1);
1600 xf_emit(ctx, 2, 0x100);
1601 xf_emit(ctx, 1, 0x11);
1602 xf_emit(ctx, 1, 0);
1603 xf_emit(ctx, 1, 8);
1604 xf_emit(ctx, 5, 0);
1605 xf_emit(ctx, 1, 1);
1606 xf_emit(ctx, 1, 0);
1607 xf_emit(ctx, 3, 1);
1608 xf_emit(ctx, 1, 0xcf);
1609 xf_emit(ctx, 1, 2);
1610 xf_emit(ctx, 6, 0);
1611 xf_emit(ctx, 1, 1);
1612 xf_emit(ctx, 1, 0);
1613 xf_emit(ctx, 3, 1);
1614 xf_emit(ctx, 4, 0);
1615 xf_emit(ctx, 1, 4);
1616 xf_emit(ctx, 1, 0);
1617 xf_emit(ctx, 1, 1);
1618 xf_emit(ctx, 1, 0x15);
1619 xf_emit(ctx, 3, 0);
1620 xf_emit(ctx, 1, 0x4444480);
1621 xf_emit(ctx, 0x37, 0);
1622}
1623
1624static void
1625nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx)
1626{
1627 /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */
1628 xf_emit(ctx, 4, 0);
1629 xf_emit(ctx, 1, 0x8100c12);
1630 xf_emit(ctx, 4, 0);
1631 xf_emit(ctx, 1, 0x100);
1632 xf_emit(ctx, 2, 0);
1633 xf_emit(ctx, 1, 0x10001);
1634 xf_emit(ctx, 1, 0);
1635 xf_emit(ctx, 1, 0x10001);
1636 xf_emit(ctx, 1, 1);
1637 xf_emit(ctx, 1, 0x10001);
1638 xf_emit(ctx, 1, 1);
1639 xf_emit(ctx, 1, 4);
1640 xf_emit(ctx, 1, 2);
1641}
1642
1643static void
1644nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx)
1645{
1646 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1647 /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */
1648 xf_emit(ctx, 1, 0x3f800000);
1649 xf_emit(ctx, 6, 0);
1650 xf_emit(ctx, 1, 4);
1651 xf_emit(ctx, 1, 0x1a);
1652 xf_emit(ctx, 2, 0);
1653 xf_emit(ctx, 1, 1);
1654 xf_emit(ctx, 0x12, 0);
1655 xf_emit(ctx, 1, 0x00ffff00);
1656 xf_emit(ctx, 6, 0);
1657 xf_emit(ctx, 1, 0xf);
1658 xf_emit(ctx, 7, 0);
1659 xf_emit(ctx, 1, 0x0fac6881);
1660 xf_emit(ctx, 1, 0x11);
1661 xf_emit(ctx, 0xf, 0);
1662 xf_emit(ctx, 1, 4);
1663 xf_emit(ctx, 2, 0);
1664 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1665 xf_emit(ctx, 1, 3);
1666 else if (dev_priv->chipset >= 0xa0)
1667 xf_emit(ctx, 1, 1);
1668 xf_emit(ctx, 2, 0);
1669 xf_emit(ctx, 1, 2);
1670 xf_emit(ctx, 2, 0x04000000);
1671 xf_emit(ctx, 3, 0);
1672 xf_emit(ctx, 1, 5);
1673 xf_emit(ctx, 1, 0x52);
1674 if (dev_priv->chipset == 0x50) {
1675 xf_emit(ctx, 0x13, 0);
1676 } else {
1677 xf_emit(ctx, 4, 0);
1678 xf_emit(ctx, 1, 1);
1679 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1680 xf_emit(ctx, 0x11, 0);
1681 else
1682 xf_emit(ctx, 0x10, 0);
1683 }
1684 xf_emit(ctx, 0x10, 0x3f800000);
1685 xf_emit(ctx, 1, 0x10);
1686 xf_emit(ctx, 0x26, 0);
1687 xf_emit(ctx, 1, 0x8100c12);
1688 xf_emit(ctx, 1, 5);
1689 xf_emit(ctx, 2, 0);
1690 xf_emit(ctx, 1, 1);
1691 xf_emit(ctx, 1, 0);
1692 xf_emit(ctx, 4, 0xffff);
1693 if (dev_priv->chipset != 0x50)
1694 xf_emit(ctx, 1, 3);
1695 if (dev_priv->chipset < 0xa0)
1696 xf_emit(ctx, 0x1f, 0);
1697 else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1698 xf_emit(ctx, 0xc, 0);
1699 else
1700 xf_emit(ctx, 3, 0);
1701 xf_emit(ctx, 1, 0x00ffff00);
1702 xf_emit(ctx, 1, 0x1a);
1703 if (dev_priv->chipset != 0x50) {
1704 xf_emit(ctx, 1, 0);
1705 xf_emit(ctx, 1, 3);
1706 }
1707 if (dev_priv->chipset < 0xa0)
1708 xf_emit(ctx, 0x26, 0);
1709 else
1710 xf_emit(ctx, 0x3c, 0);
1711 xf_emit(ctx, 1, 0x102);
1712 xf_emit(ctx, 1, 0);
1713 xf_emit(ctx, 4, 4);
1714 if (dev_priv->chipset >= 0xa0)
1715 xf_emit(ctx, 8, 0);
1716 xf_emit(ctx, 2, 4);
1717 xf_emit(ctx, 1, 0);
1718 if (dev_priv->chipset == 0x50)
1719 xf_emit(ctx, 1, 0x3ff);
1720 else
1721 xf_emit(ctx, 1, 0x7ff);
1722 xf_emit(ctx, 1, 0);
1723 xf_emit(ctx, 1, 0x102);
1724 xf_emit(ctx, 9, 0);
1725 xf_emit(ctx, 4, 4);
1726 xf_emit(ctx, 0x2c, 0);
1727}
1728
1729static void
1730nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
1731{
1732 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1733 int magic2;
1734 if (dev_priv->chipset == 0x50) {
1735 magic2 = 0x00003e60;
1736 } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
1737 magic2 = 0x001ffe67;
1738 } else {
1739 magic2 = 0x00087e67;
1740 }
1741 xf_emit(ctx, 8, 0);
1742 xf_emit(ctx, 1, 2);
1743 xf_emit(ctx, 1, 0);
1744 xf_emit(ctx, 1, magic2);
1745 xf_emit(ctx, 4, 0);
1746 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1747 xf_emit(ctx, 1, 1);
1748 xf_emit(ctx, 7, 0);
1749 if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa)
1750 xf_emit(ctx, 1, 0x15);
1751 xf_emit(ctx, 1, 0);
1752 xf_emit(ctx, 1, 1);
1753 xf_emit(ctx, 1, 0x10);
1754 xf_emit(ctx, 2, 0);
1755 xf_emit(ctx, 1, 1);
1756 xf_emit(ctx, 4, 0);
1757 if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
1758 xf_emit(ctx, 1, 4);
1759 xf_emit(ctx, 1, 0x400);
1760 xf_emit(ctx, 1, 0x300);
1761 xf_emit(ctx, 1, 0x1001);
1762 if (dev_priv->chipset != 0xa0) {
1763 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1764 xf_emit(ctx, 1, 0);
1765 else
1766 xf_emit(ctx, 1, 0x15);
1767 }
1768 xf_emit(ctx, 3, 0);
1769 }
1770 xf_emit(ctx, 2, 0);
1771 xf_emit(ctx, 1, 2);
1772 xf_emit(ctx, 8, 0);
1773 xf_emit(ctx, 1, 1);
1774 xf_emit(ctx, 1, 0x10);
1775 xf_emit(ctx, 1, 0);
1776 xf_emit(ctx, 1, 1);
1777 xf_emit(ctx, 0x13, 0);
1778 xf_emit(ctx, 1, 0x10);
1779 xf_emit(ctx, 0x10, 0);
1780 xf_emit(ctx, 0x10, 0x3f800000);
1781 xf_emit(ctx, 0x19, 0);
1782 xf_emit(ctx, 1, 0x10);
1783 xf_emit(ctx, 1, 0);
1784 xf_emit(ctx, 1, 0x3f);
1785 xf_emit(ctx, 6, 0);
1786 xf_emit(ctx, 1, 1);
1787 xf_emit(ctx, 1, 0);
1788 xf_emit(ctx, 1, 1);
1789 xf_emit(ctx, 1, 0);
1790 xf_emit(ctx, 1, 1);
1791 if (dev_priv->chipset >= 0xa0) {
1792 xf_emit(ctx, 2, 0);
1793 xf_emit(ctx, 1, 0x1001);
1794 xf_emit(ctx, 0xb, 0);
1795 } else {
1796 xf_emit(ctx, 0xc, 0);
1797 }
1798 xf_emit(ctx, 1, 0x11);
1799 xf_emit(ctx, 7, 0);
1800 xf_emit(ctx, 1, 0xf);
1801 xf_emit(ctx, 7, 0);
1802 xf_emit(ctx, 1, 0x11);
1803 if (dev_priv->chipset == 0x50)
1804 xf_emit(ctx, 4, 0);
1805 else
1806 xf_emit(ctx, 6, 0);
1807 xf_emit(ctx, 3, 1);
1808 xf_emit(ctx, 1, 2);
1809 xf_emit(ctx, 1, 1);
1810 xf_emit(ctx, 1, 2);
1811 xf_emit(ctx, 1, 1);
1812 xf_emit(ctx, 1, 0);
1813 xf_emit(ctx, 1, magic2);
1814 xf_emit(ctx, 1, 0);
1815 xf_emit(ctx, 1, 0x0fac6881);
1816 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1817 xf_emit(ctx, 1, 0);
1818 xf_emit(ctx, 0x18, 1);
1819 xf_emit(ctx, 8, 2);
1820 xf_emit(ctx, 8, 1);
1821 xf_emit(ctx, 8, 2);
1822 xf_emit(ctx, 8, 1);
1823 xf_emit(ctx, 3, 0);
1824 xf_emit(ctx, 1, 1);
1825 xf_emit(ctx, 5, 0);
1826 xf_emit(ctx, 1, 1);
1827 xf_emit(ctx, 0x16, 0);
1828 } else {
1829 if (dev_priv->chipset >= 0xa0)
1830 xf_emit(ctx, 0x1b, 0);
1831 else
1832 xf_emit(ctx, 0x15, 0);
1833 }
1834 xf_emit(ctx, 1, 1);
1835 xf_emit(ctx, 1, 2);
1836 xf_emit(ctx, 2, 1);
1837 xf_emit(ctx, 1, 2);
1838 xf_emit(ctx, 2, 1);
1839 if (dev_priv->chipset >= 0xa0)
1840 xf_emit(ctx, 4, 0);
1841 else
1842 xf_emit(ctx, 3, 0);
1843 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1844 xf_emit(ctx, 0x10, 1);
1845 xf_emit(ctx, 8, 2);
1846 xf_emit(ctx, 0x10, 1);
1847 xf_emit(ctx, 8, 2);
1848 xf_emit(ctx, 8, 1);
1849 xf_emit(ctx, 3, 0);
1850 }
1851 xf_emit(ctx, 1, 0x11);
1852 xf_emit(ctx, 1, 1);
1853 xf_emit(ctx, 0x5b, 0);
1854}
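
The magic2 selection at the top of nv50_graph_construct_gene_ropc(), like the chipset tests repeated through the rest of these generators, encodes chip families by number. Below is a standalone sketch, not part of the patch, that restates those ranges with hypothetical helper names; the family labels (NV50, the NVA3..NVA8 parts, the 0xaa/0xac IGPs) are assumptions based on the usual NV50-series numbering.

static inline int is_nv50(int chipset)
{
	return chipset == 0x50;
}

static inline int is_nva3_to_nva8(int chipset)
{
	/* the "chipset > 0xa0 && chipset < 0xaa" test used throughout */
	return chipset > 0xa0 && chipset < 0xaa;
}

static int ropc_magic2(int chipset)
{
	/* mirrors the magic2 selection at the top of the function above */
	if (is_nv50(chipset))
		return 0x00003e60;
	if (is_nva3_to_nva8(chipset))
		return 0x00087e67;
	return 0x001ffe67;	/* chipset <= 0xa0 or >= 0xaa */
}
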
1855
1856static void
1857nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx)
1858{
1859 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1860 int magic3;
1861 if (dev_priv->chipset == 0x50)
1862 magic3 = 0x1000;
1863 else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8)
1864 magic3 = 0x1e00;
1865 else
1866 magic3 = 0;
1867 xf_emit(ctx, 1, 0);
1868 xf_emit(ctx, 1, 4);
1869 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1870 xf_emit(ctx, 0x24, 0);
1871 else if (dev_priv->chipset >= 0xa0)
1872 xf_emit(ctx, 0x14, 0);
1873 else
1874 xf_emit(ctx, 0x15, 0);
1875 xf_emit(ctx, 2, 4);
1876 if (dev_priv->chipset >= 0xa0)
1877 xf_emit(ctx, 1, 0x03020100);
1878 else
1879 xf_emit(ctx, 1, 0x00608080);
1880 xf_emit(ctx, 4, 0);
1881 xf_emit(ctx, 1, 4);
1882 xf_emit(ctx, 2, 0);
1883 xf_emit(ctx, 2, 4);
1884 xf_emit(ctx, 1, 0x80);
1885 if (magic3)
1886 xf_emit(ctx, 1, magic3);
1887 xf_emit(ctx, 1, 4);
1888 xf_emit(ctx, 0x24, 0);
1889 xf_emit(ctx, 1, 4);
1890 xf_emit(ctx, 1, 0x80);
1891 xf_emit(ctx, 1, 4);
1892 xf_emit(ctx, 1, 0x03020100);
1893 xf_emit(ctx, 1, 3);
1894 if (magic3)
1895 xf_emit(ctx, 1, magic3);
1896 xf_emit(ctx, 1, 4);
1897 xf_emit(ctx, 4, 0);
1898 xf_emit(ctx, 1, 4);
1899 xf_emit(ctx, 1, 3);
1900 xf_emit(ctx, 3, 0);
1901 xf_emit(ctx, 1, 4);
1902 if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
1903 xf_emit(ctx, 0x1024, 0);
1904 else if (dev_priv->chipset < 0xa0)
1905 xf_emit(ctx, 0xa24, 0);
1906 else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
1907 xf_emit(ctx, 0x214, 0);
1908 else
1909 xf_emit(ctx, 0x414, 0);
1910 xf_emit(ctx, 1, 4);
1911 xf_emit(ctx, 1, 3);
1912 xf_emit(ctx, 2, 0);
1913}
1914
1915static void
1916nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx)
1917{
1918 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
1919 int magic1, magic2;
1920 if (dev_priv->chipset == 0x50) {
1921 magic1 = 0x3ff;
1922 magic2 = 0x00003e60;
1923 } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
1924 magic1 = 0x7ff;
1925 magic2 = 0x001ffe67;
1926 } else {
1927 magic1 = 0x7ff;
1928 magic2 = 0x00087e67;
1929 }
1930 xf_emit(ctx, 3, 0);
1931 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1932 xf_emit(ctx, 1, 1);
1933 xf_emit(ctx, 0xc, 0);
1934 xf_emit(ctx, 1, 0xf);
1935 xf_emit(ctx, 0xb, 0);
1936 xf_emit(ctx, 1, 4);
1937 xf_emit(ctx, 4, 0xffff);
1938 xf_emit(ctx, 8, 0);
1939 xf_emit(ctx, 1, 1);
1940 xf_emit(ctx, 3, 0);
1941 xf_emit(ctx, 1, 1);
1942 xf_emit(ctx, 5, 0);
1943 xf_emit(ctx, 1, 1);
1944 xf_emit(ctx, 2, 0);
1945 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1946 xf_emit(ctx, 1, 3);
1947 xf_emit(ctx, 1, 0);
1948 } else if (dev_priv->chipset >= 0xa0)
1949 xf_emit(ctx, 1, 1);
1950 xf_emit(ctx, 0xa, 0);
1951 xf_emit(ctx, 2, 1);
1952 xf_emit(ctx, 1, 2);
1953 xf_emit(ctx, 2, 1);
1954 xf_emit(ctx, 1, 2);
1955 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
1956 xf_emit(ctx, 1, 0);
1957 xf_emit(ctx, 0x18, 1);
1958 xf_emit(ctx, 8, 2);
1959 xf_emit(ctx, 8, 1);
1960 xf_emit(ctx, 8, 2);
1961 xf_emit(ctx, 8, 1);
1962 xf_emit(ctx, 1, 0);
1963 }
1964 xf_emit(ctx, 1, 1);
1965 xf_emit(ctx, 1, 0);
1966 xf_emit(ctx, 1, 0x11);
1967 xf_emit(ctx, 7, 0);
1968 xf_emit(ctx, 1, 0x0fac6881);
1969 xf_emit(ctx, 2, 0);
1970 xf_emit(ctx, 1, 4);
1971 xf_emit(ctx, 3, 0);
1972 xf_emit(ctx, 1, 0x11);
1973 xf_emit(ctx, 1, 1);
1974 xf_emit(ctx, 1, 0);
1975 xf_emit(ctx, 3, 0xcf);
1976 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1977 xf_emit(ctx, 1, 1);
1978 xf_emit(ctx, 0xa, 0);
1979 xf_emit(ctx, 2, 1);
1980 xf_emit(ctx, 1, 2);
1981 xf_emit(ctx, 2, 1);
1982 xf_emit(ctx, 1, 2);
1983 xf_emit(ctx, 1, 1);
1984 xf_emit(ctx, 1, 0);
1985 xf_emit(ctx, 8, 1);
1986 xf_emit(ctx, 1, 0x11);
1987 xf_emit(ctx, 7, 0);
1988 xf_emit(ctx, 1, 0x0fac6881);
1989 xf_emit(ctx, 1, 0xf);
1990 xf_emit(ctx, 7, 0);
1991 xf_emit(ctx, 1, magic2);
1992 xf_emit(ctx, 2, 0);
1993 xf_emit(ctx, 1, 0x11);
1994 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
1995 xf_emit(ctx, 2, 1);
1996 else
1997 xf_emit(ctx, 1, 1);
1998	if (dev_priv->chipset == 0x50)
1999 xf_emit(ctx, 1, 0);
2000 else
2001 xf_emit(ctx, 3, 0);
2002 xf_emit(ctx, 1, 4);
2003 xf_emit(ctx, 5, 0);
2004 xf_emit(ctx, 1, 1);
2005 xf_emit(ctx, 4, 0);
2006 xf_emit(ctx, 1, 0x11);
2007 xf_emit(ctx, 7, 0);
2008 xf_emit(ctx, 1, 0x0fac6881);
2009 xf_emit(ctx, 3, 0);
2010 xf_emit(ctx, 1, 0x11);
2011 xf_emit(ctx, 1, 1);
2012 xf_emit(ctx, 1, 0);
2013 xf_emit(ctx, 1, 1);
2014 xf_emit(ctx, 1, 0);
2015 xf_emit(ctx, 1, 1);
2016 xf_emit(ctx, 1, 0);
2017 xf_emit(ctx, 1, magic1);
2018 xf_emit(ctx, 1, 0);
2019 xf_emit(ctx, 1, 1);
2020 xf_emit(ctx, 1, 0);
2021 xf_emit(ctx, 1, 1);
2022 xf_emit(ctx, 2, 0);
2023 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2024 xf_emit(ctx, 1, 1);
2025 xf_emit(ctx, 0x28, 0);
2026 xf_emit(ctx, 8, 8);
2027 xf_emit(ctx, 1, 0x11);
2028 xf_emit(ctx, 7, 0);
2029 xf_emit(ctx, 1, 0x0fac6881);
2030 xf_emit(ctx, 8, 0x400);
2031 xf_emit(ctx, 8, 0x300);
2032 xf_emit(ctx, 1, 1);
2033 xf_emit(ctx, 1, 0xf);
2034 xf_emit(ctx, 7, 0);
2035 xf_emit(ctx, 1, 0x20);
2036 xf_emit(ctx, 1, 0x11);
2037 xf_emit(ctx, 1, 0x100);
2038 xf_emit(ctx, 1, 0);
2039 xf_emit(ctx, 1, 1);
2040 xf_emit(ctx, 2, 0);
2041 xf_emit(ctx, 1, 0x40);
2042 xf_emit(ctx, 1, 0x100);
2043 xf_emit(ctx, 1, 0);
2044 xf_emit(ctx, 1, 3);
2045 xf_emit(ctx, 4, 0);
2046 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2047 xf_emit(ctx, 1, 1);
2048 xf_emit(ctx, 1, magic2);
2049 xf_emit(ctx, 3, 0);
2050 xf_emit(ctx, 1, 2);
2051 xf_emit(ctx, 1, 0x0fac6881);
2052 xf_emit(ctx, 9, 0);
2053 xf_emit(ctx, 1, 1);
2054 xf_emit(ctx, 4, 0);
2055 xf_emit(ctx, 1, 4);
2056 xf_emit(ctx, 1, 0);
2057 xf_emit(ctx, 1, 1);
2058 xf_emit(ctx, 1, 0x400);
2059 xf_emit(ctx, 1, 0x300);
2060 xf_emit(ctx, 1, 0x1001);
2061 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2062 xf_emit(ctx, 4, 0);
2063 else
2064 xf_emit(ctx, 3, 0);
2065 xf_emit(ctx, 1, 0x11);
2066 xf_emit(ctx, 7, 0);
2067 xf_emit(ctx, 1, 0x0fac6881);
2068 xf_emit(ctx, 1, 0xf);
2069 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
2070 xf_emit(ctx, 0x15, 0);
2071 xf_emit(ctx, 1, 1);
2072 xf_emit(ctx, 3, 0);
2073 } else
2074 xf_emit(ctx, 0x17, 0);
2075 if (dev_priv->chipset >= 0xa0)
2076 xf_emit(ctx, 1, 0x0fac6881);
2077 xf_emit(ctx, 1, magic2);
2078 xf_emit(ctx, 3, 0);
2079 xf_emit(ctx, 1, 0x11);
2080 xf_emit(ctx, 2, 0);
2081 xf_emit(ctx, 1, 4);
2082 xf_emit(ctx, 1, 0);
2083 xf_emit(ctx, 2, 1);
2084 xf_emit(ctx, 3, 0);
2085 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2086 xf_emit(ctx, 2, 1);
2087 else
2088 xf_emit(ctx, 1, 1);
2089 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2090 xf_emit(ctx, 2, 0);
2091 else if (dev_priv->chipset != 0x50)
2092 xf_emit(ctx, 1, 0);
2093}
2094
2095static void
2096nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx)
2097{
2098 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2099 xf_emit(ctx, 3, 0);
2100 xf_emit(ctx, 1, 1);
2101 xf_emit(ctx, 1, 0);
2102 xf_emit(ctx, 1, 1);
2103 if (dev_priv->chipset == 0x50)
2104 xf_emit(ctx, 2, 0);
2105 else
2106 xf_emit(ctx, 3, 0);
2107 xf_emit(ctx, 1, 0x2a712488);
2108 xf_emit(ctx, 1, 0);
2109 xf_emit(ctx, 1, 0x4085c000);
2110 xf_emit(ctx, 1, 0x40);
2111 xf_emit(ctx, 1, 0x100);
2112 xf_emit(ctx, 1, 0x10100);
2113 xf_emit(ctx, 1, 0x02800000);
2114}
2115
2116static void
2117nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx)
2118{
2119 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2120 xf_emit(ctx, 2, 0x04e3bfdf);
2121 xf_emit(ctx, 1, 1);
2122 xf_emit(ctx, 1, 0);
2123 xf_emit(ctx, 1, 0x00ffff00);
2124 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2125 xf_emit(ctx, 2, 1);
2126 else
2127 xf_emit(ctx, 1, 1);
2128 xf_emit(ctx, 2, 0);
2129 xf_emit(ctx, 1, 0x00ffff00);
2130 xf_emit(ctx, 8, 0);
2131 xf_emit(ctx, 1, 1);
2132 xf_emit(ctx, 1, 0);
2133 xf_emit(ctx, 1, 1);
2134 xf_emit(ctx, 1, 0x30201000);
2135 xf_emit(ctx, 1, 0x70605040);
2136 xf_emit(ctx, 1, 0xb8a89888);
2137 xf_emit(ctx, 1, 0xf8e8d8c8);
2138 xf_emit(ctx, 1, 0);
2139 xf_emit(ctx, 1, 0x1a);
2140}
2141
2142static void
2143nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx)
2144{
2145 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2146 xf_emit(ctx, 3, 0);
2147 xf_emit(ctx, 1, 0xfac6881);
2148 xf_emit(ctx, 4, 0);
2149 xf_emit(ctx, 1, 4);
2150 xf_emit(ctx, 1, 0);
2151 xf_emit(ctx, 2, 1);
2152 xf_emit(ctx, 2, 0);
2153 xf_emit(ctx, 1, 1);
2154 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2155 xf_emit(ctx, 0xb, 0);
2156 else
2157 xf_emit(ctx, 0xa, 0);
2158 xf_emit(ctx, 8, 1);
2159 xf_emit(ctx, 1, 0x11);
2160 xf_emit(ctx, 7, 0);
2161 xf_emit(ctx, 1, 0xfac6881);
2162 xf_emit(ctx, 1, 0xf);
2163 xf_emit(ctx, 7, 0);
2164 xf_emit(ctx, 1, 0x11);
2165 xf_emit(ctx, 1, 1);
2166 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
2167 xf_emit(ctx, 6, 0);
2168 xf_emit(ctx, 1, 1);
2169 xf_emit(ctx, 6, 0);
2170 } else {
2171 xf_emit(ctx, 0xb, 0);
2172 }
2173}
2174
2175static void
2176nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
2177{
2178 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2179 if (dev_priv->chipset < 0xa0) {
2180 nv50_graph_construct_xfer_tp_x1(ctx);
2181 nv50_graph_construct_xfer_tp_x2(ctx);
2182 nv50_graph_construct_xfer_tp_x3(ctx);
2183 if (dev_priv->chipset == 0x50)
2184 xf_emit(ctx, 0xf, 0);
2185 else
2186 xf_emit(ctx, 0x12, 0);
2187 nv50_graph_construct_xfer_tp_x4(ctx);
2188 } else {
2189 nv50_graph_construct_xfer_tp_x3(ctx);
2190 if (dev_priv->chipset < 0xaa)
2191 xf_emit(ctx, 0xc, 0);
2192 else
2193 xf_emit(ctx, 0xa, 0);
2194 nv50_graph_construct_xfer_tp_x2(ctx);
2195 nv50_graph_construct_xfer_tp_x5(ctx);
2196 nv50_graph_construct_xfer_tp_x4(ctx);
2197 nv50_graph_construct_xfer_tp_x1(ctx);
2198 }
2199}
2200
2201static void
2202nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
2203{
2204 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2205 int i, mpcnt;
2206 if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
2207 mpcnt = 1;
2208 else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8)
2209 mpcnt = 2;
2210 else
2211 mpcnt = 3;
2212 for (i = 0; i < mpcnt; i++) {
2213 xf_emit(ctx, 1, 0);
2214 xf_emit(ctx, 1, 0x80);
2215 xf_emit(ctx, 1, 0x80007004);
2216 xf_emit(ctx, 1, 0x04000400);
2217 if (dev_priv->chipset >= 0xa0)
2218 xf_emit(ctx, 1, 0xc0);
2219 xf_emit(ctx, 1, 0x1000);
2220 xf_emit(ctx, 2, 0);
2221 if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) {
2222 xf_emit(ctx, 1, 0xe00);
2223 xf_emit(ctx, 1, 0x1e00);
2224 }
2225 xf_emit(ctx, 1, 1);
2226 xf_emit(ctx, 2, 0);
2227 if (dev_priv->chipset == 0x50)
2228 xf_emit(ctx, 2, 0x1000);
2229 xf_emit(ctx, 1, 1);
2230 xf_emit(ctx, 1, 0);
2231 xf_emit(ctx, 1, 4);
2232 xf_emit(ctx, 1, 2);
2233 if (dev_priv->chipset >= 0xaa)
2234 xf_emit(ctx, 0xb, 0);
2235 else if (dev_priv->chipset >= 0xa0)
2236 xf_emit(ctx, 0xc, 0);
2237 else
2238 xf_emit(ctx, 0xa, 0);
2239 }
2240 xf_emit(ctx, 1, 0x08100c12);
2241 xf_emit(ctx, 1, 0);
2242 if (dev_priv->chipset >= 0xa0) {
2243 xf_emit(ctx, 1, 0x1fe21);
2244 }
2245 xf_emit(ctx, 5, 0);
2246 xf_emit(ctx, 4, 0xffff);
2247 xf_emit(ctx, 1, 1);
2248 xf_emit(ctx, 2, 0x10001);
2249 xf_emit(ctx, 1, 1);
2250 xf_emit(ctx, 1, 0);
2251 xf_emit(ctx, 1, 0x1fe21);
2252 xf_emit(ctx, 1, 0);
2253 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2254 xf_emit(ctx, 1, 1);
2255 xf_emit(ctx, 4, 0);
2256 xf_emit(ctx, 1, 0x08100c12);
2257 xf_emit(ctx, 1, 4);
2258 xf_emit(ctx, 1, 0);
2259 xf_emit(ctx, 1, 2);
2260 xf_emit(ctx, 1, 0x11);
2261 xf_emit(ctx, 8, 0);
2262 xf_emit(ctx, 1, 0xfac6881);
2263 xf_emit(ctx, 1, 0);
2264 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
2265 xf_emit(ctx, 1, 3);
2266 xf_emit(ctx, 3, 0);
2267 xf_emit(ctx, 1, 4);
2268 xf_emit(ctx, 9, 0);
2269 xf_emit(ctx, 1, 2);
2270 xf_emit(ctx, 2, 1);
2271 xf_emit(ctx, 1, 2);
2272 xf_emit(ctx, 3, 1);
2273 xf_emit(ctx, 1, 0);
2274 if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
2275 xf_emit(ctx, 8, 2);
2276 xf_emit(ctx, 0x10, 1);
2277 xf_emit(ctx, 8, 2);
2278 xf_emit(ctx, 0x18, 1);
2279 xf_emit(ctx, 3, 0);
2280 }
2281 xf_emit(ctx, 1, 4);
2282 if (dev_priv->chipset == 0x50)
2283 xf_emit(ctx, 0x3a0, 0);
2284 else if (dev_priv->chipset < 0x94)
2285 xf_emit(ctx, 0x3a2, 0);
2286 else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
2287 xf_emit(ctx, 0x39f, 0);
2288 else
2289 xf_emit(ctx, 0x3a3, 0);
2290 xf_emit(ctx, 1, 0x11);
2291 xf_emit(ctx, 1, 0);
2292 xf_emit(ctx, 1, 1);
2293 xf_emit(ctx, 0x2d, 0);
2294}
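
The per-MP loop in nv50_graph_construct_xfer_tp2() is sized by how many MPs each TP carries on a given chipset. A minimal sketch, not from the patch, that reads that mapping straight off the checks above; the family names in the comments are assumptions.

static int mps_per_tp(int chipset)
{
	if (chipset == 0x98 || chipset == 0xaa)
		return 1;	/* presumably the small parts, e.g. NV98/NVAA */
	if (chipset < 0xa0 || chipset >= 0xa8)
		return 2;	/* remaining pre-NVA0 chips and NVA8/NVAC */
	return 3;		/* NVA0, NVA3, NVA5 */
}
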
2295
2296static void
2297nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
2298{
2299 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
2300 int i;
2301 uint32_t offset;
2302 uint32_t units = nv_rd32 (ctx->dev, 0x1540);
2303 int size = 0;
2304
2305 offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
2306
2307 if (dev_priv->chipset < 0xa0) {
2308 for (i = 0; i < 8; i++) {
2309 ctx->ctxvals_pos = offset + i;
2310 if (i == 0)
2311 xf_emit(ctx, 1, 0x08100c12);
2312 if (units & (1 << i))
2313 nv50_graph_construct_xfer_tp2(ctx);
2314 if ((ctx->ctxvals_pos-offset)/8 > size)
2315 size = (ctx->ctxvals_pos-offset)/8;
2316 }
2317 } else {
2318 /* Strand 0: TPs 0, 1 */
2319 ctx->ctxvals_pos = offset;
2320 xf_emit(ctx, 1, 0x08100c12);
2321 if (units & (1 << 0))
2322 nv50_graph_construct_xfer_tp2(ctx);
2323 if (units & (1 << 1))
2324 nv50_graph_construct_xfer_tp2(ctx);
2325 if ((ctx->ctxvals_pos-offset)/8 > size)
2326 size = (ctx->ctxvals_pos-offset)/8;
2327
		/* Strand 1: TPs 2, 3 */
2329 ctx->ctxvals_pos = offset + 1;
2330 if (units & (1 << 2))
2331 nv50_graph_construct_xfer_tp2(ctx);
2332 if (units & (1 << 3))
2333 nv50_graph_construct_xfer_tp2(ctx);
2334 if ((ctx->ctxvals_pos-offset)/8 > size)
2335 size = (ctx->ctxvals_pos-offset)/8;
2336
		/* Strand 2: TPs 4, 5, 6 */
2338 ctx->ctxvals_pos = offset + 2;
2339 if (units & (1 << 4))
2340 nv50_graph_construct_xfer_tp2(ctx);
2341 if (units & (1 << 5))
2342 nv50_graph_construct_xfer_tp2(ctx);
2343 if (units & (1 << 6))
2344 nv50_graph_construct_xfer_tp2(ctx);
2345 if ((ctx->ctxvals_pos-offset)/8 > size)
2346 size = (ctx->ctxvals_pos-offset)/8;
2347
		/* Strand 3: TPs 7, 8, 9 */
2349 ctx->ctxvals_pos = offset + 3;
2350 if (units & (1 << 7))
2351 nv50_graph_construct_xfer_tp2(ctx);
2352 if (units & (1 << 8))
2353 nv50_graph_construct_xfer_tp2(ctx);
2354 if (units & (1 << 9))
2355 nv50_graph_construct_xfer_tp2(ctx);
2356 if ((ctx->ctxvals_pos-offset)/8 > size)
2357 size = (ctx->ctxvals_pos-offset)/8;
2358 }
2359 ctx->ctxvals_pos = offset + size * 8;
2360 ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
2361 cp_lsr (ctx, offset);
2362 cp_out (ctx, CP_SET_XFER_POINTER);
2363 cp_lsr (ctx, size);
2364 cp_out (ctx, CP_SEEK_2);
2365 cp_out (ctx, CP_XFER_2);
2366 cp_wait(ctx, XFER, BUSY);
2367}
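
nv50_graph_construct_xfer2() above lays values out strand by strand: ctxvals_pos selects a strand slot relative to the 64-dword-aligned offset, each emitted value advances at a stride of 8, and size ends up as the longest strand measured in 8-dword units before the same offset and size are handed to CP_SET_XFER_POINTER and CP_SEEK_2. A minimal standalone sketch of that bookkeeping, with hypothetical names, follows; it is an illustration of the arithmetic, not code from the patch.

#include <stdint.h>

static uint32_t xfer2_block_end(uint32_t ctxvals_pos, uint32_t size)
{
	/* block start, rounded up to a 64-dword boundary as in the code above */
	uint32_t offset = (ctxvals_pos + 0x3f) & ~(uint32_t)0x3f;
	/* "size" is the longest strand divided by 8, so the block spans size * 8 dwords */
	uint32_t end = offset + size * 8;

	/* the following section again starts 64-dword aligned */
	return (end + 0x3f) & ~(uint32_t)0x3f;
}
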
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index f0dc4e36ef05..de1f5b0062c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -390,7 +390,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
 	if (gpuobj->im_backing)
 		return -EINVAL;
 
-	*sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
+	*sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE);
 	if (*sz == 0)
 		return -EINVAL;
 
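
The hunk above swaps an open-coded round-up for the kernel's ALIGN() macro; for a power-of-two alignment the two forms are equivalent, and zero stays zero, which is what the following *sz == 0 check relies on. A small userspace sketch, not from the patch; the macro below only imitates ALIGN(), and the 4 KiB value for NV50_INSTMEM_PAGE_SIZE is an assumption.

#include <stdio.h>

/* behaves like the kernel's ALIGN() for power-of-two "a" */
#define MY_ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long page = 0x1000;	/* assumed NV50_INSTMEM_PAGE_SIZE */
	unsigned long sz;

	/* every size rounds up to the next page multiple, and 0 stays 0 */
	for (sz = 0; sz <= 2 * page; sz += 0x7ff)
		printf("%#06lx -> %#06lx\n", sz, MY_ALIGN(sz, page));
	return 0;
}
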
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 1cc7b937b1ea..ed38262d9985 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -30,6 +30,9 @@ $(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
 $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
 	$(call if_changed,mkregtable)
 
+$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
 $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
 
 $(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -42,6 +45,8 @@ $(obj)/r420.o: $(obj)/r420_reg_safe.h
 
 $(obj)/rs600.o: $(obj)/rs600_reg_safe.h
 
+$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
+
 radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
 	radeon_irq.o r300_cmdbuf.o r600_cp.o
 # add KMS driver
@@ -54,8 +59,10 @@ radeon-y += radeon_device.o radeon_kms.o \
 	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
 	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
-	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
+	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+	evergreen.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
+radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
 
 obj-$(CONFIG_DRM_RADEON)+= radeon.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 7f152f66f196..d75788feac6c 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -881,8 +881,6 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
 	uint8_t attr = U8((*ptr)++), shift;
 	uint32_t saved, dst;
 	int dptr = *ptr;
-	attr &= 0x38;
-	attr |= atom_def_dst[attr >> 3] << 6;
 	SDEBUG(" dst: ");
 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 	shift = atom_get_src(ctx, attr, ptr);
@@ -897,8 +895,6 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
 	uint8_t attr = U8((*ptr)++), shift;
 	uint32_t saved, dst;
 	int dptr = *ptr;
-	attr &= 0x38;
-	attr |= atom_def_dst[attr >> 3] << 6;
 	SDEBUG(" dst: ");
 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 	shift = atom_get_src(ctx, attr, ptr);
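
The two statements dropped from atom_op_shl()/atom_op_shr() rewrote the operand-attribute byte before it reached atom_get_dst()/atom_get_src(); after this hunk the byte read from the command stream is used unchanged. The sketch below is only an illustration of what the removed lines computed, assuming the parser's usual layout with the destination descriptor in bits [5:3]; def_src stands in for the atom_def_dst[] lookup and is not a real table value.

#include <stdint.h>

static uint8_t rewrite_attr(uint8_t attr, uint8_t def_src)
{
	attr &= 0x38;		/* keep only bits [5:3], the destination descriptor */
	attr |= def_src << 6;	/* overwrite the top bits with a derived source code */
	return attr;
}
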
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 91ad0d1c1b17..6732b5dd8ff4 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2006-2007 Advanced Micro Devices, Inc. 2 * Copyright 2006-2007 Advanced Micro Devices, Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -20,10 +20,12 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 */ 21 */
22 22
23/****************************************************************************/ 23
24/****************************************************************************/
24/*Portion I: Definitions shared between VBIOS and Driver */ 25/*Portion I: Definitions shared between VBIOS and Driver */
25/****************************************************************************/ 26/****************************************************************************/
26 27
28
27#ifndef _ATOMBIOS_H 29#ifndef _ATOMBIOS_H
28#define _ATOMBIOS_H 30#define _ATOMBIOS_H
29 31
@@ -40,39 +42,46 @@
40#endif 42#endif
41 43
42#ifdef _H2INC 44#ifdef _H2INC
43#ifndef ULONG 45 #ifndef ULONG
44typedef unsigned long ULONG; 46 typedef unsigned long ULONG;
45#endif 47 #endif
46 48
47#ifndef UCHAR 49 #ifndef UCHAR
48typedef unsigned char UCHAR; 50 typedef unsigned char UCHAR;
49#endif 51 #endif
50 52
51#ifndef USHORT 53 #ifndef USHORT
52typedef unsigned short USHORT; 54 typedef unsigned short USHORT;
53#endif 55 #endif
54#endif 56#endif
55 57
56#define ATOM_DAC_A 0 58#define ATOM_DAC_A 0
57#define ATOM_DAC_B 1 59#define ATOM_DAC_B 1
58#define ATOM_EXT_DAC 2 60#define ATOM_EXT_DAC 2
59 61
60#define ATOM_CRTC1 0 62#define ATOM_CRTC1 0
61#define ATOM_CRTC2 1 63#define ATOM_CRTC2 1
64#define ATOM_CRTC3 2
65#define ATOM_CRTC4 3
66#define ATOM_CRTC5 4
67#define ATOM_CRTC6 5
68#define ATOM_CRTC_INVALID 0xFF
62 69
63#define ATOM_DIGA 0 70#define ATOM_DIGA 0
64#define ATOM_DIGB 1 71#define ATOM_DIGB 1
65 72
66#define ATOM_PPLL1 0 73#define ATOM_PPLL1 0
67#define ATOM_PPLL2 1 74#define ATOM_PPLL2 1
75#define ATOM_DCPLL 2
76#define ATOM_PPLL_INVALID 0xFF
68 77
69#define ATOM_SCALER1 0 78#define ATOM_SCALER1 0
70#define ATOM_SCALER2 1 79#define ATOM_SCALER2 1
71 80
72#define ATOM_SCALER_DISABLE 0 81#define ATOM_SCALER_DISABLE 0
73#define ATOM_SCALER_CENTER 1 82#define ATOM_SCALER_CENTER 1
74#define ATOM_SCALER_EXPANSION 2 83#define ATOM_SCALER_EXPANSION 2
75#define ATOM_SCALER_MULTI_EX 3 84#define ATOM_SCALER_MULTI_EX 3
76 85
77#define ATOM_DISABLE 0 86#define ATOM_DISABLE 0
78#define ATOM_ENABLE 1 87#define ATOM_ENABLE 1
@@ -82,6 +91,7 @@ typedef unsigned short USHORT;
82#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5) 91#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
83#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5) 92#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
84#define ATOM_ENCODER_INIT (ATOM_DISABLE+7) 93#define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
94#define ATOM_GET_STATUS (ATOM_DISABLE+8)
85 95
86#define ATOM_BLANKING 1 96#define ATOM_BLANKING 1
87#define ATOM_BLANKING_OFF 0 97#define ATOM_BLANKING_OFF 0
@@ -114,7 +124,7 @@ typedef unsigned short USHORT;
114#define ATOM_DAC2_CV ATOM_DAC1_CV 124#define ATOM_DAC2_CV ATOM_DAC1_CV
115#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC 125#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC
116#define ATOM_DAC2_PAL ATOM_DAC1_PAL 126#define ATOM_DAC2_PAL ATOM_DAC1_PAL
117 127
118#define ATOM_PM_ON 0 128#define ATOM_PM_ON 0
119#define ATOM_PM_STANDBY 1 129#define ATOM_PM_STANDBY 1
120#define ATOM_PM_SUSPEND 2 130#define ATOM_PM_SUSPEND 2
@@ -134,6 +144,7 @@ typedef unsigned short USHORT;
134#define ATOM_PANEL_MISC_TEMPORAL 0x00000040 144#define ATOM_PANEL_MISC_TEMPORAL 0x00000040
135#define ATOM_PANEL_MISC_API_ENABLED 0x00000080 145#define ATOM_PANEL_MISC_API_ENABLED 0x00000080
136 146
147
137#define MEMTYPE_DDR1 "DDR1" 148#define MEMTYPE_DDR1 "DDR1"
138#define MEMTYPE_DDR2 "DDR2" 149#define MEMTYPE_DDR2 "DDR2"
139#define MEMTYPE_DDR3 "DDR3" 150#define MEMTYPE_DDR3 "DDR3"
@@ -145,19 +156,19 @@ typedef unsigned short USHORT;
145 156
146/* Maximum size of that FireGL flag string */ 157/* Maximum size of that FireGL flag string */
147 158
148#define ATOM_FIREGL_FLAG_STRING "FGL" /* Flag used to enable FireGL Support */ 159#define ATOM_FIREGL_FLAG_STRING "FGL" //Flag used to enable FireGL Support
149#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 /* sizeof( ATOM_FIREGL_FLAG_STRING ) */ 160#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 //sizeof( ATOM_FIREGL_FLAG_STRING )
150 161
151#define ATOM_FAKE_DESKTOP_STRING "DSK" /* Flag used to enable mobile ASIC on Desktop */ 162#define ATOM_FAKE_DESKTOP_STRING "DSK" //Flag used to enable mobile ASIC on Desktop
152#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 163#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
153 164
154#define ATOM_M54T_FLAG_STRING "M54T" /* Flag used to enable M54T Support */ 165#define ATOM_M54T_FLAG_STRING "M54T" //Flag used to enable M54T Support
155#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 /* sizeof( ATOM_M54T_FLAG_STRING ) */ 166#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 //sizeof( ATOM_M54T_FLAG_STRING )
156 167
157#define HW_ASSISTED_I2C_STATUS_FAILURE 2 168#define HW_ASSISTED_I2C_STATUS_FAILURE 2
158#define HW_ASSISTED_I2C_STATUS_SUCCESS 1 169#define HW_ASSISTED_I2C_STATUS_SUCCESS 1
159 170
160#pragma pack(1) /* BIOS data must use byte aligment */ 171#pragma pack(1) /* BIOS data must use byte aligment */
161 172
162/* Define offset to location of ROM header. */ 173/* Define offset to location of ROM header. */
163 174
@@ -165,367 +176,410 @@ typedef unsigned short USHORT;
165#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L 176#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L
166 177
167#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94 178#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94
168#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */ 179#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */
169#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f 180#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f
170#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e 181#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e
171 182
172/* Common header for all ROM Data tables. 183/* Common header for all ROM Data tables.
173 Every table pointed _ATOM_MASTER_DATA_TABLE has this common header. 184 Every table pointed _ATOM_MASTER_DATA_TABLE has this common header.
174 And the pointer actually points to this header. */ 185 And the pointer actually points to this header. */
175 186
176typedef struct _ATOM_COMMON_TABLE_HEADER { 187typedef struct _ATOM_COMMON_TABLE_HEADER
177 USHORT usStructureSize; 188{
178 UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */ 189 USHORT usStructureSize;
179 UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */ 190 UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */
180 /*Image can't be updated, while Driver needs to carry the new table! */ 191 UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */
181} ATOM_COMMON_TABLE_HEADER; 192 /*Image can't be updated, while Driver needs to carry the new table! */
182 193}ATOM_COMMON_TABLE_HEADER;
183typedef struct _ATOM_ROM_HEADER { 194
184 ATOM_COMMON_TABLE_HEADER sHeader; 195typedef struct _ATOM_ROM_HEADER
185 UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios, 196{
186 atombios should init it as "ATOM", don't change the position */ 197 ATOM_COMMON_TABLE_HEADER sHeader;
187 USHORT usBiosRuntimeSegmentAddress; 198 UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios,
188 USHORT usProtectedModeInfoOffset; 199 atombios should init it as "ATOM", don't change the position */
189 USHORT usConfigFilenameOffset; 200 USHORT usBiosRuntimeSegmentAddress;
190 USHORT usCRC_BlockOffset; 201 USHORT usProtectedModeInfoOffset;
191 USHORT usBIOS_BootupMessageOffset; 202 USHORT usConfigFilenameOffset;
192 USHORT usInt10Offset; 203 USHORT usCRC_BlockOffset;
193 USHORT usPciBusDevInitCode; 204 USHORT usBIOS_BootupMessageOffset;
194 USHORT usIoBaseAddress; 205 USHORT usInt10Offset;
195 USHORT usSubsystemVendorID; 206 USHORT usPciBusDevInitCode;
196 USHORT usSubsystemID; 207 USHORT usIoBaseAddress;
197 USHORT usPCI_InfoOffset; 208 USHORT usSubsystemVendorID;
198 USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */ 209 USHORT usSubsystemID;
199 USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */ 210 USHORT usPCI_InfoOffset;
200 UCHAR ucExtendedFunctionCode; 211 USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
201 UCHAR ucReserved; 212 USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */
202} ATOM_ROM_HEADER; 213 UCHAR ucExtendedFunctionCode;
214 UCHAR ucReserved;
215}ATOM_ROM_HEADER;
203 216
204/*==============================Command Table Portion==================================== */ 217/*==============================Command Table Portion==================================== */
205 218
206#ifdef UEFI_BUILD 219#ifdef UEFI_BUILD
207#define UTEMP USHORT 220 #define UTEMP USHORT
208#define USHORT void* 221 #define USHORT void*
209#endif 222#endif
210 223
211typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES { 224typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
212 USHORT ASIC_Init; /* Function Table, used by various SW components,latest version 1.1 */ 225 USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1
213 USHORT GetDisplaySurfaceSize; /* Atomic Table, Used by Bios when enabling HW ICON */ 226 USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON
214 USHORT ASIC_RegistersInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ 227 USHORT ASIC_RegistersInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
215 USHORT VRAM_BlockVenderDetection; /* Atomic Table, used only by Bios */ 228 USHORT VRAM_BlockVenderDetection; //Atomic Table, used only by Bios
216 USHORT DIGxEncoderControl; /* Only used by Bios */ 229 USHORT DIGxEncoderControl; //Only used by Bios
217 USHORT MemoryControllerInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ 230 USHORT MemoryControllerInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
218 USHORT EnableCRTCMemReq; /* Function Table,directly used by various SW components,latest version 2.1 */ 231 USHORT EnableCRTCMemReq; //Function Table,directly used by various SW components,latest version 2.1
219 USHORT MemoryParamAdjust; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed */ 232 USHORT MemoryParamAdjust; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed
220 USHORT DVOEncoderControl; /* Function Table,directly used by various SW components,latest version 1.2 */ 233 USHORT DVOEncoderControl; //Function Table,directly used by various SW components,latest version 1.2
221 USHORT GPIOPinControl; /* Atomic Table, only used by Bios */ 234 USHORT GPIOPinControl; //Atomic Table, only used by Bios
222 USHORT SetEngineClock; /*Function Table,directly used by various SW components,latest version 1.1 */ 235 USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1
223 USHORT SetMemoryClock; /* Function Table,directly used by various SW components,latest version 1.1 */ 236 USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1
224 USHORT SetPixelClock; /*Function Table,directly used by various SW components,latest version 1.2 */ 237 USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2
225 USHORT DynamicClockGating; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ 238 USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
226 USHORT ResetMemoryDLL; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 239 USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
227 USHORT ResetMemoryDevice; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 240 USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
228 USHORT MemoryPLLInit; 241 USHORT MemoryPLLInit;
229 USHORT AdjustDisplayPll; /* only used by Bios */ 242 USHORT AdjustDisplayPll; //only used by Bios
230 USHORT AdjustMemoryController; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 243 USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
231 USHORT EnableASIC_StaticPwrMgt; /* Atomic Table, only used by Bios */ 244 USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
232 USHORT ASIC_StaticPwrMgtStatusChange; /* Obsolete, only used by Bios */ 245 USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios
233 USHORT DAC_LoadDetection; /* Atomic Table, directly used by various SW components,latest version 1.2 */ 246 USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
234 USHORT LVTMAEncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.3 */ 247 USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
235 USHORT LCD1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 248 USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
236 USHORT DAC1EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 249 USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
237 USHORT DAC2EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 250 USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
238 USHORT DVOOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 251 USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
239 USHORT CV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 252 USHORT CV1OutputControl; //Atomic Table, Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
240 USHORT GetConditionalGoldenSetting; /* only used by Bios */ 253 USHORT GetConditionalGoldenSetting; //only used by Bios
241 USHORT TVEncoderControl; /* Function Table,directly used by various SW components,latest version 1.1 */ 254 USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
242 USHORT TMDSAEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */ 255 USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
243 USHORT LVDSEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */ 256 USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
244 USHORT TV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 257 USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
245 USHORT EnableScaler; /* Atomic Table, used only by Bios */ 258 USHORT EnableScaler; //Atomic Table, used only by Bios
246 USHORT BlankCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 259 USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
247 USHORT EnableCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 260 USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
248 USHORT GetPixelClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 261 USHORT GetPixelClock; //Atomic Table, directly used by various SW components,latest version 1.1
249 USHORT EnableVGA_Render; /* Function Table,directly used by various SW components,latest version 1.1 */ 262 USHORT EnableVGA_Render; //Function Table,directly used by various SW components,latest version 1.1
250 USHORT EnableVGA_Access; /* Obsolete , only used by Bios */ 263 USHORT GetSCLKOverMCLKRatio; //Atomic Table, only used by Bios
251 USHORT SetCRTC_Timing; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 264 USHORT SetCRTC_Timing; //Atomic Table, directly used by various SW components,latest version 1.1
252 USHORT SetCRTC_OverScan; /* Atomic Table, used by various SW components,latest version 1.1 */ 265 USHORT SetCRTC_OverScan; //Atomic Table, used by various SW components,latest version 1.1
253 USHORT SetCRTC_Replication; /* Atomic Table, used only by Bios */ 266 USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
254 USHORT SelectCRTC_Source; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 267 USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
255 USHORT EnableGraphSurfaces; /* Atomic Table, used only by Bios */ 268 USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
256 USHORT UpdateCRTC_DoubleBufferRegisters; 269 USHORT UpdateCRTC_DoubleBufferRegisters;
257 USHORT LUT_AutoFill; /* Atomic Table, only used by Bios */ 270 USHORT LUT_AutoFill; //Atomic Table, only used by Bios
258 USHORT EnableHW_IconCursor; /* Atomic Table, only used by Bios */ 271 USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
259 USHORT GetMemoryClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 272 USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
260 USHORT GetEngineClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 273 USHORT GetEngineClock; //Atomic Table, directly used by various SW components,latest version 1.1
261 USHORT SetCRTC_UsingDTDTiming; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 274 USHORT SetCRTC_UsingDTDTiming; //Atomic Table, directly used by various SW components,latest version 1.1
262 USHORT ExternalEncoderControl; /* Atomic Table, directly used by various SW components,latest version 2.1 */ 275 USHORT ExternalEncoderControl; //Atomic Table, directly used by various SW components,latest version 2.1
263 USHORT LVTMAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 276 USHORT LVTMAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
264 USHORT VRAM_BlockDetectionByStrap; /* Atomic Table, used only by Bios */ 277 USHORT VRAM_BlockDetectionByStrap; //Atomic Table, used only by Bios
265 USHORT MemoryCleanUp; /* Atomic Table, only used by Bios */ 278 USHORT MemoryCleanUp; //Atomic Table, only used by Bios
266 USHORT ProcessI2cChannelTransaction; /* Function Table,only used by Bios */ 279 USHORT ProcessI2cChannelTransaction; //Function Table,only used by Bios
267 USHORT WriteOneByteToHWAssistedI2C; /* Function Table,indirectly used by various SW components */ 280 USHORT WriteOneByteToHWAssistedI2C; //Function Table,indirectly used by various SW components
268 USHORT ReadHWAssistedI2CStatus; /* Atomic Table, indirectly used by various SW components */ 281 USHORT ReadHWAssistedI2CStatus; //Atomic Table, indirectly used by various SW components
269 USHORT SpeedFanControl; /* Function Table,indirectly used by various SW components,called from ASIC_Init */ 282 USHORT SpeedFanControl; //Function Table,indirectly used by various SW components,called from ASIC_Init
270 USHORT PowerConnectorDetection; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 283 USHORT PowerConnectorDetection; //Atomic Table, directly used by various SW components,latest version 1.1
271 USHORT MC_Synchronization; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 284 USHORT MC_Synchronization; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
272 USHORT ComputeMemoryEnginePLL; /* Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock */ 285 USHORT ComputeMemoryEnginePLL; //Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock
273 USHORT MemoryRefreshConversion; /* Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock */ 286 USHORT MemoryRefreshConversion; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock
274 USHORT VRAM_GetCurrentInfoBlock; /* Atomic Table, used only by Bios */ 287 USHORT VRAM_GetCurrentInfoBlock; //Atomic Table, used only by Bios
275 USHORT DynamicMemorySettings; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 288 USHORT DynamicMemorySettings; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
276 USHORT MemoryTraining; /* Atomic Table, used only by Bios */ 289 USHORT MemoryTraining; //Atomic Table, used only by Bios
277 USHORT EnableSpreadSpectrumOnPPLL; /* Atomic Table, directly used by various SW components,latest version 1.2 */ 290 USHORT EnableSpreadSpectrumOnPPLL; //Atomic Table, directly used by various SW components,latest version 1.2
278 USHORT TMDSAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 291 USHORT TMDSAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
279 USHORT SetVoltage; /* Function Table,directly and/or indirectly used by various SW components,latest version 1.1 */ 292 USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
280 USHORT DAC1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 293 USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
281 USHORT DAC2OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ 294 USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
282 USHORT SetupHWAssistedI2CStatus; /* Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" */ 295 USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
283 USHORT ClockSource; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ 296 USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
284 USHORT MemoryDeviceInit; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ 297 USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
285 USHORT EnableYUV; /* Atomic Table, indirectly used by various SW components,called from EnableVGARender */ 298 USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
286 USHORT DIG1EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ 299 USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
287 USHORT DIG2EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ 300 USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
288 USHORT DIG1TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ 301 USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
289 USHORT DIG2TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ 302 USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
290 USHORT ProcessAuxChannelTransaction; /* Function Table,only used by Bios */ 303 USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios
291 USHORT DPEncoderService; /* Function Table,only used by Bios */ 304 USHORT DPEncoderService; //Function Table,only used by Bios
292} ATOM_MASTER_LIST_OF_COMMAND_TABLES; 305}ATOM_MASTER_LIST_OF_COMMAND_TABLES;
293 306
294/* For backward compatible */ 307// For backward compatible
295#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction 308#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
296#define UNIPHYTransmitterControl DIG1TransmitterControl 309#define UNIPHYTransmitterControl DIG1TransmitterControl
297#define LVTMATransmitterControl DIG2TransmitterControl 310#define LVTMATransmitterControl DIG2TransmitterControl
298#define SetCRTC_DPM_State GetConditionalGoldenSetting 311#define SetCRTC_DPM_State GetConditionalGoldenSetting
299#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange 312#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
313#define HPDInterruptService ReadHWAssistedI2CStatus
314#define EnableVGA_Access GetSCLKOverMCLKRatio
300 315
typedef struct _ATOM_MASTER_COMMAND_TABLE
{
  ATOM_COMMON_TABLE_HEADER           sHeader;
  ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
}ATOM_MASTER_COMMAND_TABLE;

/****************************************************************************/
// Structures used in every command table
/****************************************************************************/
typedef struct _ATOM_TABLE_ATTRIBUTE
{
#if ATOM_BIG_ENDIAN
  USHORT  UpdatedByUtility:1;         //[15]=Table updated by utility flag
  USHORT  PS_SizeInBytes:7;           //[14:8]=Size of parameter space in Bytes (multiple of a dword),
  USHORT  WS_SizeInBytes:8;           //[7:0]=Size of workspace in Bytes (in multiple of a dword),
#else
  USHORT  WS_SizeInBytes:8;           //[7:0]=Size of workspace in Bytes (in multiple of a dword),
  USHORT  PS_SizeInBytes:7;           //[14:8]=Size of parameter space in Bytes (multiple of a dword),
  USHORT  UpdatedByUtility:1;         //[15]=Table updated by utility flag
#endif
}ATOM_TABLE_ATTRIBUTE;

typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS
{
  ATOM_TABLE_ATTRIBUTE sbfAccess;
  USHORT               susAccess;
}ATOM_TABLE_ATTRIBUTE_ACCESS;

/****************************************************************************/
// Common header for all command tables.
// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header.
// And the pointer actually points to this header.
/****************************************************************************/
typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
{
  ATOM_COMMON_TABLE_HEADER CommonHeader;
  ATOM_TABLE_ATTRIBUTE     TableAttribute;
}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
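Illustration (editorial, not part of the header): ATOM_TABLE_ATTRIBUTE_ACCESS lets a parser read the packed 16-bit attribute word either as a raw USHORT or field by field. A minimal user-space sketch, assuming a little-endian host and redeclaring only what it needs; the raw value is hypothetical:

#include <stdio.h>

typedef unsigned short USHORT;

// Little-endian layout only, copied from the header above.
typedef struct _ATOM_TABLE_ATTRIBUTE {
    USHORT WS_SizeInBytes:8;    // [7:0]  workspace size
    USHORT PS_SizeInBytes:7;    // [14:8] parameter space size
    USHORT UpdatedByUtility:1;  // [15]   table updated by utility flag
} ATOM_TABLE_ATTRIBUTE;

typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS {
    ATOM_TABLE_ATTRIBUTE sbfAccess;
    USHORT               susAccess;
} ATOM_TABLE_ATTRIBUTE_ACCESS;

int main(void)
{
    ATOM_TABLE_ATTRIBUTE_ACCESS attr;

    attr.susAccess = 0x8a10;    // hypothetical raw word read from a ROM image

    printf("WS=%u PS=%u updated=%u\n",
           attr.sbfAccess.WS_SizeInBytes,
           attr.sbfAccess.PS_SizeInBytes,
           attr.sbfAccess.UpdatedByUtility);
    return 0;
}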
336/****************************************************************************/ 355/****************************************************************************/
337/* Structures used by ComputeMemoryEnginePLLTable */ 356// Structures used by ComputeMemoryEnginePLLTable
338/****************************************************************************/ 357/****************************************************************************/
339#define COMPUTE_MEMORY_PLL_PARAM 1 358#define COMPUTE_MEMORY_PLL_PARAM 1
340#define COMPUTE_ENGINE_PLL_PARAM 2 359#define COMPUTE_ENGINE_PLL_PARAM 2
341 360
typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
{
  ULONG   ulClock;        //When returen, it's the re-calculated clock based on given Fb_div Post_Div and ref_div
  UCHAR   ucAction;       //0:reserved //1:Memory //2:Engine
  UCHAR   ucReserved;     //may expand to return larger Fbdiv later
  UCHAR   ucFbDiv;        //return value
  UCHAR   ucPostDiv;      //return value
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;

typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
{
  ULONG   ulClock;        //When return, [23:0] return real clock
  UCHAR   ucAction;       //0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. it return ref_div to be written to register
  USHORT  usFbDiv;        //return Feedback value to be written to register
  UCHAR   ucPostDiv;      //return post div to be written to register
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
356#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS 377#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
357 378
358#define SET_CLOCK_FREQ_MASK 0x00FFFFFF /* Clock change tables only take bit [23:0] as the requested clock value */ 379
359#define USE_NON_BUS_CLOCK_MASK 0x01000000 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */ 380#define SET_CLOCK_FREQ_MASK 0x00FFFFFF //Clock change tables only take bit [23:0] as the requested clock value
360#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */ 381#define USE_NON_BUS_CLOCK_MASK 0x01000000 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
361#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */ 382#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 //Only applicable to memory clock change, when set, using memory self refresh during clock transition
362#define FIRST_TIME_CHANGE_CLOCK 0x08000000 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */ 383#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
363#define SKIP_SW_PROGRAM_PLL 0x10000000 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */ 384#define FIRST_TIME_CHANGE_CLOCK 0x08000000 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
385#define SKIP_SW_PROGRAM_PLL 0x10000000 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
364#define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK 386#define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK
365 387
366#define b3USE_NON_BUS_CLOCK_MASK 0x01 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */ 388#define b3USE_NON_BUS_CLOCK_MASK 0x01 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
367#define b3USE_MEMORY_SELF_REFRESH 0x02 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */ 389#define b3USE_MEMORY_SELF_REFRESH 0x02 //Only applicable to memory clock change, when set, using memory self refresh during clock transition
368#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */ 390#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
369#define b3FIRST_TIME_CHANGE_CLOCK 0x08 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */ 391#define b3FIRST_TIME_CHANGE_CLOCK 0x08 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
370#define b3SKIP_SW_PROGRAM_PLL 0x10 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */ 392#define b3SKIP_SW_PROGRAM_PLL 0x10 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
371 393
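Illustration (editorial, not part of the header): the clock-change tables take a single ULONG whose low 24 bits are the target clock and whose high bits are the flags above. A hedged sketch of composing such a request; the 50000 (500 MHz in 10 kHz units) target and the local ULONG typedef are stand-ins for illustration only:

#include <stdio.h>

typedef unsigned int ULONG;   // stand-in for the header's 32-bit ULONG

#define SET_CLOCK_FREQ_MASK             0x00FFFFFF
#define USE_MEMORY_SELF_REFRESH_MASK    0x02000000
#define FIRST_TIME_CHANGE_CLOCK         0x08000000

int main(void)
{
    // Request 500 MHz (50000 * 10 kHz), using memory self refresh during
    // the transition and marking this as the first clock change after bootup.
    ULONG target  = 50000;    // illustrative value only
    ULONG request = (target & SET_CLOCK_FREQ_MASK)
                  | USE_MEMORY_SELF_REFRESH_MASK
                  | FIRST_TIME_CHANGE_CLOCK;

    printf("clock-change parameter: 0x%08x\n", request);
    return 0;
}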
372typedef struct _ATOM_COMPUTE_CLOCK_FREQ { 394typedef struct _ATOM_COMPUTE_CLOCK_FREQ
395{
373#if ATOM_BIG_ENDIAN 396#if ATOM_BIG_ENDIAN
374 ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */ 397 ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
375 ULONG ulClockFreq:24; /* in unit of 10kHz */ 398 ULONG ulClockFreq:24; // in unit of 10kHz
376#else 399#else
377 ULONG ulClockFreq:24; /* in unit of 10kHz */ 400 ULONG ulClockFreq:24; // in unit of 10kHz
378 ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */ 401 ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
379#endif 402#endif
380} ATOM_COMPUTE_CLOCK_FREQ; 403}ATOM_COMPUTE_CLOCK_FREQ;
typedef struct _ATOM_S_MPLL_FB_DIVIDER
{
  USHORT usFbDivFrac;
  USHORT usFbDiv;
}ATOM_S_MPLL_FB_DIVIDER;

typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
{
  union
  {
    ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
  };
  UCHAR   ucRefDiv;                           //Output Parameter
  UCHAR   ucPostDiv;                          //Output Parameter
  UCHAR   ucCntlFlag;                         //Output Parameter
  UCHAR   ucReserved;
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;

// ucCntlFlag
#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN          1
#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE            2
#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE         4
#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9            8
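Illustration (editorial, not from the kernel): COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 overlays its input (target clock plus flag) and its output (feedback divider) in one union, so a caller fills ulClock before running the table and reads ulFbDiv/ucRefDiv/ucPostDiv afterwards. A minimal little-endian sketch of the input side; the clock value is illustrative and the table execution call is only hinted at in a comment:

#include <stdio.h>
#include <string.h>

typedef unsigned int   ULONG;    // stand-ins for the header's fixed-width types
typedef unsigned short USHORT;
typedef unsigned char  UCHAR;

#define COMPUTE_ENGINE_PLL_PARAM 2

typedef struct _ATOM_COMPUTE_CLOCK_FREQ {   // little-endian layout only
    ULONG ulClockFreq:24;          // in units of 10 kHz
    ULONG ulComputeClockFlag:8;    // COMPUTE_MEMORY_PLL_PARAM or COMPUTE_ENGINE_PLL_PARAM
} ATOM_COMPUTE_CLOCK_FREQ;

typedef struct _ATOM_S_MPLL_FB_DIVIDER {
    USHORT usFbDivFrac;
    USHORT usFbDiv;
} ATOM_S_MPLL_FB_DIVIDER;

typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 {
    union {
        ATOM_COMPUTE_CLOCK_FREQ ulClock;   // input
        ATOM_S_MPLL_FB_DIVIDER  ulFbDiv;   // output
    };
    UCHAR ucRefDiv;                        // output
    UCHAR ucPostDiv;                       // output
    UCHAR ucCntlFlag;                      // output
    UCHAR ucReserved;
} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;

int main(void)
{
    COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 args;

    memset(&args, 0, sizeof(args));
    args.ulClock.ulClockFreq        = 40000;                   // 400 MHz, illustrative
    args.ulClock.ulComputeClockFlag = COMPUTE_ENGINE_PLL_PARAM;

    // A real driver would now hand &args to its AtomBIOS table executor
    // (for example the radeon driver's atom_execute_table()); on return the
    // union holds ulFbDiv, and ucRefDiv/ucPostDiv hold the other dividers.

    printf("request: %u x 10 kHz, flag %u\n",
           (unsigned)args.ulClock.ulClockFreq,
           (unsigned)args.ulClock.ulComputeClockFlag);
    return 0;
}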
// V4 are only used for APU which PLL outside GPU
typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
{
#if ATOM_BIG_ENDIAN
  ULONG  ucPostDiv;          //return parameter: post divider which is used to program to register directly
  ULONG  ulClock:24;         //Input= target clock, output = actual clock
#else
  ULONG  ulClock:24;         //Input= target clock, output = actual clock
  ULONG  ucPostDiv;          //return parameter: post divider which is used to program to register directly
#endif
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;

typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
{
  ATOM_COMPUTE_CLOCK_FREQ ulClock;
  ULONG ulReserved[2];
}DYNAMICE_MEMORY_SETTINGS_PARAMETER;

typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
{
  ATOM_COMPUTE_CLOCK_FREQ ulClock;
  ULONG ulMemoryClock;
  ULONG ulReserved;
}DYNAMICE_ENGINE_SETTINGS_PARAMETER;

/****************************************************************************/
// Structures used by SetEngineClockTable
/****************************************************************************/
typedef struct _SET_ENGINE_CLOCK_PARAMETERS
{
  ULONG ulTargetEngineClock;          //In 10Khz unit
}SET_ENGINE_CLOCK_PARAMETERS;

typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
{
  ULONG ulTargetEngineClock;          //In 10Khz unit
  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
}SET_ENGINE_CLOCK_PS_ALLOCATION;

/****************************************************************************/
// Structures used by SetMemoryClockTable
/****************************************************************************/
typedef struct _SET_MEMORY_CLOCK_PARAMETERS
{
  ULONG ulTargetMemoryClock;          //In 10Khz unit
}SET_MEMORY_CLOCK_PARAMETERS;

typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION
{
  ULONG ulTargetMemoryClock;          //In 10Khz unit
  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
}SET_MEMORY_CLOCK_PS_ALLOCATION;

/****************************************************************************/
// Structures used by ASIC_Init.ctb
/****************************************************************************/
typedef struct _ASIC_INIT_PARAMETERS
{
  ULONG ulDefaultEngineClock;         //In 10Khz unit
  ULONG ulDefaultMemoryClock;         //In 10Khz unit
}ASIC_INIT_PARAMETERS;
451/****************************************************************************/ 493typedef struct _ASIC_INIT_PS_ALLOCATION
452/* Structure used by DynamicClockGatingTable.ctb */ 494{
453/****************************************************************************/ 495 ASIC_INIT_PARAMETERS sASICInitClocks;
454typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS { 496 SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure
455 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 497}ASIC_INIT_PS_ALLOCATION;
456 UCHAR ucPadding[3]; 498
457} DYNAMIC_CLOCK_GATING_PARAMETERS; 499/****************************************************************************/
500// Structure used by DynamicClockGatingTable.ctb
501/****************************************************************************/
502typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS
503{
504 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
505 UCHAR ucPadding[3];
506}DYNAMIC_CLOCK_GATING_PARAMETERS;
458#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS 507#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
459 508
460/****************************************************************************/ 509/****************************************************************************/
461/* Structure used by EnableASIC_StaticPwrMgtTable.ctb */ 510// Structure used by EnableASIC_StaticPwrMgtTable.ctb
462/****************************************************************************/ 511/****************************************************************************/
463typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS { 512typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
464 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 513{
465 UCHAR ucPadding[3]; 514 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
466} ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS; 515 UCHAR ucPadding[3];
516}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
467#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS 517#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
468 518
469/****************************************************************************/ 519/****************************************************************************/
470/* Structures used by DAC_LoadDetectionTable.ctb */ 520// Structures used by DAC_LoadDetectionTable.ctb
471/****************************************************************************/ 521/****************************************************************************/
472typedef struct _DAC_LOAD_DETECTION_PARAMETERS { 522typedef struct _DAC_LOAD_DETECTION_PARAMETERS
473 USHORT usDeviceID; /* {ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} */ 523{
474 UCHAR ucDacType; /* {ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} */ 524 USHORT usDeviceID; //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT}
475 UCHAR ucMisc; /* Valid only when table revision =1.3 and above */ 525 UCHAR ucDacType; //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC}
476} DAC_LOAD_DETECTION_PARAMETERS; 526 UCHAR ucMisc; //Valid only when table revision =1.3 and above
527}DAC_LOAD_DETECTION_PARAMETERS;
477 528
478/* DAC_LOAD_DETECTION_PARAMETERS.ucMisc */ 529// DAC_LOAD_DETECTION_PARAMETERS.ucMisc
479#define DAC_LOAD_MISC_YPrPb 0x01 530#define DAC_LOAD_MISC_YPrPb 0x01
480 531
481typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION { 532typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION
482 DAC_LOAD_DETECTION_PARAMETERS sDacload; 533{
483 ULONG Reserved[2]; /* Don't set this one, allocation for EXT DAC */ 534 DAC_LOAD_DETECTION_PARAMETERS sDacload;
484} DAC_LOAD_DETECTION_PS_ALLOCATION; 535 ULONG Reserved[2];// Don't set this one, allocation for EXT DAC
485 536}DAC_LOAD_DETECTION_PS_ALLOCATION;
486/****************************************************************************/ 537
487/* Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb */ 538/****************************************************************************/
488/****************************************************************************/ 539// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb
489typedef struct _DAC_ENCODER_CONTROL_PARAMETERS { 540/****************************************************************************/
490 USHORT usPixelClock; /* in 10KHz; for bios convenient */ 541typedef struct _DAC_ENCODER_CONTROL_PARAMETERS
491 UCHAR ucDacStandard; /* See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) */ 542{
492 UCHAR ucAction; /* 0: turn off encoder */ 543 USHORT usPixelClock; // in 10KHz; for bios convenient
493 /* 1: setup and turn on encoder */ 544 UCHAR ucDacStandard; // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0)
494 /* 7: ATOM_ENCODER_INIT Initialize DAC */ 545 UCHAR ucAction; // 0: turn off encoder
495} DAC_ENCODER_CONTROL_PARAMETERS; 546 // 1: setup and turn on encoder
547 // 7: ATOM_ENCODER_INIT Initialize DAC
548}DAC_ENCODER_CONTROL_PARAMETERS;
496 549
497#define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS 550#define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS
498 551
499/****************************************************************************/ 552/****************************************************************************/
500/* Structures used by DIG1EncoderControlTable */ 553// Structures used by DIG1EncoderControlTable
501/* DIG2EncoderControlTable */ 554// DIG2EncoderControlTable
502/* ExternalEncoderControlTable */ 555// ExternalEncoderControlTable
503/****************************************************************************/ 556/****************************************************************************/
504typedef struct _DIG_ENCODER_CONTROL_PARAMETERS { 557typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
505 USHORT usPixelClock; /* in 10KHz; for bios convenient */ 558{
506 UCHAR ucConfig; 559 USHORT usPixelClock; // in 10KHz; for bios convenient
507 /* [2] Link Select: */ 560 UCHAR ucConfig;
508 /* =0: PHY linkA if bfLane<3 */ 561 // [2] Link Select:
509 /* =1: PHY linkB if bfLanes<3 */ 562 // =0: PHY linkA if bfLane<3
510 /* =0: PHY linkA+B if bfLanes=3 */ 563 // =1: PHY linkB if bfLanes<3
511 /* [3] Transmitter Sel */ 564 // =0: PHY linkA+B if bfLanes=3
512 /* =0: UNIPHY or PCIEPHY */ 565 // [3] Transmitter Sel
513 /* =1: LVTMA */ 566 // =0: UNIPHY or PCIEPHY
514 UCHAR ucAction; /* =0: turn off encoder */ 567 // =1: LVTMA
515 /* =1: turn on encoder */ 568 UCHAR ucAction; // =0: turn off encoder
516 UCHAR ucEncoderMode; 569 // =1: turn on encoder
517 /* =0: DP encoder */ 570 UCHAR ucEncoderMode;
518 /* =1: LVDS encoder */ 571 // =0: DP encoder
519 /* =2: DVI encoder */ 572 // =1: LVDS encoder
520 /* =3: HDMI encoder */ 573 // =2: DVI encoder
521 /* =4: SDVO encoder */ 574 // =3: HDMI encoder
522 UCHAR ucLaneNum; /* how many lanes to enable */ 575 // =4: SDVO encoder
523 UCHAR ucReserved[2]; 576 UCHAR ucLaneNum; // how many lanes to enable
524} DIG_ENCODER_CONTROL_PARAMETERS; 577 UCHAR ucReserved[2];
578}DIG_ENCODER_CONTROL_PARAMETERS;
525#define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS 579#define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS
526#define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS 580#define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS
527 581
528/* ucConfig */ 582//ucConfig
529#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01 583#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01
530#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00 584#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00
531#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01 585#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01
@@ -539,52 +593,57 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {
539#define ATOM_ENCODER_CONFIG_LVTMA 0x08 593#define ATOM_ENCODER_CONFIG_LVTMA 0x08
540#define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00 594#define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00
541#define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08 595#define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08
542#define ATOM_ENCODER_CONFIG_DIGB 0x80 /* VBIOS Internal use, outside SW should set this bit=0 */ 596#define ATOM_ENCODER_CONFIG_DIGB 0x80 // VBIOS Internal use, outside SW should set this bit=0
543/* ucAction */ 597// ucAction
544/* ATOM_ENABLE: Enable Encoder */ 598// ATOM_ENABLE: Enable Encoder
545/* ATOM_DISABLE: Disable Encoder */ 599// ATOM_DISABLE: Disable Encoder
546 600
547/* ucEncoderMode */ 601//ucEncoderMode
548#define ATOM_ENCODER_MODE_DP 0 602#define ATOM_ENCODER_MODE_DP 0
549#define ATOM_ENCODER_MODE_LVDS 1 603#define ATOM_ENCODER_MODE_LVDS 1
550#define ATOM_ENCODER_MODE_DVI 2 604#define ATOM_ENCODER_MODE_DVI 2
551#define ATOM_ENCODER_MODE_HDMI 3 605#define ATOM_ENCODER_MODE_HDMI 3
552#define ATOM_ENCODER_MODE_SDVO 4 606#define ATOM_ENCODER_MODE_SDVO 4
607#define ATOM_ENCODER_MODE_DP_AUDIO 5
553#define ATOM_ENCODER_MODE_TV 13 608#define ATOM_ENCODER_MODE_TV 13
554#define ATOM_ENCODER_MODE_CV 14 609#define ATOM_ENCODER_MODE_CV 14
555#define ATOM_ENCODER_MODE_CRT 15 610#define ATOM_ENCODER_MODE_CRT 15
556 611
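Illustration (editorial, not part of the header): a DIG1EncoderControl/DIG2EncoderControl call takes a DIG_ENCODER_CONTROL_PARAMETERS block whose ucAction and ucEncoderMode follow the comments and defines above. A hedged sketch that only fills the structure for turning on a single-link DVI encoder; the pixel clock and lane count are illustrative, and the actual table invocation is omitted:

#include <string.h>

typedef unsigned short USHORT;
typedef unsigned char  UCHAR;

#define ATOM_ENCODER_MODE_DVI 2

typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {   // as defined in the header
    USHORT usPixelClock;   // in 10 kHz
    UCHAR  ucConfig;       // link / transmitter selection bits
    UCHAR  ucAction;       // =0: turn off encoder, =1: turn on encoder
    UCHAR  ucEncoderMode;
    UCHAR  ucLaneNum;      // how many lanes to enable
    UCHAR  ucReserved[2];
} DIG_ENCODER_CONTROL_PARAMETERS;

int main(void)
{
    DIG_ENCODER_CONTROL_PARAMETERS args;

    memset(&args, 0, sizeof(args));
    args.usPixelClock  = 16200;                  // 162 MHz single-link DVI, illustrative
    args.ucConfig      = 0;                      // PHY link A, first transmitter/encoder
    args.ucAction      = 1;                      // "=1: turn on encoder"
    args.ucEncoderMode = ATOM_ENCODER_MODE_DVI;
    args.ucLaneNum     = 4;                      // 4 lane link (single link)

    // The filled block would be handed to the DIG1EncoderControl or
    // DIG2EncoderControl command table by the driver's table executor.
    return 0;
}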
557typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 { 612typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
613{
558#if ATOM_BIG_ENDIAN 614#if ATOM_BIG_ENDIAN
559 UCHAR ucReserved1:2; 615 UCHAR ucReserved1:2;
560 UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */ 616 UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF
561 UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */ 617 UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F
562 UCHAR ucReserved:1; 618 UCHAR ucReserved:1;
563 UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */ 619 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
564#else 620#else
565 UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */ 621 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
566 UCHAR ucReserved:1; 622 UCHAR ucReserved:1;
567 UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */ 623 UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F
568 UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */ 624 UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF
569 UCHAR ucReserved1:2; 625 UCHAR ucReserved1:2;
570#endif 626#endif
571} ATOM_DIG_ENCODER_CONFIG_V2; 627}ATOM_DIG_ENCODER_CONFIG_V2;
572 628
typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
{
  USHORT usPixelClock;      // in 10KHz; for bios convenient
  ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
  UCHAR ucAction;
  UCHAR ucEncoderMode;
                            // =0: DP encoder
                            // =1: LVDS encoder
                            // =2: DVI encoder
                            // =3: HDMI encoder
                            // =4: SDVO encoder
  UCHAR ucLaneNum;          // how many lanes to enable
  UCHAR ucStatus;           // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS
  UCHAR ucReserved;
}DIG_ENCODER_CONTROL_PARAMETERS_V2;

//ucConfig
588#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01 647#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01
589#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00 648#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00
590#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01 649#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01
@@ -596,58 +655,122 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
596#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08 655#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08
597#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10 656#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10
598 657
// ucAction:
// ATOM_DISABLE
// ATOM_ENABLE
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START       0x08
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1    0x09
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2    0x0a
#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE    0x0b
#define ATOM_ENCODER_CMD_DP_VIDEO_OFF                 0x0c
#define ATOM_ENCODER_CMD_DP_VIDEO_ON                  0x0d
#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e
#define ATOM_ENCODER_CMD_SETUP                        0x0f

// ucStatus
#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE    0x10
#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE  0x00
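Sketch (editorial, not part of the header): per the ucStatus comment in DIG_ENCODER_CONTROL_PARAMETERS_V2, a caller that issues ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS can compare the returned ucStatus against the values above. How and when a given driver or VBIOS issues that query is not specified here:

typedef unsigned char UCHAR;

#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10

// Returns nonzero when the queried encoder reports DP link training done.
static int dp_link_training_done(UCHAR ucStatus)
{
    return ucStatus == ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE;
}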
674// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver
675typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
676{
677#if ATOM_BIG_ENDIAN
678 UCHAR ucReserved1:1;
679 UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
680 UCHAR ucReserved:3;
681 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
682#else
683 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
684 UCHAR ucReserved:3;
685 UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
686 UCHAR ucReserved1:1;
687#endif
688}ATOM_DIG_ENCODER_CONFIG_V3;
689
690#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70
691
692
693typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
694{
695 USHORT usPixelClock; // in 10KHz; for bios convenient
696 ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
697 UCHAR ucAction;
698 UCHAR ucEncoderMode;
699 // =0: DP encoder
700 // =1: LVDS encoder
701 // =2: DVI encoder
702 // =3: HDMI encoder
703 // =4: SDVO encoder
704 // =5: DP audio
705 UCHAR ucLaneNum; // how many lanes to enable
706 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
707 UCHAR ucReserved;
708}DIG_ENCODER_CONTROL_PARAMETERS_V3;
709
710
711// define ucBitPerColor:
712#define PANEL_BPC_UNDEFINE 0x00
713#define PANEL_6BIT_PER_COLOR 0x01
714#define PANEL_8BIT_PER_COLOR 0x02
715#define PANEL_10BIT_PER_COLOR 0x03
716#define PANEL_12BIT_PER_COLOR 0x04
717#define PANEL_16BIT_PER_COLOR 0x05
718
719/****************************************************************************/
720// Structures used by UNIPHYTransmitterControlTable
721// LVTMATransmitterControlTable
722// DVOOutputControlTable
723/****************************************************************************/
724typedef struct _ATOM_DP_VS_MODE
725{
726 UCHAR ucLaneSel;
727 UCHAR ucLaneSet;
728}ATOM_DP_VS_MODE;
729
730typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS
731{
732 union
733 {
734 USHORT usPixelClock; // in 10KHz; for bios convenient
735 USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h
736 ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
614 }; 737 };
615 UCHAR ucConfig; 738 UCHAR ucConfig;
616 /* [0]=0: 4 lane Link, */ 739 // [0]=0: 4 lane Link,
617 /* =1: 8 lane Link ( Dual Links TMDS ) */ 740 // =1: 8 lane Link ( Dual Links TMDS )
618 /* [1]=0: InCoherent mode */ 741 // [1]=0: InCoherent mode
619 /* =1: Coherent Mode */ 742 // =1: Coherent Mode
620 /* [2] Link Select: */ 743 // [2] Link Select:
621 /* =0: PHY linkA if bfLane<3 */ 744 // =0: PHY linkA if bfLane<3
622 /* =1: PHY linkB if bfLanes<3 */ 745 // =1: PHY linkB if bfLanes<3
623 /* =0: PHY linkA+B if bfLanes=3 */ 746 // =0: PHY linkA+B if bfLanes=3
624 /* [5:4]PCIE lane Sel */ 747 // [5:4]PCIE lane Sel
625 /* =0: lane 0~3 or 0~7 */ 748 // =0: lane 0~3 or 0~7
626 /* =1: lane 4~7 */ 749 // =1: lane 4~7
627 /* =2: lane 8~11 or 8~15 */ 750 // =2: lane 8~11 or 8~15
628 /* =3: lane 12~15 */ 751 // =3: lane 12~15
629 UCHAR ucAction; /* =0: turn off encoder */ 752 UCHAR ucAction; // =0: turn off encoder
630 /* =1: turn on encoder */ 753 // =1: turn on encoder
631 UCHAR ucReserved[4]; 754 UCHAR ucReserved[4];
632} DIG_TRANSMITTER_CONTROL_PARAMETERS; 755}DIG_TRANSMITTER_CONTROL_PARAMETERS;
633 756
634#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS 757#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS
635 758
636/* ucInitInfo */ 759//ucInitInfo
637#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff 760#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff
638 761
639/* ucConfig */ 762//ucConfig
640#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01 763#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01
641#define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02 764#define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02
642#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04 765#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04
643#define ATOM_TRANSMITTER_CONFIG_LINKA 0x00 766#define ATOM_TRANSMITTER_CONFIG_LINKA 0x00
644#define ATOM_TRANSMITTER_CONFIG_LINKB 0x04 767#define ATOM_TRANSMITTER_CONFIG_LINKB 0x04
645#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00 768#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00
646#define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04 769#define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04
647 770
648#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */ 771#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
649#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */ 772#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
650#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */ 773#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
651 774
652#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30 775#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30
653#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00 776#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00
@@ -661,7 +784,7 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
661#define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80 784#define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80
662#define ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0 785#define ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0
663 786
664/* ucAction */ 787//ucAction
665#define ATOM_TRANSMITTER_ACTION_DISABLE 0 788#define ATOM_TRANSMITTER_ACTION_DISABLE 0
666#define ATOM_TRANSMITTER_ACTION_ENABLE 1 789#define ATOM_TRANSMITTER_ACTION_ENABLE 1
667#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2 790#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2
@@ -674,93 +797,168 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
674#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9 797#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9
675#define ATOM_TRANSMITTER_ACTION_SETUP 10 798#define ATOM_TRANSMITTER_ACTION_SETUP 10
676#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11 799#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11
800#define ATOM_TRANSMITTER_ACTION_POWER_ON 12
801#define ATOM_TRANSMITTER_ACTION_POWER_OFF 13
677 802
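Illustration (editorial, not part of the header): the transmitter table pairs a ucConfig byte built from the ATOM_TRANSMITTER_CONFIG_* bits with a ucAction from the list above. A hedged sketch composing the config for a coherent, single-link setup on link B fed by the DIG2 encoder; the combination is illustrative only:

typedef unsigned char UCHAR;

#define ATOM_TRANSMITTER_CONFIG_COHERENT      0x02
#define ATOM_TRANSMITTER_CONFIG_LINKB         0x04
#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER  0x08
#define ATOM_TRANSMITTER_ACTION_ENABLE        1

int main(void)
{
    UCHAR ucConfig = ATOM_TRANSMITTER_CONFIG_COHERENT
                   | ATOM_TRANSMITTER_CONFIG_LINKB
                   | ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
    UCHAR ucAction = ATOM_TRANSMITTER_ACTION_ENABLE;

    // ucConfig and ucAction would be placed into
    // DIG_TRANSMITTER_CONTROL_PARAMETERS before running the
    // UNIPHYTransmitterControl table; per the comments above, the
    // encoder-select bit is only used with ATOM_TRANSMITTER_ACTION_ENABLE.
    (void)ucConfig;
    (void)ucAction;
    return 0;
}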
678/* Following are used for DigTransmitterControlTable ver1.2 */ 803// Following are used for DigTransmitterControlTable ver1.2
679typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 { 804typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2
805{
680#if ATOM_BIG_ENDIAN 806#if ATOM_BIG_ENDIAN
681 UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */ 807 UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
682 /* =1 Dig Transmitter 2 ( Uniphy CD ) */ 808 // =1 Dig Transmitter 2 ( Uniphy CD )
683 /* =2 Dig Transmitter 3 ( Uniphy EF ) */ 809 // =2 Dig Transmitter 3 ( Uniphy EF )
684 UCHAR ucReserved:1; 810 UCHAR ucReserved:1;
685 UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */ 811 UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector
686 UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */ 812 UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
687 UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */ 813 UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
688 /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */ 814 // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
689 815
690 UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */ 816 UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
691 UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */ 817 UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
692#else 818#else
693 UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */ 819 UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
694 UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */ 820 UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
695 UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */ 821 UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
696 /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */ 822 // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
697 UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */ 823 UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
698 UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */ 824 UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector
699 UCHAR ucReserved:1; 825 UCHAR ucReserved:1;
700 UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */ 826 UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
701 /* =1 Dig Transmitter 2 ( Uniphy CD ) */ 827 // =1 Dig Transmitter 2 ( Uniphy CD )
702 /* =2 Dig Transmitter 3 ( Uniphy EF ) */ 828 // =2 Dig Transmitter 3 ( Uniphy EF )
703#endif 829#endif
704} ATOM_DIG_TRANSMITTER_CONFIG_V2; 830}ATOM_DIG_TRANSMITTER_CONFIG_V2;
705 831
706/* ucConfig */ 832//ucConfig
707/* Bit0 */ 833//Bit0
708#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01 834#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01
709 835
710/* Bit1 */ 836//Bit1
711#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02 837#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02
712 838
713/* Bit2 */ 839//Bit2
714#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04 840#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04
715#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00 841#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00
716#define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04 842#define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04
717 843
718/* Bit3 */ 844// Bit3
719#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08 845#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08
720#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */ 846#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
721#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */ 847#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
722 848
723/* Bit4 */ 849// Bit4
724#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10 850#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10
725 851
726/* Bit7:6 */ 852// Bit7:6
727#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0 853#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0
728#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 /* AB */ 854#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 //AB
729#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 /* CD */ 855#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 //CD
730#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 /* EF */ 856#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 //EF
731 857
732typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 { 858typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2
733 union { 859{
734 USHORT usPixelClock; /* in 10KHz; for bios convenient */ 860 union
735 USHORT usInitInfo; /* when init uniphy,lower 8bit is used for connector type defined in objectid.h */ 861 {
736 ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */ 862 USHORT usPixelClock; // in 10KHz; for bios convenient
863 USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h
864 ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
737 }; 865 };
738 ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig; 866 ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
739 UCHAR ucAction; /* define as ATOM_TRANSMITER_ACTION_XXX */ 867 UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
740 UCHAR ucReserved[4]; 868 UCHAR ucReserved[4];
741} DIG_TRANSMITTER_CONTROL_PARAMETERS_V2; 869}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
742 870
743/****************************************************************************/ 871typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
744/* Structures used by DAC1OuputControlTable */ 872{
745/* DAC2OuputControlTable */ 873#if ATOM_BIG_ENDIAN
746/* LVTMAOutputControlTable (Before DEC30) */ 874 UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
747/* TMDSAOutputControlTable (Before DEC30) */ 875 // =1 Dig Transmitter 2 ( Uniphy CD )
748/****************************************************************************/ 876 // =2 Dig Transmitter 3 ( Uniphy EF )
749typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS { 877 UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
750 UCHAR ucAction; /* Possible input:ATOM_ENABLE||ATOMDISABLE */ 878 UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
751 /* When the display is LCD, in addition to above: */ 879 UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
752 /* ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| */ 880 // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
753 /* ATOM_LCD_SELFTEST_STOP */ 881 UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
882 UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
883#else
884 UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
885 UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
886 UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
887 // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
888 UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
889 UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
890 UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
891 // =1 Dig Transmitter 2 ( Uniphy CD )
892 // =2 Dig Transmitter 3 ( Uniphy EF )
893#endif
894}ATOM_DIG_TRANSMITTER_CONFIG_V3;
754 895
755 UCHAR aucPadding[3]; /* padding to DWORD aligned */ 896typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
756} DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS; 897{
898 union
899 {
900 USHORT usPixelClock; // in 10KHz; for bios convenient
901 USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h
902 ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
903 };
904 ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig;
905 UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
906 UCHAR ucLaneNum;
907 UCHAR ucReserved[3];
908}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3;
909
910//ucConfig
911//Bit0
912#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR 0x01
913
914//Bit1
915#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT 0x02
916
917//Bit2
918#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK 0x04
919#define ATOM_TRANSMITTER_CONFIG_V3_LINKA 0x00
920#define ATOM_TRANSMITTER_CONFIG_V3_LINKB 0x04
921
922// Bit3
923#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK 0x08
924#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER 0x00
925#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER 0x08
926
927// Bit5:4
928#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK 0x30
929#define ATOM_TRASMITTER_CONFIG_V3_P1PLL 0x00
930#define ATOM_TRASMITTER_CONFIG_V3_P2PLL 0x10
931#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT 0x20
932
933// Bit7:6
934#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK 0xC0
935#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1 0x00 //AB
936#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD
937#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF
938
939/****************************************************************************/
940// Structures used by DAC1OuputControlTable
941// DAC2OuputControlTable
942// LVTMAOutputControlTable (Before DEC30)
943// TMDSAOutputControlTable (Before DEC30)
944/****************************************************************************/
945typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
946{
947 UCHAR ucAction; // Possible input:ATOM_ENABLE||ATOMDISABLE
948 // When the display is LCD, in addition to above:
949 // ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START||
950 // ATOM_LCD_SELFTEST_STOP
951
952 UCHAR aucPadding[3]; // padding to DWORD aligned
953}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
757 954
758#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 955#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
759 956
760#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 957
958#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
761#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION 959#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
762 960
763#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 961#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
764#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION 962#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
765 963
766#define CV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 964#define CV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
@@ -782,397 +980,550 @@ typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS {
782#define DVO_OUTPUT_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PS_ALLOCATION 980#define DVO_OUTPUT_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
783#define DVO_OUTPUT_CONTROL_PARAMETERS_V3 DIG_TRANSMITTER_CONTROL_PARAMETERS 981#define DVO_OUTPUT_CONTROL_PARAMETERS_V3 DIG_TRANSMITTER_CONTROL_PARAMETERS
784 982
785/****************************************************************************/ 983/****************************************************************************/
786/* Structures used by BlankCRTCTable */ 984// Structures used by BlankCRTCTable
787/****************************************************************************/ 985/****************************************************************************/
788typedef struct _BLANK_CRTC_PARAMETERS { 986typedef struct _BLANK_CRTC_PARAMETERS
789 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 987{
790 UCHAR ucBlanking; /* ATOM_BLANKING or ATOM_BLANKINGOFF */ 988 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
791 USHORT usBlackColorRCr; 989 UCHAR ucBlanking; // ATOM_BLANKING or ATOM_BLANKINGOFF
792 USHORT usBlackColorGY; 990 USHORT usBlackColorRCr;
793 USHORT usBlackColorBCb; 991 USHORT usBlackColorGY;
794} BLANK_CRTC_PARAMETERS; 992 USHORT usBlackColorBCb;
993}BLANK_CRTC_PARAMETERS;
795#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS 994#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS
796 995
797/****************************************************************************/ 996/****************************************************************************/
798/* Structures used by EnableCRTCTable */ 997// Structures used by EnableCRTCTable
799/* EnableCRTCMemReqTable */ 998// EnableCRTCMemReqTable
800/* UpdateCRTC_DoubleBufferRegistersTable */ 999// UpdateCRTC_DoubleBufferRegistersTable
801/****************************************************************************/ 1000/****************************************************************************/
802typedef struct _ENABLE_CRTC_PARAMETERS { 1001typedef struct _ENABLE_CRTC_PARAMETERS
803 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 1002{
804 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 1003 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
805 UCHAR ucPadding[2]; 1004 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
806} ENABLE_CRTC_PARAMETERS; 1005 UCHAR ucPadding[2];
1006}ENABLE_CRTC_PARAMETERS;
807#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS 1007#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS
808 1008
809/****************************************************************************/ 1009/****************************************************************************/
810/* Structures used by SetCRTC_OverScanTable */ 1010// Structures used by SetCRTC_OverScanTable
811/****************************************************************************/ 1011/****************************************************************************/
812typedef struct _SET_CRTC_OVERSCAN_PARAMETERS { 1012typedef struct _SET_CRTC_OVERSCAN_PARAMETERS
813 USHORT usOverscanRight; /* right */ 1013{
814 USHORT usOverscanLeft; /* left */ 1014 USHORT usOverscanRight; // right
815 USHORT usOverscanBottom; /* bottom */ 1015 USHORT usOverscanLeft; // left
816 USHORT usOverscanTop; /* top */ 1016 USHORT usOverscanBottom; // bottom
817 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 1017 USHORT usOverscanTop; // top
818 UCHAR ucPadding[3]; 1018 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
819} SET_CRTC_OVERSCAN_PARAMETERS; 1019 UCHAR ucPadding[3];
1020}SET_CRTC_OVERSCAN_PARAMETERS;
820#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS 1021#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS
821 1022
822/****************************************************************************/ 1023/****************************************************************************/
823/* Structures used by SetCRTC_ReplicationTable */ 1024// Structures used by SetCRTC_ReplicationTable
824/****************************************************************************/ 1025/****************************************************************************/
825typedef struct _SET_CRTC_REPLICATION_PARAMETERS { 1026typedef struct _SET_CRTC_REPLICATION_PARAMETERS
826 UCHAR ucH_Replication; /* horizontal replication */ 1027{
827 UCHAR ucV_Replication; /* vertical replication */ 1028 UCHAR ucH_Replication; // horizontal replication
828 UCHAR usCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 1029 UCHAR ucV_Replication; // vertical replication
829 UCHAR ucPadding; 1030 UCHAR usCRTC; // ATOM_CRTC1 or ATOM_CRTC2
830} SET_CRTC_REPLICATION_PARAMETERS; 1031 UCHAR ucPadding;
1032}SET_CRTC_REPLICATION_PARAMETERS;
831#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS 1033#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS
832 1034
833/****************************************************************************/ 1035/****************************************************************************/
834/* Structures used by SelectCRTC_SourceTable */ 1036// Structures used by SelectCRTC_SourceTable
835/****************************************************************************/ 1037/****************************************************************************/
836typedef struct _SELECT_CRTC_SOURCE_PARAMETERS { 1038typedef struct _SELECT_CRTC_SOURCE_PARAMETERS
837 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 1039{
838 UCHAR ucDevice; /* ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... */ 1040 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
839 UCHAR ucPadding[2]; 1041 UCHAR ucDevice; // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|....
840} SELECT_CRTC_SOURCE_PARAMETERS; 1042 UCHAR ucPadding[2];
1043}SELECT_CRTC_SOURCE_PARAMETERS;
841#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS 1044#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS
842 1045
843typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 { 1046typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2
844 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 1047{
845 UCHAR ucEncoderID; /* DAC1/DAC2/TVOUT/DIG1/DIG2/DVO */ 1048 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
846 UCHAR ucEncodeMode; /* Encoding mode, only valid when using DIG1/DIG2/DVO */ 1049 UCHAR ucEncoderID; // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
847 UCHAR ucPadding; 1050 UCHAR ucEncodeMode; // Encoding mode, only valid when using DIG1/DIG2/DVO
848} SELECT_CRTC_SOURCE_PARAMETERS_V2; 1051 UCHAR ucPadding;
849 1052}SELECT_CRTC_SOURCE_PARAMETERS_V2;
850/* ucEncoderID */ 1053
851/* #define ASIC_INT_DAC1_ENCODER_ID 0x00 */ 1054//ucEncoderID
852/* #define ASIC_INT_TV_ENCODER_ID 0x02 */ 1055//#define ASIC_INT_DAC1_ENCODER_ID 0x00
853/* #define ASIC_INT_DIG1_ENCODER_ID 0x03 */ 1056//#define ASIC_INT_TV_ENCODER_ID 0x02
854/* #define ASIC_INT_DAC2_ENCODER_ID 0x04 */ 1057//#define ASIC_INT_DIG1_ENCODER_ID 0x03
855/* #define ASIC_EXT_TV_ENCODER_ID 0x06 */ 1058//#define ASIC_INT_DAC2_ENCODER_ID 0x04
856/* #define ASIC_INT_DVO_ENCODER_ID 0x07 */ 1059//#define ASIC_EXT_TV_ENCODER_ID 0x06
857/* #define ASIC_INT_DIG2_ENCODER_ID 0x09 */ 1060//#define ASIC_INT_DVO_ENCODER_ID 0x07
858/* #define ASIC_EXT_DIG_ENCODER_ID 0x05 */ 1061//#define ASIC_INT_DIG2_ENCODER_ID 0x09
859 1062//#define ASIC_EXT_DIG_ENCODER_ID 0x05
860/* ucEncodeMode */ 1063
861/* #define ATOM_ENCODER_MODE_DP 0 */ 1064//ucEncodeMode
862/* #define ATOM_ENCODER_MODE_LVDS 1 */ 1065//#define ATOM_ENCODER_MODE_DP 0
863/* #define ATOM_ENCODER_MODE_DVI 2 */ 1066//#define ATOM_ENCODER_MODE_LVDS 1
864/* #define ATOM_ENCODER_MODE_HDMI 3 */ 1067//#define ATOM_ENCODER_MODE_DVI 2
865/* #define ATOM_ENCODER_MODE_SDVO 4 */ 1068//#define ATOM_ENCODER_MODE_HDMI 3
866/* #define ATOM_ENCODER_MODE_TV 13 */ 1069//#define ATOM_ENCODER_MODE_SDVO 4
867/* #define ATOM_ENCODER_MODE_CV 14 */ 1070//#define ATOM_ENCODER_MODE_TV 13
868/* #define ATOM_ENCODER_MODE_CRT 15 */ 1071//#define ATOM_ENCODER_MODE_CV 14
869 1072//#define ATOM_ENCODER_MODE_CRT 15
870/****************************************************************************/ 1073
871/* Structures used by SetPixelClockTable */ 1074/****************************************************************************/
872/* GetPixelClockTable */ 1075// Structures used by SetPixelClockTable
873/****************************************************************************/ 1076// GetPixelClockTable
874/* Major revision=1., Minor revision=1 */ 1077/****************************************************************************/
875typedef struct _PIXEL_CLOCK_PARAMETERS { 1078//Major revision=1., Minor revision=1
876 USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ 1079typedef struct _PIXEL_CLOCK_PARAMETERS
877 /* 0 means disable PPLL */ 1080{
878 USHORT usRefDiv; /* Reference divider */ 1081 USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
879 USHORT usFbDiv; /* feedback divider */ 1082 // 0 means disable PPLL
880 UCHAR ucPostDiv; /* post divider */ 1083 USHORT usRefDiv; // Reference divider
881 UCHAR ucFracFbDiv; /* fractional feedback divider */ 1084 USHORT usFbDiv; // feedback divider
882 UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ 1085 UCHAR ucPostDiv; // post divider
883 UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */ 1086 UCHAR ucFracFbDiv; // fractional feedback divider
884 UCHAR ucCRTC; /* Which CRTC uses this Ppll */ 1087 UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2
885 UCHAR ucPadding; 1088 UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER
886} PIXEL_CLOCK_PARAMETERS; 1089 UCHAR ucCRTC; // Which CRTC uses this Ppll
887 1090 UCHAR ucPadding;
888/* Major revision=1., Minor revision=2, add ucMiscIfno */ 1091}PIXEL_CLOCK_PARAMETERS;
889/* ucMiscInfo: */ 1092
1093//Major revision=1., Minor revision=2, add ucMiscIfno
1094//ucMiscInfo:
890#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1 1095#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
891#define MISC_DEVICE_INDEX_MASK 0xF0 1096#define MISC_DEVICE_INDEX_MASK 0xF0
892#define MISC_DEVICE_INDEX_SHIFT 4 1097#define MISC_DEVICE_INDEX_SHIFT 4
893 1098
894typedef struct _PIXEL_CLOCK_PARAMETERS_V2 { 1099typedef struct _PIXEL_CLOCK_PARAMETERS_V2
895 USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ 1100{
896 /* 0 means disable PPLL */ 1101 USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
897 USHORT usRefDiv; /* Reference divider */ 1102 // 0 means disable PPLL
898 USHORT usFbDiv; /* feedback divider */ 1103 USHORT usRefDiv; // Reference divider
899 UCHAR ucPostDiv; /* post divider */ 1104 USHORT usFbDiv; // feedback divider
900 UCHAR ucFracFbDiv; /* fractional feedback divider */ 1105 UCHAR ucPostDiv; // post divider
901 UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ 1106 UCHAR ucFracFbDiv; // fractional feedback divider
902 UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */ 1107 UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2
903 UCHAR ucCRTC; /* Which CRTC uses this Ppll */ 1108 UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER
904 UCHAR ucMiscInfo; /* Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog */ 1109 UCHAR ucCRTC; // Which CRTC uses this Ppll
905} PIXEL_CLOCK_PARAMETERS_V2; 1110 UCHAR ucMiscInfo; // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog
906 1111}PIXEL_CLOCK_PARAMETERS_V2;
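Illustrative sketch, not part of this patch: how a caller might pack PIXEL_CLOCK_PARAMETERS_V2.ucMiscInfo from the MISC_* definitions above. The helper name and the "atombios.h" include are assumptions made only for the example.

#include <string.h>
#include "atombios.h"	/* assumed: the header this hunk modifies */

static void pclk_v2_fill_misc(PIXEL_CLOCK_PARAMETERS_V2 *args,
			      unsigned int device_index, int force_reprog)
{
	memset(args, 0, sizeof(*args));
	/* bit[7:4]: device index, bit[0]: force re-program of the pixel clock */
	args->ucMiscInfo = (UCHAR)((device_index << MISC_DEVICE_INDEX_SHIFT) &
				   MISC_DEVICE_INDEX_MASK);
	if (force_reprog)
		args->ucMiscInfo |= MISC_FORCE_REPROG_PIXEL_CLOCK;
}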
907/* Major revision=1., Minor revision=3, structure/definition change */ 1112
908/* ucEncoderMode: */ 1113//Major revision=1., Minor revision=3, structure/definition change
909/* ATOM_ENCODER_MODE_DP */ 1114//ucEncoderMode:
910/* ATOM_ENCODER_MODE_LVDS */ 1115//ATOM_ENCODER_MODE_DP
911/* ATOM_ENCODER_MODE_DVI */ 1116//ATOM_ENCODER_MODE_LVDS
912/* ATOM_ENCODER_MODE_HDMI */ 1117//ATOM_ENCODER_MODE_DVI
913/* ATOM_ENCODER_MODE_SDVO */ 1118//ATOM_ENCODER_MODE_HDMI
914/* ATOM_ENCODER_MODE_TV 13 */ 1119//ATOM_ENCODER_MODE_SDVO
915/* ATOM_ENCODER_MODE_CV 14 */ 1120//ATOM_ENCODER_MODE_TV 13
916/* ATOM_ENCODER_MODE_CRT 15 */ 1121//ATOM_ENCODER_MODE_CV 14
917 1122//ATOM_ENCODER_MODE_CRT 15
918/* ucDVOConfig */ 1123
919/* #define DVO_ENCODER_CONFIG_RATE_SEL 0x01 */ 1124//ucDVOConfig
920/* #define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 */ 1125//#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
921/* #define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 */ 1126//#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
922/* #define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c */ 1127//#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
923/* #define DVO_ENCODER_CONFIG_LOW12BIT 0x00 */ 1128//#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c
924/* #define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 */ 1129//#define DVO_ENCODER_CONFIG_LOW12BIT 0x00
925/* #define DVO_ENCODER_CONFIG_24BIT 0x08 */ 1130//#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
926 1131//#define DVO_ENCODER_CONFIG_24BIT 0x08
927/* ucMiscInfo: also changed, see below */ 1132
1133//ucMiscInfo: also changed, see below
928#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01 1134#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01
929#define PIXEL_CLOCK_MISC_VGA_MODE 0x02 1135#define PIXEL_CLOCK_MISC_VGA_MODE 0x02
930#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04 1136#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04
931#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00 1137#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00
932#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04 1138#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04
933#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08 1139#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08
1140#define PIXEL_CLOCK_MISC_REF_DIV_SRC 0x10
1141// V1.4 for RoadRunner
1142#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10
1143#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20
934 1144
935typedef struct _PIXEL_CLOCK_PARAMETERS_V3 { 1145typedef struct _PIXEL_CLOCK_PARAMETERS_V3
936 USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ 1146{
937 /* 0 means disable PPLL. For VGA PPLL,make sure this value is not 0. */ 1147 USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
938 USHORT usRefDiv; /* Reference divider */ 1148 // 0 means disable PPLL. For VGA PPLL,make sure this value is not 0.
939 USHORT usFbDiv; /* feedback divider */ 1149 USHORT usRefDiv; // Reference divider
940 UCHAR ucPostDiv; /* post divider */ 1150 USHORT usFbDiv; // feedback divider
941 UCHAR ucFracFbDiv; /* fractional feedback divider */ 1151 UCHAR ucPostDiv; // post divider
942 UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ 1152 UCHAR ucFracFbDiv; // fractional feedback divider
943 UCHAR ucTransmitterId; /* graphic encoder id defined in objectId.h */ 1153 UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2
944 union { 1154 UCHAR ucTransmitterId; // graphic encoder id defined in objectId.h
945 UCHAR ucEncoderMode; /* encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ */ 1155 union
946 UCHAR ucDVOConfig; /* when use DVO, need to know SDR/DDR, 12bit or 24bit */ 1156 {
1157 UCHAR ucEncoderMode; // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/
1158 UCHAR ucDVOConfig; // when use DVO, need to know SDR/DDR, 12bit or 24bit
947 }; 1159 };
948 UCHAR ucMiscInfo; /* bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel */ 1160 UCHAR ucMiscInfo; // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel
949 /* bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source */ 1161 // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source
950} PIXEL_CLOCK_PARAMETERS_V3; 1162 // bit[4]=0:use XTALIN as the source of reference divider,=1 use the pre-defined clock as the source of reference divider
1163}PIXEL_CLOCK_PARAMETERS_V3;
951 1164
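Illustrative sketch, not part of this patch: filling the v1.3 parameter block with the PIXEL_CLOCK_MISC_* bits defined above. The divider values are placeholders, and ATOM_PPLL1 / ATOM_ENCODER_MODE_DVI are assumed to be defined elsewhere in this header.

#include <string.h>
#include "atombios.h"	/* assumed header name */

static void pclk_v3_prepare(PIXEL_CLOCK_PARAMETERS_V3 *args, USHORT pclk_10khz)
{
	memset(args, 0, sizeof(*args));
	args->usPixelClock  = pclk_10khz;	/* 10 kHz units, 0 disables the PPLL */
	args->usRefDiv      = 2;		/* placeholder divider values only */
	args->usFbDiv       = 100;
	args->ucPostDiv     = 4;
	args->ucPpll        = ATOM_PPLL1;	/* assumed define from this header */
	args->ucEncoderMode = ATOM_ENCODER_MODE_DVI;
	/* bit[0]: force program, bit[2]: CRTC select, bit[3]: engine clock as dispclk source */
	args->ucMiscInfo = PIXEL_CLOCK_MISC_FORCE_PROG_PPLL |
			   PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
}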
952#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2 1165#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2
953#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST 1166#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST
954 1167
955/****************************************************************************/ 1168typedef struct _PIXEL_CLOCK_PARAMETERS_V5
956/* Structures used by AdjustDisplayPllTable */ 1169{
957/****************************************************************************/ 1170 UCHAR ucCRTC; // ATOM_CRTC1~6, indicate the CRTC controller to
958typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS { 1171 // drive the pixel clock. not used for DCPLL case.
1172 union{
1173 UCHAR ucReserved;
1174 UCHAR ucFracFbDiv; // [gphan] temporary to prevent build problem. remove it after driver code is changed.
1175 };
1176 USHORT usPixelClock; // target the pixel clock to drive the CRTC timing
1177 // 0 means disable PPLL/DCPLL.
1178 USHORT usFbDiv; // feedback divider integer part.
1179 UCHAR ucPostDiv; // post divider.
1180 UCHAR ucRefDiv; // Reference divider
1181 UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
1182 UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
1183 // indicate which graphic encoder will be used.
1184 UCHAR ucEncoderMode; // Encoder mode:
1185 UCHAR ucMiscInfo; // bit[0]= Force program PPLL
1186 // bit[1]= when VGA timing is used.
1187 // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
1188 // bit[4]= RefClock source for PPLL.
1189 // =0: XTALIN (default mode)
1190 // =1: other external clock source, which is pre-defined
1191 // by VBIOS depend on the feature required.
1192 // bit[7:5]: reserved.
1193 ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
1194
1195}PIXEL_CLOCK_PARAMETERS_V5;
1196
1197#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL 0x01
1198#define PIXEL_CLOCK_V5_MISC_VGA_MODE 0x02
1199#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK 0x0c
1200#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP 0x00
1201#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP 0x04
1202#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08
1203#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10
1204
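Illustrative sketch, not part of this patch: selecting the HDMI panel bit depth in PIXEL_CLOCK_PARAMETERS_V5.ucMiscInfo with the V5 mask and values defined above. The helper name is hypothetical.

#include "atombios.h"	/* assumed header name */

static void pclk_v5_set_hdmi_bpp(PIXEL_CLOCK_PARAMETERS_V5 *args, int bpp)
{
	args->ucMiscInfo &= ~PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK;
	switch (bpp) {
	case 30:
		args->ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
		break;
	case 32:
		args->ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
		break;
	default:
		args->ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;	/* 24 bpp encodes as 0 */
		break;
	}
}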
1205typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
1206{
1207 PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
1208}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2;
1209
1210typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2
1211{
1212 UCHAR ucStatus;
1213 UCHAR ucRefDivSrc; // =1: reference clock source from XTALIN, =0: source from PCIE ref clock
1214 UCHAR ucReserved[2];
1215}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2;
1216
1217typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
1218{
1219 PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
1220}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
1221
1222/****************************************************************************/
1223// Structures used by AdjustDisplayPllTable
1224/****************************************************************************/
1225typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
1226{
959 USHORT usPixelClock; 1227 USHORT usPixelClock;
960 UCHAR ucTransmitterID; 1228 UCHAR ucTransmitterID;
961 UCHAR ucEncodeMode; 1229 UCHAR ucEncodeMode;
962 union { 1230 union
963 UCHAR ucDVOConfig; /* if DVO, need passing link rate and output 12bitlow or 24bit */ 1231 {
964 UCHAR ucConfig; /* if none DVO, not defined yet */ 1232 UCHAR ucDVOConfig; //if DVO, need passing link rate and output 12bitlow or 24bit
1233 UCHAR ucConfig; //if none DVO, not defined yet
965 }; 1234 };
966 UCHAR ucReserved[3]; 1235 UCHAR ucReserved[3];
967} ADJUST_DISPLAY_PLL_PARAMETERS; 1236}ADJUST_DISPLAY_PLL_PARAMETERS;
968 1237
969#define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10 1238#define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10
970
971#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS 1239#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS
972 1240
973/****************************************************************************/ 1241typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
974/* Structures used by EnableYUVTable */ 1242{
975/****************************************************************************/ 1243 USHORT usPixelClock; // target pixel clock
976typedef struct _ENABLE_YUV_PARAMETERS { 1244 UCHAR ucTransmitterID; // transmitter id defined in objectid.h
977 UCHAR ucEnable; /* ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) */ 1245 UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI
978 UCHAR ucCRTC; /* Which CRTC needs this YUV or RGB format */ 1246 UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
979 UCHAR ucPadding[2]; 1247 UCHAR ucReserved[3];
980} ENABLE_YUV_PARAMETERS; 1248}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
1249
1250// usDispPllConfig v1.2 for RoadRunner
1251#define DISPPLL_CONFIG_DVO_RATE_SEL 0x0001 // need only when ucTransmitterID = DVO
1252#define DISPPLL_CONFIG_DVO_DDR_SPEED 0x0000 // need only when ucTransmitterID = DVO
1253#define DISPPLL_CONFIG_DVO_SDR_SPEED 0x0001 // need only when ucTransmitterID = DVO
1254#define DISPPLL_CONFIG_DVO_OUTPUT_SEL 0x000c // need only when ucTransmitterID = DVO
1255#define DISPPLL_CONFIG_DVO_LOW12BIT 0x0000 // need only when ucTransmitterID = DVO
1256#define DISPPLL_CONFIG_DVO_UPPER12BIT 0x0004 // need only when ucTransmitterID = DVO
1257#define DISPPLL_CONFIG_DVO_24BIT 0x0008 // need only when ucTransmitterID = DVO
1258#define DISPPLL_CONFIG_SS_ENABLE 0x0010 // Only used when ucEncoderMode = DP or LVDS
1259#define DISPPLL_CONFIG_COHERENT_MODE 0x0020 // Only used when ucEncoderMode = TMDS or HDMI
1260#define DISPPLL_CONFIG_DUAL_LINK 0x0040 // Only used when ucEncoderMode = TMDS or LVDS
1261
1262
1263typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3
1264{
1265 ULONG ulDispPllFreq; // return display PPLL freq which is used to generate the pixclock, and related idclk, symclk etc
1266 UCHAR ucRefDiv; // if non-zero, it is used to calculate the other ppll parameters fb_divider and post_div ( if they are not given )
1267 UCHAR ucPostDiv; // if non-zero, it is used to calculate the other ppll parameter fb_divider
1268 UCHAR ucReserved[2];
1269}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3;
1270
1271typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3
1272{
1273 union
1274 {
1275 ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 sInput;
1276 ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput;
1277 };
1278} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3;
1279
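Illustrative sketch, not part of this patch: the V3 allocation overlays input and output, so a caller fills sInput, runs the AdjustDisplayPll command table (the execution call is omitted here), then reads sOutput. The helper and chosen config bit are assumptions.

#include <string.h>
#include "atombios.h"	/* assumed header name */

static ULONG adjust_dpll_v3(ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 *ps,
			    USHORT pclk_10khz, UCHAR transmitter_id, UCHAR mode)
{
	memset(ps, 0, sizeof(*ps));
	ps->sInput.usPixelClock    = pclk_10khz;
	ps->sInput.ucTransmitterID = transmitter_id;	/* id from objectid.h */
	ps->sInput.ucEncodeMode    = mode;
	ps->sInput.ucDispPllConfig = DISPPLL_CONFIG_COHERENT_MODE; /* e.g. coherent TMDS/HDMI */

	/* ... execute the AdjustDisplayPll command table on ps here ... */

	return ps->sOutput.ulDispPllFreq;	/* output overlays the input union */
}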
1280/****************************************************************************/
1281// Structures used by EnableYUVTable
1282/****************************************************************************/
1283typedef struct _ENABLE_YUV_PARAMETERS
1284{
1285 UCHAR ucEnable; // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB)
1286 UCHAR ucCRTC; // Which CRTC needs this YUV or RGB format
1287 UCHAR ucPadding[2];
1288}ENABLE_YUV_PARAMETERS;
981#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS 1289#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
982 1290
983/****************************************************************************/ 1291/****************************************************************************/
984/* Structures used by GetMemoryClockTable */ 1292// Structures used by GetMemoryClockTable
985/****************************************************************************/ 1293/****************************************************************************/
986typedef struct _GET_MEMORY_CLOCK_PARAMETERS { 1294typedef struct _GET_MEMORY_CLOCK_PARAMETERS
987 ULONG ulReturnMemoryClock; /* current memory speed in 10KHz unit */ 1295{
1296 ULONG ulReturnMemoryClock; // current memory speed in 10KHz unit
988} GET_MEMORY_CLOCK_PARAMETERS; 1297} GET_MEMORY_CLOCK_PARAMETERS;
989#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS 1298#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS
990 1299
991/****************************************************************************/ 1300/****************************************************************************/
992/* Structures used by GetEngineClockTable */ 1301// Structures used by GetEngineClockTable
993/****************************************************************************/ 1302/****************************************************************************/
994typedef struct _GET_ENGINE_CLOCK_PARAMETERS { 1303typedef struct _GET_ENGINE_CLOCK_PARAMETERS
995 ULONG ulReturnEngineClock; /* current engine speed in 10KHz unit */ 1304{
1305 ULONG ulReturnEngineClock; // current engine speed in 10KHz unit
996} GET_ENGINE_CLOCK_PARAMETERS; 1306} GET_ENGINE_CLOCK_PARAMETERS;
997#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS 1307#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS
998 1308
999/****************************************************************************/ 1309/****************************************************************************/
1000/* Following Structures and constant may be obsolete */ 1310// Following Structures and constant may be obsolete
1001/****************************************************************************/ 1311/****************************************************************************/
1002/* Maximum 8 bytes, the data read in will be placed in the parameter space. */ 1312//Maximum 8 bytes, the data read in will be placed in the parameter space.
1003/* Read operation is successful when the parameter space is non-zero, otherwise the read operation failed */ 1313//Read operation is successful when the parameter space is non-zero, otherwise the read operation failed
1004typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS { 1314typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
1005 USHORT usPrescale; /* Ratio between Engine clock and I2C clock */ 1315{
1006 USHORT usVRAMAddress; /* Address in Frame Buffer where to place raw EDID */ 1316 USHORT usPrescale; //Ratio between Engine clock and I2C clock
1007 USHORT usStatus; /* When used as output: lower byte EDID checksum, high byte hardware status */ 1317 USHORT usVRAMAddress; //Address in Frame Buffer where to place raw EDID
1008 /* When used as input: lower byte as 'byte to read':currently limited to 128byte or 1byte */ 1318 USHORT usStatus; //When used as output: lower byte EDID checksum, high byte hardware status
1009 UCHAR ucSlaveAddr; /* Read from which slave */ 1319 //When used as input: lower byte as 'byte to read':currently limited to 128byte or 1byte
1010 UCHAR ucLineNumber; /* Read from which HW assisted line */ 1320 UCHAR ucSlaveAddr; //Read from which slave
1011} READ_EDID_FROM_HW_I2C_DATA_PARAMETERS; 1321 UCHAR ucLineNumber; //Read from which HW assisted line
1322}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
1012#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS 1323#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
1013 1324
1325
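Illustrative sketch, not part of this patch: preparing the (possibly obsolete) HW-I2C EDID read block described above. The 0xA0 slave address is the conventional DDC/EDID address and the helper is hypothetical.

#include <string.h>
#include "atombios.h"	/* assumed header name */

static void edid_read_prepare(READ_EDID_FROM_HW_I2C_DATA_PARAMETERS *args,
			      USHORT prescale, USHORT fb_offset, UCHAR hw_line)
{
	memset(args, 0, sizeof(*args));
	args->usPrescale    = prescale;		/* engine clock / I2C clock ratio */
	args->usVRAMAddress = fb_offset;	/* frame-buffer offset for the raw EDID */
	args->usStatus      = 128;		/* on input: bytes to read (128 or 1) */
	args->ucSlaveAddr   = 0xA0;		/* conventional DDC/EDID slave address */
	args->ucLineNumber  = hw_line;		/* HW-assisted I2C line */
}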
1014#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0 1326#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0
1015#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1 1327#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1
1016#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2 1328#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2
1017#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3 1329#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3
1018#define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4 1330#define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4
1019 1331
1020typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS { 1332typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
1021 USHORT usPrescale; /* Ratio between Engine clock and I2C clock */ 1333{
1022 USHORT usByteOffset; /* Write to which byte */ 1334 USHORT usPrescale; //Ratio between Engine clock and I2C clock
1023 /* Upper portion of usByteOffset is Format of data */ 1335 USHORT usByteOffset; //Write to which byte
1024 /* 1bytePS+offsetPS */ 1336 //Upper portion of usByteOffset is Format of data
1025 /* 2bytesPS+offsetPS */ 1337 //1bytePS+offsetPS
1026 /* blockID+offsetPS */ 1338 //2bytesPS+offsetPS
1027 /* blockID+offsetID */ 1339 //blockID+offsetPS
1028 /* blockID+counterID+offsetID */ 1340 //blockID+offsetID
1029 UCHAR ucData; /* PS data1 */ 1341 //blockID+counterID+offsetID
1030 UCHAR ucStatus; /* Status byte 1=success, 2=failure, Also is used as PS data2 */ 1342 UCHAR ucData; //PS data1
1031 UCHAR ucSlaveAddr; /* Write to which slave */ 1343 UCHAR ucStatus; //Status byte 1=success, 2=failure, Also is used as PS data2
1032 UCHAR ucLineNumber; /* Write from which HW assisted line */ 1344 UCHAR ucSlaveAddr; //Write to which slave
1033} WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS; 1345 UCHAR ucLineNumber; //Write from which HW assisted line
1346}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
1034 1347
1035#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS 1348#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
1036 1349
1037typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS { 1350typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
1038 USHORT usPrescale; /* Ratio between Engine clock and I2C clock */ 1351{
1039 UCHAR ucSlaveAddr; /* Write to which slave */ 1352 USHORT usPrescale; //Ratio between Engine clock and I2C clock
1040 UCHAR ucLineNumber; /* Write from which HW assisted line */ 1353 UCHAR ucSlaveAddr; //Write to which slave
1041} SET_UP_HW_I2C_DATA_PARAMETERS; 1354 UCHAR ucLineNumber; //Write from which HW assisted line
1355}SET_UP_HW_I2C_DATA_PARAMETERS;
1356
1042 1357
1043/**************************************************************************/ 1358/**************************************************************************/
1044#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS 1359#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
1045 1360
1046/****************************************************************************/ 1361/****************************************************************************/
1047/* Structures used by PowerConnectorDetectionTable */ 1362// Structures used by PowerConnectorDetectionTable
1048/****************************************************************************/ 1363/****************************************************************************/
1049typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS { 1364typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS
1050 UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */ 1365{
1051 UCHAR ucPwrBehaviorId; 1366 UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected
1052 USHORT usPwrBudget; /* how much power currently boot to in unit of watt */ 1367 UCHAR ucPwrBehaviorId;
1053} POWER_CONNECTOR_DETECTION_PARAMETERS; 1368 USHORT usPwrBudget; //how much power currently boot to in unit of watt
1054 1369}POWER_CONNECTOR_DETECTION_PARAMETERS;
1055typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION { 1370
1056 UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */ 1371typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION
1057 UCHAR ucReserved; 1372{
1058 USHORT usPwrBudget; /* how much power currently boot to in unit of watt */ 1373 UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected
1059 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; 1374 UCHAR ucReserved;
1060} POWER_CONNECTOR_DETECTION_PS_ALLOCATION; 1375 USHORT usPwrBudget; //how much power currently boot to in unit of watt
1376 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
1377}POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
1061 1378
1062/****************************LVDS SS Command Table Definitions**********************/ 1379/****************************LVDS SS Command Table Definitions**********************/
1063 1380
1064/****************************************************************************/ 1381/****************************************************************************/
1065/* Structures used by EnableSpreadSpectrumOnPPLLTable */ 1382// Structures used by EnableSpreadSpectrumOnPPLLTable
1066/****************************************************************************/ 1383/****************************************************************************/
1067typedef struct _ENABLE_LVDS_SS_PARAMETERS { 1384typedef struct _ENABLE_LVDS_SS_PARAMETERS
1068 USHORT usSpreadSpectrumPercentage; 1385{
1069 UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ 1386 USHORT usSpreadSpectrumPercentage;
1070 UCHAR ucSpreadSpectrumStepSize_Delay; /* bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY */ 1387 UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
1071 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 1388 UCHAR ucSpreadSpectrumStepSize_Delay; //bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY
1072 UCHAR ucPadding[3]; 1389 UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
1073} ENABLE_LVDS_SS_PARAMETERS; 1390 UCHAR ucPadding[3];
1074 1391}ENABLE_LVDS_SS_PARAMETERS;
1075/* ucTableFormatRevision=1,ucTableContentRevision=2 */ 1392
1076typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 { 1393//ucTableFormatRevision=1,ucTableContentRevision=2
1077 USHORT usSpreadSpectrumPercentage; 1394typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2
1078 UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ 1395{
1079 UCHAR ucSpreadSpectrumStep; /* */ 1396 USHORT usSpreadSpectrumPercentage;
1080 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 1397 UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
1081 UCHAR ucSpreadSpectrumDelay; 1398 UCHAR ucSpreadSpectrumStep; //
1082 UCHAR ucSpreadSpectrumRange; 1399 UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
1083 UCHAR ucPadding; 1400 UCHAR ucSpreadSpectrumDelay;
1084} ENABLE_LVDS_SS_PARAMETERS_V2; 1401 UCHAR ucSpreadSpectrumRange;
1085 1402 UCHAR ucPadding;
1086/* This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. */ 1403}ENABLE_LVDS_SS_PARAMETERS_V2;
1087typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL { 1404
1088 USHORT usSpreadSpectrumPercentage; 1405//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS.
1089 UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ 1406typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL
1090 UCHAR ucSpreadSpectrumStep; /* */ 1407{
1091 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 1408 USHORT usSpreadSpectrumPercentage;
1092 UCHAR ucSpreadSpectrumDelay; 1409 UCHAR ucSpreadSpectrumType; // Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
1093 UCHAR ucSpreadSpectrumRange; 1410 UCHAR ucSpreadSpectrumStep; //
1094 UCHAR ucPpll; /* ATOM_PPLL1/ATOM_PPLL2 */ 1411 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
1095} ENABLE_SPREAD_SPECTRUM_ON_PPLL; 1412 UCHAR ucSpreadSpectrumDelay;
1413 UCHAR ucSpreadSpectrumRange;
1414 UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2
1415}ENABLE_SPREAD_SPECTRUM_ON_PPLL;
1416
1417typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
1418{
1419 USHORT usSpreadSpectrumPercentage;
1420 UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread.
1421 // Bit[1]: 1-Ext. 0-Int.
1422 // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
1423 // Bits[7:4] reserved
1424 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
1425 USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
1426 USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
1427}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2;
1428
1429#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD 0x00
1430#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD 0x01
1431#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD 0x02
1432#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK 0x0c
1433#define ATOM_PPLL_SS_TYPE_V2_P1PLL 0x00
1434#define ATOM_PPLL_SS_TYPE_V2_P2PLL 0x04
1435#define ATOM_PPLL_SS_TYPE_V2_DCPLL 0x08
1436#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK 0x00FF
1437#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT 0
1438#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00
1439#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8
1096 1440
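Illustrative sketch, not part of this patch: enabling centre-spread spread spectrum on the DCPLL through the V2 block, using the type/amount encodings defined above. ATOM_ENABLE is assumed to be defined elsewhere in this header.

#include <string.h>
#include "atombios.h"	/* assumed header name */

static void dcpll_ss_v2_enable(ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 *args,
			       USHORT percentage, UCHAR fbdiv_amount,
			       UCHAR nfrac_slip, USHORT step)
{
	memset(args, 0, sizeof(*args));
	args->usSpreadSpectrumPercentage = percentage;
	args->ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD |
				     ATOM_PPLL_SS_TYPE_V2_DCPLL;
	args->usSpreadSpectrumAmount =
		((USHORT)fbdiv_amount << ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) |
		(((USHORT)nfrac_slip << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
		 ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK);
	args->usSpreadSpectrumStep = step;	/* SS_STEP_SIZE_DSFRAC */
	args->ucEnable = ATOM_ENABLE;		/* assumed define elsewhere in header */
}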
1097#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL 1441#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
1098 1442
1099/**************************************************************************/ 1443/**************************************************************************/
1100 1444
1101typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION { 1445typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION
1102 PIXEL_CLOCK_PARAMETERS sPCLKInput; 1446{
1103 ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved; /* Caller doesn't need to init this portion */ 1447 PIXEL_CLOCK_PARAMETERS sPCLKInput;
1104} SET_PIXEL_CLOCK_PS_ALLOCATION; 1448 ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion
1449}SET_PIXEL_CLOCK_PS_ALLOCATION;
1105 1450
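Illustrative sketch, not part of this patch: the *_PS_ALLOCATION wrappers pair the caller-visible parameters with a reserved workspace that the VBIOS uses internally, so only sPCLKInput is filled. The helper is hypothetical.

#include <string.h>
#include "atombios.h"	/* assumed header name */

static void set_pclk_prepare(SET_PIXEL_CLOCK_PS_ALLOCATION *ps, USHORT pclk_10khz)
{
	memset(ps, 0, sizeof(*ps));		/* sReserved is VBIOS workspace, left alone */
	ps->sPCLKInput.usPixelClock = pclk_10khz;
	/* the divider fields would be copied from the computed PLL settings here */
}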
1106#define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION 1451#define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION
1107 1452
1108/****************************************************************************/ 1453/****************************************************************************/
1109/* Structures used by ### */ 1454// Structures used by ###
1110/****************************************************************************/ 1455/****************************************************************************/
1111typedef struct _MEMORY_TRAINING_PARAMETERS { 1456typedef struct _MEMORY_TRAINING_PARAMETERS
1112 ULONG ulTargetMemoryClock; /* In 10Khz unit */ 1457{
1113} MEMORY_TRAINING_PARAMETERS; 1458 ULONG ulTargetMemoryClock; //In 10Khz unit
1459}MEMORY_TRAINING_PARAMETERS;
1114#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS 1460#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
1115 1461
1462
1116/****************************LVDS and other encoder command table definitions **********************/ 1463/****************************LVDS and other encoder command table definitions **********************/
1117 1464
1118/****************************************************************************/
1119/* Structures used by LVDSEncoderControlTable (Before DCE30) */
1120/* LVTMAEncoderControlTable (Before DCE30) */
1121/* TMDSAEncoderControlTable (Before DCE30) */
1122/****************************************************************************/
1123typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS {
1124 USHORT usPixelClock; /* in 10KHz; for bios convenient */
1125 UCHAR ucMisc; /* bit0=0: Enable single link */
1126 /* =1: Enable dual link */
1127 /* Bit1=0: 666RGB */
1128 /* =1: 888RGB */
1129 UCHAR ucAction; /* 0: turn off encoder */
1130 /* 1: setup and turn on encoder */
1131} LVDS_ENCODER_CONTROL_PARAMETERS;
1132 1465
1133#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS 1466/****************************************************************************/
1467// Structures used by LVDSEncoderControlTable (Before DCE30)
1468// LVTMAEncoderControlTable (Before DCE30)
1469// TMDSAEncoderControlTable (Before DCE30)
1470/****************************************************************************/
1471typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS
1472{
1473 USHORT usPixelClock; // in 10KHz; for bios convenient
1474 UCHAR ucMisc; // bit0=0: Enable single link
1475 // =1: Enable dual link
1476 // Bit1=0: 666RGB
1477 // =1: 888RGB
1478 UCHAR ucAction; // 0: turn off encoder
1479 // 1: setup and turn on encoder
1480}LVDS_ENCODER_CONTROL_PARAMETERS;
1134 1481
1482#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS
1483
1135#define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS 1484#define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS
1136#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS 1485#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
1137 1486
1138#define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS 1487#define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS
1139#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS 1488#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
1140 1489
1141/* ucTableFormatRevision=1,ucTableContentRevision=2 */
1142typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
1143 USHORT usPixelClock; /* in 10KHz; for bios convenient */
1144 UCHAR ucMisc; /* see PANEL_ENCODER_MISC_xx definitions below */
1145 UCHAR ucAction; /* 0: turn off encoder */
1146 /* 1: setup and turn on encoder */
1147 UCHAR ucTruncate; /* bit0=0: Disable truncate */
1148 /* =1: Enable truncate */
1149 /* bit4=0: 666RGB */
1150 /* =1: 888RGB */
1151 UCHAR ucSpatial; /* bit0=0: Disable spatial dithering */
1152 /* =1: Enable spatial dithering */
1153 /* bit4=0: 666RGB */
1154 /* =1: 888RGB */
1155 UCHAR ucTemporal; /* bit0=0: Disable temporal dithering */
1156 /* =1: Enable temporal dithering */
1157 /* bit4=0: 666RGB */
1158 /* =1: 888RGB */
1159 /* bit5=0: Gray level 2 */
1160 /* =1: Gray level 4 */
1161 UCHAR ucFRC; /* bit4=0: 25FRC_SEL pattern E */
1162 /* =1: 25FRC_SEL pattern F */
1163 /* bit6:5=0: 50FRC_SEL pattern A */
1164 /* =1: 50FRC_SEL pattern B */
1165 /* =2: 50FRC_SEL pattern C */
1166 /* =3: 50FRC_SEL pattern D */
1167 /* bit7=0: 75FRC_SEL pattern E */
1168 /* =1: 75FRC_SEL pattern F */
1169} LVDS_ENCODER_CONTROL_PARAMETERS_V2;
1170 1490
1171#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 1491//ucTableFormatRevision=1,ucTableContentRevision=2
1492typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2
1493{
1494 USHORT usPixelClock; // in 10KHz; for bios convenient
1495 UCHAR ucMisc; // see PANEL_ENCODER_MISC_xx definitions below
1496 UCHAR ucAction; // 0: turn off encoder
1497 // 1: setup and turn on encoder
1498 UCHAR ucTruncate; // bit0=0: Disable truncate
1499 // =1: Enable truncate
1500 // bit4=0: 666RGB
1501 // =1: 888RGB
1502 UCHAR ucSpatial; // bit0=0: Disable spatial dithering
1503 // =1: Enable spatial dithering
1504 // bit4=0: 666RGB
1505 // =1: 888RGB
1506 UCHAR ucTemporal; // bit0=0: Disable temporal dithering
1507 // =1: Enable temporal dithering
1508 // bit4=0: 666RGB
1509 // =1: 888RGB
1510 // bit5=0: Gray level 2
1511 // =1: Gray level 4
1512 UCHAR ucFRC; // bit4=0: 25FRC_SEL pattern E
1513 // =1: 25FRC_SEL pattern F
1514 // bit6:5=0: 50FRC_SEL pattern A
1515 // =1: 50FRC_SEL pattern B
1516 // =2: 50FRC_SEL pattern C
1517 // =3: 50FRC_SEL pattern D
1518 // bit7=0: 75FRC_SEL pattern E
1519 // =1: 75FRC_SEL pattern F
1520}LVDS_ENCODER_CONTROL_PARAMETERS_V2;
1172 1521
1522#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
1523
1173#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 1524#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
1174#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2 1525#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
1175 1526
1176#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2 1527#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
1177#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2 1528#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
1178 1529
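Illustrative sketch, not part of this patch: setting the V2 dithering fields according to the bit layout documented in the comments above (bit0 = enable, bit4 = 888RGB). Raw bit values are used here rather than the PANEL_ENCODER_* names defined further down.

#include "atombios.h"	/* assumed header name */

static void lvds_v2_spatial_dither_888(LVDS_ENCODER_CONTROL_PARAMETERS_V2 *args)
{
	args->ucSpatial  = 0x01 | 0x10;	/* bit0: enable spatial dithering, bit4: 888RGB */
	args->ucTemporal = 0x00;	/* temporal dithering off */
	args->ucTruncate = 0x00;	/* truncation off */
}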
@@ -1185,38 +1536,42 @@ typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
1185#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3 1536#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
1186#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3 1537#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
1187 1538
1188/****************************************************************************/ 1539/****************************************************************************/
1189/* Structures used by ### */ 1540// Structures used by ###
1190/****************************************************************************/ 1541/****************************************************************************/
1191typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS { 1542typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS
1192 UCHAR ucEnable; /* Enable or Disable External TMDS encoder */ 1543{
1193 UCHAR ucMisc; /* Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} */ 1544 UCHAR ucEnable; // Enable or Disable External TMDS encoder
1194 UCHAR ucPadding[2]; 1545 UCHAR ucMisc; // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB}
1195} ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS; 1546 UCHAR ucPadding[2];
1196 1547}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
1197typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION { 1548
1198 ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder; 1549typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION
1199 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */ 1550{
1200} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION; 1551 ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder;
1552 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
1553}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
1201 1554
1202#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 1555#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
1203 1556
1204typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 { 1557typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2
1205 ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder; 1558{
1206 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */ 1559 ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
1207} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2; 1560 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
1561}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
1208 1562
1209typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION { 1563typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION
1210 DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder; 1564{
1211 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; 1565 DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder;
1212} EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION; 1566 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
1567}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
1213 1568
1214/****************************************************************************/ 1569/****************************************************************************/
1215/* Structures used by DVOEncoderControlTable */ 1570// Structures used by DVOEncoderControlTable
1216/****************************************************************************/ 1571/****************************************************************************/
1217/* ucTableFormatRevision=1,ucTableContentRevision=3 */ 1572//ucTableFormatRevision=1,ucTableContentRevision=3
1218 1573
1219/* ucDVOConfig: */ 1574//ucDVOConfig:
1220#define DVO_ENCODER_CONFIG_RATE_SEL 0x01 1575#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
1221#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 1576#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
1222#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 1577#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
@@ -1225,21 +1580,22 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION {
1225#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 1580#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
1226#define DVO_ENCODER_CONFIG_24BIT 0x08 1581#define DVO_ENCODER_CONFIG_24BIT 0x08
1227 1582
1228typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 { 1583typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
1229 USHORT usPixelClock; 1584{
1230 UCHAR ucDVOConfig; 1585 USHORT usPixelClock;
1231 UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */ 1586 UCHAR ucDVOConfig;
1232 UCHAR ucReseved[4]; 1587 UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
1233} DVO_ENCODER_CONTROL_PARAMETERS_V3; 1588 UCHAR ucReseved[4];
1589}DVO_ENCODER_CONTROL_PARAMETERS_V3;
1234#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3 1590#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3
1235 1591
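Illustrative sketch, not part of this patch: composing ucDVOConfig for the V3 DVO control call from the DVO_ENCODER_CONFIG_* bits above. The helper is hypothetical.

#include "atombios.h"	/* assumed header name */

static UCHAR dvo_config_v3(int sdr, int use_24bit)
{
	UCHAR cfg = 0;

	if (sdr)
		cfg |= DVO_ENCODER_CONFIG_SDR_SPEED;	/* rate-select bit; DDR is 0x00 */
	cfg |= use_24bit ? DVO_ENCODER_CONFIG_24BIT
			 : DVO_ENCODER_CONFIG_LOW12BIT;	/* output-select field */
	return cfg;
}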
1236/* ucTableFormatRevision=1 */ 1592//ucTableFormatRevision=1
1237/* ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for */ 1593//ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for
1238/* bit1=0: non-coherent mode */ 1594// bit1=0: non-coherent mode
1239/* =1: coherent mode */ 1595// =1: coherent mode
1240 1596
1241/* ========================================================================================== */ 1597//==========================================================================================
1242/* Only change is here next time when changing encoder parameter definitions again! */ 1598//Only change is here next time when changing encoder parameter definitions again!
1243#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3 1599#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
1244#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST 1600#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST
1245 1601
@@ -1252,7 +1608,7 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
1252#define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS 1608#define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS
1253#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION 1609#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION
1254 1610
1255/* ========================================================================================== */ 1611//==========================================================================================
1256#define PANEL_ENCODER_MISC_DUAL 0x01 1612#define PANEL_ENCODER_MISC_DUAL 0x01
1257#define PANEL_ENCODER_MISC_COHERENT 0x02 1613#define PANEL_ENCODER_MISC_COHERENT 0x02
1258#define PANEL_ENCODER_MISC_TMDS_LINKB 0x04 1614#define PANEL_ENCODER_MISC_TMDS_LINKB 0x04
@@ -1281,159 +1637,159 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
1281#define PANEL_ENCODER_75FRC_E 0x00 1637#define PANEL_ENCODER_75FRC_E 0x00
1282#define PANEL_ENCODER_75FRC_F 0x80 1638#define PANEL_ENCODER_75FRC_F 0x80
1283 1639
1284/****************************************************************************/ 1640/****************************************************************************/
1285/* Structures used by SetVoltageTable */ 1641// Structures used by SetVoltageTable
1286/****************************************************************************/ 1642/****************************************************************************/
1287#define SET_VOLTAGE_TYPE_ASIC_VDDC 1 1643#define SET_VOLTAGE_TYPE_ASIC_VDDC 1
1288#define SET_VOLTAGE_TYPE_ASIC_MVDDC 2 1644#define SET_VOLTAGE_TYPE_ASIC_MVDDC 2
1289#define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3 1645#define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3
1290#define SET_VOLTAGE_TYPE_ASIC_VDDCI 4 1646#define SET_VOLTAGE_TYPE_ASIC_VDDCI 4
1291#define SET_VOLTAGE_INIT_MODE 5 1647#define SET_VOLTAGE_INIT_MODE 5
1292#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 /* Gets the Max. voltage for the soldered Asic */ 1648#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 //Gets the Max. voltage for the soldered Asic
1293 1649
1294#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1 1650#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1
1295#define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2 1651#define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2
1296#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4 1652#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4
1297 1653
1298#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0 1654#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0
1299#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1 1655#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
1300#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2 1656#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2
1301 1657
1302typedef struct _SET_VOLTAGE_PARAMETERS { 1658typedef struct _SET_VOLTAGE_PARAMETERS
1303 UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */ 1659{
1304 UCHAR ucVoltageMode; /* To set all, to set source A or source B or ... */ 1660 UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
1305 UCHAR ucVoltageIndex; /* An index to tell which voltage level */ 1661 UCHAR ucVoltageMode; // To set all, to set source A or source B or ...
1306 UCHAR ucReserved; 1662 UCHAR ucVoltageIndex; // An index to tell which voltage level
1307} SET_VOLTAGE_PARAMETERS; 1663 UCHAR ucReserved;
1308 1664}SET_VOLTAGE_PARAMETERS;
1309typedef struct _SET_VOLTAGE_PARAMETERS_V2 {
1310 UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
1311 UCHAR ucVoltageMode; /* Not used, maybe used for a state machine for different power modes */
1312 USHORT usVoltageLevel; /* real voltage level */
1313} SET_VOLTAGE_PARAMETERS_V2;
1314
1315typedef struct _SET_VOLTAGE_PS_ALLOCATION {
1316 SET_VOLTAGE_PARAMETERS sASICSetVoltage;
1317 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
1318} SET_VOLTAGE_PS_ALLOCATION;
1319
1320/****************************************************************************/
1321/* Structures used by TVEncoderControlTable */
1322/****************************************************************************/
1323typedef struct _TV_ENCODER_CONTROL_PARAMETERS {
1324 USHORT usPixelClock; /* in 10KHz; for bios convenient */
1325 UCHAR ucTvStandard; /* See definition "ATOM_TV_NTSC ..." */
1326 UCHAR ucAction; /* 0: turn off encoder */
1327 /* 1: setup and turn on encoder */
1328} TV_ENCODER_CONTROL_PARAMETERS;
1329
1330typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION {
1331 TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
1332 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Don't set this one */
1333} TV_ENCODER_CONTROL_PS_ALLOCATION;
1334
1335/* ==============================Data Table Portion==================================== */
1336
1337#ifdef UEFI_BUILD
1338#define UTEMP USHORT
1339#define USHORT void*
1340#endif
1341
1342/****************************************************************************/
1343/* Structure used in Data.mtb */
1344/****************************************************************************/
1345typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES {
1346 USHORT UtilityPipeLine; /* Offset for the utility to get parser info, don't change this position! */
1347 USHORT MultimediaCapabilityInfo; /* Only used by MM Lib, latest version 1.1, not configurable from Bios, need to include the table to build Bios */
1348 USHORT MultimediaConfigInfo; /* Only used by MM Lib, latest version 2.1, not configurable from Bios, need to include the table to build Bios */
1349 USHORT StandardVESA_Timing; /* Only used by Bios */
1350 USHORT FirmwareInfo; /* Shared by various SW components,latest version 1.4 */
1351 USHORT DAC_Info; /* Will be obsolete from R600 */
1352 USHORT LVDS_Info; /* Shared by various SW components,latest version 1.1 */
1353 USHORT TMDS_Info; /* Will be obsolete from R600 */
1354 USHORT AnalogTV_Info; /* Shared by various SW components,latest version 1.1 */
1355 USHORT SupportedDevicesInfo; /* Will be obsolete from R600 */
1356 USHORT GPIO_I2C_Info; /* Shared by various SW components,latest version 1.2 will be used from R600 */
1357 USHORT VRAM_UsageByFirmware; /* Shared by various SW components,latest version 1.3 will be used from R600 */
1358 USHORT GPIO_Pin_LUT; /* Shared by various SW components,latest version 1.1 */
1359 USHORT VESA_ToInternalModeLUT; /* Only used by Bios */
1360 USHORT ComponentVideoInfo; /* Shared by various SW components,latest version 2.1 will be used from R600 */
1361 USHORT PowerPlayInfo; /* Shared by various SW components,latest version 2.1,new design from R600 */
1362 USHORT CompassionateData; /* Will be obsolete from R600 */
1363 USHORT SaveRestoreInfo; /* Only used by Bios */
1364 USHORT PPLL_SS_Info; /* Shared by various SW components, latest version 1.2, used to be called SS_Info, changed to the new name because of internal ASIC SS info */
1365 USHORT OemInfo; /* Defined and used by external SW, should be obsolete soon */
1366 USHORT XTMDS_Info; /* Will be obsolete from R600 */
1367 USHORT MclkSS_Info; /* Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used */
1368 USHORT Object_Header; /* Shared by various SW components,latest version 1.1 */
1369 USHORT IndirectIOAccess; /* Only used by Bios,this table position can't change at all!! */
1370 USHORT MC_InitParameter; /* Only used by command table */
1371 USHORT ASIC_VDDC_Info; /* Will be obsolete from R600 */
1372 USHORT ASIC_InternalSS_Info; /* New table name from R600, used to be called "ASIC_MVDDC_Info" */
1373 USHORT TV_VideoMode; /* Only used by command table */
1374 USHORT VRAM_Info; /* Only used by command table, latest version 1.3 */
1375 USHORT MemoryTrainingInfo; /* Used for VBIOS and Diag utility for memory training purposes since R600. The new table rev starts from 2.1 */
1376 USHORT IntegratedSystemInfo; /* Shared by various SW components */
1377 USHORT ASIC_ProfilingInfo; /* New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 */
1378 USHORT VoltageObjectInfo; /* Shared by various SW components, latest version 1.1 */
1379 USHORT PowerSourceInfo; /* Shared by various SW components, latest version 1.1 */
1380} ATOM_MASTER_LIST_OF_DATA_TABLES;
1381
1382#ifdef UEFI_BUILD
1383#define USHORT UTEMP
1384#endif
1385 1665
1386typedef struct _ATOM_MASTER_DATA_TABLE { 1666typedef struct _SET_VOLTAGE_PARAMETERS_V2
1387 ATOM_COMMON_TABLE_HEADER sHeader; 1667{
1388 ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; 1668 UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
1389} ATOM_MASTER_DATA_TABLE; 1669 UCHAR ucVoltageMode; // Not used, maybe used for a state machine for different power modes
1670 USHORT usVoltageLevel; // real voltage level
1671}SET_VOLTAGE_PARAMETERS_V2;
1390 1672
1391/****************************************************************************/ 1673typedef struct _SET_VOLTAGE_PS_ALLOCATION
1392/* Structure used in MultimediaCapabilityInfoTable */ 1674{
1393/****************************************************************************/ 1675 SET_VOLTAGE_PARAMETERS sASICSetVoltage;
1394typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO { 1676 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
1395 ATOM_COMMON_TABLE_HEADER sHeader; 1677}SET_VOLTAGE_PS_ALLOCATION;
1396 ULONG ulSignature; /* HW info table signature string "$ATI" */ 1678
1397 UCHAR ucI2C_Type; /* I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) */ 1679/****************************************************************************/
1398 UCHAR ucTV_OutInfo; /* Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) */ 1680// Structures used by TVEncoderControlTable
1399 UCHAR ucVideoPortInfo; /* Provides the video port capabilities */ 1681/****************************************************************************/
1400 UCHAR ucHostPortInfo; /* Provides host port configuration information */ 1682typedef struct _TV_ENCODER_CONTROL_PARAMETERS
1401} ATOM_MULTIMEDIA_CAPABILITY_INFO; 1683{
1684 USHORT usPixelClock; // in 10KHz; for bios convenient
1685 UCHAR ucTvStandard; // See definition "ATOM_TV_NTSC ..."
1686 UCHAR ucAction; // 0: turn off encoder
1687 // 1: setup and turn on encoder
1688}TV_ENCODER_CONTROL_PARAMETERS;
1402 1689
1403/****************************************************************************/ 1690typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION
1404/* Structure used in MultimediaConfigInfoTable */ 1691{
1405/****************************************************************************/ 1692 TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
1406typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO { 1693 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; // Don't set this one
1407 ATOM_COMMON_TABLE_HEADER sHeader; 1694}TV_ENCODER_CONTROL_PS_ALLOCATION;
1408 ULONG ulSignature; /* MM info table signature string "$MMT" */
1409 UCHAR ucTunerInfo; /* Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) */
1410 UCHAR ucAudioChipInfo; /* List the audio chip type (3:0) product type (4) and OEM revision (7:5) */
1411 UCHAR ucProductID; /* Defines as OEM ID or ATI board ID dependent on product type setting */
1412 UCHAR ucMiscInfo1; /* Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) */
1413 UCHAR ucMiscInfo2; /* I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) */
1414 UCHAR ucMiscInfo3; /* Video Decoder Type (3:0) Video In Standard/Crystal (7:4) */
1415 UCHAR ucMiscInfo4; /* Video Decoder Host Config (2:0) reserved (7:3) */
1416 UCHAR ucVideoInput0Info; /* Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
1417 UCHAR ucVideoInput1Info; /* Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
1418 UCHAR ucVideoInput2Info; /* Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
1419 UCHAR ucVideoInput3Info; /* Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
1420 UCHAR ucVideoInput4Info; /* Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
1421} ATOM_MULTIMEDIA_CONFIG_INFO;
1422 1695
1423/****************************************************************************/ 1696//==============================Data Table Portion====================================
1424/* Structures used in FirmwareInfoTable */
1425/****************************************************************************/
1426 1697
1427/* usBIOSCapability Definition: */ 1698/****************************************************************************/
1428/* Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */ 1699// Structure used in Data.mtb
1429/* Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */ 1700/****************************************************************************/
1430/* Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */ 1701typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
1431/* Others: Reserved */ 1702{
1703 USHORT UtilityPipeLine; // Offset for the utility to get parser info, don't change this position!
1704 USHORT MultimediaCapabilityInfo; // Only used by MM Lib, latest version 1.1, not configurable from Bios, need to include the table to build Bios
1705 USHORT MultimediaConfigInfo; // Only used by MM Lib, latest version 2.1, not configurable from Bios, need to include the table to build Bios
1706 USHORT StandardVESA_Timing; // Only used by Bios
1707 USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
1708 USHORT DAC_Info; // Will be obsolete from R600
1709 USHORT LVDS_Info; // Shared by various SW components,latest version 1.1
1710 USHORT TMDS_Info; // Will be obsolete from R600
1711 USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
1712 USHORT SupportedDevicesInfo; // Will be obsolete from R600
1713 USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600
1714 USHORT VRAM_UsageByFirmware; // Shared by various SW components,latest version 1.3 will be used from R600
1715 USHORT GPIO_Pin_LUT; // Shared by various SW components,latest version 1.1
1716 USHORT VESA_ToInternalModeLUT; // Only used by Bios
1717 USHORT ComponentVideoInfo; // Shared by various SW components,latest version 2.1 will be used from R600
1718 USHORT PowerPlayInfo; // Shared by various SW components,latest version 2.1,new design from R600
1719 USHORT CompassionateData; // Will be obsolete from R600
1720 USHORT SaveRestoreInfo; // Only used by Bios
1721 USHORT PPLL_SS_Info; // Shared by various SW components, latest version 1.2, used to be called SS_Info, changed to the new name because of internal ASIC SS info
1722 USHORT OemInfo; // Defined and used by external SW, should be obsolete soon
1723 USHORT XTMDS_Info; // Will be obsolete from R600
1724 USHORT MclkSS_Info; // Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used
1725 USHORT Object_Header; // Shared by various SW components,latest version 1.1
1726 USHORT IndirectIOAccess; // Only used by Bios,this table position can't change at all!!
1727 USHORT MC_InitParameter; // Only used by command table
1728 USHORT ASIC_VDDC_Info; // Will be obsolete from R600
1729 USHORT ASIC_InternalSS_Info; // New table name from R600, used to be called "ASIC_MVDDC_Info"
1730 USHORT TV_VideoMode; // Only used by command table
1731 USHORT VRAM_Info; // Only used by command table, latest version 1.3
1732 USHORT MemoryTrainingInfo; // Used for VBIOS and Diag utility for memory training purposes since R600. The new table rev starts from 2.1
1733 USHORT IntegratedSystemInfo; // Shared by various SW components
1734 USHORT ASIC_ProfilingInfo; // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600
1735 USHORT VoltageObjectInfo; // Shared by various SW components, latest version 1.1
1736 USHORT PowerSourceInfo; // Shared by various SW components, latest version 1.1
1737}ATOM_MASTER_LIST_OF_DATA_TABLES;
1738
1739typedef struct _ATOM_MASTER_DATA_TABLE
1740{
1741 ATOM_COMMON_TABLE_HEADER sHeader;
1742 ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
1743}ATOM_MASTER_DATA_TABLE;
1744
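Illustrative sketch, not part of this patch: locating one data table via the master list. Treating each USHORT member as a byte offset from the start of the VBIOS image is an assumption based on how AtomBIOS parsers typically consume this table; only the ATOM_MASTER_DATA_TABLE layout comes from this header.

#include <stddef.h>
#include "atombios.h"	/* assumed header name */

static void *atom_get_object_header(unsigned char *vbios,
				    ATOM_MASTER_DATA_TABLE *master)
{
	USHORT off = master->ListOfDataTables.Object_Header;

	return off ? (void *)(vbios + off) : NULL;	/* zero offset: table absent */
}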
1745/****************************************************************************/
1746// Structure used in MultimediaCapabilityInfoTable
1747/****************************************************************************/
1748typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO
1749{
1750 ATOM_COMMON_TABLE_HEADER sHeader;
1751 ULONG ulSignature; // HW info table signature string "$ATI"
1752 UCHAR ucI2C_Type; // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc)
1753 UCHAR ucTV_OutInfo; // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7)
1754 UCHAR ucVideoPortInfo; // Provides the video port capabilities
1755 UCHAR ucHostPortInfo; // Provides host port configuration information
1756}ATOM_MULTIMEDIA_CAPABILITY_INFO;
1757
1758/****************************************************************************/
1759// Structure used in MultimediaConfigInfoTable
1760/****************************************************************************/
1761typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
1762{
1763 ATOM_COMMON_TABLE_HEADER sHeader;
1764 ULONG ulSignature; // MM info table signature string "$MMT"
1765 UCHAR ucTunerInfo; // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5)
1766 UCHAR ucAudioChipInfo; // List the audio chip type (3:0) product type (4) and OEM revision (7:5)
1767 UCHAR ucProductID; // Defined as OEM ID or ATI board ID depending on the product type setting
1768 UCHAR ucMiscInfo1; // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7)
1769 UCHAR ucMiscInfo2; // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6)
1770 UCHAR ucMiscInfo3; // Video Decoder Type (3:0) Video In Standard/Crystal (7:4)
1771 UCHAR ucMiscInfo4; // Video Decoder Host Config (2:0) reserved (7:3)
1772 UCHAR ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
1773 UCHAR ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
1774 UCHAR ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
1775 UCHAR ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
1776 UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
1777}ATOM_MULTIMEDIA_CONFIG_INFO;
1778
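The byte-wide fields above are simple mask-and-shift encodings; the bit ranges in each comment (for example, tuner type in (4:0) and tuner video input in (7:5)) are all a decoder needs. A minimal, self-contained C sketch of that decoding for ucTunerInfo follows; it is an illustration only, not part of the header, and the example value is made up.

#include <stdio.h>

typedef unsigned char UCHAR;

/* ucTunerInfo packs the tuner type in bits [4:0] and the tuner's
 * video input selection in bits [7:5], per the comment above. */
static unsigned int tuner_type(UCHAR ucTunerInfo)
{
	return ucTunerInfo & 0x1F;        /* bits 4:0 */
}

static unsigned int tuner_video_input(UCHAR ucTunerInfo)
{
	return (ucTunerInfo >> 5) & 0x07; /* bits 7:5 */
}

int main(void)
{
	UCHAR example = 0x43;             /* hypothetical value */
	printf("tuner type %u, video input %u\n",
	       tuner_type(example), tuner_video_input(example));
	return 0;
}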
1779/****************************************************************************/
1780// Structures used in FirmwareInfoTable
1781/****************************************************************************/
1782
1783// usBIOSCapability Definition:
1784// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted;
1785// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported;
1786// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported;
1787// Others: Reserved
1432#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED 0x0001 1788#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED 0x0001
1433#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT 0x0002 1789#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT 0x0002
1434#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT 0x0004 1790#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT 0x0004
1435#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008 1791#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008 // (valid from v1.1 ~v1.4):=1: memclk SS enable, =0 memclk SS disable.
1436#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010 1792#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010 // (valid from v1.1 ~v1.4):=1: engclk SS enable, =0 engclk SS disable.
1437#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU 0x0020 1793#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU 0x0020
1438#define ATOM_BIOS_INFO_WMI_SUPPORT 0x0040 1794#define ATOM_BIOS_INFO_WMI_SUPPORT 0x0040
1439#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM 0x0080 1795#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM 0x0080
@@ -1441,242 +1797,292 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
1441#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK 0x1E00 1797#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK 0x1E00
1442#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000 1798#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
1443#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE 0x4000 1799#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE 0x4000
1800#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT 0x0008 // (valid from v2.1 ): =1: memclk ss enable with external ss chip
1801#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT 0x0010 // (valid from v2.1 ): =1: engclk ss enable with external ss chip
1444 1802
1445#ifndef _H2INC 1803#ifndef _H2INC
1446 1804
1447/* Please don't add or expand this bitfield structure below, this one will retire soon.! */ 1805//Please don't add or expand this bitfield structure below, this one will retire soon.!
1448typedef struct _ATOM_FIRMWARE_CAPABILITY { 1806typedef struct _ATOM_FIRMWARE_CAPABILITY
1807{
1449#if ATOM_BIG_ENDIAN 1808#if ATOM_BIG_ENDIAN
1450 USHORT Reserved:3; 1809 USHORT Reserved:3;
1451 USHORT HyperMemory_Size:4; 1810 USHORT HyperMemory_Size:4;
1452 USHORT HyperMemory_Support:1; 1811 USHORT HyperMemory_Support:1;
1453 USHORT PPMode_Assigned:1; 1812 USHORT PPMode_Assigned:1;
1454 USHORT WMI_SUPPORT:1; 1813 USHORT WMI_SUPPORT:1;
1455 USHORT GPUControlsBL:1; 1814 USHORT GPUControlsBL:1;
1456 USHORT EngineClockSS_Support:1; 1815 USHORT EngineClockSS_Support:1;
1457 USHORT MemoryClockSS_Support:1; 1816 USHORT MemoryClockSS_Support:1;
1458 USHORT ExtendedDesktopSupport:1; 1817 USHORT ExtendedDesktopSupport:1;
1459 USHORT DualCRTC_Support:1; 1818 USHORT DualCRTC_Support:1;
1460 USHORT FirmwarePosted:1; 1819 USHORT FirmwarePosted:1;
1461#else 1820#else
1462 USHORT FirmwarePosted:1; 1821 USHORT FirmwarePosted:1;
1463 USHORT DualCRTC_Support:1; 1822 USHORT DualCRTC_Support:1;
1464 USHORT ExtendedDesktopSupport:1; 1823 USHORT ExtendedDesktopSupport:1;
1465 USHORT MemoryClockSS_Support:1; 1824 USHORT MemoryClockSS_Support:1;
1466 USHORT EngineClockSS_Support:1; 1825 USHORT EngineClockSS_Support:1;
1467 USHORT GPUControlsBL:1; 1826 USHORT GPUControlsBL:1;
1468 USHORT WMI_SUPPORT:1; 1827 USHORT WMI_SUPPORT:1;
1469 USHORT PPMode_Assigned:1; 1828 USHORT PPMode_Assigned:1;
1470 USHORT HyperMemory_Support:1; 1829 USHORT HyperMemory_Support:1;
1471 USHORT HyperMemory_Size:4; 1830 USHORT HyperMemory_Size:4;
1472 USHORT Reserved:3; 1831 USHORT Reserved:3;
1473#endif 1832#endif
1474} ATOM_FIRMWARE_CAPABILITY; 1833}ATOM_FIRMWARE_CAPABILITY;
1475 1834
1476typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS { 1835typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
1477 ATOM_FIRMWARE_CAPABILITY sbfAccess; 1836{
1478 USHORT susAccess; 1837 ATOM_FIRMWARE_CAPABILITY sbfAccess;
1479} ATOM_FIRMWARE_CAPABILITY_ACCESS; 1838 USHORT susAccess;
1839}ATOM_FIRMWARE_CAPABILITY_ACCESS;
1480 1840
1481#else 1841#else
1482 1842
1483typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS { 1843typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
1484 USHORT susAccess; 1844{
1485} ATOM_FIRMWARE_CAPABILITY_ACCESS; 1845 USHORT susAccess;
1846}ATOM_FIRMWARE_CAPABILITY_ACCESS;
1486 1847
1487#endif 1848#endif
1488 1849
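Whether the bitfield view or the raw susAccess word is used, the ATOM_BIOS_INFO_* masks above address the same bits, and masking the raw word sidesteps the endian-dependent bitfield layout. A small self-contained sketch follows; the mask values are copied from the defines above and the capability word in main() is hypothetical.

#include <stdio.h>

typedef unsigned short USHORT;

/* Mask values copied from the ATOM_BIOS_INFO_* defines above. */
#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED     0x0001
#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT        0x0002
#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT  0x0008

/* Test capability bits on the raw capability word
 * (what ATOM_FIRMWARE_CAPABILITY_ACCESS.susAccess holds). */
static void dump_caps(USHORT caps)
{
	printf("posted:    %s\n", (caps & ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED) ? "yes" : "no");
	printf("dual CRTC: %s\n", (caps & ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT) ? "yes" : "no");
	printf("memclk SS: %s\n", (caps & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT) ? "yes" : "no");
}

int main(void)
{
	dump_caps(0x000B); /* hypothetical capability word */
	return 0;
}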
1489typedef struct _ATOM_FIRMWARE_INFO { 1850typedef struct _ATOM_FIRMWARE_INFO
1490 ATOM_COMMON_TABLE_HEADER sHeader; 1851{
1491 ULONG ulFirmwareRevision; 1852 ATOM_COMMON_TABLE_HEADER sHeader;
1492 ULONG ulDefaultEngineClock; /* In 10Khz unit */ 1853 ULONG ulFirmwareRevision;
1493 ULONG ulDefaultMemoryClock; /* In 10Khz unit */ 1854 ULONG ulDefaultEngineClock; //In 10Khz unit
1494 ULONG ulDriverTargetEngineClock; /* In 10Khz unit */ 1855 ULONG ulDefaultMemoryClock; //In 10Khz unit
1495 ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */ 1856 ULONG ulDriverTargetEngineClock; //In 10Khz unit
1496 ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */ 1857 ULONG ulDriverTargetMemoryClock; //In 10Khz unit
1497 ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */ 1858 ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
1498 ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */ 1859 ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
1499 ULONG ulASICMaxEngineClock; /* In 10Khz unit */ 1860 ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
1500 ULONG ulASICMaxMemoryClock; /* In 10Khz unit */ 1861 ULONG ulASICMaxEngineClock; //In 10Khz unit
1501 UCHAR ucASICMaxTemperature; 1862 ULONG ulASICMaxMemoryClock; //In 10Khz unit
1502 UCHAR ucPadding[3]; /* Don't use them */ 1863 UCHAR ucASICMaxTemperature;
1503 ULONG aulReservedForBIOS[3]; /* Don't use them */ 1864 UCHAR ucPadding[3]; //Don't use them
1504 USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */ 1865 ULONG aulReservedForBIOS[3]; //Don't use them
1505 USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */ 1866 USHORT usMinEngineClockPLL_Input; //In 10Khz unit
1506 USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */ 1867 USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
1507 USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */ 1868 USHORT usMinEngineClockPLL_Output; //In 10Khz unit
1508 USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */ 1869 USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
1509 USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */ 1870 USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
1510 USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */ 1871 USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
1511 USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */ 1872 USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
1512 USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */ 1873 USHORT usMinPixelClockPLL_Input; //In 10Khz unit
1513 USHORT usMinPixelClockPLL_Output; /* In 10Khz unit, the definitions above can't change!!! */ 1874 USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
1514 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; 1875 USHORT usMinPixelClockPLL_Output; //In 10Khz unit, the definitions above can't change!!!
1515 USHORT usReferenceClock; /* In 10Khz unit */ 1876 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
1516 USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */ 1877 USHORT usReferenceClock; //In 10Khz unit
1517 UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */ 1878 USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
1518 UCHAR ucDesign_ID; /* Indicate what is the board design */ 1879 UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
1519 UCHAR ucMemoryModule_ID; /* Indicate what is the board design */ 1880 UCHAR ucDesign_ID; //Indicate what is the board design
1520} ATOM_FIRMWARE_INFO; 1881 UCHAR ucMemoryModule_ID; //Indicate what is the board design
1521 1882}ATOM_FIRMWARE_INFO;
1522typedef struct _ATOM_FIRMWARE_INFO_V1_2 { 1883
1523 ATOM_COMMON_TABLE_HEADER sHeader; 1884typedef struct _ATOM_FIRMWARE_INFO_V1_2
1524 ULONG ulFirmwareRevision; 1885{
1525 ULONG ulDefaultEngineClock; /* In 10Khz unit */ 1886 ATOM_COMMON_TABLE_HEADER sHeader;
1526 ULONG ulDefaultMemoryClock; /* In 10Khz unit */ 1887 ULONG ulFirmwareRevision;
1527 ULONG ulDriverTargetEngineClock; /* In 10Khz unit */ 1888 ULONG ulDefaultEngineClock; //In 10Khz unit
1528 ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */ 1889 ULONG ulDefaultMemoryClock; //In 10Khz unit
1529 ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */ 1890 ULONG ulDriverTargetEngineClock; //In 10Khz unit
1530 ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */ 1891 ULONG ulDriverTargetMemoryClock; //In 10Khz unit
1531 ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */ 1892 ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
1532 ULONG ulASICMaxEngineClock; /* In 10Khz unit */ 1893 ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
1533 ULONG ulASICMaxMemoryClock; /* In 10Khz unit */ 1894 ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
1534 UCHAR ucASICMaxTemperature; 1895 ULONG ulASICMaxEngineClock; //In 10Khz unit
1535 UCHAR ucMinAllowedBL_Level; 1896 ULONG ulASICMaxMemoryClock; //In 10Khz unit
1536 UCHAR ucPadding[2]; /* Don't use them */ 1897 UCHAR ucASICMaxTemperature;
1537 ULONG aulReservedForBIOS[2]; /* Don't use them */ 1898 UCHAR ucMinAllowedBL_Level;
1538 ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */ 1899 UCHAR ucPadding[2]; //Don't use them
1539 USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */ 1900 ULONG aulReservedForBIOS[2]; //Don't use them
1540 USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */ 1901 ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
1541 USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */ 1902 USHORT usMinEngineClockPLL_Input; //In 10Khz unit
1542 USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */ 1903 USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
1543 USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */ 1904 USHORT usMinEngineClockPLL_Output; //In 10Khz unit
1544 USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */ 1905 USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
1545 USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */ 1906 USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
1546 USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */ 1907 USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
1547 USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */ 1908 USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
1548 USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */ 1909 USHORT usMinPixelClockPLL_Input; //In 10Khz unit
1549 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; 1910 USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
1550 USHORT usReferenceClock; /* In 10Khz unit */ 1911 USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
1551 USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */ 1912 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
1552 UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */ 1913 USHORT usReferenceClock; //In 10Khz unit
1553 UCHAR ucDesign_ID; /* Indicate what is the board design */ 1914 USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
1554 UCHAR ucMemoryModule_ID; /* Indicate what is the board design */ 1915 UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
1555} ATOM_FIRMWARE_INFO_V1_2; 1916 UCHAR ucDesign_ID; //Indicate what is the board design
1556 1917 UCHAR ucMemoryModule_ID; //Indicate what is the board design
1557typedef struct _ATOM_FIRMWARE_INFO_V1_3 { 1918}ATOM_FIRMWARE_INFO_V1_2;
1558 ATOM_COMMON_TABLE_HEADER sHeader; 1919
1559 ULONG ulFirmwareRevision; 1920typedef struct _ATOM_FIRMWARE_INFO_V1_3
1560 ULONG ulDefaultEngineClock; /* In 10Khz unit */ 1921{
1561 ULONG ulDefaultMemoryClock; /* In 10Khz unit */ 1922 ATOM_COMMON_TABLE_HEADER sHeader;
1562 ULONG ulDriverTargetEngineClock; /* In 10Khz unit */ 1923 ULONG ulFirmwareRevision;
1563 ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */ 1924 ULONG ulDefaultEngineClock; //In 10Khz unit
1564 ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */ 1925 ULONG ulDefaultMemoryClock; //In 10Khz unit
1565 ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */ 1926 ULONG ulDriverTargetEngineClock; //In 10Khz unit
1566 ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */ 1927 ULONG ulDriverTargetMemoryClock; //In 10Khz unit
1567 ULONG ulASICMaxEngineClock; /* In 10Khz unit */ 1928 ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
1568 ULONG ulASICMaxMemoryClock; /* In 10Khz unit */ 1929 ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
1569 UCHAR ucASICMaxTemperature; 1930 ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
1570 UCHAR ucMinAllowedBL_Level; 1931 ULONG ulASICMaxEngineClock; //In 10Khz unit
1571 UCHAR ucPadding[2]; /* Don't use them */ 1932 ULONG ulASICMaxMemoryClock; //In 10Khz unit
1572 ULONG aulReservedForBIOS; /* Don't use them */ 1933 UCHAR ucASICMaxTemperature;
1573 ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */ 1934 UCHAR ucMinAllowedBL_Level;
1574 ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */ 1935 UCHAR ucPadding[2]; //Don't use them
1575 USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */ 1936 ULONG aulReservedForBIOS; //Don't use them
1576 USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */ 1937 ULONG ul3DAccelerationEngineClock;//In 10Khz unit
1577 USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */ 1938 ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
1578 USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */ 1939 USHORT usMinEngineClockPLL_Input; //In 10Khz unit
1579 USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */ 1940 USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
1580 USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */ 1941 USHORT usMinEngineClockPLL_Output; //In 10Khz unit
1581 USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */ 1942 USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
1582 USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */ 1943 USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
1583 USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */ 1944 USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
1584 USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */ 1945 USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
1585 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; 1946 USHORT usMinPixelClockPLL_Input; //In 10Khz unit
1586 USHORT usReferenceClock; /* In 10Khz unit */ 1947 USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
1587 USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */ 1948 USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
1588 UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */ 1949 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
1589 UCHAR ucDesign_ID; /* Indicate what is the board design */ 1950 USHORT usReferenceClock; //In 10Khz unit
1590 UCHAR ucMemoryModule_ID; /* Indicate what is the board design */ 1951 USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
1591} ATOM_FIRMWARE_INFO_V1_3; 1952 UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
1592 1953 UCHAR ucDesign_ID; //Indicate what is the board design
1593typedef struct _ATOM_FIRMWARE_INFO_V1_4 { 1954 UCHAR ucMemoryModule_ID; //Indicate what is the board design
1594 ATOM_COMMON_TABLE_HEADER sHeader; 1955}ATOM_FIRMWARE_INFO_V1_3;
1595 ULONG ulFirmwareRevision; 1956
1596 ULONG ulDefaultEngineClock; /* In 10Khz unit */ 1957typedef struct _ATOM_FIRMWARE_INFO_V1_4
1597 ULONG ulDefaultMemoryClock; /* In 10Khz unit */ 1958{
1598 ULONG ulDriverTargetEngineClock; /* In 10Khz unit */ 1959 ATOM_COMMON_TABLE_HEADER sHeader;
1599 ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */ 1960 ULONG ulFirmwareRevision;
1600 ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */ 1961 ULONG ulDefaultEngineClock; //In 10Khz unit
1601 ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */ 1962 ULONG ulDefaultMemoryClock; //In 10Khz unit
1602 ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */ 1963 ULONG ulDriverTargetEngineClock; //In 10Khz unit
1603 ULONG ulASICMaxEngineClock; /* In 10Khz unit */ 1964 ULONG ulDriverTargetMemoryClock; //In 10Khz unit
1604 ULONG ulASICMaxMemoryClock; /* In 10Khz unit */ 1965 ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
1605 UCHAR ucASICMaxTemperature; 1966 ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
1606 UCHAR ucMinAllowedBL_Level; 1967 ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
1607 USHORT usBootUpVDDCVoltage; /* In MV unit */ 1968 ULONG ulASICMaxEngineClock; //In 10Khz unit
1608 USHORT usLcdMinPixelClockPLL_Output; /* In MHz unit */ 1969 ULONG ulASICMaxMemoryClock; //In 10Khz unit
1609 USHORT usLcdMaxPixelClockPLL_Output; /* In MHz unit */ 1970 UCHAR ucASICMaxTemperature;
1610 ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */ 1971 UCHAR ucMinAllowedBL_Level;
1611 ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */ 1972 USHORT usBootUpVDDCVoltage; //In MV unit
1612 USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */ 1973 USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
1613 USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */ 1974 USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
1614 USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */ 1975 ULONG ul3DAccelerationEngineClock;//In 10Khz unit
1615 USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */ 1976 ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
1616 USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */ 1977 USHORT usMinEngineClockPLL_Input; //In 10Khz unit
1617 USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */ 1978 USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
1618 USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */ 1979 USHORT usMinEngineClockPLL_Output; //In 10Khz unit
1619 USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */ 1980 USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
1620 USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */ 1981 USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
1621 USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */ 1982 USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
1622 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; 1983 USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
1623 USHORT usReferenceClock; /* In 10Khz unit */ 1984 USHORT usMinPixelClockPLL_Input; //In 10Khz unit
1624 USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */ 1985 USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
1625 UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */ 1986 USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
1626 UCHAR ucDesign_ID; /* Indicate what is the board design */ 1987 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
1627 UCHAR ucMemoryModule_ID; /* Indicate what is the board design */ 1988 USHORT usReferenceClock; //In 10Khz unit
1628} ATOM_FIRMWARE_INFO_V1_4; 1989 USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
1629 1990 UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
1630#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V1_4 1991 UCHAR ucDesign_ID; //Indicate what is the board design
1631 1992 UCHAR ucMemoryModule_ID; //Indicate what is the board design
1632/****************************************************************************/ 1993}ATOM_FIRMWARE_INFO_V1_4;
1633/* Structures used in IntegratedSystemInfoTable */ 1994
1634/****************************************************************************/ 1995//the structure below to be used from Cypress
1996typedef struct _ATOM_FIRMWARE_INFO_V2_1
1997{
1998 ATOM_COMMON_TABLE_HEADER sHeader;
1999 ULONG ulFirmwareRevision;
2000 ULONG ulDefaultEngineClock; //In 10Khz unit
2001 ULONG ulDefaultMemoryClock; //In 10Khz unit
2002 ULONG ulReserved1;
2003 ULONG ulReserved2;
2004 ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
2005 ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
2006 ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
2007 ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock
2008 ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit
2009 UCHAR ucReserved1; //Was ucASICMaxTemperature;
2010 UCHAR ucMinAllowedBL_Level;
2011 USHORT usBootUpVDDCVoltage; //In MV unit
2012 USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
2013 USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
2014 ULONG ulReserved4; //Was ulAsicMaximumVoltage
2015 ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
2016 USHORT usMinEngineClockPLL_Input; //In 10Khz unit
2017 USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
2018 USHORT usMinEngineClockPLL_Output; //In 10Khz unit
2019 USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
2020 USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
2021 USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
2022 USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
2023 USHORT usMinPixelClockPLL_Input; //In 10Khz unit
2024 USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
2025 USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
2026 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
2027 USHORT usCoreReferenceClock; //In 10Khz unit
2028 USHORT usMemoryReferenceClock; //In 10Khz unit
2029 USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit; if 0, the Uniphy input clock in DP mode comes from the internal PPLL, otherwise from the external spread-spectrum clock
2030 UCHAR ucMemoryModule_ID; //Indicate what is the board design
2031 UCHAR ucReserved4[3];
2032}ATOM_FIRMWARE_INFO_V2_1;
2033
2034
2035#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1
2036
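All clock fields in the ATOM_FIRMWARE_INFO revisions above are stored in 10 kHz units (the LCD PLL output limits added in V1.4/V2.1 are in MHz), so consumers scale them before use. Below is a minimal conversion sketch with plain integer types standing in for the header's ULONG; the example value is invented.

#include <stdio.h>

/* ulDefaultEngineClock and friends are stored in 10 kHz units per the comments above. */
static unsigned long atom_10khz_to_khz(unsigned long v) { return v * 10; }
static unsigned long atom_10khz_to_mhz(unsigned long v) { return v / 100; }

int main(void)
{
	unsigned long ulDefaultEngineClock = 60000; /* hypothetical: 60000 x 10 kHz = 600 MHz */

	printf("default engine clock: %lu kHz (%lu MHz)\n",
	       atom_10khz_to_khz(ulDefaultEngineClock),
	       atom_10khz_to_mhz(ulDefaultEngineClock));
	return 0;
}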
2037/****************************************************************************/
2038// Structures used in IntegratedSystemInfoTable
2039/****************************************************************************/
1635#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN 0x2 2040#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN 0x2
1636#define IGP_CAP_FLAG_AC_CARD 0x4 2041#define IGP_CAP_FLAG_AC_CARD 0x4
1637#define IGP_CAP_FLAG_SDVO_CARD 0x8 2042#define IGP_CAP_FLAG_SDVO_CARD 0x8
1638#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE 0x10 2043#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE 0x10
1639 2044
1640typedef struct _ATOM_INTEGRATED_SYSTEM_INFO { 2045typedef struct _ATOM_INTEGRATED_SYSTEM_INFO
1641 ATOM_COMMON_TABLE_HEADER sHeader; 2046{
1642 ULONG ulBootUpEngineClock; /* in 10kHz unit */ 2047 ATOM_COMMON_TABLE_HEADER sHeader;
1643 ULONG ulBootUpMemoryClock; /* in 10kHz unit */ 2048 ULONG ulBootUpEngineClock; //in 10kHz unit
1644 ULONG ulMaxSystemMemoryClock; /* in 10kHz unit */ 2049 ULONG ulBootUpMemoryClock; //in 10kHz unit
1645 ULONG ulMinSystemMemoryClock; /* in 10kHz unit */ 2050 ULONG ulMaxSystemMemoryClock; //in 10kHz unit
1646 UCHAR ucNumberOfCyclesInPeriodHi; 2051 ULONG ulMinSystemMemoryClock; //in 10kHz unit
1647 UCHAR ucLCDTimingSel; /* =0:not valid.!=0 sel this timing descriptor from LCD EDID. */ 2052 UCHAR ucNumberOfCyclesInPeriodHi;
1648 USHORT usReserved1; 2053 UCHAR ucLCDTimingSel; //=0:not valid.!=0 sel this timing descriptor from LCD EDID.
1649 USHORT usInterNBVoltageLow; /* An intermediate PWM value to set the voltage */ 2054 USHORT usReserved1;
1650 USHORT usInterNBVoltageHigh; /* Another intermediate PWM value to set the voltage */ 2055 USHORT usInterNBVoltageLow; //An intermediate PWM value to set the voltage
1651 ULONG ulReserved[2]; 2056 USHORT usInterNBVoltageHigh; //Another intermediate PWM value to set the voltage
1652 2057 ULONG ulReserved[2];
1653 USHORT usFSBClock; /* In MHz unit */ 2058
1654 USHORT usCapabilityFlag; /* Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable */ 2059 USHORT usFSBClock; //In MHz unit
1655 /* Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card */ 2060 USHORT usCapabilityFlag; //Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable
1656 /* Bit[4]==1: P/2 mode, ==0: P/1 mode */ 2061 //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card
1657 USHORT usPCIENBCfgReg7; /* bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal */ 2062 //Bit[4]==1: P/2 mode, ==0: P/1 mode
1658 USHORT usK8MemoryClock; /* in MHz unit */ 2063 USHORT usPCIENBCfgReg7; //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal
1659 USHORT usK8SyncStartDelay; /* in 0.01 us unit */ 2064 USHORT usK8MemoryClock; //in MHz unit
1660 USHORT usK8DataReturnTime; /* in 0.01 us unit */ 2065 USHORT usK8SyncStartDelay; //in 0.01 us unit
1661 UCHAR ucMaxNBVoltage; 2066 USHORT usK8DataReturnTime; //in 0.01 us unit
1662 UCHAR ucMinNBVoltage; 2067 UCHAR ucMaxNBVoltage;
1663 UCHAR ucMemoryType; /* [7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved */ 2068 UCHAR ucMinNBVoltage;
1664 UCHAR ucNumberOfCyclesInPeriod; /* CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod */ 2069 UCHAR ucMemoryType; //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved
1665 UCHAR ucStartingPWM_HighTime; /* CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime */ 2070 UCHAR ucNumberOfCyclesInPeriod; //CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod
1666 UCHAR ucHTLinkWidth; /* 16 bit vs. 8 bit */ 2071 UCHAR ucStartingPWM_HighTime; //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime
1667 UCHAR ucMaxNBVoltageHigh; 2072 UCHAR ucHTLinkWidth; //16 bit vs. 8 bit
1668 UCHAR ucMinNBVoltageHigh; 2073 UCHAR ucMaxNBVoltageHigh;
1669} ATOM_INTEGRATED_SYSTEM_INFO; 2074 UCHAR ucMinNBVoltageHigh;
2075}ATOM_INTEGRATED_SYSTEM_INFO;
1670 2076
1671/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO 2077/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
1672ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock 2078ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock
1673 For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock 2079 For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
1674ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 2080ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
1675 For AMD IGP,for now this can be 0 2081 For AMD IGP,for now this can be 0
1676ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 2082ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
1677 For AMD IGP,for now this can be 0 2083 For AMD IGP,for now this can be 0
1678 2084
1679usFSBClock: For Intel IGP,it's FSB Freq 2085usFSBClock: For Intel IGP,it's FSB Freq
1680 For AMD IGP,it's HT Link Speed 2086 For AMD IGP,it's HT Link Speed
1681 2087
1682usK8MemoryClock: For AMD IGP only. For RevF CPU, set it to 200 2088usK8MemoryClock: For AMD IGP only. For RevF CPU, set it to 200
@@ -1687,98 +2093,113 @@ VC:Voltage Control
1687ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all. 2093ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
1688ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all. 2094ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
1689 2095
1690ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value. 2096ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value.
1691ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0 2097ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0
1692 2098
1693ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all. 2099ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
1694ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all. 2100ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
1695 2101
2102
1696usInterNBVoltageLow: Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all. 2103usInterNBVoltageLow: Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
1697usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all. 2104usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
1698*/ 2105*/
1699 2106
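The usCapabilityFlag layout documented in the struct comment above lines up with the IGP_CAP_FLAG_* defines at the top of this block. A self-contained decode sketch follows; the mask values are copied from those defines, the fake-HDMI bit has no named define in this excerpt so 0x1 is spelled out, and the flag word in main() is hypothetical.

#include <stdio.h>

typedef unsigned short USHORT;

/* Values copied from the IGP_CAP_FLAG_* defines above. */
#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN   0x2
#define IGP_CAP_FLAG_AC_CARD            0x4
#define IGP_CAP_FLAG_SDVO_CARD          0x8
#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE  0x10

static void decode_capability_flag(USHORT usCapabilityFlag)
{
	printf("fake HDMI:        %s\n", (usCapabilityFlag & 0x1) ? "yes" : "no");
	printf("dynamic clocking: %s\n",
	       (usCapabilityFlag & IGP_CAP_FLAG_DYNAMIC_CLOCK_EN) ? "enabled" : "disabled");
	if (usCapabilityFlag & IGP_CAP_FLAG_AC_CARD)
		printf("PCIE card: AC card\n");
	else if (usCapabilityFlag & IGP_CAP_FLAG_SDVO_CARD)
		printf("PCIE card: SDVO card\n");
	else
		printf("PCIE card: none\n");
	printf("post-divider:     %s\n",
	       (usCapabilityFlag & IGP_CAP_FLAG_POSTDIV_BY_2_MODE) ? "P/2" : "P/1");
}

int main(void)
{
	decode_capability_flag(0x0006); /* hypothetical flag word */
	return 0;
}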
2107
1700/* 2108/*
1701The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST; 2109The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST;
1702Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need. 2110Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need.
1703Enough space is reserved that the table revision should never need to change. Whenever needed, a GPU SW component can use the reserved portion for new data entries. 2111Enough space is reserved that the table revision should never need to change. Whenever needed, a GPU SW component can use the reserved portion for new data entries.
1704 2112
1705SW components can access the IGP system info structure in the same way as before 2113SW components can access the IGP system info structure in the same way as before
1706*/ 2114*/
1707 2115
1708typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 { 2116
1709 ATOM_COMMON_TABLE_HEADER sHeader; 2117typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2
1710 ULONG ulBootUpEngineClock; /* in 10kHz unit */ 2118{
1711 ULONG ulReserved1[2]; /* must be 0x0 for the reserved */ 2119 ATOM_COMMON_TABLE_HEADER sHeader;
1712 ULONG ulBootUpUMAClock; /* in 10kHz unit */ 2120 ULONG ulBootUpEngineClock; //in 10kHz unit
1713 ULONG ulBootUpSidePortClock; /* in 10kHz unit */ 2121 ULONG ulReserved1[2]; //must be 0x0 for the reserved
1714 ULONG ulMinSidePortClock; /* in 10kHz unit */ 2122 ULONG ulBootUpUMAClock; //in 10kHz unit
1715 ULONG ulReserved2[6]; /* must be 0x0 for the reserved */ 2123 ULONG ulBootUpSidePortClock; //in 10kHz unit
1716 ULONG ulSystemConfig; /* see explanation below */ 2124 ULONG ulMinSidePortClock; //in 10kHz unit
1717 ULONG ulBootUpReqDisplayVector; 2125 ULONG ulReserved2[6]; //must be 0x0 for the reserved
1718 ULONG ulOtherDisplayMisc; 2126 ULONG ulSystemConfig; //see explanation below
1719 ULONG ulDDISlot1Config; 2127 ULONG ulBootUpReqDisplayVector;
1720 ULONG ulDDISlot2Config; 2128 ULONG ulOtherDisplayMisc;
1721 UCHAR ucMemoryType; /* [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved */ 2129 ULONG ulDDISlot1Config;
1722 UCHAR ucUMAChannelNumber; 2130 ULONG ulDDISlot2Config;
1723 UCHAR ucDockingPinBit; 2131 UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
1724 UCHAR ucDockingPinPolarity; 2132 UCHAR ucUMAChannelNumber;
1725 ULONG ulDockingPinCFGInfo; 2133 UCHAR ucDockingPinBit;
1726 ULONG ulCPUCapInfo; 2134 UCHAR ucDockingPinPolarity;
1727 USHORT usNumberOfCyclesInPeriod; 2135 ULONG ulDockingPinCFGInfo;
1728 USHORT usMaxNBVoltage; 2136 ULONG ulCPUCapInfo;
1729 USHORT usMinNBVoltage; 2137 USHORT usNumberOfCyclesInPeriod;
1730 USHORT usBootUpNBVoltage; 2138 USHORT usMaxNBVoltage;
1731 ULONG ulHTLinkFreq; /* in 10Khz */ 2139 USHORT usMinNBVoltage;
1732 USHORT usMinHTLinkWidth; 2140 USHORT usBootUpNBVoltage;
1733 USHORT usMaxHTLinkWidth; 2141 ULONG ulHTLinkFreq; //in 10Khz
1734 USHORT usUMASyncStartDelay; 2142 USHORT usMinHTLinkWidth;
1735 USHORT usUMADataReturnTime; 2143 USHORT usMaxHTLinkWidth;
1736 USHORT usLinkStatusZeroTime; 2144 USHORT usUMASyncStartDelay;
1737 USHORT usReserved; 2145 USHORT usUMADataReturnTime;
1738 ULONG ulHighVoltageHTLinkFreq; /* in 10Khz */ 2146 USHORT usLinkStatusZeroTime;
1739 ULONG ulLowVoltageHTLinkFreq; /* in 10Khz */ 2147 USHORT usDACEfuse; //for storing badgap value (for RS880 only)
1740 USHORT usMaxUpStreamHTLinkWidth; 2148 ULONG ulHighVoltageHTLinkFreq; // in 10Khz
1741 USHORT usMaxDownStreamHTLinkWidth; 2149 ULONG ulLowVoltageHTLinkFreq; // in 10Khz
1742 USHORT usMinUpStreamHTLinkWidth; 2150 USHORT usMaxUpStreamHTLinkWidth;
1743 USHORT usMinDownStreamHTLinkWidth; 2151 USHORT usMaxDownStreamHTLinkWidth;
1744 ULONG ulReserved3[97]; /* must be 0x0 */ 2152 USHORT usMinUpStreamHTLinkWidth;
1745} ATOM_INTEGRATED_SYSTEM_INFO_V2; 2153 USHORT usMinDownStreamHTLinkWidth;
2154 USHORT usFirmwareVersion; //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS and driver should enable FW.
2155 USHORT usFullT0Time; // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us.
2156 ULONG ulReserved3[96]; //must be 0x0
2157}ATOM_INTEGRATED_SYSTEM_INFO_V2;
1746 2158
1747/* 2159/*
1748ulBootUpEngineClock: Boot-up Engine Clock in 10Khz; 2160ulBootUpEngineClock: Boot-up Engine Clock in 10Khz;
1749ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present 2161ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
1750ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock 2162ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock
1751 2163
1752ulSystemConfig: 2164ulSystemConfig:
1753Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode; 2165Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
1754Bit[1]=1: system boots up at AMD overdrived state or user customized mode. In this case, driver will just stick to this boot-up mode. No other PowerPlay state 2166Bit[1]=1: system boots up at AMD overdrived state or user customized mode. In this case, driver will just stick to this boot-up mode. No other PowerPlay state
1755 =0: system boots up at driver control state. Power state depends on PowerPlay table. 2167 =0: system boots up at driver control state. Power state depends on PowerPlay table.
1756Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used. 2168Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
1757Bit[3]=1: Only one power state(Performance) will be supported. 2169Bit[3]=1: Only one power state(Performance) will be supported.
1758 =0: Multiple power states supported from PowerPlay table. 2170 =0: Multiple power states supported from PowerPlay table.
1759Bit[4]=1: CLMC is supported and enabled on current system. 2171Bit[4]=1: CLMC is supported and enabled on current system.
1760 =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface. 2172 =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface.
1761Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement. 2173Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
1762 =0: CDLW is disabled. If CLMC is enabled case, Min HT width will be set equal to Max HT width. If CLMC disabled case, Max HT width will be applied. 2174 =0: CDLW is disabled. If CLMC is enabled case, Min HT width will be set equal to Max HT width. If CLMC disabled case, Max HT width will be applied.
1763Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored. 2175Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored.
1764 =0: Voltage settings is determined by powerplay table. 2176 =0: Voltage settings is determined by powerplay table.
1765Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is workaround for CPU(Griffin) performance issue. 2177Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is workaround for CPU(Griffin) performance issue.
1766 =0: Enable CLMC as regular mode, CDLD and CILR will be enabled. 2178 =0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
2179Bit[8]=1: CDLF is supported and enabled on current system.
2180 =0: CDLF is not supported or enabled on current system.
2181Bit[9]=1: DLL Shut Down feature is enabled on current system.
2182 =0: DLL Shut Down feature is not enabled or supported on current system.
1767 2183
1768ulBootUpReqDisplayVector: This dword is a bit vector indicating what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions. 2184ulBootUpReqDisplayVector: This dword is a bit vector indicating what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
1769 2185
1770ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion; 2186ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion;
1771 [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSuppportedStd definition; 2187 [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSupportedStd definition;
1772 2188
1773ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design). 2189ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
1774 [3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12) 2190 [3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
1775 [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12) 2191 [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1 lane 3:0; bit 5=1 lane 7:4; bit 6=1 lane 11:8; bit 7=1 lane 15:12)
1776 [15:8] - Lane configuration attribute; 2192 When a DDI connector is not "paired" (meaning two mutually exclusive connections, one on chassis and one on docking, of which only one can be connected at a time)
2193 in both chassis and docking, SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example:
2194 if one DDI connector is only populated in docking with PCIE lanes 8-11, but there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2.
2195
2196 [15:8] - Lane configuration attribute;
1777 [23:16]- Connector type, possible value: 2197 [23:16]- Connector type, possible value:
1778 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 2198 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
1779 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D 2199 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
1780 CONNECTOR_OBJECT_ID_HDMI_TYPE_A 2200 CONNECTOR_OBJECT_ID_HDMI_TYPE_A
1781 CONNECTOR_OBJECT_ID_DISPLAYPORT 2201 CONNECTOR_OBJECT_ID_DISPLAYPORT
2202 CONNECTOR_OBJECT_ID_eDP
1782 [31:24]- Reserved 2203 [31:24]- Reserved
1783 2204
1784ulDDISlot2Config: Same as Slot1. 2205ulDDISlot2Config: Same as Slot1.
@@ -1787,29 +2208,31 @@ For IGP, Hypermemory is the only memory type showed in CCC.
1787 2208
1788ucUMAChannelNumber: how many channels for the UMA; 2209ucUMAChannelNumber: how many channels for the UMA;
1789 2210
1790ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin 2211ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
1791ucDockingPinBit: which bit in this register to read the pin status; 2212ucDockingPinBit: which bit in this register to read the pin status;
1792ucDockingPinPolarity:Polarity of the pin when docked; 2213ucDockingPinPolarity:Polarity of the pin when docked;
1793 2214
1794ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0 2215ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
1795 2216
1796usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%. 2217usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
1797usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode. 2218
2219usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
1798usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode. 2220usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
1799 GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0 2221 GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
1800 PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1 2222 PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
1801 GPU SW no-control mode: usMaxNBVoltage & usMinNBVoltage=0, and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE is ignored 2223 GPU SW no-control mode: usMaxNBVoltage & usMinNBVoltage=0, and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE is ignored
2224
1802usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value. 2225usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
1803 2226
1804ulHTLinkFreq: Bootup HT link Frequency in 10Khz. 2227ulHTLinkFreq: Bootup HT link Frequency in 10Khz.
1805usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth. 2228usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
1806 If CDLW enabled, both upstream and downstream width should be the same during bootup.
1807usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
1808 If CDLW enabled, both upstream and downstream width should be the same during bootup. 2229 If CDLW enabled, both upstream and downstream width should be the same during bootup.
2230usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
2231 If CDLW enabled, both upstream and downstream width should be the same during bootup.
1809 2232
1810usUMASyncStartDelay: Memory access latency, required for watermark calculation 2233usUMASyncStartDelay: Memory access latency, required for watermark calculation
1811usUMADataReturnTime: Memory access latency, required for watermark calculation 2234usUMADataReturnTime: Memory access latency, required for watermark calculation
1812usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in units of 0.01 us 2235usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in units of 0.01 us (a C sketch of this conversion appears just below the list)
1813for Griffin or Greyhound. SBIOS needs to convert to actual time by: 2236for Griffin or Greyhound. SBIOS needs to convert to actual time by:
1814 if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us) 2237 if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
1815 if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us) 2238 if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
@@ -1817,7 +2240,7 @@ for Griffin or Greyhound. SBIOS needs to convert to actual time by:
1817 if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us) 2240 if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
1818 2241
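The T0Ttime-to-usLinkStatusZeroTime conversion listed above is a small fixed-point lookup. A C sketch of it follows; it is not SBIOS code, the [5:4]=10b encoding falls outside the lines quoted here and is deliberately left out, and the sample value is made up.

#include <stdio.h>

typedef unsigned short USHORT;
typedef unsigned char UCHAR;

/* Convert a T0Ttime encoding into usLinkStatusZeroTime (0.01 us units),
 * following the cases listed above.  The [5:4]=10b encoding is not shown
 * in this excerpt and is not reproduced here. */
static USHORT t0ttime_to_link_status_zero_time(UCHAR t0t)
{
	UCHAR sel  = (t0t >> 4) & 0x3;  /* T0Ttime[5:4] */
	UCHAR mant = t0t & 0xF;         /* T0Ttime[3:0] */

	switch (sel) {
	case 0x0: return mant * 10;     /* steps of 0.1 us (0.0 .. 1.5 us) */
	case 0x1: return mant * 50;     /* steps of 0.5 us (0.0 .. 7.5 us) */
	case 0x3: return mant * 2000;   /* steps of 20 us  (0.0 .. 200 us), mant <= 0xa */
	default:  return 0;             /* encoding not covered in the excerpt above */
	}
}

int main(void)
{
	/* hypothetical: sel=01b, mant=5 -> 2.5 us -> 250 in 0.01 us units */
	printf("%u\n", (unsigned)t0ttime_to_link_status_zero_time(0x15));
	return 0;
}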
1819ulHighVoltageHTLinkFreq: HT link frequency for power state with low voltage. If boot up runs in HT1, this must be 0. 2242ulHighVoltageHTLinkFreq: HT link frequency for power state with low voltage. If boot up runs in HT1, this must be 0.
1820 This must be less than or equal to ulHTLinkFreq(bootup frequency). 2243 This must be less than or equal to ulHTLinkFreq(bootup frequency).
1821ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0. 2244ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
1822 This must be less than or equal to ulHighVoltageHTLinkFreq. 2245 This must be less than or equal to ulHighVoltageHTLinkFreq.
1823 2246
@@ -1827,14 +2250,17 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep
1827usMinDownStreamHTLinkWidth: same as above. 2250usMinDownStreamHTLinkWidth: same as above.
1828*/ 2251*/
1829 2252
2253
1830#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001 2254#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
1831#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002 2255#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
1832#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004 2256#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004
1833#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008 2257#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008
1834#define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010 2258#define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010
1835#define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020 2259#define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020
1836#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040 2260#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040
1837#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080 2261#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080
2262#define SYSTEM_CONFIG_CDLF_ENABLED 0x00000100
2263#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED 0x00000200
1838 2264
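Drivers consume ulSystemConfig by masking it against the SYSTEM_CONFIG_* values above. A self-contained sketch of that decoding follows; the mask values are copied from the defines and the configuration word in main() is hypothetical.

#include <stdio.h>

typedef unsigned long ULONG;

/* Values copied from the SYSTEM_CONFIG_* defines above. */
#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE   0x00000001
#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE    0x00000004
#define SYSTEM_CONFIG_CLMC_ENABLED          0x00000010
#define SYSTEM_CONFIG_CDLW_ENABLED          0x00000020

static void decode_system_config(ULONG ulSystemConfig)
{
	printf("PowerExpress: %s\n",
	       (ulSystemConfig & SYSTEM_CONFIG_POWEREXPRESS_ENABLE) ? "yes" : "no");
	printf("NB voltage control: %s\n",
	       (ulSystemConfig & SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE) ? "PWM" : "GPIO");
	printf("CLMC: %s, CDLW: %s\n",
	       (ulSystemConfig & SYSTEM_CONFIG_CLMC_ENABLED) ? "enabled" : "disabled",
	       (ulSystemConfig & SYSTEM_CONFIG_CDLW_ENABLED) ? "enabled" : "disabled");
}

int main(void)
{
	decode_system_config(0x00000034UL); /* hypothetical configuration word */
	return 0;
}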
1839#define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF 2265#define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF
1840 2266
@@ -1851,6 +2277,41 @@ usMinDownStreamHTLinkWidth: same as above.
1851 2277
1852#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000 2278#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000
1853 2279
2280// IntegratedSystemInfoTable new rev is V5 after V2, because the real rev of V2 is v1.4. This rev is used for RR
2281typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
2282{
2283 ATOM_COMMON_TABLE_HEADER sHeader;
2284 ULONG ulBootUpEngineClock; //in 10kHz unit
2285 ULONG ulDentistVCOFreq; //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK.
2286 ULONG ulLClockFreq; //GPU Lclk freq in 10kHz unit, have relationship with NCLK in NorthBridge
2287 ULONG ulBootUpUMAClock; //in 10kHz unit
2288 ULONG ulReserved1[8]; //must be 0x0 for the reserved
2289 ULONG ulBootUpReqDisplayVector;
2290 ULONG ulOtherDisplayMisc;
2291 ULONG ulReserved2[4]; //must be 0x0 for the reserved
2292 ULONG ulSystemConfig; //TBD
2293 ULONG ulCPUCapInfo; //TBD
2294 USHORT usMaxNBVoltage; //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
2295 USHORT usMinNBVoltage; //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
2296 USHORT usBootUpNBVoltage; //boot up NB voltage
2297 UCHAR ucHtcTmpLmt; //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD
2298 UCHAR ucTjOffset; //bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD
2299 ULONG ulReserved3[4]; //must be 0x0 for the reserved
2300 ULONG ulDDISlot1Config; //see above ulDDISlot1Config definition
2301 ULONG ulDDISlot2Config;
2302 ULONG ulDDISlot3Config;
2303 ULONG ulDDISlot4Config;
2304 ULONG ulReserved4[4]; //must be 0x0 for the reserved
2305 UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
2306 UCHAR ucUMAChannelNumber;
2307 USHORT usReserved;
2308 ULONG ulReserved5[4]; //must be 0x0 for the reserved
2309 ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];//arrays with values for CSR M3 arbiter for default
2310 ULONG ulCSR_M3_ARB_CNTL_UVD[10]; //arrays with values for CSR M3 arbiter for UVD playback
2311 ULONG ulCSR_M3_ARB_CNTL_FS3D[10];//arrays with values for CSR M3 arbiter for Full Screen 3D applications
2312 ULONG ulReserved6[61]; //must be 0x0
2313}ATOM_INTEGRATED_SYSTEM_INFO_V5;
2314
1854#define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000 2315#define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000
1855#define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001 2316#define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001
1856#define ATOM_TV_INT_ENCODER1_INDEX 0x00000002 2317#define ATOM_TV_INT_ENCODER1_INDEX 0x00000002
@@ -1866,8 +2327,8 @@ usMinDownStreamHTLinkWidth: same as above.
1866#define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C 2327#define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C
1867#define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D 2328#define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D
1868 2329
1869/* define ASIC internal encoder id ( bit vector ) */ 2330// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable
1870#define ASIC_INT_DAC1_ENCODER_ID 0x00 2331#define ASIC_INT_DAC1_ENCODER_ID 0x00
1871#define ASIC_INT_TV_ENCODER_ID 0x02 2332#define ASIC_INT_TV_ENCODER_ID 0x02
1872#define ASIC_INT_DIG1_ENCODER_ID 0x03 2333#define ASIC_INT_DIG1_ENCODER_ID 0x03
1873#define ASIC_INT_DAC2_ENCODER_ID 0x04 2334#define ASIC_INT_DAC2_ENCODER_ID 0x04
@@ -1875,10 +2336,24 @@ usMinDownStreamHTLinkWidth: same as above.
1875#define ASIC_INT_DVO_ENCODER_ID 0x07 2336#define ASIC_INT_DVO_ENCODER_ID 0x07
1876#define ASIC_INT_DIG2_ENCODER_ID 0x09 2337#define ASIC_INT_DIG2_ENCODER_ID 0x09
1877#define ASIC_EXT_DIG_ENCODER_ID 0x05 2338#define ASIC_EXT_DIG_ENCODER_ID 0x05
2339#define ASIC_EXT_DIG2_ENCODER_ID 0x08
2340#define ASIC_INT_DIG3_ENCODER_ID 0x0a
2341#define ASIC_INT_DIG4_ENCODER_ID 0x0b
2342#define ASIC_INT_DIG5_ENCODER_ID 0x0c
2343#define ASIC_INT_DIG6_ENCODER_ID 0x0d
1878 2344
1879/* define Encoder attribute */ 2345//define Encoder attribute
1880#define ATOM_ANALOG_ENCODER 0 2346#define ATOM_ANALOG_ENCODER 0
1881#define ATOM_DIGITAL_ENCODER 1 2347#define ATOM_DIGITAL_ENCODER 1
2348#define ATOM_DP_ENCODER 2
2349
2350#define ATOM_ENCODER_ENUM_MASK 0x70
2351#define ATOM_ENCODER_ENUM_ID1 0x00
2352#define ATOM_ENCODER_ENUM_ID2 0x10
2353#define ATOM_ENCODER_ENUM_ID3 0x20
2354#define ATOM_ENCODER_ENUM_ID4 0x30
2355#define ATOM_ENCODER_ENUM_ID5 0x40
2356#define ATOM_ENCODER_ENUM_ID6 0x50
1882 2357
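The ATOM_ENCODER_ENUM_* values above select one of several instances of the same encoder type. The sketch below assumes, based only on the 0x70 mask and the 0x00-0x0d encoder id values, that the id occupies the low nibble and the enum selector bits [6:4]; that packing is an assumption of this illustration, not something stated in this excerpt.

#include <stdio.h>

typedef unsigned char UCHAR;

/* Values copied from the defines above. */
#define ATOM_ENCODER_ENUM_MASK    0x70
#define ASIC_INT_DIG1_ENCODER_ID  0x03

/* Assumed packing (see note above): encoder id in bits [3:0],
 * ENUM_IDx selector in bits [6:4]. */
static UCHAR encoder_id(UCHAR enc)   { return enc & 0x0F; }
static UCHAR encoder_enum(UCHAR enc) { return (enc & ATOM_ENCODER_ENUM_MASK) >> 4; }

int main(void)
{
	UCHAR enc = 0x13; /* hypothetical: ATOM_ENCODER_ENUM_ID2 | ASIC_INT_DIG1_ENCODER_ID */
	printf("encoder id 0x%02x, instance selector %u\n",
	       (unsigned)encoder_id(enc), (unsigned)encoder_enum(enc));
	return 0;
}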
1883#define ATOM_DEVICE_CRT1_INDEX 0x00000000 2358#define ATOM_DEVICE_CRT1_INDEX 0x00000000
1884#define ATOM_DEVICE_LCD1_INDEX 0x00000001 2359#define ATOM_DEVICE_LCD1_INDEX 0x00000001
@@ -1886,45 +2361,40 @@ usMinDownStreamHTLinkWidth: same as above.
1886#define ATOM_DEVICE_DFP1_INDEX 0x00000003 2361#define ATOM_DEVICE_DFP1_INDEX 0x00000003
1887#define ATOM_DEVICE_CRT2_INDEX 0x00000004 2362#define ATOM_DEVICE_CRT2_INDEX 0x00000004
1888#define ATOM_DEVICE_LCD2_INDEX 0x00000005 2363#define ATOM_DEVICE_LCD2_INDEX 0x00000005
1889#define ATOM_DEVICE_TV2_INDEX 0x00000006 2364#define ATOM_DEVICE_DFP6_INDEX 0x00000006
1890#define ATOM_DEVICE_DFP2_INDEX 0x00000007 2365#define ATOM_DEVICE_DFP2_INDEX 0x00000007
1891#define ATOM_DEVICE_CV_INDEX 0x00000008 2366#define ATOM_DEVICE_CV_INDEX 0x00000008
1892#define ATOM_DEVICE_DFP3_INDEX 0x00000009 2367#define ATOM_DEVICE_DFP3_INDEX 0x00000009
1893#define ATOM_DEVICE_DFP4_INDEX 0x0000000A 2368#define ATOM_DEVICE_DFP4_INDEX 0x0000000A
1894#define ATOM_DEVICE_DFP5_INDEX 0x0000000B 2369#define ATOM_DEVICE_DFP5_INDEX 0x0000000B
2370
1895#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C 2371#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C
1896#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D 2372#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D
1897#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E 2373#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E
1898#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F 2374#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F
1899#define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1) 2375#define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1)
1900#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO 2376#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO
1901#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1) 2377#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1 )
1902 2378
1903#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1) 2379#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1)
1904 2380
1905#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX) 2381#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX )
1906#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX) 2382#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX )
1907#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX) 2383#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX )
1908#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX) 2384#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX )
1909#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX) 2385#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX )
1910#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX) 2386#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX )
1911#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX) 2387#define ATOM_DEVICE_DFP6_SUPPORT (0x1L << ATOM_DEVICE_DFP6_INDEX )
1912#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX) 2388#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX )
1913#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX) 2389#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX )
1914#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX) 2390#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX )
1915#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX ) 2391#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX )
1916#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX) 2392#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX )
1917 2393
1918#define ATOM_DEVICE_CRT_SUPPORT \ 2394#define ATOM_DEVICE_CRT_SUPPORT (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
1919 (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT) 2395#define ATOM_DEVICE_DFP_SUPPORT (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT)
1920#define ATOM_DEVICE_DFP_SUPPORT \ 2396#define ATOM_DEVICE_TV_SUPPORT (ATOM_DEVICE_TV1_SUPPORT)
1921 (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | \ 2397#define ATOM_DEVICE_LCD_SUPPORT (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
1922 ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | \
1923 ATOM_DEVICE_DFP5_SUPPORT)
1924#define ATOM_DEVICE_TV_SUPPORT \
1925 (ATOM_DEVICE_TV1_SUPPORT | ATOM_DEVICE_TV2_SUPPORT)
1926#define ATOM_DEVICE_LCD_SUPPORT \
1927 (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
1928 2398
1929#define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0 2399#define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0
1930#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004 2400#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004
@@ -1942,6 +2412,7 @@ usMinDownStreamHTLinkWidth: same as above.
1942#define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E 2412#define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E
1943#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F 2413#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F
1944 2414
2415
1945#define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F 2416#define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F
1946#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000 2417#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000
1947#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000 2418#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000
@@ -1958,139 +2429,150 @@ usMinDownStreamHTLinkWidth: same as above.
1958#define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004 2429#define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004
1959#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001 2430#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001
1960#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002 2431#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002
1961#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 /* For IGP RS600 */ 2432#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 //For IGP RS600
1962#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 /* For IGP RS690 */ 2433#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 //For IGP RS690
1963 2434
1964#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080 2435#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080
1965#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007 2436#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007
1966#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000 2437#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000
1967#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001 2438#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001
1968 2439
1969/* usDeviceSupport: */ 2440// usDeviceSupport:
1970/* Bits0 = 0 - no CRT1 support= 1- CRT1 is supported */ 2441// Bits0 = 0 - no CRT1 support= 1- CRT1 is supported
1971/* Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported */ 2442// Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported
1972/* Bit 2 = 0 - no TV1 support= 1- TV1 is supported */ 2443// Bit 2 = 0 - no TV1 support= 1- TV1 is supported
1973/* Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported */ 2444// Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported
1974/* Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported */ 2445// Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported
1975/* Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported */ 2446// Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported
1976/* Bit 6 = 0 - no TV2 support= 1- TV2 is supported */ 2447// Bit 6 = 0 - no DFP6 support= 1- DFP6 is supported
1977/* Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported */ 2448// Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported
1978/* Bit 8 = 0 - no CV support= 1- CV is supported */ 2449// Bit 8 = 0 - no CV support= 1- CV is supported
1979/* Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported */ 2450// Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported
1980/* Byte1 (Supported Device Info) */ 2451// Bit 10 = 0 - no DFP4 support= 1- DFP4 is supported
1981/* Bit 0 = = 0 - no CV support= 1- CV is supported */ 2452// Bit 11 = 0 - no DFP5 support= 1- DFP5 is supported
1982/* */ 2453//
1983/* */ 2454//
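The usDeviceSupport layout above pairs one bit per ATOM_DEVICE_*_INDEX, which is exactly what the ATOM_DEVICE_*_SUPPORT shift macros earlier in this header encode, and the per-device info word carries the connector type in bits [7:4]. A minimal standalone sketch follows; the mask and shift values are copied from this excerpt, while the program, its variable names, and the sample words are purely illustrative.

#include <stdio.h>

/* Values copied from the defines shown above. */
#define ATOM_DEVICE_CRT1_INDEX 0x00000000
#define ATOM_DEVICE_LCD1_INDEX 0x00000001
#define ATOM_DEVICE_TV1_INDEX  0x00000002
#define ATOM_DEVICE_DFP1_INDEX 0x00000003
#define ATOM_DEVICE_DFP6_INDEX 0x00000006

#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX)
#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX)
#define ATOM_DEVICE_TV1_SUPPORT  (0x1L << ATOM_DEVICE_TV1_INDEX)
#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX)
#define ATOM_DEVICE_DFP6_SUPPORT (0x1L << ATOM_DEVICE_DFP6_INDEX)

#define ATOM_DEVICE_CONNECTOR_TYPE_MASK  0x000000F0
#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004

int main(void)
{
    /* Hypothetical usDeviceSupport word: CRT1 + LCD1 + DFP1 + DFP6. */
    unsigned short us_device_support = ATOM_DEVICE_CRT1_SUPPORT |
                                       ATOM_DEVICE_LCD1_SUPPORT |
                                       ATOM_DEVICE_DFP1_SUPPORT |
                                       ATOM_DEVICE_DFP6_SUPPORT;
    /* Hypothetical per-device info word; bits [7:4] hold the connector
     * type (0xF = DISPLAYPORT per the table above). */
    unsigned int dev_info = 0x000000F2;

    printf("LCD1 supported: %s\n",
           (us_device_support & ATOM_DEVICE_LCD1_SUPPORT) ? "yes" : "no");
    printf("TV1  supported: %s\n",
           (us_device_support & ATOM_DEVICE_TV1_SUPPORT) ? "yes" : "no");
    printf("connector type code: 0x%X\n",
           (dev_info & ATOM_DEVICE_CONNECTOR_TYPE_MASK) >>
           ATOM_DEVICE_CONNECTOR_TYPE_SHIFT);
    return 0;
}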
1984
1985/* ucI2C_ConfigID */
1986/* [7:0] - I2C LINE Associate ID */
1987/* = 0 - no I2C */
1988/* [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) */
1989/* = 0, [6:0]=SW assisted I2C ID */
1990/* [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use */
1991/* = 2, HW engine for Multimedia use */
1992/* = 3-7 Reserved for future I2C engines */
1993/* [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C */
1994
1995typedef struct _ATOM_I2C_ID_CONFIG {
1996#if ATOM_BIG_ENDIAN
1997 UCHAR bfHW_Capable:1;
1998 UCHAR bfHW_EngineID:3;
1999 UCHAR bfI2C_LineMux:4;
2000#else
2001 UCHAR bfI2C_LineMux:4;
2002 UCHAR bfHW_EngineID:3;
2003 UCHAR bfHW_Capable:1;
2004#endif
2005} ATOM_I2C_ID_CONFIG;
2006
2007typedef union _ATOM_I2C_ID_CONFIG_ACCESS {
2008 ATOM_I2C_ID_CONFIG sbfAccess;
2009 UCHAR ucAccess;
2010} ATOM_I2C_ID_CONFIG_ACCESS;
2011 2455
2012/****************************************************************************/ 2456/****************************************************************************/
2013/* Structure used in GPIO_I2C_InfoTable */ 2457/* Structure used in MclkSS_InfoTable */
2014/****************************************************************************/ 2458/****************************************************************************/
2015typedef struct _ATOM_GPIO_I2C_ASSIGMENT { 2459// ucI2C_ConfigID
2016 USHORT usClkMaskRegisterIndex; 2460// [7:0] - I2C LINE Associate ID
2017 USHORT usClkEnRegisterIndex; 2461// = 0 - no I2C
2018 USHORT usClkY_RegisterIndex; 2462// [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection)
2019 USHORT usClkA_RegisterIndex; 2463// = 0, [6:0]=SW assisted I2C ID
2020 USHORT usDataMaskRegisterIndex; 2464// [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use
2021 USHORT usDataEnRegisterIndex; 2465// = 2, HW engine for Multimedia use
2022 USHORT usDataY_RegisterIndex; 2466// = 3-7 Reserved for future I2C engines
2023 USHORT usDataA_RegisterIndex; 2467// [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C
2024 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; 2468
2025 UCHAR ucClkMaskShift; 2469typedef struct _ATOM_I2C_ID_CONFIG
2026 UCHAR ucClkEnShift; 2470{
2027 UCHAR ucClkY_Shift; 2471#if ATOM_BIG_ENDIAN
2028 UCHAR ucClkA_Shift; 2472 UCHAR bfHW_Capable:1;
2029 UCHAR ucDataMaskShift; 2473 UCHAR bfHW_EngineID:3;
2030 UCHAR ucDataEnShift; 2474 UCHAR bfI2C_LineMux:4;
2031 UCHAR ucDataY_Shift; 2475#else
2032 UCHAR ucDataA_Shift; 2476 UCHAR bfI2C_LineMux:4;
2033 UCHAR ucReserved1; 2477 UCHAR bfHW_EngineID:3;
2034 UCHAR ucReserved2; 2478 UCHAR bfHW_Capable:1;
2035} ATOM_GPIO_I2C_ASSIGMENT; 2479#endif
2036 2480}ATOM_I2C_ID_CONFIG;
2037typedef struct _ATOM_GPIO_I2C_INFO {
2038 ATOM_COMMON_TABLE_HEADER sHeader;
2039 ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
2040} ATOM_GPIO_I2C_INFO;
2041 2481
2042/****************************************************************************/ 2482typedef union _ATOM_I2C_ID_CONFIG_ACCESS
2043/* Common Structure used in other structures */ 2483{
2044/****************************************************************************/ 2484 ATOM_I2C_ID_CONFIG sbfAccess;
2485 UCHAR ucAccess;
2486}ATOM_I2C_ID_CONFIG_ACCESS;
2487
2488
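The ucI2C_ConfigID encoding above ([7] HW capability, [6:4] engine ID, [3:0] line mux) is what the ATOM_I2C_ID_CONFIG bitfield mirrors. Below is a self-contained sketch of decoding such a byte both by masking and through an equivalent union; the local typedefs stand in for the header's UCHAR, and the bitfield ordering shown assumes a little-endian build (ATOM_BIG_ENDIAN not set), matching the #else branch above.

#include <stdio.h>

typedef unsigned char UCHAR;

typedef struct {
    UCHAR bfI2C_LineMux:4;   /* [3:0] mux number (HW) or GPIO ID (SW)   */
    UCHAR bfHW_EngineID:3;   /* [6:4] 1 = non-MM engine, 2 = MM engine  */
    UCHAR bfHW_Capable:1;    /* [7]   1 = HW-assisted I2C               */
} I2C_ID_CONFIG;

typedef union {
    I2C_ID_CONFIG sbf;
    UCHAR ucAccess;
} I2C_ID_CONFIG_ACCESS;

int main(void)
{
    I2C_ID_CONFIG_ACCESS id;

    id.ucAccess = 0x93;      /* hypothetical: HW capable, engine 1, mux 3 */

    /* Mask-based decode, straight from the bit layout in the comment. */
    printf("HW capable: %d, engine: %d, mux: %d\n",
           (id.ucAccess >> 7) & 0x1,
           (id.ucAccess >> 4) & 0x7,
           id.ucAccess & 0xF);

    /* Equivalent decode through the bitfield view. */
    printf("HW capable: %d, engine: %d, mux: %d\n",
           id.sbf.bfHW_Capable, id.sbf.bfHW_EngineID, id.sbf.bfI2C_LineMux);
    return 0;
}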
2489/****************************************************************************/
2490// Structure used in GPIO_I2C_InfoTable
2491/****************************************************************************/
2492typedef struct _ATOM_GPIO_I2C_ASSIGMENT
2493{
2494 USHORT usClkMaskRegisterIndex;
2495 USHORT usClkEnRegisterIndex;
2496 USHORT usClkY_RegisterIndex;
2497 USHORT usClkA_RegisterIndex;
2498 USHORT usDataMaskRegisterIndex;
2499 USHORT usDataEnRegisterIndex;
2500 USHORT usDataY_RegisterIndex;
2501 USHORT usDataA_RegisterIndex;
2502 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
2503 UCHAR ucClkMaskShift;
2504 UCHAR ucClkEnShift;
2505 UCHAR ucClkY_Shift;
2506 UCHAR ucClkA_Shift;
2507 UCHAR ucDataMaskShift;
2508 UCHAR ucDataEnShift;
2509 UCHAR ucDataY_Shift;
2510 UCHAR ucDataA_Shift;
2511 UCHAR ucReserved1;
2512 UCHAR ucReserved2;
2513}ATOM_GPIO_I2C_ASSIGMENT;
2514
2515typedef struct _ATOM_GPIO_I2C_INFO
2516{
2517 ATOM_COMMON_TABLE_HEADER sHeader;
2518 ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
2519}ATOM_GPIO_I2C_INFO;
2520
2521/****************************************************************************/
2522// Common Structure used in other structures
2523/****************************************************************************/
2045 2524
2046#ifndef _H2INC 2525#ifndef _H2INC
2047 2526
2048/* Please don't add or expand this bitfield structure below, this one will retire soon.! */ 2527//Please don't add or expand this bitfield structure below, this one will retire soon.!
2049typedef struct _ATOM_MODE_MISC_INFO { 2528typedef struct _ATOM_MODE_MISC_INFO
2529{
2050#if ATOM_BIG_ENDIAN 2530#if ATOM_BIG_ENDIAN
2051 USHORT Reserved:6; 2531 USHORT Reserved:6;
2052 USHORT RGB888:1; 2532 USHORT RGB888:1;
2053 USHORT DoubleClock:1; 2533 USHORT DoubleClock:1;
2054 USHORT Interlace:1; 2534 USHORT Interlace:1;
2055 USHORT CompositeSync:1; 2535 USHORT CompositeSync:1;
2056 USHORT V_ReplicationBy2:1; 2536 USHORT V_ReplicationBy2:1;
2057 USHORT H_ReplicationBy2:1; 2537 USHORT H_ReplicationBy2:1;
2058 USHORT VerticalCutOff:1; 2538 USHORT VerticalCutOff:1;
2059 USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */ 2539 USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
2060 USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */ 2540 USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
2061 USHORT HorizontalCutOff:1; 2541 USHORT HorizontalCutOff:1;
2062#else 2542#else
2063 USHORT HorizontalCutOff:1; 2543 USHORT HorizontalCutOff:1;
2064 USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */ 2544 USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
2065 USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */ 2545 USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
2066 USHORT VerticalCutOff:1; 2546 USHORT VerticalCutOff:1;
2067 USHORT H_ReplicationBy2:1; 2547 USHORT H_ReplicationBy2:1;
2068 USHORT V_ReplicationBy2:1; 2548 USHORT V_ReplicationBy2:1;
2069 USHORT CompositeSync:1; 2549 USHORT CompositeSync:1;
2070 USHORT Interlace:1; 2550 USHORT Interlace:1;
2071 USHORT DoubleClock:1; 2551 USHORT DoubleClock:1;
2072 USHORT RGB888:1; 2552 USHORT RGB888:1;
2073 USHORT Reserved:6; 2553 USHORT Reserved:6;
2074#endif 2554#endif
2075} ATOM_MODE_MISC_INFO; 2555}ATOM_MODE_MISC_INFO;
2076 2556
2077typedef union _ATOM_MODE_MISC_INFO_ACCESS { 2557typedef union _ATOM_MODE_MISC_INFO_ACCESS
2078 ATOM_MODE_MISC_INFO sbfAccess; 2558{
2079 USHORT usAccess; 2559 ATOM_MODE_MISC_INFO sbfAccess;
2080} ATOM_MODE_MISC_INFO_ACCESS; 2560 USHORT usAccess;
2081 2561}ATOM_MODE_MISC_INFO_ACCESS;
2562
2082#else 2563#else
2083 2564
2084typedef union _ATOM_MODE_MISC_INFO_ACCESS { 2565typedef union _ATOM_MODE_MISC_INFO_ACCESS
2085 USHORT usAccess; 2566{
2086} ATOM_MODE_MISC_INFO_ACCESS; 2567 USHORT usAccess;
2087 2568}ATOM_MODE_MISC_INFO_ACCESS;
2569
2088#endif 2570#endif
2089 2571
2090/* usModeMiscInfo- */ 2572// usModeMiscInfo-
2091#define ATOM_H_CUTOFF 0x01 2573#define ATOM_H_CUTOFF 0x01
2092#define ATOM_HSYNC_POLARITY 0x02 /* 0=Active High, 1=Active Low */ 2574#define ATOM_HSYNC_POLARITY 0x02 //0=Active High, 1=Active Low
2093#define ATOM_VSYNC_POLARITY 0x04 /* 0=Active High, 1=Active Low */ 2575#define ATOM_VSYNC_POLARITY 0x04 //0=Active High, 1=Active Low
2094#define ATOM_V_CUTOFF 0x08 2576#define ATOM_V_CUTOFF 0x08
2095#define ATOM_H_REPLICATIONBY2 0x10 2577#define ATOM_H_REPLICATIONBY2 0x10
2096#define ATOM_V_REPLICATIONBY2 0x20 2578#define ATOM_V_REPLICATIONBY2 0x20
@@ -2099,10 +2581,10 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS {
2099#define ATOM_DOUBLE_CLOCK_MODE 0x100 2581#define ATOM_DOUBLE_CLOCK_MODE 0x100
2100#define ATOM_RGB888_MODE 0x200 2582#define ATOM_RGB888_MODE 0x200
2101 2583
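The ATOM_* flag values listed under usModeMiscInfo- occupy the same bit positions as the ATOM_MODE_MISC_INFO bitfield, so a flag word can be built and tested with plain masks or read back through ATOM_MODE_MISC_INFO_ACCESS on a matching build. A small standalone sketch, using only flag values visible in this excerpt:

#include <stdio.h>

typedef unsigned short USHORT;

#define ATOM_HSYNC_POLARITY    0x02   /* 0 = active high, 1 = active low */
#define ATOM_VSYNC_POLARITY    0x04
#define ATOM_DOUBLE_CLOCK_MODE 0x100

int main(void)
{
    /* Hypothetical mode: both sync polarities active low. */
    USHORT misc = ATOM_HSYNC_POLARITY | ATOM_VSYNC_POLARITY;

    printf("hsync active low: %d\n", !!(misc & ATOM_HSYNC_POLARITY));
    printf("vsync active low: %d\n", !!(misc & ATOM_VSYNC_POLARITY));
    printf("double clock:     %d\n", !!(misc & ATOM_DOUBLE_CLOCK_MODE));
    return 0;
}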
2102/* usRefreshRate- */ 2584//usRefreshRate-
2103#define ATOM_REFRESH_43 43 2585#define ATOM_REFRESH_43 43
2104#define ATOM_REFRESH_47 47 2586#define ATOM_REFRESH_47 47
2105#define ATOM_REFRESH_56 56 2587#define ATOM_REFRESH_56 56
2106#define ATOM_REFRESH_60 60 2588#define ATOM_REFRESH_60 60
2107#define ATOM_REFRESH_65 65 2589#define ATOM_REFRESH_65 65
2108#define ATOM_REFRESH_70 70 2590#define ATOM_REFRESH_70 70
@@ -2110,192 +2592,233 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS {
2110#define ATOM_REFRESH_75 75 2592#define ATOM_REFRESH_75 75
2111#define ATOM_REFRESH_85 85 2593#define ATOM_REFRESH_85 85
2112 2594
2113/* ATOM_MODE_TIMING data are exactly the same as VESA timing data. */ 2595// ATOM_MODE_TIMING data are exactly the same as VESA timing data.
2114/* Translation from EDID to ATOM_MODE_TIMING, use the following formula. */ 2596// Translation from EDID to ATOM_MODE_TIMING, use the following formula.
2115/* */ 2597//
2116/* VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK */ 2598// VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK
2117/* = EDID_HA + EDID_HBL */ 2599// = EDID_HA + EDID_HBL
2118/* VESA_HDISP = VESA_ACTIVE = EDID_HA */ 2600// VESA_HDISP = VESA_ACTIVE = EDID_HA
2119/* VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH */ 2601// VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH
2120/* = EDID_HA + EDID_HSO */ 2602// = EDID_HA + EDID_HSO
2121/* VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW */ 2603// VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW
2122/* VESA_BORDER = EDID_BORDER */ 2604// VESA_BORDER = EDID_BORDER
2123 2605
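The EDID-to-VESA translation above is simple arithmetic. The sketch below applies the stated formulas to an illustrative 1920x1080 detailed timing (the numbers are an example, not taken from this document) to produce the CRTC-style horizontal values an ATOM_MODE_TIMING entry would carry.

#include <stdio.h>

struct edid_h_timing {
    unsigned ha;    /* horizontal active           */
    unsigned hbl;   /* horizontal blanking         */
    unsigned hso;   /* horizontal sync offset      */
    unsigned hspw;  /* horizontal sync pulse width */
};

int main(void)
{
    struct edid_h_timing e = { 1920, 280, 88, 44 };   /* illustrative */

    unsigned h_total      = e.ha + e.hbl;   /* VESA_HTOTAL      */
    unsigned h_disp       = e.ha;           /* VESA_HDISP       */
    unsigned h_sync_start = e.ha + e.hso;   /* VESA_HSYNC_START */
    unsigned h_sync_width = e.hspw;         /* VESA_HSYNC_WIDTH */

    printf("usCRTC_H_Total=%u usCRTC_H_Disp=%u usCRTC_H_SyncStart=%u usCRTC_H_SyncWidth=%u\n",
           h_total, h_disp, h_sync_start, h_sync_width);
    return 0;
}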
2124/****************************************************************************/ 2606/****************************************************************************/
2125/* Structure used in SetCRTC_UsingDTDTimingTable */ 2607// Structure used in SetCRTC_UsingDTDTimingTable
2126/****************************************************************************/ 2608/****************************************************************************/
2127typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS { 2609typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS
2128 USHORT usH_Size; 2610{
2129 USHORT usH_Blanking_Time; 2611 USHORT usH_Size;
2130 USHORT usV_Size; 2612 USHORT usH_Blanking_Time;
2131 USHORT usV_Blanking_Time; 2613 USHORT usV_Size;
2132 USHORT usH_SyncOffset; 2614 USHORT usV_Blanking_Time;
2133 USHORT usH_SyncWidth; 2615 USHORT usH_SyncOffset;
2134 USHORT usV_SyncOffset; 2616 USHORT usH_SyncWidth;
2135 USHORT usV_SyncWidth; 2617 USHORT usV_SyncOffset;
2136 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; 2618 USHORT usV_SyncWidth;
2137 UCHAR ucH_Border; /* From DFP EDID */ 2619 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2138 UCHAR ucV_Border; 2620 UCHAR ucH_Border; // From DFP EDID
2139 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 2621 UCHAR ucV_Border;
2140 UCHAR ucPadding[3]; 2622 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
2141} SET_CRTC_USING_DTD_TIMING_PARAMETERS; 2623 UCHAR ucPadding[3];
2142 2624}SET_CRTC_USING_DTD_TIMING_PARAMETERS;
2143/****************************************************************************/ 2625
2144/* Structure used in SetCRTC_TimingTable */ 2626/****************************************************************************/
2145/****************************************************************************/ 2627// Structure used in SetCRTC_TimingTable
2146typedef struct _SET_CRTC_TIMING_PARAMETERS { 2628/****************************************************************************/
2147 USHORT usH_Total; /* horizontal total */ 2629typedef struct _SET_CRTC_TIMING_PARAMETERS
2148 USHORT usH_Disp; /* horizontal display */ 2630{
2149 USHORT usH_SyncStart; /* horozontal Sync start */ 2631 USHORT usH_Total; // horizontal total
2150 USHORT usH_SyncWidth; /* horizontal Sync width */ 2632 USHORT usH_Disp; // horizontal display
2151 USHORT usV_Total; /* vertical total */ 2633 USHORT usH_SyncStart; // horozontal Sync start
2152 USHORT usV_Disp; /* vertical display */ 2634 USHORT usH_SyncWidth; // horizontal Sync width
2153 USHORT usV_SyncStart; /* vertical Sync start */ 2635 USHORT usV_Total; // vertical total
2154 USHORT usV_SyncWidth; /* vertical Sync width */ 2636 USHORT usV_Disp; // vertical display
2155 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; 2637 USHORT usV_SyncStart; // vertical Sync start
2156 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ 2638 USHORT usV_SyncWidth; // vertical Sync width
2157 UCHAR ucOverscanRight; /* right */ 2639 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2158 UCHAR ucOverscanLeft; /* left */ 2640 UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
2159 UCHAR ucOverscanBottom; /* bottom */ 2641 UCHAR ucOverscanRight; // right
2160 UCHAR ucOverscanTop; /* top */ 2642 UCHAR ucOverscanLeft; // left
2161 UCHAR ucReserved; 2643 UCHAR ucOverscanBottom; // bottom
2162} SET_CRTC_TIMING_PARAMETERS; 2644 UCHAR ucOverscanTop; // top
2645 UCHAR ucReserved;
2646}SET_CRTC_TIMING_PARAMETERS;
2163#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS 2647#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
2164 2648
2165/****************************************************************************/ 2649/****************************************************************************/
2166/* Structure used in StandardVESA_TimingTable */ 2650// Structure used in StandardVESA_TimingTable
2167/* AnalogTV_InfoTable */ 2651// AnalogTV_InfoTable
2168/* ComponentVideoInfoTable */ 2652// ComponentVideoInfoTable
2169/****************************************************************************/ 2653/****************************************************************************/
2170typedef struct _ATOM_MODE_TIMING { 2654typedef struct _ATOM_MODE_TIMING
2171 USHORT usCRTC_H_Total; 2655{
2172 USHORT usCRTC_H_Disp; 2656 USHORT usCRTC_H_Total;
2173 USHORT usCRTC_H_SyncStart; 2657 USHORT usCRTC_H_Disp;
2174 USHORT usCRTC_H_SyncWidth; 2658 USHORT usCRTC_H_SyncStart;
2175 USHORT usCRTC_V_Total; 2659 USHORT usCRTC_H_SyncWidth;
2176 USHORT usCRTC_V_Disp; 2660 USHORT usCRTC_V_Total;
2177 USHORT usCRTC_V_SyncStart; 2661 USHORT usCRTC_V_Disp;
2178 USHORT usCRTC_V_SyncWidth; 2662 USHORT usCRTC_V_SyncStart;
2179 USHORT usPixelClock; /* in 10Khz unit */ 2663 USHORT usCRTC_V_SyncWidth;
2180 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; 2664 USHORT usPixelClock; //in 10Khz unit
2181 USHORT usCRTC_OverscanRight; 2665 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2182 USHORT usCRTC_OverscanLeft; 2666 USHORT usCRTC_OverscanRight;
2183 USHORT usCRTC_OverscanBottom; 2667 USHORT usCRTC_OverscanLeft;
2184 USHORT usCRTC_OverscanTop; 2668 USHORT usCRTC_OverscanBottom;
2185 USHORT usReserve; 2669 USHORT usCRTC_OverscanTop;
2186 UCHAR ucInternalModeNumber; 2670 USHORT usReserve;
2187 UCHAR ucRefreshRate; 2671 UCHAR ucInternalModeNumber;
2188} ATOM_MODE_TIMING; 2672 UCHAR ucRefreshRate;
2189 2673}ATOM_MODE_TIMING;
2190typedef struct _ATOM_DTD_FORMAT { 2674
2191 USHORT usPixClk; 2675typedef struct _ATOM_DTD_FORMAT
2192 USHORT usHActive; 2676{
2193 USHORT usHBlanking_Time; 2677 USHORT usPixClk;
2194 USHORT usVActive; 2678 USHORT usHActive;
2195 USHORT usVBlanking_Time; 2679 USHORT usHBlanking_Time;
2196 USHORT usHSyncOffset; 2680 USHORT usVActive;
2197 USHORT usHSyncWidth; 2681 USHORT usVBlanking_Time;
2198 USHORT usVSyncOffset; 2682 USHORT usHSyncOffset;
2199 USHORT usVSyncWidth; 2683 USHORT usHSyncWidth;
2200 USHORT usImageHSize; 2684 USHORT usVSyncOffset;
2201 USHORT usImageVSize; 2685 USHORT usVSyncWidth;
2202 UCHAR ucHBorder; 2686 USHORT usImageHSize;
2203 UCHAR ucVBorder; 2687 USHORT usImageVSize;
2204 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; 2688 UCHAR ucHBorder;
2205 UCHAR ucInternalModeNumber; 2689 UCHAR ucVBorder;
2206 UCHAR ucRefreshRate; 2690 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2207} ATOM_DTD_FORMAT; 2691 UCHAR ucInternalModeNumber;
2208 2692 UCHAR ucRefreshRate;
2209/****************************************************************************/ 2693}ATOM_DTD_FORMAT;
2210/* Structure used in LVDS_InfoTable */ 2694
2211/* * Need a document to describe this table */ 2695/****************************************************************************/
2212/****************************************************************************/ 2696// Structure used in LVDS_InfoTable
2697// * Need a document to describe this table
2698/****************************************************************************/
2213#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004 2699#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
2214#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008 2700#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
2215#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010 2701#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
2216#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020 2702#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
2217 2703
2218/* Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. */ 2704//ucTableFormatRevision=1
2219/* Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL */ 2705//ucTableContentRevision=1
2220#define LCDPANEL_CAP_READ_EDID 0x1 2706typedef struct _ATOM_LVDS_INFO
2221 2707{
2222/* ucTableFormatRevision=1 */ 2708 ATOM_COMMON_TABLE_HEADER sHeader;
2223/* ucTableContentRevision=1 */ 2709 ATOM_DTD_FORMAT sLCDTiming;
2224typedef struct _ATOM_LVDS_INFO { 2710 USHORT usModePatchTableOffset;
2225 ATOM_COMMON_TABLE_HEADER sHeader; 2711 USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
2226 ATOM_DTD_FORMAT sLCDTiming; 2712 USHORT usOffDelayInMs;
2227 USHORT usModePatchTableOffset; 2713 UCHAR ucPowerSequenceDigOntoDEin10Ms;
2228 USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */ 2714 UCHAR ucPowerSequenceDEtoBLOnin10Ms;
2229 USHORT usOffDelayInMs; 2715 UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
2230 UCHAR ucPowerSequenceDigOntoDEin10Ms; 2716 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
2231 UCHAR ucPowerSequenceDEtoBLOnin10Ms; 2717 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
2232 UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */ 2718 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
2233 /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */ 2719 UCHAR ucPanelDefaultRefreshRate;
2234 /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */ 2720 UCHAR ucPanelIdentification;
2235 /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */ 2721 UCHAR ucSS_Id;
2236 UCHAR ucPanelDefaultRefreshRate; 2722}ATOM_LVDS_INFO;
2237 UCHAR ucPanelIdentification; 2723
2238 UCHAR ucSS_Id; 2724//ucTableFormatRevision=1
2239} ATOM_LVDS_INFO; 2725//ucTableContentRevision=2
2240 2726typedef struct _ATOM_LVDS_INFO_V12
2241/* ucTableFormatRevision=1 */ 2727{
2242/* ucTableContentRevision=2 */ 2728 ATOM_COMMON_TABLE_HEADER sHeader;
2243typedef struct _ATOM_LVDS_INFO_V12 { 2729 ATOM_DTD_FORMAT sLCDTiming;
2244 ATOM_COMMON_TABLE_HEADER sHeader; 2730 USHORT usExtInfoTableOffset;
2245 ATOM_DTD_FORMAT sLCDTiming; 2731 USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
2246 USHORT usExtInfoTableOffset; 2732 USHORT usOffDelayInMs;
2247 USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */ 2733 UCHAR ucPowerSequenceDigOntoDEin10Ms;
2248 USHORT usOffDelayInMs; 2734 UCHAR ucPowerSequenceDEtoBLOnin10Ms;
2249 UCHAR ucPowerSequenceDigOntoDEin10Ms; 2735 UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
2250 UCHAR ucPowerSequenceDEtoBLOnin10Ms; 2736 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
2251 UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */ 2737 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
2252 /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */ 2738 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
2253 /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */ 2739 UCHAR ucPanelDefaultRefreshRate;
2254 /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */ 2740 UCHAR ucPanelIdentification;
2255 UCHAR ucPanelDefaultRefreshRate; 2741 UCHAR ucSS_Id;
2256 UCHAR ucPanelIdentification; 2742 USHORT usLCDVenderID;
2257 UCHAR ucSS_Id; 2743 USHORT usLCDProductID;
2258 USHORT usLCDVenderID; 2744 UCHAR ucLCDPanel_SpecialHandlingCap;
2259 USHORT usLCDProductID; 2745 UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
2260 UCHAR ucLCDPanel_SpecialHandlingCap; 2746 UCHAR ucReserved[2];
2261 UCHAR ucPanelInfoSize; /* start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable */ 2747}ATOM_LVDS_INFO_V12;
2262 UCHAR ucReserved[2]; 2748
2263} ATOM_LVDS_INFO_V12; 2749//Definitions for ucLCDPanel_SpecialHandlingCap:
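The ucLVDS_Misc bit assignments documented in the struct above decode with a handful of masks. A standalone sketch follows; the mask names are illustrative, only the bit positions come from the comment.

#include <stdio.h>

typedef unsigned char UCHAR;

#define LVDS_MISC_DUAL_LINK  (1 << 0)  /* Bit0: 0 = single, 1 = dual       */
#define LVDS_MISC_RGB888     (1 << 1)  /* Bit1: 0 = 666RGB, 1 = 888RGB     */
#define LVDS_MISC_GREY_SHIFT 2         /* Bit3:2: grey level               */
#define LVDS_MISC_FPDI       (1 << 4)  /* Bit4: 0 = LDI, 1 = FPDI (RGB888) */
#define LVDS_MISC_SPATIAL    (1 << 5)  /* Bit5: spatial dithering enabled  */
#define LVDS_MISC_TEMPORAL   (1 << 6)  /* Bit6: temporal dithering enabled */

int main(void)
{
    UCHAR misc = 0x23;  /* hypothetical: dual link, 888RGB, spatial dither */

    printf("dual link: %d, 888RGB: %d, grey level: %d, spatial: %d, temporal: %d\n",
           !!(misc & LVDS_MISC_DUAL_LINK), !!(misc & LVDS_MISC_RGB888),
           (misc >> LVDS_MISC_GREY_SHIFT) & 0x3,
           !!(misc & LVDS_MISC_SPATIAL), !!(misc & LVDS_MISC_TEMPORAL));
    return 0;
}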
2750
2751//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
2752//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
2753#define LCDPANEL_CAP_READ_EDID 0x1
2754
2755//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or eDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
2756//with multiple supported refresh rates in usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
2757//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12 onward.
2758#define LCDPANEL_CAP_DRR_SUPPORTED 0x2
2759
2760//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
2761#define LCDPANEL_CAP_eDP 0x4
2762
2763
2764//Color Bit Depth definition in EDID V1.4 @BYTE 14h
2765//Bit 6 5 4
2766 // 0 0 0 - Color bit depth is undefined
2767 // 0 0 1 - 6 Bits per Primary Color
2768 // 0 1 0 - 8 Bits per Primary Color
2769 // 0 1 1 - 10 Bits per Primary Color
2770 // 1 0 0 - 12 Bits per Primary Color
2771 // 1 0 1 - 14 Bits per Primary Color
2772 // 1 1 0 - 16 Bits per Primary Color
2773 // 1 1 1 - Reserved
2774
2775#define PANEL_COLOR_BIT_DEPTH_MASK 0x70
2776
2777// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled}
2778#define PANEL_RANDOM_DITHER 0x80
2779#define PANEL_RANDOM_DITHER_MASK 0x80
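The EDID 1.4 byte-14h colour-depth codes tabulated above map linearly onto bits per primary colour (code 1 is 6 bpc and each further step adds 2). A standalone sketch of that decode using PANEL_COLOR_BIT_DEPTH_MASK; the helper name and the sample bytes are illustrative.

#include <stdio.h>

#define PANEL_COLOR_BIT_DEPTH_MASK 0x70

static int bits_per_color(unsigned char edid_byte_14h)
{
    unsigned code = (edid_byte_14h & PANEL_COLOR_BIT_DEPTH_MASK) >> 4;

    if (code == 0 || code == 7)
        return 0;               /* undefined or reserved */
    return 4 + 2 * code;        /* 1..6 -> 6, 8, 10, 12, 14, 16 bpc */
}

int main(void)
{
    printf("%d\n", bits_per_color(0x20));  /* code 2 -> 8 bits per colour  */
    printf("%d\n", bits_per_color(0xA5));  /* code 2 -> 8, other bits ignored */
    return 0;
}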
2780
2264 2781
2265#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 2782#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12
2266 2783
2267typedef struct _ATOM_PATCH_RECORD_MODE { 2784typedef struct _ATOM_PATCH_RECORD_MODE
2268 UCHAR ucRecordType; 2785{
2269 USHORT usHDisp; 2786 UCHAR ucRecordType;
2270 USHORT usVDisp; 2787 USHORT usHDisp;
2271} ATOM_PATCH_RECORD_MODE; 2788 USHORT usVDisp;
2789}ATOM_PATCH_RECORD_MODE;
2272 2790
2273typedef struct _ATOM_LCD_RTS_RECORD { 2791typedef struct _ATOM_LCD_RTS_RECORD
2274 UCHAR ucRecordType; 2792{
2275 UCHAR ucRTSValue; 2793 UCHAR ucRecordType;
2276} ATOM_LCD_RTS_RECORD; 2794 UCHAR ucRTSValue;
2795}ATOM_LCD_RTS_RECORD;
2277 2796
2278/* !! If the record below exists, it should always be the first record for easy use in command table!!! */ 2797//!! If the record below exists, it should always be the first record for easy use in command table!!!
2279typedef struct _ATOM_LCD_MODE_CONTROL_CAP { 2798// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead.
2280 UCHAR ucRecordType; 2799typedef struct _ATOM_LCD_MODE_CONTROL_CAP
2281 USHORT usLCDCap; 2800{
2282} ATOM_LCD_MODE_CONTROL_CAP; 2801 UCHAR ucRecordType;
2802 USHORT usLCDCap;
2803}ATOM_LCD_MODE_CONTROL_CAP;
2283 2804
2284#define LCD_MODE_CAP_BL_OFF 1 2805#define LCD_MODE_CAP_BL_OFF 1
2285#define LCD_MODE_CAP_CRTC_OFF 2 2806#define LCD_MODE_CAP_CRTC_OFF 2
2286#define LCD_MODE_CAP_PANEL_OFF 4 2807#define LCD_MODE_CAP_PANEL_OFF 4
2287 2808
2288typedef struct _ATOM_FAKE_EDID_PATCH_RECORD { 2809typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
2289 UCHAR ucRecordType; 2810{
2290 UCHAR ucFakeEDIDLength; 2811 UCHAR ucRecordType;
2291 UCHAR ucFakeEDIDString[1]; /* This actually has ucFakeEdidLength elements. */ 2812 UCHAR ucFakeEDIDLength;
2813 UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements.
2292} ATOM_FAKE_EDID_PATCH_RECORD; 2814} ATOM_FAKE_EDID_PATCH_RECORD;
2293 2815
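Because ucFakeEDIDString[1] really carries ucFakeEDIDLength bytes, a parser walking LCD patch records has to compute each record's true size rather than trusting sizeof. A standalone sketch with a local copy of the structure; the record-type value used below is hypothetical.

#include <stddef.h>
#include <stdio.h>

typedef unsigned char UCHAR;

typedef struct {
    UCHAR ucRecordType;
    UCHAR ucFakeEDIDLength;
    UCHAR ucFakeEDIDString[1];   /* really ucFakeEDIDLength elements */
} FAKE_EDID_PATCH_RECORD;

static size_t fake_edid_record_size(const FAKE_EDID_PATCH_RECORD *rec)
{
    /* Header bytes up to the flexible payload, plus the payload itself. */
    return offsetof(FAKE_EDID_PATCH_RECORD, ucFakeEDIDString) +
           rec->ucFakeEDIDLength;
}

int main(void)
{
    FAKE_EDID_PATCH_RECORD rec = { 3 /* hypothetical type */, 128, { 0 } };

    printf("record spans %zu bytes\n", fake_edid_record_size(&rec));
    return 0;
}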
2294typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD { 2816typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD
2295 UCHAR ucRecordType; 2817{
2296 USHORT usHSize; 2818 UCHAR ucRecordType;
2297 USHORT usVSize; 2819 USHORT usHSize;
2298} ATOM_PANEL_RESOLUTION_PATCH_RECORD; 2820 USHORT usVSize;
2821}ATOM_PANEL_RESOLUTION_PATCH_RECORD;
2299 2822
2300#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1 2823#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1
2301#define LCD_RTS_RECORD_TYPE 2 2824#define LCD_RTS_RECORD_TYPE 2
@@ -2306,21 +2829,25 @@ typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD {
2306 2829
2307/****************************Spread Spectrum Info Table Definitions **********************/ 2830/****************************Spread Spectrum Info Table Definitions **********************/
2308 2831
2309/* ucTableFormatRevision=1 */ 2832//ucTableFormatRevision=1
2310/* ucTableContentRevision=2 */ 2833//ucTableContentRevision=2
2311typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT { 2834typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT
2312 USHORT usSpreadSpectrumPercentage; 2835{
2313 UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ 2836 USHORT usSpreadSpectrumPercentage;
2314 UCHAR ucSS_Step; 2837 UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Bit2=1: PCIE REFCLK SS =0 iternal PPLL SS Others:TBD
2315 UCHAR ucSS_Delay; 2838 UCHAR ucSS_Step;
2316 UCHAR ucSS_Id; 2839 UCHAR ucSS_Delay;
2317 UCHAR ucRecommendedRef_Div; 2840 UCHAR ucSS_Id;
2318 UCHAR ucSS_Range; /* it was reserved for V11 */ 2841 UCHAR ucRecommendedRef_Div;
2319} ATOM_SPREAD_SPECTRUM_ASSIGNMENT; 2842 UCHAR ucSS_Range; //it was reserved for V11
2843}ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
2320 2844
2321#define ATOM_MAX_SS_ENTRY 16 2845#define ATOM_MAX_SS_ENTRY 16
2322#define ATOM_DP_SS_ID1 0x0f1 /* SS modulation freq=30k */ 2846#define ATOM_DP_SS_ID1 0x0f1 // SS ID for internal DP stream at 2.7Ghz. if ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for internal DP stream at 1.62Ghz as well.
2323#define ATOM_DP_SS_ID2 0x0f2 /* SS modulation freq=33k */ 2847#define ATOM_DP_SS_ID2 0x0f2 // SS ID for internal DP stream at 1.62Ghz, if it exists in SS_InfoTable.
2848#define ATOM_LVLINK_2700MHz_SS_ID 0x0f3 // SS ID for LV link translator chip at 2.7Ghz
2849#define ATOM_LVLINK_1620MHz_SS_ID 0x0f4 // SS ID for LV link translator chip at 1.62Ghz
2850
2324 2851
2325#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000 2852#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
2326#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000 2853#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
@@ -2329,29 +2856,30 @@ typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
2329#define ATOM_INTERNAL_SS_MASK 0x00000000 2856#define ATOM_INTERNAL_SS_MASK 0x00000000
2330#define ATOM_EXTERNAL_SS_MASK 0x00000002 2857#define ATOM_EXTERNAL_SS_MASK 0x00000002
2331#define EXEC_SS_STEP_SIZE_SHIFT 2 2858#define EXEC_SS_STEP_SIZE_SHIFT 2
2332#define EXEC_SS_DELAY_SHIFT 4 2859#define EXEC_SS_DELAY_SHIFT 4
2333#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4 2860#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4
2334 2861
2335typedef struct _ATOM_SPREAD_SPECTRUM_INFO { 2862typedef struct _ATOM_SPREAD_SPECTRUM_INFO
2336 ATOM_COMMON_TABLE_HEADER sHeader; 2863{
2337 ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY]; 2864 ATOM_COMMON_TABLE_HEADER sHeader;
2338} ATOM_SPREAD_SPECTRUM_INFO; 2865 ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
2339 2866}ATOM_SPREAD_SPECTRUM_INFO;
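A spread-spectrum consumer typically scans asSS_Info[] for the ucSS_Id it cares about and then inspects ucSpreadSpectrumType. Below is a standalone sketch using only the IDs and the external-SS mask shown in this excerpt; the table contents are made up, and since the percentage units are not spelled out here, the value is just echoed.

#include <stdio.h>

typedef unsigned short USHORT;
typedef unsigned char  UCHAR;

#define ATOM_MAX_SS_ENTRY     16
#define ATOM_DP_SS_ID1        0x0f1
#define ATOM_EXTERNAL_SS_MASK 0x00000002

typedef struct {
    USHORT usSpreadSpectrumPercentage;
    UCHAR  ucSpreadSpectrumType;
    UCHAR  ucSS_Step;
    UCHAR  ucSS_Delay;
    UCHAR  ucSS_Id;
    UCHAR  ucRecommendedRef_Div;
    UCHAR  ucSS_Range;
} SS_ASSIGNMENT;

int main(void)
{
    /* Hypothetical table: one internal entry for the 2.7 GHz DP stream. */
    SS_ASSIGNMENT ss[ATOM_MAX_SS_ENTRY] = {
        { 25, 0x00, 0, 0, ATOM_DP_SS_ID1, 0, 0 },
    };
    int i;

    for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
        if (ss[i].ucSS_Id != ATOM_DP_SS_ID1)
            continue;
        printf("DP SS entry: percentage=%d external=%d\n",
               ss[i].usSpreadSpectrumPercentage,
               !!(ss[i].ucSpreadSpectrumType & ATOM_EXTERNAL_SS_MASK));
        break;
    }
    return 0;
}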
2340/****************************************************************************/ 2867
2341/* Structure used in AnalogTV_InfoTable (Top level) */ 2868/****************************************************************************/
2342/****************************************************************************/ 2869// Structure used in AnalogTV_InfoTable (Top level)
2343/* ucTVBootUpDefaultStd definiton: */ 2870/****************************************************************************/
2344 2871//ucTVBootUpDefaultStd definiton:
2345/* ATOM_TV_NTSC 1 */ 2872
2346/* ATOM_TV_NTSCJ 2 */ 2873//ATOM_TV_NTSC 1
2347/* ATOM_TV_PAL 3 */ 2874//ATOM_TV_NTSCJ 2
2348/* ATOM_TV_PALM 4 */ 2875//ATOM_TV_PAL 3
2349/* ATOM_TV_PALCN 5 */ 2876//ATOM_TV_PALM 4
2350/* ATOM_TV_PALN 6 */ 2877//ATOM_TV_PALCN 5
2351/* ATOM_TV_PAL60 7 */ 2878//ATOM_TV_PALN 6
2352/* ATOM_TV_SECAM 8 */ 2879//ATOM_TV_PAL60 7
2353 2880//ATOM_TV_SECAM 8
2354/* ucTVSuppportedStd definition: */ 2881
2882//ucTVSupportedStd definition:
2355#define NTSC_SUPPORT 0x1 2883#define NTSC_SUPPORT 0x1
2356#define NTSCJ_SUPPORT 0x2 2884#define NTSCJ_SUPPORT 0x2
2357 2885
@@ -2364,46 +2892,58 @@ typedef struct _ATOM_SPREAD_SPECTRUM_INFO {
2364 2892
2365#define MAX_SUPPORTED_TV_TIMING 2 2893#define MAX_SUPPORTED_TV_TIMING 2
2366 2894
2367typedef struct _ATOM_ANALOG_TV_INFO { 2895typedef struct _ATOM_ANALOG_TV_INFO
2368 ATOM_COMMON_TABLE_HEADER sHeader; 2896{
2369 UCHAR ucTV_SupportedStandard; 2897 ATOM_COMMON_TABLE_HEADER sHeader;
2370 UCHAR ucTV_BootUpDefaultStandard; 2898 UCHAR ucTV_SupportedStandard;
2371 UCHAR ucExt_TV_ASIC_ID; 2899 UCHAR ucTV_BootUpDefaultStandard;
2372 UCHAR ucExt_TV_ASIC_SlaveAddr; 2900 UCHAR ucExt_TV_ASIC_ID;
2373 /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; */ 2901 UCHAR ucExt_TV_ASIC_SlaveAddr;
2374 ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING]; 2902 /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];*/
2375} ATOM_ANALOG_TV_INFO; 2903 ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
2904}ATOM_ANALOG_TV_INFO;
2376 2905
2377#define MAX_SUPPORTED_TV_TIMING_V1_2 3 2906#define MAX_SUPPORTED_TV_TIMING_V1_2 3
2378 2907
2379typedef struct _ATOM_ANALOG_TV_INFO_V1_2 { 2908typedef struct _ATOM_ANALOG_TV_INFO_V1_2
2380 ATOM_COMMON_TABLE_HEADER sHeader; 2909{
2381 UCHAR ucTV_SupportedStandard; 2910 ATOM_COMMON_TABLE_HEADER sHeader;
2382 UCHAR ucTV_BootUpDefaultStandard; 2911 UCHAR ucTV_SupportedStandard;
2383 UCHAR ucExt_TV_ASIC_ID; 2912 UCHAR ucTV_BootUpDefaultStandard;
2384 UCHAR ucExt_TV_ASIC_SlaveAddr; 2913 UCHAR ucExt_TV_ASIC_ID;
2385 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; 2914 UCHAR ucExt_TV_ASIC_SlaveAddr;
2386} ATOM_ANALOG_TV_INFO_V1_2; 2915 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];
2916}ATOM_ANALOG_TV_INFO_V1_2;
2917
2918typedef struct _ATOM_DPCD_INFO
2919{
2920 UCHAR ucRevisionNumber; //10h : Revision 1.0; 11h : Revision 1.1
2921 UCHAR ucMaxLinkRate; //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane
2922 UCHAR ucMaxLane; //Bits 4:0 = MAX_LANE_COUNT (1/2/4). Bit 7 = ENHANCED_FRAME_CAP
2923 UCHAR ucMaxDownSpread; //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec)
2924}ATOM_DPCD_INFO;
2925
2926#define ATOM_DPCD_MAX_LANE_MASK 0x1F
2387 2927
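Decoding ATOM_DPCD_INFO is a matter of masking the fields its comments describe. A standalone sketch with a local copy of the structure; the "x 270 Mbps" scaling is inferred from the two link-rate codes listed above (06h -> 1.62 Gbps, 0Ah -> 2.7 Gbps), and the sample values are hypothetical.

#include <stdio.h>

typedef unsigned char UCHAR;

#define ATOM_DPCD_MAX_LANE_MASK 0x1F

typedef struct {
    UCHAR ucRevisionNumber;  /* 10h = rev 1.0, 11h = rev 1.1               */
    UCHAR ucMaxLinkRate;     /* 06h = 1.62 Gbps/lane, 0Ah = 2.7 Gbps/lane  */
    UCHAR ucMaxLane;         /* [4:0] = lane count, bit 7 = enhanced frame */
    UCHAR ucMaxDownSpread;   /* bit 0 = 0.5% downspread supported          */
} DPCD_INFO;

int main(void)
{
    DPCD_INFO dpcd = { 0x11, 0x0A, 0x84, 0x01 };  /* hypothetical sink */

    printf("DPCD rev %d.%d\n", dpcd.ucRevisionNumber >> 4,
           dpcd.ucRevisionNumber & 0xF);
    printf("max link rate: %d Mbps/lane\n", dpcd.ucMaxLinkRate * 270);
    printf("max lanes: %d, enhanced framing: %d, downspread: %d\n",
           dpcd.ucMaxLane & ATOM_DPCD_MAX_LANE_MASK,
           !!(dpcd.ucMaxLane & 0x80), dpcd.ucMaxDownSpread & 0x1);
    return 0;
}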
2388/**************************************************************************/ 2928/**************************************************************************/
2389/* VRAM usage and their definitions */ 2929// VRAM usage and their defintions
2390 2930
2391/* One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. */ 2931// One chunk of VRAM used by Bios are for HWICON surfaces,EDID data.
2392/* Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. */ 2932// Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below.
2393/* All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned! */ 2933// All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned!
2394/* To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR */ 2934// To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR
2395/* To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX */ 2935// To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX
2396 2936
2397#ifndef VESA_MEMORY_IN_64K_BLOCK 2937#ifndef VESA_MEMORY_IN_64K_BLOCK
2398#define VESA_MEMORY_IN_64K_BLOCK 0x100 /* 256*64K=16Mb (Max. VESA memory is 16Mb!) */ 2938#define VESA_MEMORY_IN_64K_BLOCK 0x100 //256*64K=16Mb (Max. VESA memory is 16Mb!)
2399#endif 2939#endif
2400 2940
2401#define ATOM_EDID_RAW_DATASIZE 256 /* In Bytes */ 2941#define ATOM_EDID_RAW_DATASIZE 256 //In Bytes
2402#define ATOM_HWICON_SURFACE_SIZE 4096 /* In Bytes */ 2942#define ATOM_HWICON_SURFACE_SIZE 4096 //In Bytes
2403#define ATOM_HWICON_INFOTABLE_SIZE 32 2943#define ATOM_HWICON_INFOTABLE_SIZE 32
2404#define MAX_DTD_MODE_IN_VRAM 6 2944#define MAX_DTD_MODE_IN_VRAM 6
2405#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) /* 28= (SIZEOF ATOM_DTD_FORMAT) */ 2945#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT)
2406#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) /* 32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) */ 2946#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
2407#define DFP_ENCODER_TYPE_OFFSET 0x80 2947#define DFP_ENCODER_TYPE_OFFSET 0x80
2408#define DP_ENCODER_LANE_NUM_OFFSET 0x84 2948#define DP_ENCODER_LANE_NUM_OFFSET 0x84
2409#define DP_ENCODER_LINK_RATE_OFFSET 0x88 2949#define DP_ENCODER_LINK_RATE_OFFSET 0x88
@@ -2417,7 +2957,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
2417 2957
2418#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2958#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2419#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2959#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2420#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2960#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2421 2961
2422#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2962#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2423 2963
@@ -2431,13 +2971,13 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
2431 2971
2432#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2972#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2433#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2973#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2434#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2974#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2435 2975
2436#define ATOM_TV2_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2976#define ATOM_DFP6_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2437#define ATOM_TV2_DTD_MODE_TBL_ADDR (ATOM_TV2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2977#define ATOM_DFP6_DTD_MODE_TBL_ADDR (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2438#define ATOM_TV2_STD_MODE_TBL_ADDR (ATOM_TV2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2978#define ATOM_DFP6_STD_MODE_TBL_ADDR (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2439 2979
2440#define ATOM_DFP2_EDID_ADDR (ATOM_TV2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) 2980#define ATOM_DFP2_EDID_ADDR (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2441#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2981#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2442#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2982#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2443 2983
@@ -2457,533 +2997,850 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
2457#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 2997#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2458#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 2998#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2459 2999
2460#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE) 3000#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2461 3001
2462#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 256) 3002#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256)
2463#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START + 512) 3003#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512
2464 3004
2465/* The size below is in Kb! */ 3005//The size below is in Kb!
2466#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) 3006#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
2467 3007
2468#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L 3008#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
2469#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 3009#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
2470#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 3010#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
2471#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0 3011#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0
2472 3012
2473/***********************************************************************************/ 3013/***********************************************************************************/
2474/* Structure used in VRAM_UsageByFirmwareTable */ 3014// Structure used in VRAM_UsageByFirmwareTable
2475/* Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm */ 3015// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm
2476/* at running time. */ 3016// at running time.
2477/* note2: From RV770, the memory is more than 32bit addressable, so we will change */ 3017// note2: From RV770, the memory is more than 32bit addressable, so we will change
2478/* ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains */ 3018// ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains
2479/* exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware */ 3019// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
2480/* (in offset to start of memory address) is KB aligned instead of byte aligend. */ 3020// (in offset to start of memory address) is KB aligned instead of byte aligend.
2481/***********************************************************************************/ 3021/***********************************************************************************/
3022// Note3:
3023/* If we change usReserved to "usFBUsedbyDrvInKB", then to the VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA and non-VGA adapters.
3024For CAIL, the size of the FB access area is known; the only thing missing is the offset of the FB access area, so we can have:
3025
3026If (ulStartAddrUsedByFirmware!=0)
3027FBAccessAreaOffset= ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB;
3028The reserved area has been claimed by the VBIOS including this FB access area; CAIL doesn't need to reserve any extra area for this purpose.
3029else //Non VGA case
3030 if (FB_Size<=2Gb)
3031 FBAccessAreaOffset= FB_Size - usFBUsedbyDrvInKB;
3032 else
3033 FBAccessAreaOffset= Aper_Size - usFBUsedbyDrvInKB
3034
3035CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in the non-VGA case.*/
3036
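A C rendering of the pseudocode in the comment above, kept entirely in KB for simplicity (an assumption, since the comment does not pin down units); the field names follow the V1_5 reserve-info layout defined a few lines below, and the frame-buffer and aperture sizes are illustrative.

#include <stdio.h>

typedef unsigned long  ULONG;
typedef unsigned short USHORT;

static ULONG fb_access_area_offset_kb(ULONG ulStartAddrUsedByFirmware,
                                      USHORT usFBUsedByDrvInKb,
                                      ULONG fb_size_kb, ULONG aper_size_kb)
{
    if (ulStartAddrUsedByFirmware != 0)
        /* VGA case: VBIOS already reserved the area, FB access included. */
        return ulStartAddrUsedByFirmware - usFBUsedByDrvInKb;

    /* Non-VGA case: CAIL claims the area itself. */
    if (fb_size_kb <= 2UL * 1024 * 1024)      /* <= 2 GB */
        return fb_size_kb - usFBUsedByDrvInKb;
    return aper_size_kb - usFBUsedByDrvInKb;
}

int main(void)
{
    /* Illustrative: 1 GB VRAM, 256 MB aperture, 20 MB used by the driver. */
    printf("offset = %lu KB\n",
           fb_access_area_offset_kb(0, 20 * 1024,
                                    1024UL * 1024, 256UL * 1024));
    return 0;
}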
2482#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1 3037#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
2483 3038
2484typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO { 3039typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
2485 ULONG ulStartAddrUsedByFirmware; 3040{
2486 USHORT usFirmwareUseInKb; 3041 ULONG ulStartAddrUsedByFirmware;
2487 USHORT usReserved; 3042 USHORT usFirmwareUseInKb;
2488} ATOM_FIRMWARE_VRAM_RESERVE_INFO; 3043 USHORT usReserved;
3044}ATOM_FIRMWARE_VRAM_RESERVE_INFO;
2489 3045
2490typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE { 3046typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE
2491 ATOM_COMMON_TABLE_HEADER sHeader; 3047{
2492 ATOM_FIRMWARE_VRAM_RESERVE_INFO 3048 ATOM_COMMON_TABLE_HEADER sHeader;
2493 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; 3049 ATOM_FIRMWARE_VRAM_RESERVE_INFO asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
2494} ATOM_VRAM_USAGE_BY_FIRMWARE; 3050}ATOM_VRAM_USAGE_BY_FIRMWARE;
2495 3051
2496/****************************************************************************/ 3052// change verion to 1.5, when allow driver to allocate the vram area for command table access.
2497/* Structure used in GPIO_Pin_LUTTable */ 3053typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5
2498/****************************************************************************/ 3054{
2499typedef struct _ATOM_GPIO_PIN_ASSIGNMENT { 3055 ULONG ulStartAddrUsedByFirmware;
2500 USHORT usGpioPin_AIndex; 3056 USHORT usFirmwareUseInKb;
2501 UCHAR ucGpioPinBitShift; 3057 USHORT usFBUsedByDrvInKb;
2502 UCHAR ucGPIO_ID; 3058}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5;
2503} ATOM_GPIO_PIN_ASSIGNMENT;
2504 3059
2505typedef struct _ATOM_GPIO_PIN_LUT { 3060typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5
2506 ATOM_COMMON_TABLE_HEADER sHeader; 3061{
2507 ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1]; 3062 ATOM_COMMON_TABLE_HEADER sHeader;
2508} ATOM_GPIO_PIN_LUT; 3063 ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
3064}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5;
3065
3066/****************************************************************************/
3067// Structure used in GPIO_Pin_LUTTable
3068/****************************************************************************/
3069typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
3070{
3071 USHORT usGpioPin_AIndex;
3072 UCHAR ucGpioPinBitShift;
3073 UCHAR ucGPIO_ID;
3074}ATOM_GPIO_PIN_ASSIGNMENT;
2509 3075
2510/****************************************************************************/ 3076typedef struct _ATOM_GPIO_PIN_LUT
2511/* Structure used in ComponentVideoInfoTable */ 3077{
2512/****************************************************************************/ 3078 ATOM_COMMON_TABLE_HEADER sHeader;
3079 ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
3080}ATOM_GPIO_PIN_LUT;
3081
3082/****************************************************************************/
3083// Structure used in ComponentVideoInfoTable
3084/****************************************************************************/
2513#define GPIO_PIN_ACTIVE_HIGH 0x1 3085#define GPIO_PIN_ACTIVE_HIGH 0x1
2514 3086
2515#define MAX_SUPPORTED_CV_STANDARDS 5 3087#define MAX_SUPPORTED_CV_STANDARDS 5
2516 3088
2517/* definitions for ATOM_D_INFO.ucSettings */ 3089// definitions for ATOM_D_INFO.ucSettings
2518#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F /* [4:0] */ 3090#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F // [4:0]
2519#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 /* [6:5] = must be zeroed out */ 3091#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 // [6:5] = must be zeroed out
2520#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 /* [7] */ 3092#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 // [7]
2521 3093
2522typedef struct _ATOM_GPIO_INFO { 3094typedef struct _ATOM_GPIO_INFO
2523 USHORT usAOffset; 3095{
2524 UCHAR ucSettings; 3096 USHORT usAOffset;
2525 UCHAR ucReserved; 3097 UCHAR ucSettings;
2526} ATOM_GPIO_INFO; 3098 UCHAR ucReserved;
3099}ATOM_GPIO_INFO;
2527 3100
2528/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) */ 3101// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector)
2529#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2 3102#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2
2530 3103
2531/* definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i */ 3104// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i
2532#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 /* [7]; */ 3105#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 //[7];
2533#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F /* [6:0] */ 3106#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F //[6:0]
2534 3107
2535/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode */ 3108// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode
2536/* Line 3 out put 5V. */ 3109//Line 3 out put 5V.
2537#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 /* represent gpio 3 state for 16:9 */ 3110#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 //represent gpio 3 state for 16:9
2538#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 /* represent gpio 4 state for 16:9 */ 3111#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 //represent gpio 4 state for 16:9
2539#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0 3112#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0
2540 3113
2541/* Line 3 out put 2.2V */ 3114//Line 3 out put 2.2V
2542#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 /* represent gpio 3 state for 4:3 Letter box */ 3115#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 //represent gpio 3 state for 4:3 Letter box
2543#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 /* represent gpio 4 state for 4:3 Letter box */ 3116#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 //represent gpio 4 state for 4:3 Letter box
2544#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2 3117#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
2545 3118
2546/* Line 3 out put 0V */ 3119//Line 3 out put 0V
2547#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 /* represent gpio 3 state for 4:3 */ 3120#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 //represent gpio 3 state for 4:3
2548#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 /* represent gpio 4 state for 4:3 */ 3121#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 //represent gpio 4 state for 4:3
2549#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4 3122#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4
2550 3123
2551#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F /* bit [5:0] */ 3124#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F // bit [5:0]
2552 3125
2553#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 /* bit 7 */ 3126#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 //bit 7
2554 3127
2555/* GPIO bit index in gpio setting per mode value, also represend the block no. in gpio blocks. */ 3128//GPIO bit index in gpio setting per mode value, also represend the block no. in gpio blocks.
2556#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 /* bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */ 3129#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode.
2557#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 /* bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */ 3130#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode.
2558 3131
typedef struct _ATOM_COMPONENT_VIDEO_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT usMask_PinRegisterIndex;
  USHORT usEN_PinRegisterIndex;
  USHORT usY_PinRegisterIndex;
  USHORT usA_PinRegisterIndex;
  UCHAR  ucBitShift;
  UCHAR  ucPinActiveState;       //ucPinActiveState: Bit0=1 active high, =0 active low
  ATOM_DTD_FORMAT sReserved;     // must be zeroed out
  UCHAR  ucMiscInfo;
  UCHAR  uc480i;
  UCHAR  uc480p;
  UCHAR  uc720p;
  UCHAR  uc1080i;
  UCHAR  ucLetterBoxMode;
  UCHAR  ucReserved[3];
  UCHAR  ucNumOfWbGpioBlocks;    //For Component video D-Connector support. If zere, NTSC type connector
  ATOM_GPIO_INFO  aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
  ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
}ATOM_COMPONENT_VIDEO_INFO;

//ucTableFormatRevision=2
//ucTableContentRevision=1
typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  UCHAR  ucMiscInfo;
  UCHAR  uc480i;
  UCHAR  uc480p;
  UCHAR  uc720p;
  UCHAR  uc1080i;
  UCHAR  ucReserved;
  UCHAR  ucLetterBoxMode;
  UCHAR  ucNumOfWbGpioBlocks;    //For Component video D-Connector support. If zere, NTSC type connector
  ATOM_GPIO_INFO  aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
  ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
}ATOM_COMPONENT_VIDEO_INFO_V21;

#define ATOM_COMPONENT_VIDEO_INFO_LAST  ATOM_COMPONENT_VIDEO_INFO_V21

/****************************************************************************/
// Structure used in object_InfoTable
/****************************************************************************/
typedef struct _ATOM_OBJECT_HEADER
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT usDeviceSupport;
  USHORT usConnectorObjectTableOffset;
  USHORT usRouterObjectTableOffset;
  USHORT usEncoderObjectTableOffset;
  USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
  USHORT usDisplayPathTableOffset;
}ATOM_OBJECT_HEADER;

typedef struct _ATOM_OBJECT_HEADER_V3
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT usDeviceSupport;
  USHORT usConnectorObjectTableOffset;
  USHORT usRouterObjectTableOffset;
  USHORT usEncoderObjectTableOffset;
  USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
  USHORT usDisplayPathTableOffset;
  USHORT usMiscObjectTableOffset;
}ATOM_OBJECT_HEADER_V3;

typedef struct _ATOM_DISPLAY_OBJECT_PATH
{
  USHORT usDeviceTag;        //supported device
  USHORT usSize;             //the size of ATOM_DISPLAY_OBJECT_PATH
  USHORT usConnObjectId;     //Connector Object ID
  USHORT usGPUObjectId;      //GPU ID
  USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector.
}ATOM_DISPLAY_OBJECT_PATH;

typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
{
  UCHAR ucNumOfDispPath;
  UCHAR ucVersion;
  UCHAR ucPadding[2];
  ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
}ATOM_DISPLAY_OBJECT_PATH_TABLE;

typedef struct _ATOM_OBJECT       //each object has this structure
{
  USHORT usObjectID;
  USHORT usSrcDstTableOffset;
  USHORT usRecordOffset;          //this pointing to a bunch of records defined below
  USHORT usReserved;
}ATOM_OBJECT;

typedef struct _ATOM_OBJECT_TABLE //Above 4 object table offset pointing to a bunch of objects all have this structure
{
  UCHAR ucNumberOfObjects;
  UCHAR ucPadding[3];
  ATOM_OBJECT asObjects[1];
}ATOM_OBJECT_TABLE;

typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure
{
  UCHAR  ucNumberOfSrc;
  USHORT usSrcObjectID[1];
  UCHAR  ucNumberOfDst;
  USHORT usDstObjectID[1];
}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;


//Two definitions below are for OPM on MXM module designs

#define EXT_HPDPIN_LUTINDEX_0                   0
#define EXT_HPDPIN_LUTINDEX_1                   1
#define EXT_HPDPIN_LUTINDEX_2                   2
#define EXT_HPDPIN_LUTINDEX_3                   3
#define EXT_HPDPIN_LUTINDEX_4                   4
#define EXT_HPDPIN_LUTINDEX_5                   5
#define EXT_HPDPIN_LUTINDEX_6                   6
#define EXT_HPDPIN_LUTINDEX_7                   7
#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES   (EXT_HPDPIN_LUTINDEX_7+1)

#define EXT_AUXDDC_LUTINDEX_0                   0
#define EXT_AUXDDC_LUTINDEX_1                   1
#define EXT_AUXDDC_LUTINDEX_2                   2
#define EXT_AUXDDC_LUTINDEX_3                   3
#define EXT_AUXDDC_LUTINDEX_4                   4
#define EXT_AUXDDC_LUTINDEX_5                   5
#define EXT_AUXDDC_LUTINDEX_6                   6
#define EXT_AUXDDC_LUTINDEX_7                   7
#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES   (EXT_AUXDDC_LUTINDEX_7+1)

typedef struct _EXT_DISPLAY_PATH
{
  USHORT usDeviceTag;          //A bit vector to show what devices are supported
  USHORT usDeviceACPIEnum;     //16bit device ACPI id.
  USHORT usDeviceConnector;    //A physical connector for displays to plug in, using object connector definitions
  UCHAR  ucExtAUXDDCLutIndex;  //An index into external AUX/DDC channel LUT
  UCHAR  ucExtHPDPINLutIndex;  //An index into external HPD pin LUT
  USHORT usExtEncoderObjId;    //external encoder object id
  USHORT usReserved[3];
}EXT_DISPLAY_PATH;

#define NUMBER_OF_UCHAR_FOR_GUID       16
#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7

typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  UCHAR  ucGuid [NUMBER_OF_UCHAR_FOR_GUID];        // a GUID is a 16 byte long string
  EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
  UCHAR  ucChecksum;                               // a simple Checksum of the sum of whole structure equal to 0x0.
  UCHAR  Reserved [7];                             // for potential expansion
}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
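The ucChecksum field above is documented as making the byte sum of the whole table equal zero. As a rough illustration (not part of the header; the helper name is made up), a parser could validate the table like this:

/* Hypothetical helper, shown only as a sketch of the documented checksum rule:
 * the unsigned byte sum of the whole structure is expected to wrap to 0x0. */
static int atom_ext_disp_conn_info_valid(const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *info)
{
	const UCHAR *p = (const UCHAR *)info;
	UCHAR sum = 0;
	int i;

	for (i = 0; i < (int)sizeof(*info); i++)
		sum += p[i];	/* unsigned byte arithmetic wraps mod 256 */

	return sum == 0;	/* non-zero sum indicates a corrupt table */
}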

//Related definitions, all records are differnt but they have a commond header
typedef struct _ATOM_COMMON_RECORD_HEADER
{
  UCHAR ucRecordType;  //An emun to indicate the record type
  UCHAR ucRecordSize;  //The size of the whole record in byte
}ATOM_COMMON_RECORD_HEADER;
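Every record attached to an object begins with this two-byte header, and ucRecordSize gives the size of the whole record, so a parser can step from record to record. A minimal sketch, assuming a caller that already located the record area and knows its length (the terminator convention is not shown in this excerpt):

/* Illustrative only: walk a record list whose entries each start with an
 * ATOM_COMMON_RECORD_HEADER, advancing by ucRecordSize. */
static void walk_object_records(const UCHAR *records, int len)
{
	int pos = 0;

	while (pos + (int)sizeof(ATOM_COMMON_RECORD_HEADER) <= len) {
		const ATOM_COMMON_RECORD_HEADER *hdr =
			(const ATOM_COMMON_RECORD_HEADER *)(records + pos);

		if (hdr->ucRecordSize == 0)
			break;		/* defensive stop on a malformed table */

		/* dispatch on hdr->ucRecordType here (the record type IDs are
		 * defined just below), casting to the matching record struct */

		pos += hdr->ucRecordSize;	/* size covers the whole record */
	}
}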

#define ATOM_I2C_RECORD_TYPE                           1
#define ATOM_HPD_INT_RECORD_TYPE                       2
#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE             3
#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE          4
#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE       5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE          6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE      7
#define ATOM_JTAG_RECORD_TYPE                          8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE              9
#define ATOM_ENCODER_DVO_CF_RECORD_TYPE               10
#define ATOM_CONNECTOR_CF_RECORD_TYPE                 11
#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE       12
#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE  13
#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE       14
#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE         16 //This is for the case when connectors are not known to object table
#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE         17 //This is for the case when connectors are not known to object table
#define ATOM_OBJECT_LINK_RECORD_TYPE                  18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record
#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE         19

//Must be updated when new record type is added,equal to that record definition!
#define ATOM_MAX_OBJECT_RECORD_NUMBER  ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE

typedef struct _ATOM_I2C_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  ATOM_I2C_ID_CONFIG        sucI2cId;
  UCHAR                     ucI2CAddr;   //The slave address, it's 0 when the record is attached to connector for DDC
}ATOM_I2C_RECORD;

typedef struct _ATOM_HPD_INT_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  UCHAR ucHPDIntGPIOID;      //Corresponding block in GPIO_PIN_INFO table gives the pin info
  UCHAR ucPlugged_PinState;
}ATOM_HPD_INT_RECORD;


typedef struct _ATOM_OUTPUT_PROTECTION_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  UCHAR ucProtectionFlag;
  UCHAR ucReserved;
}ATOM_OUTPUT_PROTECTION_RECORD;

typedef struct _ATOM_CONNECTOR_DEVICE_TAG
{
  ULONG  ulACPIDeviceEnum;   //Reserved for now
  USHORT usDeviceID;         //This Id is same as "ATOM_DEVICE_XXX_SUPPORT"
  USHORT usPadding;
}ATOM_CONNECTOR_DEVICE_TAG;

typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  UCHAR ucNumberOfDevice;
  UCHAR ucReserved;
  ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1];  //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
}ATOM_CONNECTOR_DEVICE_TAG_RECORD;


typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  UCHAR ucConfigGPIOID;
  UCHAR ucConfigGPIOState;   //Set to 1 when it's active high to enable external flow in
  UCHAR ucFlowinGPIPID;
  UCHAR ucExtInGPIPID;
}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;

typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  UCHAR ucCTL1GPIO_ID;
  UCHAR ucCTL1GPIOState;     //Set to 1 when it's active high
  UCHAR ucCTL2GPIO_ID;
  UCHAR ucCTL2GPIOState;     //Set to 1 when it's active high
  UCHAR ucCTL3GPIO_ID;
  UCHAR ucCTL3GPIOState;     //Set to 1 when it's active high
  UCHAR ucCTLFPGA_IN_ID;
  UCHAR ucPadding[3];
}ATOM_ENCODER_FPGA_CONTROL_RECORD;

typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  UCHAR ucGPIOID;            //Corresponding block in GPIO_PIN_INFO table gives the pin info
  UCHAR ucTVActiveState;     //Indicating when the pin==0 or 1 when TV is connected
}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;

typedef struct _ATOM_JTAG_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  UCHAR ucTMSGPIO_ID;
  UCHAR ucTMSGPIOState;      //Set to 1 when it's active high
  UCHAR ucTCKGPIO_ID;
  UCHAR ucTCKGPIOState;      //Set to 1 when it's active high
  UCHAR ucTDOGPIO_ID;
  UCHAR ucTDOGPIOState;      //Set to 1 when it's active high
  UCHAR ucTDIGPIO_ID;
  UCHAR ucTDIGPIOState;      //Set to 1 when it's active high
  UCHAR ucPadding[2];
}ATOM_JTAG_RECORD;


//The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually
typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR
{
  UCHAR ucGPIOID;            // GPIO_ID, find the corresponding ID in GPIO_LUT table
  UCHAR ucGPIO_PinState;     // Pin state showing how to set-up the pin
}ATOM_GPIO_PIN_CONTROL_PAIR;

typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  UCHAR ucFlags;             // Future expnadibility
  UCHAR ucNumberOfPins;      // Number of GPIO pins used to control the object
  ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pair determined by number of pins ucNumberOfPins
}ATOM_OBJECT_GPIO_CNTL_RECORD;

//Definitions for GPIO pin state
#define GPIO_PIN_TYPE_INPUT             0x00
#define GPIO_PIN_TYPE_OUTPUT            0x10
#define GPIO_PIN_TYPE_HW_CONTROL        0x20

//For GPIO_PIN_TYPE_OUTPUT the following is defined
#define GPIO_PIN_OUTPUT_STATE_MASK      0x01
#define GPIO_PIN_OUTPUT_STATE_SHIFT     0
#define GPIO_PIN_STATE_ACTIVE_LOW       0x0
#define GPIO_PIN_STATE_ACTIVE_HIGH      0x1
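How the pin-type and output-state values above combine into ucGPIO_PinState is not spelled out in this excerpt; the following is only a guess at a plausible encoding, shown for illustration with a hypothetical helper:

/* Assumption: the pin type bits and the output state bit are OR'ed together
 * in ucGPIO_PinState of ATOM_GPIO_PIN_CONTROL_PAIR. */
static void set_pin_active_high(ATOM_GPIO_PIN_CONTROL_PAIR *pair, UCHAR gpio_id)
{
	pair->ucGPIOID = gpio_id;
	/* drive the pin as an output, latched to its active-high state */
	pair->ucGPIO_PinState = GPIO_PIN_TYPE_OUTPUT |
		(GPIO_PIN_STATE_ACTIVE_HIGH << GPIO_PIN_OUTPUT_STATE_SHIFT);
}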

// Indexes to GPIO array in GLSync record
#define ATOM_GPIO_INDEX_GLSYNC_REFCLK    0
#define ATOM_GPIO_INDEX_GLSYNC_HSYNC     1
#define ATOM_GPIO_INDEX_GLSYNC_VSYNC     2
#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ  3
#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT  4
#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
#define ATOM_GPIO_INDEX_GLSYNC_V_RESET   6
#define ATOM_GPIO_INDEX_GLSYNC_MAX       7

typedef struct _ATOM_ENCODER_DVO_CF_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  ULONG ulStrengthControl;   // DVOA strength control for CF
  UCHAR ucPadding[2];
}ATOM_ENCODER_DVO_CF_RECORD;

// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA   1
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB   2

typedef struct _ATOM_CONNECTOR_CF_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  USHORT usMaxPixClk;
  UCHAR  ucFlowCntlGpioId;
  UCHAR  ucSwapCntlGpioId;
  UCHAR  ucConnectedDvoBundle;
  UCHAR  ucPadding;
}ATOM_CONNECTOR_CF_RECORD;

typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  ATOM_DTD_FORMAT asTiming;
}ATOM_CONNECTOR_HARDCODE_DTD_RECORD;

typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;   //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE
  UCHAR ucSubConnectorType;            //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A
  UCHAR ucReserved;
}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;


typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  UCHAR ucMuxType;       //decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state
  UCHAR ucMuxControlPin;
  UCHAR ucMuxState[2];   //for alligment purpose
}ATOM_ROUTER_DDC_PATH_SELECT_RECORD;

typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  UCHAR ucMuxType;
  UCHAR ucMuxControlPin;
  UCHAR ucMuxState[2];   //for alligment purpose
}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;

// define ucMuxType
#define ATOM_ROUTER_MUX_PIN_STATE_MASK               0x0f
#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT  0x01

typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD   //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
{
  ATOM_COMMON_RECORD_HEADER sheader;
  UCHAR ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES];  //An fixed size array which maps external pins to internal GPIO_PIN_INFO table
}ATOM_CONNECTOR_HPDPIN_LUT_RECORD;

typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD   //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
{
  ATOM_COMMON_RECORD_HEADER sheader;
  ATOM_I2C_ID_CONFIG ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES];  //An fixed size array which maps external pins to internal DDC ID
}ATOM_CONNECTOR_AUXDDC_LUT_RECORD;

typedef struct _ATOM_OBJECT_LINK_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  USHORT usObjectID;     //could be connector, encorder or other object in object.h
}ATOM_OBJECT_LINK_RECORD;

typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
{
  ATOM_COMMON_RECORD_HEADER sheader;
  USHORT usReserved;
}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
/****************************************************************************/
// ASIC voltage data table
/****************************************************************************/
typedef struct _ATOM_VOLTAGE_INFO_HEADER
{
  USHORT usVDDCBaseLevel;        //In number of 50mv unit
  USHORT usReserved;             //For possible extension table offset
  UCHAR  ucNumOfVoltageEntries;
  UCHAR  ucBytesPerVoltageEntry;
  UCHAR  ucVoltageStep;          //Indicating in how many mv increament is one step, 0.5mv unit
  UCHAR  ucDefaultVoltageEntry;
  UCHAR  ucVoltageControlI2cLine;
  UCHAR  ucVoltageControlAddress;
  UCHAR  ucVoltageControlOffset;
}ATOM_VOLTAGE_INFO_HEADER;

typedef struct _ATOM_VOLTAGE_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ATOM_VOLTAGE_INFO_HEADER viHeader;
  UCHAR ucVoltageEntries[64];    //64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry
}ATOM_VOLTAGE_INFO;


typedef struct _ATOM_VOLTAGE_FORMULA
{
  USHORT usVoltageBaseLevel;     // In number of 1mv unit
  USHORT usVoltageStep;          // Indicating in how many mv increament is one step, 1mv unit
  UCHAR  ucNumOfVoltageEntries;  // Number of Voltage Entry, which indicate max Voltage
  UCHAR  ucFlag;                 // bit0=0 :step is 1mv =1 0.5mv
  UCHAR  ucBaseVID;              // if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep
  UCHAR  ucReserved;
  UCHAR  ucVIDAdjustEntries[32]; // 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries
}ATOM_VOLTAGE_FORMULA;
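The ucBaseVID comment gives the conversion VID = BaseVID + (Vol - BaseLevel) / VoltageStep. A worked sketch of that formula follows; the helper is hypothetical and the 0.5 mV handling of ucFlag bit0 is an assumption taken from the field comment:

/* Sketch only: convert a target voltage (in mV) to a VID code using the
 * no-lookup-table formula quoted above. */
static UCHAR voltage_to_vid(const ATOM_VOLTAGE_FORMULA *f, USHORT vol_mv)
{
	USHORT step = f->usVoltageStep;	/* nominally in 1 mV units */

	if (step == 0)
		return f->ucBaseVID;	/* avoid divide-by-zero on a bad table */

	/* ucFlag bit0 = 1 means the step is expressed in 0.5 mV units */
	if (f->ucFlag & 1)
		return f->ucBaseVID +
			(UCHAR)(((vol_mv - f->usVoltageBaseLevel) * 2) / step);

	return f->ucBaseVID +
		(UCHAR)((vol_mv - f->usVoltageBaseLevel) / step);
}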
3550
3551typedef struct _VOLTAGE_LUT_ENTRY
3552{
3553 USHORT usVoltageCode; // The Voltage ID, either GPIO or I2C code
3554 USHORT usVoltageValue; // The corresponding Voltage Value, in mV
3555}VOLTAGE_LUT_ENTRY;
3556
3557typedef struct _ATOM_VOLTAGE_FORMULA_V2
3558{
3559 UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage
3560 UCHAR ucReserved[3];
3561 VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];// 32 is for allocation, the actual number of entries is in ucNumOfVoltageEntries
3562}ATOM_VOLTAGE_FORMULA_V2;
3563
3564typedef struct _ATOM_VOLTAGE_CONTROL
3565{
3566 UCHAR ucVoltageControlId; //Indicate it is controlled by I2C or GPIO or HW state machine
3567 UCHAR ucVoltageControlI2cLine;
3568 UCHAR ucVoltageControlAddress;
3569 UCHAR ucVoltageControlOffset;
3570 USHORT usGpioPin_AIndex; //GPIO_PAD register index
3571 UCHAR ucGpioPinBitShift[9]; //at most 8 pin support 255 VIDs, termintate with 0xff
3572 UCHAR ucReserved;
3573}ATOM_VOLTAGE_CONTROL;
3574
3575// Define ucVoltageControlId
2858#define VOLTAGE_CONTROLLED_BY_HW 0x00 3576#define VOLTAGE_CONTROLLED_BY_HW 0x00
2859#define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F 3577#define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F
2860#define VOLTAGE_CONTROLLED_BY_GPIO 0x80 3578#define VOLTAGE_CONTROLLED_BY_GPIO 0x80
2861#define VOLTAGE_CONTROL_ID_LM64 0x01 /* I2C control, used for R5xx Core Voltage */ 3579#define VOLTAGE_CONTROL_ID_LM64 0x01 //I2C control, used for R5xx Core Voltage
2862#define VOLTAGE_CONTROL_ID_DAC 0x02 /* I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI */ 3580#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
2863#define VOLTAGE_CONTROL_ID_VT116xM 0x03 /* I2C control, used for R6xx Core Voltage */ 3581#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage
2864#define VOLTAGE_CONTROL_ID_DS4402 0x04 3582#define VOLTAGE_CONTROL_ID_DS4402 0x04

typedef struct _ATOM_VOLTAGE_OBJECT
{
  UCHAR ucVoltageType;               //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
  UCHAR ucSize;                      //Size of Object
  ATOM_VOLTAGE_CONTROL asControl;    //describ how to control
  ATOM_VOLTAGE_FORMULA asFormula;    //Indicate How to convert real Voltage to VID
}ATOM_VOLTAGE_OBJECT;

typedef struct _ATOM_VOLTAGE_OBJECT_V2
{
  UCHAR ucVoltageType;               //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
  UCHAR ucSize;                      //Size of Object
  ATOM_VOLTAGE_CONTROL    asControl; //describ how to control
  ATOM_VOLTAGE_FORMULA_V2 asFormula; //Indicate How to convert real Voltage to VID
}ATOM_VOLTAGE_OBJECT_V2;

typedef struct _ATOM_VOLTAGE_OBJECT_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ATOM_VOLTAGE_OBJECT asVoltageObj[3];      //Info for Voltage control
}ATOM_VOLTAGE_OBJECT_INFO;

typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V2
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ATOM_VOLTAGE_OBJECT_V2 asVoltageObj[3];   //Info for Voltage control
}ATOM_VOLTAGE_OBJECT_INFO_V2;

typedef struct _ATOM_LEAKID_VOLTAGE
{
  UCHAR  ucLeakageId;
  UCHAR  ucReserved;
  USHORT usVoltage;
}ATOM_LEAKID_VOLTAGE;

typedef struct _ATOM_ASIC_PROFILE_VOLTAGE
{
  UCHAR  ucProfileId;
  UCHAR  ucReserved;
  USHORT usSize;
  USHORT usEfuseSpareStartAddr;
  USHORT usFuseIndex[8];                    //from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id,
  ATOM_LEAKID_VOLTAGE asLeakVol[2];         //Leakid and relatd voltage
}ATOM_ASIC_PROFILE_VOLTAGE;

//ucProfileId
#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE              1
#define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE  1
#define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE      2

typedef struct _ATOM_ASIC_PROFILING_INFO
{
  ATOM_COMMON_TABLE_HEADER  asHeader;
  ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
}ATOM_ASIC_PROFILING_INFO;

typedef struct _ATOM_POWER_SOURCE_OBJECT
{
  UCHAR  ucPwrSrcId;           // Power source
  UCHAR  ucPwrSensorType;      // GPIO, I2C or none
  UCHAR  ucPwrSensId;          // if GPIO detect, it is GPIO id, if I2C detect, it is I2C id
  UCHAR  ucPwrSensSlaveAddr;   // Slave address if I2C detect
  UCHAR  ucPwrSensRegIndex;    // I2C register Index if I2C detect
  UCHAR  ucPwrSensRegBitMask;  // detect which bit is used if I2C detect
  UCHAR  ucPwrSensActiveState; // high active or low active
  UCHAR  ucReserve[3];         // reserve
  USHORT usSensPwr;            // in unit of watt
}ATOM_POWER_SOURCE_OBJECT;

typedef struct _ATOM_POWER_SOURCE_INFO
{
  ATOM_COMMON_TABLE_HEADER asHeader;
  UCHAR                    asPwrbehave[16];
  ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
}ATOM_POWER_SOURCE_INFO;


//Define ucPwrSrcId
#define POWERSOURCE_PCIE_ID1            0x00
#define POWERSOURCE_6PIN_CONNECTOR_ID1  0x01
#define POWERSOURCE_8PIN_CONNECTOR_ID1  0x02
#define POWERSOURCE_6PIN_CONNECTOR_ID2  0x04
#define POWERSOURCE_8PIN_CONNECTOR_ID2  0x08

//define ucPwrSensorId
#define POWER_SENSOR_ALWAYS             0x00
#define POWER_SENSOR_GPIO               0x01
#define POWER_SENSOR_I2C                0x02
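A small sketch of how the sensor-type field might be dispatched on, following the field comments above; the helper and its return convention are invented for illustration:

/* Illustrative only: decide how ATOM_POWER_SOURCE_OBJECT describes its
 * detection hardware. */
static int power_source_uses_i2c(const ATOM_POWER_SOURCE_OBJECT *obj)
{
	switch (obj->ucPwrSensorType) {
	case POWER_SENSOR_I2C:
		/* ucPwrSensId is an I2C id; the slave address, register index
		 * and bit mask fields say where the status bit lives */
		return 1;
	case POWER_SENSOR_GPIO:
		/* ucPwrSensId is a GPIO id, ucPwrSensActiveState is polarity */
		return 0;
	case POWER_SENSOR_ALWAYS:
	default:
		return 0;	/* no detection hardware */
	}
}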

typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ULONG  ulBootUpEngineClock;
  ULONG  ulDentistVCOFreq;
  ULONG  ulBootUpUMAClock;
  ULONG  ulReserved1[8];
  ULONG  ulBootUpReqDisplayVector;
  ULONG  ulOtherDisplayMisc;
  ULONG  ulGPUCapInfo;
  ULONG  ulReserved2[3];
  ULONG  ulSystemConfig;
  ULONG  ulCPUCapInfo;
  USHORT usMaxNBVoltage;
  USHORT usMinNBVoltage;
  USHORT usBootUpNBVoltage;
  USHORT usExtDispConnInfoOffset;
  UCHAR  ucHtcTmpLmt;
  UCHAR  ucTjOffset;
  UCHAR  ucMemoryType;
  UCHAR  ucUMAChannelNumber;
  ULONG  ulCSR_M3_ARB_CNTL_DEFAULT[10];
  ULONG  ulCSR_M3_ARB_CNTL_UVD[10];
  ULONG  ulCSR_M3_ARB_CNTL_FS3D[10];
  ULONG  ulReserved3[42];
  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
}ATOM_INTEGRATED_SYSTEM_INFO_V6;

/**********************************************************************************************************************
// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
//ulBootUpEngineClock:              VBIOS bootup Engine clock frequency, in 10kHz unit.
//ulDentistVCOFreq:                 Dentist VCO clock in 10kHz unit.
//ulBootUpUMAClock:                 System memory boot up clock frequency in 10Khz unit.
//ulReserved1[8]                    Reserved by now, must be 0x0.
//ulBootUpReqDisplayVector          VBIOS boot up display IDs
//                                  ATOM_DEVICE_CRT1_SUPPORT 0x0001
//                                  ATOM_DEVICE_CRT2_SUPPORT 0x0010
//                                  ATOM_DEVICE_DFP1_SUPPORT 0x0008
//                                  ATOM_DEVICE_DFP6_SUPPORT 0x0040
//                                  ATOM_DEVICE_DFP2_SUPPORT 0x0080
//                                  ATOM_DEVICE_DFP3_SUPPORT 0x0200
//                                  ATOM_DEVICE_DFP4_SUPPORT 0x0400
//                                  ATOM_DEVICE_DFP5_SUPPORT 0x0800
//                                  ATOM_DEVICE_LCD1_SUPPORT 0x0002
//ulOtherDisplayMisc                Other display related flags, not defined yet.
//ulGPUCapInfo                      TBD
//ulReserved2[3]                    must be 0x0 for the reserved.
//ulSystemConfig                    TBD
//ulCPUCapInfo                      TBD
//usMaxNBVoltage                    High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
//usMinNBVoltage                    Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
//usBootUpNBVoltage                 Boot up NB voltage in unit of mv.
//ucHtcTmpLmt                       Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
//ucTjOffset                        Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed.
//ucMemoryType                      [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
//ucUMAChannelNumber                System memory channel numbers.
//usExtDispConnectionInfoOffset     ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table.
//ulCSR_M3_ARB_CNTL_DEFAULT[10]     Arrays with values for CSR M3 arbiter for default
//ulCSR_M3_ARB_CNTL_UVD[10]         Arrays with values for CSR M3 arbiter for UVD playback.
//ulCSR_M3_ARB_CNTL_FS3D[10]        Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
**********************************************************************************************************************/
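The description block states the boot-up clocks are stored in 10 kHz units, so, for example, a raw ulBootUpEngineClock of 60000 corresponds to 600 MHz. A trivial conversion helper, purely illustrative (not part of the header):

static inline ULONG atom_10khz_to_khz(ULONG val_in_10khz)
{
	/* 60000 (10 kHz units) -> 600000 kHz = 600 MHz */
	return val_in_10khz * 10;
}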

/**************************************************************************/
// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
//Memory SS Info Table
//Define Memory Clock SS chip ID
#define ICS91719  1
#define ICS91720  2

//Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol
typedef struct _ATOM_I2C_DATA_RECORD
{
  UCHAR ucNunberOfBytes;     //Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop"
  UCHAR ucI2CData[1];        //I2C data in bytes, should be less than 16 bytes usually
}ATOM_I2C_DATA_RECORD;


//Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information
typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
{
  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;   //I2C line and HW/SW assisted cap.
  UCHAR ucSSChipID;                     //SS chip being used
  UCHAR ucSSChipSlaveAddr;              //Slave Address to set up this SS chip
  UCHAR ucNumOfI2CDataRecords;          //number of data block
  ATOM_I2C_DATA_RECORD asI2CData[1];
}ATOM_I2C_DEVICE_SETUP_INFO;

//==========================================================================================
typedef struct _ATOM_ASIC_MVDD_INFO
{
  ATOM_COMMON_TABLE_HEADER   sHeader;
  ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
}ATOM_ASIC_MVDD_INFO;

//==========================================================================================
#define ATOM_MCLK_SS_INFO  ATOM_ASIC_MVDD_INFO

//==========================================================================================
/**************************************************************************/

typedef struct _ATOM_ASIC_SS_ASSIGNMENT
{
  ULONG  ulTargetClockRange;          //Clock Out frequence (VCO ), in unit of 10Khz
  USHORT usSpreadSpectrumPercentage;  //in unit of 0.01%
  USHORT usSpreadRateInKhz;           //in unit of kHz, modulation freq
  UCHAR  ucClockIndication;           //Indicate which clock source needs SS
  UCHAR  ucSpreadSpectrumMode;        //Bit1=0 Down Spread,=1 Center Spread.
  UCHAR  ucReserved[2];
}ATOM_ASIC_SS_ASSIGNMENT;

//Define ucClockIndication, SW uses the IDs below to search if the SS is requried/enabled on a clock branch/signal type.
//SS is not required or enabled if a match is not found.
#define ASIC_INTERNAL_MEMORY_SS     1
#define ASIC_INTERNAL_ENGINE_SS     2
#define ASIC_INTERNAL_UVD_SS        3
#define ASIC_INTERNAL_SS_ON_TMDS    4
#define ASIC_INTERNAL_SS_ON_HDMI    5
#define ASIC_INTERNAL_SS_ON_LVDS    6
#define ASIC_INTERNAL_SS_ON_DP      7
#define ASIC_INTERNAL_SS_ON_DCPLL   8
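Per the comment above, software scans the assignment table for a matching ucClockIndication ID and treats a miss as "no spread spectrum" for that clock. A minimal sketch of that lookup (hypothetical helper):

/* Illustrative only: return the matching assignment, or NULL when SS is not
 * required/enabled for the given clock source (e.g. ASIC_INTERNAL_SS_ON_DP). */
static const ATOM_ASIC_SS_ASSIGNMENT *
find_ss_assignment(const ATOM_ASIC_SS_ASSIGNMENT *tbl, int count, UCHAR id)
{
	int i;

	for (i = 0; i < count; i++)
		if (tbl[i].ucClockIndication == id)
			return &tbl[i];

	return NULL;
}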

typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
{
  ULONG  ulTargetClockRange;          //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz
                                      //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 )
  USHORT usSpreadSpectrumPercentage;  //in unit of 0.01%
  USHORT usSpreadRateIn10Hz;          //in unit of 10Hz, modulation freq
  UCHAR  ucClockIndication;           //Indicate which clock source needs SS
  UCHAR  ucSpreadSpectrumMode;        //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
  UCHAR  ucReserved[2];
}ATOM_ASIC_SS_ASSIGNMENT_V2;

//ucSpreadSpectrumMode
//#define ATOM_SS_DOWN_SPREAD_MODE_MASK    0x00000000
//#define ATOM_SS_DOWN_SPREAD_MODE         0x00000000
//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK  0x00000001
//#define ATOM_SS_CENTRE_SPREAD_MODE       0x00000001
//#define ATOM_INTERNAL_SS_MASK            0x00000000
//#define ATOM_EXTERNAL_SS_MASK            0x00000002

typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  ATOM_ASIC_SS_ASSIGNMENT  asSpreadSpectrum[4];
}ATOM_ASIC_INTERNAL_SS_INFO;

typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
{
  ATOM_COMMON_TABLE_HEADER   sHeader;
  ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1];  //this is point only.
}ATOM_ASIC_INTERNAL_SS_INFO_V2;

typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
{
  ULONG  ulTargetClockRange;          //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz
                                      //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 )
  USHORT usSpreadSpectrumPercentage;  //in unit of 0.01%
  USHORT usSpreadRateIn10Hz;          //in unit of 10Hz, modulation freq
  UCHAR  ucClockIndication;           //Indicate which clock source needs SS
  UCHAR  ucSpreadSpectrumMode;        //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
  UCHAR  ucReserved[2];
}ATOM_ASIC_SS_ASSIGNMENT_V3;

typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
{
  ATOM_COMMON_TABLE_HEADER   sHeader;
  ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1];  //this is pointer only.
}ATOM_ASIC_INTERNAL_SS_INFO_V3;


//==============================Scratch Pad Definition Portion===============================
#define ATOM_DEVICE_CONNECT_INFO_DEF  0
#define ATOM_ROM_LOCATION_DEF         1
#define ATOM_TV_STANDARD_DEF          2
@@ -2995,7 +3852,8 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_I2C_CHANNEL_STATUS_DEF   8
 #define ATOM_I2C_CHANNEL_STATUS1_DEF  9
 
-/* BIOS_0_SCRATCH Definition */
+
+// BIOS_0_SCRATCH Definition
 #define ATOM_S0_CRT1_MONO               0x00000001L
 #define ATOM_S0_CRT1_COLOR              0x00000002L
 #define ATOM_S0_CRT1_MASK               (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
@@ -3008,6 +3866,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S0_CV_DIN_A                0x00000020L
 #define ATOM_S0_CV_MASK_A               (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
 
+
 #define ATOM_S0_CRT2_MONO               0x00000100L
 #define ATOM_S0_CRT2_COLOR              0x00000200L
 #define ATOM_S0_CRT2_MASK               (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
@@ -3025,28 +3884,27 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S0_DFP2                    0x00020000L
 #define ATOM_S0_LCD1                    0x00040000L
 #define ATOM_S0_LCD2                    0x00080000L
-#define ATOM_S0_TV2                     0x00100000L
+#define ATOM_S0_DFP6                    0x00100000L
 #define ATOM_S0_DFP3                    0x00200000L
 #define ATOM_S0_DFP4                    0x00400000L
 #define ATOM_S0_DFP5                    0x00800000L
 
-#define ATOM_S0_DFP_MASK \
-	(ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5)
+#define ATOM_S0_DFP_MASK ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6
 
-#define ATOM_S0_FAD_REGISTER_BUG        0x02000000L /* If set, indicates we are running a PCIE asic with */
-                                                    /* the FAD/HDP reg access bug. Bit is read by DAL */
+#define ATOM_S0_FAD_REGISTER_BUG        0x02000000L // If set, indicates we are running a PCIE asic with
+                                                    // the FAD/HDP reg access bug. Bit is read by DAL, this is obsolete from RV5xx
 
 #define ATOM_S0_THERMAL_STATE_MASK      0x1C000000L
 #define ATOM_S0_THERMAL_STATE_SHIFT     26
 
 #define ATOM_S0_SYSTEM_POWER_STATE_MASK  0xE0000000L
 #define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
 
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC     1
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC     2
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
 
-/* Byte aligned definition for BIOS usage */
+//Byte aligned defintion for BIOS usage
 #define ATOM_S0_CRT1_MONOb0             0x01
 #define ATOM_S0_CRT1_COLORb0            0x02
 #define ATOM_S0_CRT1_MASKb0             (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
@@ -3076,8 +3934,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S0_DFP2b2                  0x02
 #define ATOM_S0_LCD1b2                  0x04
 #define ATOM_S0_LCD2b2                  0x08
-#define ATOM_S0_TV2b2                   0x10
+#define ATOM_S0_DFP6b2                  0x10
 #define ATOM_S0_DFP3b2                  0x20
+#define ATOM_S0_DFP4b2                  0x40
+#define ATOM_S0_DFP5b2                  0x80
+
 
 #define ATOM_S0_THERMAL_STATE_MASKb3    0x1C
 #define ATOM_S0_THERMAL_STATE_SHIFTb3   2
@@ -3085,43 +3946,20 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
 #define ATOM_S0_LCD1_SHIFT              18
 
-/* BIOS_1_SCRATCH Definition */
+// BIOS_1_SCRATCH Definition
 #define ATOM_S1_ROM_LOCATION_MASK       0x0000FFFFL
 #define ATOM_S1_PCI_BUS_DEV_MASK        0xFFFF0000L
 
-/* BIOS_2_SCRATCH Definition */
+// BIOS_2_SCRATCH Definition
 #define ATOM_S2_TV1_STANDARD_MASK       0x0000000FL
 #define ATOM_S2_CURRENT_BL_LEVEL_MASK   0x0000FF00L
 #define ATOM_S2_CURRENT_BL_LEVEL_SHIFT  8
 
-#define ATOM_S2_CRT1_DPMS_STATE         0x00010000L
-#define ATOM_S2_LCD1_DPMS_STATE         0x00020000L
-#define ATOM_S2_TV1_DPMS_STATE          0x00040000L
-#define ATOM_S2_DFP1_DPMS_STATE         0x00080000L
-#define ATOM_S2_CRT2_DPMS_STATE         0x00100000L
-#define ATOM_S2_LCD2_DPMS_STATE         0x00200000L
-#define ATOM_S2_TV2_DPMS_STATE          0x00400000L
-#define ATOM_S2_DFP2_DPMS_STATE         0x00800000L
-#define ATOM_S2_CV_DPMS_STATE           0x01000000L
-#define ATOM_S2_DFP3_DPMS_STATE         0x02000000L
-#define ATOM_S2_DFP4_DPMS_STATE         0x04000000L
-#define ATOM_S2_DFP5_DPMS_STATE         0x08000000L
-
-#define ATOM_S2_DFP_DPM_STATE \
-	(ATOM_S2_DFP1_DPMS_STATE | ATOM_S2_DFP2_DPMS_STATE | \
-	 ATOM_S2_DFP3_DPMS_STATE | ATOM_S2_DFP4_DPMS_STATE | \
-	 ATOM_S2_DFP5_DPMS_STATE)
-
-#define ATOM_S2_DEVICE_DPMS_STATE \
-	(ATOM_S2_CRT1_DPMS_STATE + ATOM_S2_LCD1_DPMS_STATE + \
-	 ATOM_S2_TV1_DPMS_STATE + ATOM_S2_DFP_DPMS_STATE + \
-	 ATOM_S2_CRT2_DPMS_STATE + ATOM_S2_LCD2_DPMS_STATE + \
-	 ATOM_S2_TV2_DPMS_STATE + ATOM_S2_CV_DPMS_STATE)
-
 #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK       0x0C000000L
 #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
 #define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE     0x10000000L
 
+#define ATOM_S2_DEVICE_DPMS_STATE       0x00010000L
 #define ATOM_S2_VRI_BRIGHT_ENABLE       0x20000000L
 
 #define ATOM_S2_DISPLAY_ROTATION_0_DEGREE     0x0
@@ -3131,21 +3969,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
 #define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK   0xC0000000L
 
-/* Byte aligned definition for BIOS usage */
+
+//Byte aligned defintion for BIOS usage
 #define ATOM_S2_TV1_STANDARD_MASKb0     0x0F
 #define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
-#define ATOM_S2_CRT1_DPMS_STATEb2       0x01
-#define ATOM_S2_LCD1_DPMS_STATEb2       0x02
-#define ATOM_S2_TV1_DPMS_STATEb2        0x04
-#define ATOM_S2_DFP1_DPMS_STATEb2       0x08
-#define ATOM_S2_CRT2_DPMS_STATEb2       0x10
-#define ATOM_S2_LCD2_DPMS_STATEb2       0x20
-#define ATOM_S2_TV2_DPMS_STATEb2        0x40
-#define ATOM_S2_DFP2_DPMS_STATEb2       0x80
-#define ATOM_S2_CV_DPMS_STATEb3         0x01
-#define ATOM_S2_DFP3_DPMS_STATEb3       0x02
-#define ATOM_S2_DFP4_DPMS_STATEb3       0x04
-#define ATOM_S2_DFP5_DPMS_STATEb3       0x08
+#define ATOM_S2_DEVICE_DPMS_STATEb2     0x01
 
 #define ATOM_S2_DEVICE_DPMS_MASKw1      0x3FF
 #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3   0x0C
@@ -3153,21 +3981,22 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S2_VRI_BRIGHT_ENABLEb3     0x20
 #define ATOM_S2_ROTATION_STATE_MASKb3   0xC0
 
-/* BIOS_3_SCRATCH Definition */
+
+// BIOS_3_SCRATCH Definition
 #define ATOM_S3_CRT1_ACTIVE             0x00000001L
 #define ATOM_S3_LCD1_ACTIVE             0x00000002L
 #define ATOM_S3_TV1_ACTIVE              0x00000004L
 #define ATOM_S3_DFP1_ACTIVE             0x00000008L
 #define ATOM_S3_CRT2_ACTIVE             0x00000010L
 #define ATOM_S3_LCD2_ACTIVE             0x00000020L
-#define ATOM_S3_TV2_ACTIVE              0x00000040L
+#define ATOM_S3_DFP6_ACTIVE             0x00000040L
 #define ATOM_S3_DFP2_ACTIVE             0x00000080L
 #define ATOM_S3_CV_ACTIVE               0x00000100L
 #define ATOM_S3_DFP3_ACTIVE             0x00000200L
 #define ATOM_S3_DFP4_ACTIVE             0x00000400L
 #define ATOM_S3_DFP5_ACTIVE             0x00000800L
 
-#define ATOM_S3_DEVICE_ACTIVE_MASK      0x000003FFL
+#define ATOM_S3_DEVICE_ACTIVE_MASK      0x00000FFFL
 
 #define ATOM_S3_LCD_FULLEXPANSION_ACTIVE         0x00001000L
 #define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
@@ -3178,7 +4007,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S3_DFP1_CRTC_ACTIVE        0x00080000L
 #define ATOM_S3_CRT2_CRTC_ACTIVE        0x00100000L
 #define ATOM_S3_LCD2_CRTC_ACTIVE        0x00200000L
-#define ATOM_S3_TV2_CRTC_ACTIVE         0x00400000L
+#define ATOM_S3_DFP6_CRTC_ACTIVE        0x00400000L
 #define ATOM_S3_DFP2_CRTC_ACTIVE        0x00800000L
 #define ATOM_S3_CV_CRTC_ACTIVE          0x01000000L
 #define ATOM_S3_DFP3_CRTC_ACTIVE        0x02000000L
@@ -3187,17 +4016,18 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 
 #define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
 #define ATOM_S3_ASIC_GUI_ENGINE_HUNG    0x20000000L
+//Below two definitions are not supported in pplib, but in the old powerplay in DAL
 #define ATOM_S3_ALLOW_FAST_PWR_SWITCH   0x40000000L
 #define ATOM_S3_RQST_GPU_USE_MIN_PWR    0x80000000L
 
-/* Byte aligned definition for BIOS usage */
+//Byte aligned defintion for BIOS usage
 #define ATOM_S3_CRT1_ACTIVEb0           0x01
 #define ATOM_S3_LCD1_ACTIVEb0           0x02
 #define ATOM_S3_TV1_ACTIVEb0            0x04
 #define ATOM_S3_DFP1_ACTIVEb0           0x08
 #define ATOM_S3_CRT2_ACTIVEb0           0x10
 #define ATOM_S3_LCD2_ACTIVEb0           0x20
-#define ATOM_S3_TV2_ACTIVEb0            0x40
+#define ATOM_S3_DFP6_ACTIVEb0           0x40
 #define ATOM_S3_DFP2_ACTIVEb0           0x80
 #define ATOM_S3_CV_ACTIVEb1             0x01
 #define ATOM_S3_DFP3_ACTIVEb1           0x02
@@ -3212,7 +4042,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
 #define ATOM_S3_DFP1_CRTC_ACTIVEb2      0x08
 #define ATOM_S3_CRT2_CRTC_ACTIVEb2      0x10
 #define ATOM_S3_LCD2_CRTC_ACTIVEb2      0x20
-#define ATOM_S3_TV2_CRTC_ACTIVEb2       0x40
+#define ATOM_S3_DFP6_CRTC_ACTIVEb2      0x40
 #define ATOM_S3_DFP2_CRTC_ACTIVEb2      0x80
 #define ATOM_S3_CV_CRTC_ACTIVEb3        0x01
 #define ATOM_S3_DFP3_CRTC_ACTIVEb3      0x02
@@ -3221,35 +4051,31 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3221 4051
3222#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF 4052#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF
3223 4053
3224#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20 4054// BIOS_4_SCRATCH Definition
3225#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
3226#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
3227
3228/* BIOS_4_SCRATCH Definition */
3229#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL 4055#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL
3230#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L 4056#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
3231#define ATOM_S4_LCD1_REFRESH_SHIFT 8 4057#define ATOM_S4_LCD1_REFRESH_SHIFT 8
3232 4058
3233/* Byte aligned definition for BIOS usage */ 4059//Byte aligned defintion for BIOS usage
3234#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF 4060#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
3235#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0 4061#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
3236#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0 4062#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0
3237 4063
3238/* BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! */ 4064// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!!
3239#define ATOM_S5_DOS_REQ_CRT1b0 0x01 4065#define ATOM_S5_DOS_REQ_CRT1b0 0x01
3240#define ATOM_S5_DOS_REQ_LCD1b0 0x02 4066#define ATOM_S5_DOS_REQ_LCD1b0 0x02
3241#define ATOM_S5_DOS_REQ_TV1b0 0x04 4067#define ATOM_S5_DOS_REQ_TV1b0 0x04
3242#define ATOM_S5_DOS_REQ_DFP1b0 0x08 4068#define ATOM_S5_DOS_REQ_DFP1b0 0x08
3243#define ATOM_S5_DOS_REQ_CRT2b0 0x10 4069#define ATOM_S5_DOS_REQ_CRT2b0 0x10
3244#define ATOM_S5_DOS_REQ_LCD2b0 0x20 4070#define ATOM_S5_DOS_REQ_LCD2b0 0x20
3245#define ATOM_S5_DOS_REQ_TV2b0 0x40 4071#define ATOM_S5_DOS_REQ_DFP6b0 0x40
3246#define ATOM_S5_DOS_REQ_DFP2b0 0x80 4072#define ATOM_S5_DOS_REQ_DFP2b0 0x80
3247#define ATOM_S5_DOS_REQ_CVb1 0x01 4073#define ATOM_S5_DOS_REQ_CVb1 0x01
3248#define ATOM_S5_DOS_REQ_DFP3b1 0x02 4074#define ATOM_S5_DOS_REQ_DFP3b1 0x02
3249#define ATOM_S5_DOS_REQ_DFP4b1 0x04 4075#define ATOM_S5_DOS_REQ_DFP4b1 0x04
3250#define ATOM_S5_DOS_REQ_DFP5b1 0x08 4076#define ATOM_S5_DOS_REQ_DFP5b1 0x08
3251 4077
3252#define ATOM_S5_DOS_REQ_DEVICEw0 0x03FF 4078#define ATOM_S5_DOS_REQ_DEVICEw0 0x0FFF
3253 4079
3254#define ATOM_S5_DOS_REQ_CRT1 0x0001 4080#define ATOM_S5_DOS_REQ_CRT1 0x0001
3255#define ATOM_S5_DOS_REQ_LCD1 0x0002 4081#define ATOM_S5_DOS_REQ_LCD1 0x0002
@@ -3257,22 +4083,21 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3257#define ATOM_S5_DOS_REQ_DFP1 0x0008 4083#define ATOM_S5_DOS_REQ_DFP1 0x0008
3258#define ATOM_S5_DOS_REQ_CRT2 0x0010 4084#define ATOM_S5_DOS_REQ_CRT2 0x0010
3259#define ATOM_S5_DOS_REQ_LCD2 0x0020 4085#define ATOM_S5_DOS_REQ_LCD2 0x0020
3260#define ATOM_S5_DOS_REQ_TV2 0x0040 4086#define ATOM_S5_DOS_REQ_DFP6 0x0040
3261#define ATOM_S5_DOS_REQ_DFP2 0x0080 4087#define ATOM_S5_DOS_REQ_DFP2 0x0080
3262#define ATOM_S5_DOS_REQ_CV 0x0100 4088#define ATOM_S5_DOS_REQ_CV 0x0100
3263#define ATOM_S5_DOS_REQ_DFP3 0x0200 4089#define ATOM_S5_DOS_REQ_DFP3 0x0200
3264#define ATOM_S5_DOS_REQ_DFP4 0x0400 4090#define ATOM_S5_DOS_REQ_DFP4 0x0400
3265#define ATOM_S5_DOS_REQ_DFP5 0x0800 4091#define ATOM_S5_DOS_REQ_DFP5 0x0800
3266 4092
3267#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0 4093#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0
3268#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0 4094#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0
3269#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0 4095#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0
3270#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1 4096#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1
3271#define ATOM_S5_DOS_FORCE_DEVICEw1 \ 4097#define ATOM_S5_DOS_FORCE_DEVICEw1 (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\
3272 (ATOM_S5_DOS_FORCE_CRT1b2 + ATOM_S5_DOS_FORCE_TV1b2 + \ 4098 (ATOM_S5_DOS_FORCE_CVb3<<8))
3273 ATOM_S5_DOS_FORCE_CRT2b2 + (ATOM_S5_DOS_FORCE_CVb3 << 8))
3274 4099
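ATOM_S5_DOS_FORCE_DEVICEw1 is just the byte-aligned force bits packed into one 16-bit word: 0x01 (CRT1) + 0x04 (TV1) + 0x10 (CRT2) + (0x01 << 8) (CV) = 0x0115. A purely illustrative check of that arithmetic, assuming atombios.h is included:

#include <assert.h>

static void check_force_device_word(void)
{
        /* CRT1b2 + TV1b2 + CRT2b2 + (CVb3 << 8) = 0x01 + 0x04 + 0x10 + 0x100 */
        assert(ATOM_S5_DOS_FORCE_DEVICEw1 == 0x0115);
}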
3275/* BIOS_6_SCRATCH Definition */ 4100// BIOS_6_SCRATCH Definition
3276#define ATOM_S6_DEVICE_CHANGE 0x00000001L 4101#define ATOM_S6_DEVICE_CHANGE 0x00000001L
3277#define ATOM_S6_SCALER_CHANGE 0x00000002L 4102#define ATOM_S6_SCALER_CHANGE 0x00000002L
3278#define ATOM_S6_LID_CHANGE 0x00000004L 4103#define ATOM_S6_LID_CHANGE 0x00000004L
@@ -3285,11 +4110,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3285#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L 4110#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L
3286#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L 4111#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L
3287#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L 4112#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L
3288#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L /* Normal expansion Request bit for LCD */ 4113#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L //Normal expansion Request bit for LCD
3289#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L /* Aspect ratio expansion Request bit for LCD */ 4114#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L //Aspect ratio expansion Request bit for LCD
3290 4115
3291#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion */ 4116#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion
3292#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L /* This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion */ 4117#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L //This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion
3293 4118
3294#define ATOM_S6_ACC_REQ_CRT1 0x00010000L 4119#define ATOM_S6_ACC_REQ_CRT1 0x00010000L
3295#define ATOM_S6_ACC_REQ_LCD1 0x00020000L 4120#define ATOM_S6_ACC_REQ_LCD1 0x00020000L
@@ -3297,7 +4122,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3297#define ATOM_S6_ACC_REQ_DFP1 0x00080000L 4122#define ATOM_S6_ACC_REQ_DFP1 0x00080000L
3298#define ATOM_S6_ACC_REQ_CRT2 0x00100000L 4123#define ATOM_S6_ACC_REQ_CRT2 0x00100000L
3299#define ATOM_S6_ACC_REQ_LCD2 0x00200000L 4124#define ATOM_S6_ACC_REQ_LCD2 0x00200000L
3300#define ATOM_S6_ACC_REQ_TV2 0x00400000L 4125#define ATOM_S6_ACC_REQ_DFP6 0x00400000L
3301#define ATOM_S6_ACC_REQ_DFP2 0x00800000L 4126#define ATOM_S6_ACC_REQ_DFP2 0x00800000L
3302#define ATOM_S6_ACC_REQ_CV 0x01000000L 4127#define ATOM_S6_ACC_REQ_CV 0x01000000L
3303#define ATOM_S6_ACC_REQ_DFP3 0x02000000L 4128#define ATOM_S6_ACC_REQ_DFP3 0x02000000L
@@ -3310,7 +4135,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3310#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L 4135#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L
3311#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L 4136#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
3312 4137
3313/* Byte aligned definition for BIOS usage */ 4138//Byte aligned defintion for BIOS usage
3314#define ATOM_S6_DEVICE_CHANGEb0 0x01 4139#define ATOM_S6_DEVICE_CHANGEb0 0x01
3315#define ATOM_S6_SCALER_CHANGEb0 0x02 4140#define ATOM_S6_SCALER_CHANGEb0 0x02
3316#define ATOM_S6_LID_CHANGEb0 0x04 4141#define ATOM_S6_LID_CHANGEb0 0x04
@@ -3320,11 +4145,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3320#define ATOM_S6_LID_STATEb0 0x40 4145#define ATOM_S6_LID_STATEb0 0x40
3321#define ATOM_S6_DOCK_STATEb0 0x80 4146#define ATOM_S6_DOCK_STATEb0 0x80
3322#define ATOM_S6_CRITICAL_STATEb1 0x01 4147#define ATOM_S6_CRITICAL_STATEb1 0x01
3323#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02 4148#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02
3324#define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04 4149#define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04
3325#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08 4150#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
3326#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10 4151#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
3327#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20 4152#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
3328 4153
3329#define ATOM_S6_ACC_REQ_CRT1b2 0x01 4154#define ATOM_S6_ACC_REQ_CRT1b2 0x01
3330#define ATOM_S6_ACC_REQ_LCD1b2 0x02 4155#define ATOM_S6_ACC_REQ_LCD1b2 0x02
@@ -3332,12 +4157,12 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3332#define ATOM_S6_ACC_REQ_DFP1b2 0x08 4157#define ATOM_S6_ACC_REQ_DFP1b2 0x08
3333#define ATOM_S6_ACC_REQ_CRT2b2 0x10 4158#define ATOM_S6_ACC_REQ_CRT2b2 0x10
3334#define ATOM_S6_ACC_REQ_LCD2b2 0x20 4159#define ATOM_S6_ACC_REQ_LCD2b2 0x20
3335#define ATOM_S6_ACC_REQ_TV2b2 0x40 4160#define ATOM_S6_ACC_REQ_DFP6b2 0x40
3336#define ATOM_S6_ACC_REQ_DFP2b2 0x80 4161#define ATOM_S6_ACC_REQ_DFP2b2 0x80
3337#define ATOM_S6_ACC_REQ_CVb3 0x01 4162#define ATOM_S6_ACC_REQ_CVb3 0x01
3338#define ATOM_S6_ACC_REQ_DFP3b3 0x02 4163#define ATOM_S6_ACC_REQ_DFP3b3 0x02
3339#define ATOM_S6_ACC_REQ_DFP4b3 0x04 4164#define ATOM_S6_ACC_REQ_DFP4b3 0x04
3340#define ATOM_S6_ACC_REQ_DFP5b3 0x08 4165#define ATOM_S6_ACC_REQ_DFP5b3 0x08
3341 4166
3342#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0 4167#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0
3343#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10 4168#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
@@ -3366,7 +4191,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3366#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30 4191#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30
3367#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31 4192#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31
3368 4193
3369/* BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! */ 4194// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!!
3370#define ATOM_S7_DOS_MODE_TYPEb0 0x03 4195#define ATOM_S7_DOS_MODE_TYPEb0 0x03
3371#define ATOM_S7_DOS_MODE_VGAb0 0x00 4196#define ATOM_S7_DOS_MODE_VGAb0 0x00
3372#define ATOM_S7_DOS_MODE_VESAb0 0x01 4197#define ATOM_S7_DOS_MODE_VESAb0 0x01
@@ -3378,220 +4203,194 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
3378 4203
3379#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8 4204#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
3380 4205
3381/* BIOS_8_SCRATCH Definition */ 4206// BIOS_8_SCRATCH Definition
3382#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF 4207#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF
3383#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000 4208#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000
3384 4209
3385#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0 4210#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0
3386#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16 4211#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16
3387 4212
3388/* BIOS_9_SCRATCH Definition */ 4213// BIOS_9_SCRATCH Definition
3389#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 4214#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
3390#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF 4215#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF
3391#endif 4216#endif
3392#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK 4217#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
3393#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000 4218#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000
3394#endif 4219#endif
3395#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 4220#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
3396#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0 4221#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
3397#endif 4222#endif
3398#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 4223#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
3399#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16 4224#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16
3400#endif 4225#endif
3401 4226
4227
3402#define ATOM_FLAG_SET 0x20 4228#define ATOM_FLAG_SET 0x20
3403#define ATOM_FLAG_CLEAR 0 4229#define ATOM_FLAG_CLEAR 0
3404#define CLEAR_ATOM_S6_ACC_MODE \ 4230#define CLEAR_ATOM_S6_ACC_MODE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
3405 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ 4231#define SET_ATOM_S6_DEVICE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
3406 ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR) 4232#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
3407#define SET_ATOM_S6_DEVICE_CHANGE \ 4233#define SET_ATOM_S6_SCALER_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
3408 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ 4234#define SET_ATOM_S6_LID_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
3409 ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
3410#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE \
3411 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3412 ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
3413#define SET_ATOM_S6_SCALER_CHANGE \
3414 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3415 ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
3416#define SET_ATOM_S6_LID_CHANGE \
3417 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3418 ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
3419
3420#define SET_ATOM_S6_LID_STATE \
3421 ((ATOM_ACC_CHANGE_INFO_DEF << 8) |\
3422 ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
3423#define CLEAR_ATOM_S6_LID_STATE \
3424 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3425 ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
3426
3427#define SET_ATOM_S6_DOCK_CHANGE \
3428 ((ATOM_ACC_CHANGE_INFO_DEF << 8)| \
3429 ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
3430#define SET_ATOM_S6_DOCK_STATE \
3431 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3432 ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
3433#define CLEAR_ATOM_S6_DOCK_STATE \
3434 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3435 ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
3436
3437#define SET_ATOM_S6_THERMAL_STATE_CHANGE \
3438 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3439 ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
3440#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE \
3441 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3442 ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
3443#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS \
3444 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3445 ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
3446
3447#define SET_ATOM_S6_CRITICAL_STATE \
3448 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3449 ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
3450#define CLEAR_ATOM_S6_CRITICAL_STATE \
3451 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3452 ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
3453
3454#define SET_ATOM_S6_REQ_SCALER \
3455 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3456 ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
3457#define CLEAR_ATOM_S6_REQ_SCALER \
3458 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3459 ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
3460
3461#define SET_ATOM_S6_REQ_SCALER_ARATIO \
3462 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3463 ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
3464#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO \
3465 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3466 ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
3467
3468#define SET_ATOM_S6_I2C_STATE_CHANGE \
3469 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3470 ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
3471
3472#define SET_ATOM_S6_DISPLAY_STATE_CHANGE \
3473 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3474 ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
3475
3476#define SET_ATOM_S6_DEVICE_RECONFIG \
3477 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3478 ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
3479#define CLEAR_ATOM_S0_LCD1 \
3480 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 ) | \
3481 ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
3482#define SET_ATOM_S7_DOS_8BIT_DAC_EN \
3483 ((ATOM_DOS_MODE_INFO_DEF << 8) | \
3484 ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
3485#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN \
3486 ((ATOM_DOS_MODE_INFO_DEF << 8) | \
3487 ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
3488 4235
3489/****************************************************************************/ 4236#define SET_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
3490/* Portion II: Definitinos only used in Driver */ 4237#define CLEAR_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
4238
4239#define SET_ATOM_S6_DOCK_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
4240#define SET_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
4241#define CLEAR_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
4242
4243#define SET_ATOM_S6_THERMAL_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
4244#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
4245#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
4246
4247#define SET_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
4248#define CLEAR_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
4249
4250#define SET_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
4251#define CLEAR_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
4252
4253#define SET_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
4254#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
4255
4256#define SET_ATOM_S6_I2C_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
4257
4258#define SET_ATOM_S6_DISPLAY_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
4259
4260#define SET_ATOM_S6_DEVICE_RECONFIG ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
4261#define CLEAR_ATOM_S0_LCD1 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )| ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
4262#define SET_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
4263#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
4264
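Each SET_/CLEAR_ macro above packs three things into one request code: the scratch-info block to touch in the upper byte, the bit position inside that scratch register, and ATOM_FLAG_SET (0x20) or ATOM_FLAG_CLEAR as the action. A hedged sketch of unpacking such a code; the 0x1f shift mask is an inference from the layout (shift values run 0..31 and the flag bit is 0x20), not something the header states:

struct atom_scratch_request {
        unsigned char info_block;   /* e.g. ATOM_ACC_CHANGE_INFO_DEF */
        unsigned char bit_shift;    /* bit position within that scratch register */
        unsigned char set;          /* 1 = set the bit, 0 = clear it */
};

static struct atom_scratch_request atom_decode_scratch_request(unsigned int code)
{
        struct atom_scratch_request r;

        r.info_block = (code >> 8) & 0xff;
        r.set        = (code & ATOM_FLAG_SET) ? 1 : 0;
        r.bit_shift  = code & 0x1f;
        return r;
}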
4265/****************************************************************************/
4266//Portion II: Definitinos only used in Driver
3491/****************************************************************************/ 4267/****************************************************************************/
3492 4268
3493/* Macros used by driver */ 4269// Macros used by driver
4270#ifdef __cplusplus
4271#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT))
3494 4272
3495#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char *)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES *)0)->FieldName)-(char *)0)/sizeof(USHORT)) 4273#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
4274#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
4275#else // not __cplusplus
4276#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
3496 4277
3497#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F) 4278#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
3498#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F) 4279#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
4280#endif // __cplusplus
3499 4281
3500#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION 4282#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
3501#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION 4283#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
3502 4284
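GetIndexIntoMasterTable() is an offsetof-style trick: it takes the byte offset of a USHORT field inside the ATOM_MASTER_LIST_OF_..._TABLES structure and divides by sizeof(USHORT), so the result is the field's position, i.e. the table's index in the master list; the new __cplusplus branch does the same arithmetic with casts a C++ compiler will accept. A small illustration with a made-up two-entry list (the DEMO structure and its field names are hypothetical; only the arithmetic mirrors the macro above, and USHORT is taken from the header):

typedef struct _ATOM_MASTER_LIST_OF_DEMO_TABLES {    /* hypothetical */
        USHORT FirmwareInfo;    /* index 0 */
        USHORT LCD_Info;        /* index 1 */
} ATOM_MASTER_LIST_OF_DEMO_TABLES;

#define GetIndexIntoDemoTable(FieldName) \
        (((char *)(&((ATOM_MASTER_LIST_OF_DEMO_TABLES *)0)->FieldName) - (char *)0) / sizeof(USHORT))

/* GetIndexIntoDemoTable(LCD_Info) evaluates to 1: the field sits one
 * USHORT into the structure, and every master-table entry is one USHORT. */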
3503/****************************************************************************/ 4285/****************************************************************************/
3504/* Portion III: Definitinos only used in VBIOS */ 4286//Portion III: Definitinos only used in VBIOS
3505/****************************************************************************/ 4287/****************************************************************************/
3506#define ATOM_DAC_SRC 0x80 4288#define ATOM_DAC_SRC 0x80
3507#define ATOM_SRC_DAC1 0 4289#define ATOM_SRC_DAC1 0
3508#define ATOM_SRC_DAC2 0x80 4290#define ATOM_SRC_DAC2 0x80
3509 4291
3510#ifdef UEFI_BUILD 4292typedef struct _MEMORY_PLLINIT_PARAMETERS
3511#define USHORT UTEMP 4293{
3512#endif 4294 ULONG ulTargetMemoryClock; //In 10Khz unit
3513 4295 UCHAR ucAction; //not define yet
3514typedef struct _MEMORY_PLLINIT_PARAMETERS { 4296 UCHAR ucFbDiv_Hi; //Fbdiv Hi byte
3515 ULONG ulTargetMemoryClock; /* In 10Khz unit */ 4297 UCHAR ucFbDiv; //FB value
3516 UCHAR ucAction; /* not define yet */ 4298 UCHAR ucPostDiv; //Post div
3517 UCHAR ucFbDiv_Hi; /* Fbdiv Hi byte */ 4299}MEMORY_PLLINIT_PARAMETERS;
3518 UCHAR ucFbDiv; /* FB value */
3519 UCHAR ucPostDiv; /* Post div */
3520} MEMORY_PLLINIT_PARAMETERS;
3521 4300
3522#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS 4301#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS
3523 4302
3524#define GPIO_PIN_WRITE 0x01 4303
4304#define GPIO_PIN_WRITE 0x01
3525#define GPIO_PIN_READ 0x00 4305#define GPIO_PIN_READ 0x00
3526 4306
3527typedef struct _GPIO_PIN_CONTROL_PARAMETERS { 4307typedef struct _GPIO_PIN_CONTROL_PARAMETERS
3528 UCHAR ucGPIO_ID; /* return value, read from GPIO pins */ 4308{
3529 UCHAR ucGPIOBitShift; /* define which bit in uGPIOBitVal need to be update */ 4309 UCHAR ucGPIO_ID; //return value, read from GPIO pins
3530 UCHAR ucGPIOBitVal; /* Set/Reset corresponding bit defined in ucGPIOBitMask */ 4310 UCHAR ucGPIOBitShift; //define which bit in uGPIOBitVal need to be update
3531 UCHAR ucAction; /* =GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write */ 4311 UCHAR ucGPIOBitVal; //Set/Reset corresponding bit defined in ucGPIOBitMask
3532} GPIO_PIN_CONTROL_PARAMETERS; 4312 UCHAR ucAction; //=GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write
3533 4313}GPIO_PIN_CONTROL_PARAMETERS;
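GPIO_PIN_CONTROL_PARAMETERS is the parameter block for the GPIOPinControl command table; ucAction takes GPIO_PIN_WRITE or GPIO_PIN_READ from the defines above (the Read/Write wording in the ucAction comment appears swapped relative to those defines and is carried over unchanged from the vendor header). A hedged sketch of filling the block for a write; the bit encoding of ucGPIOBitVal is my reading of the field comments, not something the header spells out:

static void atom_fill_gpio_write(GPIO_PIN_CONTROL_PARAMETERS *args,
                                 unsigned char gpio_id, unsigned char bit, int high)
{
        args->ucGPIO_ID      = gpio_id;
        args->ucGPIOBitShift = bit;                     /* which bit to update */
        args->ucGPIOBitVal   = high ? (1 << bit) : 0;   /* new value of that bit */
        args->ucAction       = GPIO_PIN_WRITE;          /* 0x01 per the define above */
}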
3534typedef struct _ENABLE_SCALER_PARAMETERS { 4314
3535 UCHAR ucScaler; /* ATOM_SCALER1, ATOM_SCALER2 */ 4315typedef struct _ENABLE_SCALER_PARAMETERS
3536 UCHAR ucEnable; /* ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION */ 4316{
3537 UCHAR ucTVStandard; /* */ 4317 UCHAR ucScaler; // ATOM_SCALER1, ATOM_SCALER2
3538 UCHAR ucPadding[1]; 4318 UCHAR ucEnable; // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION
3539} ENABLE_SCALER_PARAMETERS; 4319 UCHAR ucTVStandard; //
3540#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS 4320 UCHAR ucPadding[1];
3541 4321}ENABLE_SCALER_PARAMETERS;
3542/* ucEnable: */ 4322#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
4323
4324//ucEnable:
3543#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0 4325#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0
3544#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1 4326#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1
3545#define SCALER_ENABLE_2TAP_ALPHA_MODE 2 4327#define SCALER_ENABLE_2TAP_ALPHA_MODE 2
3546#define SCALER_ENABLE_MULTITAP_MODE 3 4328#define SCALER_ENABLE_MULTITAP_MODE 3
3547 4329
3548typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS { 4330typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS
3549 ULONG usHWIconHorzVertPosn; /* Hardware Icon Vertical position */ 4331{
3550 UCHAR ucHWIconVertOffset; /* Hardware Icon Vertical offset */ 4332 ULONG usHWIconHorzVertPosn; // Hardware Icon Vertical position
3551 UCHAR ucHWIconHorzOffset; /* Hardware Icon Horizontal offset */ 4333 UCHAR ucHWIconVertOffset; // Hardware Icon Vertical offset
3552 UCHAR ucSelection; /* ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 */ 4334 UCHAR ucHWIconHorzOffset; // Hardware Icon Horizontal offset
3553 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 4335 UCHAR ucSelection; // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2
3554} ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS; 4336 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
3555 4337}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
3556typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION { 4338
3557 ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon; 4339typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION
3558 ENABLE_CRTC_PARAMETERS sReserved; 4340{
3559} ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION; 4341 ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
3560 4342 ENABLE_CRTC_PARAMETERS sReserved;
3561typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS { 4343}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
3562 USHORT usHight; /* Image Hight */ 4344
3563 USHORT usWidth; /* Image Width */ 4345typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS
3564 UCHAR ucSurface; /* Surface 1 or 2 */ 4346{
3565 UCHAR ucPadding[3]; 4347 USHORT usHight; // Image Hight
3566} ENABLE_GRAPH_SURFACE_PARAMETERS; 4348 USHORT usWidth; // Image Width
3567 4349 UCHAR ucSurface; // Surface 1 or 2
3568typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 { 4350 UCHAR ucPadding[3];
3569 USHORT usHight; /* Image Hight */ 4351}ENABLE_GRAPH_SURFACE_PARAMETERS;
3570 USHORT usWidth; /* Image Width */ 4352
3571 UCHAR ucSurface; /* Surface 1 or 2 */ 4353typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2
3572 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ 4354{
3573 UCHAR ucPadding[2]; 4355 USHORT usHight; // Image Hight
3574} ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2; 4356 USHORT usWidth; // Image Width
3575 4357 UCHAR ucSurface; // Surface 1 or 2
3576typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION { 4358 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
3577 ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface; 4359 UCHAR ucPadding[2];
3578 ENABLE_YUV_PS_ALLOCATION sReserved; /* Don't set this one */ 4360}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
3579} ENABLE_GRAPH_SURFACE_PS_ALLOCATION; 4361
3580 4362typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
3581typedef struct _MEMORY_CLEAN_UP_PARAMETERS { 4363{
3582 USHORT usMemoryStart; /* in 8Kb boundry, offset from memory base address */ 4364 USHORT usHight; // Image Hight
3583 USHORT usMemorySize; /* 8Kb blocks aligned */ 4365 USHORT usWidth; // Image Width
3584} MEMORY_CLEAN_UP_PARAMETERS; 4366 UCHAR ucSurface; // Surface 1 or 2
4367 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
4368 USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0.
4369}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
4370
4371typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
4372{
4373 ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
4374 ENABLE_YUV_PS_ALLOCATION sReserved; // Don't set this one
4375}ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
4376
4377typedef struct _MEMORY_CLEAN_UP_PARAMETERS
4378{
4379 USHORT usMemoryStart; //in 8Kb boundry, offset from memory base address
4380 USHORT usMemorySize; //8Kb blocks aligned
4381}MEMORY_CLEAN_UP_PARAMETERS;
3585#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS 4382#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
3586 4383
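MEMORY_CLEAN_UP_PARAMETERS describes the region in 8 KB units: usMemoryStart is an 8 KB-aligned offset from the memory base address and usMemorySize a count of 8 KB blocks. A small sketch converting byte quantities into that encoding; rounding the size up is an assumption, not a documented requirement:

static void atom_fill_memory_clean_up(MEMORY_CLEAN_UP_PARAMETERS *args,
                                      unsigned int start_bytes, unsigned int size_bytes)
{
        args->usMemoryStart = start_bytes >> 13;                    /* 8 KB units */
        args->usMemorySize  = (size_bytes + (1 << 13) - 1) >> 13;   /* rounded up */
}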
3587typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS { 4384typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
3588 USHORT usX_Size; /* When use as input parameter, usX_Size indicates which CRTC */ 4385{
3589 USHORT usY_Size; 4386 USHORT usX_Size; //When use as input parameter, usX_Size indicates which CRTC
3590} GET_DISPLAY_SURFACE_SIZE_PARAMETERS; 4387 USHORT usY_Size;
4388}GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
3591 4389
3592typedef struct _INDIRECT_IO_ACCESS { 4390typedef struct _INDIRECT_IO_ACCESS
3593 ATOM_COMMON_TABLE_HEADER sHeader; 4391{
3594 UCHAR IOAccessSequence[256]; 4392 ATOM_COMMON_TABLE_HEADER sHeader;
4393 UCHAR IOAccessSequence[256];
3595} INDIRECT_IO_ACCESS; 4394} INDIRECT_IO_ACCESS;
3596 4395
3597#define INDIRECT_READ 0x00 4396#define INDIRECT_READ 0x00
@@ -3615,93 +4414,108 @@ typedef struct _INDIRECT_IO_ACCESS {
3615#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ 4414#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ
3616#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE 4415#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE
3617 4416
3618typedef struct _ATOM_OEM_INFO { 4417typedef struct _ATOM_OEM_INFO
3619 ATOM_COMMON_TABLE_HEADER sHeader; 4418{
3620 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; 4419 ATOM_COMMON_TABLE_HEADER sHeader;
3621} ATOM_OEM_INFO; 4420 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
3622 4421}ATOM_OEM_INFO;
3623typedef struct _ATOM_TV_MODE { 4422
3624 UCHAR ucVMode_Num; /* Video mode number */ 4423typedef struct _ATOM_TV_MODE
3625 UCHAR ucTV_Mode_Num; /* Internal TV mode number */ 4424{
3626} ATOM_TV_MODE; 4425 UCHAR ucVMode_Num; //Video mode number
3627 4426 UCHAR ucTV_Mode_Num; //Internal TV mode number
3628typedef struct _ATOM_BIOS_INT_TVSTD_MODE { 4427}ATOM_TV_MODE;
3629 ATOM_COMMON_TABLE_HEADER sHeader; 4428
3630 USHORT usTV_Mode_LUT_Offset; /* Pointer to standard to internal number conversion table */ 4429typedef struct _ATOM_BIOS_INT_TVSTD_MODE
3631 USHORT usTV_FIFO_Offset; /* Pointer to FIFO entry table */ 4430{
3632 USHORT usNTSC_Tbl_Offset; /* Pointer to SDTV_Mode_NTSC table */ 4431 ATOM_COMMON_TABLE_HEADER sHeader;
3633 USHORT usPAL_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */ 4432 USHORT usTV_Mode_LUT_Offset; // Pointer to standard to internal number conversion table
3634 USHORT usCV_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */ 4433 USHORT usTV_FIFO_Offset; // Pointer to FIFO entry table
3635} ATOM_BIOS_INT_TVSTD_MODE; 4434 USHORT usNTSC_Tbl_Offset; // Pointer to SDTV_Mode_NTSC table
3636 4435 USHORT usPAL_Tbl_Offset; // Pointer to SDTV_Mode_PAL table
3637typedef struct _ATOM_TV_MODE_SCALER_PTR { 4436 USHORT usCV_Tbl_Offset; // Pointer to SDTV_Mode_PAL table
3638 USHORT ucFilter0_Offset; /* Pointer to filter format 0 coefficients */ 4437}ATOM_BIOS_INT_TVSTD_MODE;
3639 USHORT usFilter1_Offset; /* Pointer to filter format 0 coefficients */ 4438
3640 UCHAR ucTV_Mode_Num; 4439
3641} ATOM_TV_MODE_SCALER_PTR; 4440typedef struct _ATOM_TV_MODE_SCALER_PTR
3642 4441{
3643typedef struct _ATOM_STANDARD_VESA_TIMING { 4442 USHORT ucFilter0_Offset; //Pointer to filter format 0 coefficients
3644 ATOM_COMMON_TABLE_HEADER sHeader; 4443 USHORT usFilter1_Offset; //Pointer to filter format 0 coefficients
3645 ATOM_DTD_FORMAT aModeTimings[16]; /* 16 is not the real array number, just for initial allocation */ 4444 UCHAR ucTV_Mode_Num;
3646} ATOM_STANDARD_VESA_TIMING; 4445}ATOM_TV_MODE_SCALER_PTR;
3647 4446
3648typedef struct _ATOM_STD_FORMAT { 4447typedef struct _ATOM_STANDARD_VESA_TIMING
3649 USHORT usSTD_HDisp; 4448{
3650 USHORT usSTD_VDisp; 4449 ATOM_COMMON_TABLE_HEADER sHeader;
3651 USHORT usSTD_RefreshRate; 4450 ATOM_DTD_FORMAT aModeTimings[16]; // 16 is not the real array number, just for initial allocation
3652 USHORT usReserved; 4451}ATOM_STANDARD_VESA_TIMING;
3653} ATOM_STD_FORMAT; 4452
3654 4453
3655typedef struct _ATOM_VESA_TO_EXTENDED_MODE { 4454typedef struct _ATOM_STD_FORMAT
3656 USHORT usVESA_ModeNumber; 4455{
3657 USHORT usExtendedModeNumber; 4456 USHORT usSTD_HDisp;
3658} ATOM_VESA_TO_EXTENDED_MODE; 4457 USHORT usSTD_VDisp;
3659 4458 USHORT usSTD_RefreshRate;
3660typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT { 4459 USHORT usReserved;
3661 ATOM_COMMON_TABLE_HEADER sHeader; 4460}ATOM_STD_FORMAT;
3662 ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76]; 4461
3663} ATOM_VESA_TO_INTENAL_MODE_LUT; 4462typedef struct _ATOM_VESA_TO_EXTENDED_MODE
4463{
4464 USHORT usVESA_ModeNumber;
4465 USHORT usExtendedModeNumber;
4466}ATOM_VESA_TO_EXTENDED_MODE;
4467
4468typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT
4469{
4470 ATOM_COMMON_TABLE_HEADER sHeader;
4471 ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
4472}ATOM_VESA_TO_INTENAL_MODE_LUT;
3664 4473
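The aModeTimings[16] comment notes that 16 is only a placeholder bound for allocation; the real entry count has to be derived from the table size at run time. A hedged sketch of that calculation, assuming ATOM_COMMON_TABLE_HEADER (defined earlier in this header) carries the total table size in a usStructureSize field and that the ATOM_DTD_FORMAT entries follow it without padding:

static int atom_vesa_timing_count(const ATOM_STANDARD_VESA_TIMING *tbl)
{
        return (tbl->sHeader.usStructureSize - sizeof(ATOM_COMMON_TABLE_HEADER))
                / sizeof(ATOM_DTD_FORMAT);
}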
3665/*************** ATOM Memory Related Data Structure ***********************/ 4474/*************** ATOM Memory Related Data Structure ***********************/
3666typedef struct _ATOM_MEMORY_VENDOR_BLOCK { 4475typedef struct _ATOM_MEMORY_VENDOR_BLOCK{
3667 UCHAR ucMemoryType; 4476 UCHAR ucMemoryType;
3668 UCHAR ucMemoryVendor; 4477 UCHAR ucMemoryVendor;
3669 UCHAR ucAdjMCId; 4478 UCHAR ucAdjMCId;
3670 UCHAR ucDynClkId; 4479 UCHAR ucDynClkId;
3671 ULONG ulDllResetClkRange; 4480 ULONG ulDllResetClkRange;
3672} ATOM_MEMORY_VENDOR_BLOCK; 4481}ATOM_MEMORY_VENDOR_BLOCK;
3673 4482
3674typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG { 4483
4484typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{
3675#if ATOM_BIG_ENDIAN 4485#if ATOM_BIG_ENDIAN
3676 ULONG ucMemBlkId:8; 4486 ULONG ucMemBlkId:8;
3677 ULONG ulMemClockRange:24; 4487 ULONG ulMemClockRange:24;
3678#else 4488#else
3679 ULONG ulMemClockRange:24; 4489 ULONG ulMemClockRange:24;
3680 ULONG ucMemBlkId:8; 4490 ULONG ucMemBlkId:8;
3681#endif 4491#endif
3682} ATOM_MEMORY_SETTING_ID_CONFIG; 4492}ATOM_MEMORY_SETTING_ID_CONFIG;
3683 4493
3684typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS { 4494typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
3685 ATOM_MEMORY_SETTING_ID_CONFIG slAccess; 4495{
3686 ULONG ulAccess; 4496 ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
3687} ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS; 4497 ULONG ulAccess;
3688 4498}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
3689typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK { 4499
3690 ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID; 4500
3691 ULONG aulMemData[1]; 4501typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
3692} ATOM_MEMORY_SETTING_DATA_BLOCK; 4502 ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
3693 4503 ULONG aulMemData[1];
3694typedef struct _ATOM_INIT_REG_INDEX_FORMAT { 4504}ATOM_MEMORY_SETTING_DATA_BLOCK;
3695 USHORT usRegIndex; /* MC register index */ 4505
3696 UCHAR ucPreRegDataLength; /* offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf */ 4506
3697} ATOM_INIT_REG_INDEX_FORMAT; 4507typedef struct _ATOM_INIT_REG_INDEX_FORMAT{
3698 4508 USHORT usRegIndex; // MC register index
3699typedef struct _ATOM_INIT_REG_BLOCK { 4509 UCHAR ucPreRegDataLength; // offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf
3700 USHORT usRegIndexTblSize; /* size of asRegIndexBuf */ 4510}ATOM_INIT_REG_INDEX_FORMAT;
3701 USHORT usRegDataBlkSize; /* size of ATOM_MEMORY_SETTING_DATA_BLOCK */ 4511
3702 ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1]; 4512
3703 ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1]; 4513typedef struct _ATOM_INIT_REG_BLOCK{
3704} ATOM_INIT_REG_BLOCK; 4514 USHORT usRegIndexTblSize; //size of asRegIndexBuf
4515 USHORT usRegDataBlkSize; //size of ATOM_MEMORY_SETTING_DATA_BLOCK
4516 ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
4517 ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
4518}ATOM_INIT_REG_BLOCK;
3705 4519
3706#define END_OF_REG_INDEX_BLOCK 0x0ffff 4520#define END_OF_REG_INDEX_BLOCK 0x0ffff
3707#define END_OF_REG_DATA_BLOCK 0x00000000 4521#define END_OF_REG_DATA_BLOCK 0x00000000
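ATOM_INIT_REG_BLOCK is a variable-length blob rather than the fixed shape its [1] array bounds suggest: usRegIndexTblSize bytes of ATOM_INIT_REG_INDEX_FORMAT entries ending with END_OF_REG_INDEX_BLOCK, followed by data blocks of usRegDataBlkSize bytes each ending with END_OF_REG_DATA_BLOCK. A hedged sketch of walking the index table; it assumes the entries are byte-packed (the header is normally built with structure packing) and that usRegIndexTblSize is a byte count:

static int atom_count_reg_indices(const ATOM_INIT_REG_BLOCK *blk)
{
        int max = blk->usRegIndexTblSize / sizeof(ATOM_INIT_REG_INDEX_FORMAT);
        int i;

        for (i = 0; i < max; i++)
                if (blk->asRegIndexBuf[i].usRegIndex == END_OF_REG_INDEX_BLOCK)
                        break;          /* terminator reached */
        return i;
}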
@@ -3716,16 +4530,19 @@ typedef struct _ATOM_INIT_REG_BLOCK {
3716#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1) 4530#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
3717#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1) 4531#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
3718 4532
3719typedef struct _ATOM_MC_INIT_PARAM_TABLE { 4533
3720 ATOM_COMMON_TABLE_HEADER sHeader; 4534typedef struct _ATOM_MC_INIT_PARAM_TABLE
3721 USHORT usAdjustARB_SEQDataOffset; 4535{
3722 USHORT usMCInitMemTypeTblOffset; 4536 ATOM_COMMON_TABLE_HEADER sHeader;
3723 USHORT usMCInitCommonTblOffset; 4537 USHORT usAdjustARB_SEQDataOffset;
3724 USHORT usMCInitPowerDownTblOffset; 4538 USHORT usMCInitMemTypeTblOffset;
3725 ULONG ulARB_SEQDataBuf[32]; 4539 USHORT usMCInitCommonTblOffset;
3726 ATOM_INIT_REG_BLOCK asMCInitMemType; 4540 USHORT usMCInitPowerDownTblOffset;
3727 ATOM_INIT_REG_BLOCK asMCInitCommon; 4541 ULONG ulARB_SEQDataBuf[32];
3728} ATOM_MC_INIT_PARAM_TABLE; 4542 ATOM_INIT_REG_BLOCK asMCInitMemType;
4543 ATOM_INIT_REG_BLOCK asMCInitCommon;
4544}ATOM_MC_INIT_PARAM_TABLE;
4545
3729 4546
3730#define _4Mx16 0x2 4547#define _4Mx16 0x2
3731#define _4Mx32 0x3 4548#define _4Mx32 0x3
@@ -3751,221 +4568,272 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE {
3751 4568
3752#define QIMONDA INFINEON 4569#define QIMONDA INFINEON
3753#define PROMOS MOSEL 4570#define PROMOS MOSEL
4571#define KRETON INFINEON
3754 4572
3755/* ///////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// */ 4573/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
3756 4574
3757#define UCODE_ROM_START_ADDRESS 0x1c000 4575#define UCODE_ROM_START_ADDRESS 0x1c000
3758#define UCODE_SIGNATURE 0x4375434d /* 'MCuC' - MC uCode */ 4576#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode
3759 4577
3760/* uCode block header for reference */ 4578//uCode block header for reference
3761 4579
3762typedef struct _MCuCodeHeader { 4580typedef struct _MCuCodeHeader
3763 ULONG ulSignature; 4581{
3764 UCHAR ucRevision; 4582 ULONG ulSignature;
3765 UCHAR ucChecksum; 4583 UCHAR ucRevision;
3766 UCHAR ucReserved1; 4584 UCHAR ucChecksum;
3767 UCHAR ucReserved2; 4585 UCHAR ucReserved1;
3768 USHORT usParametersLength; 4586 UCHAR ucReserved2;
3769 USHORT usUCodeLength; 4587 USHORT usParametersLength;
3770 USHORT usReserved1; 4588 USHORT usUCodeLength;
3771 USHORT usReserved2; 4589 USHORT usReserved1;
4590 USHORT usReserved2;
3772} MCuCodeHeader; 4591} MCuCodeHeader;
3773 4592
3774/* //////////////////////////////////////////////////////////////////////////////// */ 4593//////////////////////////////////////////////////////////////////////////////////
3775 4594
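The GDDR5 MC microcode sits in the upper 64 KB of the ROM at UCODE_ROM_START_ADDRESS, fronted by the MCuCodeHeader above, whose ulSignature should read 'MCuC' (0x4375434d). A hedged sketch of validating that header in a ROM image already copied into memory; the rom/len parameters are hypothetical, and a little-endian CPU is assumed:

static const MCuCodeHeader *atom_find_mc_ucode(const unsigned char *rom,
                                               unsigned long len)
{
        const MCuCodeHeader *hdr;

        if (len < UCODE_ROM_START_ADDRESS + sizeof(MCuCodeHeader))
                return 0;       /* image too small to hold the uCode block */
        hdr = (const MCuCodeHeader *)(rom + UCODE_ROM_START_ADDRESS);
        return hdr->ulSignature == UCODE_SIGNATURE ? hdr : 0;
}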
3776#define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16 4595#define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16
3777 4596
3778#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF 4597#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF
3779typedef struct _ATOM_VRAM_MODULE_V1 { 4598typedef struct _ATOM_VRAM_MODULE_V1
3780 ULONG ulReserved; 4599{
3781 USHORT usEMRSValue; 4600 ULONG ulReserved;
3782 USHORT usMRSValue; 4601 USHORT usEMRSValue;
3783 USHORT usReserved; 4602 USHORT usMRSValue;
3784 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ 4603 USHORT usReserved;
3785 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; */ 4604 UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
3786 UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender */ 4605 UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved;
3787 UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */ 4606 UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender
3788 UCHAR ucRow; /* Number of Row,in power of 2; */ 4607 UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
3789 UCHAR ucColumn; /* Number of Column,in power of 2; */ 4608 UCHAR ucRow; // Number of Row,in power of 2;
3790 UCHAR ucBank; /* Nunber of Bank; */ 4609 UCHAR ucColumn; // Number of Column,in power of 2;
3791 UCHAR ucRank; /* Number of Rank, in power of 2 */ 4610 UCHAR ucBank; // Nunber of Bank;
3792 UCHAR ucChannelNum; /* Number of channel; */ 4611 UCHAR ucRank; // Number of Rank, in power of 2
3793 UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */ 4612 UCHAR ucChannelNum; // Number of channel;
3794 UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */ 4613 UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
3795 UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */ 4614 UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
3796 UCHAR ucReserved[2]; 4615 UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
3797} ATOM_VRAM_MODULE_V1; 4616 UCHAR ucReserved[2];
3798 4617}ATOM_VRAM_MODULE_V1;
3799typedef struct _ATOM_VRAM_MODULE_V2 { 4618
3800 ULONG ulReserved; 4619
3801 ULONG ulFlags; /* To enable/disable functionalities based on memory type */ 4620typedef struct _ATOM_VRAM_MODULE_V2
3802 ULONG ulEngineClock; /* Override of default engine clock for particular memory type */ 4621{
3803 ULONG ulMemoryClock; /* Override of default memory clock for particular memory type */ 4622 ULONG ulReserved;
3804 USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */ 4623 ULONG ulFlags; // To enable/disable functionalities based on memory type
3805 USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */ 4624 ULONG ulEngineClock; // Override of default engine clock for particular memory type
3806 USHORT usEMRSValue; 4625 ULONG ulMemoryClock; // Override of default memory clock for particular memory type
3807 USHORT usMRSValue; 4626 USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
3808 USHORT usReserved; 4627 USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
3809 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ 4628 USHORT usEMRSValue;
3810 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */ 4629 USHORT usMRSValue;
3811 UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */ 4630 USHORT usReserved;
3812 UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */ 4631 UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
3813 UCHAR ucRow; /* Number of Row,in power of 2; */ 4632 UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
3814 UCHAR ucColumn; /* Number of Column,in power of 2; */ 4633 UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed
3815 UCHAR ucBank; /* Nunber of Bank; */ 4634 UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
3816 UCHAR ucRank; /* Number of Rank, in power of 2 */ 4635 UCHAR ucRow; // Number of Row,in power of 2;
3817 UCHAR ucChannelNum; /* Number of channel; */ 4636 UCHAR ucColumn; // Number of Column,in power of 2;
3818 UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */ 4637 UCHAR ucBank; // Nunber of Bank;
3819 UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */ 4638 UCHAR ucRank; // Number of Rank, in power of 2
3820 UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */ 4639 UCHAR ucChannelNum; // Number of channel;
3821 UCHAR ucRefreshRateFactor; 4640 UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
3822 UCHAR ucReserved[3]; 4641 UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
3823} ATOM_VRAM_MODULE_V2; 4642 UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
3824 4643 UCHAR ucRefreshRateFactor;
3825typedef struct _ATOM_MEMORY_TIMING_FORMAT { 4644 UCHAR ucReserved[3];
3826 ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */ 4645}ATOM_VRAM_MODULE_V2;
3827 union { 4646
3828 USHORT usMRS; /* mode register */ 4647
3829 USHORT usDDR3_MR0; 4648typedef struct _ATOM_MEMORY_TIMING_FORMAT
3830 }; 4649{
3831 union { 4650 ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
3832 USHORT usEMRS; /* extended mode register */ 4651 union{
3833 USHORT usDDR3_MR1; 4652 USHORT usMRS; // mode register
3834 }; 4653 USHORT usDDR3_MR0;
3835 UCHAR ucCL; /* CAS latency */ 4654 };
3836 UCHAR ucWL; /* WRITE Latency */ 4655 union{
3837 UCHAR uctRAS; /* tRAS */ 4656 USHORT usEMRS; // extended mode register
3838 UCHAR uctRC; /* tRC */ 4657 USHORT usDDR3_MR1;
3839 UCHAR uctRFC; /* tRFC */ 4658 };
3840 UCHAR uctRCDR; /* tRCDR */ 4659 UCHAR ucCL; // CAS latency
3841 UCHAR uctRCDW; /* tRCDW */ 4660 UCHAR ucWL; // WRITE Latency
3842 UCHAR uctRP; /* tRP */ 4661 UCHAR uctRAS; // tRAS
3843 UCHAR uctRRD; /* tRRD */ 4662 UCHAR uctRC; // tRC
3844 UCHAR uctWR; /* tWR */ 4663 UCHAR uctRFC; // tRFC
3845 UCHAR uctWTR; /* tWTR */ 4664 UCHAR uctRCDR; // tRCDR
3846 UCHAR uctPDIX; /* tPDIX */ 4665 UCHAR uctRCDW; // tRCDW
3847 UCHAR uctFAW; /* tFAW */ 4666 UCHAR uctRP; // tRP
3848 UCHAR uctAOND; /* tAOND */ 4667 UCHAR uctRRD; // tRRD
3849 union { 4668 UCHAR uctWR; // tWR
3850 struct { 4669 UCHAR uctWTR; // tWTR
3851 UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */ 4670 UCHAR uctPDIX; // tPDIX
3852 UCHAR ucReserved; 4671 UCHAR uctFAW; // tFAW
3853 }; 4672 UCHAR uctAOND; // tAOND
3854 USHORT usDDR3_MR2; 4673 union
3855 }; 4674 {
3856} ATOM_MEMORY_TIMING_FORMAT; 4675 struct {
3857 4676 UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
3858typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 { 4677 UCHAR ucReserved;
3859 ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */ 4678 };
3860 USHORT usMRS; /* mode register */ 4679 USHORT usDDR3_MR2;
3861 USHORT usEMRS; /* extended mode register */ 4680 };
3862 UCHAR ucCL; /* CAS latency */ 4681}ATOM_MEMORY_TIMING_FORMAT;
3863 UCHAR ucWL; /* WRITE Latency */ 4682
3864 UCHAR uctRAS; /* tRAS */ 4683
3865 UCHAR uctRC; /* tRC */ 4684typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1
3866 UCHAR uctRFC; /* tRFC */ 4685{
3867 UCHAR uctRCDR; /* tRCDR */ 4686 ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
3868 UCHAR uctRCDW; /* tRCDW */ 4687 USHORT usMRS; // mode register
3869 UCHAR uctRP; /* tRP */ 4688 USHORT usEMRS; // extended mode register
3870 UCHAR uctRRD; /* tRRD */ 4689 UCHAR ucCL; // CAS latency
3871 UCHAR uctWR; /* tWR */ 4690 UCHAR ucWL; // WRITE Latency
3872 UCHAR uctWTR; /* tWTR */ 4691 UCHAR uctRAS; // tRAS
3873 UCHAR uctPDIX; /* tPDIX */ 4692 UCHAR uctRC; // tRC
3874 UCHAR uctFAW; /* tFAW */ 4693 UCHAR uctRFC; // tRFC
3875 UCHAR uctAOND; /* tAOND */ 4694 UCHAR uctRCDR; // tRCDR
3876 UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */ 4695 UCHAR uctRCDW; // tRCDW
3877/* ///////////////////////GDDR parameters/////////////////////////////////// */ 4696 UCHAR uctRP; // tRP
3878 UCHAR uctCCDL; /* */ 4697 UCHAR uctRRD; // tRRD
3879 UCHAR uctCRCRL; /* */ 4698 UCHAR uctWR; // tWR
3880 UCHAR uctCRCWL; /* */ 4699 UCHAR uctWTR; // tWTR
3881 UCHAR uctCKE; /* */ 4700 UCHAR uctPDIX; // tPDIX
3882 UCHAR uctCKRSE; /* */ 4701 UCHAR uctFAW; // tFAW
3883 UCHAR uctCKRSX; /* */ 4702 UCHAR uctAOND; // tAOND
3884 UCHAR uctFAW32; /* */ 4703 UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
3885 UCHAR ucReserved1; /* */ 4704////////////////////////////////////GDDR parameters///////////////////////////////////
3886 UCHAR ucReserved2; /* */ 4705 UCHAR uctCCDL; //
3887 UCHAR ucTerminator; 4706 UCHAR uctCRCRL; //
3888} ATOM_MEMORY_TIMING_FORMAT_V1; 4707 UCHAR uctCRCWL; //
3889 4708 UCHAR uctCKE; //
3890typedef struct _ATOM_MEMORY_FORMAT { 4709 UCHAR uctCKRSE; //
3891 ULONG ulDllDisClock; /* memory DLL will be disable when target memory clock is below this clock */ 4710 UCHAR uctCKRSX; //
3892 union { 4711 UCHAR uctFAW32; //
3893 USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */ 4712 UCHAR ucMR5lo; //
3894 USHORT usDDR3_Reserved; /* Not used for DDR3 memory */ 4713 UCHAR ucMR5hi; //
3895 }; 4714 UCHAR ucTerminator;
3896 union { 4715}ATOM_MEMORY_TIMING_FORMAT_V1;
3897 USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */ 4716
3898 USHORT usDDR3_MR3; /* Used for DDR3 memory */ 4717typedef struct _ATOM_MEMORY_TIMING_FORMAT_V2
3899 }; 4718{
3900 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */ 4719 ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
3901 UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */ 4720 USHORT usMRS; // mode register
3902 UCHAR ucRow; /* Number of Row,in power of 2; */ 4721 USHORT usEMRS; // extended mode register
3903 UCHAR ucColumn; /* Number of Column,in power of 2; */ 4722 UCHAR ucCL; // CAS latency
3904 UCHAR ucBank; /* Nunber of Bank; */ 4723 UCHAR ucWL; // WRITE Latency
3905 UCHAR ucRank; /* Number of Rank, in power of 2 */ 4724 UCHAR uctRAS; // tRAS
3906 UCHAR ucBurstSize; /* burst size, 0= burst size=4 1= burst size=8 */ 4725 UCHAR uctRC; // tRC
3907 UCHAR ucDllDisBit; /* position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) */ 4726 UCHAR uctRFC; // tRFC
3908 UCHAR ucRefreshRateFactor; /* memory refresh rate in unit of ms */ 4727 UCHAR uctRCDR; // tRCDR
3909 UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */ 4728 UCHAR uctRCDW; // tRCDW
3910 UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */ 4729 UCHAR uctRP; // tRP
3911 UCHAR ucMemAttrib; /* Memory Device Addribute, like RDBI/WDBI etc */ 4730 UCHAR uctRRD; // tRRD
3912 ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */ 4731 UCHAR uctWR; // tWR
3913} ATOM_MEMORY_FORMAT; 4732 UCHAR uctWTR; // tWTR
3914 4733 UCHAR uctPDIX; // tPDIX
3915typedef struct _ATOM_VRAM_MODULE_V3 { 4734 UCHAR uctFAW; // tFAW
3916 ULONG ulChannelMapCfg; /* board dependent paramenter:Channel combination */ 4735 UCHAR uctAOND; // tAOND
3917 USHORT usSize; /* size of ATOM_VRAM_MODULE_V3 */ 4736 UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
3918 USHORT usDefaultMVDDQ; /* board dependent parameter:Default Memory Core Voltage */ 4737////////////////////////////////////GDDR parameters///////////////////////////////////
3919 USHORT usDefaultMVDDC; /* board dependent parameter:Default Memory IO Voltage */ 4738 UCHAR uctCCDL; //
3920 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ 4739 UCHAR uctCRCRL; //
3921 UCHAR ucChannelNum; /* board dependent parameter:Number of channel; */ 4740 UCHAR uctCRCWL; //
3922 UCHAR ucChannelSize; /* board dependent parameter:32bit or 64bit */ 4741 UCHAR uctCKE; //
3923 UCHAR ucVREFI; /* board dependnt parameter: EXT or INT +160mv to -140mv */ 4742 UCHAR uctCKRSE; //
3924 UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */ 4743 UCHAR uctCKRSX; //
3925 UCHAR ucFlag; /* To enable/disable functionalities based on memory type */ 4744 UCHAR uctFAW32; //
3926 ATOM_MEMORY_FORMAT asMemory; /* describ all of video memory parameters from memory spec */ 4745 UCHAR ucMR4lo; //
3927} ATOM_VRAM_MODULE_V3; 4746 UCHAR ucMR4hi; //
3928 4747 UCHAR ucMR5lo; //
3929/* ATOM_VRAM_MODULE_V3.ucNPL_RT */ 4748 UCHAR ucMR5hi; //
4749 UCHAR ucTerminator;
4750 UCHAR ucReserved;
4751}ATOM_MEMORY_TIMING_FORMAT_V2;
4752
4753typedef struct _ATOM_MEMORY_FORMAT
4754{
4755 ULONG ulDllDisClock; // memory DLL will be disable when target memory clock is below this clock
4756 union{
4757 USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
4758 USHORT usDDR3_Reserved; // Not used for DDR3 memory
4759 };
4760 union{
4761 USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
4762 USHORT usDDR3_MR3; // Used for DDR3 memory
4763 };
4764 UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
4765 UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed
4766 UCHAR ucRow; // Number of Row,in power of 2;
4767 UCHAR ucColumn; // Number of Column,in power of 2;
4768 UCHAR ucBank; // Nunber of Bank;
4769 UCHAR ucRank; // Number of Rank, in power of 2
4770 UCHAR ucBurstSize; // burst size, 0= burst size=4 1= burst size=8
4771 UCHAR ucDllDisBit; // position of DLL Enable/Disable bit in EMRS ( Extended Mode Register )
4772 UCHAR ucRefreshRateFactor; // memory refresh rate in unit of ms
4773 UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
4774 UCHAR ucPreamble; //[7:4] Write Preamble, [3:0] Read Preamble
4775 UCHAR ucMemAttrib; // Memory Device Addribute, like RDBI/WDBI etc
4776 ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; //Memory Timing block sort from lower clock to higher clock
4777}ATOM_MEMORY_FORMAT;
4778
4779
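asMemTiming[] in ATOM_MEMORY_FORMAT is sorted from lower to higher clock, and each entry's ulClkRange (10 kHz units) is the highest memory clock that timing set covers. A hedged sketch of picking the entry for a target clock; treating a zero ulClkRange as "no more valid entries" is an assumption on my part:

static const ATOM_MEMORY_TIMING_FORMAT *
atom_pick_mem_timing(const ATOM_MEMORY_FORMAT *mem, unsigned long clk_10khz)
{
        int i;

        for (i = 0; i < 5; i++) {               /* asMemTiming[5] above */
                if (mem->asMemTiming[i].ulClkRange == 0)
                        break;                  /* assumed: unused entry */
                if (clk_10khz <= mem->asMemTiming[i].ulClkRange)
                        return &mem->asMemTiming[i];
        }
        return 0;       /* target clock above every listed range */
}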
4780typedef struct _ATOM_VRAM_MODULE_V3
4781{
4782 ULONG ulChannelMapCfg; // board dependent paramenter:Channel combination
4783 USHORT usSize; // size of ATOM_VRAM_MODULE_V3
4784 USHORT usDefaultMVDDQ; // board dependent parameter:Default Memory Core Voltage
4785 USHORT usDefaultMVDDC; // board dependent parameter:Default Memory IO Voltage
4786 UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
4787 UCHAR ucChannelNum; // board dependent parameter:Number of channel;
4788 UCHAR ucChannelSize; // board dependent parameter:32bit or 64bit
4789 UCHAR ucVREFI; // board dependnt parameter: EXT or INT +160mv to -140mv
4790 UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
4791 UCHAR ucFlag; // To enable/disable functionalities based on memory type
4792 ATOM_MEMORY_FORMAT asMemory; // describ all of video memory parameters from memory spec
4793}ATOM_VRAM_MODULE_V3;
4794
4795
4796//ATOM_VRAM_MODULE_V3.ucNPL_RT
3930#define NPL_RT_MASK 0x0f 4797#define NPL_RT_MASK 0x0f
3931#define BATTERY_ODT_MASK 0xc0 4798#define BATTERY_ODT_MASK 0xc0
3932 4799
3933#define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3 4800#define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3
3934 4801
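ucNPL_RT in ATOM_VRAM_MODULE_V3 carries two board-dependent fields, split by the masks above: the NPL round-trip delay in the low nibble and the battery-mode ODT setting in the top two bits. A small sketch of the split; the 6-bit shift is inferred from BATTERY_ODT_MASK being 0xc0:

static void atom_decode_npl_rt(unsigned char npl_rt,
                               unsigned char *npl_delay, unsigned char *battery_odt)
{
        *npl_delay   = npl_rt & NPL_RT_MASK;                /* low nibble */
        *battery_odt = (npl_rt & BATTERY_ODT_MASK) >> 6;    /* top two bits */
}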
typedef struct _ATOM_VRAM_MODULE_V4
{
  ULONG ulChannelMapCfg;      // board dependent parameter: Channel combination
  USHORT usModuleSize;        // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
  USHORT usPrivateReserved;   // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
                              // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
  USHORT usReserved;
  UCHAR ucExtMemoryID;        // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
  UCHAR ucMemoryType;         // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
  UCHAR ucChannelNum;         // Number of channels present in this module config
  UCHAR ucChannelWidth;       // 0 - 32 bits; 1 - 64 bits
  UCHAR ucDensity;            // _8Mx32, _16Mx32, _16Mx16, _32Mx16
  UCHAR ucFlag;               // To enable/disable functionalities based on memory type
  UCHAR ucMisc;               // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
  UCHAR ucVREFI;              // board dependent parameter
  UCHAR ucNPL_RT;             // board dependent parameter: NPL round trip delay, used for calculating memory timing parameters
  UCHAR ucPreamble;           // [7:4] Write Preamble, [3:0] Read Preamble
  UCHAR ucMemorySize;         // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
                              // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
  UCHAR ucReserved[3];

// compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
  union{
    USHORT usEMRS2Value;      // EMRS2 Value is used for GDDR2 and GDDR4 memory type
    USHORT usDDR3_Reserved;
  };
  union{
    USHORT usEMRS3Value;      // EMRS3 Value is used for GDDR2 and GDDR4 memory type
    USHORT usDDR3_MR3;        // Used for DDR3 memory
  };
  UCHAR ucMemoryVenderID;     // Predefined; if not predefined, the vendor detection table gets executed
  UCHAR ucRefreshRateFactor;  // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms, 11=64ms)
  UCHAR ucReserved2[2];
  ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; // Memory Timing blocks sorted from lower clock to higher clock
}ATOM_VRAM_MODULE_V4;

#define VRAM_MODULE_V4_MISC_RANK_MASK       0x3
#define VRAM_MODULE_V4_MISC_DUAL_RANK       0x1
@@ -3973,96 +4841,139 @@ typedef struct _ATOM_VRAM_MODULE_V4 {
#define VRAM_MODULE_V4_MISC_BL8             0x4
#define VRAM_MODULE_V4_MISC_DUAL_CS         0x10

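A minimal sketch (not part of this patch) of how a driver might decode the rank and burst-length bits of ATOM_VRAM_MODULE_V4.ucMisc using the masks above; the helper name is made up for illustration.

#include <stdbool.h>
#include "atombios.h"   /* assumed to provide ATOM_VRAM_MODULE_V4 and the masks above */

/* Hypothetical helper: report rank and burst length for one VRAM module. */
static void vram_module_v4_decode_misc(const ATOM_VRAM_MODULE_V4 *mod,
                                       bool *dual_rank, int *burst_len)
{
        /* bit0 of ucMisc selects single (0) vs dual (1) rank */
        *dual_rank = (mod->ucMisc & VRAM_MODULE_V4_MISC_DUAL_RANK) != 0;
        /* bit2 selects burst length 4 (0) or 8 (1) */
        *burst_len = (mod->ucMisc & VRAM_MODULE_V4_MISC_BL8) ? 8 : 4;
}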
typedef struct _ATOM_VRAM_MODULE_V5
{
  ULONG ulChannelMapCfg;      // board dependent parameter: Channel combination
  USHORT usModuleSize;        // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
  USHORT usPrivateReserved;   // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
                              // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
  USHORT usReserved;
  UCHAR ucExtMemoryID;        // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
  UCHAR ucMemoryType;         // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
  UCHAR ucChannelNum;         // Number of channels present in this module config
  UCHAR ucChannelWidth;       // 0 - 32 bits; 1 - 64 bits
  UCHAR ucDensity;            // _8Mx32, _16Mx32, _16Mx16, _32Mx16
  UCHAR ucFlag;               // To enable/disable functionalities based on memory type
  UCHAR ucMisc;               // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
  UCHAR ucVREFI;              // board dependent parameter
  UCHAR ucNPL_RT;             // board dependent parameter: NPL round trip delay, used for calculating memory timing parameters
  UCHAR ucPreamble;           // [7:4] Write Preamble, [3:0] Read Preamble
  UCHAR ucMemorySize;         // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
                              // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
  UCHAR ucReserved[3];

// compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
  USHORT usEMRS2Value;        // EMRS2 Value is used for GDDR2 and GDDR4 memory type
  USHORT usEMRS3Value;        // EMRS3 Value is used for GDDR2 and GDDR4 memory type
  UCHAR ucMemoryVenderID;     // Predefined; if not predefined, the vendor detection table gets executed
  UCHAR ucRefreshRateFactor;  // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms, 11=64ms)
  UCHAR ucFIFODepth;          // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
  UCHAR ucCDR_Bandwidth;      // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
  ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5]; // Memory Timing blocks sorted from lower clock to higher clock
}ATOM_VRAM_MODULE_V5;

typedef struct _ATOM_VRAM_MODULE_V6
{
  ULONG ulChannelMapCfg;      // board dependent parameter: Channel combination
  USHORT usModuleSize;        // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
  USHORT usPrivateReserved;   // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
                              // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
  USHORT usReserved;
  UCHAR ucExtMemoryID;        // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
  UCHAR ucMemoryType;         // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
  UCHAR ucChannelNum;         // Number of channels present in this module config
  UCHAR ucChannelWidth;       // 0 - 32 bits; 1 - 64 bits
  UCHAR ucDensity;            // _8Mx32, _16Mx32, _16Mx16, _32Mx16
  UCHAR ucFlag;               // To enable/disable functionalities based on memory type
  UCHAR ucMisc;               // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
  UCHAR ucVREFI;              // board dependent parameter
  UCHAR ucNPL_RT;             // board dependent parameter: NPL round trip delay, used for calculating memory timing parameters
  UCHAR ucPreamble;           // [7:4] Write Preamble, [3:0] Read Preamble
  UCHAR ucMemorySize;         // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
                              // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
  UCHAR ucReserved[3];

// compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
  USHORT usEMRS2Value;        // EMRS2 Value is used for GDDR2 and GDDR4 memory type
  USHORT usEMRS3Value;        // EMRS3 Value is used for GDDR2 and GDDR4 memory type
  UCHAR ucMemoryVenderID;     // Predefined; if not predefined, the vendor detection table gets executed
  UCHAR ucRefreshRateFactor;  // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms, 11=64ms)
  UCHAR ucFIFODepth;          // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
  UCHAR ucCDR_Bandwidth;      // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
  ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5]; // Memory Timing blocks sorted from lower clock to higher clock
}ATOM_VRAM_MODULE_V6;


typedef struct _ATOM_VRAM_INFO_V2
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  UCHAR ucNumOfVRAMModule;
  ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
}ATOM_VRAM_INFO_V2;

typedef struct _ATOM_VRAM_INFO_V3
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT usMemAdjustTblOffset;   // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
  USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
  USHORT usRerseved;
  UCHAR aVID_PinsShift[9];       // 8 bit strap maximum+terminator
  UCHAR ucNumOfVRAMModule;
  ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
  ATOM_INIT_REG_BLOCK asMemPatch; // for allocation
                                  // ATOM_INIT_REG_BLOCK aMemAdjust;
}ATOM_VRAM_INFO_V3;

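The two offset fields above are only documented as offsets. A small sketch, under the assumption that they are byte offsets relative to the start of the VRAM info table (which is how the radeon driver treats the equivalent offsets), of turning usMemAdjustTblOffset into a usable pointer:

#include <stddef.h>
#include <stdint.h>
#include "atombios.h"   /* assumed to provide the ATOM_* types above */

/*
 * Hypothetical helper: locate the MC adjust register block referenced by an
 * ATOM_VRAM_INFO_V3 table.  Assumption: the offset is relative to the start
 * of the VRAM info table itself; byte-swap would be needed on a big-endian
 * host since the ROM stores it little-endian.  Returns NULL when absent.
 */
static ATOM_INIT_REG_BLOCK *vram_info_v3_mem_adjust_block(ATOM_VRAM_INFO_V3 *info)
{
        uint16_t off = info->usMemAdjustTblOffset;

        if (off == 0)
                return NULL;
        return (ATOM_INIT_REG_BLOCK *)((uint8_t *)info + off);
}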
#define ATOM_VRAM_INFO_LAST     ATOM_VRAM_INFO_V3

typedef struct _ATOM_VRAM_INFO_V4
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT usMemAdjustTblOffset;   // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
  USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
  USHORT usRerseved;
  UCHAR ucMemDQ7_0ByteRemap;     // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
  ULONG ulMemDQ7_0BitRemap;      // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21]
  UCHAR ucReservde[4];
  UCHAR ucNumOfVRAMModule;
  ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
  ATOM_INIT_REG_BLOCK asMemPatch; // for allocation
                                  // ATOM_INIT_REG_BLOCK aMemAdjust;
}ATOM_VRAM_INFO_V4;
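Because each V4 module entry begins with usModuleSize, the entries can be walked without knowing their exact layout. A hedged sketch (helper name and use of host-endian reads are assumptions, not from this patch):

#include <stddef.h>
#include <stdint.h>
#include "atombios.h"   /* assumed to provide the ATOM_* types above */

/*
 * Hypothetical walk over the variable-sized VRAM module entries of an
 * ATOM_VRAM_INFO_V4 table, hopping by usModuleSize from entry to entry.
 */
static ATOM_VRAM_MODULE_V4 *vram_info_v4_find_module(ATOM_VRAM_INFO_V4 *info,
                                                     uint8_t ext_memory_id)
{
        uint8_t *p = (uint8_t *)info->aVramInfo;
        int i;

        for (i = 0; i < info->ucNumOfVRAMModule; i++) {
                ATOM_VRAM_MODULE_V4 *mod = (ATOM_VRAM_MODULE_V4 *)p;

                if (mod->ucExtMemoryID == ext_memory_id)
                        return mod;
                p += mod->usModuleSize;   /* hop to the next entry */
        }
        return NULL;   /* no module with that external memory ID */
}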

typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  UCHAR aVID_PinsShift[9];   // 8 bit strap maximum+terminator
}ATOM_VRAM_GPIO_DETECTION_INFO;


typedef struct _ATOM_MEMORY_TRAINING_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  UCHAR ucTrainingLoop;
  UCHAR ucReserved[3];
  ATOM_INIT_REG_BLOCK asMemTrainingSetting;
}ATOM_MEMORY_TRAINING_INFO;


typedef struct SW_I2C_CNTL_DATA_PARAMETERS
{
  UCHAR ucControl;
  UCHAR ucData;
  UCHAR ucSatus;
  UCHAR ucTemp;
} SW_I2C_CNTL_DATA_PARAMETERS;

#define SW_I2C_CNTL_DATA_PS_ALLOCATION  SW_I2C_CNTL_DATA_PARAMETERS

typedef struct _SW_I2C_IO_DATA_PARAMETERS
{
  USHORT GPIO_Info;
  UCHAR ucAct;
  UCHAR ucData;
} SW_I2C_IO_DATA_PARAMETERS;

#define SW_I2C_IO_DATA_PS_ALLOCATION  SW_I2C_IO_DATA_PARAMETERS

@@ -4087,127 +4998,136 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS {
#define SW_I2C_CNTL_CLOSE     5
#define SW_I2C_CNTL_WRITE1BIT 6

//==============================VESA definition Portion===============================
#define VESA_OEM_PRODUCT_REV             '01.00'
#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB   // refer to VBE spec p.32, no TTY support
#define VESA_MODE_WIN_ATTRIBUTE          7
#define VESA_WIN_SIZE                    64

typedef struct _PTR_32_BIT_STRUCTURE
{
  USHORT Offset16;
  USHORT Segment16;
} PTR_32_BIT_STRUCTURE;

typedef union _PTR_32_BIT_UNION
{
  PTR_32_BIT_STRUCTURE SegmentOffset;
  ULONG Ptr32_Bit;
} PTR_32_BIT_UNION;

typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE
{
  UCHAR VbeSignature[4];
  USHORT VbeVersion;
  PTR_32_BIT_UNION OemStringPtr;
  UCHAR Capabilities[4];
  PTR_32_BIT_UNION VideoModePtr;
  USHORT TotalMemory;
} VBE_1_2_INFO_BLOCK_UPDATABLE;


typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE
{
  VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock;
  USHORT OemSoftRev;
  PTR_32_BIT_UNION OemVendorNamePtr;
  PTR_32_BIT_UNION OemProductNamePtr;
  PTR_32_BIT_UNION OemProductRevPtr;
} VBE_2_0_INFO_BLOCK_UPDATABLE;

typedef union _VBE_VERSION_UNION
{
  VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
  VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
} VBE_VERSION_UNION;

typedef struct _VBE_INFO_BLOCK
{
  VBE_VERSION_UNION UpdatableVBE_Info;
  UCHAR Reserved[222];
  UCHAR OemData[256];
} VBE_INFO_BLOCK;

typedef struct _VBE_FP_INFO
{
  USHORT HSize;
  USHORT VSize;
  USHORT FPType;
  UCHAR RedBPP;
  UCHAR GreenBPP;
  UCHAR BlueBPP;
  UCHAR ReservedBPP;
  ULONG RsvdOffScrnMemSize;
  ULONG RsvdOffScrnMEmPtr;
  UCHAR Reserved[14];
} VBE_FP_INFO;

typedef struct _VESA_MODE_INFO_BLOCK
{
// Mandatory information for all VBE revisions
  USHORT ModeAttributes;         // dw ? ; mode attributes
  UCHAR WinAAttributes;          // db ? ; window A attributes
  UCHAR WinBAttributes;          // db ? ; window B attributes
  USHORT WinGranularity;         // dw ? ; window granularity
  USHORT WinSize;                // dw ? ; window size
  USHORT WinASegment;            // dw ? ; window A start segment
  USHORT WinBSegment;            // dw ? ; window B start segment
  ULONG WinFuncPtr;              // dd ? ; real mode pointer to window function
  USHORT BytesPerScanLine;       // dw ? ; bytes per scan line

// Mandatory information for VBE 1.2 and above
  USHORT XResolution;            // dw ? ; horizontal resolution in pixels or characters
  USHORT YResolution;            // dw ? ; vertical resolution in pixels or characters
  UCHAR XCharSize;               // db ? ; character cell width in pixels
  UCHAR YCharSize;               // db ? ; character cell height in pixels
  UCHAR NumberOfPlanes;          // db ? ; number of memory planes
  UCHAR BitsPerPixel;            // db ? ; bits per pixel
  UCHAR NumberOfBanks;           // db ? ; number of banks
  UCHAR MemoryModel;             // db ? ; memory model type
  UCHAR BankSize;                // db ? ; bank size in KB
  UCHAR NumberOfImagePages;      // db ? ; number of images
  UCHAR ReservedForPageFunction; // db 1 ; reserved for page function

// Direct Color fields (required for direct/6 and YUV/7 memory models)
  UCHAR RedMaskSize;             // db ? ; size of direct color red mask in bits
  UCHAR RedFieldPosition;        // db ? ; bit position of lsb of red mask
  UCHAR GreenMaskSize;           // db ? ; size of direct color green mask in bits
  UCHAR GreenFieldPosition;      // db ? ; bit position of lsb of green mask
  UCHAR BlueMaskSize;            // db ? ; size of direct color blue mask in bits
  UCHAR BlueFieldPosition;       // db ? ; bit position of lsb of blue mask
  UCHAR RsvdMaskSize;            // db ? ; size of direct color reserved mask in bits
  UCHAR RsvdFieldPosition;       // db ? ; bit position of lsb of reserved mask
  UCHAR DirectColorModeInfo;     // db ? ; direct color mode attributes

// Mandatory information for VBE 2.0 and above
  ULONG PhysBasePtr;             // dd ? ; physical address for flat memory frame buffer
  ULONG Reserved_1;              // dd 0 ; reserved - always set to 0
  USHORT Reserved_2;             // dw 0 ; reserved - always set to 0

// Mandatory information for VBE 3.0 and above
  USHORT LinBytesPerScanLine;    // dw ? ; bytes per scan line for linear modes
  UCHAR BnkNumberOfImagePages;   // db ? ; number of images for banked modes
  UCHAR LinNumberOfImagPages;    // db ? ; number of images for linear modes
  UCHAR LinRedMaskSize;          // db ? ; size of direct color red mask (linear modes)
  UCHAR LinRedFieldPosition;     // db ? ; bit position of lsb of red mask (linear modes)
  UCHAR LinGreenMaskSize;        // db ? ; size of direct color green mask (linear modes)
  UCHAR LinGreenFieldPosition;   // db ? ; bit position of lsb of green mask (linear modes)
  UCHAR LinBlueMaskSize;         // db ? ; size of direct color blue mask (linear modes)
  UCHAR LinBlueFieldPosition;    // db ? ; bit position of lsb of blue mask (linear modes)
  UCHAR LinRsvdMaskSize;         // db ? ; size of direct color reserved mask (linear modes)
  UCHAR LinRsvdFieldPosition;    // db ? ; bit position of lsb of reserved mask (linear modes)
  ULONG MaxPixelClock;           // dd ? ; maximum pixel clock (in Hz) for graphics mode
  UCHAR Reserved;                // db 190 dup (0)
} VESA_MODE_INFO_BLOCK;
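As a rough worked example (not part of this patch), the linear framebuffer footprint of one screen follows directly from the VBE 3.0 fields above; the helper name is invented for illustration.

#include <stdint.h>
#include "atombios.h"   /* assumed to provide VESA_MODE_INFO_BLOCK */

/*
 * Hypothetical helper: size in bytes of one visible page at PhysBasePtr,
 * preferring the linear-mode pitch when the mode reports one and falling
 * back to the banked pitch otherwise.
 */
static uint32_t vesa_mode_fb_size(const VESA_MODE_INFO_BLOCK *mi)
{
        uint32_t pitch = mi->LinBytesPerScanLine ?
                         mi->LinBytesPerScanLine : mi->BytesPerScanLine;

        return pitch * mi->YResolution;
}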

// BIOS function CALLS
#define ATOM_BIOS_EXTENDED_FUNCTION_CODE        0xA0    // ATI Extended Function code
#define ATOM_BIOS_FUNCTION_COP_MODE             0x00
#define ATOM_BIOS_FUNCTION_SHORT_QUERY1         0x04
#define ATOM_BIOS_FUNCTION_SHORT_QUERY2         0x05
#define ATOM_BIOS_FUNCTION_SHORT_QUERY3         0x06
#define ATOM_BIOS_FUNCTION_GET_DDC              0x0B
#define ATOM_BIOS_FUNCTION_ASIC_DSTATE          0x0E
#define ATOM_BIOS_FUNCTION_DEBUG_PLAY           0x0F
#define ATOM_BIOS_FUNCTION_STV_STD              0x16
@@ -4217,100 +5137,135 @@ typedef struct _VESA_MODE_INFO_BLOCK {
#define ATOM_BIOS_FUNCTION_PANEL_CONTROL        0x82
#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET       0x83
#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH    0x84
#define ATOM_BIOS_FUNCTION_HW_ICON              0x8A
#define ATOM_BIOS_FUNCTION_SET_CMOS             0x8B
#define SUB_FUNCTION_UPDATE_DISPLAY_INFO        0x8000  // Sub function 80
#define SUB_FUNCTION_UPDATE_EXPANSION_INFO      0x8100  // Sub function 81

#define ATOM_BIOS_FUNCTION_DISPLAY_INFO         0x8D
#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF        0x8E
#define ATOM_BIOS_FUNCTION_VIDEO_STATE          0x8F
#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE    0x0300  // Sub function 03
#define ATOM_SUB_FUNCTION_GET_LIDSTATE          0x0700  // Sub function 7
#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE  0x1400  // Notify caller the current thermal state
#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300  // Notify caller the current critical state
#define ATOM_SUB_FUNCTION_SET_LIDSTATE          0x8500  // Sub function 85
#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900  // Sub function 89
#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT    0x9400  // Notify caller that ADC is supported

#define ATOM_BIOS_FUNCTION_VESA_DPMS            0x4F10  // Set DPMS
#define ATOM_SUB_FUNCTION_SET_DPMS              0x0001  // BL: Sub function 01
#define ATOM_SUB_FUNCTION_GET_DPMS              0x0002  // BL: Sub function 02
#define ATOM_PARAMETER_VESA_DPMS_ON             0x0000  // BH Parameter for DPMS ON.
#define ATOM_PARAMETER_VESA_DPMS_STANDBY        0x0100  // BH Parameter for DPMS STANDBY
#define ATOM_PARAMETER_VESA_DPMS_SUSPEND        0x0200  // BH Parameter for DPMS SUSPEND
#define ATOM_PARAMETER_VESA_DPMS_OFF            0x0400  // BH Parameter for DPMS OFF
#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON      0x0800  // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED)

#define ATOM_BIOS_RETURN_CODE_MASK              0x0000FF00L
#define ATOM_BIOS_REG_HIGH_MASK                 0x0000FF00L
#define ATOM_BIOS_REG_LOW_MASK                  0x000000FFL

// structure used for VBIOS only

// DispOutInfoTable
typedef struct _ASIC_TRANSMITTER_INFO
{
  USHORT usTransmitterObjId;
  USHORT usSupportDevice;
  UCHAR ucTransmitterCmdTblId;
  UCHAR ucConfig;
  UCHAR ucEncoderID;         // available 1st encoder ( default )
  UCHAR ucOptionEncoderID;   // available 2nd encoder ( optional )
  UCHAR uc2ndEncoderID;
  UCHAR ucReserved;
}ASIC_TRANSMITTER_INFO;

typedef struct _ASIC_ENCODER_INFO
{
  UCHAR ucEncoderID;
  UCHAR ucEncoderConfig;
  USHORT usEncoderCmdTblId;
}ASIC_ENCODER_INFO;

typedef struct _ATOM_DISP_OUT_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT ptrTransmitterInfo;
  USHORT ptrEncoderInfo;
  ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
  ASIC_ENCODER_INFO asEncoderInfo[1];
}ATOM_DISP_OUT_INFO;

typedef struct _ATOM_DISP_OUT_INFO_V2
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT ptrTransmitterInfo;
  USHORT ptrEncoderInfo;
  USHORT ptrMainCallParserFar;   // direct address of main parser call in VBIOS binary.
  ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
  ASIC_ENCODER_INFO asEncoderInfo[1];
}ATOM_DISP_OUT_INFO_V2;
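A speculative sketch of how the two sub-arrays could be walked; it assumes ptrTransmitterInfo and ptrEncoderInfo are byte offsets from the start of the table and that the transmitter array runs up to where the encoder array begins (the [1]-sized arrays are only placeholders). None of this is spelled out by the header, so treat it as an assumption.

#include "atombios.h"   /* assumed to provide the structures above */

/* Hypothetical iteration over the transmitter entries of ATOM_DISP_OUT_INFO. */
static void disp_out_info_for_each_transmitter(ATOM_DISP_OUT_INFO *info,
                                               void (*fn)(ASIC_TRANSMITTER_INFO *))
{
        ASIC_TRANSMITTER_INFO *t =
                (ASIC_TRANSMITTER_INFO *)((UCHAR *)info + info->ptrTransmitterInfo);
        ASIC_TRANSMITTER_INFO *end =
                (ASIC_TRANSMITTER_INFO *)((UCHAR *)info + info->ptrEncoderInfo);

        for (; t < end; t++)
                fn(t);
}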

// DispDevicePriorityInfo
typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT asDevicePriority[16];
}ATOM_DISPLAY_DEVICE_PRIORITY_INFO;

// ProcessAuxChannelTransactionTable
typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
{
  USHORT lpAuxRequest;
  USHORT lpDataOut;
  UCHAR ucChannelID;
  union
  {
    UCHAR ucReplyStatus;
    UCHAR ucDelay;
  };
  UCHAR ucDataOutLen;
  UCHAR ucReserved;
}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;

// ProcessAuxChannelTransactionTable
typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2
{
  USHORT lpAuxRequest;
  USHORT lpDataOut;
  UCHAR ucChannelID;
  union
  {
    UCHAR ucReplyStatus;
    UCHAR ucDelay;
  };
  UCHAR ucDataOutLen;
  UCHAR ucHPD_ID;   // =0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6
}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2;

#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION   PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS

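A rough sketch (not from this patch) of how a caller might fill the V2 parameter block for a native AUX transaction. The assumption, based on how the radeon DP code uses the table, is that lpAuxRequest and lpDataOut are byte offsets into the ATOM scratch space where the request has been staged and where the reply should land; the offsets and helper name here are placeholders.

#include <string.h>
#include "atombios.h"   /* assumed to provide the parameter structures above */

/* Hypothetical setup of an AUX transaction. */
static void fill_aux_transaction(PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 *args,
                                 USHORT req_off, USHORT out_off,
                                 UCHAR channel, UCHAR hpd_id)
{
        memset(args, 0, sizeof(*args));
        args->lpAuxRequest = req_off;   /* offset of the staged AUX request */
        args->lpDataOut    = out_off;   /* offset where reply data is written */
        args->ucChannelID  = channel;   /* which AUX/DDC channel to drive */
        args->ucDelay      = 0;         /* optional delay before the transaction */
        args->ucHPD_ID     = hpd_id;    /* HPD pin associated with the connector */
        /* on return, ucReplyStatus (union with ucDelay) and ucDataOutLen hold the result */
}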
// GetSinkType
typedef struct _DP_ENCODER_SERVICE_PARAMETERS
{
  USHORT ucLinkClock;
  union
  {
    UCHAR ucConfig;   // for DP training command
    UCHAR ucI2cId;    // used for GET_SINK_TYPE command
  };
  UCHAR ucAction;
  UCHAR ucStatus;
  UCHAR ucLaneNum;
  UCHAR ucReserved[2];
}DP_ENCODER_SERVICE_PARAMETERS;

// ucAction
#define ATOM_DP_ACTION_GET_SINK_TYPE           0x01
/* obsolete */
#define ATOM_DP_ACTION_TRAINING_START          0x02
#define ATOM_DP_ACTION_TRAINING_COMPLETE       0x03
#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL    0x04
@@ -4318,7 +5273,7 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define ATOM_DP_ACTION_GET_VSWING_PREEMP       0x06
#define ATOM_DP_ACTION_BLANKING                0x07

// ucConfig
#define ATOM_DP_CONFIG_ENCODER_SEL_MASK        0x03
#define ATOM_DP_CONFIG_DIG1_ENCODER            0x00
#define ATOM_DP_CONFIG_DIG2_ENCODER            0x01
@@ -4326,14 +5281,14 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define ATOM_DP_CONFIG_LINK_SEL_MASK           0x04
#define ATOM_DP_CONFIG_LINK_A                  0x00
#define ATOM_DP_CONFIG_LINK_B                  0x04
/* /obsolete */
#define DP_ENCODER_SERVICE_PS_ALLOCATION       WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
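A hedged sketch of setting up a GET_SINK_TYPE query through the DP encoder service. The idea that the detected sink type comes back in ucStatus is an assumption drawn from how the radeon driver has used this call, not something stated by the header.

#include <string.h>
#include "atombios.h"   /* assumed to provide DP_ENCODER_SERVICE_PARAMETERS */

/* Hypothetical setup for a GET_SINK_TYPE query. */
static void prepare_get_sink_type(DP_ENCODER_SERVICE_PARAMETERS *args,
                                  UCHAR i2c_id)
{
        memset(args, 0, sizeof(*args));
        args->ucAction = ATOM_DP_ACTION_GET_SINK_TYPE;
        args->ucI2cId  = i2c_id;   /* AUX/DDC line of the connector being probed */
        /* ucLinkClock, ucConfig and ucLaneNum only matter for the training actions */
}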

// DP_TRAINING_TABLE
#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR   ATOM_DP_TRAINING_TBL_ADDR
#define DPCD_SET_SS_CNTL_TBL_ADDR                     (ATOM_DP_TRAINING_TBL_ADDR + 8)
#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR          (ATOM_DP_TRAINING_TBL_ADDR + 16)
#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR           (ATOM_DP_TRAINING_TBL_ADDR + 24)
#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR           (ATOM_DP_TRAINING_TBL_ADDR + 32)
#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR         (ATOM_DP_TRAINING_TBL_ADDR + 40)
#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR          (ATOM_DP_TRAINING_TBL_ADDR + 48)
@@ -4341,183 +5296,241 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR                 (ATOM_DP_TRAINING_TBL_ADDR + 64)
#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR            (ATOM_DP_TRAINING_TBL_ADDR + 72)
#define DP_I2C_AUX_DDC_READ_TBL_ADDR                  (ATOM_DP_TRAINING_TBL_ADDR + 76)
#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR             (ATOM_DP_TRAINING_TBL_ADDR + 80)
#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR              (ATOM_DP_TRAINING_TBL_ADDR + 84)

typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
{
  UCHAR ucI2CSpeed;
  union
  {
    UCHAR ucRegIndex;
    UCHAR ucStatus;
  };
  USHORT lpI2CDataOut;
  UCHAR ucFlag;
  UCHAR ucTransBytes;
  UCHAR ucSlaveAddr;
  UCHAR ucLineNumber;
}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;

#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION   PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS

// ucFlag
#define HW_I2C_WRITE        1
#define HW_I2C_READ         0
#define I2C_2BYTE_ADDR      0x02
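A hedged sketch of filling the parameter block for a hardware I2C read through the ProcessI2cChannelTransaction command table. The output offset, the speed value and its unit are assumptions (placeholders), not something this header specifies.

#include <string.h>
#include "atombios.h"   /* assumed to provide the I2C parameter structure above */

/* Hypothetical setup of a HW I2C read. */
static void fill_hw_i2c_read(PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS *args,
                             UCHAR line, UCHAR slave_addr, UCHAR reg,
                             UCHAR count, USHORT out_off)
{
        memset(args, 0, sizeof(*args));
        args->ucI2CSpeed    = 50;          /* bus speed; unit assumed to be 10kHz steps */
        args->ucRegIndex    = reg;         /* register/offset within the slave */
        args->lpI2CDataOut  = out_off;     /* scratch-space offset for the read data */
        args->ucFlag        = HW_I2C_READ;
        args->ucTransBytes  = count;       /* number of bytes to transfer */
        args->ucSlaveAddr   = slave_addr;  /* 8-bit I2C address */
        args->ucLineNumber  = line;        /* which I2C/GPIO line to use */
}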

typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
{
  UCHAR ucHWBlkInst;    // HW block instance, 0, 1, 2, ...
  UCHAR ucReserved[3];
}SET_HWBLOCK_INSTANCE_PARAMETER_V2;

#define HWBLKINST_INSTANCE_MASK     0x07
#define HWBLKINST_HWBLK_MASK        0xF0
#define HWBLKINST_HWBLK_SHIFT       0x04

// ucHWBlock
#define SELECT_DISP_ENGINE          0
#define SELECT_DISP_PLL             1
#define SELECT_DCIO_UNIPHY_LINK0    2
#define SELECT_DCIO_UNIPHY_LINK1    3
#define SELECT_DCIO_IMPCAL          4
#define SELECT_DCIO_DIG             6
#define SELECT_CRTC_PIXEL_RATE      7
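Following the mask/shift definitions above, the block type lives in bits [7:4] of ucHWBlkInst and the instance number in bits [2:0]. A minimal sketch (helper name invented) of packing the field, e.g. with block = SELECT_CRTC_PIXEL_RATE and instance = 1:

#include "atombios.h"   /* assumed to provide SET_HWBLOCK_INSTANCE_PARAMETER_V2 */

/* Hypothetical packing of the block-select and instance fields. */
static void set_hwblock_instance(SET_HWBLOCK_INSTANCE_PARAMETER_V2 *args,
                                 UCHAR block, UCHAR instance)
{
        args->ucHWBlkInst = (UCHAR)((block << HWBLKINST_HWBLK_SHIFT) |
                                    (instance & HWBLKINST_INSTANCE_MASK));
        args->ucReserved[0] = args->ucReserved[1] = args->ucReserved[2] = 0;
}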

/****************************************************************************/
// Portion VI: Definitions for VBIOS MC scratch registers that the driver uses
/****************************************************************************/

#define MC_MISC0__MEMORY_TYPE_MASK    0xF0000000
#define MC_MISC0__MEMORY_TYPE__GDDR1  0x10000000
#define MC_MISC0__MEMORY_TYPE__DDR2   0x20000000
#define MC_MISC0__MEMORY_TYPE__GDDR3  0x30000000
#define MC_MISC0__MEMORY_TYPE__GDDR4  0x40000000
#define MC_MISC0__MEMORY_TYPE__GDDR5  0x50000000
#define MC_MISC0__MEMORY_TYPE__DDR3   0xB0000000

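A minimal sketch (not from the patch) of decoding the memory-type field of the MC_MISC0 scratch value with the definitions above; the helper name is illustrative.

#include "atombios.h"   /* assumed to provide the MC_MISC0__MEMORY_TYPE_* values */

/* Hypothetical decode of the memory type encoded in MC_MISC0. */
static const char *mc_misc0_memory_type_name(unsigned int mc_misc0)
{
        switch (mc_misc0 & MC_MISC0__MEMORY_TYPE_MASK) {
        case MC_MISC0__MEMORY_TYPE__GDDR1: return "GDDR1";
        case MC_MISC0__MEMORY_TYPE__DDR2:  return "DDR2";
        case MC_MISC0__MEMORY_TYPE__GDDR3: return "GDDR3";
        case MC_MISC0__MEMORY_TYPE__GDDR4: return "GDDR4";
        case MC_MISC0__MEMORY_TYPE__GDDR5: return "GDDR5";
        case MC_MISC0__MEMORY_TYPE__DDR3:  return "DDR3";
        default:                           return "unknown";
        }
}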
/****************************************************************************/
// Portion VI: Definitions being obsoleted
/****************************************************************************/

//==========================================================================================
// Remove the definitions below when the driver is ready!
typedef struct _ATOM_DAC_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT usMaxFrequency;   // in 10kHz unit
  USHORT usReserved;
}ATOM_DAC_INFO;

typedef struct _COMPASSIONATE_DATA
{
  ATOM_COMMON_TABLE_HEADER sHeader;

  //============================== DAC1 portion
  UCHAR ucDAC1_BG_Adjustment;
  UCHAR ucDAC1_DAC_Adjustment;
  USHORT usDAC1_FORCE_Data;
  //============================== DAC2 portion
  UCHAR ucDAC2_CRT2_BG_Adjustment;
  UCHAR ucDAC2_CRT2_DAC_Adjustment;
  USHORT usDAC2_CRT2_FORCE_Data;
  USHORT usDAC2_CRT2_MUX_RegisterIndex;
  UCHAR ucDAC2_CRT2_MUX_RegisterInfo;   // Bit[4:0]=Bit position, Bit[7]=1: Active High; =0: Active Low
  UCHAR ucDAC2_NTSC_BG_Adjustment;
  UCHAR ucDAC2_NTSC_DAC_Adjustment;
  USHORT usDAC2_TV1_FORCE_Data;
  USHORT usDAC2_TV1_MUX_RegisterIndex;
  UCHAR ucDAC2_TV1_MUX_RegisterInfo;    // Bit[4:0]=Bit position, Bit[7]=1: Active High; =0: Active Low
  UCHAR ucDAC2_CV_BG_Adjustment;
  UCHAR ucDAC2_CV_DAC_Adjustment;
  USHORT usDAC2_CV_FORCE_Data;
  USHORT usDAC2_CV_MUX_RegisterIndex;
  UCHAR ucDAC2_CV_MUX_RegisterInfo;     // Bit[4:0]=Bit position, Bit[7]=1: Active High; =0: Active Low
  UCHAR ucDAC2_PAL_BG_Adjustment;
  UCHAR ucDAC2_PAL_DAC_Adjustment;
  USHORT usDAC2_TV2_FORCE_Data;
}COMPASSIONATE_DATA;

/****************************Supported Device Info Table Definitions**********************/
//  ucConnectInfo:
//    [7:4] - connector type
//      = 1   - VGA connector
//      = 2   - DVI-I
//      = 3   - DVI-D
//      = 4   - DVI-A
//      = 5   - SVIDEO
//      = 6   - COMPOSITE
//      = 7   - LVDS
//      = 8   - DIGITAL LINK
//      = 9   - SCART
//      = 0xA - HDMI_type A
//      = 0xB - HDMI_type B
//      = 0xE - Special case1 (DVI+DIN)
//      Others = TBD
//    [3:0] - DAC Associated
//      = 0   - no DAC
//      = 1   - DACA
//      = 2   - DACB
//      = 3   - External DAC
//      Others = TBD
//

typedef struct _ATOM_CONNECTOR_INFO
{
#if ATOM_BIG_ENDIAN
  UCHAR bfConnectorType:4;
  UCHAR bfAssociatedDAC:4;
#else
  UCHAR bfAssociatedDAC:4;
  UCHAR bfConnectorType:4;
#endif
}ATOM_CONNECTOR_INFO;

typedef union _ATOM_CONNECTOR_INFO_ACCESS
{
  ATOM_CONNECTOR_INFO sbfAccess;
  UCHAR ucAccess;
}ATOM_CONNECTOR_INFO_ACCESS;

typedef struct _ATOM_CONNECTOR_INFO_I2C
{
  ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
}ATOM_CONNECTOR_INFO_I2C;
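A minimal sketch (helper name invented) of decoding one entry via the packed byte, matching the ucConnectInfo layout documented above: connector type in the high nibble, associated DAC in the low nibble.

#include "atombios.h"   /* assumed to provide ATOM_CONNECTOR_INFO_I2C */

/* Hypothetical decode of one supported-device connector entry. */
static void connector_info_decode(const ATOM_CONNECTOR_INFO_I2C *ci,
                                  UCHAR *connector_type, UCHAR *dac)
{
        UCHAR raw = ci->sucConnectorInfo.ucAccess;

        *connector_type = (raw >> 4) & 0x0F;   /* 1=VGA, 2=DVI-I, ... 0xA=HDMI type A */
        *dac            = raw & 0x0F;          /* 0=none, 1=DACA, 2=DACB, 3=external */
}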


typedef struct _ATOM_SUPPORTED_DEVICES_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT usDeviceSupport;
  ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
}ATOM_SUPPORTED_DEVICES_INFO;

#define NO_INT_SRC_MAPPED       0xFF

typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP
{
  UCHAR ucIntSrcBitmap;
}ATOM_CONNECTOR_INC_SRC_BITMAP;

typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT usDeviceSupport;
  ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
  ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
}ATOM_SUPPORTED_DEVICES_INFO_2;

typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT usDeviceSupport;
  ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
  ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
}ATOM_SUPPORTED_DEVICES_INFO_2d1;

#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1

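A hedged sketch of walking the supported-device table: the assumption (consistent with how drivers consume this table) is that bit i of usDeviceSupport flags device index i as present, in which case asConnInfo[i] describes its connector and DDC line.

#include "atombios.h"   /* assumed to provide ATOM_SUPPORTED_DEVICES_INFO_2d1 */

/* Hypothetical walk over the supported-device bitmap. */
static void for_each_supported_device(ATOM_SUPPORTED_DEVICES_INFO_2d1 *info,
                                      void (*fn)(int index, ATOM_CONNECTOR_INFO_I2C *ci))
{
        int i;

        for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
                if (!(info->usDeviceSupport & (1 << i)))
                        continue;   /* device index not wired up on this board */
                fn(i, &info->asConnInfo[i]);
        }
}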


typedef struct _ATOM_MISC_CONTROL_INFO
{
  USHORT usFrequency;
  UCHAR ucPLL_ChargePump;    // PLL charge-pump gain control
  UCHAR ucPLL_DutyCycle;     // PLL duty cycle control
  UCHAR ucPLL_VCO_Gain;      // PLL VCO gain control
  UCHAR ucPLL_VoltageSwing;  // PLL driver voltage swing control
}ATOM_MISC_CONTROL_INFO;

#define ATOM_MAX_MISC_INFO      4

typedef struct _ATOM_TMDS_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT usMaxFrequency;   // in 10kHz
  ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO];
}ATOM_TMDS_INFO;


typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE
{
  UCHAR ucTVStandard;   // Same as the TV standards defined above
  UCHAR ucPadding[1];
}ATOM_ENCODER_ANALOG_ATTRIBUTE;

typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE
{
  UCHAR ucAttribute;    // Same as the other digital encoder attributes defined above
  UCHAR ucPadding[1];
}ATOM_ENCODER_DIGITAL_ATTRIBUTE;

typedef union _ATOM_ENCODER_ATTRIBUTE
{
  ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
  ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
}ATOM_ENCODER_ATTRIBUTE;


typedef struct _DVO_ENCODER_CONTROL_PARAMETERS
{
  USHORT usPixelClock;
  USHORT usEncoderID;
  UCHAR ucDeviceType;   // Use ATOM_DEVICE_xxx1_Index to indicate device type only.
  UCHAR ucAction;       // ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
  ATOM_ENCODER_ATTRIBUTE usDevAttr;
}DVO_ENCODER_CONTROL_PARAMETERS;

typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION
{
  DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;   // Caller doesn't need to init this portion
}DVO_ENCODER_CONTROL_PS_ALLOCATION;


#define ATOM_XTMDS_ASIC_SI164_ID        1
#define ATOM_XTMDS_ASIC_SI178_ID        2
@@ -4526,27 +5539,30 @@ typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
#define ATOM_XTMDS_SUPPORTED_DUALLINK   0x00000002
#define ATOM_XTMDS_MVPU_FPGA            0x00000004


typedef struct _ATOM_XTMDS_INFO
{
  ATOM_COMMON_TABLE_HEADER sHeader;
  USHORT usSingleLinkMaxFrequency;
  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;   // Points to the ID on which I2C is used to control the external chip
  UCHAR ucXtransimitterID;
  UCHAR ucSupportedLink;    // Bit field: bit0=1, single link supported; bit1=1, dual link supported
  UCHAR ucSequnceAlterID;   // Even with the same external TMDS ASIC, it's possible that the programming sequence alters
                            // due to design. This ID is used to alert the driver that the sequence is not "standard"!
  UCHAR ucMasterAddress;    // Address to control the Master xTMDS Chip
  UCHAR ucSlaveAddress;     // Address to control the Slave xTMDS Chip
}ATOM_XTMDS_INFO;

typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS
{
  UCHAR ucEnable;       // ATOM_ENABLE=On or ATOM_DISABLE=Off
  UCHAR ucDevice;       // ATOM_DEVICE_DFP1_INDEX....
  UCHAR ucPadding[2];
}DFP_DPMS_STATUS_CHANGE_PARAMETERS;

4547/****************************Legacy Power Play Table Definitions **********************/ 5563/****************************Legacy Power Play Table Definitions **********************/
4548 5564
4549/* Definitions for ulPowerPlayMiscInfo */ 5565//Definitions for ulPowerPlayMiscInfo
4550#define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L 5566#define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L
4551#define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L 5567#define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L
4552#define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L 5568#define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L
@@ -4558,8 +5574,8 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
4558 5574
4559#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L 5575#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L
4560#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L 5576#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L
4561#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L /* When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program */ 5577#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L //When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program
4562 5578
4563#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L 5579#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L
4564#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L 5580#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L
4565#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L 5581#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L
@@ -4569,22 +5585,22 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
4569#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L 5585#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L
4570 5586
4571#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L 5587#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L
4572#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L 5588#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L
4573#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L 5589#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L
4574#define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L 5590#define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L
4575#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L 5591#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L
4576 5592
4577#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L /* 0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved */ 5593#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved
4578#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20 5594#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20
4579 5595
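The _MASK/_SHIFT pair above follows the usual pattern for multi-bit fields in ulPowerPlayMiscInfo; a minimal illustration (not part of the patch):

/* Illustrative only: extract the frame-modulation setting from
 * ulPowerPlayMiscInfo. 0 = FM disabled, 1 = 2-level FM, 2 = 4-level FM,
 * 3 = reserved, per the comment on the mask above. */
static unsigned int pm_frame_modulation(ULONG misc_info)
{
        return (misc_info & ATOM_PM_MISCINFO_FRAME_MODULATION_MASK) >>
                ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT;
}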
4580#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L 5596#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L
4581#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L 5597#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L
4582#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L 5598#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L
4583#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L /* When set, Dynamic */ 5599#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L //When set, Dynamic
4584#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L /* When set, Dynamic */ 5600#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L //When set, Dynamic
4585#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L /* When set, This mode is for acceleated 3D mode */ 5601#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L //When set, This mode is for acceleated 3D mode
4586 5602
4587#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L /* 1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) */ 5603#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks)
4588#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28 5604#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28
4589#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L 5605#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L
4590 5606
@@ -4594,55 +5610,59 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
4594#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L 5610#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L
4595#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L 5611#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L
4596#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L 5612#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L
4597#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L /* If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption. */ 5613#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L //If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption.
4598 /* If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback */ 5614 //If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback
4599#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L 5615#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L
4600#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L 5616#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L
4601#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L 5617#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L
4602 5618
4603/* ucTableFormatRevision=1 */ 5619//ucTableFormatRevision=1
4604/* ucTableContentRevision=1 */ 5620//ucTableContentRevision=1
4605typedef struct _ATOM_POWERMODE_INFO { 5621typedef struct _ATOM_POWERMODE_INFO
4606 ULONG ulMiscInfo; /* The power level should be arranged in ascending order */ 5622{
4607 ULONG ulReserved1; /* must set to 0 */ 5623 ULONG ulMiscInfo; //The power level should be arranged in ascending order
4608 ULONG ulReserved2; /* must set to 0 */ 5624 ULONG ulReserved1; // must set to 0
4609 USHORT usEngineClock; 5625 ULONG ulReserved2; // must set to 0
4610 USHORT usMemoryClock; 5626 USHORT usEngineClock;
4611 UCHAR ucVoltageDropIndex; /* index to GPIO table */ 5627 USHORT usMemoryClock;
4612 UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */ 5628 UCHAR ucVoltageDropIndex; // index to GPIO table
4613 UCHAR ucMinTemperature; 5629 UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
4614 UCHAR ucMaxTemperature; 5630 UCHAR ucMinTemperature;
4615 UCHAR ucNumPciELanes; /* number of PCIE lanes */ 5631 UCHAR ucMaxTemperature;
4616} ATOM_POWERMODE_INFO; 5632 UCHAR ucNumPciELanes; // number of PCIE lanes
4617 5633}ATOM_POWERMODE_INFO;
4618/* ucTableFormatRevision=2 */ 5634
4619/* ucTableContentRevision=1 */ 5635//ucTableFormatRevision=2
4620typedef struct _ATOM_POWERMODE_INFO_V2 { 5636//ucTableContentRevision=1
4621 ULONG ulMiscInfo; /* The power level should be arranged in ascending order */ 5637typedef struct _ATOM_POWERMODE_INFO_V2
4622 ULONG ulMiscInfo2; 5638{
4623 ULONG ulEngineClock; 5639 ULONG ulMiscInfo; //The power level should be arranged in ascending order
4624 ULONG ulMemoryClock; 5640 ULONG ulMiscInfo2;
4625 UCHAR ucVoltageDropIndex; /* index to GPIO table */ 5641 ULONG ulEngineClock;
4626 UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */ 5642 ULONG ulMemoryClock;
4627 UCHAR ucMinTemperature; 5643 UCHAR ucVoltageDropIndex; // index to GPIO table
4628 UCHAR ucMaxTemperature; 5644 UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
4629 UCHAR ucNumPciELanes; /* number of PCIE lanes */ 5645 UCHAR ucMinTemperature;
4630} ATOM_POWERMODE_INFO_V2; 5646 UCHAR ucMaxTemperature;
4631 5647 UCHAR ucNumPciELanes; // number of PCIE lanes
4632/* ucTableFormatRevision=2 */ 5648}ATOM_POWERMODE_INFO_V2;
4633/* ucTableContentRevision=2 */ 5649
4634typedef struct _ATOM_POWERMODE_INFO_V3 { 5650//ucTableFormatRevision=2
4635 ULONG ulMiscInfo; /* The power level should be arranged in ascending order */ 5651//ucTableContentRevision=2
4636 ULONG ulMiscInfo2; 5652typedef struct _ATOM_POWERMODE_INFO_V3
4637 ULONG ulEngineClock; 5653{
4638 ULONG ulMemoryClock; 5654 ULONG ulMiscInfo; //The power level should be arranged in ascending order
4639 UCHAR ucVoltageDropIndex; /* index to Core (VDDC) votage table */ 5655 ULONG ulMiscInfo2;
4640 UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */ 5656 ULONG ulEngineClock;
4641 UCHAR ucMinTemperature; 5657 ULONG ulMemoryClock;
4642 UCHAR ucMaxTemperature; 5658 UCHAR ucVoltageDropIndex; // index to Core (VDDC) votage table
4643 UCHAR ucNumPciELanes; /* number of PCIE lanes */ 5659 UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
4644 UCHAR ucVDDCI_VoltageDropIndex; /* index to VDDCI votage table */ 5660 UCHAR ucMinTemperature;
4645} ATOM_POWERMODE_INFO_V3; 5661 UCHAR ucMaxTemperature;
5662 UCHAR ucNumPciELanes; // number of PCIE lanes
5663 UCHAR ucVDDCI_VoltageDropIndex; // index to VDDCI votage table
5664}ATOM_POWERMODE_INFO_V3;
5665
4646 5666
4647#define ATOM_MAX_NUMBEROF_POWER_BLOCK 8 5667#define ATOM_MAX_NUMBEROF_POWER_BLOCK 8
4648 5668
@@ -4655,40 +5675,44 @@ typedef struct _ATOM_POWERMODE_INFO_V3 {
4655#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04 5675#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04
4656#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05 5676#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05
4657#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06 5677#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06
4658#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 /* Andigilog */ 5678#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 // Andigilog
4659 5679
4660typedef struct _ATOM_POWERPLAY_INFO { 5680
4661 ATOM_COMMON_TABLE_HEADER sHeader; 5681typedef struct _ATOM_POWERPLAY_INFO
4662 UCHAR ucOverdriveThermalController; 5682{
4663 UCHAR ucOverdriveI2cLine; 5683 ATOM_COMMON_TABLE_HEADER sHeader;
4664 UCHAR ucOverdriveIntBitmap; 5684 UCHAR ucOverdriveThermalController;
4665 UCHAR ucOverdriveControllerAddress; 5685 UCHAR ucOverdriveI2cLine;
4666 UCHAR ucSizeOfPowerModeEntry; 5686 UCHAR ucOverdriveIntBitmap;
4667 UCHAR ucNumOfPowerModeEntries; 5687 UCHAR ucOverdriveControllerAddress;
4668 ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; 5688 UCHAR ucSizeOfPowerModeEntry;
4669} ATOM_POWERPLAY_INFO; 5689 UCHAR ucNumOfPowerModeEntries;
4670 5690 ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
4671typedef struct _ATOM_POWERPLAY_INFO_V2 { 5691}ATOM_POWERPLAY_INFO;
4672 ATOM_COMMON_TABLE_HEADER sHeader; 5692
4673 UCHAR ucOverdriveThermalController; 5693typedef struct _ATOM_POWERPLAY_INFO_V2
4674 UCHAR ucOverdriveI2cLine; 5694{
4675 UCHAR ucOverdriveIntBitmap; 5695 ATOM_COMMON_TABLE_HEADER sHeader;
4676 UCHAR ucOverdriveControllerAddress; 5696 UCHAR ucOverdriveThermalController;
4677 UCHAR ucSizeOfPowerModeEntry; 5697 UCHAR ucOverdriveI2cLine;
4678 UCHAR ucNumOfPowerModeEntries; 5698 UCHAR ucOverdriveIntBitmap;
4679 ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; 5699 UCHAR ucOverdriveControllerAddress;
4680} ATOM_POWERPLAY_INFO_V2; 5700 UCHAR ucSizeOfPowerModeEntry;
4681 5701 UCHAR ucNumOfPowerModeEntries;
4682typedef struct _ATOM_POWERPLAY_INFO_V3 { 5702 ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
4683 ATOM_COMMON_TABLE_HEADER sHeader; 5703}ATOM_POWERPLAY_INFO_V2;
4684 UCHAR ucOverdriveThermalController; 5704
4685 UCHAR ucOverdriveI2cLine; 5705typedef struct _ATOM_POWERPLAY_INFO_V3
4686 UCHAR ucOverdriveIntBitmap; 5706{
4687 UCHAR ucOverdriveControllerAddress; 5707 ATOM_COMMON_TABLE_HEADER sHeader;
4688 UCHAR ucSizeOfPowerModeEntry; 5708 UCHAR ucOverdriveThermalController;
4689 UCHAR ucNumOfPowerModeEntries; 5709 UCHAR ucOverdriveI2cLine;
4690 ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; 5710 UCHAR ucOverdriveIntBitmap;
4691} ATOM_POWERPLAY_INFO_V3; 5711 UCHAR ucOverdriveControllerAddress;
5712 UCHAR ucSizeOfPowerModeEntry;
5713 UCHAR ucNumOfPowerModeEntries;
5714 ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
5715}ATOM_POWERPLAY_INFO_V3;
4692 5716
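Because ucSizeOfPowerModeEntry is carried in the table itself, a consumer should step through asPowerPlayInfo by that reported size rather than by sizeof() of whatever struct revision it was compiled against. A hedged sketch of that access pattern (the helper is hypothetical, not from this patch):

/* Illustrative only: index the legacy PowerPlay modes using the entry size
 * reported by the BIOS, so layouts older or newer than this header still
 * line up correctly. */
static ATOM_POWERMODE_INFO_V3 *powermode_entry(ATOM_POWERPLAY_INFO_V3 *info, UCHAR i)
{
        UCHAR *base = (UCHAR *)info->asPowerPlayInfo;

        if (i >= info->ucNumOfPowerModeEntries || i >= ATOM_MAX_NUMBEROF_POWER_BLOCK)
                return NULL;
        return (ATOM_POWERMODE_INFO_V3 *)(base + i * info->ucSizeOfPowerModeEntry);
}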
4693/* New PPlib */ 5717/* New PPlib */
4694/**************************************************************************/ 5718/**************************************************************************/
@@ -4873,40 +5897,42 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
4873 UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16} 5897 UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
4874 UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement. 5898 UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
4875 USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200). 5899 USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
4876 ULONG ulFlags; 5900 ULONG ulFlags;
4877} ATOM_PPLIB_RS780_CLOCK_INFO; 5901} ATOM_PPLIB_RS780_CLOCK_INFO;
4878 5902
4879#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0 5903#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
4880#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1 5904#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
4881#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2 5905#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
4882#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3 5906#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
4883 5907
4884#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is. 5908#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
4885#define ATOM_PPLIB_RS780_SPMCLK_LOW 1 5909#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
4886#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2 5910#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
4887 5911
4888#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0 5912#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
4889#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 5913#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
4890#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 5914#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
4891 5915
4892/**************************************************************************/ 5916/**************************************************************************/
4893 5917
4894/* Following definitions are for compatiblity issue in different SW components. */ 5918
5919// Following definitions are for compatiblity issue in different SW components.
4895#define ATOM_MASTER_DATA_TABLE_REVISION 0x01 5920#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
4896#define Object_Info Object_Header 5921#define Object_Info Object_Header
4897#define AdjustARB_SEQ MC_InitParameter 5922#define AdjustARB_SEQ MC_InitParameter
4898#define VRAM_GPIO_DetectionInfo VoltageObjectInfo 5923#define VRAM_GPIO_DetectionInfo VoltageObjectInfo
4899#define ASIC_VDDCI_Info ASIC_ProfilingInfo 5924#define ASIC_VDDCI_Info ASIC_ProfilingInfo
4900#define ASIC_MVDDQ_Info MemoryTrainingInfo 5925#define ASIC_MVDDQ_Info MemoryTrainingInfo
4901#define SS_Info PPLL_SS_Info 5926#define SS_Info PPLL_SS_Info
4902#define ASIC_MVDDC_Info ASIC_InternalSS_Info 5927#define ASIC_MVDDC_Info ASIC_InternalSS_Info
4903#define DispDevicePriorityInfo SaveRestoreInfo 5928#define DispDevicePriorityInfo SaveRestoreInfo
4904#define DispOutInfo TV_VideoMode 5929#define DispOutInfo TV_VideoMode
4905 5930
5931
4906#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE 5932#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE
4907#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE 5933#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE
4908 5934
4909/* New device naming, remove them when both DAL/VBIOS is ready */ 5935//New device naming, remove them when both DAL/VBIOS is ready
4910#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS 5936#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS
4911#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS 5937#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
4912 5938
@@ -4921,7 +5947,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
4921 5947
4922#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX 5948#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX
4923#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX 5949#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX
4924 5950
4925#define ATOM_DEVICE_DFP2I_INDEX 0x00000009 5951#define ATOM_DEVICE_DFP2I_INDEX 0x00000009
4926#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX) 5952#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX)
4927 5953
@@ -4939,7 +5965,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
4939 5965
4940#define ATOM_S3_DFP2I_ACTIVEb1 0x02 5966#define ATOM_S3_DFP2I_ACTIVEb1 0x02
4941 5967
4942#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE 5968#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE
4943#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE 5969#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE
4944 5970
4945#define ATOM_S3_DFP2I_ACTIVE 0x00000200L 5971#define ATOM_S3_DFP2I_ACTIVE 0x00000200L
@@ -4958,14 +5984,14 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
4958#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02 5984#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02
4959#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L 5985#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L
4960 5986
4961#define TMDS1XEncoderControl DVOEncoderControl 5987#define TMDS1XEncoderControl DVOEncoderControl
4962#define DFP1XOutputControl DVOOutputControl 5988#define DFP1XOutputControl DVOOutputControl
4963 5989
4964#define ExternalDFPOutputControl DFP1XOutputControl 5990#define ExternalDFPOutputControl DFP1XOutputControl
4965#define EnableExternalTMDS_Encoder TMDS1XEncoderControl 5991#define EnableExternalTMDS_Encoder TMDS1XEncoderControl
4966 5992
4967#define DFP1IOutputControl TMDSAOutputControl 5993#define DFP1IOutputControl TMDSAOutputControl
4968#define DFP2IOutputControl LVTMAOutputControl 5994#define DFP2IOutputControl LVTMAOutputControl
4969 5995
4970#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS 5996#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS
4971#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION 5997#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
@@ -4974,7 +6000,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
4974#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION 6000#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
4975 6001
4976#define ucDac1Standard ucDacStandard 6002#define ucDac1Standard ucDacStandard
4977#define ucDac2Standard ucDacStandard 6003#define ucDac2Standard ucDacStandard
4978 6004
4979#define TMDS1EncoderControl TMDSAEncoderControl 6005#define TMDS1EncoderControl TMDSAEncoderControl
4980#define TMDS2EncoderControl LVTMAEncoderControl 6006#define TMDS2EncoderControl LVTMAEncoderControl
@@ -4984,12 +6010,56 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
4984#define CRT1OutputControl DAC1OutputControl 6010#define CRT1OutputControl DAC1OutputControl
4985#define CRT2OutputControl DAC2OutputControl 6011#define CRT2OutputControl DAC2OutputControl
4986 6012
4987/* These two lines will be removed for sure in a few days, will follow up with Michael V. */ 6013//These two lines will be removed for sure in a few days, will follow up with Michael V.
4988#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL 6014#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL
4989#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL 6015#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL
6016
6017//#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
6018//#define ATOM_S2_LCD1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
6019//#define ATOM_S2_TV1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
6020//#define ATOM_S2_DFP1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
6021//#define ATOM_S2_CRT2_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
6022
6023#define ATOM_S6_ACC_REQ_TV2 0x00400000L
6024#define ATOM_DEVICE_TV2_INDEX 0x00000006
6025#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX)
6026#define ATOM_S0_TV2 0x00100000L
6027#define ATOM_S3_TV2_ACTIVE ATOM_S3_DFP6_ACTIVE
6028#define ATOM_S3_TV2_CRTC_ACTIVE ATOM_S3_DFP6_CRTC_ACTIVE
6029
6030//
6031#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
6032#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
6033#define ATOM_S2_TV1_DPMS_STATE 0x00040000L
6034#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
6035#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
6036#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
6037#define ATOM_S2_TV2_DPMS_STATE 0x00400000L
6038#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
6039#define ATOM_S2_CV_DPMS_STATE 0x01000000L
6040#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
6041#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
6042#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L
6043
6044#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
6045#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
6046#define ATOM_S2_TV1_DPMS_STATEb2 0x04
6047#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
6048#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
6049#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
6050#define ATOM_S2_TV2_DPMS_STATEb2 0x40
6051#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
6052#define ATOM_S2_CV_DPMS_STATEb3 0x01
6053#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
6054#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
6055#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
6056
6057#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
6058#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
6059#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
4990 6060
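The block above exposes each device's DPMS state twice: as a bit in the full 32-bit scratch dword (ATOM_S2_*_DPMS_STATE) and as the same bit within an individual byte of that dword (the b2/b3 suffix names the byte). A small illustration, not from the patch:

/* Illustrative only: the same LCD1 DPMS bit tested against the whole
 * S2 scratch dword and against byte 2 of that dword. */
static int lcd1_dpms_enabled(ULONG s2_scratch)
{
        return (s2_scratch & ATOM_S2_LCD1_DPMS_STATE) != 0;        /* bit 17 of the dword */
}

static int lcd1_dpms_enabled_b2(UCHAR s2_scratch_byte2)
{
        return (s2_scratch_byte2 & ATOM_S2_LCD1_DPMS_STATEb2) != 0; /* bit 1 of byte 2 */
}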
4991/*********************************************************************************/ 6061/*********************************************************************************/
4992 6062
4993#pragma pack() /* BIOS data must use byte aligment */ 6063#pragma pack() // BIOS data must use byte aligment
4994 6064
4995#endif /* _ATOMBIOS_H */ 6065#endif /* _ATOMBIOS_H */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index af464e351fbd..dd9fdf560611 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -245,21 +245,25 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
245 245
246 switch (mode) { 246 switch (mode) {
247 case DRM_MODE_DPMS_ON: 247 case DRM_MODE_DPMS_ON:
248 atombios_enable_crtc(crtc, 1); 248 atombios_enable_crtc(crtc, ATOM_ENABLE);
249 if (ASIC_IS_DCE3(rdev)) 249 if (ASIC_IS_DCE3(rdev))
250 atombios_enable_crtc_memreq(crtc, 1); 250 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
251 atombios_blank_crtc(crtc, 0); 251 atombios_blank_crtc(crtc, ATOM_DISABLE);
252 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); 252 /* XXX re-enable when interrupt support is added */
253 if (!ASIC_IS_DCE4(rdev))
254 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
253 radeon_crtc_load_lut(crtc); 255 radeon_crtc_load_lut(crtc);
254 break; 256 break;
255 case DRM_MODE_DPMS_STANDBY: 257 case DRM_MODE_DPMS_STANDBY:
256 case DRM_MODE_DPMS_SUSPEND: 258 case DRM_MODE_DPMS_SUSPEND:
257 case DRM_MODE_DPMS_OFF: 259 case DRM_MODE_DPMS_OFF:
258 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); 260 /* XXX re-enable when interrupt support is added */
259 atombios_blank_crtc(crtc, 1); 261 if (!ASIC_IS_DCE4(rdev))
262 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
263 atombios_blank_crtc(crtc, ATOM_ENABLE);
260 if (ASIC_IS_DCE3(rdev)) 264 if (ASIC_IS_DCE3(rdev))
261 atombios_enable_crtc_memreq(crtc, 0); 265 atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
262 atombios_enable_crtc(crtc, 0); 266 atombios_enable_crtc(crtc, ATOM_DISABLE);
263 break; 267 break;
264 } 268 }
265} 269}
@@ -349,6 +353,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
349 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 353 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
350} 354}
351 355
356union atom_enable_ss {
357 ENABLE_LVDS_SS_PARAMETERS legacy;
358 ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
359};
360
352static void atombios_set_ss(struct drm_crtc *crtc, int enable) 361static void atombios_set_ss(struct drm_crtc *crtc, int enable)
353{ 362{
354 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 363 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -358,11 +367,14 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
358 struct radeon_encoder *radeon_encoder = NULL; 367 struct radeon_encoder *radeon_encoder = NULL;
359 struct radeon_encoder_atom_dig *dig = NULL; 368 struct radeon_encoder_atom_dig *dig = NULL;
360 int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); 369 int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
361 ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION args; 370 union atom_enable_ss args;
362 ENABLE_LVDS_SS_PARAMETERS legacy_args;
363 uint16_t percentage = 0; 371 uint16_t percentage = 0;
364 uint8_t type = 0, step = 0, delay = 0, range = 0; 372 uint8_t type = 0, step = 0, delay = 0, range = 0;
365 373
374 /* XXX add ss support for DCE4 */
375 if (ASIC_IS_DCE4(rdev))
376 return;
377
366 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 378 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
367 if (encoder->crtc == crtc) { 379 if (encoder->crtc == crtc) {
368 radeon_encoder = to_radeon_encoder(encoder); 380 radeon_encoder = to_radeon_encoder(encoder);
@@ -386,29 +398,28 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
386 if (!radeon_encoder) 398 if (!radeon_encoder)
387 return; 399 return;
388 400
401 memset(&args, 0, sizeof(args));
389 if (ASIC_IS_AVIVO(rdev)) { 402 if (ASIC_IS_AVIVO(rdev)) {
390 memset(&args, 0, sizeof(args)); 403 args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
391 args.usSpreadSpectrumPercentage = cpu_to_le16(percentage); 404 args.v1.ucSpreadSpectrumType = type;
392 args.ucSpreadSpectrumType = type; 405 args.v1.ucSpreadSpectrumStep = step;
393 args.ucSpreadSpectrumStep = step; 406 args.v1.ucSpreadSpectrumDelay = delay;
394 args.ucSpreadSpectrumDelay = delay; 407 args.v1.ucSpreadSpectrumRange = range;
395 args.ucSpreadSpectrumRange = range; 408 args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
396 args.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 409 args.v1.ucEnable = enable;
397 args.ucEnable = enable;
398 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
399 } else { 410 } else {
400 memset(&legacy_args, 0, sizeof(legacy_args)); 411 args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
401 legacy_args.usSpreadSpectrumPercentage = cpu_to_le16(percentage); 412 args.legacy.ucSpreadSpectrumType = type;
402 legacy_args.ucSpreadSpectrumType = type; 413 args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
403 legacy_args.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; 414 args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
404 legacy_args.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; 415 args.legacy.ucEnable = enable;
405 legacy_args.ucEnable = enable;
406 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&legacy_args);
407 } 416 }
417 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
408} 418}
409 419
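The hunk above replaces two separate argument structures with a single union, so one memset and one atom_execute_table call cover both layouts; the same pattern recurs with adjust_pixel_clock and set_pixel_clock below. A minimal sketch of the idea (the helper name and the avivo flag are illustrative, not the driver's API):

/* Minimal sketch of the union-argument pattern adopted above: build the
 * request in whichever member matches the hardware, then issue a single
 * table call on the shared buffer. */
union example_ss_args {
        ENABLE_LVDS_SS_PARAMETERS legacy;
        ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
};

static void example_enable_ss(struct radeon_device *rdev, int index,
                              bool avivo, int enable)
{
        union example_ss_args args;

        memset(&args, 0, sizeof(args));
        if (avivo)
                args.v1.ucEnable = enable;
        else
                args.legacy.ucEnable = enable;
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}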
410union adjust_pixel_clock { 420union adjust_pixel_clock {
411 ADJUST_DISPLAY_PLL_PS_ALLOCATION v1; 421 ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
422 ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
412}; 423};
413 424
414static u32 atombios_adjust_pll(struct drm_crtc *crtc, 425static u32 atombios_adjust_pll(struct drm_crtc *crtc,
@@ -420,10 +431,24 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
420 struct drm_encoder *encoder = NULL; 431 struct drm_encoder *encoder = NULL;
421 struct radeon_encoder *radeon_encoder = NULL; 432 struct radeon_encoder *radeon_encoder = NULL;
422 u32 adjusted_clock = mode->clock; 433 u32 adjusted_clock = mode->clock;
434 int encoder_mode = 0;
423 435
424 /* reset the pll flags */ 436 /* reset the pll flags */
425 pll->flags = 0; 437 pll->flags = 0;
426 438
439 /* select the PLL algo */
440 if (ASIC_IS_AVIVO(rdev)) {
441 if (radeon_new_pll == 0)
442 pll->algo = PLL_ALGO_LEGACY;
443 else
444 pll->algo = PLL_ALGO_NEW;
445 } else {
446 if (radeon_new_pll == 1)
447 pll->algo = PLL_ALGO_NEW;
448 else
449 pll->algo = PLL_ALGO_LEGACY;
450 }
451
427 if (ASIC_IS_AVIVO(rdev)) { 452 if (ASIC_IS_AVIVO(rdev)) {
428 if ((rdev->family == CHIP_RS600) || 453 if ((rdev->family == CHIP_RS600) ||
429 (rdev->family == CHIP_RS690) || 454 (rdev->family == CHIP_RS690) ||
@@ -448,10 +473,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
448 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 473 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
449 if (encoder->crtc == crtc) { 474 if (encoder->crtc == crtc) {
450 radeon_encoder = to_radeon_encoder(encoder); 475 radeon_encoder = to_radeon_encoder(encoder);
476 encoder_mode = atombios_get_encoder_mode(encoder);
451 if (ASIC_IS_AVIVO(rdev)) { 477 if (ASIC_IS_AVIVO(rdev)) {
452 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 478 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
453 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) 479 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
454 adjusted_clock = mode->clock * 2; 480 adjusted_clock = mode->clock * 2;
481 /* LVDS PLL quirks */
482 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
483 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
484 pll->algo = dig->pll_algo;
485 }
455 } else { 486 } else {
456 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 487 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
457 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; 488 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -468,14 +499,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
468 */ 499 */
469 if (ASIC_IS_DCE3(rdev)) { 500 if (ASIC_IS_DCE3(rdev)) {
470 union adjust_pixel_clock args; 501 union adjust_pixel_clock args;
471 struct radeon_encoder_atom_dig *dig;
472 u8 frev, crev; 502 u8 frev, crev;
473 int index; 503 int index;
474 504
475 if (!radeon_encoder->enc_priv)
476 return adjusted_clock;
477 dig = radeon_encoder->enc_priv;
478
479 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); 505 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
480 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 506 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
481 &crev); 507 &crev);
@@ -489,12 +515,51 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
489 case 2: 515 case 2:
490 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); 516 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
491 args.v1.ucTransmitterID = radeon_encoder->encoder_id; 517 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
492 args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder); 518 args.v1.ucEncodeMode = encoder_mode;
493 519
494 atom_execute_table(rdev->mode_info.atom_context, 520 atom_execute_table(rdev->mode_info.atom_context,
495 index, (uint32_t *)&args); 521 index, (uint32_t *)&args);
496 adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; 522 adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
497 break; 523 break;
524 case 3:
525 args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
526 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
527 args.v3.sInput.ucEncodeMode = encoder_mode;
528 args.v3.sInput.ucDispPllConfig = 0;
529 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
530 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
531
532 if (encoder_mode == ATOM_ENCODER_MODE_DP)
533 args.v3.sInput.ucDispPllConfig |=
534 DISPPLL_CONFIG_COHERENT_MODE;
535 else {
536 if (dig->coherent_mode)
537 args.v3.sInput.ucDispPllConfig |=
538 DISPPLL_CONFIG_COHERENT_MODE;
539 if (mode->clock > 165000)
540 args.v3.sInput.ucDispPllConfig |=
541 DISPPLL_CONFIG_DUAL_LINK;
542 }
543 } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
544 /* may want to enable SS on DP/eDP eventually */
545 args.v3.sInput.ucDispPllConfig |=
546 DISPPLL_CONFIG_SS_ENABLE;
547 if (mode->clock > 165000)
548 args.v3.sInput.ucDispPllConfig |=
549 DISPPLL_CONFIG_DUAL_LINK;
550 }
551 atom_execute_table(rdev->mode_info.atom_context,
552 index, (uint32_t *)&args);
553 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
554 if (args.v3.sOutput.ucRefDiv) {
555 pll->flags |= RADEON_PLL_USE_REF_DIV;
556 pll->reference_div = args.v3.sOutput.ucRefDiv;
557 }
558 if (args.v3.sOutput.ucPostDiv) {
559 pll->flags |= RADEON_PLL_USE_POST_DIV;
560 pll->post_div = args.v3.sOutput.ucPostDiv;
561 }
562 break;
498 default: 563 default:
499 DRM_ERROR("Unknown table version %d %d\n", frev, crev); 564 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
500 return adjusted_clock; 565 return adjusted_clock;
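One detail worth keeping straight in the AdjustDisplayPll hunk above is the unit conversion between DRM and ATOM:

/* Worked example of the clock units in the hunk above (not new code):
 * DRM mode clocks are in kHz, the ATOM tables use 10 kHz units.
 * For a 148500 kHz (148.5 MHz) pixel clock:
 *   usPixelClock   = 148500 / 10 = 14850          (driver -> table)
 *   adjusted_clock = usPixelClock * 10 = 148500   (v1/v2 result, back in kHz)
 *   adjusted_clock = ulDispPllFreq * 10           (v3 result, back in kHz)
 */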
@@ -513,9 +578,47 @@ union set_pixel_clock {
513 PIXEL_CLOCK_PARAMETERS v1; 578 PIXEL_CLOCK_PARAMETERS v1;
514 PIXEL_CLOCK_PARAMETERS_V2 v2; 579 PIXEL_CLOCK_PARAMETERS_V2 v2;
515 PIXEL_CLOCK_PARAMETERS_V3 v3; 580 PIXEL_CLOCK_PARAMETERS_V3 v3;
581 PIXEL_CLOCK_PARAMETERS_V5 v5;
516}; 582};
517 583
518void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) 584static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
585{
586 struct drm_device *dev = crtc->dev;
587 struct radeon_device *rdev = dev->dev_private;
588 u8 frev, crev;
589 int index;
590 union set_pixel_clock args;
591
592 memset(&args, 0, sizeof(args));
593
594 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
595 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
596 &crev);
597
598 switch (frev) {
599 case 1:
600 switch (crev) {
601 case 5:
602 /* if the default dcpll clock is specified,
603 * SetPixelClock provides the dividers
604 */
605 args.v5.ucCRTC = ATOM_CRTC_INVALID;
606 args.v5.usPixelClock = rdev->clock.default_dispclk;
607 args.v5.ucPpll = ATOM_DCPLL;
608 break;
609 default:
610 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
611 return;
612 }
613 break;
614 default:
615 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
616 return;
617 }
618 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
619}
620
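atombios_crtc_set_dcpll() above programs the DCPLL that supplies the display engine clock rather than a per-CRTC pixel PLL, which is why it passes ATOM_CRTC_INVALID and the board's default_dispclk. On DCE4 the mode-set path later in this patch calls it once per mode set before the regular SetPixelClock programming; a condensed view of that ordering (illustrative, compressed from the hunks below, not a literal copy):

/* Condensed from the DCE4 path in atombios_crtc_mode_set() below:
 *
 *   radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
 *   atombios_set_ss(crtc, 0);
 *   if (ASIC_IS_DCE4(rdev))
 *           atombios_crtc_set_dcpll(crtc);        // display engine clock (DCPLL)
 *   atombios_crtc_set_pll(crtc, adjusted_mode);   // per-CRTC pixel PLL
 *   atombios_set_ss(crtc, 1);
 */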
621static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
519{ 622{
520 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 623 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
521 struct drm_device *dev = crtc->dev; 624 struct drm_device *dev = crtc->dev;
@@ -529,12 +632,14 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
529 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; 632 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
530 struct radeon_pll *pll; 633 struct radeon_pll *pll;
531 u32 adjusted_clock; 634 u32 adjusted_clock;
635 int encoder_mode = 0;
532 636
533 memset(&args, 0, sizeof(args)); 637 memset(&args, 0, sizeof(args));
534 638
535 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 639 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
536 if (encoder->crtc == crtc) { 640 if (encoder->crtc == crtc) {
537 radeon_encoder = to_radeon_encoder(encoder); 641 radeon_encoder = to_radeon_encoder(encoder);
642 encoder_mode = atombios_get_encoder_mode(encoder);
538 break; 643 break;
539 } 644 }
540 } 645 }
@@ -542,26 +647,24 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
542 if (!radeon_encoder) 647 if (!radeon_encoder)
543 return; 648 return;
544 649
545 if (radeon_crtc->crtc_id == 0) 650 switch (radeon_crtc->pll_id) {
651 case ATOM_PPLL1:
546 pll = &rdev->clock.p1pll; 652 pll = &rdev->clock.p1pll;
547 else 653 break;
654 case ATOM_PPLL2:
548 pll = &rdev->clock.p2pll; 655 pll = &rdev->clock.p2pll;
656 break;
657 case ATOM_DCPLL:
658 case ATOM_PPLL_INVALID:
659 pll = &rdev->clock.dcpll;
660 break;
661 }
549 662
550 /* adjust pixel clock as needed */ 663 /* adjust pixel clock as needed */
551 adjusted_clock = atombios_adjust_pll(crtc, mode, pll); 664 adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
552 665
553 if (ASIC_IS_AVIVO(rdev)) { 666 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
554 if (radeon_new_pll) 667 &ref_div, &post_div);
555 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
556 &fb_div, &frac_fb_div,
557 &ref_div, &post_div);
558 else
559 radeon_compute_pll(pll, adjusted_clock, &pll_clock,
560 &fb_div, &frac_fb_div,
561 &ref_div, &post_div);
562 } else
563 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
564 &ref_div, &post_div);
565 668
566 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 669 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
567 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 670 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -576,8 +679,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
576 args.v1.usFbDiv = cpu_to_le16(fb_div); 679 args.v1.usFbDiv = cpu_to_le16(fb_div);
577 args.v1.ucFracFbDiv = frac_fb_div; 680 args.v1.ucFracFbDiv = frac_fb_div;
578 args.v1.ucPostDiv = post_div; 681 args.v1.ucPostDiv = post_div;
579 args.v1.ucPpll = 682 args.v1.ucPpll = radeon_crtc->pll_id;
580 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
581 args.v1.ucCRTC = radeon_crtc->crtc_id; 683 args.v1.ucCRTC = radeon_crtc->crtc_id;
582 args.v1.ucRefDivSrc = 1; 684 args.v1.ucRefDivSrc = 1;
583 break; 685 break;
@@ -587,8 +689,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
587 args.v2.usFbDiv = cpu_to_le16(fb_div); 689 args.v2.usFbDiv = cpu_to_le16(fb_div);
588 args.v2.ucFracFbDiv = frac_fb_div; 690 args.v2.ucFracFbDiv = frac_fb_div;
589 args.v2.ucPostDiv = post_div; 691 args.v2.ucPostDiv = post_div;
590 args.v2.ucPpll = 692 args.v2.ucPpll = radeon_crtc->pll_id;
591 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
592 args.v2.ucCRTC = radeon_crtc->crtc_id; 693 args.v2.ucCRTC = radeon_crtc->crtc_id;
593 args.v2.ucRefDivSrc = 1; 694 args.v2.ucRefDivSrc = 1;
594 break; 695 break;
@@ -598,12 +699,22 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
598 args.v3.usFbDiv = cpu_to_le16(fb_div); 699 args.v3.usFbDiv = cpu_to_le16(fb_div);
599 args.v3.ucFracFbDiv = frac_fb_div; 700 args.v3.ucFracFbDiv = frac_fb_div;
600 args.v3.ucPostDiv = post_div; 701 args.v3.ucPostDiv = post_div;
601 args.v3.ucPpll = 702 args.v3.ucPpll = radeon_crtc->pll_id;
602 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 703 args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2);
603 args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
604 args.v3.ucTransmitterId = radeon_encoder->encoder_id; 704 args.v3.ucTransmitterId = radeon_encoder->encoder_id;
605 args.v3.ucEncoderMode = 705 args.v3.ucEncoderMode = encoder_mode;
606 atombios_get_encoder_mode(encoder); 706 break;
707 case 5:
708 args.v5.ucCRTC = radeon_crtc->crtc_id;
709 args.v5.usPixelClock = cpu_to_le16(mode->clock / 10);
710 args.v5.ucRefDiv = ref_div;
711 args.v5.usFbDiv = cpu_to_le16(fb_div);
712 args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
713 args.v5.ucPostDiv = post_div;
714 args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
715 args.v5.ucTransmitterID = radeon_encoder->encoder_id;
716 args.v5.ucEncoderMode = encoder_mode;
717 args.v5.ucPpll = radeon_crtc->pll_id;
607 break; 718 break;
608 default: 719 default:
609 DRM_ERROR("Unknown table version %d %d\n", frev, crev); 720 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
@@ -618,6 +729,140 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
618 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 729 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
619} 730}
620 731
732static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
733 struct drm_framebuffer *old_fb)
734{
735 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
736 struct drm_device *dev = crtc->dev;
737 struct radeon_device *rdev = dev->dev_private;
738 struct radeon_framebuffer *radeon_fb;
739 struct drm_gem_object *obj;
740 struct radeon_bo *rbo;
741 uint64_t fb_location;
742 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
743 int r;
744
745 /* no fb bound */
746 if (!crtc->fb) {
747 DRM_DEBUG("No FB bound\n");
748 return 0;
749 }
750
751 radeon_fb = to_radeon_framebuffer(crtc->fb);
752
753	/* Pin framebuffer & get tiling information */
754 obj = radeon_fb->obj;
755 rbo = obj->driver_private;
756 r = radeon_bo_reserve(rbo, false);
757 if (unlikely(r != 0))
758 return r;
759 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
760 if (unlikely(r != 0)) {
761 radeon_bo_unreserve(rbo);
762 return -EINVAL;
763 }
764 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
765 radeon_bo_unreserve(rbo);
766
767 switch (crtc->fb->bits_per_pixel) {
768 case 8:
769 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
770 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
771 break;
772 case 15:
773 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
774 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
775 break;
776 case 16:
777 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
778 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
779 break;
780 case 24:
781 case 32:
782 fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
783 EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
784 break;
785 default:
786 DRM_ERROR("Unsupported screen depth %d\n",
787 crtc->fb->bits_per_pixel);
788 return -EINVAL;
789 }
790
791 switch (radeon_crtc->crtc_id) {
792 case 0:
793 WREG32(AVIVO_D1VGA_CONTROL, 0);
794 break;
795 case 1:
796 WREG32(AVIVO_D2VGA_CONTROL, 0);
797 break;
798 case 2:
799 WREG32(EVERGREEN_D3VGA_CONTROL, 0);
800 break;
801 case 3:
802 WREG32(EVERGREEN_D4VGA_CONTROL, 0);
803 break;
804 case 4:
805 WREG32(EVERGREEN_D5VGA_CONTROL, 0);
806 break;
807 case 5:
808 WREG32(EVERGREEN_D6VGA_CONTROL, 0);
809 break;
810 default:
811 break;
812 }
813
814 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
815 upper_32_bits(fb_location));
816 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
817 upper_32_bits(fb_location));
818 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
819 (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
820 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
821 (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
822 WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
823
824 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
825 WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
826 WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
827 WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
828 WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
829 WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
830
831 fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
832 WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
833 WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
834
835 WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
836 crtc->mode.vdisplay);
837 x &= ~3;
838 y &= ~1;
839 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
840 (x << 16) | y);
841 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
842 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
843
844 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
845 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
846 EVERGREEN_INTERLEAVE_EN);
847 else
848 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
849
850 if (old_fb && old_fb != crtc->fb) {
851 radeon_fb = to_radeon_framebuffer(old_fb);
852 rbo = radeon_fb->obj->driver_private;
853 r = radeon_bo_reserve(rbo, false);
854 if (unlikely(r != 0))
855 return r;
856 radeon_bo_unpin(rbo);
857 radeon_bo_unreserve(rbo);
858 }
859
860 /* Bytes per pixel may have changed */
861 radeon_bandwidth_update(rdev);
862
863 return 0;
864}
865
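A quick sanity check on the surface math in evergreen_crtc_set_base() above (a worked example; the alignment rationale is stated as an assumption):

/* Worked example for the code above, not new code:
 *   1920x1080 framebuffer, 32 bpp, pitch = 7680 bytes
 *   fb_pitch_pixels = 7680 / (32 / 8) = 1920     // GRPH_PITCH is in pixels
 *   x &= ~3, y &= ~1                             // viewport start rounded down,
 *                                                // presumably to hw granularity
 */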
621static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, 866static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
622 struct drm_framebuffer *old_fb) 867 struct drm_framebuffer *old_fb)
623{ 868{
@@ -755,7 +1000,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
755 struct drm_device *dev = crtc->dev; 1000 struct drm_device *dev = crtc->dev;
756 struct radeon_device *rdev = dev->dev_private; 1001 struct radeon_device *rdev = dev->dev_private;
757 1002
758 if (ASIC_IS_AVIVO(rdev)) 1003 if (ASIC_IS_DCE4(rdev))
1004 return evergreen_crtc_set_base(crtc, x, y, old_fb);
1005 else if (ASIC_IS_AVIVO(rdev))
759 return avivo_crtc_set_base(crtc, x, y, old_fb); 1006 return avivo_crtc_set_base(crtc, x, y, old_fb);
760 else 1007 else
761 return radeon_crtc_set_base(crtc, x, y, old_fb); 1008 return radeon_crtc_set_base(crtc, x, y, old_fb);
@@ -785,6 +1032,46 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
785 } 1032 }
786} 1033}
787 1034
1035static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1036{
1037 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1038 struct drm_device *dev = crtc->dev;
1039 struct radeon_device *rdev = dev->dev_private;
1040 struct drm_encoder *test_encoder;
1041 struct drm_crtc *test_crtc;
1042 uint32_t pll_in_use = 0;
1043
1044 if (ASIC_IS_DCE4(rdev)) {
1045 /* if crtc is driving DP and we have an ext clock, use that */
1046 list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
1047 if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
1048 if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
1049 if (rdev->clock.dp_extclk)
1050 return ATOM_PPLL_INVALID;
1051 }
1052 }
1053 }
1054
1055 /* otherwise, pick one of the plls */
1056 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
1057 struct radeon_crtc *radeon_test_crtc;
1058
1059 if (crtc == test_crtc)
1060 continue;
1061
1062 radeon_test_crtc = to_radeon_crtc(test_crtc);
1063 if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
1064 (radeon_test_crtc->pll_id <= ATOM_PPLL2))
1065 pll_in_use |= (1 << radeon_test_crtc->pll_id);
1066 }
1067 if (!(pll_in_use & 1))
1068 return ATOM_PPLL1;
1069 return ATOM_PPLL2;
1070 } else
1071 return radeon_crtc->crtc_id;
1072
1073}
1074
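radeon_atom_pick_pll() above builds a small bitmask of PPLLs already claimed by other CRTCs and hands out the first free one; for example:

/* Example of the selection above, not new code:
 *   no other CRTC active                        -> pll_in_use = 0, returns ATOM_PPLL1
 *   another CRTC already holding PPLL1          -> its bit is set, returns ATOM_PPLL2
 *   DP on DCE4 with an external reference clock -> ATOM_PPLL_INVALID (no PPLL needed,
 *                                                  the external clock drives the link)
 */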
788int atombios_crtc_mode_set(struct drm_crtc *crtc, 1075int atombios_crtc_mode_set(struct drm_crtc *crtc,
789 struct drm_display_mode *mode, 1076 struct drm_display_mode *mode,
790 struct drm_display_mode *adjusted_mode, 1077 struct drm_display_mode *adjusted_mode,
@@ -796,19 +1083,27 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
796 1083
797 /* TODO color tiling */ 1084 /* TODO color tiling */
798 1085
1086 /* pick pll */
1087 radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
1088
799 atombios_set_ss(crtc, 0); 1089 atombios_set_ss(crtc, 0);
1090 /* always set DCPLL */
1091 if (ASIC_IS_DCE4(rdev))
1092 atombios_crtc_set_dcpll(crtc);
800 atombios_crtc_set_pll(crtc, adjusted_mode); 1093 atombios_crtc_set_pll(crtc, adjusted_mode);
801 atombios_set_ss(crtc, 1); 1094 atombios_set_ss(crtc, 1);
802 atombios_crtc_set_timing(crtc, adjusted_mode);
803 1095
804 if (ASIC_IS_AVIVO(rdev)) 1096 if (ASIC_IS_DCE4(rdev))
805 atombios_crtc_set_base(crtc, x, y, old_fb); 1097 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
1098 else if (ASIC_IS_AVIVO(rdev))
1099 atombios_crtc_set_timing(crtc, adjusted_mode);
806 else { 1100 else {
1101 atombios_crtc_set_timing(crtc, adjusted_mode);
807 if (radeon_crtc->crtc_id == 0) 1102 if (radeon_crtc->crtc_id == 0)
808 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 1103 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
809 atombios_crtc_set_base(crtc, x, y, old_fb);
810 radeon_legacy_atom_fixup(crtc); 1104 radeon_legacy_atom_fixup(crtc);
811 } 1105 }
1106 atombios_crtc_set_base(crtc, x, y, old_fb);
812 atombios_overscan_setup(crtc, mode, adjusted_mode); 1107 atombios_overscan_setup(crtc, mode, adjusted_mode);
813 atombios_scaler_setup(crtc); 1108 atombios_scaler_setup(crtc);
814 return 0; 1109 return 0;
@@ -825,14 +1120,14 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
825 1120
826static void atombios_crtc_prepare(struct drm_crtc *crtc) 1121static void atombios_crtc_prepare(struct drm_crtc *crtc)
827{ 1122{
828 atombios_lock_crtc(crtc, 1); 1123 atombios_lock_crtc(crtc, ATOM_ENABLE);
829 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1124 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
830} 1125}
831 1126
832static void atombios_crtc_commit(struct drm_crtc *crtc) 1127static void atombios_crtc_commit(struct drm_crtc *crtc)
833{ 1128{
834 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 1129 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
835 atombios_lock_crtc(crtc, 0); 1130 atombios_lock_crtc(crtc, ATOM_DISABLE);
836} 1131}
837 1132
838static const struct drm_crtc_helper_funcs atombios_helper_funcs = { 1133static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
@@ -848,8 +1143,37 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
848void radeon_atombios_init_crtc(struct drm_device *dev, 1143void radeon_atombios_init_crtc(struct drm_device *dev,
849 struct radeon_crtc *radeon_crtc) 1144 struct radeon_crtc *radeon_crtc)
850{ 1145{
851 if (radeon_crtc->crtc_id == 1) 1146 struct radeon_device *rdev = dev->dev_private;
852 radeon_crtc->crtc_offset = 1147
853 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; 1148 if (ASIC_IS_DCE4(rdev)) {
1149 switch (radeon_crtc->crtc_id) {
1150 case 0:
1151 default:
1152 radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
1153 break;
1154 case 1:
1155 radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
1156 break;
1157 case 2:
1158 radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
1159 break;
1160 case 3:
1161 radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
1162 break;
1163 case 4:
1164 radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
1165 break;
1166 case 5:
1167 radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
1168 break;
1169 }
1170 } else {
1171 if (radeon_crtc->crtc_id == 1)
1172 radeon_crtc->crtc_offset =
1173 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
1174 else
1175 radeon_crtc->crtc_offset = 0;
1176 }
1177 radeon_crtc->pll_id = -1;
854 drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); 1178 drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
855} 1179}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 99915a682d59..8a133bda00a2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -321,6 +321,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
321 train_set[lane] = v | p; 321 train_set[lane] = v | p;
322} 322}
323 323
324union aux_channel_transaction {
325 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
326 PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
327};
324 328
325/* radeon aux chan functions */ 329/* radeon aux chan functions */
326bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, 330bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
@@ -329,7 +333,7 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
329{ 333{
330 struct drm_device *dev = chan->dev; 334 struct drm_device *dev = chan->dev;
331 struct radeon_device *rdev = dev->dev_private; 335 struct radeon_device *rdev = dev->dev_private;
332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; 336 union aux_channel_transaction args;
333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); 337 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
334 unsigned char *base; 338 unsigned char *base;
335 int retry_count = 0; 339 int retry_count = 0;
@@ -341,31 +345,33 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
341retry: 345retry:
342 memcpy(base, req_bytes, num_bytes); 346 memcpy(base, req_bytes, num_bytes);
343 347
344 args.lpAuxRequest = 0; 348 args.v1.lpAuxRequest = 0;
345 args.lpDataOut = 16; 349 args.v1.lpDataOut = 16;
346 args.ucDataOutLen = 0; 350 args.v1.ucDataOutLen = 0;
347 args.ucChannelID = chan->rec.i2c_id; 351 args.v1.ucChannelID = chan->rec.i2c_id;
348 args.ucDelay = delay / 10; 352 args.v1.ucDelay = delay / 10;
353 if (ASIC_IS_DCE4(rdev))
354 args.v2.ucHPD_ID = chan->rec.hpd_id;
349 355
350 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 356 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
351 357
352 if (args.ucReplyStatus && !args.ucDataOutLen) { 358 if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
353 if (args.ucReplyStatus == 0x20 && retry_count++ < 10) 359 if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
354 goto retry; 360 goto retry;
355 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", 361 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
356 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], 362 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
357 chan->rec.i2c_id, args.ucReplyStatus, retry_count); 363 chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
358 return false; 364 return false;
359 } 365 }
360 366
361 if (args.ucDataOutLen && read_byte && read_buf_len) { 367 if (args.v1.ucDataOutLen && read_byte && read_buf_len) {
362 if (read_buf_len < args.ucDataOutLen) { 368 if (read_buf_len < args.v1.ucDataOutLen) {
363 DRM_ERROR("Buffer to small for return answer %d %d\n", 369 DRM_ERROR("Buffer to small for return answer %d %d\n",
364 read_buf_len, args.ucDataOutLen); 370 read_buf_len, args.v1.ucDataOutLen);
365 return false; 371 return false;
366 } 372 }
367 { 373 {
368 int len = min(read_buf_len, args.ucDataOutLen); 374 int len = min(read_buf_len, args.v1.ucDataOutLen);
369 memcpy(read_byte, base + 16, len); 375 memcpy(read_byte, base + 16, len);
370 } 376 }
371 } 377 }
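The union introduced above lets one code path serve both parameter revisions: the v1 request fields are filled unconditionally and the DCE4-only hot-plug pad ID is layered on through the v2 view of the same buffer. A hedged sketch of that fill step (the helper and its arguments are hypothetical):

/* Illustrative only: build the aux-channel request the way the hunk above
 * does, with the v2-only field set when the ASIC needs it. */
static void fill_aux_request(union aux_channel_transaction *args,
                             u8 i2c_id, u8 delay, bool is_dce4, u8 hpd_id)
{
        memset(args, 0, sizeof(*args));
        args->v1.lpAuxRequest = 0;      /* request bytes sit at offset 0 of the scratch area */
        args->v1.lpDataOut    = 16;     /* reply bytes are written at offset 16 */
        args->v1.ucChannelID  = i2c_id;
        args->v1.ucDelay      = delay / 10;
        if (is_dce4)
                args->v2.ucHPD_ID = hpd_id;     /* DCE4-only field */
}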
@@ -626,12 +632,19 @@ void dp_link_train(struct drm_encoder *encoder,
626 dp_set_link_bw_lanes(radeon_connector, link_configuration); 632 dp_set_link_bw_lanes(radeon_connector, link_configuration);
627 /* disable downspread on the sink */ 633 /* disable downspread on the sink */
628 dp_set_downspread(radeon_connector, 0); 634 dp_set_downspread(radeon_connector, 0);
629 /* start training on the source */ 635 if (ASIC_IS_DCE4(rdev)) {
630 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START, 636 /* start training on the source */
631 dig_connector->dp_clock, enc_id, 0); 637 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
632 /* set training pattern 1 on the source */ 638 /* set training pattern 1 on the source */
633 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, 639 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
634 dig_connector->dp_clock, enc_id, 0); 640 } else {
641 /* start training on the source */
642 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
643 dig_connector->dp_clock, enc_id, 0);
644 /* set training pattern 1 on the source */
645 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
646 dig_connector->dp_clock, enc_id, 0);
647 }
635 648
636 /* set initial vs/emph */ 649 /* set initial vs/emph */
637 memset(train_set, 0, 4); 650 memset(train_set, 0, 4);
@@ -691,8 +704,11 @@ void dp_link_train(struct drm_encoder *encoder,
691 /* set training pattern 2 on the sink */ 704 /* set training pattern 2 on the sink */
692 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2); 705 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
693 /* set training pattern 2 on the source */ 706 /* set training pattern 2 on the source */
694 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, 707 if (ASIC_IS_DCE4(rdev))
695 dig_connector->dp_clock, enc_id, 1); 708 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
709 else
710 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
711 dig_connector->dp_clock, enc_id, 1);
696 712
697 /* channel equalization loop */ 713 /* channel equalization loop */
698 tries = 0; 714 tries = 0;
@@ -729,7 +745,11 @@ void dp_link_train(struct drm_encoder *encoder,
729 >> DP_TRAIN_PRE_EMPHASIS_SHIFT); 745 >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
730 746
731 /* disable the training pattern on the sink */ 747 /* disable the training pattern on the sink */
732 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); 748 if (ASIC_IS_DCE4(rdev))
749 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
750 else
751 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
752 dig_connector->dp_clock, enc_id, 0);
733 753
734 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, 754 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
735 dig_connector->dp_clock, enc_id, 0); 755 dig_connector->dp_clock, enc_id, 0);
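Every source-side training step in the hunks above follows the same split: DCE4 parts program the DIG encoder table directly, while older ASICs keep going through the DP encoder service. A minimal sketch of that dispatch, under the assumption that the two called functions behave as shown in the diff; the wrapper itself is illustrative and not part of the patch:

static void dp_source_train_action(struct drm_encoder *encoder,
				   struct radeon_device *rdev,
				   int dce4_cmd, int legacy_action,
				   int dp_clock, int enc_id, int arg)
{
	if (ASIC_IS_DCE4(rdev))
		/* DCE4: drive training through the DIG encoder setup table */
		atombios_dig_encoder_setup(encoder, dce4_cmd);
	else
		/* pre-DCE4: use the DP encoder service, as before */
		radeon_dp_encoder_service(rdev, legacy_action,
					  dp_clock, enc_id, arg);
}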
diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h
index d4e6e6e4a938..3c391e7e9fd4 100644
--- a/drivers/gpu/drm/radeon/avivod.h
+++ b/drivers/gpu/drm/radeon/avivod.h
@@ -30,11 +30,13 @@
30 30
31#define D1CRTC_CONTROL 0x6080 31#define D1CRTC_CONTROL 0x6080
32#define CRTC_EN (1 << 0) 32#define CRTC_EN (1 << 0)
33#define D1CRTC_STATUS 0x609c
33#define D1CRTC_UPDATE_LOCK 0x60E8 34#define D1CRTC_UPDATE_LOCK 0x60E8
34#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 35#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
35#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118 36#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
36 37
37#define D2CRTC_CONTROL 0x6880 38#define D2CRTC_CONTROL 0x6880
39#define D2CRTC_STATUS 0x689c
38#define D2CRTC_UPDATE_LOCK 0x68E8 40#define D2CRTC_UPDATE_LOCK 0x68E8
39#define D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910 41#define D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
40#define D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918 42#define D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
new file mode 100644
index 000000000000..bd2e7aa85c1d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -0,0 +1,767 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include "drmP.h"
27#include "radeon.h"
28#include "radeon_drm.h"
29#include "rv770d.h"
30#include "atom.h"
31#include "avivod.h"
32#include "evergreen_reg.h"
33
34static void evergreen_gpu_init(struct radeon_device *rdev);
35void evergreen_fini(struct radeon_device *rdev);
36
37bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
38{
39 bool connected = false;
40 /* XXX */
41 return connected;
42}
43
44void evergreen_hpd_set_polarity(struct radeon_device *rdev,
45 enum radeon_hpd_id hpd)
46{
47 /* XXX */
48}
49
50void evergreen_hpd_init(struct radeon_device *rdev)
51{
52 /* XXX */
53}
54
55
56void evergreen_bandwidth_update(struct radeon_device *rdev)
57{
58 /* XXX */
59}
60
61void evergreen_hpd_fini(struct radeon_device *rdev)
62{
63 /* XXX */
64}
65
66static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
67{
68 unsigned i;
69 u32 tmp;
70
71 for (i = 0; i < rdev->usec_timeout; i++) {
72 /* read MC_STATUS */
73 tmp = RREG32(SRBM_STATUS) & 0x1F00;
74 if (!tmp)
75 return 0;
76 udelay(1);
77 }
78 return -1;
79}
80
81/*
82 * GART
83 */
84int evergreen_pcie_gart_enable(struct radeon_device *rdev)
85{
86 u32 tmp;
87 int r, i;
88
89 if (rdev->gart.table.vram.robj == NULL) {
90 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
91 return -EINVAL;
92 }
93 r = radeon_gart_table_vram_pin(rdev);
94 if (r)
95 return r;
96 radeon_gart_restore(rdev);
97 /* Setup L2 cache */
98 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
99 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
100 EFFECTIVE_L2_QUEUE_SIZE(7));
101 WREG32(VM_L2_CNTL2, 0);
102 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
103 /* Setup TLB control */
104 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
105 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
106 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
107 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
108 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
109 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
110 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
111 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
112 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
113 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
114 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
115 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
116 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
117 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
118 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
119 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
120 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
121 (u32)(rdev->dummy_page.addr >> 12));
122 for (i = 1; i < 7; i++)
123 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
124
125 r600_pcie_gart_tlb_flush(rdev);
126 rdev->gart.ready = true;
127 return 0;
128}
129
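All GART addresses written above are expressed in 4 KB pages, hence the >> 12 shifts. A quick worked example with assumed addresses (values are illustrative, not taken from the patch):

/* gtt_start = 0x20000000, gtt_end = 0x2FFFFFFF, table_addr = 0x00040000:
 *   VM_CONTEXT0_PAGE_TABLE_START_ADDR = 0x20000000 >> 12 = 0x20000
 *   VM_CONTEXT0_PAGE_TABLE_END_ADDR   = 0x2FFFFFFF >> 12 = 0x2FFFF
 *   VM_CONTEXT0_PAGE_TABLE_BASE_ADDR  = 0x00040000 >> 12 = 0x40
 * i.e. a 256 MB GART window described entirely in 4 KB page units. */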
130void evergreen_pcie_gart_disable(struct radeon_device *rdev)
131{
132 u32 tmp;
133 int i, r;
134
135 /* Disable all tables */
136 for (i = 0; i < 7; i++)
137 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
138
139 /* Setup L2 cache */
140 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
141 EFFECTIVE_L2_QUEUE_SIZE(7));
142 WREG32(VM_L2_CNTL2, 0);
143 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
144 /* Setup TLB control */
145 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
146 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
147 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
148 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
149 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
150 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
151 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
152 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
153 if (rdev->gart.table.vram.robj) {
154 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
155 if (likely(r == 0)) {
156 radeon_bo_kunmap(rdev->gart.table.vram.robj);
157 radeon_bo_unpin(rdev->gart.table.vram.robj);
158 radeon_bo_unreserve(rdev->gart.table.vram.robj);
159 }
160 }
161}
162
163void evergreen_pcie_gart_fini(struct radeon_device *rdev)
164{
165 evergreen_pcie_gart_disable(rdev);
166 radeon_gart_table_vram_free(rdev);
167 radeon_gart_fini(rdev);
168}
169
170
171void evergreen_agp_enable(struct radeon_device *rdev)
172{
173 u32 tmp;
174 int i;
175
176 /* Setup L2 cache */
177 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
178 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
179 EFFECTIVE_L2_QUEUE_SIZE(7));
180 WREG32(VM_L2_CNTL2, 0);
181 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
182 /* Setup TLB control */
183 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
184 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
185 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
186 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
187 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
188 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
189 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
190 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
191 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
192 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
193 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
194 for (i = 0; i < 7; i++)
195 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
196}
197
198static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
199{
200 save->vga_control[0] = RREG32(D1VGA_CONTROL);
201 save->vga_control[1] = RREG32(D2VGA_CONTROL);
202 save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
203 save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
204 save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
205 save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
206 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
207 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
208 save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
209 save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
210 save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
211 save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
212 save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
213 save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
214
215 /* Stop all video */
216 WREG32(VGA_RENDER_CONTROL, 0);
217 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
218 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
219 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
220 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
221 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
222 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
223 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
224 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
225 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
226 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
227 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
228 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
229 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
230 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
231 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
232 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
233 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
234 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
235
236 WREG32(D1VGA_CONTROL, 0);
237 WREG32(D2VGA_CONTROL, 0);
238 WREG32(EVERGREEN_D3VGA_CONTROL, 0);
239 WREG32(EVERGREEN_D4VGA_CONTROL, 0);
240 WREG32(EVERGREEN_D5VGA_CONTROL, 0);
241 WREG32(EVERGREEN_D6VGA_CONTROL, 0);
242}
243
244static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
245{
246 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
247 upper_32_bits(rdev->mc.vram_start));
248 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
249 upper_32_bits(rdev->mc.vram_start));
250 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
251 (u32)rdev->mc.vram_start);
252 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
253 (u32)rdev->mc.vram_start);
254
255 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
256 upper_32_bits(rdev->mc.vram_start));
257 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
258 upper_32_bits(rdev->mc.vram_start));
259 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
260 (u32)rdev->mc.vram_start);
261 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
262 (u32)rdev->mc.vram_start);
263
264 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
265 upper_32_bits(rdev->mc.vram_start));
266 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
267 upper_32_bits(rdev->mc.vram_start));
268 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
269 (u32)rdev->mc.vram_start);
270 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
271 (u32)rdev->mc.vram_start);
272
273 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
274 upper_32_bits(rdev->mc.vram_start));
275 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
276 upper_32_bits(rdev->mc.vram_start));
277 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
278 (u32)rdev->mc.vram_start);
279 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
280 (u32)rdev->mc.vram_start);
281
282 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
283 upper_32_bits(rdev->mc.vram_start));
284 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
285 upper_32_bits(rdev->mc.vram_start));
286 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
287 (u32)rdev->mc.vram_start);
288 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
289 (u32)rdev->mc.vram_start);
290
291 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
292 upper_32_bits(rdev->mc.vram_start));
293 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
294 upper_32_bits(rdev->mc.vram_start));
295 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
296 (u32)rdev->mc.vram_start);
297 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
298 (u32)rdev->mc.vram_start);
299
300 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
301 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
302 /* Unlock host access */
303 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
304 mdelay(1);
305 /* Restore video state */
306 WREG32(D1VGA_CONTROL, save->vga_control[0]);
307 WREG32(D2VGA_CONTROL, save->vga_control[1]);
308 WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
309 WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
310 WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
311 WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
312 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
313 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
314 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
315 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
316 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
317 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
318 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
319 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
320 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
321 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
322 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
323 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
324 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
325 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
326 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
327 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
328 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
329 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
330 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
331}
332
333static void evergreen_mc_program(struct radeon_device *rdev)
334{
335 struct evergreen_mc_save save;
336 u32 tmp;
337 int i, j;
338
339 /* Initialize HDP */
340 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
341 WREG32((0x2c14 + j), 0x00000000);
342 WREG32((0x2c18 + j), 0x00000000);
343 WREG32((0x2c1c + j), 0x00000000);
344 WREG32((0x2c20 + j), 0x00000000);
345 WREG32((0x2c24 + j), 0x00000000);
346 }
347 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
348
349 evergreen_mc_stop(rdev, &save);
350 if (evergreen_mc_wait_for_idle(rdev)) {
351 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
352 }
353 /* Lockout access through VGA aperture*/
354 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
355 /* Update configuration */
356 if (rdev->flags & RADEON_IS_AGP) {
357 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
358 /* VRAM before AGP */
359 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
360 rdev->mc.vram_start >> 12);
361 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
362 rdev->mc.gtt_end >> 12);
363 } else {
364 /* VRAM after AGP */
365 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
366 rdev->mc.gtt_start >> 12);
367 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
368 rdev->mc.vram_end >> 12);
369 }
370 } else {
371 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
372 rdev->mc.vram_start >> 12);
373 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
374 rdev->mc.vram_end >> 12);
375 }
376 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
377 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
378 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
379 WREG32(MC_VM_FB_LOCATION, tmp);
380 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
381 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
382 WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
383 if (rdev->flags & RADEON_IS_AGP) {
384 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
385 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
386 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
387 } else {
388 WREG32(MC_VM_AGP_BASE, 0);
389 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
390 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
391 }
392 if (evergreen_mc_wait_for_idle(rdev)) {
393 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
394 }
395 evergreen_mc_resume(rdev, &save);
396 /* we need to own VRAM, so turn off the VGA renderer here
397 * to stop it overwriting our objects */
398 rv515_vga_render_disable(rdev);
399}
400
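evergreen_mc_program() packs the VRAM range into MC_VM_FB_LOCATION with the start in the low 16 bits and the end in the high 16 bits, both in 16 MB (1 << 24) units. A minimal sketch of that packing, assuming a 512 MB framebuffer based at 0; the helper name is hypothetical and the driver open-codes this:

static u32 evergreen_pack_fb_location(u64 vram_start, u64 vram_end)
{
	u32 tmp = ((vram_end >> 24) & 0xFFFF) << 16;	/* end, 16 MB units */

	tmp |= (vram_start >> 24) & 0xFFFF;		/* start, 16 MB units */
	return tmp;	/* start 0x0, end 0x1FFFFFFF -> 0x001F0000 */
}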
401#if 0
402/*
403 * CP.
404 */
405static void evergreen_cp_stop(struct radeon_device *rdev)
406{
407 /* XXX */
408}
409
410
411static int evergreen_cp_load_microcode(struct radeon_device *rdev)
412{
413 /* XXX */
414
415 return 0;
416}
417
418
419/*
420 * Core functions
421 */
422static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
423 u32 num_backends,
424 u32 backend_disable_mask)
425{
426 u32 backend_map = 0;
427
428 return backend_map;
429}
430#endif
431
432static void evergreen_gpu_init(struct radeon_device *rdev)
433{
434 /* XXX */
435}
436
437int evergreen_mc_init(struct radeon_device *rdev)
438{
439 fixed20_12 a;
440 u32 tmp;
441 int chansize, numchan;
442
443 /* Get VRAM information */
444 rdev->mc.vram_is_ddr = true;
445 tmp = RREG32(MC_ARB_RAMCFG);
446 if (tmp & CHANSIZE_OVERRIDE) {
447 chansize = 16;
448 } else if (tmp & CHANSIZE_MASK) {
449 chansize = 64;
450 } else {
451 chansize = 32;
452 }
453 tmp = RREG32(MC_SHARED_CHMAP);
454 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
455 case 0:
456 default:
457 numchan = 1;
458 break;
459 case 1:
460 numchan = 2;
461 break;
462 case 2:
463 numchan = 4;
464 break;
465 case 3:
466 numchan = 8;
467 break;
468 }
469 rdev->mc.vram_width = numchan * chansize;
470 /* Could the aperture size report 0? */
471 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
472 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
473 /* Setup GPU memory space */
474 /* size in MB on evergreen */
475 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
476 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
477 rdev->mc.visible_vram_size = rdev->mc.aper_size;
478 /* FIXME remove this once we support unmappable VRAM */
479 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
480 rdev->mc.mc_vram_size = rdev->mc.aper_size;
481 rdev->mc.real_vram_size = rdev->mc.aper_size;
482 }
483 r600_vram_gtt_location(rdev, &rdev->mc);
484 /* FIXME: we should enforce the default clock in case the GPU is not in
485 * its default setup
486 */
487 a.full = rfixed_const(100);
488 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
489 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
490 return 0;
491}
492
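The tail of evergreen_mc_init() converts the default engine clock into the 20.12 fixed-point value used by the bandwidth code. Assuming, as elsewhere in radeon, that default_sclk is stored in 10 kHz units, dividing by 100 yields MHz; the value below is an assumed example, not from the patch:

fixed20_12 a, sclk;

a.full = rfixed_const(100);
sclk.full = rfixed_const(72500);	/* assumed 725 MHz engine clock in 10 kHz units */
sclk.full = rfixed_div(sclk, a);	/* sclk now holds 725 MHz in 20.12 fixed point */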
493int evergreen_gpu_reset(struct radeon_device *rdev)
494{
495 /* FIXME: implement for evergreen */
496 return 0;
497}
498
499static int evergreen_startup(struct radeon_device *rdev)
500{
501#if 0
502 int r;
503
504 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
505 r = r600_init_microcode(rdev);
506 if (r) {
507 DRM_ERROR("Failed to load firmware!\n");
508 return r;
509 }
510 }
511#endif
512 evergreen_mc_program(rdev);
513#if 0
514 if (rdev->flags & RADEON_IS_AGP) {
515 evergreen_agp_enable(rdev);
516 } else {
517 r = evergreen_pcie_gart_enable(rdev);
518 if (r)
519 return r;
520 }
521#endif
522 evergreen_gpu_init(rdev);
523#if 0
524 if (!rdev->r600_blit.shader_obj) {
525 r = r600_blit_init(rdev);
526 if (r) {
527 DRM_ERROR("radeon: failed blitter (%d).\n", r);
528 return r;
529 }
530 }
531
532 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
533 if (unlikely(r != 0))
534 return r;
535 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
536 &rdev->r600_blit.shader_gpu_addr);
537 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
538 if (r) {
539 DRM_ERROR("failed to pin blit object %d\n", r);
540 return r;
541 }
542
543 /* Enable IRQ */
544 r = r600_irq_init(rdev);
545 if (r) {
546 DRM_ERROR("radeon: IH init failed (%d).\n", r);
547 radeon_irq_kms_fini(rdev);
548 return r;
549 }
550 r600_irq_set(rdev);
551
552 r = radeon_ring_init(rdev, rdev->cp.ring_size);
553 if (r)
554 return r;
555 r = evergreen_cp_load_microcode(rdev);
556 if (r)
557 return r;
558 r = r600_cp_resume(rdev);
559 if (r)
560 return r;
561 /* the write back buffer is not vital, so don't worry about failure */
562 r600_wb_enable(rdev);
563#endif
564 return 0;
565}
566
567int evergreen_resume(struct radeon_device *rdev)
568{
569 int r;
570
571 /* Do not reset the GPU before posting; on rv770 hw, unlike on r500 hw,
572 * posting will perform the necessary tasks to bring the GPU back into
573 * good shape.
574 */
575 /* post card */
576 atom_asic_init(rdev->mode_info.atom_context);
577 /* Initialize clocks */
578 r = radeon_clocks_init(rdev);
579 if (r) {
580 return r;
581 }
582
583 r = evergreen_startup(rdev);
584 if (r) {
585 DRM_ERROR("evergreen startup failed on resume\n");
586 return r;
587 }
588#if 0
589 r = r600_ib_test(rdev);
590 if (r) {
591 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
592 return r;
593 }
594#endif
595 return r;
596
597}
598
599int evergreen_suspend(struct radeon_device *rdev)
600{
601#if 0
602 int r;
603
604 /* FIXME: we should wait for ring to be empty */
605 r700_cp_stop(rdev);
606 rdev->cp.ready = false;
607 r600_wb_disable(rdev);
608 evergreen_pcie_gart_disable(rdev);
609 /* unpin shaders bo */
610 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
611 if (likely(r == 0)) {
612 radeon_bo_unpin(rdev->r600_blit.shader_obj);
613 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
614 }
615#endif
616 return 0;
617}
618
619static bool evergreen_card_posted(struct radeon_device *rdev)
620{
621 u32 reg;
622
623 /* first check CRTCs */
624 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
625 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
626 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
627 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
628 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
629 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
630 if (reg & EVERGREEN_CRTC_MASTER_EN)
631 return true;
632
633 /* then check MEM_SIZE, in case the crtcs are off */
634 if (RREG32(CONFIG_MEMSIZE))
635 return true;
636
637 return false;
638}
639
640/* The plan is to move initialization into this function and use
641 * helper functions so that radeon_device_init does little more
642 * than call asic-specific functions. This should also allow
643 * the removal of a bunch of callback functions
644 * like vram_info.
645 */
646int evergreen_init(struct radeon_device *rdev)
647{
648 int r;
649
650 r = radeon_dummy_page_init(rdev);
651 if (r)
652 return r;
653 /* This doesn't do much */
654 r = radeon_gem_init(rdev);
655 if (r)
656 return r;
657 /* Read BIOS */
658 if (!radeon_get_bios(rdev)) {
659 if (ASIC_IS_AVIVO(rdev))
660 return -EINVAL;
661 }
662 /* Must be an ATOMBIOS */
663 if (!rdev->is_atom_bios) {
664 dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
665 return -EINVAL;
666 }
667 r = radeon_atombios_init(rdev);
668 if (r)
669 return r;
670 /* Post card if necessary */
671 if (!evergreen_card_posted(rdev)) {
672 if (!rdev->bios) {
673 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
674 return -EINVAL;
675 }
676 DRM_INFO("GPU not posted. posting now...\n");
677 atom_asic_init(rdev->mode_info.atom_context);
678 }
679 /* Initialize scratch registers */
680 r600_scratch_init(rdev);
681 /* Initialize surface registers */
682 radeon_surface_init(rdev);
683 /* Initialize clocks */
684 radeon_get_clock_info(rdev->ddev);
685 r = radeon_clocks_init(rdev);
686 if (r)
687 return r;
688 /* Initialize power management */
689 radeon_pm_init(rdev);
690 /* Fence driver */
691 r = radeon_fence_driver_init(rdev);
692 if (r)
693 return r;
694 /* initialize AGP */
695 if (rdev->flags & RADEON_IS_AGP) {
696 r = radeon_agp_init(rdev);
697 if (r)
698 radeon_agp_disable(rdev);
699 }
700 /* initialize memory controller */
701 r = evergreen_mc_init(rdev);
702 if (r)
703 return r;
704 /* Memory manager */
705 r = radeon_bo_init(rdev);
706 if (r)
707 return r;
708#if 0
709 r = radeon_irq_kms_init(rdev);
710 if (r)
711 return r;
712
713 rdev->cp.ring_obj = NULL;
714 r600_ring_init(rdev, 1024 * 1024);
715
716 rdev->ih.ring_obj = NULL;
717 r600_ih_ring_init(rdev, 64 * 1024);
718
719 r = r600_pcie_gart_init(rdev);
720 if (r)
721 return r;
722#endif
723 rdev->accel_working = false;
724 r = evergreen_startup(rdev);
725 if (r) {
726 evergreen_suspend(rdev);
727 /*r600_wb_fini(rdev);*/
728 /*radeon_ring_fini(rdev);*/
729 /*evergreen_pcie_gart_fini(rdev);*/
730 rdev->accel_working = false;
731 }
732 if (rdev->accel_working) {
733 r = radeon_ib_pool_init(rdev);
734 if (r) {
735 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
736 rdev->accel_working = false;
737 }
738 r = r600_ib_test(rdev);
739 if (r) {
740 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
741 rdev->accel_working = false;
742 }
743 }
744 return 0;
745}
746
747void evergreen_fini(struct radeon_device *rdev)
748{
749 evergreen_suspend(rdev);
750#if 0
751 r600_blit_fini(rdev);
752 r600_irq_fini(rdev);
753 radeon_irq_kms_fini(rdev);
754 radeon_ring_fini(rdev);
755 r600_wb_fini(rdev);
756 evergreen_pcie_gart_fini(rdev);
757#endif
758 radeon_gem_fini(rdev);
759 radeon_fence_driver_fini(rdev);
760 radeon_clocks_fini(rdev);
761 radeon_agp_fini(rdev);
762 radeon_bo_fini(rdev);
763 radeon_atombios_fini(rdev);
764 kfree(rdev->bios);
765 rdev->bios = NULL;
766 radeon_dummy_page_fini(rdev);
767}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
new file mode 100644
index 000000000000..f7c7c9643433
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -0,0 +1,176 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef __EVERGREEN_REG_H__
25#define __EVERGREEN_REG_H__
26
27/* evergreen */
28#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310
29#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324
30#define EVERGREEN_D3VGA_CONTROL 0x3e0
31#define EVERGREEN_D4VGA_CONTROL 0x3e4
32#define EVERGREEN_D5VGA_CONTROL 0x3e8
33#define EVERGREEN_D6VGA_CONTROL 0x3ec
34
35#define EVERGREEN_P1PLL_SS_CNTL 0x414
36#define EVERGREEN_P2PLL_SS_CNTL 0x454
37# define EVERGREEN_PxPLL_SS_EN (1 << 12)
38/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
39#define EVERGREEN_GRPH_ENABLE 0x6800
40#define EVERGREEN_GRPH_CONTROL 0x6804
41# define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
42# define EVERGREEN_GRPH_DEPTH_8BPP 0
43# define EVERGREEN_GRPH_DEPTH_16BPP 1
44# define EVERGREEN_GRPH_DEPTH_32BPP 2
45# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
46/* 8 BPP */
47# define EVERGREEN_GRPH_FORMAT_INDEXED 0
48/* 16 BPP */
49# define EVERGREEN_GRPH_FORMAT_ARGB1555 0
50# define EVERGREEN_GRPH_FORMAT_ARGB565 1
51# define EVERGREEN_GRPH_FORMAT_ARGB4444 2
52# define EVERGREEN_GRPH_FORMAT_AI88 3
53# define EVERGREEN_GRPH_FORMAT_MONO16 4
54# define EVERGREEN_GRPH_FORMAT_BGRA5551 5
55/* 32 BPP */
56# define EVERGREEN_GRPH_FORMAT_ARGB8888 0
57# define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
58# define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
59# define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
60# define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
61# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
62# define EVERGREEN_GRPH_FORMAT_RGB111110 6
63# define EVERGREEN_GRPH_FORMAT_BGR101111 7
64#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
65# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
66# define EVERGREEN_GRPH_ENDIAN_NONE 0
67# define EVERGREEN_GRPH_ENDIAN_8IN16 1
68# define EVERGREEN_GRPH_ENDIAN_8IN32 2
69# define EVERGREEN_GRPH_ENDIAN_8IN64 3
70# define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
71# define EVERGREEN_GRPH_RED_SEL_R 0
72# define EVERGREEN_GRPH_RED_SEL_G 1
73# define EVERGREEN_GRPH_RED_SEL_B 2
74# define EVERGREEN_GRPH_RED_SEL_A 3
75# define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
76# define EVERGREEN_GRPH_GREEN_SEL_G 0
77# define EVERGREEN_GRPH_GREEN_SEL_B 1
78# define EVERGREEN_GRPH_GREEN_SEL_A 2
79# define EVERGREEN_GRPH_GREEN_SEL_R 3
80# define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
81# define EVERGREEN_GRPH_BLUE_SEL_B 0
82# define EVERGREEN_GRPH_BLUE_SEL_A 1
83# define EVERGREEN_GRPH_BLUE_SEL_R 2
84# define EVERGREEN_GRPH_BLUE_SEL_G 3
85# define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
86# define EVERGREEN_GRPH_ALPHA_SEL_A 0
87# define EVERGREEN_GRPH_ALPHA_SEL_R 1
88# define EVERGREEN_GRPH_ALPHA_SEL_G 2
89# define EVERGREEN_GRPH_ALPHA_SEL_B 3
90#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x6810
91#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x6814
92# define EVERGREEN_GRPH_DFQ_ENABLE (1 << 0)
93# define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
94#define EVERGREEN_GRPH_PITCH 0x6818
95#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x681c
96#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x6820
97#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x6824
98#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x6828
99#define EVERGREEN_GRPH_X_START 0x682c
100#define EVERGREEN_GRPH_Y_START 0x6830
101#define EVERGREEN_GRPH_X_END 0x6834
102#define EVERGREEN_GRPH_Y_END 0x6838
103
104/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
105#define EVERGREEN_CUR_CONTROL 0x6998
106# define EVERGREEN_CURSOR_EN (1 << 0)
107# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
108# define EVERGREEN_CURSOR_MONO 0
109# define EVERGREEN_CURSOR_24_1 1
110# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
111# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
112# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
113# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
114# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
115# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
116# define EVERGREEN_CURSOR_URGENT_1_8 1
117# define EVERGREEN_CURSOR_URGENT_1_4 2
118# define EVERGREEN_CURSOR_URGENT_3_8 3
119# define EVERGREEN_CURSOR_URGENT_1_2 4
120#define EVERGREEN_CUR_SURFACE_ADDRESS 0x699c
121# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
122#define EVERGREEN_CUR_SIZE 0x69a0
123#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x69a4
124#define EVERGREEN_CUR_POSITION 0x69a8
125#define EVERGREEN_CUR_HOT_SPOT 0x69ac
126#define EVERGREEN_CUR_COLOR1 0x69b0
127#define EVERGREEN_CUR_COLOR2 0x69b4
128#define EVERGREEN_CUR_UPDATE 0x69b8
129# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
130# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
131# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
132# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
133
134/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */
135#define EVERGREEN_DC_LUT_RW_MODE 0x69e0
136#define EVERGREEN_DC_LUT_RW_INDEX 0x69e4
137#define EVERGREEN_DC_LUT_SEQ_COLOR 0x69e8
138#define EVERGREEN_DC_LUT_PWL_DATA 0x69ec
139#define EVERGREEN_DC_LUT_30_COLOR 0x69f0
140#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE 0x69f4
141#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x69f8
142#define EVERGREEN_DC_LUT_AUTOFILL 0x69fc
143#define EVERGREEN_DC_LUT_CONTROL 0x6a00
144#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x6a04
145#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x6a08
146#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x6a0c
147#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x6a10
148#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x6a14
149#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x6a18
150
151#define EVERGREEN_DATA_FORMAT 0x6b00
152# define EVERGREEN_INTERLEAVE_EN (1 << 0)
153#define EVERGREEN_DESKTOP_HEIGHT 0x6b04
154
155#define EVERGREEN_VIEWPORT_START 0x6d70
156#define EVERGREEN_VIEWPORT_SIZE 0x6d74
157
158/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
159#define EVERGREEN_CRTC0_REGISTER_OFFSET (0x6df0 - 0x6df0)
160#define EVERGREEN_CRTC1_REGISTER_OFFSET (0x79f0 - 0x6df0)
161#define EVERGREEN_CRTC2_REGISTER_OFFSET (0x105f0 - 0x6df0)
162#define EVERGREEN_CRTC3_REGISTER_OFFSET (0x111f0 - 0x6df0)
163#define EVERGREEN_CRTC4_REGISTER_OFFSET (0x11df0 - 0x6df0)
164#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
165
166/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
167#define EVERGREEN_CRTC_CONTROL 0x6e70
168# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
169#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
170
171#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
172#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
173#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
174#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
175
176#endif
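Each display controller block repeats at a fixed base, so the per-CRTC offsets above are simply the block base minus the CRTC0 base: for example EVERGREEN_CRTC1_REGISTER_OFFSET is 0x79f0 - 0x6df0 = 0xc00, making the second CRTC's control register 0x6e70 + 0xc00 = 0x7a70. A small lookup helper along these lines (hypothetical; the code in evergreen.c open-codes the additions):

static u32 evergreen_crtc_reg(u32 reg, int crtc)
{
	static const u32 crtc_offsets[6] = {
		EVERGREEN_CRTC0_REGISTER_OFFSET,
		EVERGREEN_CRTC1_REGISTER_OFFSET,
		EVERGREEN_CRTC2_REGISTER_OFFSET,
		EVERGREEN_CRTC3_REGISTER_OFFSET,
		EVERGREEN_CRTC4_REGISTER_OFFSET,
		EVERGREEN_CRTC5_REGISTER_OFFSET,
	};

	return reg + crtc_offsets[crtc];	/* e.g. EVERGREEN_CRTC_CONTROL for crtc 1 -> 0x7a70 */
}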
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c0d4650cdb79..91eb762eb3f9 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -197,13 +197,13 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
197{ 197{
198 uint32_t tmp; 198 uint32_t tmp;
199 199
200 radeon_gart_restore(rdev);
200 /* discard memory request outside of configured range */ 201 /* discard memory request outside of configured range */
201 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; 202 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
202 WREG32(RADEON_AIC_CNTL, tmp); 203 WREG32(RADEON_AIC_CNTL, tmp);
203 /* set address range for PCI address translate */ 204 /* set address range for PCI address translate */
204 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location); 205 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
205 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 206 WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
206 WREG32(RADEON_AIC_HI_ADDR, tmp);
207 /* set PCI GART page-table base address */ 207 /* set PCI GART page-table base address */
208 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); 208 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
209 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; 209 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -312,9 +312,11 @@ int r100_irq_process(struct radeon_device *rdev)
312 /* Vertical blank interrupts */ 312 /* Vertical blank interrupts */
313 if (status & RADEON_CRTC_VBLANK_STAT) { 313 if (status & RADEON_CRTC_VBLANK_STAT) {
314 drm_handle_vblank(rdev->ddev, 0); 314 drm_handle_vblank(rdev->ddev, 0);
315 wake_up(&rdev->irq.vblank_queue);
315 } 316 }
316 if (status & RADEON_CRTC2_VBLANK_STAT) { 317 if (status & RADEON_CRTC2_VBLANK_STAT) {
317 drm_handle_vblank(rdev->ddev, 1); 318 drm_handle_vblank(rdev->ddev, 1);
319 wake_up(&rdev->irq.vblank_queue);
318 } 320 }
319 if (status & RADEON_FP_DETECT_STAT) { 321 if (status & RADEON_FP_DETECT_STAT) {
320 queue_hotplug = true; 322 queue_hotplug = true;
@@ -366,8 +368,8 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
366 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); 368 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
367 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); 369 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
368 /* Wait until IDLE & CLEAN */ 370 /* Wait until IDLE & CLEAN */
369 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 371 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
370 radeon_ring_write(rdev, (1 << 16) | (1 << 17)); 372 radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
371 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 373 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
372 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl | 374 radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
373 RADEON_HDP_READ_BUFFER_INVALIDATE); 375 RADEON_HDP_READ_BUFFER_INVALIDATE);
@@ -1701,7 +1703,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev)
1701 } 1703 }
1702 for (i = 0; i < rdev->usec_timeout; i++) { 1704 for (i = 0; i < rdev->usec_timeout; i++) {
1703 tmp = RREG32(RADEON_RBBM_STATUS); 1705 tmp = RREG32(RADEON_RBBM_STATUS);
1704 if (!(tmp & (1 << 31))) { 1706 if (!(tmp & RADEON_RBBM_ACTIVE)) {
1705 return 0; 1707 return 0;
1706 } 1708 }
1707 DRM_UDELAY(1); 1709 DRM_UDELAY(1);
@@ -1716,8 +1718,8 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
1716 1718
1717 for (i = 0; i < rdev->usec_timeout; i++) { 1719 for (i = 0; i < rdev->usec_timeout; i++) {
1718 /* read MC_STATUS */ 1720 /* read MC_STATUS */
1719 tmp = RREG32(0x0150); 1721 tmp = RREG32(RADEON_MC_STATUS);
1720 if (tmp & (1 << 2)) { 1722 if (tmp & RADEON_MC_IDLE) {
1721 return 0; 1723 return 0;
1722 } 1724 }
1723 DRM_UDELAY(1); 1725 DRM_UDELAY(1);
@@ -1790,7 +1792,7 @@ int r100_gpu_reset(struct radeon_device *rdev)
1790 } 1792 }
1791 /* Check if GPU is idle */ 1793 /* Check if GPU is idle */
1792 status = RREG32(RADEON_RBBM_STATUS); 1794 status = RREG32(RADEON_RBBM_STATUS);
1793 if (status & (1 << 31)) { 1795 if (status & RADEON_RBBM_ACTIVE) {
1794 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); 1796 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
1795 return -1; 1797 return -1;
1796 } 1798 }
@@ -1800,6 +1802,9 @@ int r100_gpu_reset(struct radeon_device *rdev)
1800 1802
1801void r100_set_common_regs(struct radeon_device *rdev) 1803void r100_set_common_regs(struct radeon_device *rdev)
1802{ 1804{
1805 struct drm_device *dev = rdev->ddev;
1806 bool force_dac2 = false;
1807
1803 /* set these so they don't interfere with anything */ 1808 /* set these so they don't interfere with anything */
1804 WREG32(RADEON_OV0_SCALE_CNTL, 0); 1809 WREG32(RADEON_OV0_SCALE_CNTL, 0);
1805 WREG32(RADEON_SUBPIC_CNTL, 0); 1810 WREG32(RADEON_SUBPIC_CNTL, 0);
@@ -1808,6 +1813,68 @@ void r100_set_common_regs(struct radeon_device *rdev)
1808 WREG32(RADEON_DVI_I2C_CNTL_1, 0); 1813 WREG32(RADEON_DVI_I2C_CNTL_1, 0);
1809 WREG32(RADEON_CAP0_TRIG_CNTL, 0); 1814 WREG32(RADEON_CAP0_TRIG_CNTL, 0);
1810 WREG32(RADEON_CAP1_TRIG_CNTL, 0); 1815 WREG32(RADEON_CAP1_TRIG_CNTL, 0);
1816
1817 /* always set up dac2 on rn50 and some rv100 as lots
1818 * of servers seem to wire it up to a VGA port but
1819 * don't report it in the bios connector
1820 * table.
1821 */
1822 switch (dev->pdev->device) {
1823 /* RN50 */
1824 case 0x515e:
1825 case 0x5969:
1826 force_dac2 = true;
1827 break;
1828 /* RV100*/
1829 case 0x5159:
1830 case 0x515a:
1831 /* DELL triple head servers */
1832 if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
1833 ((dev->pdev->subsystem_device == 0x016c) ||
1834 (dev->pdev->subsystem_device == 0x016d) ||
1835 (dev->pdev->subsystem_device == 0x016e) ||
1836 (dev->pdev->subsystem_device == 0x016f) ||
1837 (dev->pdev->subsystem_device == 0x0170) ||
1838 (dev->pdev->subsystem_device == 0x017d) ||
1839 (dev->pdev->subsystem_device == 0x017e) ||
1840 (dev->pdev->subsystem_device == 0x0183) ||
1841 (dev->pdev->subsystem_device == 0x018a) ||
1842 (dev->pdev->subsystem_device == 0x019a)))
1843 force_dac2 = true;
1844 break;
1845 }
1846
1847 if (force_dac2) {
1848 u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
1849 u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
1850 u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
1851
1852 /* For CRT on DAC2, don't turn it on if BIOS didn't
1853 enable it, even if it's detected.
1854 */
1855
1856 /* force it to crtc0 */
1857 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
1858 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
1859 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
1860
1861 /* set up the TV DAC */
1862 tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
1863 RADEON_TV_DAC_STD_MASK |
1864 RADEON_TV_DAC_RDACPD |
1865 RADEON_TV_DAC_GDACPD |
1866 RADEON_TV_DAC_BDACPD |
1867 RADEON_TV_DAC_BGADJ_MASK |
1868 RADEON_TV_DAC_DACADJ_MASK);
1869 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
1870 RADEON_TV_DAC_NHOLD |
1871 RADEON_TV_DAC_STD_PS2 |
1872 (0x58 << 16));
1873
1874 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
1875 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
1876 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
1877 }
1811} 1878}
1812 1879
1813/* 1880/*
@@ -1889,17 +1956,20 @@ static u32 r100_get_accessible_vram(struct radeon_device *rdev)
1889void r100_vram_init_sizes(struct radeon_device *rdev) 1956void r100_vram_init_sizes(struct radeon_device *rdev)
1890{ 1957{
1891 u64 config_aper_size; 1958 u64 config_aper_size;
1892 u32 accessible;
1893 1959
1960 /* work out accessible VRAM */
1961 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1962 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1963 rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
1964 /* FIXME we don't use the second aperture yet when we could use it */
1965 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
1966 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1894 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 1967 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
1895
1896 if (rdev->flags & RADEON_IS_IGP) { 1968 if (rdev->flags & RADEON_IS_IGP) {
1897 uint32_t tom; 1969 uint32_t tom;
1898 /* read NB_TOM to get the amount of ram stolen for the GPU */ 1970 /* read NB_TOM to get the amount of ram stolen for the GPU */
1899 tom = RREG32(RADEON_NB_TOM); 1971 tom = RREG32(RADEON_NB_TOM);
1900 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); 1972 rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
1901 /* for IGPs we need to keep VRAM where it was put by the BIOS */
1902 rdev->mc.vram_location = (tom & 0xffff) << 16;
1903 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 1973 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
1904 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 1974 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1905 } else { 1975 } else {
@@ -1911,30 +1981,19 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
1911 rdev->mc.real_vram_size = 8192 * 1024; 1981 rdev->mc.real_vram_size = 8192 * 1024;
1912 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); 1982 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
1913 } 1983 }
1914 /* let driver place VRAM */ 1984 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
1915 rdev->mc.vram_location = 0xFFFFFFFFUL; 1985 * Novell bug 204882 + along with lots of ubuntu ones
1916 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - 1986 */
1917 * Novell bug 204882 + along with lots of ubuntu ones */
1918 if (config_aper_size > rdev->mc.real_vram_size) 1987 if (config_aper_size > rdev->mc.real_vram_size)
1919 rdev->mc.mc_vram_size = config_aper_size; 1988 rdev->mc.mc_vram_size = config_aper_size;
1920 else 1989 else
1921 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 1990 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1922 } 1991 }
1923 1992 /* FIXME remove this once we support unmappable VRAM */
1924 /* work out accessible VRAM */ 1993 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
1925 accessible = r100_get_accessible_vram(rdev);
1926
1927 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1928 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1929
1930 if (accessible > rdev->mc.aper_size)
1931 accessible = rdev->mc.aper_size;
1932
1933 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
1934 rdev->mc.mc_vram_size = rdev->mc.aper_size; 1994 rdev->mc.mc_vram_size = rdev->mc.aper_size;
1935
1936 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
1937 rdev->mc.real_vram_size = rdev->mc.aper_size; 1995 rdev->mc.real_vram_size = rdev->mc.aper_size;
1996 }
1938} 1997}
1939 1998
1940void r100_vga_set_state(struct radeon_device *rdev, bool state) 1999void r100_vga_set_state(struct radeon_device *rdev, bool state)
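On IGPs, r100_vram_init_sizes() now reads the stolen-memory range from RADEON_NB_TOM, which encodes the bottom of the carve-out in its low 16 bits and the top in its high 16 bits, both in 64 KB units. A worked example with an assumed register value, not taken from the patch:

/* tom = 0x3fff3800:
 *   base = (tom & 0xffff) << 16            = 0x38000000  (896 MB)
 *   size = ((tom >> 16) - (tom & 0xffff) + 1) << 16
 *        = (0x3fff - 0x3800 + 1) << 16     = 0x08000000  (128 MB)
 * matching the real_vram_size computation in the hunk above. */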
@@ -1951,11 +2010,18 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
1951 WREG32(RADEON_CONFIG_CNTL, temp); 2010 WREG32(RADEON_CONFIG_CNTL, temp);
1952} 2011}
1953 2012
1954void r100_vram_info(struct radeon_device *rdev) 2013void r100_mc_init(struct radeon_device *rdev)
1955{ 2014{
1956 r100_vram_get_type(rdev); 2015 u64 base;
1957 2016
2017 r100_vram_get_type(rdev);
1958 r100_vram_init_sizes(rdev); 2018 r100_vram_init_sizes(rdev);
2019 base = rdev->mc.aper_base;
2020 if (rdev->flags & RADEON_IS_IGP)
2021 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2022 radeon_vram_location(rdev, &rdev->mc, base);
2023 if (!(rdev->flags & RADEON_IS_AGP))
2024 radeon_gtt_location(rdev, &rdev->mc);
1959} 2025}
1960 2026
1961 2027
@@ -3226,10 +3292,9 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3226void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save) 3292void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3227{ 3293{
3228 /* Update base address for crtc */ 3294 /* Update base address for crtc */
3229 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location); 3295 WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3230 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3296 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3231 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, 3297 WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3232 rdev->mc.vram_location);
3233 } 3298 }
3234 /* Restore CRTC registers */ 3299 /* Restore CRTC registers */
3235 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT); 3300 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
@@ -3390,32 +3455,6 @@ void r100_fini(struct radeon_device *rdev)
3390 rdev->bios = NULL; 3455 rdev->bios = NULL;
3391} 3456}
3392 3457
3393int r100_mc_init(struct radeon_device *rdev)
3394{
3395 int r;
3396 u32 tmp;
3397
3398 /* Setup GPU memory space */
3399 rdev->mc.vram_location = 0xFFFFFFFFUL;
3400 rdev->mc.gtt_location = 0xFFFFFFFFUL;
3401 if (rdev->flags & RADEON_IS_IGP) {
3402 tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
3403 rdev->mc.vram_location = tmp << 16;
3404 }
3405 if (rdev->flags & RADEON_IS_AGP) {
3406 r = radeon_agp_init(rdev);
3407 if (r) {
3408 radeon_agp_disable(rdev);
3409 } else {
3410 rdev->mc.gtt_location = rdev->mc.agp_base;
3411 }
3412 }
3413 r = radeon_mc_setup(rdev);
3414 if (r)
3415 return r;
3416 return 0;
3417}
3418
3419int r100_init(struct radeon_device *rdev) 3458int r100_init(struct radeon_device *rdev)
3420{ 3459{
3421 int r; 3460 int r;
@@ -3458,12 +3497,15 @@ int r100_init(struct radeon_device *rdev)
3458 radeon_get_clock_info(rdev->ddev); 3497 radeon_get_clock_info(rdev->ddev);
3459 /* Initialize power management */ 3498 /* Initialize power management */
3460 radeon_pm_init(rdev); 3499 radeon_pm_init(rdev);
3461 /* Get vram informations */ 3500 /* initialize AGP */
3462 r100_vram_info(rdev); 3501 if (rdev->flags & RADEON_IS_AGP) {
3463 /* Initialize memory controller (also test AGP) */ 3502 r = radeon_agp_init(rdev);
3464 r = r100_mc_init(rdev); 3503 if (r) {
3465 if (r) 3504 radeon_agp_disable(rdev);
3466 return r; 3505 }
3506 }
3507 /* initialize VRAM */
3508 r100_mc_init(rdev);
3467 /* Fence driver */ 3509 /* Fence driver */
3468 r = radeon_fence_driver_init(rdev); 3510 r = radeon_fence_driver_init(rdev);
3469 if (r) 3511 if (r)
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index ff1e0cd608bf..1146c9909c2c 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -31,6 +31,7 @@
31#include "radeon_reg.h" 31#include "radeon_reg.h"
32#include "radeon.h" 32#include "radeon.h"
33 33
34#include "r100d.h"
34#include "r200_reg_safe.h" 35#include "r200_reg_safe.h"
35 36
36#include "r100_track.h" 37#include "r100_track.h"
@@ -79,6 +80,51 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
79 return vtx_size; 80 return vtx_size;
80} 81}
81 82
83int r200_copy_dma(struct radeon_device *rdev,
84 uint64_t src_offset,
85 uint64_t dst_offset,
86 unsigned num_pages,
87 struct radeon_fence *fence)
88{
89 uint32_t size;
90 uint32_t cur_size;
91 int i, num_loops;
92 int r = 0;
93
94 /* radeon pitch is /64 */
95 size = num_pages << PAGE_SHIFT;
96 num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
97 r = radeon_ring_lock(rdev, num_loops * 4 + 64);
98 if (r) {
99 DRM_ERROR("radeon: moving bo (%d).\n", r);
100 return r;
101 }
102 /* Must wait for 2D idle & clean before DMA or hangs might happen */
103 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
104 radeon_ring_write(rdev, (1 << 16));
105 for (i = 0; i < num_loops; i++) {
106 cur_size = size;
107 if (cur_size > 0x1FFFFF) {
108 cur_size = 0x1FFFFF;
109 }
110 size -= cur_size;
111 radeon_ring_write(rdev, PACKET0(0x720, 2));
112 radeon_ring_write(rdev, src_offset);
113 radeon_ring_write(rdev, dst_offset);
114 radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
115 src_offset += cur_size;
116 dst_offset += cur_size;
117 }
118 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
119 radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
120 if (fence) {
121 r = radeon_fence_emit(rdev, fence);
122 }
123 radeon_ring_unlock_commit(rdev);
124 return r;
125}
126
127
82static int r200_get_vtx_size_1(uint32_t vtx_fmt_1) 128static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
83{ 129{
84 int vtx_size, i, tex_size; 130 int vtx_size, i, tex_size;
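r200_copy_dma(), added above (and removed from r300.c below), splits a copy into DMA packets of at most 0x1FFFFF bytes and sizes the ring lock from the resulting loop count. A quick check of that arithmetic for an assumed 16 MiB copy; the helper is hypothetical:

#include <linux/kernel.h>	/* DIV_ROUND_UP */

static unsigned r200_dma_ring_dwords(unsigned num_pages)
{
	u32 size = num_pages << PAGE_SHIFT;		/* 4096 pages * 4 KB = 16 MiB */
	int num_loops = DIV_ROUND_UP(size, 0x1FFFFF);	/* ceil(16 MiB / 0x1FFFFF) = 9 */

	return num_loops * 4 + 64;			/* dwords asked of radeon_ring_lock() */
}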
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 43b55a030b4d..4cef90cd74e5 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -117,18 +117,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
117 r = radeon_gart_table_vram_pin(rdev); 117 r = radeon_gart_table_vram_pin(rdev);
118 if (r) 118 if (r)
119 return r; 119 return r;
120 radeon_gart_restore(rdev);
120 /* discard memory request outside of configured range */ 121 /* discard memory request outside of configured range */
121 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; 122 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
122 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); 123 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
123 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location); 124 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
124 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE; 125 tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
125 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp); 126 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
126 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); 127 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
127 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); 128 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
128 table_addr = rdev->gart.table_addr; 129 table_addr = rdev->gart.table_addr;
129 WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr); 130 WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
130 /* FIXME: setup default page */ 131 /* FIXME: setup default page */
131 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location); 132 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
132 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); 133 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
133 /* Clear error */ 134 /* Clear error */
134 WREG32_PCIE(0x18, 0); 135 WREG32_PCIE(0x18, 0);
@@ -174,18 +175,20 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
174 /* Whoever calls radeon_fence_emit should call ring_lock and ask 175 /* Whoever calls radeon_fence_emit should call ring_lock and ask
175 * for enough space (today callers are ib schedule and buffer move) */ 176 * for enough space (today callers are ib schedule and buffer move) */
176 /* Write SC register so SC & US assert idle */ 177 /* Write SC register so SC & US assert idle */
177 radeon_ring_write(rdev, PACKET0(0x43E0, 0)); 178 radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
178 radeon_ring_write(rdev, 0); 179 radeon_ring_write(rdev, 0);
179 radeon_ring_write(rdev, PACKET0(0x43E4, 0)); 180 radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
180 radeon_ring_write(rdev, 0); 181 radeon_ring_write(rdev, 0);
181 /* Flush 3D cache */ 182 /* Flush 3D cache */
182 radeon_ring_write(rdev, PACKET0(0x4E4C, 0)); 183 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
183 radeon_ring_write(rdev, (2 << 0)); 184 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
184 radeon_ring_write(rdev, PACKET0(0x4F18, 0)); 185 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
185 radeon_ring_write(rdev, (1 << 0)); 186 radeon_ring_write(rdev, R300_ZC_FLUSH);
186 /* Wait until IDLE & CLEAN */ 187 /* Wait until IDLE & CLEAN */
187 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 188 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
188 radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9)); 189 radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
190 RADEON_WAIT_2D_IDLECLEAN |
191 RADEON_WAIT_DMA_GUI_IDLE));
189 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); 192 radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
190 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl | 193 radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
191 RADEON_HDP_READ_BUFFER_INVALIDATE); 194 RADEON_HDP_READ_BUFFER_INVALIDATE);
@@ -198,50 +201,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
198 radeon_ring_write(rdev, RADEON_SW_INT_FIRE); 201 radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
199} 202}
200 203
201int r300_copy_dma(struct radeon_device *rdev,
202 uint64_t src_offset,
203 uint64_t dst_offset,
204 unsigned num_pages,
205 struct radeon_fence *fence)
206{
207 uint32_t size;
208 uint32_t cur_size;
209 int i, num_loops;
210 int r = 0;
211
212 /* radeon pitch is /64 */
213 size = num_pages << PAGE_SHIFT;
214 num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
215 r = radeon_ring_lock(rdev, num_loops * 4 + 64);
216 if (r) {
217 DRM_ERROR("radeon: moving bo (%d).\n", r);
218 return r;
219 }
220 /* Must wait for 2D idle & clean before DMA or hangs might happen */
221 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
222 radeon_ring_write(rdev, (1 << 16));
223 for (i = 0; i < num_loops; i++) {
224 cur_size = size;
225 if (cur_size > 0x1FFFFF) {
226 cur_size = 0x1FFFFF;
227 }
228 size -= cur_size;
229 radeon_ring_write(rdev, PACKET0(0x720, 2));
230 radeon_ring_write(rdev, src_offset);
231 radeon_ring_write(rdev, dst_offset);
232 radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
233 src_offset += cur_size;
234 dst_offset += cur_size;
235 }
236 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
237 radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
238 if (fence) {
239 r = radeon_fence_emit(rdev, fence);
240 }
241 radeon_ring_unlock_commit(rdev);
242 return r;
243}
244
245void r300_ring_start(struct radeon_device *rdev) 204void r300_ring_start(struct radeon_device *rdev)
246{ 205{
247 unsigned gb_tile_config; 206 unsigned gb_tile_config;
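
The r300_copy_dma() helper deleted in this hunk broke every buffer move into chunks of at most 0x1FFFFF bytes, which appears to be the largest length the DMA packet's size field can carry, and emitted one packet per chunk after a 2D-idle wait. A standalone sketch of that chunking loop, with hypothetical offsets:

#include <stdint.h>
#include <stdio.h>

#define DMA_MAX_CHUNK 0x1FFFFFu   /* per-packet limit used by the removed helper */

int main(void)
{
    uint32_t size = 8u << 20;                  /* hypothetical 8 MiB copy */
    uint64_t src = 0x00100000u, dst = 0x00900000u;
    unsigned num_loops = (size + DMA_MAX_CHUNK - 1) / DMA_MAX_CHUNK;  /* DIV_ROUND_UP */
    unsigned i;

    for (i = 0; i < num_loops; i++) {
        uint32_t cur = size > DMA_MAX_CHUNK ? DMA_MAX_CHUNK : size;

        size -= cur;
        printf("chunk %u: src=0x%llx dst=0x%llx len=0x%x\n", i,
               (unsigned long long)src, (unsigned long long)dst, cur);
        src += cur;
        dst += cur;
    }
    return 0;
}
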
@@ -281,8 +240,8 @@ void r300_ring_start(struct radeon_device *rdev)
281 radeon_ring_write(rdev, 240 radeon_ring_write(rdev,
282 RADEON_WAIT_2D_IDLECLEAN | 241 RADEON_WAIT_2D_IDLECLEAN |
283 RADEON_WAIT_3D_IDLECLEAN); 242 RADEON_WAIT_3D_IDLECLEAN);
284 radeon_ring_write(rdev, PACKET0(0x170C, 0)); 243 radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
285 radeon_ring_write(rdev, 1 << 31); 244 radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
286 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0)); 245 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
287 radeon_ring_write(rdev, 0); 246 radeon_ring_write(rdev, 0);
288 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0)); 247 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
@@ -349,8 +308,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
349 308
350 for (i = 0; i < rdev->usec_timeout; i++) { 309 for (i = 0; i < rdev->usec_timeout; i++) {
351 /* read MC_STATUS */ 310 /* read MC_STATUS */
352 tmp = RREG32(0x0150); 311 tmp = RREG32(RADEON_MC_STATUS);
353 if (tmp & (1 << 4)) { 312 if (tmp & R300_MC_IDLE) {
354 return 0; 313 return 0;
355 } 314 }
356 DRM_UDELAY(1); 315 DRM_UDELAY(1);
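
r300_mc_wait_for_idle() above is a bounded poll: read MC_STATUS, test the idle bit, and give up after rdev->usec_timeout probes with a 1 microsecond delay between them. A minimal sketch of that pattern, with the register read and the delay abstracted behind callbacks (both assumptions of the sketch):

#include <stdint.h>

#define MC_IDLE_BIT (1u << 4)   /* R300_MC_IDLE per the hunk above */

/* Returns 0 once the idle bit is observed, -1 after usec_timeout probes. */
static int wait_for_idle(uint32_t (*read_status)(void),
                         void (*udelay_us)(unsigned),
                         unsigned usec_timeout)
{
    unsigned i;

    for (i = 0; i < usec_timeout; i++) {
        if (read_status() & MC_IDLE_BIT)
            return 0;       /* memory controller reports idle */
        udelay_us(1);
    }
    return -1;              /* timed out; the caller decides how to recover */
}

/* tiny exercise with dummy callbacks */
static uint32_t fake_status(void) { return MC_IDLE_BIT; }
static void fake_udelay(unsigned us) { (void)us; }

int main(void)
{
    return wait_for_idle(fake_status, fake_udelay, 10);
}
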
@@ -395,8 +354,8 @@ void r300_gpu_init(struct radeon_device *rdev)
395 "programming pipes. Bad things might happen.\n"); 354 "programming pipes. Bad things might happen.\n");
396 } 355 }
397 356
398 tmp = RREG32(0x170C); 357 tmp = RREG32(R300_DST_PIPE_CONFIG);
399 WREG32(0x170C, tmp | (1 << 31)); 358 WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
400 359
401 WREG32(R300_RB2D_DSTCACHE_MODE, 360 WREG32(R300_RB2D_DSTCACHE_MODE,
402 R300_DC_AUTOFLUSH_ENABLE | 361 R300_DC_AUTOFLUSH_ENABLE |
@@ -437,8 +396,8 @@ int r300_ga_reset(struct radeon_device *rdev)
437 /* GA still busy soft reset it */ 396 /* GA still busy soft reset it */
438 WREG32(0x429C, 0x200); 397 WREG32(0x429C, 0x200);
439 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); 398 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
440 WREG32(0x43E0, 0); 399 WREG32(R300_RE_SCISSORS_TL, 0);
441 WREG32(0x43E4, 0); 400 WREG32(R300_RE_SCISSORS_BR, 0);
442 WREG32(0x24AC, 0); 401 WREG32(0x24AC, 0);
443 } 402 }
444 /* Wait to prevent race in RBBM_STATUS */ 403 /* Wait to prevent race in RBBM_STATUS */
@@ -488,7 +447,7 @@ int r300_gpu_reset(struct radeon_device *rdev)
488 } 447 }
489 /* Check if GPU is idle */ 448 /* Check if GPU is idle */
490 status = RREG32(RADEON_RBBM_STATUS); 449 status = RREG32(RADEON_RBBM_STATUS);
491 if (status & (1 << 31)) { 450 if (status & RADEON_RBBM_ACTIVE) {
492 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); 451 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
493 return -1; 452 return -1;
494 } 453 }
@@ -500,13 +459,13 @@ int r300_gpu_reset(struct radeon_device *rdev)
500/* 459/*
501 * r300,r350,rv350,rv380 VRAM info 460 * r300,r350,rv350,rv380 VRAM info
502 */ 461 */
503void r300_vram_info(struct radeon_device *rdev) 462void r300_mc_init(struct radeon_device *rdev)
504{ 463{
505 uint32_t tmp; 464 u64 base;
465 u32 tmp;
506 466
507 /* DDR for all card after R300 & IGP */ 467 /* DDR for all card after R300 & IGP */
508 rdev->mc.vram_is_ddr = true; 468 rdev->mc.vram_is_ddr = true;
509
510 tmp = RREG32(RADEON_MEM_CNTL); 469 tmp = RREG32(RADEON_MEM_CNTL);
511 tmp &= R300_MEM_NUM_CHANNELS_MASK; 470 tmp &= R300_MEM_NUM_CHANNELS_MASK;
512 switch (tmp) { 471 switch (tmp) {
@@ -515,8 +474,13 @@ void r300_vram_info(struct radeon_device *rdev)
515 case 2: rdev->mc.vram_width = 256; break; 474 case 2: rdev->mc.vram_width = 256; break;
516 default: rdev->mc.vram_width = 128; break; 475 default: rdev->mc.vram_width = 128; break;
517 } 476 }
518
519 r100_vram_init_sizes(rdev); 477 r100_vram_init_sizes(rdev);
478 base = rdev->mc.aper_base;
479 if (rdev->flags & RADEON_IS_IGP)
480 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
481 radeon_vram_location(rdev, &rdev->mc, base);
482 if (!(rdev->flags & RADEON_IS_AGP))
483 radeon_gtt_location(rdev, &rdev->mc);
520} 484}
521 485
522void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) 486void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
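
On IGP parts the new r300_mc_init() derives the VRAM base from RADEON_NB_TOM instead of assuming zero: the low 16 bits of the register are shifted left by 16, i.e. they appear to express the start of the stolen memory block in 64 KiB units (an inference from the shift in the hunk, not something documented here). A tiny sketch of that decode with a hypothetical register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t nb_tom = 0x7fff4000u;                          /* hypothetical NB_TOM value */
    uint64_t base   = (uint64_t)(nb_tom & 0xffffu) << 16;   /* low 16 bits -> byte address */

    printf("IGP VRAM base = 0x%llx\n", (unsigned long long)base);
    return 0;
}
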
@@ -578,6 +542,40 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
578 542
579} 543}
580 544
545int rv370_get_pcie_lanes(struct radeon_device *rdev)
546{
547 u32 link_width_cntl;
548
549 if (rdev->flags & RADEON_IS_IGP)
550 return 0;
551
552 if (!(rdev->flags & RADEON_IS_PCIE))
553 return 0;
554
555 /* FIXME wait for idle */
556
557 if (rdev->family < CHIP_R600)
558 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
559 else
560 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
561
562 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
563 case RADEON_PCIE_LC_LINK_WIDTH_X0:
564 return 0;
565 case RADEON_PCIE_LC_LINK_WIDTH_X1:
566 return 1;
567 case RADEON_PCIE_LC_LINK_WIDTH_X2:
568 return 2;
569 case RADEON_PCIE_LC_LINK_WIDTH_X4:
570 return 4;
571 case RADEON_PCIE_LC_LINK_WIDTH_X8:
572 return 8;
573 case RADEON_PCIE_LC_LINK_WIDTH_X16:
574 default:
575 return 16;
576 }
577}
578
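
rv370_get_pcie_lanes() above reads the link-width control register and maps the read-back field onto a lane count. The standalone sketch below shows the same mask/shift/decode shape; the field position and the code-to-lane mapping in it are illustrative placeholders, since the real values live in the RADEON_PCIE_LC_LINK_WIDTH_* defines that are not shown in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only; the driver's mask/shift differ. */
#define LC_LINK_WIDTH_RD_SHIFT 4
#define LC_LINK_WIDTH_RD_MASK  (0x7u << LC_LINK_WIDTH_RD_SHIFT)

static int decode_lanes(uint32_t link_width_cntl)
{
    static const int lanes[] = { 0, 1, 2, 4, 8, 12, 16 };  /* hypothetical encoding */
    unsigned code = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;

    return lanes[code < sizeof(lanes) / sizeof(lanes[0]) ? code : 6];
}

int main(void)
{
    printf("lanes = %d\n", decode_lanes(0x3u << LC_LINK_WIDTH_RD_SHIFT));
    return 0;
}
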
581#if defined(CONFIG_DEBUG_FS) 579#if defined(CONFIG_DEBUG_FS)
582static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) 580static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
583{ 581{
@@ -707,6 +705,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
707 tile_flags |= R300_TXO_MACRO_TILE; 705 tile_flags |= R300_TXO_MACRO_TILE;
708 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 706 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
709 tile_flags |= R300_TXO_MICRO_TILE; 707 tile_flags |= R300_TXO_MICRO_TILE;
708 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
709 tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
710 710
711 tmp = idx_value + ((u32)reloc->lobj.gpu_offset); 711 tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
712 tmp |= tile_flags; 712 tmp |= tile_flags;
@@ -757,6 +757,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
757 tile_flags |= R300_COLOR_TILE_ENABLE; 757 tile_flags |= R300_COLOR_TILE_ENABLE;
758 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 758 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
759 tile_flags |= R300_COLOR_MICROTILE_ENABLE; 759 tile_flags |= R300_COLOR_MICROTILE_ENABLE;
760 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
761 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
760 762
761 tmp = idx_value & ~(0x7 << 16); 763 tmp = idx_value & ~(0x7 << 16);
762 tmp |= tile_flags; 764 tmp |= tile_flags;
@@ -828,7 +830,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
828 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 830 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
829 tile_flags |= R300_DEPTHMACROTILE_ENABLE; 831 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
830 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 832 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
831 tile_flags |= R300_DEPTHMICROTILE_TILED;; 833 tile_flags |= R300_DEPTHMICROTILE_TILED;
834 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
835 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
832 836
833 tmp = idx_value & ~(0x7 << 16); 837 tmp = idx_value & ~(0x7 << 16);
834 tmp |= tile_flags; 838 tmp |= tile_flags;
@@ -1387,12 +1391,15 @@ int r300_init(struct radeon_device *rdev)
1387 radeon_get_clock_info(rdev->ddev); 1391 radeon_get_clock_info(rdev->ddev);
1388 /* Initialize power management */ 1392 /* Initialize power management */
1389 radeon_pm_init(rdev); 1393 radeon_pm_init(rdev);
1390 /* Get vram informations */ 1394 /* initialize AGP */
1391 r300_vram_info(rdev); 1395 if (rdev->flags & RADEON_IS_AGP) {
1392 /* Initialize memory controller (also test AGP) */ 1396 r = radeon_agp_init(rdev);
1393 r = r420_mc_init(rdev); 1397 if (r) {
1394 if (r) 1398 radeon_agp_disable(rdev);
1395 return r; 1399 }
1400 }
1401 /* initialize memory controller */
1402 r300_mc_init(rdev);
1396 /* Fence driver */ 1403 /* Fence driver */
1397 r = radeon_fence_driver_init(rdev); 1404 r = radeon_fence_driver_init(rdev);
1398 if (r) 1405 if (r)
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 34bffa0e4b73..ea46d558e8f3 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -33,6 +33,7 @@
33 33
34#include "drmP.h" 34#include "drmP.h"
35#include "drm.h" 35#include "drm.h"
36#include "drm_buffer.h"
36#include "radeon_drm.h" 37#include "radeon_drm.h"
37#include "radeon_drv.h" 38#include "radeon_drv.h"
38#include "r300_reg.h" 39#include "r300_reg.h"
@@ -299,46 +300,42 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
299 int reg; 300 int reg;
300 int sz; 301 int sz;
301 int i; 302 int i;
302 int values[64]; 303 u32 *value;
303 RING_LOCALS; 304 RING_LOCALS;
304 305
305 sz = header.packet0.count; 306 sz = header.packet0.count;
306 reg = (header.packet0.reghi << 8) | header.packet0.reglo; 307 reg = (header.packet0.reghi << 8) | header.packet0.reglo;
307 308
308 if ((sz > 64) || (sz < 0)) { 309 if ((sz > 64) || (sz < 0)) {
309 DRM_ERROR 310 DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
310 ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", 311 reg, sz);
311 reg, sz);
312 return -EINVAL; 312 return -EINVAL;
313 } 313 }
314
314 for (i = 0; i < sz; i++) { 315 for (i = 0; i < sz; i++) {
315 values[i] = ((int *)cmdbuf->buf)[i];
316 switch (r300_reg_flags[(reg >> 2) + i]) { 316 switch (r300_reg_flags[(reg >> 2) + i]) {
317 case MARK_SAFE: 317 case MARK_SAFE:
318 break; 318 break;
319 case MARK_CHECK_OFFSET: 319 case MARK_CHECK_OFFSET:
320 if (!radeon_check_offset(dev_priv, (u32) values[i])) { 320 value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
321 DRM_ERROR 321 if (!radeon_check_offset(dev_priv, *value)) {
322 ("Offset failed range check (reg=%04x sz=%d)\n", 322 DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
323 reg, sz); 323 reg, sz);
324 return -EINVAL; 324 return -EINVAL;
325 } 325 }
326 break; 326 break;
327 default: 327 default:
328 DRM_ERROR("Register %04x failed check as flag=%02x\n", 328 DRM_ERROR("Register %04x failed check as flag=%02x\n",
329 reg + i * 4, r300_reg_flags[(reg >> 2) + i]); 329 reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
330 return -EINVAL; 330 return -EINVAL;
331 } 331 }
332 } 332 }
333 333
334 BEGIN_RING(1 + sz); 334 BEGIN_RING(1 + sz);
335 OUT_RING(CP_PACKET0(reg, sz - 1)); 335 OUT_RING(CP_PACKET0(reg, sz - 1));
336 OUT_RING_TABLE(values, sz); 336 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
337 ADVANCE_RING(); 337 ADVANCE_RING();
338 338
339 cmdbuf->buf += sz * 4;
340 cmdbuf->bufsz -= sz * 4;
341
342 return 0; 339 return 0;
343} 340}
344 341
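
The rewrite of r300_emit_carefully_checked_packet0() above is the template for the rest of this file: instead of copying the user command stream into a local values[] array, registers flagged MARK_CHECK_OFFSET are validated in place through drm_buffer_pointer_to_dword(), and OUT_RING_DRM_BUFFER() both copies the dwords to the ring and advances the buffer, which is why the manual cmdbuf->buf/bufsz bookkeeping disappears. The sketch below is a deliberately flattened model of those accessor semantics; the real drm_buffer introduced by drm_buffer.c keeps the stream in a list of pages rather than one contiguous array.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Flat stand-in for struct drm_buffer: one contiguous dword array plus a
 * byte iterator marking how much of the stream has been consumed. */
struct buf_model {
    uint32_t *data;
    size_t    size;      /* total size in bytes */
    size_t    iterator;  /* current position in bytes */
};

static size_t buf_unprocessed(const struct buf_model *b)      /* drm_buffer_unprocessed() */
{
    return b->size - b->iterator;
}

static uint32_t *buf_pointer_to_dword(struct buf_model *b, size_t idx)  /* peek, no consume */
{
    return &b->data[b->iterator / 4 + idx];
}

static void buf_advance(struct buf_model *b, size_t bytes)    /* consume after emission */
{
    b->iterator += bytes;
}

int main(void)
{
    uint32_t stream[4] = { 0x11, 0x22, 0x33, 0x44 };
    struct buf_model b = { stream, sizeof(stream), 0 };

    printf("unprocessed=%zu first=0x%x\n", buf_unprocessed(&b),
           *buf_pointer_to_dword(&b, 0));
    buf_advance(&b, 2 * sizeof(uint32_t));
    printf("unprocessed=%zu next=0x%x\n", buf_unprocessed(&b),
           *buf_pointer_to_dword(&b, 0));
    return 0;
}
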
@@ -362,7 +359,7 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
362 if (!sz) 359 if (!sz)
363 return 0; 360 return 0;
364 361
365 if (sz * 4 > cmdbuf->bufsz) 362 if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
366 return -EINVAL; 363 return -EINVAL;
367 364
368 if (reg + sz * 4 >= 0x10000) { 365 if (reg + sz * 4 >= 0x10000) {
@@ -380,12 +377,9 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
380 377
381 BEGIN_RING(1 + sz); 378 BEGIN_RING(1 + sz);
382 OUT_RING(CP_PACKET0(reg, sz - 1)); 379 OUT_RING(CP_PACKET0(reg, sz - 1));
383 OUT_RING_TABLE((int *)cmdbuf->buf, sz); 380 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
384 ADVANCE_RING(); 381 ADVANCE_RING();
385 382
386 cmdbuf->buf += sz * 4;
387 cmdbuf->bufsz -= sz * 4;
388
389 return 0; 383 return 0;
390} 384}
391 385
@@ -407,7 +401,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
407 401
408 if (!sz) 402 if (!sz)
409 return 0; 403 return 0;
410 if (sz * 16 > cmdbuf->bufsz) 404 if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
411 return -EINVAL; 405 return -EINVAL;
412 406
413 /* VAP is very sensitive so we purge cache before we program it 407 /* VAP is very sensitive so we purge cache before we program it
@@ -426,7 +420,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
426 BEGIN_RING(3 + sz * 4); 420 BEGIN_RING(3 + sz * 4);
427 OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr); 421 OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
428 OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1)); 422 OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
429 OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4); 423 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
430 ADVANCE_RING(); 424 ADVANCE_RING();
431 425
432 BEGIN_RING(2); 426 BEGIN_RING(2);
@@ -434,9 +428,6 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
434 OUT_RING(0); 428 OUT_RING(0);
435 ADVANCE_RING(); 429 ADVANCE_RING();
436 430
437 cmdbuf->buf += sz * 16;
438 cmdbuf->bufsz -= sz * 16;
439
440 return 0; 431 return 0;
441} 432}
442 433
@@ -449,14 +440,14 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
449{ 440{
450 RING_LOCALS; 441 RING_LOCALS;
451 442
452 if (8 * 4 > cmdbuf->bufsz) 443 if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
453 return -EINVAL; 444 return -EINVAL;
454 445
455 BEGIN_RING(10); 446 BEGIN_RING(10);
456 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); 447 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
457 OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING | 448 OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
458 (1 << R300_PRIM_NUM_VERTICES_SHIFT)); 449 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
459 OUT_RING_TABLE((int *)cmdbuf->buf, 8); 450 OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
460 ADVANCE_RING(); 451 ADVANCE_RING();
461 452
462 BEGIN_RING(4); 453 BEGIN_RING(4);
@@ -468,9 +459,6 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
468 /* set flush flag */ 459 /* set flush flag */
469 dev_priv->track_flush |= RADEON_FLUSH_EMITED; 460 dev_priv->track_flush |= RADEON_FLUSH_EMITED;
470 461
471 cmdbuf->buf += 8 * 4;
472 cmdbuf->bufsz -= 8 * 4;
473
474 return 0; 462 return 0;
475} 463}
476 464
@@ -480,28 +468,29 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
480{ 468{
481 int count, i, k; 469 int count, i, k;
482#define MAX_ARRAY_PACKET 64 470#define MAX_ARRAY_PACKET 64
483 u32 payload[MAX_ARRAY_PACKET]; 471 u32 *data;
484 u32 narrays; 472 u32 narrays;
485 RING_LOCALS; 473 RING_LOCALS;
486 474
487 count = (header >> 16) & 0x3fff; 475 count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;
488 476
489 if ((count + 1) > MAX_ARRAY_PACKET) { 477 if ((count + 1) > MAX_ARRAY_PACKET) {
490 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", 478 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
491 count); 479 count);
492 return -EINVAL; 480 return -EINVAL;
493 } 481 }
494 memset(payload, 0, MAX_ARRAY_PACKET * 4);
495 memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
496
497 /* carefully check packet contents */ 482 /* carefully check packet contents */
498 483
499 narrays = payload[0]; 484 /* We have already read the header so advance the buffer. */
485 drm_buffer_advance(cmdbuf->buffer, 4);
486
487 narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
500 k = 0; 488 k = 0;
501 i = 1; 489 i = 1;
502 while ((k < narrays) && (i < (count + 1))) { 490 while ((k < narrays) && (i < (count + 1))) {
503 i++; /* skip attribute field */ 491 i++; /* skip attribute field */
504 if (!radeon_check_offset(dev_priv, payload[i])) { 492 data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
493 if (!radeon_check_offset(dev_priv, *data)) {
505 DRM_ERROR 494 DRM_ERROR
506 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", 495 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
507 k, i); 496 k, i);
@@ -512,7 +501,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
512 if (k == narrays) 501 if (k == narrays)
513 break; 502 break;
514 /* have one more to process, they come in pairs */ 503 /* have one more to process, they come in pairs */
515 if (!radeon_check_offset(dev_priv, payload[i])) { 504 data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
505 if (!radeon_check_offset(dev_priv, *data)) {
516 DRM_ERROR 506 DRM_ERROR
517 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", 507 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
518 k, i); 508 k, i);
@@ -533,30 +523,30 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
533 523
534 BEGIN_RING(count + 2); 524 BEGIN_RING(count + 2);
535 OUT_RING(header); 525 OUT_RING(header);
536 OUT_RING_TABLE(payload, count + 1); 526 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
537 ADVANCE_RING(); 527 ADVANCE_RING();
538 528
539 cmdbuf->buf += (count + 2) * 4;
540 cmdbuf->bufsz -= (count + 2) * 4;
541
542 return 0; 529 return 0;
543} 530}
544 531
545static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, 532static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
546 drm_radeon_kcmd_buffer_t *cmdbuf) 533 drm_radeon_kcmd_buffer_t *cmdbuf)
547{ 534{
548 u32 *cmd = (u32 *) cmdbuf->buf; 535 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
549 int count, ret; 536 int count, ret;
550 RING_LOCALS; 537 RING_LOCALS;
551 538
552 count=(cmd[0]>>16) & 0x3fff;
553 539
554 if (cmd[0] & 0x8000) { 540 count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
555 u32 offset;
556 541
557 if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL 542 if (*cmd & 0x8000) {
543 u32 offset;
544 u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
545 if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
558 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 546 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
559 offset = cmd[2] << 10; 547
548 u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
549 offset = *cmd2 << 10;
560 ret = !radeon_check_offset(dev_priv, offset); 550 ret = !radeon_check_offset(dev_priv, offset);
561 if (ret) { 551 if (ret) {
562 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); 552 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
@@ -564,9 +554,10 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
564 } 554 }
565 } 555 }
566 556
567 if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && 557 if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
568 (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 558 (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
569 offset = cmd[3] << 10; 559 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
560 offset = *cmd3 << 10;
570 ret = !radeon_check_offset(dev_priv, offset); 561 ret = !radeon_check_offset(dev_priv, offset);
571 if (ret) { 562 if (ret) {
572 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); 563 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
@@ -577,28 +568,25 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
577 } 568 }
578 569
579 BEGIN_RING(count+2); 570 BEGIN_RING(count+2);
580 OUT_RING(cmd[0]); 571 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
581 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
582 ADVANCE_RING(); 572 ADVANCE_RING();
583 573
584 cmdbuf->buf += (count+2)*4;
585 cmdbuf->bufsz -= (count+2)*4;
586
587 return 0; 574 return 0;
588} 575}
589 576
590static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv, 577static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
591 drm_radeon_kcmd_buffer_t *cmdbuf) 578 drm_radeon_kcmd_buffer_t *cmdbuf)
592{ 579{
593 u32 *cmd; 580 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
581 u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
594 int count; 582 int count;
595 int expected_count; 583 int expected_count;
596 RING_LOCALS; 584 RING_LOCALS;
597 585
598 cmd = (u32 *) cmdbuf->buf; 586 count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
599 count = (cmd[0]>>16) & 0x3fff; 587
600 expected_count = cmd[1] >> 16; 588 expected_count = *cmd1 >> 16;
601 if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit)) 589 if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
602 expected_count = (expected_count+1)/2; 590 expected_count = (expected_count+1)/2;
603 591
604 if (count && count != expected_count) { 592 if (count && count != expected_count) {
@@ -608,55 +596,53 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
608 } 596 }
609 597
610 BEGIN_RING(count+2); 598 BEGIN_RING(count+2);
611 OUT_RING(cmd[0]); 599 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
612 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
613 ADVANCE_RING(); 600 ADVANCE_RING();
614 601
615 cmdbuf->buf += (count+2)*4;
616 cmdbuf->bufsz -= (count+2)*4;
617
618 if (!count) { 602 if (!count) {
619 drm_r300_cmd_header_t header; 603 drm_r300_cmd_header_t stack_header, *header;
604 u32 *cmd1, *cmd2, *cmd3;
620 605
621 if (cmdbuf->bufsz < 4*4 + sizeof(header)) { 606 if (drm_buffer_unprocessed(cmdbuf->buffer)
607 < 4*4 + sizeof(stack_header)) {
622 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n"); 608 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
623 return -EINVAL; 609 return -EINVAL;
624 } 610 }
625 611
626 header.u = *(unsigned int *)cmdbuf->buf; 612 header = drm_buffer_read_object(cmdbuf->buffer,
613 sizeof(stack_header), &stack_header);
627 614
628 cmdbuf->buf += sizeof(header); 615 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
629 cmdbuf->bufsz -= sizeof(header); 616 cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
630 cmd = (u32 *) cmdbuf->buf; 617 cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
618 cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
631 619
632 if (header.header.cmd_type != R300_CMD_PACKET3 || 620 if (header->header.cmd_type != R300_CMD_PACKET3 ||
633 header.packet3.packet != R300_CMD_PACKET3_RAW || 621 header->packet3.packet != R300_CMD_PACKET3_RAW ||
634 cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) { 622 *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
635 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n"); 623 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
636 return -EINVAL; 624 return -EINVAL;
637 } 625 }
638 626
639 if ((cmd[1] & 0x8000ffff) != 0x80000810) { 627 if ((*cmd1 & 0x8000ffff) != 0x80000810) {
640 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); 628 DRM_ERROR("Invalid indx_buffer reg address %08X\n",
629 *cmd1);
641 return -EINVAL; 630 return -EINVAL;
642 } 631 }
643 if (!radeon_check_offset(dev_priv, cmd[2])) { 632 if (!radeon_check_offset(dev_priv, *cmd2)) {
644 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); 633 DRM_ERROR("Invalid indx_buffer offset is %08X\n",
634 *cmd2);
645 return -EINVAL; 635 return -EINVAL;
646 } 636 }
647 if (cmd[3] != expected_count) { 637 if (*cmd3 != expected_count) {
648 DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n", 638 DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
649 cmd[3], expected_count); 639 *cmd3, expected_count);
650 return -EINVAL; 640 return -EINVAL;
651 } 641 }
652 642
653 BEGIN_RING(4); 643 BEGIN_RING(4);
654 OUT_RING(cmd[0]); 644 OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
655 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
656 ADVANCE_RING(); 645 ADVANCE_RING();
657
658 cmdbuf->buf += 4*4;
659 cmdbuf->bufsz -= 4*4;
660 } 646 }
661 647
662 return 0; 648 return 0;
@@ -665,39 +651,39 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
665static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, 651static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
666 drm_radeon_kcmd_buffer_t *cmdbuf) 652 drm_radeon_kcmd_buffer_t *cmdbuf)
667{ 653{
668 u32 header; 654 u32 *header;
669 int count; 655 int count;
670 RING_LOCALS; 656 RING_LOCALS;
671 657
672 if (4 > cmdbuf->bufsz) 658 if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
673 return -EINVAL; 659 return -EINVAL;
674 660
675 /* Fixme !! This simply emits a packet without much checking. 661 /* Fixme !! This simply emits a packet without much checking.
676 We need to be smarter. */ 662 We need to be smarter. */
677 663
678 /* obtain first word - actual packet3 header */ 664 /* obtain first word - actual packet3 header */
679 header = *(u32 *) cmdbuf->buf; 665 header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
680 666
681 /* Is it packet 3 ? */ 667 /* Is it packet 3 ? */
682 if ((header >> 30) != 0x3) { 668 if ((*header >> 30) != 0x3) {
683 DRM_ERROR("Not a packet3 header (0x%08x)\n", header); 669 DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
684 return -EINVAL; 670 return -EINVAL;
685 } 671 }
686 672
687 count = (header >> 16) & 0x3fff; 673 count = (*header >> 16) & 0x3fff;
688 674
689 /* Check again now that we know how much data to expect */ 675 /* Check again now that we know how much data to expect */
690 if ((count + 2) * 4 > cmdbuf->bufsz) { 676 if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
691 DRM_ERROR 677 DRM_ERROR
692 ("Expected packet3 of length %d but have only %d bytes left\n", 678 ("Expected packet3 of length %d but have only %d bytes left\n",
693 (count + 2) * 4, cmdbuf->bufsz); 679 (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
694 return -EINVAL; 680 return -EINVAL;
695 } 681 }
696 682
697 /* Is it a packet type we know about ? */ 683 /* Is it a packet type we know about ? */
698 switch (header & 0xff00) { 684 switch (*header & 0xff00) {
699 case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */ 685 case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
700 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header); 686 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);
701 687
702 case RADEON_CNTL_BITBLT_MULTI: 688 case RADEON_CNTL_BITBLT_MULTI:
703 return r300_emit_bitblt_multi(dev_priv, cmdbuf); 689 return r300_emit_bitblt_multi(dev_priv, cmdbuf);
@@ -723,18 +709,14 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
723 /* these packets are safe */ 709 /* these packets are safe */
724 break; 710 break;
725 default: 711 default:
726 DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); 712 DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
727 return -EINVAL; 713 return -EINVAL;
728 } 714 }
729 715
730 BEGIN_RING(count + 2); 716 BEGIN_RING(count + 2);
731 OUT_RING(header); 717 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
732 OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
733 ADVANCE_RING(); 718 ADVANCE_RING();
734 719
735 cmdbuf->buf += (count + 2) * 4;
736 cmdbuf->bufsz -= (count + 2) * 4;
737
738 return 0; 720 return 0;
739} 721}
740 722
@@ -748,8 +730,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
748{ 730{
749 int n; 731 int n;
750 int ret; 732 int ret;
751 char *orig_buf = cmdbuf->buf; 733 int orig_iter = cmdbuf->buffer->iterator;
752 int orig_bufsz = cmdbuf->bufsz;
753 734
754 /* This is a do-while-loop so that we run the interior at least once, 735 /* This is a do-while-loop so that we run the interior at least once,
755 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale. 736 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
@@ -761,8 +742,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
761 if (ret) 742 if (ret)
762 return ret; 743 return ret;
763 744
764 cmdbuf->buf = orig_buf; 745 cmdbuf->buffer->iterator = orig_iter;
765 cmdbuf->bufsz = orig_bufsz;
766 } 746 }
767 747
768 switch (header.packet3.packet) { 748 switch (header.packet3.packet) {
@@ -785,9 +765,9 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
785 break; 765 break;
786 766
787 default: 767 default:
788 DRM_ERROR("bad packet3 type %i at %p\n", 768 DRM_ERROR("bad packet3 type %i at byte %d\n",
789 header.packet3.packet, 769 header.packet3.packet,
790 cmdbuf->buf - sizeof(header)); 770 cmdbuf->buffer->iterator - (int)sizeof(header));
791 return -EINVAL; 771 return -EINVAL;
792 } 772 }
793 773
@@ -923,12 +903,13 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
923 drm_r300_cmd_header_t header) 903 drm_r300_cmd_header_t header)
924{ 904{
925 u32 *ref_age_base; 905 u32 *ref_age_base;
926 u32 i, buf_idx, h_pending; 906 u32 i, *buf_idx, h_pending;
927 u64 ptr_addr; 907 u64 *ptr_addr;
908 u64 stack_ptr_addr;
928 RING_LOCALS; 909 RING_LOCALS;
929 910
930 if (cmdbuf->bufsz < 911 if (drm_buffer_unprocessed(cmdbuf->buffer) <
931 (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) { 912 (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
932 return -EINVAL; 913 return -EINVAL;
933 } 914 }
934 915
@@ -938,36 +919,35 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
938 919
939 dev_priv->scratch_ages[header.scratch.reg]++; 920 dev_priv->scratch_ages[header.scratch.reg]++;
940 921
941 ptr_addr = get_unaligned((u64 *)cmdbuf->buf); 922 ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
942 ref_age_base = (u32 *)(unsigned long)ptr_addr; 923 sizeof(stack_ptr_addr), &stack_ptr_addr);
943 924 ref_age_base = (u32 *)(unsigned long)*ptr_addr;
944 cmdbuf->buf += sizeof(u64);
945 cmdbuf->bufsz -= sizeof(u64);
946 925
947 for (i=0; i < header.scratch.n_bufs; i++) { 926 for (i=0; i < header.scratch.n_bufs; i++) {
948 buf_idx = *(u32 *)cmdbuf->buf; 927 buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
949 buf_idx *= 2; /* 8 bytes per buf */ 928 *buf_idx *= 2; /* 8 bytes per buf */
950 929
951 if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { 930 if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
931 &dev_priv->scratch_ages[header.scratch.reg],
932 sizeof(u32)))
952 return -EINVAL; 933 return -EINVAL;
953 }
954 934
955 if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { 935 if (DRM_COPY_FROM_USER(&h_pending,
936 ref_age_base + *buf_idx + 1,
937 sizeof(u32)))
956 return -EINVAL; 938 return -EINVAL;
957 }
958 939
959 if (h_pending == 0) { 940 if (h_pending == 0)
960 return -EINVAL; 941 return -EINVAL;
961 }
962 942
963 h_pending--; 943 h_pending--;
964 944
965 if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { 945 if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
946 &h_pending,
947 sizeof(u32)))
966 return -EINVAL; 948 return -EINVAL;
967 }
968 949
969 cmdbuf->buf += sizeof(buf_idx); 950 drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
970 cmdbuf->bufsz -= sizeof(buf_idx);
971 } 951 }
972 952
973 BEGIN_RING(2); 953 BEGIN_RING(2);
@@ -1009,19 +989,16 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
1009 DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type); 989 DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
1010 if (!sz) 990 if (!sz)
1011 return 0; 991 return 0;
1012 if (sz * stride * 4 > cmdbuf->bufsz) 992 if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
1013 return -EINVAL; 993 return -EINVAL;
1014 994
1015 BEGIN_RING(3 + sz * stride); 995 BEGIN_RING(3 + sz * stride);
1016 OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr); 996 OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
1017 OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1)); 997 OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
1018 OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride); 998 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);
1019 999
1020 ADVANCE_RING(); 1000 ADVANCE_RING();
1021 1001
1022 cmdbuf->buf += sz * stride * 4;
1023 cmdbuf->bufsz -= sz * stride * 4;
1024
1025 return 0; 1002 return 0;
1026} 1003}
1027 1004
@@ -1053,19 +1030,18 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1053 goto cleanup; 1030 goto cleanup;
1054 } 1031 }
1055 1032
1056 while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) { 1033 while (drm_buffer_unprocessed(cmdbuf->buffer)
1034 >= sizeof(drm_r300_cmd_header_t)) {
1057 int idx; 1035 int idx;
1058 drm_r300_cmd_header_t header; 1036 drm_r300_cmd_header_t *header, stack_header;
1059
1060 header.u = *(unsigned int *)cmdbuf->buf;
1061 1037
1062 cmdbuf->buf += sizeof(header); 1038 header = drm_buffer_read_object(cmdbuf->buffer,
1063 cmdbuf->bufsz -= sizeof(header); 1039 sizeof(stack_header), &stack_header);
1064 1040
1065 switch (header.header.cmd_type) { 1041 switch (header->header.cmd_type) {
1066 case R300_CMD_PACKET0: 1042 case R300_CMD_PACKET0:
1067 DRM_DEBUG("R300_CMD_PACKET0\n"); 1043 DRM_DEBUG("R300_CMD_PACKET0\n");
1068 ret = r300_emit_packet0(dev_priv, cmdbuf, header); 1044 ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
1069 if (ret) { 1045 if (ret) {
1070 DRM_ERROR("r300_emit_packet0 failed\n"); 1046 DRM_ERROR("r300_emit_packet0 failed\n");
1071 goto cleanup; 1047 goto cleanup;
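
r300_do_cp_cmdbuf() above now loops while drm_buffer_unprocessed() still holds at least one command header, reads the header out of the stream, and dispatches on header->header.cmd_type. The sketch below reproduces that parse-and-dispatch shape over a plain byte buffer; the header layout and command codes in it are illustrative stand-ins, not the drm_r300_cmd_header_t definitions.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct stream {
    const uint8_t *data;
    size_t size;
    size_t iterator;
};

struct cmd_header {           /* illustrative 4-byte header */
    uint8_t cmd_type;
    uint8_t payload_dwords;
    uint8_t pad[2];
};

static size_t unprocessed(const struct stream *s)
{
    return s->size - s->iterator;
}

static int parse_stream(struct stream *s)
{
    while (unprocessed(s) >= sizeof(struct cmd_header)) {
        struct cmd_header h;

        /* read_object + advance, as in the do_cp_cmdbuf loop */
        memcpy(&h, s->data + s->iterator, sizeof(h));
        s->iterator += sizeof(h);

        switch (h.cmd_type) {
        case 0:   /* e.g. a PACKET0-style command: skip its payload */
            if (unprocessed(s) < (size_t)h.payload_dwords * 4)
                return -1;
            s->iterator += (size_t)h.payload_dwords * 4;
            break;
        default:  /* unknown command type: abort, as the driver does */
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    const uint8_t stream_bytes[8] = { 0, 1, 0, 0, 0xde, 0xad, 0xbe, 0xef };
    struct stream s = { stream_bytes, sizeof(stream_bytes), 0 };

    return parse_stream(&s);
}
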
@@ -1074,7 +1050,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1074 1050
1075 case R300_CMD_VPU: 1051 case R300_CMD_VPU:
1076 DRM_DEBUG("R300_CMD_VPU\n"); 1052 DRM_DEBUG("R300_CMD_VPU\n");
1077 ret = r300_emit_vpu(dev_priv, cmdbuf, header); 1053 ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
1078 if (ret) { 1054 if (ret) {
1079 DRM_ERROR("r300_emit_vpu failed\n"); 1055 DRM_ERROR("r300_emit_vpu failed\n");
1080 goto cleanup; 1056 goto cleanup;
@@ -1083,7 +1059,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1083 1059
1084 case R300_CMD_PACKET3: 1060 case R300_CMD_PACKET3:
1085 DRM_DEBUG("R300_CMD_PACKET3\n"); 1061 DRM_DEBUG("R300_CMD_PACKET3\n");
1086 ret = r300_emit_packet3(dev_priv, cmdbuf, header); 1062 ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
1087 if (ret) { 1063 if (ret) {
1088 DRM_ERROR("r300_emit_packet3 failed\n"); 1064 DRM_ERROR("r300_emit_packet3 failed\n");
1089 goto cleanup; 1065 goto cleanup;
@@ -1117,8 +1093,8 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1117 int i; 1093 int i;
1118 RING_LOCALS; 1094 RING_LOCALS;
1119 1095
1120 BEGIN_RING(header.delay.count); 1096 BEGIN_RING(header->delay.count);
1121 for (i = 0; i < header.delay.count; i++) 1097 for (i = 0; i < header->delay.count; i++)
1122 OUT_RING(RADEON_CP_PACKET2); 1098 OUT_RING(RADEON_CP_PACKET2);
1123 ADVANCE_RING(); 1099 ADVANCE_RING();
1124 } 1100 }
@@ -1126,7 +1102,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1126 1102
1127 case R300_CMD_DMA_DISCARD: 1103 case R300_CMD_DMA_DISCARD:
1128 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); 1104 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
1129 idx = header.dma.buf_idx; 1105 idx = header->dma.buf_idx;
1130 if (idx < 0 || idx >= dma->buf_count) { 1106 if (idx < 0 || idx >= dma->buf_count) {
1131 DRM_ERROR("buffer index %d (of %d max)\n", 1107 DRM_ERROR("buffer index %d (of %d max)\n",
1132 idx, dma->buf_count - 1); 1108 idx, dma->buf_count - 1);
@@ -1149,12 +1125,12 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1149 1125
1150 case R300_CMD_WAIT: 1126 case R300_CMD_WAIT:
1151 DRM_DEBUG("R300_CMD_WAIT\n"); 1127 DRM_DEBUG("R300_CMD_WAIT\n");
1152 r300_cmd_wait(dev_priv, header); 1128 r300_cmd_wait(dev_priv, *header);
1153 break; 1129 break;
1154 1130
1155 case R300_CMD_SCRATCH: 1131 case R300_CMD_SCRATCH:
1156 DRM_DEBUG("R300_CMD_SCRATCH\n"); 1132 DRM_DEBUG("R300_CMD_SCRATCH\n");
1157 ret = r300_scratch(dev_priv, cmdbuf, header); 1133 ret = r300_scratch(dev_priv, cmdbuf, *header);
1158 if (ret) { 1134 if (ret) {
1159 DRM_ERROR("r300_scratch failed\n"); 1135 DRM_ERROR("r300_scratch failed\n");
1160 goto cleanup; 1136 goto cleanup;
@@ -1168,16 +1144,16 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
1168 goto cleanup; 1144 goto cleanup;
1169 } 1145 }
1170 DRM_DEBUG("R300_CMD_R500FP\n"); 1146 DRM_DEBUG("R300_CMD_R500FP\n");
1171 ret = r300_emit_r500fp(dev_priv, cmdbuf, header); 1147 ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
1172 if (ret) { 1148 if (ret) {
1173 DRM_ERROR("r300_emit_r500fp failed\n"); 1149 DRM_ERROR("r300_emit_r500fp failed\n");
1174 goto cleanup; 1150 goto cleanup;
1175 } 1151 }
1176 break; 1152 break;
1177 default: 1153 default:
1178 DRM_ERROR("bad cmd_type %i at %p\n", 1154 DRM_ERROR("bad cmd_type %i at byte %d\n",
1179 header.header.cmd_type, 1155 header->header.cmd_type,
1180 cmdbuf->buf - sizeof(header)); 1156 cmdbuf->buffer->iterator - (int)sizeof(*header));
1181 ret = -EINVAL; 1157 ret = -EINVAL;
1182 goto cleanup; 1158 goto cleanup;
1183 } 1159 }
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1735a2b69580..1a0d5362cd79 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -952,6 +952,7 @@
952# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0) 952# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
953# define R300_TXO_MACRO_TILE (1 << 2) 953# define R300_TXO_MACRO_TILE (1 << 2)
954# define R300_TXO_MICRO_TILE (1 << 3) 954# define R300_TXO_MICRO_TILE (1 << 3)
955# define R300_TXO_MICRO_TILE_SQUARE (2 << 3)
955# define R300_TXO_OFFSET_MASK 0xffffffe0 956# define R300_TXO_OFFSET_MASK 0xffffffe0
956# define R300_TXO_OFFSET_SHIFT 5 957# define R300_TXO_OFFSET_SHIFT 5
957 /* END: Guess from R200 */ 958 /* END: Guess from R200 */
@@ -1360,6 +1361,7 @@
1360# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */ 1361# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
1361# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */ 1362# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
1362# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */ 1363# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
1364# define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17)
1363# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */ 1365# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
1364# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */ 1366# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
1365# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */ 1367# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index d9373246c97f..c7593b8f58ee 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -40,28 +40,6 @@ static void r420_set_reg_safe(struct radeon_device *rdev)
40 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm); 40 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
41} 41}
42 42
43int r420_mc_init(struct radeon_device *rdev)
44{
45 int r;
46
47 /* Setup GPU memory space */
48 rdev->mc.vram_location = 0xFFFFFFFFUL;
49 rdev->mc.gtt_location = 0xFFFFFFFFUL;
50 if (rdev->flags & RADEON_IS_AGP) {
51 r = radeon_agp_init(rdev);
52 if (r) {
53 radeon_agp_disable(rdev);
54 } else {
55 rdev->mc.gtt_location = rdev->mc.agp_base;
56 }
57 }
58 r = radeon_mc_setup(rdev);
59 if (r) {
60 return r;
61 }
62 return 0;
63}
64
65void r420_pipes_init(struct radeon_device *rdev) 43void r420_pipes_init(struct radeon_device *rdev)
66{ 44{
67 unsigned tmp; 45 unsigned tmp;
@@ -69,7 +47,8 @@ void r420_pipes_init(struct radeon_device *rdev)
69 unsigned num_pipes; 47 unsigned num_pipes;
70 48
71 /* GA_ENHANCE workaround TCL deadlock issue */ 49 /* GA_ENHANCE workaround TCL deadlock issue */
72 WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)); 50 WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
51 (1 << 2) | (1 << 3));
73 /* add idle wait as per freedesktop.org bug 24041 */ 52 /* add idle wait as per freedesktop.org bug 24041 */
74 if (r100_gui_wait_for_idle(rdev)) { 53 if (r100_gui_wait_for_idle(rdev)) {
75 printk(KERN_WARNING "Failed to wait GUI idle while " 54 printk(KERN_WARNING "Failed to wait GUI idle while "
@@ -97,17 +76,17 @@ void r420_pipes_init(struct radeon_device *rdev)
97 tmp = (7 << 1); 76 tmp = (7 << 1);
98 break; 77 break;
99 } 78 }
100 WREG32(0x42C8, (1 << num_pipes) - 1); 79 WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
101 /* Sub pixel 1/12 so we can have 4K rendering according to doc */ 80 /* Sub pixel 1/12 so we can have 4K rendering according to doc */
102 tmp |= (1 << 4) | (1 << 0); 81 tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
103 WREG32(0x4018, tmp); 82 WREG32(R300_GB_TILE_CONFIG, tmp);
104 if (r100_gui_wait_for_idle(rdev)) { 83 if (r100_gui_wait_for_idle(rdev)) {
105 printk(KERN_WARNING "Failed to wait GUI idle while " 84 printk(KERN_WARNING "Failed to wait GUI idle while "
106 "programming pipes. Bad things might happen.\n"); 85 "programming pipes. Bad things might happen.\n");
107 } 86 }
108 87
109 tmp = RREG32(0x170C); 88 tmp = RREG32(R300_DST_PIPE_CONFIG);
110 WREG32(0x170C, tmp | (1 << 31)); 89 WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
111 90
112 WREG32(R300_RB2D_DSTCACHE_MODE, 91 WREG32(R300_RB2D_DSTCACHE_MODE,
113 RREG32(R300_RB2D_DSTCACHE_MODE) | 92 RREG32(R300_RB2D_DSTCACHE_MODE) |
@@ -348,13 +327,15 @@ int r420_init(struct radeon_device *rdev)
348 radeon_get_clock_info(rdev->ddev); 327 radeon_get_clock_info(rdev->ddev);
349 /* Initialize power management */ 328 /* Initialize power management */
350 radeon_pm_init(rdev); 329 radeon_pm_init(rdev);
351 /* Get vram informations */ 330 /* initialize AGP */
352 r300_vram_info(rdev); 331 if (rdev->flags & RADEON_IS_AGP) {
353 /* Initialize memory controller (also test AGP) */ 332 r = radeon_agp_init(rdev);
354 r = r420_mc_init(rdev); 333 if (r) {
355 if (r) { 334 radeon_agp_disable(rdev);
356 return r; 335 }
357 } 336 }
337 /* initialize memory controller */
338 r300_mc_init(rdev);
358 r420_debugfs(rdev); 339 r420_debugfs(rdev);
359 /* Fence driver */ 340 /* Fence driver */
360 r = radeon_fence_driver_init(rdev); 341 r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 74ad89bdf2b5..0cf2ad2a5585 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -717,54 +717,62 @@
717#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988 717#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
718 718
719#define AVIVO_DC_GPIO_HPD_A 0x7e94 719#define AVIVO_DC_GPIO_HPD_A 0x7e94
720
721#define AVIVO_GPIO_0 0x7e30
722#define AVIVO_GPIO_1 0x7e40
723#define AVIVO_GPIO_2 0x7e50
724#define AVIVO_GPIO_3 0x7e60
725
726#define AVIVO_DC_GPIO_HPD_Y 0x7e9c 720#define AVIVO_DC_GPIO_HPD_Y 0x7e9c
727 721
728#define AVIVO_I2C_STATUS 0x7d30 722#define AVIVO_DC_I2C_STATUS1 0x7d30
729# define AVIVO_I2C_STATUS_DONE (1 << 0) 723# define AVIVO_DC_I2C_DONE (1 << 0)
730# define AVIVO_I2C_STATUS_NACK (1 << 1) 724# define AVIVO_DC_I2C_NACK (1 << 1)
731# define AVIVO_I2C_STATUS_HALT (1 << 2) 725# define AVIVO_DC_I2C_HALT (1 << 2)
732# define AVIVO_I2C_STATUS_GO (1 << 3) 726# define AVIVO_DC_I2C_GO (1 << 3)
733# define AVIVO_I2C_STATUS_MASK 0x7 727#define AVIVO_DC_I2C_RESET 0x7d34
734/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe 728# define AVIVO_DC_I2C_SOFT_RESET (1 << 0)
735 * DONE? */ 729# define AVIVO_DC_I2C_ABORT (1 << 8)
736# define AVIVO_I2C_STATUS_CMD_RESET 0x7 730#define AVIVO_DC_I2C_CONTROL1 0x7d38
737# define AVIVO_I2C_STATUS_CMD_WAIT (1 << 3) 731# define AVIVO_DC_I2C_START (1 << 0)
738#define AVIVO_I2C_STOP 0x7d34 732# define AVIVO_DC_I2C_STOP (1 << 1)
739#define AVIVO_I2C_START_CNTL 0x7d38 733# define AVIVO_DC_I2C_RECEIVE (1 << 2)
740# define AVIVO_I2C_START (1 << 8) 734# define AVIVO_DC_I2C_EN (1 << 8)
741# define AVIVO_I2C_CONNECTOR0 (0 << 16) 735# define AVIVO_DC_I2C_PIN_SELECT(x) ((x) << 16)
742# define AVIVO_I2C_CONNECTOR1 (1 << 16) 736# define AVIVO_SEL_DDC1 0
743#define R520_I2C_START (1<<0) 737# define AVIVO_SEL_DDC2 1
744#define R520_I2C_STOP (1<<1) 738# define AVIVO_SEL_DDC3 2
745#define R520_I2C_RX (1<<2) 739#define AVIVO_DC_I2C_CONTROL2 0x7d3c
746#define R520_I2C_EN (1<<8) 740# define AVIVO_DC_I2C_ADDR_COUNT(x) ((x) << 0)
747#define R520_I2C_DDC1 (0<<16) 741# define AVIVO_DC_I2C_DATA_COUNT(x) ((x) << 8)
748#define R520_I2C_DDC2 (1<<16) 742#define AVIVO_DC_I2C_CONTROL3 0x7d40
749#define R520_I2C_DDC3 (2<<16) 743# define AVIVO_DC_I2C_DATA_DRIVE_EN (1 << 0)
750#define R520_I2C_DDC_MASK (3<<16) 744# define AVIVO_DC_I2C_DATA_DRIVE_SEL (1 << 1)
751#define AVIVO_I2C_CONTROL2 0x7d3c 745# define AVIVO_DC_I2C_CLK_DRIVE_EN (1 << 7)
752# define AVIVO_I2C_7D3C_SIZE_SHIFT 8 746# define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x) ((x) << 8)
753# define AVIVO_I2C_7D3C_SIZE_MASK (0xf << 8) 747# define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x) ((x) << 16)
754#define AVIVO_I2C_CONTROL3 0x7d40 748# define AVIVO_DC_I2C_TIME_LIMIT(x) ((x) << 24)
755/* Reading is done 4 bytes at a time: read the bottom 8 bits from 749#define AVIVO_DC_I2C_DATA 0x7d44
756 * 7d44, four times in a row. 750#define AVIVO_DC_I2C_INTERRUPT_CONTROL 0x7d48
757 * Writing is a little more complex. First write DATA with 751# define AVIVO_DC_I2C_INTERRUPT_STATUS (1 << 0)
758 * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic 752# define AVIVO_DC_I2C_INTERRUPT_AK (1 << 8)
759 * magic number, zz is, I think, the slave address, and yy is the byte 753# define AVIVO_DC_I2C_INTERRUPT_ENABLE (1 << 16)
760 * you want to write. */ 754#define AVIVO_DC_I2C_ARBITRATION 0x7d50
761#define AVIVO_I2C_DATA 0x7d44 755# define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C (1 << 0)
762#define R520_I2C_ADDR_COUNT_MASK (0x7) 756# define AVIVO_DC_I2C_SW_CAN_USE_I2C (1 << 1)
763#define R520_I2C_DATA_COUNT_SHIFT (8) 757# define AVIVO_DC_I2C_SW_DONE_USING_I2C (1 << 8)
764#define R520_I2C_DATA_COUNT_MASK (0xF00) 758# define AVIVO_DC_I2C_HW_NEEDS_I2C (1 << 9)
765#define AVIVO_I2C_CNTL 0x7d50 759# define AVIVO_DC_I2C_ABORT_HDCP_I2C (1 << 16)
766# define AVIVO_I2C_EN (1 << 0) 760# define AVIVO_DC_I2C_HW_USING_I2C (1 << 17)
767# define AVIVO_I2C_RESET (1 << 8) 761
762#define AVIVO_DC_GPIO_DDC1_MASK 0x7e40
763#define AVIVO_DC_GPIO_DDC1_A 0x7e44
764#define AVIVO_DC_GPIO_DDC1_EN 0x7e48
765#define AVIVO_DC_GPIO_DDC1_Y 0x7e4c
766
767#define AVIVO_DC_GPIO_DDC2_MASK 0x7e50
768#define AVIVO_DC_GPIO_DDC2_A 0x7e54
769#define AVIVO_DC_GPIO_DDC2_EN 0x7e58
770#define AVIVO_DC_GPIO_DDC2_Y 0x7e5c
771
772#define AVIVO_DC_GPIO_DDC3_MASK 0x7e60
773#define AVIVO_DC_GPIO_DDC3_A 0x7e64
774#define AVIVO_DC_GPIO_DDC3_EN 0x7e68
775#define AVIVO_DC_GPIO_DDC3_Y 0x7e6c
768 776
769#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc 777#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc
770# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4) 778# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4)
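
The reworked AVIVO DC I2C defines above replace the flat R520_* constants with parameterized field macros. Purely as an illustration of how they compose (the actual engine programming sequence is not part of this hunk and is assumed), a CONTROL1 word that enables the engine on DDC1 and starts a transfer could be built like this; the macro bodies are copied from the hunk so the sketch stays self-contained.

#include <stdio.h>

/* values as introduced in the hunk above */
#define AVIVO_DC_I2C_START          (1 << 0)
#define AVIVO_DC_I2C_EN             (1 << 8)
#define AVIVO_DC_I2C_PIN_SELECT(x)  ((x) << 16)
#define AVIVO_SEL_DDC1              0

int main(void)
{
    unsigned int control1 = AVIVO_DC_I2C_EN |
                            AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1) |
                            AVIVO_DC_I2C_START;

    printf("AVIVO_DC_I2C_CONTROL1 = 0x%08x\n", control1);
    return 0;
}
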
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index ddf5731eba0d..2b8a5dd13516 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -119,13 +119,15 @@ static void r520_vram_get_type(struct radeon_device *rdev)
119 rdev->mc.vram_width *= 2; 119 rdev->mc.vram_width *= 2;
120} 120}
121 121
122void r520_vram_info(struct radeon_device *rdev) 122void r520_mc_init(struct radeon_device *rdev)
123{ 123{
124 fixed20_12 a; 124 fixed20_12 a;
125 125
126 r520_vram_get_type(rdev); 126 r520_vram_get_type(rdev);
127
128 r100_vram_init_sizes(rdev); 127 r100_vram_init_sizes(rdev);
128 radeon_vram_location(rdev, &rdev->mc, 0);
129 if (!(rdev->flags & RADEON_IS_AGP))
130 radeon_gtt_location(rdev, &rdev->mc);
129 /* FIXME: we should enforce default clock in case GPU is not in 131 /* FIXME: we should enforce default clock in case GPU is not in
130 * default setup 132 * default setup
131 */ 133 */
@@ -267,12 +269,15 @@ int r520_init(struct radeon_device *rdev)
267 radeon_get_clock_info(rdev->ddev); 269 radeon_get_clock_info(rdev->ddev);
268 /* Initialize power management */ 270 /* Initialize power management */
269 radeon_pm_init(rdev); 271 radeon_pm_init(rdev);
270 /* Get vram informations */ 272 /* initialize AGP */
271 r520_vram_info(rdev); 273 if (rdev->flags & RADEON_IS_AGP) {
272 /* Initialize memory controller (also test AGP) */ 274 r = radeon_agp_init(rdev);
273 r = r420_mc_init(rdev); 275 if (r) {
274 if (r) 276 radeon_agp_disable(rdev);
275 return r; 277 }
278 }
279 /* initialize memory controller */
280 r520_mc_init(rdev);
276 rv515_debugfs(rdev); 281 rv515_debugfs(rdev);
277 /* Fence driver */ 282 /* Fence driver */
278 r = radeon_fence_driver_init(rdev); 283 r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2ffcf5a03551..c52290197292 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -353,23 +353,14 @@ void r600_hpd_fini(struct radeon_device *rdev)
353/* 353/*
354 * R600 PCIE GART 354 * R600 PCIE GART
355 */ 355 */
356int r600_gart_clear_page(struct radeon_device *rdev, int i)
357{
358 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
359 u64 pte;
360
361 if (i < 0 || i > rdev->gart.num_gpu_pages)
362 return -EINVAL;
363 pte = 0;
364 writeq(pte, ((void __iomem *)ptr) + (i * 8));
365 return 0;
366}
367
368void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) 356void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
369{ 357{
370 unsigned i; 358 unsigned i;
371 u32 tmp; 359 u32 tmp;
372 360
361 /* flush hdp cache so updates hit vram */
362 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
363
373 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12); 364 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
374 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12); 365 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
375 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); 366 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
@@ -416,6 +407,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
416 r = radeon_gart_table_vram_pin(rdev); 407 r = radeon_gart_table_vram_pin(rdev);
417 if (r) 408 if (r)
418 return r; 409 return r;
410 radeon_gart_restore(rdev);
419 411
420 /* Setup L2 cache */ 412 /* Setup L2 cache */
421 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 413 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -619,6 +611,68 @@ static void r600_mc_program(struct radeon_device *rdev)
619 rv515_vga_render_disable(rdev); 611 rv515_vga_render_disable(rdev);
620} 612}
621 613
614/**
615 * r600_vram_gtt_location - try to find VRAM & GTT location
 616 * @rdev: radeon device structure holding all necessary information
 617 * @mc: memory controller structure holding memory information
 618 *
 619 * This function tries to place VRAM at the same address it occupies in
 620 * the CPU (PCI) address space, as some GPUs misbehave when VRAM is
 621 * reprogrammed to a different address.
 622 *
 623 * If there is not enough space to fit the unmappable VRAM after the
 624 * aperture, then we limit the VRAM size to the aperture.
 625 *
 626 * If we are using AGP, then place VRAM adjacent to the AGP aperture; the
 627 * two need to be contiguous from the GPU's point of view so that the GPU
 628 * can be programmed to catch accesses outside them (weird GPU policy, see ??).
 629 *
 630 * This function never fails; the worst case is limiting VRAM or GTT.
 631 *
 632 * Note: GTT start, end, and size should be initialized before calling
 633 * this function on an AGP platform.
634 */
635void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
636{
637 u64 size_bf, size_af;
638
639 if (mc->mc_vram_size > 0xE0000000) {
640 /* leave room for at least 512M GTT */
641 dev_warn(rdev->dev, "limiting VRAM\n");
642 mc->real_vram_size = 0xE0000000;
643 mc->mc_vram_size = 0xE0000000;
644 }
645 if (rdev->flags & RADEON_IS_AGP) {
646 size_bf = mc->gtt_start;
647 size_af = 0xFFFFFFFF - mc->gtt_end + 1;
648 if (size_bf > size_af) {
649 if (mc->mc_vram_size > size_bf) {
650 dev_warn(rdev->dev, "limiting VRAM\n");
651 mc->real_vram_size = size_bf;
652 mc->mc_vram_size = size_bf;
653 }
654 mc->vram_start = mc->gtt_start - mc->mc_vram_size;
655 } else {
656 if (mc->mc_vram_size > size_af) {
657 dev_warn(rdev->dev, "limiting VRAM\n");
658 mc->real_vram_size = size_af;
659 mc->mc_vram_size = size_af;
660 }
661 mc->vram_start = mc->gtt_end;
662 }
663 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
664 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
665 mc->mc_vram_size >> 20, mc->vram_start,
666 mc->vram_end, mc->real_vram_size >> 20);
667 } else {
668 u64 base = 0;
669 if (rdev->flags & RADEON_IS_IGP)
670 base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
671 radeon_vram_location(rdev, &rdev->mc, base);
672 radeon_gtt_location(rdev, mc);
673 }
674}
675
622int r600_mc_init(struct radeon_device *rdev) 676int r600_mc_init(struct radeon_device *rdev)
623{ 677{
624 fixed20_12 a; 678 fixed20_12 a;
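
For the AGP case, the r600_vram_gtt_location() helper added above measures the space below the aperture (size_bf) and above it (size_af), places VRAM on the larger side, and clamps VRAM if it does not fit; the non-AGP case falls back to the generic radeon_vram_location()/radeon_gtt_location() helpers. A worked example of the AGP branch with hypothetical numbers, mirroring the arithmetic in the hunk:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t gtt_start = 0xD0000000ull;                  /* hypothetical 512 MiB AGP aperture */
    uint64_t gtt_end   = gtt_start + (512ull << 20) - 1;
    uint64_t vram_size = 1ull << 30;                     /* 1 GiB of VRAM */

    uint64_t size_bf = gtt_start;                        /* room below the aperture */
    uint64_t size_af = 0xFFFFFFFFull - gtt_end + 1;      /* room above it, as computed in the hunk */
    uint64_t vram_start;

    if (size_bf > size_af) {
        if (vram_size > size_bf)
            vram_size = size_bf;                         /* "limiting VRAM" */
        vram_start = gtt_start - vram_size;              /* just below the aperture */
    } else {
        if (vram_size > size_af)
            vram_size = size_af;
        vram_start = gtt_end;                            /* right after it, as in the hunk */
    }

    printf("VRAM 0x%llx - 0x%llx (%llu MiB)\n",
           (unsigned long long)vram_start,
           (unsigned long long)(vram_start + vram_size - 1),
           (unsigned long long)(vram_size >> 20));
    return 0;
}
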
@@ -658,75 +712,21 @@ int r600_mc_init(struct radeon_device *rdev)
658 /* Setup GPU memory space */ 712 /* Setup GPU memory space */
659 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 713 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
660 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 714 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
661 715 rdev->mc.visible_vram_size = rdev->mc.aper_size;
662 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) 716 /* FIXME remove this once we support unmappable VRAM */
717 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
663 rdev->mc.mc_vram_size = rdev->mc.aper_size; 718 rdev->mc.mc_vram_size = rdev->mc.aper_size;
664
665 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
666 rdev->mc.real_vram_size = rdev->mc.aper_size; 719 rdev->mc.real_vram_size = rdev->mc.aper_size;
667
668 if (rdev->flags & RADEON_IS_AGP) {
669 /* gtt_size is setup by radeon_agp_init */
670 rdev->mc.gtt_location = rdev->mc.agp_base;
671 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
672 /* Try to put vram before or after AGP because we
673 * we want SYSTEM_APERTURE to cover both VRAM and
674 * AGP so that GPU can catch out of VRAM/AGP access
675 */
676 if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
677 /* Enough place before */
678 rdev->mc.vram_location = rdev->mc.gtt_location -
679 rdev->mc.mc_vram_size;
680 } else if (tmp > rdev->mc.mc_vram_size) {
681 /* Enough place after */
682 rdev->mc.vram_location = rdev->mc.gtt_location +
683 rdev->mc.gtt_size;
684 } else {
685 /* Try to setup VRAM then AGP might not
686 * not work on some card
687 */
688 rdev->mc.vram_location = 0x00000000UL;
689 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
690 }
691 } else {
692 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
693 rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
694 0xFFFF) << 24;
695 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
696 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
697 /* Enough place after vram */
698 rdev->mc.gtt_location = tmp;
699 } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
700 /* Enough place before vram */
701 rdev->mc.gtt_location = 0;
702 } else {
703 /* Not enough place after or before shrink
704 * gart size
705 */
706 if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
707 rdev->mc.gtt_location = 0;
708 rdev->mc.gtt_size = rdev->mc.vram_location;
709 } else {
710 rdev->mc.gtt_location = tmp;
711 rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
712 }
713 }
714 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
715 } 720 }
716 rdev->mc.vram_start = rdev->mc.vram_location; 721 r600_vram_gtt_location(rdev, &rdev->mc);
717 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
718 rdev->mc.gtt_start = rdev->mc.gtt_location;
719 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
720 /* FIXME: we should enforce default clock in case GPU is not in 722 /* FIXME: we should enforce default clock in case GPU is not in
721 * default setup 723 * default setup
722 */ 724 */
723 a.full = rfixed_const(100); 725 a.full = rfixed_const(100);
724 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); 726 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
725 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 727 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
726
727 if (rdev->flags & RADEON_IS_IGP) 728 if (rdev->flags & RADEON_IS_IGP)
728 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 729 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
729
730 return 0; 730 return 0;
731} 731}
732 732
@@ -981,6 +981,9 @@ void r600_gpu_init(struct radeon_device *rdev)
981{ 981{
982 u32 tiling_config; 982 u32 tiling_config;
983 u32 ramcfg; 983 u32 ramcfg;
984 u32 backend_map;
985 u32 cc_rb_backend_disable;
986 u32 cc_gc_shader_pipe_config;
984 u32 tmp; 987 u32 tmp;
985 int i, j; 988 int i, j;
986 u32 sq_config; 989 u32 sq_config;
@@ -1090,8 +1093,11 @@ void r600_gpu_init(struct radeon_device *rdev)
1090 default: 1093 default:
1091 break; 1094 break;
1092 } 1095 }
1096 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1097 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1093 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 1098 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1094 tiling_config |= GROUP_SIZE(0); 1099 tiling_config |= GROUP_SIZE(0);
1100 rdev->config.r600.tiling_group_size = 256;
1095 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; 1101 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1096 if (tmp > 3) { 1102 if (tmp > 3) {
1097 tiling_config |= ROW_TILING(3); 1103 tiling_config |= ROW_TILING(3);
@@ -1101,24 +1107,33 @@ void r600_gpu_init(struct radeon_device *rdev)
1101 tiling_config |= SAMPLE_SPLIT(tmp); 1107 tiling_config |= SAMPLE_SPLIT(tmp);
1102 } 1108 }
1103 tiling_config |= BANK_SWAPS(1); 1109 tiling_config |= BANK_SWAPS(1);
1104 tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes, 1110
1105 rdev->config.r600.max_backends, 1111 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1106 (0xff << rdev->config.r600.max_backends) & 0xff); 1112 cc_rb_backend_disable |=
1107 tiling_config |= BACKEND_MAP(tmp); 1113 BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1114
1115 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1116 cc_gc_shader_pipe_config |=
1117 INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
1118 cc_gc_shader_pipe_config |=
1119 INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
1120
1121 backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
1122 (R6XX_MAX_BACKENDS -
1123 r600_count_pipe_bits((cc_rb_backend_disable &
1124 R6XX_MAX_BACKENDS_MASK) >> 16)),
1125 (cc_rb_backend_disable >> 16));
1126
1127 tiling_config |= BACKEND_MAP(backend_map);
1108 WREG32(GB_TILING_CONFIG, tiling_config); 1128 WREG32(GB_TILING_CONFIG, tiling_config);
1109 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 1129 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1110 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); 1130 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1111 1131
1112 tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1113 WREG32(CC_RB_BACKEND_DISABLE, tmp);
1114
1115 /* Setup pipes */ 1132 /* Setup pipes */
1116 tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK); 1133 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1117 tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK); 1134 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1118 WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
1119 WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
1120 1135
1121 tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK); 1136 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1122 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 1137 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1123 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); 1138 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1124 1139
@@ -1783,12 +1798,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
1783 struct radeon_fence *fence) 1798 struct radeon_fence *fence)
1784{ 1799{
1785 /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */ 1800 /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */
1801
1802 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
1803 radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
1804 /* wait for 3D idle clean */
1805 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1806 radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
1807 radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
1786 /* Emit fence sequence & fire IRQ */ 1808 /* Emit fence sequence & fire IRQ */
1787 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1809 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1788 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 1810 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
1789 radeon_ring_write(rdev, fence->seq); 1811 radeon_ring_write(rdev, fence->seq);
1790 radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
1791 radeon_ring_write(rdev, 1);
1792 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ 1812 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
1793 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); 1813 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
1794 radeon_ring_write(rdev, RB_INT_STAT); 1814 radeon_ring_write(rdev, RB_INT_STAT);
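[Editor's note] For reference, the reworked r600_fence_ring_emit() above now takes 10 ring dwords instead of 7: 2 for the EVENT_WRITE cache flush/invalidate, 3 for the WAIT_UNTIL config write (3D idle clean), 3 for the fence scratch write and 2 for the interrupt packet (2 + 3 + 3 + 2 = 10), while the old HDP coherency-flush packet is dropped. This is why the fence-emit ring budget in r600_blit_kms.c further down is raised from 7 to 10 dwords.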
@@ -2745,6 +2765,7 @@ restart_ih:
2745 case 0: /* D1 vblank */ 2765 case 0: /* D1 vblank */
2746 if (disp_int & LB_D1_VBLANK_INTERRUPT) { 2766 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
2747 drm_handle_vblank(rdev->ddev, 0); 2767 drm_handle_vblank(rdev->ddev, 0);
2768 wake_up(&rdev->irq.vblank_queue);
2748 disp_int &= ~LB_D1_VBLANK_INTERRUPT; 2769 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2749 DRM_DEBUG("IH: D1 vblank\n"); 2770 DRM_DEBUG("IH: D1 vblank\n");
2750 } 2771 }
@@ -2765,6 +2786,7 @@ restart_ih:
2765 case 0: /* D2 vblank */ 2786 case 0: /* D2 vblank */
2766 if (disp_int & LB_D2_VBLANK_INTERRUPT) { 2787 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
2767 drm_handle_vblank(rdev->ddev, 1); 2788 drm_handle_vblank(rdev->ddev, 1);
2789 wake_up(&rdev->irq.vblank_queue);
2768 disp_int &= ~LB_D2_VBLANK_INTERRUPT; 2790 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
2769 DRM_DEBUG("IH: D2 vblank\n"); 2791 DRM_DEBUG("IH: D2 vblank\n");
2770 } 2792 }
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 0dcb6904c4ff..db928016d034 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
35 */ 35 */
36static int r600_audio_chipset_supported(struct radeon_device *rdev) 36static int r600_audio_chipset_supported(struct radeon_device *rdev)
37{ 37{
38 return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710) 38 return rdev->family >= CHIP_R600
39 || rdev->family == CHIP_RS600 39 || rdev->family == CHIP_RS600
40 || rdev->family == CHIP_RS690 40 || rdev->family == CHIP_RS690
41 || rdev->family == CHIP_RS740; 41 || rdev->family == CHIP_RS740;
@@ -147,15 +147,23 @@ static void r600_audio_update_hdmi(unsigned long param)
147} 147}
148 148
149/* 149/*
150 * turn on/off audio engine
151 */
152static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
153{
 154	DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
155 WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
156}
157
158/*
150 * initialize the audio vars and register the update timer 159 * initialize the audio vars and register the update timer
151 */ 160 */
152int r600_audio_init(struct radeon_device *rdev) 161int r600_audio_init(struct radeon_device *rdev)
153{ 162{
154 if (!r600_audio_chipset_supported(rdev)) 163 if (!radeon_audio || !r600_audio_chipset_supported(rdev))
155 return 0; 164 return 0;
156 165
157 DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling"); 166 r600_audio_engine_enable(rdev, true);
158 WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
159 167
160 rdev->audio_channels = -1; 168 rdev->audio_channels = -1;
161 rdev->audio_rate = -1; 169 rdev->audio_rate = -1;
@@ -258,9 +266,10 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
258 */ 266 */
259void r600_audio_fini(struct radeon_device *rdev) 267void r600_audio_fini(struct radeon_device *rdev)
260{ 268{
261 if (!r600_audio_chipset_supported(rdev)) 269 if (!radeon_audio || !r600_audio_chipset_supported(rdev))
262 return; 270 return;
263 271
264 del_timer(&rdev->audio_timer); 272 del_timer(&rdev->audio_timer);
265 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); 273
274 r600_audio_engine_enable(rdev, false);
266} 275}
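[Editor's note] The enable/disable poke is now centralized in r600_audio_engine_enable() and gated on the radeon_audio module parameter in both init and fini. A minimal sketch of the masked register write it relies on, assuming WREG32_P(reg, val, mask) keeps the bits selected by mask and replaces the rest (helper name and exact semantics assumed, not the driver's macro):

#include <stdint.h>

/* sketch of a WREG32_P-style read-modify-write: 'preserve' selects bits to keep */
static uint32_t masked_write(uint32_t current, uint32_t val, uint32_t preserve)
{
	return (current & preserve) | (val & ~preserve);
}

/* enable:  masked_write(reg, 0x81000000, ~0x81000000u) sets bits 31 and 24
 * disable: masked_write(reg, 0x00000000, ~0x81000000u) clears them */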
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 5ea432347589..f4fb88ece2bb 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -49,7 +49,7 @@ set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64
49 RING_LOCALS; 49 RING_LOCALS;
50 DRM_DEBUG("\n"); 50 DRM_DEBUG("\n");
51 51
52 h = (h + 7) & ~7; 52 h = ALIGN(h, 8);
53 if (h < 8) 53 if (h < 8)
54 h = 8; 54 h = 8;
55 55
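[Editor's note] The open-coded rounding above is swapped for the kernel's ALIGN() helper; for a power-of-two alignment the two forms compute the same value. A standalone sketch of the identity (align_up is a stand-in for the macro, not the kernel definition):

#include <assert.h>

/* stand-in for ALIGN(x, a) with a power-of-two 'a' */
static unsigned int align_up(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned int h;

	/* ALIGN(h, 8) and the old (h + 7) & ~7 agree for every h */
	for (h = 0; h < 1024; h++)
		assert(align_up(h, 8) == ((h + 7) & ~7u));
	return 0;
}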
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 446b765ac72a..f6c6c77db7e0 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -25,7 +25,7 @@ set_render_target(struct radeon_device *rdev, int format,
25 u32 cb_color_info; 25 u32 cb_color_info;
26 int pitch, slice; 26 int pitch, slice;
27 27
28 h = (h + 7) & ~7; 28 h = ALIGN(h, 8);
29 if (h < 8) 29 if (h < 8)
30 h = 8; 30 h = 8;
31 31
@@ -396,15 +396,13 @@ set_default_state(struct radeon_device *rdev)
396 NUM_ES_STACK_ENTRIES(num_es_stack_entries)); 396 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
397 397
398 /* emit an IB pointing at default state */ 398 /* emit an IB pointing at default state */
399 dwords = (rdev->r600_blit.state_len + 0xf) & ~0xf; 399 dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
400 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; 400 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
401 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 401 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
402 radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); 402 radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
403 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); 403 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
404 radeon_ring_write(rdev, dwords); 404 radeon_ring_write(rdev, dwords);
405 405
406 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
407 radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
408 /* SQ config */ 406 /* SQ config */
409 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6)); 407 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
410 radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 408 radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -578,9 +576,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
578 ring_size = num_loops * dwords_per_loop; 576 ring_size = num_loops * dwords_per_loop;
579 /* set default + shaders */ 577 /* set default + shaders */
580 ring_size += 40; /* shaders + def state */ 578 ring_size += 40; /* shaders + def state */
581 ring_size += 7; /* fence emit for VB IB */ 579 ring_size += 10; /* fence emit for VB IB */
582 ring_size += 5; /* done copy */ 580 ring_size += 5; /* done copy */
583 ring_size += 7; /* fence emit for done copy */ 581 ring_size += 10; /* fence emit for done copy */
584 r = radeon_ring_lock(rdev, ring_size); 582 r = radeon_ring_lock(rdev, ring_size);
585 if (r) 583 if (r)
586 return r; 584 return r;
@@ -594,13 +592,6 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
594{ 592{
595 int r; 593 int r;
596 594
597 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
598 radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
599 /* wait for 3D idle clean */
600 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
601 radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
602 radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
603
604 if (rdev->r600_blit.vb_ib) 595 if (rdev->r600_blit.vb_ib)
605 r600_vb_ib_put(rdev); 596 r600_vb_ib_put(rdev);
606 597
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index d745e815c2e8..a112c59f9d82 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -9,11 +9,6 @@ const u32 r6xx_default_state[] =
9 0xc0012800, 9 0xc0012800,
10 0x80000000, 10 0x80000000,
11 0x80000000, 11 0x80000000,
12 0xc0004600,
13 0x00000016,
14 0xc0016800,
15 0x00000010,
16 0x00028000,
17 0xc0016800, 12 0xc0016800,
18 0x00000010, 13 0x00000010,
19 0x00008000, 14 0x00008000,
@@ -531,11 +526,6 @@ const u32 r7xx_default_state[] =
531 0xc0012800, 526 0xc0012800,
532 0x80000000, 527 0x80000000,
533 0x80000000, 528 0x80000000,
534 0xc0004600,
535 0x00000016,
536 0xc0016800,
537 0x00000010,
538 0x00028000,
539 0xc0016800, 529 0xc0016800,
540 0x00000010, 530 0x00000010,
541 0x00008000, 531 0x00008000,
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 75bcf35a0931..40416c068d9f 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -734,8 +734,8 @@ static void r600_gfx_init(struct drm_device *dev,
734 u32 hdp_host_path_cntl; 734 u32 hdp_host_path_cntl;
735 u32 backend_map; 735 u32 backend_map;
736 u32 gb_tiling_config = 0; 736 u32 gb_tiling_config = 0;
737 u32 cc_rb_backend_disable = 0; 737 u32 cc_rb_backend_disable;
738 u32 cc_gc_shader_pipe_config = 0; 738 u32 cc_gc_shader_pipe_config;
739 u32 ramcfg; 739 u32 ramcfg;
740 740
741 /* setup chip specs */ 741 /* setup chip specs */
@@ -857,29 +857,44 @@ static void r600_gfx_init(struct drm_device *dev,
857 857
858 gb_tiling_config |= R600_BANK_SWAPS(1); 858 gb_tiling_config |= R600_BANK_SWAPS(1);
859 859
860 backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, 860 cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
861 dev_priv->r600_max_backends, 861 cc_rb_backend_disable |=
862 (0xff << dev_priv->r600_max_backends) & 0xff); 862 R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
863 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
864 863
865 cc_gc_shader_pipe_config = 864 cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
865 cc_gc_shader_pipe_config |=
866 R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK); 866 R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK);
867 cc_gc_shader_pipe_config |= 867 cc_gc_shader_pipe_config |=
868 R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK); 868 R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK);
869 869
870 cc_rb_backend_disable = 870 backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
871 R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK); 871 (R6XX_MAX_BACKENDS -
872 r600_count_pipe_bits((cc_rb_backend_disable &
873 R6XX_MAX_BACKENDS_MASK) >> 16)),
874 (cc_rb_backend_disable >> 16));
875 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
872 876
873 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config); 877 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
874 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 878 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
875 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 879 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
880 if (gb_tiling_config & 0xc0) {
881 dev_priv->r600_group_size = 512;
882 } else {
883 dev_priv->r600_group_size = 256;
884 }
885 dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
886 if (gb_tiling_config & 0x30) {
887 dev_priv->r600_nbanks = 8;
888 } else {
889 dev_priv->r600_nbanks = 4;
890 }
876 891
877 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 892 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
878 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 893 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
879 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 894 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
880 895
881 num_qd_pipes = 896 num_qd_pipes =
882 R6XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK); 897 R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
883 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK); 898 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
884 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK); 899 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
885 900
@@ -1151,7 +1166,8 @@ static void r600_gfx_init(struct drm_device *dev,
1151 1166
1152} 1167}
1153 1168
1154static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, 1169static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv,
1170 u32 num_tile_pipes,
1155 u32 num_backends, 1171 u32 num_backends,
1156 u32 backend_disable_mask) 1172 u32 backend_disable_mask)
1157{ 1173{
@@ -1162,6 +1178,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1162 u32 swizzle_pipe[R7XX_MAX_PIPES]; 1178 u32 swizzle_pipe[R7XX_MAX_PIPES];
1163 u32 cur_backend; 1179 u32 cur_backend;
1164 u32 i; 1180 u32 i;
1181 bool force_no_swizzle;
1165 1182
1166 if (num_tile_pipes > R7XX_MAX_PIPES) 1183 if (num_tile_pipes > R7XX_MAX_PIPES)
1167 num_tile_pipes = R7XX_MAX_PIPES; 1184 num_tile_pipes = R7XX_MAX_PIPES;
@@ -1191,6 +1208,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1191 if (enabled_backends_count != num_backends) 1208 if (enabled_backends_count != num_backends)
1192 num_backends = enabled_backends_count; 1209 num_backends = enabled_backends_count;
1193 1210
1211 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1212 case CHIP_RV770:
1213 case CHIP_RV730:
1214 force_no_swizzle = false;
1215 break;
1216 case CHIP_RV710:
1217 case CHIP_RV740:
1218 default:
1219 force_no_swizzle = true;
1220 break;
1221 }
1222
1194 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); 1223 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
1195 switch (num_tile_pipes) { 1224 switch (num_tile_pipes) {
1196 case 1: 1225 case 1:
@@ -1201,49 +1230,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1201 swizzle_pipe[1] = 1; 1230 swizzle_pipe[1] = 1;
1202 break; 1231 break;
1203 case 3: 1232 case 3:
1204 swizzle_pipe[0] = 0; 1233 if (force_no_swizzle) {
1205 swizzle_pipe[1] = 2; 1234 swizzle_pipe[0] = 0;
1206 swizzle_pipe[2] = 1; 1235 swizzle_pipe[1] = 1;
1236 swizzle_pipe[2] = 2;
1237 } else {
1238 swizzle_pipe[0] = 0;
1239 swizzle_pipe[1] = 2;
1240 swizzle_pipe[2] = 1;
1241 }
1207 break; 1242 break;
1208 case 4: 1243 case 4:
1209 swizzle_pipe[0] = 0; 1244 if (force_no_swizzle) {
1210 swizzle_pipe[1] = 2; 1245 swizzle_pipe[0] = 0;
1211 swizzle_pipe[2] = 3; 1246 swizzle_pipe[1] = 1;
1212 swizzle_pipe[3] = 1; 1247 swizzle_pipe[2] = 2;
1248 swizzle_pipe[3] = 3;
1249 } else {
1250 swizzle_pipe[0] = 0;
1251 swizzle_pipe[1] = 2;
1252 swizzle_pipe[2] = 3;
1253 swizzle_pipe[3] = 1;
1254 }
1213 break; 1255 break;
1214 case 5: 1256 case 5:
1215 swizzle_pipe[0] = 0; 1257 if (force_no_swizzle) {
1216 swizzle_pipe[1] = 2; 1258 swizzle_pipe[0] = 0;
1217 swizzle_pipe[2] = 4; 1259 swizzle_pipe[1] = 1;
1218 swizzle_pipe[3] = 1; 1260 swizzle_pipe[2] = 2;
1219 swizzle_pipe[4] = 3; 1261 swizzle_pipe[3] = 3;
1262 swizzle_pipe[4] = 4;
1263 } else {
1264 swizzle_pipe[0] = 0;
1265 swizzle_pipe[1] = 2;
1266 swizzle_pipe[2] = 4;
1267 swizzle_pipe[3] = 1;
1268 swizzle_pipe[4] = 3;
1269 }
1220 break; 1270 break;
1221 case 6: 1271 case 6:
1222 swizzle_pipe[0] = 0; 1272 if (force_no_swizzle) {
1223 swizzle_pipe[1] = 2; 1273 swizzle_pipe[0] = 0;
1224 swizzle_pipe[2] = 4; 1274 swizzle_pipe[1] = 1;
1225 swizzle_pipe[3] = 5; 1275 swizzle_pipe[2] = 2;
1226 swizzle_pipe[4] = 3; 1276 swizzle_pipe[3] = 3;
1227 swizzle_pipe[5] = 1; 1277 swizzle_pipe[4] = 4;
1278 swizzle_pipe[5] = 5;
1279 } else {
1280 swizzle_pipe[0] = 0;
1281 swizzle_pipe[1] = 2;
1282 swizzle_pipe[2] = 4;
1283 swizzle_pipe[3] = 5;
1284 swizzle_pipe[4] = 3;
1285 swizzle_pipe[5] = 1;
1286 }
1228 break; 1287 break;
1229 case 7: 1288 case 7:
1230 swizzle_pipe[0] = 0; 1289 if (force_no_swizzle) {
1231 swizzle_pipe[1] = 2; 1290 swizzle_pipe[0] = 0;
1232 swizzle_pipe[2] = 4; 1291 swizzle_pipe[1] = 1;
1233 swizzle_pipe[3] = 6; 1292 swizzle_pipe[2] = 2;
1234 swizzle_pipe[4] = 3; 1293 swizzle_pipe[3] = 3;
1235 swizzle_pipe[5] = 1; 1294 swizzle_pipe[4] = 4;
1236 swizzle_pipe[6] = 5; 1295 swizzle_pipe[5] = 5;
1296 swizzle_pipe[6] = 6;
1297 } else {
1298 swizzle_pipe[0] = 0;
1299 swizzle_pipe[1] = 2;
1300 swizzle_pipe[2] = 4;
1301 swizzle_pipe[3] = 6;
1302 swizzle_pipe[4] = 3;
1303 swizzle_pipe[5] = 1;
1304 swizzle_pipe[6] = 5;
1305 }
1237 break; 1306 break;
1238 case 8: 1307 case 8:
1239 swizzle_pipe[0] = 0; 1308 if (force_no_swizzle) {
1240 swizzle_pipe[1] = 2; 1309 swizzle_pipe[0] = 0;
1241 swizzle_pipe[2] = 4; 1310 swizzle_pipe[1] = 1;
1242 swizzle_pipe[3] = 6; 1311 swizzle_pipe[2] = 2;
1243 swizzle_pipe[4] = 3; 1312 swizzle_pipe[3] = 3;
1244 swizzle_pipe[5] = 1; 1313 swizzle_pipe[4] = 4;
1245 swizzle_pipe[6] = 7; 1314 swizzle_pipe[5] = 5;
1246 swizzle_pipe[7] = 5; 1315 swizzle_pipe[6] = 6;
1316 swizzle_pipe[7] = 7;
1317 } else {
1318 swizzle_pipe[0] = 0;
1319 swizzle_pipe[1] = 2;
1320 swizzle_pipe[2] = 4;
1321 swizzle_pipe[3] = 6;
1322 swizzle_pipe[4] = 3;
1323 swizzle_pipe[5] = 1;
1324 swizzle_pipe[6] = 7;
1325 swizzle_pipe[7] = 5;
1326 }
1247 break; 1327 break;
1248 } 1328 }
1249 1329
@@ -1264,8 +1344,10 @@ static void r700_gfx_init(struct drm_device *dev,
1264 drm_radeon_private_t *dev_priv) 1344 drm_radeon_private_t *dev_priv)
1265{ 1345{
1266 int i, j, num_qd_pipes; 1346 int i, j, num_qd_pipes;
1347 u32 ta_aux_cntl;
1267 u32 sx_debug_1; 1348 u32 sx_debug_1;
1268 u32 smx_dc_ctl0; 1349 u32 smx_dc_ctl0;
1350 u32 db_debug3;
1269 u32 num_gs_verts_per_thread; 1351 u32 num_gs_verts_per_thread;
1270 u32 vgt_gs_per_es; 1352 u32 vgt_gs_per_es;
1271 u32 gs_prim_buffer_depth = 0; 1353 u32 gs_prim_buffer_depth = 0;
@@ -1276,8 +1358,8 @@ static void r700_gfx_init(struct drm_device *dev,
1276 u32 sq_dyn_gpr_size_simd_ab_0; 1358 u32 sq_dyn_gpr_size_simd_ab_0;
1277 u32 backend_map; 1359 u32 backend_map;
1278 u32 gb_tiling_config = 0; 1360 u32 gb_tiling_config = 0;
1279 u32 cc_rb_backend_disable = 0; 1361 u32 cc_rb_backend_disable;
1280 u32 cc_gc_shader_pipe_config = 0; 1362 u32 cc_gc_shader_pipe_config;
1281 u32 mc_arb_ramcfg; 1363 u32 mc_arb_ramcfg;
1282 u32 db_debug4; 1364 u32 db_debug4;
1283 1365
@@ -1428,38 +1510,51 @@ static void r700_gfx_init(struct drm_device *dev,
1428 1510
1429 gb_tiling_config |= R600_BANK_SWAPS(1); 1511 gb_tiling_config |= R600_BANK_SWAPS(1);
1430 1512
1431 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740) 1513 cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1432 backend_map = 0x28; 1514 cc_rb_backend_disable |=
1433 else 1515 R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
1434 backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
1435 dev_priv->r600_max_backends,
1436 (0xff << dev_priv->r600_max_backends) & 0xff);
1437 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
1438 1516
1439 cc_gc_shader_pipe_config = 1517 cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1518 cc_gc_shader_pipe_config |=
1440 R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK); 1519 R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK);
1441 cc_gc_shader_pipe_config |= 1520 cc_gc_shader_pipe_config |=
1442 R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK); 1521 R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK);
1443 1522
1444 cc_rb_backend_disable = 1523 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
1445 R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK); 1524 backend_map = 0x28;
1525 else
1526 backend_map = r700_get_tile_pipe_to_backend_map(dev_priv,
1527 dev_priv->r600_max_tile_pipes,
1528 (R7XX_MAX_BACKENDS -
1529 r600_count_pipe_bits((cc_rb_backend_disable &
1530 R7XX_MAX_BACKENDS_MASK) >> 16)),
1531 (cc_rb_backend_disable >> 16));
1532 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
1446 1533
1447 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config); 1534 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
1448 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 1535 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
1449 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 1536 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
1537 if (gb_tiling_config & 0xc0) {
1538 dev_priv->r600_group_size = 512;
1539 } else {
1540 dev_priv->r600_group_size = 256;
1541 }
1542 dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
1543 if (gb_tiling_config & 0x30) {
1544 dev_priv->r600_nbanks = 8;
1545 } else {
1546 dev_priv->r600_nbanks = 4;
1547 }
1450 1548
1451 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1549 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1452 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 1550 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1453 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1454 1551
1455 RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1552 RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1456 RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0); 1553 RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
1457 RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0); 1554 RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
1458 RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
1459 RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
1460 1555
1461 num_qd_pipes = 1556 num_qd_pipes =
1462 R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK); 1557 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
1463 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK); 1558 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
1464 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK); 1559 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
1465 1560
@@ -1469,10 +1564,8 @@ static void r700_gfx_init(struct drm_device *dev,
1469 1564
1470 RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30)); 1565 RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30));
1471 1566
1472 RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO | 1567 ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX);
1473 R600_SYNC_GRADIENT | 1568 RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO);
1474 R600_SYNC_WALKER |
1475 R600_SYNC_ALIGNER));
1476 1569
1477 sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1); 1570 sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1);
1478 sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS; 1571 sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS;
@@ -1483,14 +1576,28 @@ static void r700_gfx_init(struct drm_device *dev,
1483 smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1); 1576 smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1);
1484 RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0); 1577 RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0);
1485 1578
1486 RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) | 1579 if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740)
1487 R700_GS_FLUSH_CTL(4) | 1580 RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
1488 R700_ACK_FLUSH_CTL(3) | 1581 R700_GS_FLUSH_CTL(4) |
1489 R700_SYNC_FLUSH_CTL)); 1582 R700_ACK_FLUSH_CTL(3) |
1583 R700_SYNC_FLUSH_CTL));
1490 1584
1491 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770) 1585 db_debug3 = RADEON_READ(R700_DB_DEBUG3);
1492 RADEON_WRITE(R700_DB_DEBUG3, R700_DB_CLK_OFF_DELAY(0x1f)); 1586 db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f);
1493 else { 1587 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1588 case CHIP_RV770:
1589 case CHIP_RV740:
1590 db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f);
1591 break;
1592 case CHIP_RV710:
1593 case CHIP_RV730:
1594 default:
1595 db_debug3 |= R700_DB_CLK_OFF_DELAY(2);
1596 break;
1597 }
1598 RADEON_WRITE(R700_DB_DEBUG3, db_debug3);
1599
1600 if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) {
1494 db_debug4 = RADEON_READ(RV700_DB_DEBUG4); 1601 db_debug4 = RADEON_READ(RV700_DB_DEBUG4);
1495 db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER; 1602 db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER;
1496 RADEON_WRITE(RV700_DB_DEBUG4, db_debug4); 1603 RADEON_WRITE(RV700_DB_DEBUG4, db_debug4);
@@ -1519,10 +1626,10 @@ static void r700_gfx_init(struct drm_device *dev,
1519 R600_ALU_UPDATE_FIFO_HIWATER(0x8)); 1626 R600_ALU_UPDATE_FIFO_HIWATER(0x8));
1520 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1627 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1521 case CHIP_RV770: 1628 case CHIP_RV770:
1522 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
1523 break;
1524 case CHIP_RV730: 1629 case CHIP_RV730:
1525 case CHIP_RV710: 1630 case CHIP_RV710:
1631 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
1632 break;
1526 case CHIP_RV740: 1633 case CHIP_RV740:
1527 default: 1634 default:
1528 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4); 1635 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
@@ -2529,3 +2636,12 @@ out:
2529 mutex_unlock(&dev_priv->cs_mutex); 2636 mutex_unlock(&dev_priv->cs_mutex);
2530 return r; 2637 return r;
2531} 2638}
2639
2640void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size)
2641{
2642 struct drm_radeon_private *dev_priv = dev->dev_private;
2643
2644 *npipes = dev_priv->r600_npipes;
2645 *nbanks = dev_priv->r600_nbanks;
2646 *group_size = dev_priv->r600_group_size;
2647}
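[Editor's note] Both r600_gfx_init() and r700_gfx_init() above now record the pipe, bank and group-size configuration that the new r600_cs_legacy_get_tiling_conf() hands to the command-stream checker. A small sketch of the decode they perform on gb_tiling_config (it simply mirrors the code above; the driver has no such shared helper):

struct tiling_conf {
	unsigned int npipes;
	unsigned int nbanks;
	unsigned int group_size;
};

/* mirrors the gb_tiling_config decode in r600_gfx_init()/r700_gfx_init() */
static struct tiling_conf decode_gb_tiling_config(unsigned int gb_tiling_config)
{
	struct tiling_conf t;

	t.group_size = (gb_tiling_config & 0xc0) ? 512 : 256;
	t.npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
	t.nbanks = (gb_tiling_config & 0x30) ? 8 : 4;
	return t;
}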
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index e4c45ec16507..cd2c63bce501 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,6 +28,7 @@
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon.h" 29#include "radeon.h"
30#include "r600d.h" 30#include "r600d.h"
31#include "r600_reg_safe.h"
31 32
32static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 33static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
33 struct radeon_cs_reloc **cs_reloc); 34 struct radeon_cs_reloc **cs_reloc);
@@ -35,11 +36,313 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
35 struct radeon_cs_reloc **cs_reloc); 36 struct radeon_cs_reloc **cs_reloc);
36typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 37typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
37static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 38static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
39extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
40
38 41
39struct r600_cs_track { 42struct r600_cs_track {
 40	u32 cb_color0_base_last;	43	/* configuration we mirror so that we use the same code between kms/ums */
44 u32 group_size;
45 u32 nbanks;
46 u32 npipes;
 47	/* values we track */
48 u32 nsamples;
49 u32 cb_color_base_last[8];
50 struct radeon_bo *cb_color_bo[8];
51 u32 cb_color_bo_offset[8];
52 struct radeon_bo *cb_color_frag_bo[8];
53 struct radeon_bo *cb_color_tile_bo[8];
54 u32 cb_color_info[8];
55 u32 cb_color_size_idx[8];
56 u32 cb_target_mask;
57 u32 cb_shader_mask;
58 u32 cb_color_size[8];
59 u32 vgt_strmout_en;
60 u32 vgt_strmout_buffer_en;
61 u32 db_depth_control;
62 u32 db_depth_info;
63 u32 db_depth_size_idx;
64 u32 db_depth_view;
65 u32 db_depth_size;
66 u32 db_offset;
67 struct radeon_bo *db_bo;
41}; 68};
42 69
70static inline int r600_bpe_from_format(u32 *bpe, u32 format)
71{
72 switch (format) {
73 case V_038004_COLOR_8:
74 case V_038004_COLOR_4_4:
75 case V_038004_COLOR_3_3_2:
76 case V_038004_FMT_1:
77 *bpe = 1;
78 break;
79 case V_038004_COLOR_16:
80 case V_038004_COLOR_16_FLOAT:
81 case V_038004_COLOR_8_8:
82 case V_038004_COLOR_5_6_5:
83 case V_038004_COLOR_6_5_5:
84 case V_038004_COLOR_1_5_5_5:
85 case V_038004_COLOR_4_4_4_4:
86 case V_038004_COLOR_5_5_5_1:
87 *bpe = 2;
88 break;
89 case V_038004_FMT_8_8_8:
90 *bpe = 3;
91 break;
92 case V_038004_COLOR_32:
93 case V_038004_COLOR_32_FLOAT:
94 case V_038004_COLOR_16_16:
95 case V_038004_COLOR_16_16_FLOAT:
96 case V_038004_COLOR_8_24:
97 case V_038004_COLOR_8_24_FLOAT:
98 case V_038004_COLOR_24_8:
99 case V_038004_COLOR_24_8_FLOAT:
100 case V_038004_COLOR_10_11_11:
101 case V_038004_COLOR_10_11_11_FLOAT:
102 case V_038004_COLOR_11_11_10:
103 case V_038004_COLOR_11_11_10_FLOAT:
104 case V_038004_COLOR_2_10_10_10:
105 case V_038004_COLOR_8_8_8_8:
106 case V_038004_COLOR_10_10_10_2:
107 case V_038004_FMT_5_9_9_9_SHAREDEXP:
108 case V_038004_FMT_32_AS_8:
109 case V_038004_FMT_32_AS_8_8:
110 *bpe = 4;
111 break;
112 case V_038004_COLOR_X24_8_32_FLOAT:
113 case V_038004_COLOR_32_32:
114 case V_038004_COLOR_32_32_FLOAT:
115 case V_038004_COLOR_16_16_16_16:
116 case V_038004_COLOR_16_16_16_16_FLOAT:
117 *bpe = 8;
118 break;
119 case V_038004_FMT_16_16_16:
120 case V_038004_FMT_16_16_16_FLOAT:
121 *bpe = 6;
122 break;
123 case V_038004_FMT_32_32_32:
124 case V_038004_FMT_32_32_32_FLOAT:
125 *bpe = 12;
126 break;
127 case V_038004_COLOR_32_32_32_32:
128 case V_038004_COLOR_32_32_32_32_FLOAT:
129 *bpe = 16;
130 break;
131 case V_038004_FMT_GB_GR:
132 case V_038004_FMT_BG_RG:
133 case V_038004_COLOR_INVALID:
134 *bpe = 16;
135 return -EINVAL;
136 }
137 return 0;
138}
139
140static void r600_cs_track_init(struct r600_cs_track *track)
141{
142 int i;
143
144 for (i = 0; i < 8; i++) {
145 track->cb_color_base_last[i] = 0;
146 track->cb_color_size[i] = 0;
147 track->cb_color_size_idx[i] = 0;
148 track->cb_color_info[i] = 0;
149 track->cb_color_bo[i] = NULL;
150 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
151 }
152 track->cb_target_mask = 0xFFFFFFFF;
153 track->cb_shader_mask = 0xFFFFFFFF;
154 track->db_bo = NULL;
155 /* assume the biggest format and that htile is enabled */
156 track->db_depth_info = 7 | (1 << 25);
157 track->db_depth_view = 0xFFFFC000;
158 track->db_depth_size = 0xFFFFFFFF;
159 track->db_depth_size_idx = 0;
160 track->db_depth_control = 0xFFFFFFFF;
161}
162
163static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
164{
165 struct r600_cs_track *track = p->track;
166 u32 bpe = 0, pitch, slice_tile_max, size, tmp, height;
167 volatile u32 *ib = p->ib->ptr;
168
169 if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
 170	dev_warn(p->dev, "FMASK or CMASK buffers are not supported by this kernel\n");
171 return -EINVAL;
172 }
173 size = radeon_bo_size(track->cb_color_bo[i]);
174 if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
175 dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
176 __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
177 i, track->cb_color_info[i]);
178 return -EINVAL;
179 }
180 pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) << 3;
181 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
182 if (!pitch) {
183 dev_warn(p->dev, "%s:%d cb pitch (%d) for %d invalid (0x%08X)\n",
184 __func__, __LINE__, pitch, i, track->cb_color_size[i]);
185 return -EINVAL;
186 }
187 height = size / (pitch * bpe);
188 if (height > 8192)
189 height = 8192;
190 switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
191 case V_0280A0_ARRAY_LINEAR_GENERAL:
192 case V_0280A0_ARRAY_LINEAR_ALIGNED:
193 if (pitch & 0x3f) {
194 dev_warn(p->dev, "%s:%d cb pitch (%d x %d = %d) invalid\n",
195 __func__, __LINE__, pitch, bpe, pitch * bpe);
196 return -EINVAL;
197 }
198 if ((pitch * bpe) & (track->group_size - 1)) {
199 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
200 __func__, __LINE__, pitch);
201 return -EINVAL;
202 }
203 break;
204 case V_0280A0_ARRAY_1D_TILED_THIN1:
205 if ((pitch * 8 * bpe * track->nsamples) & (track->group_size - 1)) {
206 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
207 __func__, __LINE__, pitch);
208 return -EINVAL;
209 }
210 height &= ~0x7;
211 if (!height)
212 height = 8;
213 break;
214 case V_0280A0_ARRAY_2D_TILED_THIN1:
215 if (pitch & ((8 * track->nbanks) - 1)) {
216 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
217 __func__, __LINE__, pitch);
218 return -EINVAL;
219 }
220 tmp = pitch * 8 * bpe * track->nsamples;
221 tmp = tmp / track->nbanks;
222 if (tmp & (track->group_size - 1)) {
223 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
224 __func__, __LINE__, pitch);
225 return -EINVAL;
226 }
227 height &= ~((16 * track->npipes) - 1);
228 if (!height)
229 height = 16 * track->npipes;
230 break;
231 default:
232 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
233 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
234 track->cb_color_info[i]);
235 return -EINVAL;
236 }
237 /* check offset */
238 tmp = height * pitch;
239 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
 240	dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
241 return -EINVAL;
242 }
243 /* limit max tile */
244 tmp = (height * pitch) >> 6;
245 if (tmp < slice_tile_max)
246 slice_tile_max = tmp;
247 tmp = S_028060_PITCH_TILE_MAX((pitch >> 3) - 1) |
248 S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
249 ib[track->cb_color_size_idx[i]] = tmp;
250 return 0;
251}
252
253static int r600_cs_track_check(struct radeon_cs_parser *p)
254{
255 struct r600_cs_track *track = p->track;
256 u32 tmp;
257 int r, i;
258 volatile u32 *ib = p->ib->ptr;
259
 260	/* on legacy kernels we don't perform the advanced checks */
261 if (p->rdev == NULL)
262 return 0;
 263	/* we don't support stream output buffers yet */
264 if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
265 dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
266 return -EINVAL;
267 }
 268	/* check that we have a cb for each enabled target; we don't check
 269	 * shader_mask because it seems mesa isn't always setting it :(
270 */
271 tmp = track->cb_target_mask;
272 for (i = 0; i < 8; i++) {
273 if ((tmp >> (i * 4)) & 0xF) {
274 /* at least one component is enabled */
275 if (track->cb_color_bo[i] == NULL) {
276 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
277 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
278 return -EINVAL;
279 }
280 /* perform rewrite of CB_COLOR[0-7]_SIZE */
281 r = r600_cs_track_validate_cb(p, i);
282 if (r)
283 return r;
284 }
285 }
286 /* Check depth buffer */
287 if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
288 G_028800_Z_ENABLE(track->db_depth_control)) {
289 u32 nviews, bpe, ntiles;
290 if (track->db_bo == NULL) {
291 dev_warn(p->dev, "z/stencil with no depth buffer\n");
292 return -EINVAL;
293 }
294 if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
295 dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
296 return -EINVAL;
297 }
298 switch (G_028010_FORMAT(track->db_depth_info)) {
299 case V_028010_DEPTH_16:
300 bpe = 2;
301 break;
302 case V_028010_DEPTH_X8_24:
303 case V_028010_DEPTH_8_24:
304 case V_028010_DEPTH_X8_24_FLOAT:
305 case V_028010_DEPTH_8_24_FLOAT:
306 case V_028010_DEPTH_32_FLOAT:
307 bpe = 4;
308 break;
309 case V_028010_DEPTH_X24_8_32_FLOAT:
310 bpe = 8;
311 break;
312 default:
313 dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
314 return -EINVAL;
315 }
316 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
317 if (!track->db_depth_size_idx) {
318 dev_warn(p->dev, "z/stencil buffer size not set\n");
319 return -EINVAL;
320 }
 321	printk_once(KERN_WARNING "You have old & broken userspace, please consider updating mesa\n");
322 tmp = radeon_bo_size(track->db_bo) - track->db_offset;
323 tmp = (tmp / bpe) >> 6;
324 if (!tmp) {
325 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
326 track->db_depth_size, bpe, track->db_offset,
327 radeon_bo_size(track->db_bo));
328 return -EINVAL;
329 }
330 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
331 } else {
332 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
333 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
334 tmp = ntiles * bpe * 64 * nviews;
335 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
336 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
337 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
338 radeon_bo_size(track->db_bo));
339 return -EINVAL;
340 }
341 }
342 }
343 return 0;
344}
345
43/** 346/**
44 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 347 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
45 * @parser: parser structure holding parsing context. 348 * @parser: parser structure holding parsing context.
@@ -359,6 +662,334 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
359 return 0; 662 return 0;
360} 663}
361 664
665/**
666 * r600_cs_check_reg() - check if register is authorized or not
 667 * @p: parser structure holding parsing context
668 * @reg: register we are testing
669 * @idx: index into the cs buffer
670 *
671 * This function will test against r600_reg_safe_bm and return 0
 672 * if the register is safe. If the register is not flagged as safe, this function
 673 * will test it against a list of registers needing special handling.
674 */
675static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
676{
677 struct r600_cs_track *track = (struct r600_cs_track *)p->track;
678 struct radeon_cs_reloc *reloc;
679 u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
680 u32 m, i, tmp, *ib;
681 int r;
682
683 i = (reg >> 7);
 684	if (i >= last_reg) {
685 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
686 return -EINVAL;
687 }
688 m = 1 << ((reg >> 2) & 31);
689 if (!(r600_reg_safe_bm[i] & m))
690 return 0;
691 ib = p->ib->ptr;
692 switch (reg) {
 693	/* force the following regs to 0 in an attempt to disable the output buffer;
 694	 * we need to understand how it works better before we can perform
 695	 * security checks on it (Jerome)
696 */
697 case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
698 case R_008C44_SQ_ESGS_RING_SIZE:
699 case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
700 case R_008C54_SQ_ESTMP_RING_SIZE:
701 case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
702 case R_008C74_SQ_FBUF_RING_SIZE:
703 case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
704 case R_008C5C_SQ_GSTMP_RING_SIZE:
705 case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
706 case R_008C4C_SQ_GSVS_RING_SIZE:
707 case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
708 case R_008C6C_SQ_PSTMP_RING_SIZE:
709 case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
710 case R_008C7C_SQ_REDUC_RING_SIZE:
711 case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
712 case R_008C64_SQ_VSTMP_RING_SIZE:
713 case R_0288C8_SQ_GS_VERT_ITEMSIZE:
 714	/* get value to populate the IB, don't remove */
 715	tmp = radeon_get_ib_value(p, idx);
716 ib[idx] = 0;
717 break;
718 case R_028800_DB_DEPTH_CONTROL:
719 track->db_depth_control = radeon_get_ib_value(p, idx);
720 break;
721 case R_028010_DB_DEPTH_INFO:
722 track->db_depth_info = radeon_get_ib_value(p, idx);
723 break;
724 case R_028004_DB_DEPTH_VIEW:
725 track->db_depth_view = radeon_get_ib_value(p, idx);
726 break;
727 case R_028000_DB_DEPTH_SIZE:
728 track->db_depth_size = radeon_get_ib_value(p, idx);
729 track->db_depth_size_idx = idx;
730 break;
731 case R_028AB0_VGT_STRMOUT_EN:
732 track->vgt_strmout_en = radeon_get_ib_value(p, idx);
733 break;
734 case R_028B20_VGT_STRMOUT_BUFFER_EN:
735 track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
736 break;
737 case R_028238_CB_TARGET_MASK:
738 track->cb_target_mask = radeon_get_ib_value(p, idx);
739 break;
740 case R_02823C_CB_SHADER_MASK:
741 track->cb_shader_mask = radeon_get_ib_value(p, idx);
742 break;
743 case R_028C04_PA_SC_AA_CONFIG:
744 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
745 track->nsamples = 1 << tmp;
746 break;
747 case R_0280A0_CB_COLOR0_INFO:
748 case R_0280A4_CB_COLOR1_INFO:
749 case R_0280A8_CB_COLOR2_INFO:
750 case R_0280AC_CB_COLOR3_INFO:
751 case R_0280B0_CB_COLOR4_INFO:
752 case R_0280B4_CB_COLOR5_INFO:
753 case R_0280B8_CB_COLOR6_INFO:
754 case R_0280BC_CB_COLOR7_INFO:
755 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
756 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
757 break;
758 case R_028060_CB_COLOR0_SIZE:
759 case R_028064_CB_COLOR1_SIZE:
760 case R_028068_CB_COLOR2_SIZE:
761 case R_02806C_CB_COLOR3_SIZE:
762 case R_028070_CB_COLOR4_SIZE:
763 case R_028074_CB_COLOR5_SIZE:
764 case R_028078_CB_COLOR6_SIZE:
765 case R_02807C_CB_COLOR7_SIZE:
766 tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
767 track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
768 track->cb_color_size_idx[tmp] = idx;
769 break;
 770	/* These registers were added late; there is userspace
 771	 * which does provide relocations for them but sets a
 772	 * 0 offset. In order to avoid breaking old userspace
 773	 * we detect this and set the address to point to the last
 774	 * CB_COLOR0_BASE. Note that if userspace doesn't set
 775	 * CB_COLOR0_BASE before these registers we will report an
 776	 * error. Old userspace always set CB_COLOR0_BASE
 777	 * before any of this.
 778	 */
779 case R_0280E0_CB_COLOR0_FRAG:
780 case R_0280E4_CB_COLOR1_FRAG:
781 case R_0280E8_CB_COLOR2_FRAG:
782 case R_0280EC_CB_COLOR3_FRAG:
783 case R_0280F0_CB_COLOR4_FRAG:
784 case R_0280F4_CB_COLOR5_FRAG:
785 case R_0280F8_CB_COLOR6_FRAG:
786 case R_0280FC_CB_COLOR7_FRAG:
787 tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
788 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
789 if (!track->cb_color_base_last[tmp]) {
790 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
791 return -EINVAL;
792 }
793 ib[idx] = track->cb_color_base_last[tmp];
 794	printk_once(KERN_WARNING "You have old & broken userspace, "
795 "please consider updating mesa & xf86-video-ati\n");
796 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
797 } else {
798 r = r600_cs_packet_next_reloc(p, &reloc);
799 if (r) {
800 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
801 return -EINVAL;
802 }
803 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
804 track->cb_color_frag_bo[tmp] = reloc->robj;
805 }
806 break;
807 case R_0280C0_CB_COLOR0_TILE:
808 case R_0280C4_CB_COLOR1_TILE:
809 case R_0280C8_CB_COLOR2_TILE:
810 case R_0280CC_CB_COLOR3_TILE:
811 case R_0280D0_CB_COLOR4_TILE:
812 case R_0280D4_CB_COLOR5_TILE:
813 case R_0280D8_CB_COLOR6_TILE:
814 case R_0280DC_CB_COLOR7_TILE:
815 tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
816 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
817 if (!track->cb_color_base_last[tmp]) {
818 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
819 return -EINVAL;
820 }
821 ib[idx] = track->cb_color_base_last[tmp];
 822	printk_once(KERN_WARNING "You have old & broken userspace, "
823 "please consider updating mesa & xf86-video-ati\n");
824 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
825 } else {
826 r = r600_cs_packet_next_reloc(p, &reloc);
827 if (r) {
828 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
829 return -EINVAL;
830 }
831 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
832 track->cb_color_tile_bo[tmp] = reloc->robj;
833 }
834 break;
835 case CB_COLOR0_BASE:
836 case CB_COLOR1_BASE:
837 case CB_COLOR2_BASE:
838 case CB_COLOR3_BASE:
839 case CB_COLOR4_BASE:
840 case CB_COLOR5_BASE:
841 case CB_COLOR6_BASE:
842 case CB_COLOR7_BASE:
843 r = r600_cs_packet_next_reloc(p, &reloc);
844 if (r) {
845 dev_warn(p->dev, "bad SET_CONTEXT_REG "
846 "0x%04X\n", reg);
847 return -EINVAL;
848 }
849 tmp = (reg - CB_COLOR0_BASE) / 4;
850 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
851 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
852 track->cb_color_base_last[tmp] = ib[idx];
853 track->cb_color_bo[tmp] = reloc->robj;
854 break;
855 case DB_DEPTH_BASE:
856 r = r600_cs_packet_next_reloc(p, &reloc);
857 if (r) {
858 dev_warn(p->dev, "bad SET_CONTEXT_REG "
859 "0x%04X\n", reg);
860 return -EINVAL;
861 }
862 track->db_offset = radeon_get_ib_value(p, idx);
863 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
864 track->db_bo = reloc->robj;
865 break;
866 case DB_HTILE_DATA_BASE:
867 case SQ_PGM_START_FS:
868 case SQ_PGM_START_ES:
869 case SQ_PGM_START_VS:
870 case SQ_PGM_START_GS:
871 case SQ_PGM_START_PS:
872 r = r600_cs_packet_next_reloc(p, &reloc);
873 if (r) {
874 dev_warn(p->dev, "bad SET_CONTEXT_REG "
875 "0x%04X\n", reg);
876 return -EINVAL;
877 }
878 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
879 break;
880 default:
881 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
882 return -EINVAL;
883 }
884 return 0;
885}
886
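/* (Editorial sketch, not part of the patch.)  The bitmap lookup at the top of
 * r600_cs_check_reg() above works as follows: each 32-bit word of the
 * generated r600_reg_safe_bm[] covers 128 bytes of register space (32 dword
 * registers), so reg >> 7 selects the word and (reg >> 2) & 31 selects the
 * bit; a clear bit means the register needs no special handling.
 */
static inline int reg_needs_special_check(const u32 *safe_bm, u32 nwords, u32 reg)
{
	u32 word = reg >> 7;
	u32 bit = 1 << ((reg >> 2) & 31);

	if (word >= nwords)
		return -EINVAL;			/* out of range: forbidden */
	return (safe_bm[word] & bit) != 0;	/* 1: must go through the switch */
}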
887static inline unsigned minify(unsigned size, unsigned levels)
888{
889 size = size >> levels;
890 if (size < 1)
891 size = 1;
892 return size;
893}
894
895static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
896 unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
897 unsigned *l0_size, unsigned *mipmap_size)
898{
899 unsigned offset, i, level, face;
900 unsigned width, height, depth, rowstride, size;
901
902 w0 = minify(w0, 0);
903 h0 = minify(h0, 0);
904 d0 = minify(d0, 0);
905 for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
906 width = minify(w0, i);
907 height = minify(h0, i);
908 depth = minify(d0, i);
909 for(face = 0; face < nfaces; face++) {
910 rowstride = ((width * bpe) + 255) & ~255;
911 size = height * rowstride * depth;
912 offset += size;
913 offset = (offset + 0x1f) & ~0x1f;
914 }
915 }
916 *l0_size = (((w0 * bpe) + 255) & ~255) * h0 * d0;
917 *mipmap_size = offset;
918 if (!blevel)
919 *mipmap_size -= *l0_size;
920 if (!nlevels)
921 *mipmap_size = *l0_size;
922}
923
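/* (Editorial note, not part of the patch.)  Worked example of the sizing in
 * r600_texture_size() above for a 64x64, bpe = 4 (32-bit) 2D texture: each
 * level's rowstride is rounded up to 256 bytes and each face/level offset to
 * 32 bytes, so
 *	level 0: rowstride = ALIGN(64 * 4, 256) = 256, size = 64 * 256 = 16384
 *	level 1: rowstride = ALIGN(32 * 4, 256) = 256, size = 32 * 256 =  8192
 * l0_size therefore comes out to 16384 and the remaining levels accumulate
 * into mipmap_size; these are the bounds checked against the texture and
 * mipmap bo sizes in r600_check_texture_resource() below.
 */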
924/**
 925 * r600_check_texture_resource() - check if a texture resource is valid
926 * @p: parser structure holding parsing context
927 * @idx: index into the cs buffer
928 * @texture: texture's bo structure
929 * @mipmap: mipmap's bo structure
930 *
 931 * This function will check that the resource has valid fields and that
 932 * the texture and mipmap bo objects are big enough to cover this resource.
933 */
934static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
935 struct radeon_bo *texture,
936 struct radeon_bo *mipmap)
937{
938 u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
939 u32 word0, word1, l0_size, mipmap_size;
940
 941	/* on legacy kernels we don't perform the advanced checks */
942 if (p->rdev == NULL)
943 return 0;
944 word0 = radeon_get_ib_value(p, idx + 0);
945 word1 = radeon_get_ib_value(p, idx + 1);
946 w0 = G_038000_TEX_WIDTH(word0) + 1;
947 h0 = G_038004_TEX_HEIGHT(word1) + 1;
948 d0 = G_038004_TEX_DEPTH(word1);
949 nfaces = 1;
950 switch (G_038000_DIM(word0)) {
951 case V_038000_SQ_TEX_DIM_1D:
952 case V_038000_SQ_TEX_DIM_2D:
953 case V_038000_SQ_TEX_DIM_3D:
954 break;
955 case V_038000_SQ_TEX_DIM_CUBEMAP:
956 nfaces = 6;
957 break;
958 case V_038000_SQ_TEX_DIM_1D_ARRAY:
959 case V_038000_SQ_TEX_DIM_2D_ARRAY:
960 case V_038000_SQ_TEX_DIM_2D_MSAA:
961 case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
962 default:
 963	dev_warn(p->dev, "this kernel doesn't support texture dim %d\n", G_038000_DIM(word0));
964 return -EINVAL;
965 }
966 if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
967 dev_warn(p->dev, "%s:%d texture invalid format %d\n",
968 __func__, __LINE__, G_038004_DATA_FORMAT(word1));
969 return -EINVAL;
970 }
971 word0 = radeon_get_ib_value(p, idx + 4);
972 word1 = radeon_get_ib_value(p, idx + 5);
973 blevel = G_038010_BASE_LEVEL(word0);
974 nlevels = G_038014_LAST_LEVEL(word1);
975 r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, &l0_size, &mipmap_size);
976 /* using get ib will give us the offset into the texture bo */
977 word0 = radeon_get_ib_value(p, idx + 2);
978 if ((l0_size + word0) > radeon_bo_size(texture)) {
979 dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
980 w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
981 return -EINVAL;
982 }
983 /* using get ib will give us the offset into the mipmap bo */
984 word0 = radeon_get_ib_value(p, idx + 3);
985 if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
986 dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
 987	w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(mipmap));
988 return -EINVAL;
989 }
990 return 0;
991}
992
362static int r600_packet3_check(struct radeon_cs_parser *p, 993static int r600_packet3_check(struct radeon_cs_parser *p,
363 struct radeon_cs_packet *pkt) 994 struct radeon_cs_packet *pkt)
364{ 995{
@@ -408,12 +1039,22 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
408 } 1039 }
409 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); 1040 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
410 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 1041 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
1042 r = r600_cs_track_check(p);
1043 if (r) {
1044 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1045 return r;
1046 }
411 break; 1047 break;
412 case PACKET3_DRAW_INDEX_AUTO: 1048 case PACKET3_DRAW_INDEX_AUTO:
413 if (pkt->count != 1) { 1049 if (pkt->count != 1) {
414 DRM_ERROR("bad DRAW_INDEX_AUTO\n"); 1050 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
415 return -EINVAL; 1051 return -EINVAL;
416 } 1052 }
1053 r = r600_cs_track_check(p);
1054 if (r) {
1055 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1056 return r;
1057 }
417 break; 1058 break;
418 case PACKET3_DRAW_INDEX_IMMD_BE: 1059 case PACKET3_DRAW_INDEX_IMMD_BE:
419 case PACKET3_DRAW_INDEX_IMMD: 1060 case PACKET3_DRAW_INDEX_IMMD:
@@ -421,6 +1062,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
421 DRM_ERROR("bad DRAW_INDEX_IMMD\n"); 1062 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
422 return -EINVAL; 1063 return -EINVAL;
423 } 1064 }
1065 r = r600_cs_track_check(p);
1066 if (r) {
1067 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1068 return r;
1069 }
424 break; 1070 break;
425 case PACKET3_WAIT_REG_MEM: 1071 case PACKET3_WAIT_REG_MEM:
426 if (pkt->count != 5) { 1072 if (pkt->count != 5) {
@@ -493,30 +1139,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
493 } 1139 }
494 for (i = 0; i < pkt->count; i++) { 1140 for (i = 0; i < pkt->count; i++) {
495 reg = start_reg + (4 * i); 1141 reg = start_reg + (4 * i);
496 switch (reg) { 1142 r = r600_cs_check_reg(p, reg, idx+1+i);
497 case SQ_ESGS_RING_BASE: 1143 if (r)
498 case SQ_GSVS_RING_BASE: 1144 return r;
499 case SQ_ESTMP_RING_BASE:
500 case SQ_GSTMP_RING_BASE:
501 case SQ_VSTMP_RING_BASE:
502 case SQ_PSTMP_RING_BASE:
503 case SQ_FBUF_RING_BASE:
504 case SQ_REDUC_RING_BASE:
505 case SX_MEMORY_EXPORT_BASE:
506 r = r600_cs_packet_next_reloc(p, &reloc);
507 if (r) {
508 DRM_ERROR("bad SET_CONFIG_REG "
509 "0x%04X\n", reg);
510 return -EINVAL;
511 }
512 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
513 break;
514 case CP_COHER_BASE:
515 /* use PACKET3_SURFACE_SYNC */
516 return -EINVAL;
517 default:
518 break;
519 }
 1142 			r = r600_cs_check_reg(p, reg, idx+1+i);
 1143 			if (r)
 1144 				return r;
520 		} 1145 		}
521 break; 1146 break;
522 case PACKET3_SET_CONTEXT_REG: 1147 case PACKET3_SET_CONTEXT_REG:
@@ -530,106 +1155,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
530 } 1155 }
531 for (i = 0; i < pkt->count; i++) { 1156 for (i = 0; i < pkt->count; i++) {
532 reg = start_reg + (4 * i); 1157 reg = start_reg + (4 * i);
533 			switch (reg) {
534 			/* This register were added late, there is userspace
535 			 * which does provide relocation for those but set
536 * 0 offset. In order to avoid breaking old userspace
537 * we detect this and set address to point to last
538 * CB_COLOR0_BASE, note that if userspace doesn't set
539 * CB_COLOR0_BASE before this register we will report
540 * error. Old userspace always set CB_COLOR0_BASE
541 * before any of this.
542 */
543 case R_0280E0_CB_COLOR0_FRAG:
544 case R_0280E4_CB_COLOR1_FRAG:
545 case R_0280E8_CB_COLOR2_FRAG:
546 case R_0280EC_CB_COLOR3_FRAG:
547 case R_0280F0_CB_COLOR4_FRAG:
548 case R_0280F4_CB_COLOR5_FRAG:
549 case R_0280F8_CB_COLOR6_FRAG:
550 case R_0280FC_CB_COLOR7_FRAG:
551 case R_0280C0_CB_COLOR0_TILE:
552 case R_0280C4_CB_COLOR1_TILE:
553 case R_0280C8_CB_COLOR2_TILE:
554 case R_0280CC_CB_COLOR3_TILE:
555 case R_0280D0_CB_COLOR4_TILE:
556 case R_0280D4_CB_COLOR5_TILE:
557 case R_0280D8_CB_COLOR6_TILE:
558 case R_0280DC_CB_COLOR7_TILE:
559 if (!r600_cs_packet_next_is_pkt3_nop(p)) {
560 if (!track->cb_color0_base_last) {
561 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
562 return -EINVAL;
563 }
564 ib[idx+1+i] = track->cb_color0_base_last;
565 printk_once(KERN_WARNING "radeon: You have old & broken userspace "
566 "please consider updating mesa & xf86-video-ati\n");
567 } else {
568 r = r600_cs_packet_next_reloc(p, &reloc);
569 if (r) {
570 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
571 return -EINVAL;
572 }
573 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
574 }
575 break;
576 case DB_DEPTH_BASE:
577 case DB_HTILE_DATA_BASE:
578 case CB_COLOR0_BASE:
579 r = r600_cs_packet_next_reloc(p, &reloc);
580 if (r) {
581 DRM_ERROR("bad SET_CONTEXT_REG "
582 "0x%04X\n", reg);
583 return -EINVAL;
584 }
585 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
586 track->cb_color0_base_last = ib[idx+1+i];
587 break;
588 case CB_COLOR1_BASE:
589 case CB_COLOR2_BASE:
590 case CB_COLOR3_BASE:
591 case CB_COLOR4_BASE:
592 case CB_COLOR5_BASE:
593 case CB_COLOR6_BASE:
594 case CB_COLOR7_BASE:
595 case SQ_PGM_START_FS:
596 case SQ_PGM_START_ES:
597 case SQ_PGM_START_VS:
598 case SQ_PGM_START_GS:
599 case SQ_PGM_START_PS:
600 r = r600_cs_packet_next_reloc(p, &reloc);
601 if (r) {
602 DRM_ERROR("bad SET_CONTEXT_REG "
603 "0x%04X\n", reg);
604 return -EINVAL;
605 }
606 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
607 break;
608 case VGT_DMA_BASE:
609 case VGT_DMA_BASE_HI:
610 /* These should be handled by DRAW_INDEX packet 3 */
611 case VGT_STRMOUT_BASE_OFFSET_0:
612 case VGT_STRMOUT_BASE_OFFSET_1:
613 case VGT_STRMOUT_BASE_OFFSET_2:
614 case VGT_STRMOUT_BASE_OFFSET_3:
615 case VGT_STRMOUT_BASE_OFFSET_HI_0:
616 case VGT_STRMOUT_BASE_OFFSET_HI_1:
617 case VGT_STRMOUT_BASE_OFFSET_HI_2:
618 case VGT_STRMOUT_BASE_OFFSET_HI_3:
619 case VGT_STRMOUT_BUFFER_BASE_0:
620 case VGT_STRMOUT_BUFFER_BASE_1:
621 case VGT_STRMOUT_BUFFER_BASE_2:
622 case VGT_STRMOUT_BUFFER_BASE_3:
623 case VGT_STRMOUT_BUFFER_OFFSET_0:
624 case VGT_STRMOUT_BUFFER_OFFSET_1:
625 case VGT_STRMOUT_BUFFER_OFFSET_2:
626 case VGT_STRMOUT_BUFFER_OFFSET_3:
627 /* These should be handled by STRMOUT_BUFFER packet 3 */
628 DRM_ERROR("bad context reg: 0x%08x\n", reg);
629 return -EINVAL;
630 default:
631 break;
632 }
 1158 			r = r600_cs_check_reg(p, reg, idx+1+i);
 1159 			if (r)
 1160 				return r;
633 		} 1161 		}
634 break; 1162 break;
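
Both SET_CONFIG_REG and SET_CONTEXT_REG now funnel every register through r600_cs_check_reg() instead of open-coded switch statements. One common way to structure such a checker is a small table keyed by register offset; the sketch below is illustrative only (the real function also patches relocations and feeds the tracker), with the two offsets taken from the r600d.h additions further down:

#include <stdint.h>
#include <stddef.h>

/* Illustrative table-driven whitelist; not the driver's implementation. */
static const uint32_t safe_regs[] = {
	0x0280A0,	/* R_0280A0_CB_COLOR0_INFO */
	0x028800,	/* R_028800_DB_DEPTH_CONTROL */
};

static int check_reg(uint32_t reg)
{
	size_t i;

	for (i = 0; i < sizeof(safe_regs) / sizeof(safe_regs[0]); i++)
		if (safe_regs[i] == reg)
			return 0;	/* known register: accept */
	return -1;			/* unknown: reject the command stream */
}

int main(void)
{
	return check_reg(0x028800) == 0 ? 0 : 1;
}
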
635 case PACKET3_SET_RESOURCE: 1163 case PACKET3_SET_RESOURCE:
@@ -646,6 +1174,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
646 return -EINVAL; 1174 return -EINVAL;
647 } 1175 }
648 for (i = 0; i < (pkt->count / 7); i++) { 1176 for (i = 0; i < (pkt->count / 7); i++) {
1177 struct radeon_bo *texture, *mipmap;
1178 u32 size, offset;
1179
649 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) { 1180 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
650 case SQ_TEX_VTX_VALID_TEXTURE: 1181 case SQ_TEX_VTX_VALID_TEXTURE:
651 /* tex base */ 1182 /* tex base */
@@ -655,6 +1186,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
655 return -EINVAL; 1186 return -EINVAL;
656 } 1187 }
657 ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1188 ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1189 texture = reloc->robj;
658 /* tex mip base */ 1190 /* tex mip base */
659 r = r600_cs_packet_next_reloc(p, &reloc); 1191 r = r600_cs_packet_next_reloc(p, &reloc);
660 if (r) { 1192 if (r) {
@@ -662,6 +1194,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
662 return -EINVAL; 1194 return -EINVAL;
663 } 1195 }
664 ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1196 ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1197 mipmap = reloc->robj;
1198 r = r600_check_texture_resource(p, idx+(i*7)+1,
1199 texture, mipmap);
1200 if (r)
1201 return r;
665 break; 1202 break;
666 case SQ_TEX_VTX_VALID_BUFFER: 1203 case SQ_TEX_VTX_VALID_BUFFER:
667 /* vtx base */ 1204 /* vtx base */
@@ -670,6 +1207,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
670 DRM_ERROR("bad SET_RESOURCE\n"); 1207 DRM_ERROR("bad SET_RESOURCE\n");
671 return -EINVAL; 1208 return -EINVAL;
672 } 1209 }
1210 offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
1211 size = radeon_get_ib_value(p, idx+1+(i*7)+1);
1212 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
1213 /* force size to size of the buffer */
1214 dev_warn(p->dev, "vbo resource seems too big for the bo\n");
1215 ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
1216 }
673 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); 1217 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
674 ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 1218 ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
675 break; 1219 break;
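
For vertex buffers the patch clamps rather than rejects: if the declared size runs past the end of the BO, the IB word is overwritten with the BO size. A standalone sketch of that clamp (simplified, not the driver's helper):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the vertex-buffer clamp above: instead of rejecting the
 * command stream, the declared size is shrunk to what the BO can hold
 * (the real code writes radeon_bo_size() back into the IB word). */
static uint32_t clamp_vtx_size(uint32_t offset, uint32_t size, uint32_t bo_size)
{
	if ((uint64_t)offset + size > bo_size) {
		fprintf(stderr, "vbo resource seems too big for the bo\n");
		return bo_size;
	}
	return size;
}

int main(void)
{
	/* 64 KiB declared at offset 60 KiB inside a 64 KiB BO gets clamped */
	return clamp_vtx_size(60 * 1024, 64 * 1024, 64 * 1024) == 64 * 1024 ? 0 : 1;
}
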
@@ -760,11 +1304,28 @@ int r600_cs_parse(struct radeon_cs_parser *p)
760 struct r600_cs_track *track; 1304 struct r600_cs_track *track;
761 int r; 1305 int r;
762 1306
763 	track = kzalloc(sizeof(*track), GFP_KERNEL);
764 	p->track = track;
 1307 	if (p->track == NULL) {
 1308 		/* initialize tracker, we are in kms */
1309 track = kzalloc(sizeof(*track), GFP_KERNEL);
1310 if (track == NULL)
1311 return -ENOMEM;
1312 r600_cs_track_init(track);
1313 if (p->rdev->family < CHIP_RV770) {
1314 track->npipes = p->rdev->config.r600.tiling_npipes;
1315 track->nbanks = p->rdev->config.r600.tiling_nbanks;
1316 track->group_size = p->rdev->config.r600.tiling_group_size;
1317 } else if (p->rdev->family <= CHIP_RV740) {
1318 track->npipes = p->rdev->config.rv770.tiling_npipes;
1319 track->nbanks = p->rdev->config.rv770.tiling_nbanks;
1320 track->group_size = p->rdev->config.rv770.tiling_group_size;
1321 }
1322 p->track = track;
1323 }
765 do { 1324 do {
766 r = r600_cs_packet_parse(p, &pkt, p->idx); 1325 r = r600_cs_packet_parse(p, &pkt, p->idx);
767 if (r) { 1326 if (r) {
1327 kfree(p->track);
1328 p->track = NULL;
768 return r; 1329 return r;
769 } 1330 }
770 p->idx += pkt.count + 2; 1331 p->idx += pkt.count + 2;
@@ -779,9 +1340,13 @@ int r600_cs_parse(struct radeon_cs_parser *p)
779 break; 1340 break;
780 default: 1341 default:
781 DRM_ERROR("Unknown packet type %d !\n", pkt.type); 1342 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
1343 kfree(p->track);
1344 p->track = NULL;
782 return -EINVAL; 1345 return -EINVAL;
783 } 1346 }
784 if (r) { 1347 if (r) {
1348 kfree(p->track);
1349 p->track = NULL;
785 return r; 1350 return r;
786 } 1351 }
787 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 1352 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
@@ -791,6 +1356,8 @@ int r600_cs_parse(struct radeon_cs_parser *p)
791 mdelay(1); 1356 mdelay(1);
792 } 1357 }
793#endif 1358#endif
1359 kfree(p->track);
1360 p->track = NULL;
794 return 0; 1361 return 0;
795} 1362}
796 1363
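
r600_cs_parse() now owns the tracker: it is allocated lazily on the first KMS pass and freed on every exit path, including the error returns added above. The ownership pattern, reduced to a toy example with invented types:

#include <stdlib.h>

/* Invented minimal types; only the ownership pattern matters here. */
struct toy_track { int dummy; };
struct toy_parser { struct toy_track *track; };

static int toy_parse(struct toy_parser *p, int fail_early)
{
	int r = 0;

	if (p->track == NULL) {
		p->track = calloc(1, sizeof(*p->track));
		if (p->track == NULL)
			return -1;	/* -ENOMEM in the kernel code */
	}
	if (fail_early)
		r = -1;			/* stands in for a packet-parse error */
	/* ... the packet loop would run here ... */
	free(p->track);			/* every exit path releases the tracker */
	p->track = NULL;
	return r;
}

int main(void)
{
	struct toy_parser p = { 0 };
	return toy_parse(&p, 0) ? 1 : 0;
}
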
@@ -833,9 +1400,16 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
833{ 1400{
834 struct radeon_cs_parser parser; 1401 struct radeon_cs_parser parser;
835 struct radeon_cs_chunk *ib_chunk; 1402 struct radeon_cs_chunk *ib_chunk;
836 struct radeon_ib fake_ib; 1403 struct radeon_ib fake_ib;
1404 struct r600_cs_track *track;
837 int r; 1405 int r;
838 1406
1407 /* initialize tracker */
1408 track = kzalloc(sizeof(*track), GFP_KERNEL);
1409 if (track == NULL)
1410 return -ENOMEM;
1411 r600_cs_track_init(track);
1412 r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
839 /* initialize parser */ 1413 /* initialize parser */
840 memset(&parser, 0, sizeof(struct radeon_cs_parser)); 1414 memset(&parser, 0, sizeof(struct radeon_cs_parser));
841 parser.filp = filp; 1415 parser.filp = filp;
@@ -843,6 +1417,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
843 parser.rdev = NULL; 1417 parser.rdev = NULL;
844 parser.family = family; 1418 parser.family = family;
845 parser.ib = &fake_ib; 1419 parser.ib = &fake_ib;
1420 parser.track = track;
846 fake_ib.ptr = ib; 1421 fake_ib.ptr = ib;
847 r = radeon_cs_parser_init(&parser, data); 1422 r = radeon_cs_parser_init(&parser, data);
848 if (r) { 1423 if (r) {
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 30480881aed1..5b2e4d442823 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -883,6 +883,16 @@
883 883
884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
885 885
886#define R_028C04_PA_SC_AA_CONFIG 0x028C04
887#define S_028C04_MSAA_NUM_SAMPLES(x) (((x) & 0x3) << 0)
888#define G_028C04_MSAA_NUM_SAMPLES(x) (((x) >> 0) & 0x3)
889#define C_028C04_MSAA_NUM_SAMPLES 0xFFFFFFFC
890#define S_028C04_AA_MASK_CENTROID_DTMN(x) (((x) & 0x1) << 4)
891#define G_028C04_AA_MASK_CENTROID_DTMN(x) (((x) >> 4) & 0x1)
892#define C_028C04_AA_MASK_CENTROID_DTMN 0xFFFFFFEF
893#define S_028C04_MAX_SAMPLE_DIST(x) (((x) & 0xF) << 13)
894#define G_028C04_MAX_SAMPLE_DIST(x) (((x) >> 13) & 0xF)
895#define C_028C04_MAX_SAMPLE_DIST 0xFFFE1FFF
886#define R_0280E0_CB_COLOR0_FRAG 0x0280E0 896#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
887#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) 897#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
888#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) 898#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
@@ -905,6 +915,461 @@
905#define R_0280D4_CB_COLOR5_TILE 0x0280D4 915#define R_0280D4_CB_COLOR5_TILE 0x0280D4
906#define R_0280D8_CB_COLOR6_TILE 0x0280D8 916#define R_0280D8_CB_COLOR6_TILE 0x0280D8
907#define R_0280DC_CB_COLOR7_TILE 0x0280DC 917#define R_0280DC_CB_COLOR7_TILE 0x0280DC
908
 918#define R_0280A0_CB_COLOR0_INFO                      0x0280A0
919#define S_0280A0_ENDIAN(x) (((x) & 0x3) << 0)
920#define G_0280A0_ENDIAN(x) (((x) >> 0) & 0x3)
921#define C_0280A0_ENDIAN 0xFFFFFFFC
922#define S_0280A0_FORMAT(x) (((x) & 0x3F) << 2)
923#define G_0280A0_FORMAT(x) (((x) >> 2) & 0x3F)
924#define C_0280A0_FORMAT 0xFFFFFF03
925#define V_0280A0_COLOR_INVALID 0x00000000
926#define V_0280A0_COLOR_8 0x00000001
927#define V_0280A0_COLOR_4_4 0x00000002
928#define V_0280A0_COLOR_3_3_2 0x00000003
929#define V_0280A0_COLOR_16 0x00000005
930#define V_0280A0_COLOR_16_FLOAT 0x00000006
931#define V_0280A0_COLOR_8_8 0x00000007
932#define V_0280A0_COLOR_5_6_5 0x00000008
933#define V_0280A0_COLOR_6_5_5 0x00000009
934#define V_0280A0_COLOR_1_5_5_5 0x0000000A
935#define V_0280A0_COLOR_4_4_4_4 0x0000000B
936#define V_0280A0_COLOR_5_5_5_1 0x0000000C
937#define V_0280A0_COLOR_32 0x0000000D
938#define V_0280A0_COLOR_32_FLOAT 0x0000000E
939#define V_0280A0_COLOR_16_16 0x0000000F
940#define V_0280A0_COLOR_16_16_FLOAT 0x00000010
941#define V_0280A0_COLOR_8_24 0x00000011
942#define V_0280A0_COLOR_8_24_FLOAT 0x00000012
943#define V_0280A0_COLOR_24_8 0x00000013
944#define V_0280A0_COLOR_24_8_FLOAT 0x00000014
945#define V_0280A0_COLOR_10_11_11 0x00000015
946#define V_0280A0_COLOR_10_11_11_FLOAT 0x00000016
947#define V_0280A0_COLOR_11_11_10 0x00000017
948#define V_0280A0_COLOR_11_11_10_FLOAT 0x00000018
949#define V_0280A0_COLOR_2_10_10_10 0x00000019
950#define V_0280A0_COLOR_8_8_8_8 0x0000001A
951#define V_0280A0_COLOR_10_10_10_2 0x0000001B
952#define V_0280A0_COLOR_X24_8_32_FLOAT 0x0000001C
953#define V_0280A0_COLOR_32_32 0x0000001D
954#define V_0280A0_COLOR_32_32_FLOAT 0x0000001E
955#define V_0280A0_COLOR_16_16_16_16 0x0000001F
956#define V_0280A0_COLOR_16_16_16_16_FLOAT 0x00000020
957#define V_0280A0_COLOR_32_32_32_32 0x00000022
958#define V_0280A0_COLOR_32_32_32_32_FLOAT 0x00000023
959#define S_0280A0_ARRAY_MODE(x) (((x) & 0xF) << 8)
960#define G_0280A0_ARRAY_MODE(x) (((x) >> 8) & 0xF)
961#define C_0280A0_ARRAY_MODE 0xFFFFF0FF
962#define V_0280A0_ARRAY_LINEAR_GENERAL 0x00000000
963#define V_0280A0_ARRAY_LINEAR_ALIGNED 0x00000001
964#define V_0280A0_ARRAY_1D_TILED_THIN1 0x00000002
965#define V_0280A0_ARRAY_2D_TILED_THIN1 0x00000004
966#define S_0280A0_NUMBER_TYPE(x) (((x) & 0x7) << 12)
967#define G_0280A0_NUMBER_TYPE(x) (((x) >> 12) & 0x7)
968#define C_0280A0_NUMBER_TYPE 0xFFFF8FFF
969#define S_0280A0_READ_SIZE(x) (((x) & 0x1) << 15)
970#define G_0280A0_READ_SIZE(x) (((x) >> 15) & 0x1)
971#define C_0280A0_READ_SIZE 0xFFFF7FFF
972#define S_0280A0_COMP_SWAP(x) (((x) & 0x3) << 16)
973#define G_0280A0_COMP_SWAP(x) (((x) >> 16) & 0x3)
974#define C_0280A0_COMP_SWAP 0xFFFCFFFF
975#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
976#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
977#define C_0280A0_TILE_MODE 0xFFF3FFFF
978#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
979#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
980#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF
981#define S_0280A0_CLEAR_COLOR(x) (((x) & 0x1) << 21)
982#define G_0280A0_CLEAR_COLOR(x) (((x) >> 21) & 0x1)
983#define C_0280A0_CLEAR_COLOR 0xFFDFFFFF
984#define S_0280A0_BLEND_BYPASS(x) (((x) & 0x1) << 22)
985#define G_0280A0_BLEND_BYPASS(x) (((x) >> 22) & 0x1)
986#define C_0280A0_BLEND_BYPASS 0xFFBFFFFF
987#define S_0280A0_BLEND_FLOAT32(x) (((x) & 0x1) << 23)
988#define G_0280A0_BLEND_FLOAT32(x) (((x) >> 23) & 0x1)
989#define C_0280A0_BLEND_FLOAT32 0xFF7FFFFF
990#define S_0280A0_SIMPLE_FLOAT(x) (((x) & 0x1) << 24)
991#define G_0280A0_SIMPLE_FLOAT(x) (((x) >> 24) & 0x1)
992#define C_0280A0_SIMPLE_FLOAT 0xFEFFFFFF
993#define S_0280A0_ROUND_MODE(x) (((x) & 0x1) << 25)
994#define G_0280A0_ROUND_MODE(x) (((x) >> 25) & 0x1)
995#define C_0280A0_ROUND_MODE 0xFDFFFFFF
996#define S_0280A0_TILE_COMPACT(x) (((x) & 0x1) << 26)
997#define G_0280A0_TILE_COMPACT(x) (((x) >> 26) & 0x1)
998#define C_0280A0_TILE_COMPACT 0xFBFFFFFF
999#define S_0280A0_SOURCE_FORMAT(x) (((x) & 0x1) << 27)
1000#define G_0280A0_SOURCE_FORMAT(x) (((x) >> 27) & 0x1)
1001#define C_0280A0_SOURCE_FORMAT 0xF7FFFFFF
1002#define R_0280A4_CB_COLOR1_INFO 0x0280A4
1003#define R_0280A8_CB_COLOR2_INFO 0x0280A8
1004#define R_0280AC_CB_COLOR3_INFO 0x0280AC
1005#define R_0280B0_CB_COLOR4_INFO 0x0280B0
1006#define R_0280B4_CB_COLOR5_INFO 0x0280B4
1007#define R_0280B8_CB_COLOR6_INFO 0x0280B8
1008#define R_0280BC_CB_COLOR7_INFO 0x0280BC
1009#define R_028060_CB_COLOR0_SIZE 0x028060
1010#define S_028060_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
1011#define G_028060_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
1012#define C_028060_PITCH_TILE_MAX 0xFFFFFC00
1013#define S_028060_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
1014#define G_028060_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
1015#define C_028060_SLICE_TILE_MAX 0xC00003FF
1016#define R_028064_CB_COLOR1_SIZE 0x028064
1017#define R_028068_CB_COLOR2_SIZE 0x028068
1018#define R_02806C_CB_COLOR3_SIZE 0x02806C
1019#define R_028070_CB_COLOR4_SIZE 0x028070
1020#define R_028074_CB_COLOR5_SIZE 0x028074
1021#define R_028078_CB_COLOR6_SIZE 0x028078
1022#define R_02807C_CB_COLOR7_SIZE 0x02807C
1023#define R_028238_CB_TARGET_MASK 0x028238
1024#define S_028238_TARGET0_ENABLE(x) (((x) & 0xF) << 0)
1025#define G_028238_TARGET0_ENABLE(x) (((x) >> 0) & 0xF)
1026#define C_028238_TARGET0_ENABLE 0xFFFFFFF0
1027#define S_028238_TARGET1_ENABLE(x) (((x) & 0xF) << 4)
1028#define G_028238_TARGET1_ENABLE(x) (((x) >> 4) & 0xF)
1029#define C_028238_TARGET1_ENABLE 0xFFFFFF0F
1030#define S_028238_TARGET2_ENABLE(x) (((x) & 0xF) << 8)
1031#define G_028238_TARGET2_ENABLE(x) (((x) >> 8) & 0xF)
1032#define C_028238_TARGET2_ENABLE 0xFFFFF0FF
1033#define S_028238_TARGET3_ENABLE(x) (((x) & 0xF) << 12)
1034#define G_028238_TARGET3_ENABLE(x) (((x) >> 12) & 0xF)
1035#define C_028238_TARGET3_ENABLE 0xFFFF0FFF
1036#define S_028238_TARGET4_ENABLE(x) (((x) & 0xF) << 16)
1037#define G_028238_TARGET4_ENABLE(x) (((x) >> 16) & 0xF)
1038#define C_028238_TARGET4_ENABLE 0xFFF0FFFF
1039#define S_028238_TARGET5_ENABLE(x) (((x) & 0xF) << 20)
1040#define G_028238_TARGET5_ENABLE(x) (((x) >> 20) & 0xF)
1041#define C_028238_TARGET5_ENABLE 0xFF0FFFFF
1042#define S_028238_TARGET6_ENABLE(x) (((x) & 0xF) << 24)
1043#define G_028238_TARGET6_ENABLE(x) (((x) >> 24) & 0xF)
1044#define C_028238_TARGET6_ENABLE 0xF0FFFFFF
1045#define S_028238_TARGET7_ENABLE(x) (((x) & 0xF) << 28)
1046#define G_028238_TARGET7_ENABLE(x) (((x) >> 28) & 0xF)
1047#define C_028238_TARGET7_ENABLE 0x0FFFFFFF
1048#define R_02823C_CB_SHADER_MASK 0x02823C
1049#define S_02823C_OUTPUT0_ENABLE(x) (((x) & 0xF) << 0)
1050#define G_02823C_OUTPUT0_ENABLE(x) (((x) >> 0) & 0xF)
1051#define C_02823C_OUTPUT0_ENABLE 0xFFFFFFF0
1052#define S_02823C_OUTPUT1_ENABLE(x) (((x) & 0xF) << 4)
1053#define G_02823C_OUTPUT1_ENABLE(x) (((x) >> 4) & 0xF)
1054#define C_02823C_OUTPUT1_ENABLE 0xFFFFFF0F
1055#define S_02823C_OUTPUT2_ENABLE(x) (((x) & 0xF) << 8)
1056#define G_02823C_OUTPUT2_ENABLE(x) (((x) >> 8) & 0xF)
1057#define C_02823C_OUTPUT2_ENABLE 0xFFFFF0FF
1058#define S_02823C_OUTPUT3_ENABLE(x) (((x) & 0xF) << 12)
1059#define G_02823C_OUTPUT3_ENABLE(x) (((x) >> 12) & 0xF)
1060#define C_02823C_OUTPUT3_ENABLE 0xFFFF0FFF
1061#define S_02823C_OUTPUT4_ENABLE(x) (((x) & 0xF) << 16)
1062#define G_02823C_OUTPUT4_ENABLE(x) (((x) >> 16) & 0xF)
1063#define C_02823C_OUTPUT4_ENABLE 0xFFF0FFFF
1064#define S_02823C_OUTPUT5_ENABLE(x) (((x) & 0xF) << 20)
1065#define G_02823C_OUTPUT5_ENABLE(x) (((x) >> 20) & 0xF)
1066#define C_02823C_OUTPUT5_ENABLE 0xFF0FFFFF
1067#define S_02823C_OUTPUT6_ENABLE(x) (((x) & 0xF) << 24)
1068#define G_02823C_OUTPUT6_ENABLE(x) (((x) >> 24) & 0xF)
1069#define C_02823C_OUTPUT6_ENABLE 0xF0FFFFFF
1070#define S_02823C_OUTPUT7_ENABLE(x) (((x) & 0xF) << 28)
1071#define G_02823C_OUTPUT7_ENABLE(x) (((x) >> 28) & 0xF)
1072#define C_02823C_OUTPUT7_ENABLE 0x0FFFFFFF
1073#define R_028AB0_VGT_STRMOUT_EN 0x028AB0
1074#define S_028AB0_STREAMOUT(x) (((x) & 0x1) << 0)
1075#define G_028AB0_STREAMOUT(x) (((x) >> 0) & 0x1)
1076#define C_028AB0_STREAMOUT 0xFFFFFFFE
1077#define R_028B20_VGT_STRMOUT_BUFFER_EN 0x028B20
1078#define S_028B20_BUFFER_0_EN(x) (((x) & 0x1) << 0)
1079#define G_028B20_BUFFER_0_EN(x) (((x) >> 0) & 0x1)
1080#define C_028B20_BUFFER_0_EN 0xFFFFFFFE
1081#define S_028B20_BUFFER_1_EN(x) (((x) & 0x1) << 1)
1082#define G_028B20_BUFFER_1_EN(x) (((x) >> 1) & 0x1)
1083#define C_028B20_BUFFER_1_EN 0xFFFFFFFD
1084#define S_028B20_BUFFER_2_EN(x) (((x) & 0x1) << 2)
1085#define G_028B20_BUFFER_2_EN(x) (((x) >> 2) & 0x1)
1086#define C_028B20_BUFFER_2_EN 0xFFFFFFFB
1087#define S_028B20_BUFFER_3_EN(x) (((x) & 0x1) << 3)
1088#define G_028B20_BUFFER_3_EN(x) (((x) >> 3) & 0x1)
1089#define C_028B20_BUFFER_3_EN 0xFFFFFFF7
1090#define S_028B20_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1091#define G_028B20_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1092#define C_028B20_SIZE 0x00000000
1093#define R_038000_SQ_TEX_RESOURCE_WORD0_0 0x038000
1094#define S_038000_DIM(x) (((x) & 0x7) << 0)
1095#define G_038000_DIM(x) (((x) >> 0) & 0x7)
1096#define C_038000_DIM 0xFFFFFFF8
1097#define V_038000_SQ_TEX_DIM_1D 0x00000000
1098#define V_038000_SQ_TEX_DIM_2D 0x00000001
1099#define V_038000_SQ_TEX_DIM_3D 0x00000002
1100#define V_038000_SQ_TEX_DIM_CUBEMAP 0x00000003
1101#define V_038000_SQ_TEX_DIM_1D_ARRAY 0x00000004
1102#define V_038000_SQ_TEX_DIM_2D_ARRAY 0x00000005
1103#define V_038000_SQ_TEX_DIM_2D_MSAA 0x00000006
1104#define V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007
1105#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3)
1106#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF)
1107#define C_038000_TILE_MODE 0xFFFFFF87
1108#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7)
1109#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1)
1110#define C_038000_TILE_TYPE 0xFFFFFF7F
1111#define S_038000_PITCH(x) (((x) & 0x7FF) << 8)
1112#define G_038000_PITCH(x) (((x) >> 8) & 0x7FF)
1113#define C_038000_PITCH 0xFFF800FF
1114#define S_038000_TEX_WIDTH(x) (((x) & 0x1FFF) << 19)
1115#define G_038000_TEX_WIDTH(x) (((x) >> 19) & 0x1FFF)
1116#define C_038000_TEX_WIDTH 0x0007FFFF
1117#define R_038004_SQ_TEX_RESOURCE_WORD1_0 0x038004
1118#define S_038004_TEX_HEIGHT(x) (((x) & 0x1FFF) << 0)
1119#define G_038004_TEX_HEIGHT(x) (((x) >> 0) & 0x1FFF)
1120#define C_038004_TEX_HEIGHT 0xFFFFE000
1121#define S_038004_TEX_DEPTH(x) (((x) & 0x1FFF) << 13)
1122#define G_038004_TEX_DEPTH(x) (((x) >> 13) & 0x1FFF)
1123#define C_038004_TEX_DEPTH 0xFC001FFF
1124#define S_038004_DATA_FORMAT(x) (((x) & 0x3F) << 26)
1125#define G_038004_DATA_FORMAT(x) (((x) >> 26) & 0x3F)
1126#define C_038004_DATA_FORMAT 0x03FFFFFF
1127#define V_038004_COLOR_INVALID 0x00000000
1128#define V_038004_COLOR_8 0x00000001
1129#define V_038004_COLOR_4_4 0x00000002
1130#define V_038004_COLOR_3_3_2 0x00000003
1131#define V_038004_COLOR_16 0x00000005
1132#define V_038004_COLOR_16_FLOAT 0x00000006
1133#define V_038004_COLOR_8_8 0x00000007
1134#define V_038004_COLOR_5_6_5 0x00000008
1135#define V_038004_COLOR_6_5_5 0x00000009
1136#define V_038004_COLOR_1_5_5_5 0x0000000A
1137#define V_038004_COLOR_4_4_4_4 0x0000000B
1138#define V_038004_COLOR_5_5_5_1 0x0000000C
1139#define V_038004_COLOR_32 0x0000000D
1140#define V_038004_COLOR_32_FLOAT 0x0000000E
1141#define V_038004_COLOR_16_16 0x0000000F
1142#define V_038004_COLOR_16_16_FLOAT 0x00000010
1143#define V_038004_COLOR_8_24 0x00000011
1144#define V_038004_COLOR_8_24_FLOAT 0x00000012
1145#define V_038004_COLOR_24_8 0x00000013
1146#define V_038004_COLOR_24_8_FLOAT 0x00000014
1147#define V_038004_COLOR_10_11_11 0x00000015
1148#define V_038004_COLOR_10_11_11_FLOAT 0x00000016
1149#define V_038004_COLOR_11_11_10 0x00000017
1150#define V_038004_COLOR_11_11_10_FLOAT 0x00000018
1151#define V_038004_COLOR_2_10_10_10 0x00000019
1152#define V_038004_COLOR_8_8_8_8 0x0000001A
1153#define V_038004_COLOR_10_10_10_2 0x0000001B
1154#define V_038004_COLOR_X24_8_32_FLOAT 0x0000001C
1155#define V_038004_COLOR_32_32 0x0000001D
1156#define V_038004_COLOR_32_32_FLOAT 0x0000001E
1157#define V_038004_COLOR_16_16_16_16 0x0000001F
1158#define V_038004_COLOR_16_16_16_16_FLOAT 0x00000020
1159#define V_038004_COLOR_32_32_32_32 0x00000022
1160#define V_038004_COLOR_32_32_32_32_FLOAT 0x00000023
1161#define V_038004_FMT_1 0x00000025
1162#define V_038004_FMT_GB_GR 0x00000027
1163#define V_038004_FMT_BG_RG 0x00000028
1164#define V_038004_FMT_32_AS_8 0x00000029
1165#define V_038004_FMT_32_AS_8_8 0x0000002A
1166#define V_038004_FMT_5_9_9_9_SHAREDEXP 0x0000002B
1167#define V_038004_FMT_8_8_8 0x0000002C
1168#define V_038004_FMT_16_16_16 0x0000002D
1169#define V_038004_FMT_16_16_16_FLOAT 0x0000002E
1170#define V_038004_FMT_32_32_32 0x0000002F
1171#define V_038004_FMT_32_32_32_FLOAT 0x00000030
1172#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
1173#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
1174#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
1175#define C_038010_FORMAT_COMP_X 0xFFFFFFFC
1176#define S_038010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2)
1177#define G_038010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3)
1178#define C_038010_FORMAT_COMP_Y 0xFFFFFFF3
1179#define S_038010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4)
1180#define G_038010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3)
1181#define C_038010_FORMAT_COMP_Z 0xFFFFFFCF
1182#define S_038010_FORMAT_COMP_W(x) (((x) & 0x3) << 6)
1183#define G_038010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3)
1184#define C_038010_FORMAT_COMP_W 0xFFFFFF3F
1185#define S_038010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8)
1186#define G_038010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3)
1187#define C_038010_NUM_FORMAT_ALL 0xFFFFFCFF
1188#define S_038010_SRF_MODE_ALL(x) (((x) & 0x1) << 10)
1189#define G_038010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1)
1190#define C_038010_SRF_MODE_ALL 0xFFFFFBFF
1191#define S_038010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11)
1192#define G_038010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1)
1193#define C_038010_FORCE_DEGAMMA 0xFFFFF7FF
1194#define S_038010_ENDIAN_SWAP(x) (((x) & 0x3) << 12)
1195#define G_038010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3)
1196#define C_038010_ENDIAN_SWAP 0xFFFFCFFF
1197#define S_038010_REQUEST_SIZE(x) (((x) & 0x3) << 14)
1198#define G_038010_REQUEST_SIZE(x) (((x) >> 14) & 0x3)
1199#define C_038010_REQUEST_SIZE 0xFFFF3FFF
1200#define S_038010_DST_SEL_X(x) (((x) & 0x7) << 16)
1201#define G_038010_DST_SEL_X(x) (((x) >> 16) & 0x7)
1202#define C_038010_DST_SEL_X 0xFFF8FFFF
1203#define S_038010_DST_SEL_Y(x) (((x) & 0x7) << 19)
1204#define G_038010_DST_SEL_Y(x) (((x) >> 19) & 0x7)
1205#define C_038010_DST_SEL_Y 0xFFC7FFFF
1206#define S_038010_DST_SEL_Z(x) (((x) & 0x7) << 22)
1207#define G_038010_DST_SEL_Z(x) (((x) >> 22) & 0x7)
1208#define C_038010_DST_SEL_Z 0xFE3FFFFF
1209#define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25)
1210#define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7)
1211#define C_038010_DST_SEL_W 0xF1FFFFFF
1212#define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28)
1213#define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
1214#define C_038010_BASE_LEVEL 0x0FFFFFFF
1215#define R_038014_SQ_TEX_RESOURCE_WORD5_0 0x038014
1216#define S_038014_LAST_LEVEL(x) (((x) & 0xF) << 0)
1217#define G_038014_LAST_LEVEL(x) (((x) >> 0) & 0xF)
1218#define C_038014_LAST_LEVEL 0xFFFFFFF0
1219#define S_038014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4)
1220#define G_038014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF)
1221#define C_038014_BASE_ARRAY 0xFFFE000F
1222#define S_038014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17)
1223#define G_038014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF)
1224#define C_038014_LAST_ARRAY 0xC001FFFF
1225#define R_0288A8_SQ_ESGS_RING_ITEMSIZE 0x0288A8
1226#define S_0288A8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1227#define G_0288A8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1228#define C_0288A8_ITEMSIZE 0xFFFF8000
1229#define R_008C44_SQ_ESGS_RING_SIZE 0x008C44
1230#define S_008C44_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1231#define G_008C44_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1232#define C_008C44_MEM_SIZE 0x00000000
1233#define R_0288B0_SQ_ESTMP_RING_ITEMSIZE 0x0288B0
1234#define S_0288B0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1235#define G_0288B0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1236#define C_0288B0_ITEMSIZE 0xFFFF8000
1237#define R_008C54_SQ_ESTMP_RING_SIZE 0x008C54
1238#define S_008C54_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1239#define G_008C54_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1240#define C_008C54_MEM_SIZE 0x00000000
1241#define R_0288C0_SQ_FBUF_RING_ITEMSIZE 0x0288C0
1242#define S_0288C0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1243#define G_0288C0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1244#define C_0288C0_ITEMSIZE 0xFFFF8000
1245#define R_008C74_SQ_FBUF_RING_SIZE 0x008C74
1246#define S_008C74_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1247#define G_008C74_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1248#define C_008C74_MEM_SIZE 0x00000000
1249#define R_0288B4_SQ_GSTMP_RING_ITEMSIZE 0x0288B4
1250#define S_0288B4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1251#define G_0288B4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1252#define C_0288B4_ITEMSIZE 0xFFFF8000
1253#define R_008C5C_SQ_GSTMP_RING_SIZE 0x008C5C
1254#define S_008C5C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1255#define G_008C5C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1256#define C_008C5C_MEM_SIZE 0x00000000
1257#define R_0288AC_SQ_GSVS_RING_ITEMSIZE 0x0288AC
1258#define S_0288AC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1259#define G_0288AC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1260#define C_0288AC_ITEMSIZE 0xFFFF8000
1261#define R_008C4C_SQ_GSVS_RING_SIZE 0x008C4C
1262#define S_008C4C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1263#define G_008C4C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1264#define C_008C4C_MEM_SIZE 0x00000000
1265#define R_0288BC_SQ_PSTMP_RING_ITEMSIZE 0x0288BC
1266#define S_0288BC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1267#define G_0288BC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1268#define C_0288BC_ITEMSIZE 0xFFFF8000
1269#define R_008C6C_SQ_PSTMP_RING_SIZE 0x008C6C
1270#define S_008C6C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1271#define G_008C6C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1272#define C_008C6C_MEM_SIZE 0x00000000
1273#define R_0288C4_SQ_REDUC_RING_ITEMSIZE 0x0288C4
1274#define S_0288C4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1275#define G_0288C4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1276#define C_0288C4_ITEMSIZE 0xFFFF8000
1277#define R_008C7C_SQ_REDUC_RING_SIZE 0x008C7C
1278#define S_008C7C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1279#define G_008C7C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1280#define C_008C7C_MEM_SIZE 0x00000000
1281#define R_0288B8_SQ_VSTMP_RING_ITEMSIZE 0x0288B8
1282#define S_0288B8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1283#define G_0288B8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1284#define C_0288B8_ITEMSIZE 0xFFFF8000
1285#define R_008C64_SQ_VSTMP_RING_SIZE 0x008C64
1286#define S_008C64_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
1287#define G_008C64_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
1288#define C_008C64_MEM_SIZE 0x00000000
1289#define R_0288C8_SQ_GS_VERT_ITEMSIZE 0x0288C8
1290#define S_0288C8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
1291#define G_0288C8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
1292#define C_0288C8_ITEMSIZE 0xFFFF8000
1293#define R_028010_DB_DEPTH_INFO 0x028010
1294#define S_028010_FORMAT(x) (((x) & 0x7) << 0)
1295#define G_028010_FORMAT(x) (((x) >> 0) & 0x7)
1296#define C_028010_FORMAT 0xFFFFFFF8
1297#define V_028010_DEPTH_INVALID 0x00000000
1298#define V_028010_DEPTH_16 0x00000001
1299#define V_028010_DEPTH_X8_24 0x00000002
1300#define V_028010_DEPTH_8_24 0x00000003
1301#define V_028010_DEPTH_X8_24_FLOAT 0x00000004
1302#define V_028010_DEPTH_8_24_FLOAT 0x00000005
1303#define V_028010_DEPTH_32_FLOAT 0x00000006
1304#define V_028010_DEPTH_X24_8_32_FLOAT 0x00000007
1305#define S_028010_READ_SIZE(x) (((x) & 0x1) << 3)
1306#define G_028010_READ_SIZE(x) (((x) >> 3) & 0x1)
1307#define C_028010_READ_SIZE 0xFFFFFFF7
1308#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15)
1309#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF)
1310#define C_028010_ARRAY_MODE 0xFFF87FFF
1311#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25)
1312#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1)
1313#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF
1314#define S_028010_TILE_COMPACT(x) (((x) & 0x1) << 26)
1315#define G_028010_TILE_COMPACT(x) (((x) >> 26) & 0x1)
1316#define C_028010_TILE_COMPACT 0xFBFFFFFF
1317#define S_028010_ZRANGE_PRECISION(x) (((x) & 0x1) << 31)
1318#define G_028010_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1)
1319#define C_028010_ZRANGE_PRECISION 0x7FFFFFFF
1320#define R_028000_DB_DEPTH_SIZE 0x028000
1321#define S_028000_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
1322#define G_028000_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
1323#define C_028000_PITCH_TILE_MAX 0xFFFFFC00
1324#define S_028000_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
1325#define G_028000_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
1326#define C_028000_SLICE_TILE_MAX 0xC00003FF
1327#define R_028004_DB_DEPTH_VIEW 0x028004
1328#define S_028004_SLICE_START(x) (((x) & 0x7FF) << 0)
1329#define G_028004_SLICE_START(x) (((x) >> 0) & 0x7FF)
1330#define C_028004_SLICE_START 0xFFFFF800
1331#define S_028004_SLICE_MAX(x) (((x) & 0x7FF) << 13)
1332#define G_028004_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
1333#define C_028004_SLICE_MAX 0xFF001FFF
1334#define R_028800_DB_DEPTH_CONTROL 0x028800
1335#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0)
1336#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1)
1337#define C_028800_STENCIL_ENABLE 0xFFFFFFFE
1338#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1)
1339#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1)
1340#define C_028800_Z_ENABLE 0xFFFFFFFD
1341#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2)
1342#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1)
1343#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB
1344#define S_028800_ZFUNC(x) (((x) & 0x7) << 4)
1345#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7)
1346#define C_028800_ZFUNC 0xFFFFFF8F
1347#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7)
1348#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1)
1349#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F
1350#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8)
1351#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7)
1352#define C_028800_STENCILFUNC 0xFFFFF8FF
1353#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11)
1354#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7)
1355#define C_028800_STENCILFAIL 0xFFFFC7FF
1356#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14)
1357#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7)
1358#define C_028800_STENCILZPASS 0xFFFE3FFF
1359#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17)
1360#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7)
1361#define C_028800_STENCILZFAIL 0xFFF1FFFF
1362#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20)
1363#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7)
1364#define C_028800_STENCILFUNC_BF 0xFF8FFFFF
1365#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23)
1366#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7)
1367#define C_028800_STENCILFAIL_BF 0xFC7FFFFF
1368#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26)
1369#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7)
1370#define C_028800_STENCILZPASS_BF 0xE3FFFFFF
1371#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29)
1372#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7)
1373#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF
909 1374
910#endif 1375#endif
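
The new r600d.h entries follow the usual S_ (shift a value into a field), G_ (extract a field) and C_ (clear-field mask) convention. A small read-modify-write example using the BASE_LEVEL field macros copied verbatim from the header above:

#include <stdint.h>

/* Field macros copied from the header above (BASE_LEVEL occupies
 * bits 31:28 of SQ_TEX_RESOURCE_WORD4). */
#define S_038010_BASE_LEVEL(x)		(((x) & 0xF) << 28)
#define G_038010_BASE_LEVEL(x)		(((x) >> 28) & 0xF)
#define C_038010_BASE_LEVEL		0x0FFFFFFF

/* Typical read-modify-write of one field in a saved register word. */
static uint32_t set_base_level(uint32_t word4, uint32_t level)
{
	word4 &= C_038010_BASE_LEVEL;		/* clear the old field */
	word4 |= S_038010_BASE_LEVEL(level);	/* splice in the new value */
	return word4;
}

int main(void)
{
	uint32_t word4 = set_base_level(0xF0000000u, 2);

	return G_038010_BASE_LEVEL(word4) == 2 ? 0 : 1;
}
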
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c0356bb193e5..829e26e8a4bb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,6 +89,7 @@ extern int radeon_testing;
89extern int radeon_connector_table; 89extern int radeon_connector_table;
90extern int radeon_tv; 90extern int radeon_tv;
91extern int radeon_new_pll; 91extern int radeon_new_pll;
92extern int radeon_dynpm;
92extern int radeon_audio; 93extern int radeon_audio;
93 94
94/* 95/*
@@ -118,6 +119,21 @@ struct radeon_device;
118/* 119/*
119 * BIOS. 120 * BIOS.
120 */ 121 */
122#define ATRM_BIOS_PAGE 4096
123
124#if defined(CONFIG_VGA_SWITCHEROO)
125bool radeon_atrm_supported(struct pci_dev *pdev);
126int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
127#else
128static inline bool radeon_atrm_supported(struct pci_dev *pdev)
129{
130 return false;
131}
132
133static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
134 return -EINVAL;
135}
136#endif
121bool radeon_get_bios(struct radeon_device *rdev); 137bool radeon_get_bios(struct radeon_device *rdev);
122 138
123 139
@@ -138,17 +154,23 @@ void radeon_dummy_page_fini(struct radeon_device *rdev);
138struct radeon_clock { 154struct radeon_clock {
139 struct radeon_pll p1pll; 155 struct radeon_pll p1pll;
140 struct radeon_pll p2pll; 156 struct radeon_pll p2pll;
157 struct radeon_pll dcpll;
141 struct radeon_pll spll; 158 struct radeon_pll spll;
142 struct radeon_pll mpll; 159 struct radeon_pll mpll;
143 /* 10 Khz units */ 160 /* 10 Khz units */
144 uint32_t default_mclk; 161 uint32_t default_mclk;
145 uint32_t default_sclk; 162 uint32_t default_sclk;
163 uint32_t default_dispclk;
164 uint32_t dp_extclk;
146}; 165};
147 166
148/* 167/*
149 * Power management 168 * Power management
150 */ 169 */
151int radeon_pm_init(struct radeon_device *rdev); 170int radeon_pm_init(struct radeon_device *rdev);
171void radeon_pm_compute_clocks(struct radeon_device *rdev);
172void radeon_combios_get_power_modes(struct radeon_device *rdev);
173void radeon_atombios_get_power_modes(struct radeon_device *rdev);
152 174
153/* 175/*
154 * Fences. 176 * Fences.
@@ -275,6 +297,7 @@ union radeon_gart_table {
275}; 297};
276 298
277#define RADEON_GPU_PAGE_SIZE 4096 299#define RADEON_GPU_PAGE_SIZE 4096
300#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
278 301
279struct radeon_gart { 302struct radeon_gart {
280 dma_addr_t table_addr; 303 dma_addr_t table_addr;
@@ -309,21 +332,19 @@ struct radeon_mc {
309 /* for some chips with <= 32MB we need to lie 332 /* for some chips with <= 32MB we need to lie
310 * about vram size near mc fb location */ 333 * about vram size near mc fb location */
311 u64 mc_vram_size; 334 u64 mc_vram_size;
312 	u64			gtt_location;
 335 	u64			visible_vram_size;
313 u64 gtt_size; 336 u64 gtt_size;
314 u64 gtt_start; 337 u64 gtt_start;
315 u64 gtt_end; 338 u64 gtt_end;
316 u64 vram_location;
317 u64 vram_start; 339 u64 vram_start;
318 u64 vram_end; 340 u64 vram_end;
319 unsigned vram_width; 341 unsigned vram_width;
320 u64 real_vram_size; 342 u64 real_vram_size;
321 int vram_mtrr; 343 int vram_mtrr;
322 bool vram_is_ddr; 344 bool vram_is_ddr;
323 bool igp_sideport_enabled; 345 bool igp_sideport_enabled;
324}; 346};
325 347
326int radeon_mc_setup(struct radeon_device *rdev);
327bool radeon_combios_sideport_present(struct radeon_device *rdev); 348bool radeon_combios_sideport_present(struct radeon_device *rdev);
328bool radeon_atombios_sideport_present(struct radeon_device *rdev); 349bool radeon_atombios_sideport_present(struct radeon_device *rdev);
329 350
@@ -348,6 +369,7 @@ struct radeon_irq {
348 bool sw_int; 369 bool sw_int;
349 /* FIXME: use a define max crtc rather than hardcode it */ 370 /* FIXME: use a define max crtc rather than hardcode it */
350 bool crtc_vblank_int[2]; 371 bool crtc_vblank_int[2];
372 wait_queue_head_t vblank_queue;
351 /* FIXME: use defines for max hpd/dacs */ 373 /* FIXME: use defines for max hpd/dacs */
352 bool hpd[6]; 374 bool hpd[6];
353 spinlock_t sw_lock; 375 spinlock_t sw_lock;
@@ -379,6 +401,7 @@ struct radeon_ib {
379struct radeon_ib_pool { 401struct radeon_ib_pool {
380 struct mutex mutex; 402 struct mutex mutex;
381 struct radeon_bo *robj; 403 struct radeon_bo *robj;
404 struct list_head bogus_ib;
382 struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; 405 struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
383 bool ready; 406 bool ready;
384 unsigned head_id; 407 unsigned head_id;
@@ -433,6 +456,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
433int radeon_ib_pool_init(struct radeon_device *rdev); 456int radeon_ib_pool_init(struct radeon_device *rdev);
434void radeon_ib_pool_fini(struct radeon_device *rdev); 457void radeon_ib_pool_fini(struct radeon_device *rdev);
435int radeon_ib_test(struct radeon_device *rdev); 458int radeon_ib_test(struct radeon_device *rdev);
459extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
436/* Ring access between begin & end cannot sleep */ 460/* Ring access between begin & end cannot sleep */
437void radeon_ring_free_size(struct radeon_device *rdev); 461void radeon_ring_free_size(struct radeon_device *rdev);
438int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); 462int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
@@ -570,7 +594,99 @@ struct radeon_wb {
570 * Equation between gpu/memory clock and available bandwidth is hw dependent 594 * Equation between gpu/memory clock and available bandwidth is hw dependent
571 * (type of memory, bus size, efficiency, ...) 595 * (type of memory, bus size, efficiency, ...)
572 */ 596 */
597enum radeon_pm_state {
598 PM_STATE_DISABLED,
599 PM_STATE_MINIMUM,
600 PM_STATE_PAUSED,
601 PM_STATE_ACTIVE
602};
603enum radeon_pm_action {
604 PM_ACTION_NONE,
605 PM_ACTION_MINIMUM,
606 PM_ACTION_DOWNCLOCK,
607 PM_ACTION_UPCLOCK
608};
609
610enum radeon_voltage_type {
611 VOLTAGE_NONE = 0,
612 VOLTAGE_GPIO,
613 VOLTAGE_VDDC,
614 VOLTAGE_SW
615};
616
617enum radeon_pm_state_type {
618 POWER_STATE_TYPE_DEFAULT,
619 POWER_STATE_TYPE_POWERSAVE,
620 POWER_STATE_TYPE_BATTERY,
621 POWER_STATE_TYPE_BALANCED,
622 POWER_STATE_TYPE_PERFORMANCE,
623};
624
625enum radeon_pm_clock_mode_type {
626 POWER_MODE_TYPE_DEFAULT,
627 POWER_MODE_TYPE_LOW,
628 POWER_MODE_TYPE_MID,
629 POWER_MODE_TYPE_HIGH,
630};
631
632struct radeon_voltage {
633 enum radeon_voltage_type type;
634 /* gpio voltage */
635 struct radeon_gpio_rec gpio;
636 u32 delay; /* delay in usec from voltage drop to sclk change */
637 bool active_high; /* voltage drop is active when bit is high */
638 /* VDDC voltage */
639 u8 vddc_id; /* index into vddc voltage table */
640 u8 vddci_id; /* index into vddci voltage table */
641 bool vddci_enabled;
642 /* r6xx+ sw */
643 u32 voltage;
644};
645
646struct radeon_pm_non_clock_info {
647 /* pcie lanes */
648 int pcie_lanes;
649 /* standardized non-clock flags */
650 u32 flags;
651};
652
653struct radeon_pm_clock_info {
654 /* memory clock */
655 u32 mclk;
656 /* engine clock */
657 u32 sclk;
658 /* voltage info */
659 struct radeon_voltage voltage;
660 /* standardized clock flags - not sure we'll need these */
661 u32 flags;
662};
663
664struct radeon_power_state {
665 enum radeon_pm_state_type type;
666 /* XXX: use a define for num clock modes */
667 struct radeon_pm_clock_info clock_info[8];
668 /* number of valid clock modes in this power state */
669 int num_clock_modes;
670 struct radeon_pm_clock_info *default_clock_mode;
671 /* non clock info about this state */
672 struct radeon_pm_non_clock_info non_clock_info;
673 bool voltage_drop_active;
674};
675
676/*
677 * Some modes are overclocked by very low value, accept them
678 */
679#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
680
573struct radeon_pm { 681struct radeon_pm {
682 struct mutex mutex;
683 struct delayed_work idle_work;
684 enum radeon_pm_state state;
685 enum radeon_pm_action planned_action;
686 unsigned long action_timeout;
687 bool downclocked;
688 int active_crtcs;
689 int req_vblank;
574 fixed20_12 max_bandwidth; 690 fixed20_12 max_bandwidth;
575 fixed20_12 igp_sideport_mclk; 691 fixed20_12 igp_sideport_mclk;
576 fixed20_12 igp_system_mclk; 692 fixed20_12 igp_system_mclk;
@@ -582,6 +698,15 @@ struct radeon_pm {
582 fixed20_12 core_bandwidth; 698 fixed20_12 core_bandwidth;
583 fixed20_12 sclk; 699 fixed20_12 sclk;
584 fixed20_12 needed_bandwidth; 700 fixed20_12 needed_bandwidth;
701 /* XXX: use a define for num power modes */
702 struct radeon_power_state power_state[8];
703 /* number of valid power states */
704 int num_power_states;
705 struct radeon_power_state *current_power_state;
706 struct radeon_pm_clock_info *current_clock_mode;
707 struct radeon_power_state *requested_power_state;
708 struct radeon_pm_clock_info *requested_clock_mode;
709 struct radeon_power_state *default_power_state;
585}; 710};
586 711
587 712
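
The new power-management structures describe each power state as a set of clock modes (mclk/sclk plus voltage). Picking, say, the mode with the lowest engine clock, as a PM_ACTION_MINIMUM candidate might, could look like the sketch below; the cut-down types and the selection policy are illustrative, not the driver's actual code:

#include <stdint.h>
#include <stddef.h>

/* Cut-down copies of the structures added above; clocks are in the same
 * 10 kHz units the driver uses, values below are arbitrary. */
struct toy_clock_info { uint32_t mclk, sclk; };
struct toy_power_state {
	struct toy_clock_info clock_info[8];
	int num_clock_modes;
};

/* Pick the mode with the lowest engine clock. Purely illustrative policy. */
static const struct toy_clock_info *
lowest_clock_mode(const struct toy_power_state *ps)
{
	const struct toy_clock_info *best = NULL;
	int i;

	for (i = 0; i < ps->num_clock_modes; i++)
		if (best == NULL || ps->clock_info[i].sclk < best->sclk)
			best = &ps->clock_info[i];
	return best;
}

int main(void)
{
	struct toy_power_state ps = {
		.clock_info = { { 90000, 70000 }, { 40000, 30000 } },
		.num_clock_modes = 2,
	};

	return lowest_clock_mode(&ps)->sclk == 30000 ? 0 : 1;
}
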
@@ -651,6 +776,7 @@ struct radeon_asic {
651 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); 776 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
652 uint32_t (*get_memory_clock)(struct radeon_device *rdev); 777 uint32_t (*get_memory_clock)(struct radeon_device *rdev);
653 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); 778 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
779 int (*get_pcie_lanes)(struct radeon_device *rdev);
654 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); 780 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
655 void (*set_clock_gating)(struct radeon_device *rdev, int enable); 781 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
656 int (*set_surface_reg)(struct radeon_device *rdev, int reg, 782 int (*set_surface_reg)(struct radeon_device *rdev, int reg,
@@ -701,6 +827,9 @@ struct r600_asic {
701 unsigned sx_max_export_pos_size; 827 unsigned sx_max_export_pos_size;
702 unsigned sx_max_export_smx_size; 828 unsigned sx_max_export_smx_size;
703 unsigned sq_num_cf_insts; 829 unsigned sq_num_cf_insts;
830 unsigned tiling_nbanks;
831 unsigned tiling_npipes;
832 unsigned tiling_group_size;
704}; 833};
705 834
706struct rv770_asic { 835struct rv770_asic {
@@ -721,6 +850,9 @@ struct rv770_asic {
721 unsigned sc_prim_fifo_size; 850 unsigned sc_prim_fifo_size;
722 unsigned sc_hiz_tile_fifo_size; 851 unsigned sc_hiz_tile_fifo_size;
723 unsigned sc_earlyz_tile_fifo_fize; 852 unsigned sc_earlyz_tile_fifo_fize;
853 unsigned tiling_nbanks;
854 unsigned tiling_npipes;
855 unsigned tiling_group_size;
724}; 856};
725 857
726union radeon_asic_config { 858union radeon_asic_config {
@@ -830,6 +962,8 @@ struct radeon_device {
830 struct r600_ih ih; /* r6/700 interrupt ring */ 962 struct r600_ih ih; /* r6/700 interrupt ring */
831 struct workqueue_struct *wq; 963 struct workqueue_struct *wq;
832 struct work_struct hotplug_work; 964 struct work_struct hotplug_work;
965 int num_crtc; /* number of crtcs */
966 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
833 967
834 /* audio stuff */ 968 /* audio stuff */
835 struct timer_list audio_timer; 969 struct timer_list audio_timer;
@@ -838,6 +972,8 @@ struct radeon_device {
838 int audio_bits_per_sample; 972 int audio_bits_per_sample;
839 uint8_t audio_status_bits; 973 uint8_t audio_status_bits;
840 uint8_t audio_category_code; 974 uint8_t audio_category_code;
975
976 bool powered_down;
841}; 977};
842 978
843int radeon_device_init(struct radeon_device *rdev, 979int radeon_device_init(struct radeon_device *rdev,
@@ -895,6 +1031,8 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
895#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) 1031#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
896#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg)) 1032#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
897#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) 1033#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
1034#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
1035#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
898#define WREG32_P(reg, val, mask) \ 1036#define WREG32_P(reg, val, mask) \
899 do { \ 1037 do { \
900 uint32_t tmp_ = RREG32(reg); \ 1038 uint32_t tmp_ = RREG32(reg); \
@@ -956,7 +1094,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
956#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) 1094#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
957#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) 1095#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
958#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) 1096#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
959
 1097#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
960 1098
961/* 1099/*
962 * BIOS helpers. 1100 * BIOS helpers.
@@ -1015,6 +1153,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1015#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) 1153#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
1016#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) 1154#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
1017#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e)) 1155#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
1156#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))
1018#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) 1157#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
1019#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) 1158#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
1020#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) 1159#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
@@ -1029,6 +1168,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1029/* AGP */ 1168/* AGP */
1030extern void radeon_agp_disable(struct radeon_device *rdev); 1169extern void radeon_agp_disable(struct radeon_device *rdev);
1031extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); 1170extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
1171extern void radeon_gart_restore(struct radeon_device *rdev);
1032extern int radeon_modeset_init(struct radeon_device *rdev); 1172extern int radeon_modeset_init(struct radeon_device *rdev);
1033extern void radeon_modeset_fini(struct radeon_device *rdev); 1173extern void radeon_modeset_fini(struct radeon_device *rdev);
1034extern bool radeon_card_posted(struct radeon_device *rdev); 1174extern bool radeon_card_posted(struct radeon_device *rdev);
@@ -1042,6 +1182,10 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl
1042extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 1182extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
1043extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); 1183extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
1044extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); 1184extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
1185extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
1186extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1187extern int radeon_resume_kms(struct drm_device *dev);
1188extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1045 1189
1046/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ 1190/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
1047struct r100_mc_save { 1191struct r100_mc_save {
@@ -1096,7 +1240,7 @@ extern void r200_set_safe_registers(struct radeon_device *rdev);
1096/* r300,r350,rv350,rv370,rv380 */ 1240/* r300,r350,rv350,rv370,rv380 */
1097extern void r300_set_reg_safe(struct radeon_device *rdev); 1241extern void r300_set_reg_safe(struct radeon_device *rdev);
1098extern void r300_mc_program(struct radeon_device *rdev); 1242extern void r300_mc_program(struct radeon_device *rdev);
1099extern void r300_vram_info(struct radeon_device *rdev);
 1243extern void r300_mc_init(struct radeon_device *rdev);
1100extern void r300_clock_startup(struct radeon_device *rdev); 1244extern void r300_clock_startup(struct radeon_device *rdev);
1101extern int r300_mc_wait_for_idle(struct radeon_device *rdev); 1245extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
1102extern int rv370_pcie_gart_init(struct radeon_device *rdev); 1246extern int rv370_pcie_gart_init(struct radeon_device *rdev);
@@ -1105,7 +1249,6 @@ extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
1105extern void rv370_pcie_gart_disable(struct radeon_device *rdev); 1249extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
1106 1250
1107/* r420,r423,rv410 */ 1251/* r420,r423,rv410 */
1108extern int r420_mc_init(struct radeon_device *rdev);
1109extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); 1252extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
1110extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); 1253extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1111extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); 1254extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
@@ -1147,13 +1290,13 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
1147 struct drm_display_mode *mode2); 1290 struct drm_display_mode *mode2);
1148 1291
1149/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ 1292/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
1293extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1150extern bool r600_card_posted(struct radeon_device *rdev); 1294extern bool r600_card_posted(struct radeon_device *rdev);
1151extern void r600_cp_stop(struct radeon_device *rdev); 1295extern void r600_cp_stop(struct radeon_device *rdev);
1152extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); 1296extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
1153extern int r600_cp_resume(struct radeon_device *rdev); 1297extern int r600_cp_resume(struct radeon_device *rdev);
1154extern void r600_cp_fini(struct radeon_device *rdev); 1298extern void r600_cp_fini(struct radeon_device *rdev);
1155extern int r600_count_pipe_bits(uint32_t val); 1299extern int r600_count_pipe_bits(uint32_t val);
1156extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
1157extern int r600_mc_wait_for_idle(struct radeon_device *rdev); 1300extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
1158extern int r600_pcie_gart_init(struct radeon_device *rdev); 1301extern int r600_pcie_gart_init(struct radeon_device *rdev);
1159extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); 1302extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
@@ -1189,6 +1332,14 @@ extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
1189 uint8_t status_bits, 1332 uint8_t status_bits,
1190 uint8_t category_code); 1333 uint8_t category_code);
1191 1334
1335/* evergreen */
1336struct evergreen_mc_save {
1337 u32 vga_control[6];
1338 u32 vga_render_control;
1339 u32 vga_hdp_control;
1340 u32 crtc_control[6];
1341};
1342
1192#include "radeon_object.h" 1343#include "radeon_object.h"
1193 1344
1194#endif 1345#endif
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index c0681a5556dc..c4457791dff1 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -237,6 +237,10 @@ int radeon_agp_init(struct radeon_device *rdev)
237 237
238 rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base; 238 rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
239 rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20; 239 rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
240 rdev->mc.gtt_start = rdev->mc.agp_base;
241 rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
242 dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
243 rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end);
240 244
241 /* workaround some hw issues */ 245 /* workaround some hw issues */
242 if (rdev->family < CHIP_R200) { 246 if (rdev->family < CHIP_R200) {
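
The radeon_agp.c hunk derives the GTT range directly from the AGP aperture: the size is given in MiB (hence the << 20) and the end address is inclusive. A worked example with a hypothetical 256 MiB aperture, reproducing the same dev_info format:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical AGP aperture: 256 MiB at 0xd0000000 */
	uint64_t aper_base    = 0xd0000000ULL;
	uint64_t aper_size_mb = 256;

	uint64_t gtt_size  = aper_size_mb << 20;	/* MiB -> bytes */
	uint64_t gtt_start = aper_base;
	uint64_t gtt_end   = gtt_start + gtt_size - 1;

	printf("GTT: %lluM 0x%08llX - 0x%08llX\n",
	       (unsigned long long)(gtt_size >> 20),
	       (unsigned long long)gtt_start,
	       (unsigned long long)gtt_end);
	/* prints: GTT: 256M 0xD0000000 - 0xDFFFFFFF */
	return 0;
}
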
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 05ee1aeac3fd..d3a157b2bcb7 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -43,7 +43,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock
43void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 43void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
44 44
45/* 45/*
46 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 46 * r100,rv100,rs100,rv200,rs200
47 */ 47 */
48extern int r100_init(struct radeon_device *rdev); 48extern int r100_init(struct radeon_device *rdev);
49extern void r100_fini(struct radeon_device *rdev); 49extern void r100_fini(struct radeon_device *rdev);
@@ -108,6 +108,52 @@ static struct radeon_asic r100_asic = {
108 .set_engine_clock = &radeon_legacy_set_engine_clock, 108 .set_engine_clock = &radeon_legacy_set_engine_clock,
109 .get_memory_clock = &radeon_legacy_get_memory_clock, 109 .get_memory_clock = &radeon_legacy_get_memory_clock,
110 .set_memory_clock = NULL, 110 .set_memory_clock = NULL,
111 .get_pcie_lanes = NULL,
112 .set_pcie_lanes = NULL,
113 .set_clock_gating = &radeon_legacy_set_clock_gating,
114 .set_surface_reg = r100_set_surface_reg,
115 .clear_surface_reg = r100_clear_surface_reg,
116 .bandwidth_update = &r100_bandwidth_update,
117 .hpd_init = &r100_hpd_init,
118 .hpd_fini = &r100_hpd_fini,
119 .hpd_sense = &r100_hpd_sense,
120 .hpd_set_polarity = &r100_hpd_set_polarity,
121 .ioctl_wait_idle = NULL,
122};
123
124/*
125 * r200,rv250,rs300,rv280
126 */
127extern int r200_copy_dma(struct radeon_device *rdev,
128 uint64_t src_offset,
129 uint64_t dst_offset,
130 unsigned num_pages,
131 struct radeon_fence *fence);
132static struct radeon_asic r200_asic = {
133 .init = &r100_init,
134 .fini = &r100_fini,
135 .suspend = &r100_suspend,
136 .resume = &r100_resume,
137 .vga_set_state = &r100_vga_set_state,
138 .gpu_reset = &r100_gpu_reset,
139 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
140 .gart_set_page = &r100_pci_gart_set_page,
141 .cp_commit = &r100_cp_commit,
142 .ring_start = &r100_ring_start,
143 .ring_test = &r100_ring_test,
144 .ring_ib_execute = &r100_ring_ib_execute,
145 .irq_set = &r100_irq_set,
146 .irq_process = &r100_irq_process,
147 .get_vblank_counter = &r100_get_vblank_counter,
148 .fence_ring_emit = &r100_fence_ring_emit,
149 .cs_parse = &r100_cs_parse,
150 .copy_blit = &r100_copy_blit,
151 .copy_dma = &r200_copy_dma,
152 .copy = &r100_copy_blit,
153 .get_engine_clock = &radeon_legacy_get_engine_clock,
154 .set_engine_clock = &radeon_legacy_set_engine_clock,
155 .get_memory_clock = &radeon_legacy_get_memory_clock,
156 .set_memory_clock = NULL,
111 .set_pcie_lanes = NULL, 157 .set_pcie_lanes = NULL,
112 .set_clock_gating = &radeon_legacy_set_clock_gating, 158 .set_clock_gating = &radeon_legacy_set_clock_gating,
113 .set_surface_reg = r100_set_surface_reg, 159 .set_surface_reg = r100_set_surface_reg,
@@ -138,11 +184,8 @@ extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t
138extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); 184extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
139extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 185extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
140extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); 186extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
141extern int r300_copy_dma(struct radeon_device *rdev, 187extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
142 uint64_t src_offset, 188
143 uint64_t dst_offset,
144 unsigned num_pages,
145 struct radeon_fence *fence);
146static struct radeon_asic r300_asic = { 189static struct radeon_asic r300_asic = {
147 .init = &r300_init, 190 .init = &r300_init,
148 .fini = &r300_fini, 191 .fini = &r300_fini,
@@ -162,7 +205,46 @@ static struct radeon_asic r300_asic = {
162 .fence_ring_emit = &r300_fence_ring_emit, 205 .fence_ring_emit = &r300_fence_ring_emit,
163 .cs_parse = &r300_cs_parse, 206 .cs_parse = &r300_cs_parse,
164 .copy_blit = &r100_copy_blit, 207 .copy_blit = &r100_copy_blit,
165 .copy_dma = &r300_copy_dma, 208 .copy_dma = &r200_copy_dma,
209 .copy = &r100_copy_blit,
210 .get_engine_clock = &radeon_legacy_get_engine_clock,
211 .set_engine_clock = &radeon_legacy_set_engine_clock,
212 .get_memory_clock = &radeon_legacy_get_memory_clock,
213 .set_memory_clock = NULL,
214 .get_pcie_lanes = &rv370_get_pcie_lanes,
215 .set_pcie_lanes = &rv370_set_pcie_lanes,
216 .set_clock_gating = &radeon_legacy_set_clock_gating,
217 .set_surface_reg = r100_set_surface_reg,
218 .clear_surface_reg = r100_clear_surface_reg,
219 .bandwidth_update = &r100_bandwidth_update,
220 .hpd_init = &r100_hpd_init,
221 .hpd_fini = &r100_hpd_fini,
222 .hpd_sense = &r100_hpd_sense,
223 .hpd_set_polarity = &r100_hpd_set_polarity,
224 .ioctl_wait_idle = NULL,
225};
226
227
228static struct radeon_asic r300_asic_pcie = {
229 .init = &r300_init,
230 .fini = &r300_fini,
231 .suspend = &r300_suspend,
232 .resume = &r300_resume,
233 .vga_set_state = &r100_vga_set_state,
234 .gpu_reset = &r300_gpu_reset,
235 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
236 .gart_set_page = &rv370_pcie_gart_set_page,
237 .cp_commit = &r100_cp_commit,
238 .ring_start = &r300_ring_start,
239 .ring_test = &r100_ring_test,
240 .ring_ib_execute = &r100_ring_ib_execute,
241 .irq_set = &r100_irq_set,
242 .irq_process = &r100_irq_process,
243 .get_vblank_counter = &r100_get_vblank_counter,
244 .fence_ring_emit = &r300_fence_ring_emit,
245 .cs_parse = &r300_cs_parse,
246 .copy_blit = &r100_copy_blit,
247 .copy_dma = &r200_copy_dma,
166 .copy = &r100_copy_blit, 248 .copy = &r100_copy_blit,
167 .get_engine_clock = &radeon_legacy_get_engine_clock, 249 .get_engine_clock = &radeon_legacy_get_engine_clock,
168 .set_engine_clock = &radeon_legacy_set_engine_clock, 250 .set_engine_clock = &radeon_legacy_set_engine_clock,
@@ -206,12 +288,13 @@ static struct radeon_asic r420_asic = {
206 .fence_ring_emit = &r300_fence_ring_emit, 288 .fence_ring_emit = &r300_fence_ring_emit,
207 .cs_parse = &r300_cs_parse, 289 .cs_parse = &r300_cs_parse,
208 .copy_blit = &r100_copy_blit, 290 .copy_blit = &r100_copy_blit,
209 .copy_dma = &r300_copy_dma, 291 .copy_dma = &r200_copy_dma,
210 .copy = &r100_copy_blit, 292 .copy = &r100_copy_blit,
211 .get_engine_clock = &radeon_atom_get_engine_clock, 293 .get_engine_clock = &radeon_atom_get_engine_clock,
212 .set_engine_clock = &radeon_atom_set_engine_clock, 294 .set_engine_clock = &radeon_atom_set_engine_clock,
213 .get_memory_clock = &radeon_atom_get_memory_clock, 295 .get_memory_clock = &radeon_atom_get_memory_clock,
214 .set_memory_clock = &radeon_atom_set_memory_clock, 296 .set_memory_clock = &radeon_atom_set_memory_clock,
297 .get_pcie_lanes = &rv370_get_pcie_lanes,
215 .set_pcie_lanes = &rv370_set_pcie_lanes, 298 .set_pcie_lanes = &rv370_set_pcie_lanes,
216 .set_clock_gating = &radeon_atom_set_clock_gating, 299 .set_clock_gating = &radeon_atom_set_clock_gating,
217 .set_surface_reg = r100_set_surface_reg, 300 .set_surface_reg = r100_set_surface_reg,
@@ -255,12 +338,13 @@ static struct radeon_asic rs400_asic = {
255 .fence_ring_emit = &r300_fence_ring_emit, 338 .fence_ring_emit = &r300_fence_ring_emit,
256 .cs_parse = &r300_cs_parse, 339 .cs_parse = &r300_cs_parse,
257 .copy_blit = &r100_copy_blit, 340 .copy_blit = &r100_copy_blit,
258 .copy_dma = &r300_copy_dma, 341 .copy_dma = &r200_copy_dma,
259 .copy = &r100_copy_blit, 342 .copy = &r100_copy_blit,
260 .get_engine_clock = &radeon_legacy_get_engine_clock, 343 .get_engine_clock = &radeon_legacy_get_engine_clock,
261 .set_engine_clock = &radeon_legacy_set_engine_clock, 344 .set_engine_clock = &radeon_legacy_set_engine_clock,
262 .get_memory_clock = &radeon_legacy_get_memory_clock, 345 .get_memory_clock = &radeon_legacy_get_memory_clock,
263 .set_memory_clock = NULL, 346 .set_memory_clock = NULL,
347 .get_pcie_lanes = NULL,
264 .set_pcie_lanes = NULL, 348 .set_pcie_lanes = NULL,
265 .set_clock_gating = &radeon_legacy_set_clock_gating, 349 .set_clock_gating = &radeon_legacy_set_clock_gating,
266 .set_surface_reg = r100_set_surface_reg, 350 .set_surface_reg = r100_set_surface_reg,
@@ -314,14 +398,17 @@ static struct radeon_asic rs600_asic = {
314 .fence_ring_emit = &r300_fence_ring_emit, 398 .fence_ring_emit = &r300_fence_ring_emit,
315 .cs_parse = &r300_cs_parse, 399 .cs_parse = &r300_cs_parse,
316 .copy_blit = &r100_copy_blit, 400 .copy_blit = &r100_copy_blit,
317 .copy_dma = &r300_copy_dma, 401 .copy_dma = &r200_copy_dma,
318 .copy = &r100_copy_blit, 402 .copy = &r100_copy_blit,
319 .get_engine_clock = &radeon_atom_get_engine_clock, 403 .get_engine_clock = &radeon_atom_get_engine_clock,
320 .set_engine_clock = &radeon_atom_set_engine_clock, 404 .set_engine_clock = &radeon_atom_set_engine_clock,
321 .get_memory_clock = &radeon_atom_get_memory_clock, 405 .get_memory_clock = &radeon_atom_get_memory_clock,
322 .set_memory_clock = &radeon_atom_set_memory_clock, 406 .set_memory_clock = &radeon_atom_set_memory_clock,
407 .get_pcie_lanes = NULL,
323 .set_pcie_lanes = NULL, 408 .set_pcie_lanes = NULL,
324 .set_clock_gating = &radeon_atom_set_clock_gating, 409 .set_clock_gating = &radeon_atom_set_clock_gating,
410 .set_surface_reg = r100_set_surface_reg,
411 .clear_surface_reg = r100_clear_surface_reg,
325 .bandwidth_update = &rs600_bandwidth_update, 412 .bandwidth_update = &rs600_bandwidth_update,
326 .hpd_init = &rs600_hpd_init, 413 .hpd_init = &rs600_hpd_init,
327 .hpd_fini = &rs600_hpd_fini, 414 .hpd_fini = &rs600_hpd_fini,
@@ -360,12 +447,13 @@ static struct radeon_asic rs690_asic = {
360 .fence_ring_emit = &r300_fence_ring_emit, 447 .fence_ring_emit = &r300_fence_ring_emit,
361 .cs_parse = &r300_cs_parse, 448 .cs_parse = &r300_cs_parse,
362 .copy_blit = &r100_copy_blit, 449 .copy_blit = &r100_copy_blit,
363 .copy_dma = &r300_copy_dma, 450 .copy_dma = &r200_copy_dma,
364 .copy = &r300_copy_dma, 451 .copy = &r200_copy_dma,
365 .get_engine_clock = &radeon_atom_get_engine_clock, 452 .get_engine_clock = &radeon_atom_get_engine_clock,
366 .set_engine_clock = &radeon_atom_set_engine_clock, 453 .set_engine_clock = &radeon_atom_set_engine_clock,
367 .get_memory_clock = &radeon_atom_get_memory_clock, 454 .get_memory_clock = &radeon_atom_get_memory_clock,
368 .set_memory_clock = &radeon_atom_set_memory_clock, 455 .set_memory_clock = &radeon_atom_set_memory_clock,
456 .get_pcie_lanes = NULL,
369 .set_pcie_lanes = NULL, 457 .set_pcie_lanes = NULL,
370 .set_clock_gating = &radeon_atom_set_clock_gating, 458 .set_clock_gating = &radeon_atom_set_clock_gating,
371 .set_surface_reg = r100_set_surface_reg, 459 .set_surface_reg = r100_set_surface_reg,
@@ -412,12 +500,13 @@ static struct radeon_asic rv515_asic = {
412 .fence_ring_emit = &r300_fence_ring_emit, 500 .fence_ring_emit = &r300_fence_ring_emit,
413 .cs_parse = &r300_cs_parse, 501 .cs_parse = &r300_cs_parse,
414 .copy_blit = &r100_copy_blit, 502 .copy_blit = &r100_copy_blit,
415 .copy_dma = &r300_copy_dma, 503 .copy_dma = &r200_copy_dma,
416 .copy = &r100_copy_blit, 504 .copy = &r100_copy_blit,
417 .get_engine_clock = &radeon_atom_get_engine_clock, 505 .get_engine_clock = &radeon_atom_get_engine_clock,
418 .set_engine_clock = &radeon_atom_set_engine_clock, 506 .set_engine_clock = &radeon_atom_set_engine_clock,
419 .get_memory_clock = &radeon_atom_get_memory_clock, 507 .get_memory_clock = &radeon_atom_get_memory_clock,
420 .set_memory_clock = &radeon_atom_set_memory_clock, 508 .set_memory_clock = &radeon_atom_set_memory_clock,
509 .get_pcie_lanes = &rv370_get_pcie_lanes,
421 .set_pcie_lanes = &rv370_set_pcie_lanes, 510 .set_pcie_lanes = &rv370_set_pcie_lanes,
422 .set_clock_gating = &radeon_atom_set_clock_gating, 511 .set_clock_gating = &radeon_atom_set_clock_gating,
423 .set_surface_reg = r100_set_surface_reg, 512 .set_surface_reg = r100_set_surface_reg,
@@ -455,12 +544,13 @@ static struct radeon_asic r520_asic = {
455 .fence_ring_emit = &r300_fence_ring_emit, 544 .fence_ring_emit = &r300_fence_ring_emit,
456 .cs_parse = &r300_cs_parse, 545 .cs_parse = &r300_cs_parse,
457 .copy_blit = &r100_copy_blit, 546 .copy_blit = &r100_copy_blit,
458 .copy_dma = &r300_copy_dma, 547 .copy_dma = &r200_copy_dma,
459 .copy = &r100_copy_blit, 548 .copy = &r100_copy_blit,
460 .get_engine_clock = &radeon_atom_get_engine_clock, 549 .get_engine_clock = &radeon_atom_get_engine_clock,
461 .set_engine_clock = &radeon_atom_set_engine_clock, 550 .set_engine_clock = &radeon_atom_set_engine_clock,
462 .get_memory_clock = &radeon_atom_get_memory_clock, 551 .get_memory_clock = &radeon_atom_get_memory_clock,
463 .set_memory_clock = &radeon_atom_set_memory_clock, 552 .set_memory_clock = &radeon_atom_set_memory_clock,
553 .get_pcie_lanes = &rv370_get_pcie_lanes,
464 .set_pcie_lanes = &rv370_set_pcie_lanes, 554 .set_pcie_lanes = &rv370_set_pcie_lanes,
465 .set_clock_gating = &radeon_atom_set_clock_gating, 555 .set_clock_gating = &radeon_atom_set_clock_gating,
466 .set_surface_reg = r100_set_surface_reg, 556 .set_surface_reg = r100_set_surface_reg,
@@ -538,8 +628,9 @@ static struct radeon_asic r600_asic = {
538 .set_engine_clock = &radeon_atom_set_engine_clock, 628 .set_engine_clock = &radeon_atom_set_engine_clock,
539 .get_memory_clock = &radeon_atom_get_memory_clock, 629 .get_memory_clock = &radeon_atom_get_memory_clock,
540 .set_memory_clock = &radeon_atom_set_memory_clock, 630 .set_memory_clock = &radeon_atom_set_memory_clock,
631 .get_pcie_lanes = &rv370_get_pcie_lanes,
541 .set_pcie_lanes = NULL, 632 .set_pcie_lanes = NULL,
542 .set_clock_gating = &radeon_atom_set_clock_gating, 633 .set_clock_gating = NULL,
543 .set_surface_reg = r600_set_surface_reg, 634 .set_surface_reg = r600_set_surface_reg,
544 .clear_surface_reg = r600_clear_surface_reg, 635 .clear_surface_reg = r600_clear_surface_reg,
545 .bandwidth_update = &rv515_bandwidth_update, 636 .bandwidth_update = &rv515_bandwidth_update,
@@ -583,6 +674,7 @@ static struct radeon_asic rv770_asic = {
583 .set_engine_clock = &radeon_atom_set_engine_clock, 674 .set_engine_clock = &radeon_atom_set_engine_clock,
584 .get_memory_clock = &radeon_atom_get_memory_clock, 675 .get_memory_clock = &radeon_atom_get_memory_clock,
585 .set_memory_clock = &radeon_atom_set_memory_clock, 676 .set_memory_clock = &radeon_atom_set_memory_clock,
677 .get_pcie_lanes = &rv370_get_pcie_lanes,
586 .set_pcie_lanes = NULL, 678 .set_pcie_lanes = NULL,
587 .set_clock_gating = &radeon_atom_set_clock_gating, 679 .set_clock_gating = &radeon_atom_set_clock_gating,
588 .set_surface_reg = r600_set_surface_reg, 680 .set_surface_reg = r600_set_surface_reg,
@@ -595,4 +687,54 @@ static struct radeon_asic rv770_asic = {
595 .ioctl_wait_idle = r600_ioctl_wait_idle, 687 .ioctl_wait_idle = r600_ioctl_wait_idle,
596}; 688};
597 689
690/*
691 * evergreen
692 */
693int evergreen_init(struct radeon_device *rdev);
694void evergreen_fini(struct radeon_device *rdev);
695int evergreen_suspend(struct radeon_device *rdev);
696int evergreen_resume(struct radeon_device *rdev);
697int evergreen_gpu_reset(struct radeon_device *rdev);
698void evergreen_bandwidth_update(struct radeon_device *rdev);
699void evergreen_hpd_init(struct radeon_device *rdev);
700void evergreen_hpd_fini(struct radeon_device *rdev);
701bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
702void evergreen_hpd_set_polarity(struct radeon_device *rdev,
703 enum radeon_hpd_id hpd);
704
705static struct radeon_asic evergreen_asic = {
706 .init = &evergreen_init,
707 .fini = &evergreen_fini,
708 .suspend = &evergreen_suspend,
709 .resume = &evergreen_resume,
710 .cp_commit = NULL,
711 .gpu_reset = &evergreen_gpu_reset,
712 .vga_set_state = &r600_vga_set_state,
713 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
714 .gart_set_page = &rs600_gart_set_page,
715 .ring_test = NULL,
716 .ring_ib_execute = NULL,
717 .irq_set = NULL,
718 .irq_process = NULL,
719 .get_vblank_counter = NULL,
720 .fence_ring_emit = NULL,
721 .cs_parse = NULL,
722 .copy_blit = NULL,
723 .copy_dma = NULL,
724 .copy = NULL,
725 .get_engine_clock = &radeon_atom_get_engine_clock,
726 .set_engine_clock = &radeon_atom_set_engine_clock,
727 .get_memory_clock = &radeon_atom_get_memory_clock,
728 .set_memory_clock = &radeon_atom_set_memory_clock,
729 .set_pcie_lanes = NULL,
730 .set_clock_gating = NULL,
731 .set_surface_reg = r600_set_surface_reg,
732 .clear_surface_reg = r600_clear_surface_reg,
733 .bandwidth_update = &evergreen_bandwidth_update,
734 .hpd_init = &evergreen_hpd_init,
735 .hpd_fini = &evergreen_hpd_fini,
736 .hpd_sense = &evergreen_hpd_sense,
737 .hpd_set_polarity = &evergreen_hpd_set_polarity,
738};
739
598#endif 740#endif
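
radeon_asic.h now carries one ops table per GPU family (r100, the new r200 table, r300, the PCIe variant, evergreen, and so on), and optional hooks such as get_pcie_lanes or set_clock_gating are left NULL where a family has no implementation, so callers must test before dispatching. The reduced sketch below shows that function-pointer table pattern; the struct and helper names are simplified stand-ins rather than the driver's actual definitions.

/* Simplified vtable sketch in the style of struct radeon_asic. */
#include <stdio.h>

struct device;

struct asic_ops {
	int (*init)(struct device *dev);
	int (*get_pcie_lanes)(struct device *dev); /* optional: may be NULL */
};

static int r100_init(struct device *dev) { (void)dev; return 0; }
static int rv370_get_pcie_lanes(struct device *dev) { (void)dev; return 16; }

static const struct asic_ops r100_ops = {
	.init = r100_init,
	.get_pcie_lanes = NULL,            /* no PCIe on plain r100 */
};

static const struct asic_ops r300_pcie_ops = {
	.init = r100_init,
	.get_pcie_lanes = rv370_get_pcie_lanes,
};

/* callers guard optional hooks before dispatching */
static int report_lanes(const struct asic_ops *ops, struct device *dev)
{
	if (ops->get_pcie_lanes)
		return ops->get_pcie_lanes(dev);
	return 16;                         /* fallback default */
}

int main(void)
{
	printf("r100: %d lanes, r300 pcie: %d lanes\n",
	       report_lanes(&r100_ops, NULL),
	       report_lanes(&r300_pcie_ops, NULL));
	return 0;
}
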
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 4d8831548a5f..93783b15c81d 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -159,8 +159,15 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
159 struct radeon_gpio_rec *gpio) 159 struct radeon_gpio_rec *gpio)
160{ 160{
161 struct radeon_hpd hpd; 161 struct radeon_hpd hpd;
162 u32 reg;
163
164 if (ASIC_IS_DCE4(rdev))
165 reg = EVERGREEN_DC_GPIO_HPD_A;
166 else
167 reg = AVIVO_DC_GPIO_HPD_A;
168
162 hpd.gpio = *gpio; 169 hpd.gpio = *gpio;
163 if (gpio->reg == AVIVO_DC_GPIO_HPD_A) { 170 if (gpio->reg == reg) {
164 switch(gpio->mask) { 171 switch(gpio->mask) {
165 case (1 << 0): 172 case (1 << 0):
166 hpd.hpd = RADEON_HPD_1; 173 hpd.hpd = RADEON_HPD_1;
@@ -574,6 +581,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
574 ddc_bus.valid = false; 581 ddc_bus.valid = false;
575 } 582 }
576 583
584 /* needed for aux chan transactions */
585 ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;
586
577 conn_id = le16_to_cpu(path->usConnObjectId); 587 conn_id = le16_to_cpu(path->usConnObjectId);
578 588
579 if (!radeon_atom_apply_quirks 589 if (!radeon_atom_apply_quirks
@@ -838,6 +848,7 @@ union firmware_info {
838 ATOM_FIRMWARE_INFO_V1_2 info_12; 848 ATOM_FIRMWARE_INFO_V1_2 info_12;
839 ATOM_FIRMWARE_INFO_V1_3 info_13; 849 ATOM_FIRMWARE_INFO_V1_3 info_13;
840 ATOM_FIRMWARE_INFO_V1_4 info_14; 850 ATOM_FIRMWARE_INFO_V1_4 info_14;
851 ATOM_FIRMWARE_INFO_V2_1 info_21;
841}; 852};
842 853
843bool radeon_atom_get_clock_info(struct drm_device *dev) 854bool radeon_atom_get_clock_info(struct drm_device *dev)
@@ -849,6 +860,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
849 uint8_t frev, crev; 860 uint8_t frev, crev;
850 struct radeon_pll *p1pll = &rdev->clock.p1pll; 861 struct radeon_pll *p1pll = &rdev->clock.p1pll;
851 struct radeon_pll *p2pll = &rdev->clock.p2pll; 862 struct radeon_pll *p2pll = &rdev->clock.p2pll;
863 struct radeon_pll *dcpll = &rdev->clock.dcpll;
852 struct radeon_pll *spll = &rdev->clock.spll; 864 struct radeon_pll *spll = &rdev->clock.spll;
853 struct radeon_pll *mpll = &rdev->clock.mpll; 865 struct radeon_pll *mpll = &rdev->clock.mpll;
854 uint16_t data_offset; 866 uint16_t data_offset;
@@ -951,8 +963,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
951 rdev->clock.default_mclk = 963 rdev->clock.default_mclk =
952 le32_to_cpu(firmware_info->info.ulDefaultMemoryClock); 964 le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
953 965
966 if (ASIC_IS_DCE4(rdev)) {
967 rdev->clock.default_dispclk =
968 le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
969 if (rdev->clock.default_dispclk == 0)
970 rdev->clock.default_dispclk = 60000; /* 600 Mhz */
971 rdev->clock.dp_extclk =
972 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
973 }
974 *dcpll = *p1pll;
975
954 return true; 976 return true;
955 } 977 }
978
956 return false; 979 return false;
957} 980}
958 981
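
The DCE4 branch above reads ulDefaultDispEngineClkFreq and substitutes 60000 when the firmware table reports zero; AtomBIOS clock fields are expressed in 10 kHz units, which is why 60000 corresponds to the 600 MHz noted in the comment. A minimal sketch of that fallback and unit conversion (the zero input is just an example of an empty table entry):

/* Sketch of the default display-clock fallback added above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t default_dispclk = 0;    /* as read from the firmware table */

	if (default_dispclk == 0)
		default_dispclk = 60000; /* AtomBIOS clocks are in 10 kHz units */

	printf("display clock: %u MHz\n", default_dispclk / 100);
	return 0;
}
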
@@ -1091,6 +1114,30 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
1091 return ss; 1114 return ss;
1092} 1115}
1093 1116
1117static void radeon_atom_apply_lvds_quirks(struct drm_device *dev,
1118 struct radeon_encoder_atom_dig *lvds)
1119{
1120
1121 /* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */
1122 if ((dev->pdev->device == 0x95c4) &&
1123 (dev->pdev->subsystem_vendor == 0x1179) &&
1124 (dev->pdev->subsystem_device == 0xff50)) {
1125 if ((lvds->native_mode.hdisplay == 1280) &&
1126 (lvds->native_mode.vdisplay == 800))
1127 lvds->pll_algo = PLL_ALGO_LEGACY;
1128 }
1129
1130 /* Dell Studio 15 laptop panel doesn't like new pll divider algo */
1131 if ((dev->pdev->device == 0x95c4) &&
1132 (dev->pdev->subsystem_vendor == 0x1028) &&
1133 (dev->pdev->subsystem_device == 0x029f)) {
1134 if ((lvds->native_mode.hdisplay == 1280) &&
1135 (lvds->native_mode.vdisplay == 800))
1136 lvds->pll_algo = PLL_ALGO_LEGACY;
1137 }
1138
1139}
1140
1094union lvds_info { 1141union lvds_info {
1095 struct _ATOM_LVDS_INFO info; 1142 struct _ATOM_LVDS_INFO info;
1096 struct _ATOM_LVDS_INFO_V12 info_12; 1143 struct _ATOM_LVDS_INFO_V12 info_12;
@@ -1161,6 +1208,21 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1161 1208
1162 lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id); 1209 lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
1163 1210
1211 if (ASIC_IS_AVIVO(rdev)) {
1212 if (radeon_new_pll == 0)
1213 lvds->pll_algo = PLL_ALGO_LEGACY;
1214 else
1215 lvds->pll_algo = PLL_ALGO_NEW;
1216 } else {
1217 if (radeon_new_pll == 1)
1218 lvds->pll_algo = PLL_ALGO_NEW;
1219 else
1220 lvds->pll_algo = PLL_ALGO_LEGACY;
1221 }
1222
1223 /* LVDS quirks */
1224 radeon_atom_apply_lvds_quirks(dev, lvds);
1225
1164 encoder->native_mode = lvds->native_mode; 1226 encoder->native_mode = lvds->native_mode;
1165 } 1227 }
1166 return lvds; 1228 return lvds;
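
The new LVDS code above selects a PLL divider algorithm from the radeon_new_pll module parameter: AVIVO parts default to the new algorithm unless the parameter is 0, pre-AVIVO parts default to legacy unless it is 1, and the quirk table forces PLL_ALGO_LEGACY on the two 1280x800 panels listed. A stand-alone restatement of that selection; the helper function itself is illustrative, not part of the driver.

/* Illustrative restatement of the LVDS PLL algorithm selection. */
#include <stdio.h>
#include <stdbool.h>

enum pll_algo { PLL_ALGO_LEGACY, PLL_ALGO_NEW };

static enum pll_algo pick_pll_algo(bool is_avivo, int radeon_new_pll)
{
	if (is_avivo)
		/* AVIVO: new algo unless explicitly disabled (parameter == 0) */
		return radeon_new_pll == 0 ? PLL_ALGO_LEGACY : PLL_ALGO_NEW;

	/* pre-AVIVO: legacy algo unless explicitly requested (parameter == 1) */
	return radeon_new_pll == 1 ? PLL_ALGO_NEW : PLL_ALGO_LEGACY;
}

int main(void)
{
	printf("avivo, auto (-1): %s\n",
	       pick_pll_algo(true, -1) == PLL_ALGO_NEW ? "new" : "legacy");
	printf("legacy chip, auto (-1): %s\n",
	       pick_pll_algo(false, -1) == PLL_ALGO_NEW ? "new" : "legacy");
	return 0;
}
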
@@ -1385,20 +1447,375 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
1385 return tv_dac; 1447 return tv_dac;
1386} 1448}
1387 1449
1388void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) 1450union power_info {
1451 struct _ATOM_POWERPLAY_INFO info;
1452 struct _ATOM_POWERPLAY_INFO_V2 info_2;
1453 struct _ATOM_POWERPLAY_INFO_V3 info_3;
1454 struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
1455};
1456
1457void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1389{ 1458{
1390 DYNAMIC_CLOCK_GATING_PS_ALLOCATION args; 1459 struct radeon_mode_info *mode_info = &rdev->mode_info;
1391 int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating); 1460 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
1461 u16 data_offset;
1462 u8 frev, crev;
1463 u32 misc, misc2 = 0, sclk, mclk;
1464 union power_info *power_info;
1465 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
1466 struct _ATOM_PPLIB_STATE *power_state;
1467 int num_modes = 0, i, j;
1468 int state_index = 0, mode_index = 0;
1392 1469
1393 args.ucEnable = enable; 1470 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
1394 1471
1395 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1472 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1473
1474 rdev->pm.default_power_state = NULL;
1475
1476 if (power_info) {
1477 if (frev < 4) {
1478 num_modes = power_info->info.ucNumOfPowerModeEntries;
1479 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
1480 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
1481 for (i = 0; i < num_modes; i++) {
1482 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
1483 switch (frev) {
1484 case 1:
1485 rdev->pm.power_state[state_index].num_clock_modes = 1;
1486 rdev->pm.power_state[state_index].clock_info[0].mclk =
1487 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
1488 rdev->pm.power_state[state_index].clock_info[0].sclk =
1489 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
1490 /* skip invalid modes */
1491 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
1492 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
1493 continue;
1494 /* skip overclock modes for now */
1495 if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
1496 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
1497 (rdev->pm.power_state[state_index].clock_info[0].sclk >
1498 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
1499 continue;
1500 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
1501 power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
1502 misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
1503 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
1504 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1505 VOLTAGE_GPIO;
1506 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1507 radeon_lookup_gpio(rdev,
1508 power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
1509 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
1510 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1511 true;
1512 else
1513 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1514 false;
1515 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
1516 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1517 VOLTAGE_VDDC;
1518 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
1519 power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
1520 }
1521 /* order matters! */
1522 if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
1523 rdev->pm.power_state[state_index].type =
1524 POWER_STATE_TYPE_POWERSAVE;
1525 if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
1526 rdev->pm.power_state[state_index].type =
1527 POWER_STATE_TYPE_BATTERY;
1528 if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
1529 rdev->pm.power_state[state_index].type =
1530 POWER_STATE_TYPE_BATTERY;
1531 if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
1532 rdev->pm.power_state[state_index].type =
1533 POWER_STATE_TYPE_BALANCED;
1534 if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
1535 rdev->pm.power_state[state_index].type =
1536 POWER_STATE_TYPE_PERFORMANCE;
1537 if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
1538 rdev->pm.power_state[state_index].type =
1539 POWER_STATE_TYPE_DEFAULT;
1540 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1541 rdev->pm.power_state[state_index].default_clock_mode =
1542 &rdev->pm.power_state[state_index].clock_info[0];
1543 }
1544 state_index++;
1545 break;
1546 case 2:
1547 rdev->pm.power_state[state_index].num_clock_modes = 1;
1548 rdev->pm.power_state[state_index].clock_info[0].mclk =
1549 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
1550 rdev->pm.power_state[state_index].clock_info[0].sclk =
1551 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
1552 /* skip invalid modes */
1553 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
1554 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
1555 continue;
1556 /* skip overclock modes for now */
1557 if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
1558 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
1559 (rdev->pm.power_state[state_index].clock_info[0].sclk >
1560 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
1561 continue;
1562 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
1563 power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
1564 misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
1565 misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
1566 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
1567 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1568 VOLTAGE_GPIO;
1569 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1570 radeon_lookup_gpio(rdev,
1571 power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
1572 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
1573 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1574 true;
1575 else
1576 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1577 false;
1578 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
1579 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1580 VOLTAGE_VDDC;
1581 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
1582 power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
1583 }
1584 /* order matters! */
1585 if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
1586 rdev->pm.power_state[state_index].type =
1587 POWER_STATE_TYPE_POWERSAVE;
1588 if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
1589 rdev->pm.power_state[state_index].type =
1590 POWER_STATE_TYPE_BATTERY;
1591 if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
1592 rdev->pm.power_state[state_index].type =
1593 POWER_STATE_TYPE_BATTERY;
1594 if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
1595 rdev->pm.power_state[state_index].type =
1596 POWER_STATE_TYPE_BALANCED;
1597 if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
1598 rdev->pm.power_state[state_index].type =
1599 POWER_STATE_TYPE_PERFORMANCE;
1600 if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
1601 rdev->pm.power_state[state_index].type =
1602 POWER_STATE_TYPE_BALANCED;
1603 if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
1604 rdev->pm.power_state[state_index].type =
1605 POWER_STATE_TYPE_DEFAULT;
1606 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1607 rdev->pm.power_state[state_index].default_clock_mode =
1608 &rdev->pm.power_state[state_index].clock_info[0];
1609 }
1610 state_index++;
1611 break;
1612 case 3:
1613 rdev->pm.power_state[state_index].num_clock_modes = 1;
1614 rdev->pm.power_state[state_index].clock_info[0].mclk =
1615 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
1616 rdev->pm.power_state[state_index].clock_info[0].sclk =
1617 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
1618 /* skip invalid modes */
1619 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
1620 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
1621 continue;
1622 /* skip overclock modes for now */
1623 if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
1624 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
1625 (rdev->pm.power_state[state_index].clock_info[0].sclk >
1626 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
1627 continue;
1628 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
1629 power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
1630 misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
1631 misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
1632 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
1633 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1634 VOLTAGE_GPIO;
1635 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1636 radeon_lookup_gpio(rdev,
1637 power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
1638 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
1639 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1640 true;
1641 else
1642 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1643 false;
1644 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
1645 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1646 VOLTAGE_VDDC;
1647 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
1648 power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
1649 if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
1650 rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
1651 true;
1652 rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
1653 power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
1654 }
1655 }
1656 /* order matters! */
1657 if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
1658 rdev->pm.power_state[state_index].type =
1659 POWER_STATE_TYPE_POWERSAVE;
1660 if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
1661 rdev->pm.power_state[state_index].type =
1662 POWER_STATE_TYPE_BATTERY;
1663 if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
1664 rdev->pm.power_state[state_index].type =
1665 POWER_STATE_TYPE_BATTERY;
1666 if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
1667 rdev->pm.power_state[state_index].type =
1668 POWER_STATE_TYPE_BALANCED;
1669 if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
1670 rdev->pm.power_state[state_index].type =
1671 POWER_STATE_TYPE_PERFORMANCE;
1672 if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
1673 rdev->pm.power_state[state_index].type =
1674 POWER_STATE_TYPE_BALANCED;
1675 if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
1676 rdev->pm.power_state[state_index].type =
1677 POWER_STATE_TYPE_DEFAULT;
1678 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1679 rdev->pm.power_state[state_index].default_clock_mode =
1680 &rdev->pm.power_state[state_index].clock_info[0];
1681 }
1682 state_index++;
1683 break;
1684 }
1685 }
1686 } else if (frev == 4) {
1687 for (i = 0; i < power_info->info_4.ucNumStates; i++) {
1688 mode_index = 0;
1689 power_state = (struct _ATOM_PPLIB_STATE *)
1690 (mode_info->atom_context->bios +
1691 data_offset +
1692 le16_to_cpu(power_info->info_4.usStateArrayOffset) +
1693 i * power_info->info_4.ucStateEntrySize);
1694 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
1695 (mode_info->atom_context->bios +
1696 data_offset +
1697 le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
1698 (power_state->ucNonClockStateIndex *
1699 power_info->info_4.ucNonClockSize));
1700 for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
1701 if (rdev->flags & RADEON_IS_IGP) {
1702 struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
1703 (struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
1704 (mode_info->atom_context->bios +
1705 data_offset +
1706 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
1707 (power_state->ucClockStateIndices[j] *
1708 power_info->info_4.ucClockInfoSize));
1709 sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
1710 sclk |= clock_info->ucLowEngineClockHigh << 16;
1711 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
1712 /* skip invalid modes */
1713 if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
1714 continue;
1715 /* skip overclock modes for now */
1716 if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
1717 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
1718 continue;
1719 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
1720 VOLTAGE_SW;
1721 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
1722 clock_info->usVDDC;
1723 mode_index++;
1724 } else {
1725 struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
1726 (struct _ATOM_PPLIB_R600_CLOCK_INFO *)
1727 (mode_info->atom_context->bios +
1728 data_offset +
1729 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
1730 (power_state->ucClockStateIndices[j] *
1731 power_info->info_4.ucClockInfoSize));
1732 sclk = le16_to_cpu(clock_info->usEngineClockLow);
1733 sclk |= clock_info->ucEngineClockHigh << 16;
1734 mclk = le16_to_cpu(clock_info->usMemoryClockLow);
1735 mclk |= clock_info->ucMemoryClockHigh << 16;
1736 rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
1737 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
1738 /* skip invalid modes */
1739 if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
1740 (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
1741 continue;
1742 /* skip overclock modes for now */
1743 if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
1744 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
1745 (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
1746 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
1747 continue;
1748 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
1749 VOLTAGE_SW;
1750 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
1751 clock_info->usVDDC;
1752 mode_index++;
1753 }
1754 }
1755 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
1756 if (mode_index) {
1757 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
1758 misc2 = le16_to_cpu(non_clock_info->usClassification);
1759 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
1760 ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
1761 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
1762 switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
1763 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
1764 rdev->pm.power_state[state_index].type =
1765 POWER_STATE_TYPE_BATTERY;
1766 break;
1767 case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
1768 rdev->pm.power_state[state_index].type =
1769 POWER_STATE_TYPE_BALANCED;
1770 break;
1771 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
1772 rdev->pm.power_state[state_index].type =
1773 POWER_STATE_TYPE_PERFORMANCE;
1774 break;
1775 }
1776 if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
1777 rdev->pm.power_state[state_index].type =
1778 POWER_STATE_TYPE_DEFAULT;
1779 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1780 rdev->pm.power_state[state_index].default_clock_mode =
1781 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
1782 }
1783 state_index++;
1784 }
1785 }
1786 }
1787 } else {
1788 /* XXX figure out some good default low power mode for cards w/out power tables */
1789 }
1790
1791 if (rdev->pm.default_power_state == NULL) {
1792 /* add the default mode */
1793 rdev->pm.power_state[state_index].type =
1794 POWER_STATE_TYPE_DEFAULT;
1795 rdev->pm.power_state[state_index].num_clock_modes = 1;
1796 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
1797 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
1798 rdev->pm.power_state[state_index].default_clock_mode =
1799 &rdev->pm.power_state[state_index].clock_info[0];
1800 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
1801 if (rdev->asic->get_pcie_lanes)
1802 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
1803 else
1804 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
1805 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
1806 state_index++;
1807 }
1808 rdev->pm.num_power_states = state_index;
1809
1810 rdev->pm.current_power_state = rdev->pm.default_power_state;
1811 rdev->pm.current_clock_mode =
1812 rdev->pm.default_power_state->default_clock_mode;
1396} 1813}
1397 1814
1398void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable) 1815void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
1399{ 1816{
1400 ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args; 1817 DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
1401 int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt); 1818 int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
1402 1819
1403 args.ucEnable = enable; 1820 args.ucEnable = enable;
1404 1821
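
In the frev 4 (PPLIB) path of radeon_atombios_get_power_modes() above, engine and memory clocks are stored as a little-endian 16-bit low word plus an 8-bit high byte, so the parser reassembles each value as low | high << 16 before the invalid/overclock filtering. A short sketch of that reassembly (the sample field values are invented):

/* Sketch of the 24-bit clock reassembly used when parsing PPLIB clock
 * info entries; the example values are made up. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* as they would appear in the little-endian firmware table */
	uint16_t usEngineClockLow  = 0x6590; /* low 16 bits */
	uint8_t  ucEngineClockHigh = 0x01;   /* high 8 bits */

	uint32_t sclk = usEngineClockLow;    /* le16_to_cpu() in the driver */
	sclk |= (uint32_t)ucEngineClockHigh << 16;

	/* AtomBIOS clocks are in 10 kHz units */
	printf("sclk = %u (= %u MHz)\n", sclk, sclk / 100);
	return 0;
}
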
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
new file mode 100644
index 000000000000..3f557c4151e0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -0,0 +1,257 @@
1/*
2 * Copyright (c) 2010 Red Hat Inc.
3 * Author : Dave Airlie <airlied@redhat.com>
4 *
5 * Licensed under GPLv2
6 *
7 * ATPX support for both Intel/ATI
8 */
9#include <linux/vga_switcheroo.h>
10#include <acpi/acpi.h>
11#include <acpi/acpi_bus.h>
12#include <linux/pci.h>
13
14#define ATPX_VERSION 0
15#define ATPX_GPU_PWR 2
16#define ATPX_MUX_SELECT 3
17
18#define ATPX_INTEGRATED 0
19#define ATPX_DISCRETE 1
20
21#define ATPX_MUX_IGD 0
22#define ATPX_MUX_DISCRETE 1
23
24static struct radeon_atpx_priv {
25 bool atpx_detected;
26 /* handle for device - and atpx */
27 acpi_handle dhandle;
28 acpi_handle atpx_handle;
29 acpi_handle atrm_handle;
30} radeon_atpx_priv;
31
32/* retrieve the ROM in 4k blocks */
33static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
34 int offset, int len)
35{
36 acpi_status status;
37 union acpi_object atrm_arg_elements[2], *obj;
38 struct acpi_object_list atrm_arg;
39 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
40
41 atrm_arg.count = 2;
42 atrm_arg.pointer = &atrm_arg_elements[0];
43
44 atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
45 atrm_arg_elements[0].integer.value = offset;
46
47 atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
48 atrm_arg_elements[1].integer.value = len;
49
50 status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
51 if (ACPI_FAILURE(status)) {
52 printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
53 return -ENODEV;
54 }
55
56 obj = (union acpi_object *)buffer.pointer;
57 memcpy(bios+offset, obj->buffer.pointer, len);
58 kfree(buffer.pointer);
59 return len;
60}
61
62bool radeon_atrm_supported(struct pci_dev *pdev)
63{
64 /* get the discrete ROM only via ATRM */
65 if (!radeon_atpx_priv.atpx_detected)
66 return false;
67
68 if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
69 return false;
70 return true;
71}
72
73
74int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
75{
76 return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
77}
78
79static int radeon_atpx_get_version(acpi_handle handle)
80{
81 acpi_status status;
82 union acpi_object atpx_arg_elements[2], *obj;
83 struct acpi_object_list atpx_arg;
84 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
85
86 atpx_arg.count = 2;
87 atpx_arg.pointer = &atpx_arg_elements[0];
88
89 atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
90 atpx_arg_elements[0].integer.value = ATPX_VERSION;
91
92 atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
93 atpx_arg_elements[1].integer.value = ATPX_VERSION;
94
95 status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
96 if (ACPI_FAILURE(status)) {
97 printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
98 return -ENOSYS;
99 }
100 obj = (union acpi_object *)buffer.pointer;
101 if (obj && (obj->type == ACPI_TYPE_BUFFER))
102 printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2));
103 kfree(buffer.pointer);
104 return 0;
105}
106
107static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value)
108{
109 acpi_status status;
110 union acpi_object atpx_arg_elements[2];
111 struct acpi_object_list atpx_arg;
112 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
113 uint8_t buf[4] = {0};
114
115 if (!handle)
116 return -EINVAL;
117
118 atpx_arg.count = 2;
119 atpx_arg.pointer = &atpx_arg_elements[0];
120
121 atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
122 atpx_arg_elements[0].integer.value = cmd_id;
123
124 buf[2] = value & 0xff;
125 buf[3] = (value >> 8) & 0xff;
126
127 atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
128 atpx_arg_elements[1].buffer.length = 4;
129 atpx_arg_elements[1].buffer.pointer = buf;
130
131 status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
132 if (ACPI_FAILURE(status)) {
133 printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
134 return -ENOSYS;
135 }
136 kfree(buffer.pointer);
137
138 return 0;
139}
140
141static int radeon_atpx_set_discrete_state(acpi_handle handle, int state)
142{
143 return radeon_atpx_execute(handle, ATPX_GPU_PWR, state);
144}
145
146static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
147{
148 return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
149}
150
151
152static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
153{
154 if (id == VGA_SWITCHEROO_IGD)
155 radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0);
156 else
157 radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1);
158 return 0;
159}
160
161static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
162 enum vga_switcheroo_state state)
163{
164 /* on w500 ACPI can't change intel gpu state */
165 if (id == VGA_SWITCHEROO_IGD)
166 return 0;
167
168 radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state);
169 return 0;
170}
171
172static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
173{
174 acpi_handle dhandle, atpx_handle, atrm_handle;
175 acpi_status status;
176
177 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
178 if (!dhandle)
179 return false;
180
181 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
182 if (ACPI_FAILURE(status))
183 return false;
184
185 status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
186 if (ACPI_FAILURE(status))
187 return false;
188
189 radeon_atpx_priv.dhandle = dhandle;
190 radeon_atpx_priv.atpx_handle = atpx_handle;
191 radeon_atpx_priv.atrm_handle = atrm_handle;
192 return true;
193}
194
195static int radeon_atpx_init(void)
196{
197 /* set up the ATPX handle */
198
199 radeon_atpx_get_version(radeon_atpx_priv.atpx_handle);
200 return 0;
201}
202
203static int radeon_atpx_get_client_id(struct pci_dev *pdev)
204{
205 if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
206 return VGA_SWITCHEROO_IGD;
207 else
208 return VGA_SWITCHEROO_DIS;
209}
210
211static struct vga_switcheroo_handler radeon_atpx_handler = {
212 .switchto = radeon_atpx_switchto,
213 .power_state = radeon_atpx_power_state,
214 .init = radeon_atpx_init,
215 .get_client_id = radeon_atpx_get_client_id,
216};
217
218static bool radeon_atpx_detect(void)
219{
220 char acpi_method_name[255] = { 0 };
221 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
222 struct pci_dev *pdev = NULL;
223 bool has_atpx = false;
224 int vga_count = 0;
225
226 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
227 vga_count++;
228
229 has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
230 }
231
232 if (has_atpx && vga_count == 2) {
233 acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer);
234 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
235 acpi_method_name);
236 radeon_atpx_priv.atpx_detected = true;
237 return true;
238 }
239 return false;
240}
241
242void radeon_register_atpx_handler(void)
243{
244 bool r;
245
246 /* detect if we have any ATPX + 2 VGA in the system */
247 r = radeon_atpx_detect();
248 if (!r)
249 return;
250
251 vga_switcheroo_register_handler(&radeon_atpx_handler);
252}
253
254void radeon_unregister_atpx_handler(void)
255{
256 vga_switcheroo_unregister_handler();
257}
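
radeon_atpx_execute() above hands its 16-bit argument to the ATPX ACPI method as a 4-byte buffer with the value placed little-endian at offset 2. The tiny sketch below shows just that packing, with the ACPI plumbing omitted:

/* Sketch of how radeon_atpx_execute() packs its 16-bit argument into the
 * 4-byte ACPI buffer; the ACPI call itself is not shown. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t value = 0x0001;      /* e.g. ATPX_DISCRETE */
	uint8_t buf[4] = { 0 };

	buf[2] = value & 0xff;        /* low byte */
	buf[3] = (value >> 8) & 0xff; /* high byte */

	printf("ATPX arg buffer: %02x %02x %02x %02x\n",
	       buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
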
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 906921740c60..557240460526 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -30,6 +30,7 @@
30#include "radeon.h" 30#include "radeon.h"
31#include "atom.h" 31#include "atom.h"
32 32
33#include <linux/vga_switcheroo.h>
33/* 34/*
34 * BIOS. 35 * BIOS.
35 */ 36 */
@@ -62,7 +63,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
62 iounmap(bios); 63 iounmap(bios);
63 return false; 64 return false;
64 } 65 }
65 memcpy(rdev->bios, bios, size); 66 memcpy_fromio(rdev->bios, bios, size);
66 iounmap(bios); 67 iounmap(bios);
67 return true; 68 return true;
68} 69}
@@ -93,6 +94,38 @@ static bool radeon_read_bios(struct radeon_device *rdev)
93 return true; 94 return true;
94} 95}
95 96
97/* ATRM is used to get the BIOS on the discrete cards in
98 * dual-gpu systems.
99 */
100static bool radeon_atrm_get_bios(struct radeon_device *rdev)
101{
102 int ret;
103 int size = 64 * 1024;
104 int i;
105
106 if (!radeon_atrm_supported(rdev->pdev))
107 return false;
108
109 rdev->bios = kmalloc(size, GFP_KERNEL);
110 if (!rdev->bios) {
111 DRM_ERROR("Unable to allocate bios\n");
112 return false;
113 }
114
115 for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
116 ret = radeon_atrm_get_bios_chunk(rdev->bios,
117 (i * ATRM_BIOS_PAGE),
118 ATRM_BIOS_PAGE);
119 if (ret <= 0)
120 break;
121 }
122
123 if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
124 kfree(rdev->bios);
125 return false;
126 }
127 return true;
128}
96static bool r700_read_disabled_bios(struct radeon_device *rdev) 129static bool r700_read_disabled_bios(struct radeon_device *rdev)
97{ 130{
98 uint32_t viph_control; 131 uint32_t viph_control;
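
radeon_atrm_get_bios() above fetches the discrete card's 64 KiB ROM in fixed-size chunks (ATRM_BIOS_PAGE; the handler's comment says 4 KiB blocks) until the call returns zero or less, then checks the 0x55 0xAA option-ROM signature. A user-space sketch of the same loop against an in-memory ROM; fetch_chunk() and the 4096-byte chunk size are stand-ins for the ACPI call and the driver's constant.

/* Minimal sketch of the chunked ATRM-style ROM fetch and signature check;
 * fetch_chunk() stands in for the ACPI ATRM call. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ROM_SIZE   (64 * 1024)
#define CHUNK_SIZE 4096            /* "retrieve the ROM in 4k blocks" */

static uint8_t fake_rom[ROM_SIZE] = { 0x55, 0xaa };

/* pretend ACPI method: copy one chunk, return bytes copied */
static int fetch_chunk(uint8_t *dst, int offset, int len)
{
	if (offset >= ROM_SIZE)
		return 0;
	memcpy(dst + offset, fake_rom + offset, len);
	return len;
}

int main(void)
{
	static uint8_t bios[ROM_SIZE];
	int i, ret;

	for (i = 0; i < ROM_SIZE / CHUNK_SIZE; i++) {
		ret = fetch_chunk(bios, i * CHUNK_SIZE, CHUNK_SIZE);
		if (ret <= 0)
			break;
	}

	if (i == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
		fprintf(stderr, "no usable ROM via ATRM\n");
		return 1;
	}
	printf("got %d chunks, signature ok\n", i);
	return 0;
}
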
@@ -388,16 +421,16 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
388 return legacy_read_disabled_bios(rdev); 421 return legacy_read_disabled_bios(rdev);
389} 422}
390 423
424
391bool radeon_get_bios(struct radeon_device *rdev) 425bool radeon_get_bios(struct radeon_device *rdev)
392{ 426{
393 bool r; 427 bool r;
394 uint16_t tmp; 428 uint16_t tmp;
395 429
396 if (rdev->flags & RADEON_IS_IGP) { 430 r = radeon_atrm_get_bios(rdev);
431 if (r == false)
397 r = igp_read_bios_from_vram(rdev); 432 r = igp_read_bios_from_vram(rdev);
398 if (r == false) 433 if (r == false)
399 r = radeon_read_bios(rdev);
400 } else
401 r = radeon_read_bios(rdev); 434 r = radeon_read_bios(rdev);
402 if (r == false) { 435 if (r == false) {
403 r = radeon_read_disabled_bios(rdev); 436 r = radeon_read_disabled_bios(rdev);
@@ -408,6 +441,13 @@ bool radeon_get_bios(struct radeon_device *rdev)
408 return false; 441 return false;
409 } 442 }
410 if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) { 443 if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
444 printk("BIOS signature incorrect %x %x\n", rdev->bios[0], rdev->bios[1]);
445 goto free_bios;
446 }
447
448 tmp = RBIOS16(0x18);
449 if (RBIOS8(tmp + 0x14) != 0x0) {
450 DRM_INFO("Not an x86 BIOS ROM, not using.\n");
411 goto free_bios; 451 goto free_bios;
412 } 452 }
413 453
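
The second radeon_get_bios() hunk above also rejects non-x86 ROM images: the 16-bit pointer at offset 0x18 of the option-ROM header locates the PCI data structure, whose byte at offset 0x14 is the code type (0x00 means x86/PC-AT code; 0x03 would be an EFI image). A sketch of the same validation over a synthetic ROM header; the layout follows the PCI option-ROM convention and the sample bytes are fabricated.

/* Sketch of the x86 option-ROM check added above: signature, then the
 * code-type byte of the PCI data structure. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint16_t rbios16(const uint8_t *bios, unsigned off)
{
	return bios[off] | (bios[off + 1] << 8); /* ROM is little-endian */
}

int main(void)
{
	uint8_t bios[512] = { 0 };
	uint16_t pcir;

	bios[0] = 0x55;           /* option-ROM signature */
	bios[1] = 0xaa;
	bios[0x18] = 0x40;        /* pointer to the PCI data structure */
	bios[0x19] = 0x00;
	memcpy(&bios[0x40], "PCIR", 4);
	bios[0x40 + 0x14] = 0x00; /* code type: 0 = x86, 3 = EFI */

	if (bios[0] != 0x55 || bios[1] != 0xaa) {
		fprintf(stderr, "BIOS signature incorrect\n");
		return 1;
	}
	pcir = rbios16(bios, 0x18);
	if (bios[pcir + 0x14] != 0x00) {
		fprintf(stderr, "Not an x86 BIOS ROM, not using.\n");
		return 1;
	}
	printf("x86 ROM image accepted\n");
	return 0;
}
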
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 73c4405bf42f..f64936cc4dd9 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -96,6 +96,7 @@ void radeon_get_clock_info(struct drm_device *dev)
96 struct radeon_device *rdev = dev->dev_private; 96 struct radeon_device *rdev = dev->dev_private;
97 struct radeon_pll *p1pll = &rdev->clock.p1pll; 97 struct radeon_pll *p1pll = &rdev->clock.p1pll;
98 struct radeon_pll *p2pll = &rdev->clock.p2pll; 98 struct radeon_pll *p2pll = &rdev->clock.p2pll;
99 struct radeon_pll *dcpll = &rdev->clock.dcpll;
99 struct radeon_pll *spll = &rdev->clock.spll; 100 struct radeon_pll *spll = &rdev->clock.spll;
100 struct radeon_pll *mpll = &rdev->clock.mpll; 101 struct radeon_pll *mpll = &rdev->clock.mpll;
101 int ret; 102 int ret;
@@ -204,6 +205,17 @@ void radeon_get_clock_info(struct drm_device *dev)
204 p2pll->max_frac_feedback_div = 0; 205 p2pll->max_frac_feedback_div = 0;
205 } 206 }
206 207
208 /* dcpll is DCE4 only */
209 dcpll->min_post_div = 2;
210 dcpll->max_post_div = 0x7f;
211 dcpll->min_frac_feedback_div = 0;
212 dcpll->max_frac_feedback_div = 9;
213 dcpll->min_ref_div = 2;
214 dcpll->max_ref_div = 0x3ff;
215 dcpll->min_feedback_div = 4;
216 dcpll->max_feedback_div = 0xfff;
217 dcpll->best_vco = 0;
218
207 p1pll->min_ref_div = 2; 219 p1pll->min_ref_div = 2;
208 p1pll->max_ref_div = 0x3ff; 220 p1pll->max_ref_div = 0x3ff;
209 p1pll->min_feedback_div = 4; 221 p1pll->min_feedback_div = 4;
@@ -846,8 +858,10 @@ int radeon_static_clocks_init(struct drm_device *dev)
846 /* XXX make sure engine is idle */ 858 /* XXX make sure engine is idle */
847 859
848 if (radeon_dynclks != -1) { 860 if (radeon_dynclks != -1) {
849 if (radeon_dynclks) 861 if (radeon_dynclks) {
850 radeon_set_clock_gating(rdev, 1); 862 if (rdev->asic->set_clock_gating)
863 radeon_set_clock_gating(rdev, 1);
864 }
851 } 865 }
852 radeon_apply_clock_quirks(rdev); 866 radeon_apply_clock_quirks(rdev);
853 return 0; 867 return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 22d476160d52..e9ea38ece375 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -150,6 +150,9 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
150 int rev; 150 int rev;
151 uint16_t offset = 0, check_offset; 151 uint16_t offset = 0, check_offset;
152 152
153 if (!rdev->bios)
154 return 0;
155
153 switch (table) { 156 switch (table) {
154 /* absolute offset tables */ 157 /* absolute offset tables */
155 case COMBIOS_ASIC_INIT_1_TABLE: 158 case COMBIOS_ASIC_INIT_1_TABLE:
@@ -443,6 +446,39 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
443 446
444} 447}
445 448
449bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
450{
451 int edid_info;
452 struct edid *edid;
453 edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
454 if (!edid_info)
455 return false;
456
457 edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
458 GFP_KERNEL);
459 if (edid == NULL)
460 return false;
461
462 memcpy((unsigned char *)edid,
463 (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
464
465 if (!drm_edid_is_valid(edid)) {
466 kfree(edid);
467 return false;
468 }
469
470 rdev->mode_info.bios_hardcoded_edid = edid;
471 return true;
472}
473
474struct edid *
475radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
476{
477 if (rdev->mode_info.bios_hardcoded_edid)
478 return rdev->mode_info.bios_hardcoded_edid;
479 return NULL;
480}
481
446static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev, 482static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
447 int ddc_line) 483 int ddc_line)
448{ 484{
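
radeon_combios_check_hardcoded_edid() above copies the EDID block embedded in the legacy BIOS and keeps it only if drm_edid_is_valid() accepts it. The core of that validation is the fixed 8-byte EDID header plus the block checksum (all 128 bytes must sum to 0 mod 256); the sketch below reimplements those two checks on a synthetic block, independent of the DRM helper.

/* Sketch of the basic EDID sanity checks behind drm_edid_is_valid():
 * fixed header plus zero checksum over the 128-byte base block. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define EDID_LENGTH 128

static const uint8_t edid_header[8] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

static int edid_block_valid(const uint8_t *edid)
{
	uint8_t sum = 0;
	int i;

	if (memcmp(edid, edid_header, sizeof(edid_header)) != 0)
		return 0;
	for (i = 0; i < EDID_LENGTH; i++)
		sum += edid[i];    /* checksum byte brings the total to 0 */
	return sum == 0;
}

int main(void)
{
	uint8_t edid[EDID_LENGTH] = { 0 };
	uint8_t sum = 0;
	int i;

	memcpy(edid, edid_header, sizeof(edid_header));
	for (i = 0; i < EDID_LENGTH - 1; i++)
		sum += edid[i];
	edid[EDID_LENGTH - 1] = (uint8_t)(0x100 - sum); /* fix up checksum */

	printf("EDID valid: %s\n", edid_block_valid(edid) ? "yes" : "no");
	return 0;
}
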
@@ -486,9 +522,65 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
486 i2c.y_data_reg = ddc_line; 522 i2c.y_data_reg = ddc_line;
487 } 523 }
488 524
489 if (rdev->family < CHIP_R200) 525 switch (rdev->family) {
490 i2c.hw_capable = false; 526 case CHIP_R100:
491 else { 527 case CHIP_RV100:
528 case CHIP_RS100:
529 case CHIP_RV200:
530 case CHIP_RS200:
531 case CHIP_RS300:
532 switch (ddc_line) {
533 case RADEON_GPIO_DVI_DDC:
534 /* in theory this should be hw capable,
535 * but it doesn't seem to work
536 */
537 i2c.hw_capable = false;
538 break;
539 default:
540 i2c.hw_capable = false;
541 break;
542 }
543 break;
544 case CHIP_R200:
545 switch (ddc_line) {
546 case RADEON_GPIO_DVI_DDC:
547 case RADEON_GPIO_MONID:
548 i2c.hw_capable = true;
549 break;
550 default:
551 i2c.hw_capable = false;
552 break;
553 }
554 break;
555 case CHIP_RV250:
556 case CHIP_RV280:
557 switch (ddc_line) {
558 case RADEON_GPIO_VGA_DDC:
559 case RADEON_GPIO_DVI_DDC:
560 case RADEON_GPIO_CRT2_DDC:
561 i2c.hw_capable = true;
562 break;
563 default:
564 i2c.hw_capable = false;
565 break;
566 }
567 break;
568 case CHIP_R300:
569 case CHIP_R350:
570 switch (ddc_line) {
571 case RADEON_GPIO_VGA_DDC:
572 case RADEON_GPIO_DVI_DDC:
573 i2c.hw_capable = true;
574 break;
575 default:
576 i2c.hw_capable = false;
577 break;
578 }
579 break;
580 case CHIP_RV350:
581 case CHIP_RV380:
582 case CHIP_RS400:
583 case CHIP_RS480:
492 switch (ddc_line) { 584 switch (ddc_line) {
493 case RADEON_GPIO_VGA_DDC: 585 case RADEON_GPIO_VGA_DDC:
494 case RADEON_GPIO_DVI_DDC: 586 case RADEON_GPIO_DVI_DDC:
@@ -504,9 +596,14 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
504 i2c.hw_capable = false; 596 i2c.hw_capable = false;
505 break; 597 break;
506 } 598 }
599 break;
600 default:
601 i2c.hw_capable = false;
602 break;
507 } 603 }
508 i2c.mm_i2c = false; 604 i2c.mm_i2c = false;
509 i2c.i2c_id = 0; 605 i2c.i2c_id = 0;
606 i2c.hpd_id = 0;
510 607
511 if (ddc_line) 608 if (ddc_line)
512 i2c.valid = true; 609 i2c.valid = true;
@@ -527,9 +624,6 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
527 int8_t rev; 624 int8_t rev;
528 uint16_t sclk, mclk; 625 uint16_t sclk, mclk;
529 626
530 if (rdev->bios == NULL)
531 return false;
532
533 pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE); 627 pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
534 if (pll_info) { 628 if (pll_info) {
535 rev = RBIOS8(pll_info); 629 rev = RBIOS8(pll_info);
@@ -654,9 +748,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
654 if (!p_dac) 748 if (!p_dac)
655 return NULL; 749 return NULL;
656 750
657 if (rdev->bios == NULL)
658 goto out;
659
660 /* check CRT table */ 751 /* check CRT table */
661 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); 752 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
662 if (dac_info) { 753 if (dac_info) {
@@ -673,7 +764,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
673 found = 1; 764 found = 1;
674 } 765 }
675 766
676out:
677 if (!found) /* fallback to defaults */ 767 if (!found) /* fallback to defaults */
678 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac); 768 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
679 769
@@ -687,9 +777,6 @@ radeon_combios_get_tv_info(struct radeon_device *rdev)
687 uint16_t tv_info; 777 uint16_t tv_info;
688 enum radeon_tv_std tv_std = TV_STD_NTSC; 778 enum radeon_tv_std tv_std = TV_STD_NTSC;
689 779
690 if (rdev->bios == NULL)
691 return tv_std;
692
693 tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); 780 tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
694 if (tv_info) { 781 if (tv_info) {
695 if (RBIOS8(tv_info + 6) == 'T') { 782 if (RBIOS8(tv_info + 6) == 'T') {
@@ -793,9 +880,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
793 if (!tv_dac) 880 if (!tv_dac)
794 return NULL; 881 return NULL;
795 882
796 if (rdev->bios == NULL)
797 goto out;
798
799 /* first check TV table */ 883 /* first check TV table */
800 dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); 884 dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
801 if (dac_info) { 885 if (dac_info) {
@@ -857,7 +941,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
857 } 941 }
858 } 942 }
859 943
860out:
861 if (!found) /* fallback to defaults */ 944 if (!found) /* fallback to defaults */
862 radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac); 945 radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
863 946
@@ -945,11 +1028,6 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
945 int tmp, i; 1028 int tmp, i;
946 struct radeon_encoder_lvds *lvds = NULL; 1029 struct radeon_encoder_lvds *lvds = NULL;
947 1030
948 if (rdev->bios == NULL) {
949 lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
950 goto out;
951 }
952
953 lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE); 1031 lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
954 1032
955 if (lcd_info) { 1033 if (lcd_info) {
@@ -1050,7 +1128,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
1050 DRM_INFO("No panel info found in BIOS\n"); 1128 DRM_INFO("No panel info found in BIOS\n");
1051 lvds = radeon_legacy_get_lvds_info_from_regs(rdev); 1129 lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
1052 } 1130 }
1053out: 1131
1054 if (lvds) 1132 if (lvds)
1055 encoder->native_mode = lvds->native_mode; 1133 encoder->native_mode = lvds->native_mode;
1056 return lvds; 1134 return lvds;
@@ -1102,9 +1180,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
1102 int i, n; 1180 int i, n;
1103 uint8_t ver; 1181 uint8_t ver;
1104 1182
1105 if (rdev->bios == NULL)
1106 return false;
1107
1108 tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); 1183 tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
1109 1184
1110 if (tmds_info) { 1185 if (tmds_info) {
@@ -1184,9 +1259,6 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
1184 enum radeon_combios_ddc gpio; 1259 enum radeon_combios_ddc gpio;
1185 struct radeon_i2c_bus_rec i2c_bus; 1260 struct radeon_i2c_bus_rec i2c_bus;
1186 1261
1187 if (rdev->bios == NULL)
1188 return false;
1189
1190 tmds->i2c_bus = NULL; 1262 tmds->i2c_bus = NULL;
1191 if (rdev->flags & RADEON_IS_IGP) { 1263 if (rdev->flags & RADEON_IS_IGP) {
1192 offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); 1264 offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
@@ -1253,7 +1325,10 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
1253 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); 1325 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
1254 break; 1326 break;
1255 case DDC_LCD: /* MM i2c */ 1327 case DDC_LCD: /* MM i2c */
1256 DRM_ERROR("MM i2c requires hw i2c engine\n"); 1328 i2c_bus.valid = true;
1329 i2c_bus.hw_capable = true;
1330 i2c_bus.mm_i2c = true;
1331 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
1257 break; 1332 break;
1258 default: 1333 default:
1259 DRM_ERROR("Unsupported gpio %d\n", gpio); 1334 DRM_ERROR("Unsupported gpio %d\n", gpio);
@@ -1909,9 +1984,6 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1909 struct radeon_i2c_bus_rec ddc_i2c; 1984 struct radeon_i2c_bus_rec ddc_i2c;
1910 struct radeon_hpd hpd; 1985 struct radeon_hpd hpd;
1911 1986
1912 if (rdev->bios == NULL)
1913 return false;
1914
1915 conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE); 1987 conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
1916 if (conn_info) { 1988 if (conn_info) {
1917 for (i = 0; i < 4; i++) { 1989 for (i = 0; i < 4; i++) {
@@ -2278,6 +2350,115 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2278 return true; 2350 return true;
2279} 2351}
2280 2352
2353void radeon_combios_get_power_modes(struct radeon_device *rdev)
2354{
2355 struct drm_device *dev = rdev->ddev;
2356 u16 offset, misc, misc2 = 0;
2357 u8 rev, blocks, tmp;
2358 int state_index = 0;
2359
2360 rdev->pm.default_power_state = NULL;
2361
2362 if (rdev->flags & RADEON_IS_MOBILITY) {
2363 offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
2364 if (offset) {
2365 rev = RBIOS8(offset);
2366 blocks = RBIOS8(offset + 0x2);
2367 /* power mode 0 tends to be the only valid one */
2368 rdev->pm.power_state[state_index].num_clock_modes = 1;
2369 rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
2370 rdev->pm.power_state[state_index].clock_info[0].sclk = RBIOS32(offset + 0x5 + 0x6);
2371 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
2372 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
2373 goto default_mode;
2374 /* skip overclock modes for now */
2375 if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
2376 rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
2377 (rdev->pm.power_state[state_index].clock_info[0].sclk >
2378 rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
2379 goto default_mode;
2380 rdev->pm.power_state[state_index].type =
2381 POWER_STATE_TYPE_BATTERY;
2382 misc = RBIOS16(offset + 0x5 + 0x0);
2383 if (rev > 4)
2384 misc2 = RBIOS16(offset + 0x5 + 0xe);
2385 if (misc & 0x4) {
2386 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
2387 if (misc & 0x8)
2388 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
2389 true;
2390 else
2391 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
2392 false;
2393 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = true;
2394 if (rev < 6) {
2395 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
2396 RBIOS16(offset + 0x5 + 0xb) * 4;
2397 tmp = RBIOS8(offset + 0x5 + 0xd);
2398 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
2399 } else {
2400 u8 entries = RBIOS8(offset + 0x5 + 0xb);
2401 u16 voltage_table_offset = RBIOS16(offset + 0x5 + 0xc);
2402 if (entries && voltage_table_offset) {
2403 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
2404 RBIOS16(voltage_table_offset) * 4;
2405 tmp = RBIOS8(voltage_table_offset + 0x2);
2406 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
2407 } else
2408 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = false;
2409 }
2410 switch ((misc2 & 0x700) >> 8) {
2411 case 0:
2412 default:
2413 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 0;
2414 break;
2415 case 1:
2416 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 33;
2417 break;
2418 case 2:
2419 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 66;
2420 break;
2421 case 3:
2422 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 99;
2423 break;
2424 case 4:
2425 rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 132;
2426 break;
2427 }
2428 } else
2429 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2430 if (rev > 6)
2431 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
2432 RBIOS8(offset + 0x5 + 0x10);
2433 state_index++;
2434 } else {
2435 /* XXX figure out some good default low power mode for mobility cards w/out power tables */
2436 }
2437 } else {
2438 /* XXX figure out some good default low power mode for desktop cards */
2439 }
2440
2441default_mode:
2442 /* add the default mode */
2443 rdev->pm.power_state[state_index].type =
2444 POWER_STATE_TYPE_DEFAULT;
2445 rdev->pm.power_state[state_index].num_clock_modes = 1;
2446 rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
2447 rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
2448 rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
2449 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2450 if (rdev->asic->get_pcie_lanes)
2451 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
2452 else
2453 rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
2454 rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
2455 rdev->pm.num_power_states = state_index + 1;
2456
2457 rdev->pm.current_power_state = rdev->pm.default_power_state;
2458 rdev->pm.current_clock_mode =
2459 rdev->pm.default_power_state->default_clock_mode;
2460}
2461
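The parser above walks the COMBIOS POWERPLAY table by fixed byte offsets rather than through a declared structure. As a standalone illustration of the layout those reads imply (the offsets come straight from the code above; the helpers and field comments below are only a sketch, not a documented table format):

/* Illustrative decode of one POWERPLAY entry using the same offsets as
 * radeon_combios_get_power_modes(); not a published table layout. */
#include <stdint.h>
#include <stdio.h>

static uint8_t  rb8(const uint8_t *b, unsigned o)  { return b[o]; }
static uint16_t rb16(const uint8_t *b, unsigned o) { return b[o] | (b[o + 1] << 8); }
static uint32_t rb32(const uint8_t *b, unsigned o) { return rb16(b, o) | ((uint32_t)rb16(b, o + 2) << 16); }

static void decode_powerplay(const uint8_t *bios, unsigned offset)
{
	uint8_t  rev    = rb8(bios, offset);              /* table revision */
	uint8_t  blocks = rb8(bios, offset + 0x2);        /* read but not used above */
	uint16_t misc   = rb16(bios, offset + 0x5 + 0x0); /* voltage/GPIO flag bits */
	uint32_t mclk   = rb32(bios, offset + 0x5 + 0x2); /* memory clock of mode 0 */
	uint32_t sclk   = rb32(bios, offset + 0x5 + 0x6); /* engine clock of mode 0 */
	uint16_t misc2  = rev > 4 ? rb16(bios, offset + 0x5 + 0xe) : 0; /* delay etc. */

	printf("rev %u blocks %u mclk %u sclk %u misc 0x%04x misc2 0x%04x\n",
	       rev, blocks, mclk, sclk, misc, misc2);
}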
2281void radeon_external_tmds_setup(struct drm_encoder *encoder) 2462void radeon_external_tmds_setup(struct drm_encoder *encoder)
2282{ 2463{
2283 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 2464 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -2289,23 +2470,21 @@ void radeon_external_tmds_setup(struct drm_encoder *encoder)
2289 switch (tmds->dvo_chip) { 2470 switch (tmds->dvo_chip) {
2290 case DVO_SIL164: 2471 case DVO_SIL164:
2291 /* sil 164 */ 2472 /* sil 164 */
2292 radeon_i2c_do_lock(tmds->i2c_bus, 1); 2473 radeon_i2c_put_byte(tmds->i2c_bus,
2293 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2474 tmds->slave_addr,
2294 tmds->slave_addr, 2475 0x08, 0x30);
2295 0x08, 0x30); 2476 radeon_i2c_put_byte(tmds->i2c_bus,
2296 radeon_i2c_sw_put_byte(tmds->i2c_bus,
2297 tmds->slave_addr, 2477 tmds->slave_addr,
2298 0x09, 0x00); 2478 0x09, 0x00);
2299 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2479 radeon_i2c_put_byte(tmds->i2c_bus,
2300 tmds->slave_addr, 2480 tmds->slave_addr,
2301 0x0a, 0x90); 2481 0x0a, 0x90);
2302 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2482 radeon_i2c_put_byte(tmds->i2c_bus,
2303 tmds->slave_addr, 2483 tmds->slave_addr,
2304 0x0c, 0x89); 2484 0x0c, 0x89);
2305 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2485 radeon_i2c_put_byte(tmds->i2c_bus,
2306 tmds->slave_addr, 2486 tmds->slave_addr,
2307 0x08, 0x3b); 2487 0x08, 0x3b);
2308 radeon_i2c_do_lock(tmds->i2c_bus, 0);
2309 break; 2488 break;
2310 case DVO_SIL1178: 2489 case DVO_SIL1178:
2311 /* sil 1178 - untested */ 2490 /* sil 1178 - untested */
@@ -2338,9 +2517,6 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
2338 uint32_t reg, val, and_mask, or_mask; 2517 uint32_t reg, val, and_mask, or_mask;
2339 struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; 2518 struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
2340 2519
2341 if (rdev->bios == NULL)
2342 return false;
2343
2344 if (!tmds) 2520 if (!tmds)
2345 return false; 2521 return false;
2346 2522
@@ -2390,11 +2566,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
2390 index++; 2566 index++;
2391 val = RBIOS8(index); 2567 val = RBIOS8(index);
2392 index++; 2568 index++;
2393 radeon_i2c_do_lock(tmds->i2c_bus, 1); 2569 radeon_i2c_put_byte(tmds->i2c_bus,
2394 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2570 slave_addr,
2395 slave_addr, 2571 reg, val);
2396 reg, val);
2397 radeon_i2c_do_lock(tmds->i2c_bus, 0);
2398 break; 2572 break;
2399 default: 2573 default:
2400 DRM_ERROR("Unknown id %d\n", id >> 13); 2574 DRM_ERROR("Unknown id %d\n", id >> 13);
@@ -2447,11 +2621,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
2447 reg = id & 0x1fff; 2621 reg = id & 0x1fff;
2448 val = RBIOS8(index); 2622 val = RBIOS8(index);
2449 index += 1; 2623 index += 1;
2450 radeon_i2c_do_lock(tmds->i2c_bus, 1); 2624 radeon_i2c_put_byte(tmds->i2c_bus,
2451 radeon_i2c_sw_put_byte(tmds->i2c_bus, 2625 tmds->slave_addr,
2452 tmds->slave_addr, 2626 reg, val);
2453 reg, val);
2454 radeon_i2c_do_lock(tmds->i2c_bus, 0);
2455 break; 2627 break;
2456 default: 2628 default:
2457 DRM_ERROR("Unknown id %d\n", id >> 13); 2629 DRM_ERROR("Unknown id %d\n", id >> 13);
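The SIL164 bring-up in radeon_external_tmds_setup() is now a plain sequence of (register, value) writes through radeon_i2c_put_byte(), with the explicit radeon_i2c_do_lock() bracketing gone. A table-driven sketch of the same sequence; the put_byte usage mirrors the calls above, while the array and wrapper names here are illustrative:

/* Sketch: the same SIL164 init writes as above, expressed as a table. */
static const struct { u8 reg, val; } sil164_init[] = {
	{ 0x08, 0x30 }, { 0x09, 0x00 }, { 0x0a, 0x90 }, { 0x0c, 0x89 }, { 0x08, 0x3b },
};

static void sil164_dvo_setup(struct radeon_i2c_chan *i2c_bus, u8 slave_addr)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sil164_init); i++)
		radeon_i2c_put_byte(i2c_bus, slave_addr,
				    sil164_init[i].reg, sil164_init[i].val);
}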
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 65f81942f399..ee0083f982d8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -479,10 +479,8 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
479 ret = connector_status_connected; 479 ret = connector_status_connected;
480 else { 480 else {
481 if (radeon_connector->ddc_bus) { 481 if (radeon_connector->ddc_bus) {
482 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
483 radeon_connector->edid = drm_get_edid(&radeon_connector->base, 482 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
484 &radeon_connector->ddc_bus->adapter); 483 &radeon_connector->ddc_bus->adapter);
485 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
486 if (radeon_connector->edid) 484 if (radeon_connector->edid)
487 ret = connector_status_connected; 485 ret = connector_status_connected;
488 } 486 }
@@ -587,19 +585,14 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
587 if (!encoder) 585 if (!encoder)
588 ret = connector_status_disconnected; 586 ret = connector_status_disconnected;
589 587
590 if (radeon_connector->ddc_bus) { 588 if (radeon_connector->ddc_bus)
591 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
592 dret = radeon_ddc_probe(radeon_connector); 589 dret = radeon_ddc_probe(radeon_connector);
593 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
594 }
595 if (dret) { 590 if (dret) {
596 if (radeon_connector->edid) { 591 if (radeon_connector->edid) {
597 kfree(radeon_connector->edid); 592 kfree(radeon_connector->edid);
598 radeon_connector->edid = NULL; 593 radeon_connector->edid = NULL;
599 } 594 }
600 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
601 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 595 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
602 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
603 596
604 if (!radeon_connector->edid) { 597 if (!radeon_connector->edid) {
605 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 598 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -744,19 +737,14 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
744 enum drm_connector_status ret = connector_status_disconnected; 737 enum drm_connector_status ret = connector_status_disconnected;
745 bool dret = false; 738 bool dret = false;
746 739
747 if (radeon_connector->ddc_bus) { 740 if (radeon_connector->ddc_bus)
748 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
749 dret = radeon_ddc_probe(radeon_connector); 741 dret = radeon_ddc_probe(radeon_connector);
750 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
751 }
752 if (dret) { 742 if (dret) {
753 if (radeon_connector->edid) { 743 if (radeon_connector->edid) {
754 kfree(radeon_connector->edid); 744 kfree(radeon_connector->edid);
755 radeon_connector->edid = NULL; 745 radeon_connector->edid = NULL;
756 } 746 }
757 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
758 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 747 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
759 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
760 748
761 if (!radeon_connector->edid) { 749 if (!radeon_connector->edid) {
762 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 750 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -952,7 +940,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
952 if (radeon_connector->edid) 940 if (radeon_connector->edid)
953 kfree(radeon_connector->edid); 941 kfree(radeon_connector->edid);
954 if (radeon_dig_connector->dp_i2c_bus) 942 if (radeon_dig_connector->dp_i2c_bus)
955 radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus); 943 radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus);
956 kfree(radeon_connector->con_priv); 944 kfree(radeon_connector->con_priv);
957 drm_sysfs_connector_remove(connector); 945 drm_sysfs_connector_remove(connector);
958 drm_connector_cleanup(connector); 946 drm_connector_cleanup(connector);
@@ -988,12 +976,10 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
988 ret = connector_status_connected; 976 ret = connector_status_connected;
989 } 977 }
990 } else { 978 } else {
991 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
992 if (radeon_ddc_probe(radeon_connector)) { 979 if (radeon_ddc_probe(radeon_connector)) {
993 radeon_dig_connector->dp_sink_type = sink_type; 980 radeon_dig_connector->dp_sink_type = sink_type;
994 ret = connector_status_connected; 981 ret = connector_status_connected;
995 } 982 }
996 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
997 } 983 }
998 984
999 return ret; 985 return ret;
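With bus locking folded into the i2c adapter, each detect path above reduces to "probe the bus, then fetch the EDID". A minimal sketch of that shared shape (names follow the diff; the analog load-detect fallback and error reporting are left out):

/* Sketch of the simplified detect flow: no radeon_i2c_do_lock() needed,
 * the adapter handles its own locking. */
static enum drm_connector_status
radeon_probe_and_fetch_edid(struct radeon_connector *radeon_connector)
{
	if (!radeon_connector->ddc_bus || !radeon_ddc_probe(radeon_connector))
		return connector_status_disconnected;

	kfree(radeon_connector->edid);	/* kfree(NULL) is a no-op */
	radeon_connector->edid = drm_get_edid(&radeon_connector->base,
					      &radeon_connector->ddc_bus->adapter);

	return radeon_connector->edid ? connector_status_connected
				      : connector_status_disconnected;
}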
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 06123ba31d31..dc6eba6b96dd 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1644,6 +1644,7 @@ static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_pri
1644 radeon_cp_load_microcode(dev_priv); 1644 radeon_cp_load_microcode(dev_priv);
1645 radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); 1645 radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
1646 1646
1647 dev_priv->have_z_offset = 0;
1647 radeon_do_engine_reset(dev); 1648 radeon_do_engine_reset(dev);
1648 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); 1649 radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
1649 1650
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index e9d085021c1f..70ba02ed7723 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -194,11 +194,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
194 } 194 }
195 radeon_bo_list_unreserve(&parser->validated); 195 radeon_bo_list_unreserve(&parser->validated);
196 for (i = 0; i < parser->nrelocs; i++) { 196 for (i = 0; i < parser->nrelocs; i++) {
197 if (parser->relocs[i].gobj) { 197 if (parser->relocs[i].gobj)
198 mutex_lock(&parser->rdev->ddev->struct_mutex); 198 drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
199 drm_gem_object_unreference(parser->relocs[i].gobj);
200 mutex_unlock(&parser->rdev->ddev->struct_mutex);
201 }
202 } 199 }
203 kfree(parser->track); 200 kfree(parser->track);
204 kfree(parser->relocs); 201 kfree(parser->relocs);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 28772a37009c..b7023fff89eb 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -36,7 +36,14 @@ static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
36 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 36 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
37 uint32_t cur_lock; 37 uint32_t cur_lock;
38 38
39 if (ASIC_IS_AVIVO(rdev)) { 39 if (ASIC_IS_DCE4(rdev)) {
40 cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
41 if (lock)
42 cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
43 else
44 cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
45 WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
46 } else if (ASIC_IS_AVIVO(rdev)) {
40 cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset); 47 cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
41 if (lock) 48 if (lock)
42 cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK; 49 cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
@@ -58,7 +65,10 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
58 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 65 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
59 struct radeon_device *rdev = crtc->dev->dev_private; 66 struct radeon_device *rdev = crtc->dev->dev_private;
60 67
61 if (ASIC_IS_AVIVO(rdev)) { 68 if (ASIC_IS_DCE4(rdev)) {
69 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
70 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
71 } else if (ASIC_IS_AVIVO(rdev)) {
62 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); 72 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
63 WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); 73 WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
64 } else { 74 } else {
@@ -81,10 +91,14 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
81 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 91 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
82 struct radeon_device *rdev = crtc->dev->dev_private; 92 struct radeon_device *rdev = crtc->dev->dev_private;
83 93
84 if (ASIC_IS_AVIVO(rdev)) { 94 if (ASIC_IS_DCE4(rdev)) {
95 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
96 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
97 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
98 } else if (ASIC_IS_AVIVO(rdev)) {
85 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); 99 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
86 WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | 100 WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
87 (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); 101 (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
88 } else { 102 } else {
89 switch (radeon_crtc->crtc_id) { 103 switch (radeon_crtc->crtc_id) {
90 case 0: 104 case 0:
@@ -109,7 +123,10 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
109 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 123 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
110 struct radeon_device *rdev = crtc->dev->dev_private; 124 struct radeon_device *rdev = crtc->dev->dev_private;
111 125
112 if (ASIC_IS_AVIVO(rdev)) { 126 if (ASIC_IS_DCE4(rdev)) {
127 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0);
128 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
129 } else if (ASIC_IS_AVIVO(rdev)) {
113 if (rdev->family >= CHIP_RV770) { 130 if (rdev->family >= CHIP_RV770) {
114 if (radeon_crtc->crtc_id) 131 if (radeon_crtc->crtc_id)
115 WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0); 132 WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
@@ -169,17 +186,13 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
169unpin: 186unpin:
170 if (radeon_crtc->cursor_bo) { 187 if (radeon_crtc->cursor_bo) {
171 radeon_gem_object_unpin(radeon_crtc->cursor_bo); 188 radeon_gem_object_unpin(radeon_crtc->cursor_bo);
172 mutex_lock(&crtc->dev->struct_mutex); 189 drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
173 drm_gem_object_unreference(radeon_crtc->cursor_bo);
174 mutex_unlock(&crtc->dev->struct_mutex);
175 } 190 }
176 191
177 radeon_crtc->cursor_bo = obj; 192 radeon_crtc->cursor_bo = obj;
178 return 0; 193 return 0;
179fail: 194fail:
180 mutex_lock(&crtc->dev->struct_mutex); 195 drm_gem_object_unreference_unlocked(obj);
181 drm_gem_object_unreference(obj);
182 mutex_unlock(&crtc->dev->struct_mutex);
183 196
184 return 0; 197 return 0;
185} 198}
@@ -201,7 +214,20 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
201 yorigin = CURSOR_HEIGHT - 1; 214 yorigin = CURSOR_HEIGHT - 1;
202 215
203 radeon_lock_cursor(crtc, true); 216 radeon_lock_cursor(crtc, true);
204 if (ASIC_IS_AVIVO(rdev)) { 217 if (ASIC_IS_DCE4(rdev)) {
218 /* cursors are offset into the total surface */
219 x += crtc->x;
220 y += crtc->y;
221 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
222
223 /* XXX: check if evergreen has the same issues as avivo chips */
224 WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
225 ((xorigin ? 0 : x) << 16) |
226 (yorigin ? 0 : y));
227 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
228 WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
229 ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1));
230 } else if (ASIC_IS_AVIVO(rdev)) {
205 int w = radeon_crtc->cursor_width; 231 int w = radeon_crtc->cursor_width;
206 int i = 0; 232 int i = 0;
207 struct drm_crtc *crtc_p; 233 struct drm_crtc *crtc_p;
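On DCE4 the cursor position, hot spot and size are each packed as two 16-bit fields in a single register, and when the cursor hangs off the top/left edge the code above zeroes the position component and pushes the offset into CUR_HOT_SPOT instead. A standalone illustration of that packing, with register semantics as implied by the writes above:

/* Standalone illustration of the DCE4 cursor register packing; xorigin and
 * yorigin are the clamped offsets used when the cursor crosses the edge. */
#include <stdint.h>
#include <stdio.h>

static void pack_dce4_cursor(int x, int y, int xorigin, int yorigin,
			     int width, int height)
{
	uint32_t pos  = ((uint32_t)(xorigin ? 0 : x) << 16) | (uint32_t)(yorigin ? 0 : y);
	uint32_t hot  = ((uint32_t)xorigin << 16) | (uint32_t)yorigin;
	uint32_t size = ((uint32_t)(width - 1) << 16) | (uint32_t)(height - 1);

	printf("CUR_POSITION 0x%08x CUR_HOT_SPOT 0x%08x CUR_SIZE 0x%08x\n",
	       pos, hot, size);
}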
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 768b1509fa03..e28e4ed5f720 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -30,6 +30,7 @@
30#include <drm/drm_crtc_helper.h> 30#include <drm/drm_crtc_helper.h>
31#include <drm/radeon_drm.h> 31#include <drm/radeon_drm.h>
32#include <linux/vgaarb.h> 32#include <linux/vgaarb.h>
33#include <linux/vga_switcheroo.h>
33#include "radeon_reg.h" 34#include "radeon_reg.h"
34#include "radeon.h" 35#include "radeon.h"
35#include "radeon_asic.h" 36#include "radeon_asic.h"
@@ -100,80 +101,103 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
100 } 101 }
101} 102}
102 103
103/* 104/**
104 * MC common functions 105 * radeon_vram_location - try to find VRAM location
 106 * @rdev: radeon device structure holding all necessary information
 107 * @mc: memory controller structure holding memory information
108 * @base: base address at which to put VRAM
109 *
 110 * This function will try to place VRAM at the base address provided
 111 * as a parameter (which is so far either the PCI aperture address or,
 112 * for IGP, the TOM base address).
113 *
 114 * If there is not enough space to fit the invisible VRAM in the 32-bit
 115 * address space, then we limit the VRAM size to the aperture.
116 *
 117 * If we are using AGP and the AGP aperture doesn't allow us to have
 118 * room for all the VRAM, then we restrict the VRAM to the PCI aperture
 119 * size and print a warning.
120 *
 121 * This function never fails; in the worst case the VRAM gets limited.
122 *
123 * Note: GTT start, end, size should be initialized before calling this
 124 * function on AGP platforms.
125 *
 126 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
127 * this shouldn't be a problem as we are using the PCI aperture as a reference.
128 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
129 * not IGP.
130 *
 131 * Note: we use mc_vram_size since on some boards we need to program the mc to
 132 * cover the whole aperture even if VRAM size is smaller than the aperture size
 133 * (Novell bug 204882, along with lots of Ubuntu ones).
134 *
 135 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 136 * we are not in the case where real_vram_size is smaller than mc_vram_size
 137 * (i.e. not affected by the bogus hw of Novell bug 204882 and the related
 138 * Ubuntu reports).
139 *
 140 * Note: the IGP TOM addr should be the same as the aperture addr; we don't
 141 * explicitly check for that, though.
142 *
 143 * FIXME: when reducing VRAM size, align the new size to a power of 2.
144 */
145void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
146{
147 mc->vram_start = base;
148 if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
149 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
150 mc->real_vram_size = mc->aper_size;
151 mc->mc_vram_size = mc->aper_size;
152 }
153 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
154 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
155 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
156 mc->real_vram_size = mc->aper_size;
157 mc->mc_vram_size = mc->aper_size;
158 }
159 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
160 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
161 mc->mc_vram_size >> 20, mc->vram_start,
162 mc->vram_end, mc->real_vram_size >> 20);
163}
164
165/**
166 * radeon_gtt_location - try to find GTT location
167 * @rdev: radeon device structure holding all necessary informations
168 * @mc: memory controller structure holding memory informations
169 *
 170 * This function will try to place GTT before or after VRAM.
171 *
 172 * If the GTT size is bigger than the space left, then we adjust the GTT size.
 173 * Thus this function never fails.
174 *
 175 * FIXME: when reducing GTT size, align the new size to a power of 2.
105 */ 176 */
106int radeon_mc_setup(struct radeon_device *rdev) 177void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
107{ 178{
108 uint32_t tmp; 179 u64 size_af, size_bf;
109 180
110 /* Some chips have an "issue" with the memory controller, the 181 size_af = 0xFFFFFFFF - mc->vram_end;
111 * location must be aligned to the size. We just align it down, 182 size_bf = mc->vram_start;
112 * too bad if we walk over the top of system memory, we don't 183 if (size_bf > size_af) {
113 * use DMA without a remapped anyway. 184 if (mc->gtt_size > size_bf) {
114 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP 185 dev_warn(rdev->dev, "limiting GTT\n");
115 */ 186 mc->gtt_size = size_bf;
116 /* FGLRX seems to setup like this, VRAM a 0, then GART.
117 */
118 /*
119 * Note: from R6xx the address space is 40bits but here we only
120 * use 32bits (still have to see a card which would exhaust 4G
121 * address space).
122 */
123 if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
124 /* vram location was already setup try to put gtt after
125 * if it fits */
126 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
127 tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
128 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
129 rdev->mc.gtt_location = tmp;
130 } else {
131 if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
132 printk(KERN_ERR "[drm] GTT too big to fit "
133 "before or after vram location.\n");
134 return -EINVAL;
135 }
136 rdev->mc.gtt_location = 0;
137 }
138 } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
139 /* gtt location was already setup try to put vram before
140 * if it fits */
141 if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
142 rdev->mc.vram_location = 0;
143 } else {
144 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
145 tmp += (rdev->mc.mc_vram_size - 1);
146 tmp &= ~(rdev->mc.mc_vram_size - 1);
147 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
148 rdev->mc.vram_location = tmp;
149 } else {
150 printk(KERN_ERR "[drm] vram too big to fit "
151 "before or after GTT location.\n");
152 return -EINVAL;
153 }
154 } 187 }
188 mc->gtt_start = mc->vram_start - mc->gtt_size;
155 } else { 189 } else {
156 rdev->mc.vram_location = 0; 190 if (mc->gtt_size > size_af) {
157 tmp = rdev->mc.mc_vram_size; 191 dev_warn(rdev->dev, "limiting GTT\n");
158 tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); 192 mc->gtt_size = size_af;
159 rdev->mc.gtt_location = tmp; 193 }
160 } 194 mc->gtt_start = mc->vram_end + 1;
161 rdev->mc.vram_start = rdev->mc.vram_location; 195 }
162 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; 196 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
163 rdev->mc.gtt_start = rdev->mc.gtt_location; 197 dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
164 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 198 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
165 DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
166 DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
167 (unsigned)rdev->mc.vram_location,
168 (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
169 DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
170 DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
171 (unsigned)rdev->mc.gtt_location,
172 (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
173 return 0;
174} 199}
175 200
176
177/* 201/*
178 * GPU helpers function. 202 * GPU helpers function.
179 */ 203 */
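A standalone run-through of the placement arithmetic above, with made-up sizes (256 MB aperture, 512 MB of VRAM for the MC to cover, 512 MB of GTT requested, VRAM based at 0; the AGP clamp is omitted):

/* Mirrors radeon_vram_location()/radeon_gtt_location() for one example. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t aper_size    = 256ULL << 20;	/* PCI aperture */
	uint64_t mc_vram_size = 512ULL << 20;	/* what the MC must cover */
	uint64_t gtt_size     = 512ULL << 20;	/* requested GTT */
	uint64_t base = 0, vram_start, vram_end, gtt_start, gtt_end;

	/* VRAM: clamp to the aperture only if it would not fit below 4 GB */
	vram_start = base;
	if (mc_vram_size > (0xFFFFFFFFULL - base + 1))
		mc_vram_size = aper_size;
	vram_end = vram_start + mc_vram_size - 1;

	/* GTT: pick the larger hole before or after VRAM, clamp if needed */
	uint64_t size_af = 0xFFFFFFFFULL - vram_end;
	uint64_t size_bf = vram_start;
	if (size_bf > size_af) {
		if (gtt_size > size_bf)
			gtt_size = size_bf;
		gtt_start = vram_start - gtt_size;
	} else {
		if (gtt_size > size_af)
			gtt_size = size_af;
		gtt_start = vram_end + 1;
	}
	gtt_end = gtt_start + gtt_size - 1;

	/* prints VRAM 0x00000000-0x1fffffff, GTT 0x20000000-0x3fffffff */
	printf("VRAM 0x%08llx-0x%08llx, GTT 0x%08llx-0x%08llx\n",
	       (unsigned long long)vram_start, (unsigned long long)vram_end,
	       (unsigned long long)gtt_start, (unsigned long long)gtt_end);
	return 0;
}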
@@ -182,7 +206,16 @@ bool radeon_card_posted(struct radeon_device *rdev)
182 uint32_t reg; 206 uint32_t reg;
183 207
184 /* first check CRTCs */ 208 /* first check CRTCs */
185 if (ASIC_IS_AVIVO(rdev)) { 209 if (ASIC_IS_DCE4(rdev)) {
210 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
211 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
212 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
213 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
214 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
215 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
216 if (reg & EVERGREEN_CRTC_MASTER_EN)
217 return true;
218 } else if (ASIC_IS_AVIVO(rdev)) {
186 reg = RREG32(AVIVO_D1CRTC_CONTROL) | 219 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
187 RREG32(AVIVO_D2CRTC_CONTROL); 220 RREG32(AVIVO_D2CRTC_CONTROL);
188 if (reg & AVIVO_CRTC_EN) { 221 if (reg & AVIVO_CRTC_EN) {
@@ -229,6 +262,8 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
229 262
230int radeon_dummy_page_init(struct radeon_device *rdev) 263int radeon_dummy_page_init(struct radeon_device *rdev)
231{ 264{
265 if (rdev->dummy_page.page)
266 return 0;
232 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); 267 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
233 if (rdev->dummy_page.page == NULL) 268 if (rdev->dummy_page.page == NULL)
234 return -ENOMEM; 269 return -ENOMEM;
@@ -310,7 +345,7 @@ void radeon_register_accessor_init(struct radeon_device *rdev)
310 rdev->mc_rreg = &rs600_mc_rreg; 345 rdev->mc_rreg = &rs600_mc_rreg;
311 rdev->mc_wreg = &rs600_mc_wreg; 346 rdev->mc_wreg = &rs600_mc_wreg;
312 } 347 }
313 if (rdev->family >= CHIP_R600) { 348 if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
314 rdev->pciep_rreg = &r600_pciep_rreg; 349 rdev->pciep_rreg = &r600_pciep_rreg;
315 rdev->pciep_wreg = &r600_pciep_wreg; 350 rdev->pciep_wreg = &r600_pciep_wreg;
316 } 351 }
@@ -329,21 +364,22 @@ int radeon_asic_init(struct radeon_device *rdev)
329 case CHIP_RS100: 364 case CHIP_RS100:
330 case CHIP_RV200: 365 case CHIP_RV200:
331 case CHIP_RS200: 366 case CHIP_RS200:
367 rdev->asic = &r100_asic;
368 break;
332 case CHIP_R200: 369 case CHIP_R200:
333 case CHIP_RV250: 370 case CHIP_RV250:
334 case CHIP_RS300: 371 case CHIP_RS300:
335 case CHIP_RV280: 372 case CHIP_RV280:
336 rdev->asic = &r100_asic; 373 rdev->asic = &r200_asic;
337 break; 374 break;
338 case CHIP_R300: 375 case CHIP_R300:
339 case CHIP_R350: 376 case CHIP_R350:
340 case CHIP_RV350: 377 case CHIP_RV350:
341 case CHIP_RV380: 378 case CHIP_RV380:
342 rdev->asic = &r300_asic; 379 if (rdev->flags & RADEON_IS_PCIE)
343 if (rdev->flags & RADEON_IS_PCIE) { 380 rdev->asic = &r300_asic_pcie;
344 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; 381 else
345 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; 382 rdev->asic = &r300_asic;
346 }
347 break; 383 break;
348 case CHIP_R420: 384 case CHIP_R420:
349 case CHIP_R423: 385 case CHIP_R423:
@@ -387,6 +423,13 @@ int radeon_asic_init(struct radeon_device *rdev)
387 case CHIP_RV740: 423 case CHIP_RV740:
388 rdev->asic = &rv770_asic; 424 rdev->asic = &rv770_asic;
389 break; 425 break;
426 case CHIP_CEDAR:
427 case CHIP_REDWOOD:
428 case CHIP_JUNIPER:
429 case CHIP_CYPRESS:
430 case CHIP_HEMLOCK:
431 rdev->asic = &evergreen_asic;
432 break;
390 default: 433 default:
391 /* FIXME: not supported yet */ 434 /* FIXME: not supported yet */
392 return -EINVAL; 435 return -EINVAL;
@@ -613,6 +656,36 @@ void radeon_check_arguments(struct radeon_device *rdev)
613 } 656 }
614} 657}
615 658
659static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
660{
661 struct drm_device *dev = pci_get_drvdata(pdev);
662 struct radeon_device *rdev = dev->dev_private;
663 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
664 if (state == VGA_SWITCHEROO_ON) {
665 printk(KERN_INFO "radeon: switched on\n");
666 /* don't suspend or resume card normally */
667 rdev->powered_down = false;
668 radeon_resume_kms(dev);
669 } else {
670 printk(KERN_INFO "radeon: switched off\n");
671 radeon_suspend_kms(dev, pmm);
672 /* don't suspend or resume card normally */
673 rdev->powered_down = true;
674 }
675}
676
677static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
678{
679 struct drm_device *dev = pci_get_drvdata(pdev);
680 bool can_switch;
681
682 spin_lock(&dev->count_lock);
683 can_switch = (dev->open_count == 0);
684 spin_unlock(&dev->count_lock);
685 return can_switch;
686}
687
688
616int radeon_device_init(struct radeon_device *rdev, 689int radeon_device_init(struct radeon_device *rdev,
617 struct drm_device *ddev, 690 struct drm_device *ddev,
618 struct pci_dev *pdev, 691 struct pci_dev *pdev,
@@ -638,11 +711,14 @@ int radeon_device_init(struct radeon_device *rdev,
638 mutex_init(&rdev->cs_mutex); 711 mutex_init(&rdev->cs_mutex);
639 mutex_init(&rdev->ib_pool.mutex); 712 mutex_init(&rdev->ib_pool.mutex);
640 mutex_init(&rdev->cp.mutex); 713 mutex_init(&rdev->cp.mutex);
714 mutex_init(&rdev->dc_hw_i2c_mutex);
641 if (rdev->family >= CHIP_R600) 715 if (rdev->family >= CHIP_R600)
642 spin_lock_init(&rdev->ih.lock); 716 spin_lock_init(&rdev->ih.lock);
643 mutex_init(&rdev->gem.mutex); 717 mutex_init(&rdev->gem.mutex);
718 mutex_init(&rdev->pm.mutex);
644 rwlock_init(&rdev->fence_drv.lock); 719 rwlock_init(&rdev->fence_drv.lock);
645 INIT_LIST_HEAD(&rdev->gem.objects); 720 INIT_LIST_HEAD(&rdev->gem.objects);
721 init_waitqueue_head(&rdev->irq.vblank_queue);
646 722
647 /* setup workqueue */ 723 /* setup workqueue */
648 rdev->wq = create_workqueue("radeon"); 724 rdev->wq = create_workqueue("radeon");
@@ -692,6 +768,9 @@ int radeon_device_init(struct radeon_device *rdev,
692 /* this will fail for cards that aren't VGA class devices, just 768 /* this will fail for cards that aren't VGA class devices, just
693 * ignore it */ 769 * ignore it */
694 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 770 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
771 vga_switcheroo_register_client(rdev->pdev,
772 radeon_switcheroo_set_state,
773 radeon_switcheroo_can_switch);
695 774
696 r = radeon_init(rdev); 775 r = radeon_init(rdev);
697 if (r) 776 if (r)
@@ -723,6 +802,7 @@ void radeon_device_fini(struct radeon_device *rdev)
723 rdev->shutdown = true; 802 rdev->shutdown = true;
724 radeon_fini(rdev); 803 radeon_fini(rdev);
725 destroy_workqueue(rdev->wq); 804 destroy_workqueue(rdev->wq);
805 vga_switcheroo_unregister_client(rdev->pdev);
726 vga_client_register(rdev->pdev, NULL, NULL, NULL); 806 vga_client_register(rdev->pdev, NULL, NULL, NULL);
727 iounmap(rdev->rmmio); 807 iounmap(rdev->rmmio);
728 rdev->rmmio = NULL; 808 rdev->rmmio = NULL;
@@ -746,6 +826,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
746 } 826 }
747 rdev = dev->dev_private; 827 rdev = dev->dev_private;
748 828
829 if (rdev->powered_down)
830 return 0;
749 /* unpin the front buffers */ 831 /* unpin the front buffers */
750 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 832 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
751 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); 833 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
@@ -791,6 +873,9 @@ int radeon_resume_kms(struct drm_device *dev)
791{ 873{
792 struct radeon_device *rdev = dev->dev_private; 874 struct radeon_device *rdev = dev->dev_private;
793 875
876 if (rdev->powered_down)
877 return 0;
878
794 acquire_console_sem(); 879 acquire_console_sem();
795 pci_set_power_state(dev->pdev, PCI_D0); 880 pci_set_power_state(dev->pdev, PCI_D0);
796 pci_restore_state(dev->pdev); 881 pci_restore_state(dev->pdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7e17a362b54b..ba8d806dcf39 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -68,6 +68,36 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
68 WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); 68 WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
69} 69}
70 70
71static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
72{
73 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
74 struct drm_device *dev = crtc->dev;
75 struct radeon_device *rdev = dev->dev_private;
76 int i;
77
78 DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
79 WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
80
81 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
82 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
83 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
84
85 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
86 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
87 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
88
89 WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id);
90 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007);
91
92 WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0);
93 for (i = 0; i < 256; i++) {
94 WREG32(EVERGREEN_DC_LUT_30_COLOR,
95 (radeon_crtc->lut_r[i] << 20) |
96 (radeon_crtc->lut_g[i] << 10) |
97 (radeon_crtc->lut_b[i] << 0));
98 }
99}
100
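The Evergreen LUT path above selects the CRTC via DC_LUT_RW_MODE/RW_INDEX and then streams 256 packed entries into DC_LUT_30_COLOR, three 10-bit channels per 32-bit word. A standalone illustration of that packing (it assumes lut_r/g/b already hold 10-bit values, as the shifts above imply):

/* Illustration of the DC_LUT_30_COLOR packing used above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_lut_30(uint16_t r, uint16_t g, uint16_t b)
{
	return ((uint32_t)(r & 0x3ff) << 20) |
	       ((uint32_t)(g & 0x3ff) << 10) |
	        (uint32_t)(b & 0x3ff);
}

int main(void)
{
	/* full-scale white ends up as 0x3fffffff */
	printf("0x%08x\n", pack_lut_30(0x3ff, 0x3ff, 0x3ff));
	return 0;
}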
71static void legacy_crtc_load_lut(struct drm_crtc *crtc) 101static void legacy_crtc_load_lut(struct drm_crtc *crtc)
72{ 102{
73 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 103 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -100,7 +130,9 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
100 if (!crtc->enabled) 130 if (!crtc->enabled)
101 return; 131 return;
102 132
103 if (ASIC_IS_AVIVO(rdev)) 133 if (ASIC_IS_DCE4(rdev))
134 evergreen_crtc_load_lut(crtc);
135 else if (ASIC_IS_AVIVO(rdev))
104 avivo_crtc_load_lut(crtc); 136 avivo_crtc_load_lut(crtc);
105 else 137 else
106 legacy_crtc_load_lut(crtc); 138 legacy_crtc_load_lut(crtc);
@@ -361,6 +393,8 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
361 393
362int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) 394int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
363{ 395{
396 struct drm_device *dev = radeon_connector->base.dev;
397 struct radeon_device *rdev = dev->dev_private;
364 int ret = 0; 398 int ret = 0;
365 399
366 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 400 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
@@ -373,11 +407,11 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
373 if (!radeon_connector->ddc_bus) 407 if (!radeon_connector->ddc_bus)
374 return -1; 408 return -1;
375 if (!radeon_connector->edid) { 409 if (!radeon_connector->edid) {
376 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
377 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 410 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
378 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
379 } 411 }
380 412 /* some servers provide a hardcoded edid in rom for KVMs */
413 if (!radeon_connector->edid)
414 radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
381 if (radeon_connector->edid) { 415 if (radeon_connector->edid) {
382 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); 416 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
383 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); 417 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
@@ -395,9 +429,7 @@ static int radeon_ddc_dump(struct drm_connector *connector)
395 429
396 if (!radeon_connector->ddc_bus) 430 if (!radeon_connector->ddc_bus)
397 return -1; 431 return -1;
398 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
399 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); 432 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
400 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
401 if (edid) { 433 if (edid) {
402 kfree(edid); 434 kfree(edid);
403 } 435 }
@@ -414,13 +446,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
414 return n; 446 return n;
415} 447}
416 448
417void radeon_compute_pll(struct radeon_pll *pll, 449static void radeon_compute_pll_legacy(struct radeon_pll *pll,
418 uint64_t freq, 450 uint64_t freq,
419 uint32_t *dot_clock_p, 451 uint32_t *dot_clock_p,
420 uint32_t *fb_div_p, 452 uint32_t *fb_div_p,
421 uint32_t *frac_fb_div_p, 453 uint32_t *frac_fb_div_p,
422 uint32_t *ref_div_p, 454 uint32_t *ref_div_p,
423 uint32_t *post_div_p) 455 uint32_t *post_div_p)
424{ 456{
425 uint32_t min_ref_div = pll->min_ref_div; 457 uint32_t min_ref_div = pll->min_ref_div;
426 uint32_t max_ref_div = pll->max_ref_div; 458 uint32_t max_ref_div = pll->max_ref_div;
@@ -580,95 +612,194 @@ void radeon_compute_pll(struct radeon_pll *pll,
580 *post_div_p = best_post_div; 612 *post_div_p = best_post_div;
581} 613}
582 614
583void radeon_compute_pll_avivo(struct radeon_pll *pll, 615static bool
584 uint64_t freq, 616calc_fb_div(struct radeon_pll *pll,
585 uint32_t *dot_clock_p, 617 uint32_t freq,
586 uint32_t *fb_div_p, 618 uint32_t post_div,
587 uint32_t *frac_fb_div_p, 619 uint32_t ref_div,
588 uint32_t *ref_div_p, 620 uint32_t *fb_div,
589 uint32_t *post_div_p) 621 uint32_t *fb_div_frac)
590{ 622{
591 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; 623 fixed20_12 feedback_divider, a, b;
592 fixed20_12 pll_out_max, pll_out_min; 624 u32 vco_freq;
593 fixed20_12 pll_in_max, pll_in_min; 625
594 fixed20_12 reference_freq; 626 vco_freq = freq * post_div;
595 fixed20_12 error, ffreq, a, b; 627 /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
596 628 a.full = rfixed_const(pll->reference_freq);
597 pll_out_max.full = rfixed_const(pll->pll_out_max); 629 feedback_divider.full = rfixed_const(vco_freq);
598 pll_out_min.full = rfixed_const(pll->pll_out_min); 630 feedback_divider.full = rfixed_div(feedback_divider, a);
599 pll_in_max.full = rfixed_const(pll->pll_in_max); 631 a.full = rfixed_const(ref_div);
600 pll_in_min.full = rfixed_const(pll->pll_in_min); 632 feedback_divider.full = rfixed_mul(feedback_divider, a);
601 reference_freq.full = rfixed_const(pll->reference_freq); 633
602 do_div(freq, 10); 634 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
635 /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
636 a.full = rfixed_const(10);
637 feedback_divider.full = rfixed_mul(feedback_divider, a);
638 feedback_divider.full += rfixed_const_half(0);
639 feedback_divider.full = rfixed_floor(feedback_divider);
640 feedback_divider.full = rfixed_div(feedback_divider, a);
641
642 /* *fb_div = floor(feedback_divider); */
643 a.full = rfixed_floor(feedback_divider);
644 *fb_div = rfixed_trunc(a);
645 /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
646 a.full = rfixed_const(10);
647 b.full = rfixed_mul(feedback_divider, a);
648
649 feedback_divider.full = rfixed_floor(feedback_divider);
650 feedback_divider.full = rfixed_mul(feedback_divider, a);
651 feedback_divider.full = b.full - feedback_divider.full;
652 *fb_div_frac = rfixed_trunc(feedback_divider);
653 } else {
654 /* *fb_div = floor(feedback_divider + 0.5); */
655 feedback_divider.full += rfixed_const_half(0);
656 feedback_divider.full = rfixed_floor(feedback_divider);
657
658 *fb_div = rfixed_trunc(feedback_divider);
659 *fb_div_frac = 0;
660 }
661
662 if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div))
663 return false;
664 else
665 return true;
666}
667
668static bool
669calc_fb_ref_div(struct radeon_pll *pll,
670 uint32_t freq,
671 uint32_t post_div,
672 uint32_t *fb_div,
673 uint32_t *fb_div_frac,
674 uint32_t *ref_div)
675{
676 fixed20_12 ffreq, max_error, error, pll_out, a;
677 u32 vco;
678
603 ffreq.full = rfixed_const(freq); 679 ffreq.full = rfixed_const(freq);
604 error.full = rfixed_const(100 * 100); 680 /* max_error = ffreq * 0.0025; */
681 a.full = rfixed_const(400);
682 max_error.full = rfixed_div(ffreq, a);
605 683
606 /* max p */ 684 for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
607 p.full = rfixed_div(pll_out_max, ffreq); 685 if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
608 p.full = rfixed_floor(p); 686 vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
687 vco = vco / ((*ref_div) * 10);
609 688
610 /* min m */ 689 if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max))
611 m.full = rfixed_div(reference_freq, pll_in_max); 690 continue;
612 m.full = rfixed_ceil(m);
613 691
614 while (1) { 692 /* pll_out = vco / post_div; */
615 n.full = rfixed_div(ffreq, reference_freq); 693 a.full = rfixed_const(post_div);
616 n.full = rfixed_mul(n, m); 694 pll_out.full = rfixed_const(vco);
617 n.full = rfixed_mul(n, p); 695 pll_out.full = rfixed_div(pll_out, a);
618 696
619 f_vco.full = rfixed_div(n, m); 697 if (pll_out.full >= ffreq.full) {
620 f_vco.full = rfixed_mul(f_vco, reference_freq); 698 error.full = pll_out.full - ffreq.full;
699 if (error.full <= max_error.full)
700 return true;
701 }
702 }
703 }
704 return false;
705}
621 706
622 f_pclk.full = rfixed_div(f_vco, p); 707static void radeon_compute_pll_new(struct radeon_pll *pll,
708 uint64_t freq,
709 uint32_t *dot_clock_p,
710 uint32_t *fb_div_p,
711 uint32_t *frac_fb_div_p,
712 uint32_t *ref_div_p,
713 uint32_t *post_div_p)
714{
715 u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
716 u32 best_freq = 0, vco_frequency;
623 717
624 if (f_pclk.full > ffreq.full) 718 /* freq = freq / 10; */
625 error.full = f_pclk.full - ffreq.full; 719 do_div(freq, 10);
626 else
627 error.full = ffreq.full - f_pclk.full;
628 error.full = rfixed_div(error, f_pclk);
629 a.full = rfixed_const(100 * 100);
630 error.full = rfixed_mul(error, a);
631
632 a.full = rfixed_mul(m, p);
633 a.full = rfixed_div(n, a);
634 best_freq.full = rfixed_mul(reference_freq, a);
635
636 if (rfixed_trunc(error) < 25)
637 break;
638
639 a.full = rfixed_const(1);
640 m.full = m.full + a.full;
641 a.full = rfixed_div(reference_freq, m);
642 if (a.full >= pll_in_min.full)
643 continue;
644 720
645 m.full = rfixed_div(reference_freq, pll_in_max); 721 if (pll->flags & RADEON_PLL_USE_POST_DIV) {
646 m.full = rfixed_ceil(m); 722 post_div = pll->post_div;
647 a.full= rfixed_const(1); 723 if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div))
648 p.full = p.full - a.full; 724 goto done;
649 a.full = rfixed_mul(p, ffreq); 725
650 if (a.full >= pll_out_min.full) 726 vco_frequency = freq * post_div;
651 continue; 727 if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
652 else { 728 goto done;
653 DRM_ERROR("Unable to find pll dividers\n"); 729
654 break; 730 if (pll->flags & RADEON_PLL_USE_REF_DIV) {
731 ref_div = pll->reference_div;
732 if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
733 goto done;
734 if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
735 goto done;
736 }
737 } else {
738 for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) {
739 if (pll->flags & RADEON_PLL_LEGACY) {
740 if ((post_div == 5) ||
741 (post_div == 7) ||
742 (post_div == 9) ||
743 (post_div == 10) ||
744 (post_div == 11))
745 continue;
746 }
747
748 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
749 continue;
750
751 vco_frequency = freq * post_div;
752 if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
753 continue;
754 if (pll->flags & RADEON_PLL_USE_REF_DIV) {
755 ref_div = pll->reference_div;
756 if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
757 goto done;
758 if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
759 break;
760 } else {
761 if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div))
762 break;
763 }
655 } 764 }
656 } 765 }
657 766
658 a.full = rfixed_const(10); 767 best_freq = pll->reference_freq * 10 * fb_div;
659 b.full = rfixed_mul(n, a); 768 best_freq += pll->reference_freq * fb_div_frac;
769 best_freq = best_freq / (ref_div * post_div);
660 770
661 frac_n.full = rfixed_floor(n); 771done:
662 frac_n.full = rfixed_mul(frac_n, a); 772 if (best_freq == 0)
663 frac_n.full = b.full - frac_n.full; 773 DRM_ERROR("Couldn't find valid PLL dividers\n");
664 774
665 *dot_clock_p = rfixed_trunc(best_freq); 775 *dot_clock_p = best_freq / 10;
666 *fb_div_p = rfixed_trunc(n); 776 *fb_div_p = fb_div;
667 *frac_fb_div_p = rfixed_trunc(frac_n); 777 *frac_fb_div_p = fb_div_frac;
668 *ref_div_p = rfixed_trunc(m); 778 *ref_div_p = ref_div;
669 *post_div_p = rfixed_trunc(p); 779 *post_div_p = post_div;
670 780
671 DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p); 781 DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
782}
783
784void radeon_compute_pll(struct radeon_pll *pll,
785 uint64_t freq,
786 uint32_t *dot_clock_p,
787 uint32_t *fb_div_p,
788 uint32_t *frac_fb_div_p,
789 uint32_t *ref_div_p,
790 uint32_t *post_div_p)
791{
792 switch (pll->algo) {
793 case PLL_ALGO_NEW:
794 radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
795 frac_fb_div_p, ref_div_p, post_div_p);
796 break;
797 case PLL_ALGO_LEGACY:
798 default:
799 radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p,
800 frac_fb_div_p, ref_div_p, post_div_p);
801 break;
802 }
672} 803}
673 804
674static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) 805static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
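The comments in calc_fb_div()/calc_fb_ref_div() spell out the arithmetic: vco = freq * post_div, feedback divider = vco * ref_div / reference_freq, and the dot clock is then rebuilt from the chosen dividers. A standalone integer check of one made-up operating point (2700 reference, 13500 target in the same post-do_div units, post_div 4, ref_div 2; the +0.5 rounding of the fractional digit is omitted):

/* Plain-integer check of the calc_fb_div() / radeon_compute_pll_new()
 * arithmetic above; numbers and units are illustrative only. */
#include <stdio.h>

int main(void)
{
	unsigned ref_freq = 2700;	/* pll->reference_freq (example) */
	unsigned freq     = 13500;	/* target clock after do_div(freq, 10) */
	unsigned post_div = 4;
	unsigned ref_div  = 2;

	unsigned vco = freq * post_div;				/* 54000 */
	unsigned fb_times10 = vco * ref_div * 10 / ref_freq;	/* 400 (rounding omitted) */
	unsigned fb_div = fb_times10 / 10;			/* 40 */
	unsigned fb_div_frac = fb_times10 % 10;			/* 0 */

	/* rebuild the clock the way radeon_compute_pll_new() does */
	unsigned best_freq = (ref_freq * 10 * fb_div + ref_freq * fb_div_frac)
			     / (ref_div * post_div);		/* 135000 */
	unsigned dot_clock = best_freq / 10;			/* 13500: back at the target */

	printf("fb %u.%u, dot clock %u\n", fb_div, fb_div_frac, dot_clock);
	return 0;
}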
@@ -679,11 +810,8 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
679 if (fb->fbdev) 810 if (fb->fbdev)
680 radeonfb_remove(dev, fb); 811 radeonfb_remove(dev, fb);
681 812
682 if (radeon_fb->obj) { 813 if (radeon_fb->obj)
683 mutex_lock(&dev->struct_mutex); 814 drm_gem_object_unreference_unlocked(radeon_fb->obj);
684 drm_gem_object_unreference(radeon_fb->obj);
685 mutex_unlock(&dev->struct_mutex);
686 }
687 drm_framebuffer_cleanup(fb); 815 drm_framebuffer_cleanup(fb);
688 kfree(radeon_fb); 816 kfree(radeon_fb);
689} 817}
@@ -819,7 +947,7 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
819 947
820int radeon_modeset_init(struct radeon_device *rdev) 948int radeon_modeset_init(struct radeon_device *rdev)
821{ 949{
822 int num_crtc = 2, i; 950 int i;
823 int ret; 951 int ret;
824 952
825 drm_mode_config_init(rdev->ddev); 953 drm_mode_config_init(rdev->ddev);
@@ -842,11 +970,23 @@ int radeon_modeset_init(struct radeon_device *rdev)
842 return ret; 970 return ret;
843 } 971 }
844 972
973 /* check combios for a valid hardcoded EDID - Sun servers */
974 if (!rdev->is_atom_bios) {
975 /* check for hardcoded EDID in BIOS */
976 radeon_combios_check_hardcoded_edid(rdev);
977 }
978
845 if (rdev->flags & RADEON_SINGLE_CRTC) 979 if (rdev->flags & RADEON_SINGLE_CRTC)
846 num_crtc = 1; 980 rdev->num_crtc = 1;
981 else {
982 if (ASIC_IS_DCE4(rdev))
983 rdev->num_crtc = 6;
984 else
985 rdev->num_crtc = 2;
986 }
847 987
848 /* allocate crtcs */ 988 /* allocate crtcs */
849 for (i = 0; i < num_crtc; i++) { 989 for (i = 0; i < rdev->num_crtc; i++) {
850 radeon_crtc_init(rdev->ddev, i); 990 radeon_crtc_init(rdev->ddev, i);
851 } 991 }
852 992
@@ -863,6 +1003,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
863 1003
864void radeon_modeset_fini(struct radeon_device *rdev) 1004void radeon_modeset_fini(struct radeon_device *rdev)
865{ 1005{
1006 kfree(rdev->mode_info.bios_hardcoded_edid);
1007
866 if (rdev->mode_info.mode_config_initialized) { 1008 if (rdev->mode_info.mode_config_initialized) {
867 radeon_hpd_fini(rdev); 1009 radeon_hpd_fini(rdev);
868 drm_mode_config_cleanup(rdev->ddev); 1010 drm_mode_config_cleanup(rdev->ddev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8ba3de7994d4..6eec0ece6a6c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -40,9 +40,11 @@
40 40
41/* 41/*
42 * KMS wrapper. 42 * KMS wrapper.
43 * - 2.0.0 - initial interface
44 * - 2.1.0 - add square tiling interface
43 */ 45 */
44#define KMS_DRIVER_MAJOR 2 46#define KMS_DRIVER_MAJOR 2
45#define KMS_DRIVER_MINOR 0 47#define KMS_DRIVER_MINOR 1
46#define KMS_DRIVER_PATCHLEVEL 0 48#define KMS_DRIVER_PATCHLEVEL 0
47int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 49int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
48int radeon_driver_unload_kms(struct drm_device *dev); 50int radeon_driver_unload_kms(struct drm_device *dev);
@@ -86,7 +88,8 @@ int radeon_benchmarking = 0;
86int radeon_testing = 0; 88int radeon_testing = 0;
87int radeon_connector_table = 0; 89int radeon_connector_table = 0;
88int radeon_tv = 1; 90int radeon_tv = 1;
89int radeon_new_pll = 1; 91int radeon_new_pll = -1;
92int radeon_dynpm = -1;
90int radeon_audio = 1; 93int radeon_audio = 1;
91 94
92MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 95MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
@@ -122,9 +125,12 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
122MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); 125MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
123module_param_named(tv, radeon_tv, int, 0444); 126module_param_named(tv, radeon_tv, int, 0444);
124 127
125MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips"); 128MODULE_PARM_DESC(new_pll, "Select new PLL code");
126module_param_named(new_pll, radeon_new_pll, int, 0444); 129module_param_named(new_pll, radeon_new_pll, int, 0444);
127 130
131MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
132module_param_named(dynpm, radeon_dynpm, int, 0444);
133
128MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); 134MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
129module_param_named(audio, radeon_audio, int, 0444); 135module_param_named(audio, radeon_audio, int, 0444);
130 136
@@ -339,6 +345,7 @@ static int __init radeon_init(void)
339 driver = &kms_driver; 345 driver = &kms_driver;
340 driver->driver_features |= DRIVER_MODESET; 346 driver->driver_features |= DRIVER_MODESET;
341 driver->num_ioctls = radeon_max_kms_ioctl; 347 driver->num_ioctls = radeon_max_kms_ioctl;
348 radeon_register_atpx_handler();
342 } 349 }
343 /* if the vga console setting is enabled still 350 /* if the vga console setting is enabled still
344 * let modprobe override it */ 351 * let modprobe override it */
@@ -348,6 +355,7 @@ static int __init radeon_init(void)
348static void __exit radeon_exit(void) 355static void __exit radeon_exit(void)
349{ 356{
350 drm_exit(driver); 357 drm_exit(driver);
358 radeon_unregister_atpx_handler();
351} 359}
352 360
353module_init(radeon_init); 361module_init(radeon_init);
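The new radeon_dynpm parameter, like the new_pll default change to -1, appears to follow the driver's tri-state convention where -1 means "use the driver default". A minimal sketch of how power-management code might consume such a parameter; the helper and the default it falls back to are assumptions for illustration only, not taken from this patch:

/* Illustrative only: interpret a tri-state module parameter where
 * -1 = driver default, 0 = force off, 1 = force on. */
static bool radeon_dynpm_requested(void)
{
	if (radeon_dynpm == -1)
		return false;	/* assumed conservative default */
	return radeon_dynpm == 1;
}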
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index c57ad606504d..ec55f2b23c22 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -268,6 +268,8 @@ typedef struct drm_radeon_private {
268 268
269 u32 scratch_ages[5]; 269 u32 scratch_ages[5];
270 270
271 int have_z_offset;
272
 271 /* starting from here on, data is preserved across an open */ 273 /* starting from here on, data is preserved across an open */
272 uint32_t flags; /* see radeon_chip_flags */ 274 uint32_t flags; /* see radeon_chip_flags */
273 resource_size_t fb_aper_offset; 275 resource_size_t fb_aper_offset;
@@ -295,6 +297,9 @@ typedef struct drm_radeon_private {
295 int r700_sc_prim_fifo_size; 297 int r700_sc_prim_fifo_size;
296 int r700_sc_hiz_tile_fifo_size; 298 int r700_sc_hiz_tile_fifo_size;
297 int r700_sc_earlyz_tile_fifo_fize; 299 int r700_sc_earlyz_tile_fifo_fize;
300 int r600_group_size;
301 int r600_npipes;
302 int r600_nbanks;
298 303
299 struct mutex cs_mutex; 304 struct mutex cs_mutex;
300 u32 cs_id_scnt; 305 u32 cs_id_scnt;
@@ -310,9 +315,11 @@ typedef struct drm_radeon_buf_priv {
310 u32 age; 315 u32 age;
311} drm_radeon_buf_priv_t; 316} drm_radeon_buf_priv_t;
312 317
318struct drm_buffer;
319
313typedef struct drm_radeon_kcmd_buffer { 320typedef struct drm_radeon_kcmd_buffer {
314 int bufsz; 321 int bufsz;
315 char *buf; 322 struct drm_buffer *buffer;
316 int nbox; 323 int nbox;
317 struct drm_clip_rect __user *boxes; 324 struct drm_clip_rect __user *boxes;
318} drm_radeon_kcmd_buffer_t; 325} drm_radeon_kcmd_buffer_t;
@@ -455,6 +462,15 @@ extern void r600_blit_swap(struct drm_device *dev,
455 int sx, int sy, int dx, int dy, 462 int sx, int sy, int dx, int dy,
456 int w, int h, int src_pitch, int dst_pitch, int cpp); 463 int w, int h, int src_pitch, int dst_pitch, int cpp);
457 464
465/* atpx handler */
466#if defined(CONFIG_VGA_SWITCHEROO)
467void radeon_register_atpx_handler(void);
468void radeon_unregister_atpx_handler(void);
469#else
470static inline void radeon_register_atpx_handler(void) {}
471static inline void radeon_unregister_atpx_handler(void) {}
472#endif
473
458/* Flags for stats.boxes 474/* Flags for stats.boxes
459 */ 475 */
460#define RADEON_BOX_DMA_IDLE 0x1 476#define RADEON_BOX_DMA_IDLE 0x1
@@ -2122,4 +2138,32 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
2122 write &= mask; \ 2138 write &= mask; \
2123} while (0) 2139} while (0)
2124 2140
2141/**
 2142 * Copy the given number of dwords from a drm buffer to the ring buffer.
2143 */
2144#define OUT_RING_DRM_BUFFER(buf, sz) do { \
2145 int _size = (sz) * 4; \
2146 struct drm_buffer *_buf = (buf); \
2147 int _part_size; \
2148 while (_size > 0) { \
2149 _part_size = _size; \
2150 \
2151 if (write + _part_size/4 > mask) \
2152 _part_size = ((mask + 1) - write)*4; \
2153 \
2154 if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE) \
2155 _part_size = PAGE_SIZE - drm_buffer_index(_buf);\
2156 \
2157 \
2158 \
2159 memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)] \
2160 [drm_buffer_index(_buf)], _part_size); \
2161 \
2162 _size -= _part_size; \
2163 write = (write + _part_size/4) & mask; \
2164 drm_buffer_advance(_buf, _part_size); \
2165 } \
2166} while (0)
2167
2168
2125#endif /* __RADEON_DRV_H__ */ 2169#endif /* __RADEON_DRV_H__ */
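OUT_RING_DRM_BUFFER() above copies a dword payload out of a paged struct drm_buffer, splitting the memcpy both at ring wrap-around and at drm_buffer page boundaries. A minimal usage sketch, assuming the existing RING_LOCALS/BEGIN_RING()/ADVANCE_RING() helpers from this same header; the wrapper function itself is illustrative and not part of the patch:

/* Illustrative wrapper, not in the patch: emit "count" dwords from the
 * drm_buffer's current read position into the legacy ring. */
static void radeon_emit_drm_buffer(drm_radeon_private_t *dev_priv,
				   struct drm_buffer *cmdbuf, int count)
{
	RING_LOCALS;

	BEGIN_RING(count);
	OUT_RING_DRM_BUFFER(cmdbuf, count);	/* advances cmdbuf by count dwords */
	ADVANCE_RING();
}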
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 3c91724457ca..bc926ea0a530 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -53,7 +53,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
53 /* DVO requires 2x ppll clocks depending on tmds chip */ 53 /* DVO requires 2x ppll clocks depending on tmds chip */
54 if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) 54 if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
55 return index_mask; 55 return index_mask;
56 56
57 count = -1; 57 count = -1;
58 list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) { 58 list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
59 struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder); 59 struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
@@ -228,6 +228,32 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
228 return NULL; 228 return NULL;
229} 229}
230 230
231static struct radeon_connector_atom_dig *
232radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
233{
234 struct drm_device *dev = encoder->dev;
235 struct radeon_device *rdev = dev->dev_private;
236 struct drm_connector *connector;
237 struct radeon_connector *radeon_connector;
238 struct radeon_connector_atom_dig *dig_connector;
239
240 if (!rdev->is_atom_bios)
241 return NULL;
242
243 connector = radeon_get_connector_for_encoder(encoder);
244 if (!connector)
245 return NULL;
246
247 radeon_connector = to_radeon_connector(connector);
248
249 if (!radeon_connector->con_priv)
250 return NULL;
251
252 dig_connector = radeon_connector->con_priv;
253
254 return dig_connector;
255}
256
231static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, 257static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
232 struct drm_display_mode *mode, 258 struct drm_display_mode *mode,
233 struct drm_display_mode *adjusted_mode) 259 struct drm_display_mode *adjusted_mode)
@@ -236,6 +262,9 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
236 struct drm_device *dev = encoder->dev; 262 struct drm_device *dev = encoder->dev;
237 struct radeon_device *rdev = dev->dev_private; 263 struct radeon_device *rdev = dev->dev_private;
238 264
265 /* adjust pm to upcoming mode change */
266 radeon_pm_compute_clocks(rdev);
267
239 /* set the active encoder to connector routing */ 268 /* set the active encoder to connector routing */
240 radeon_encoder_set_active_device(encoder); 269 radeon_encoder_set_active_device(encoder);
241 drm_mode_set_crtcinfo(adjusted_mode, 0); 270 drm_mode_set_crtcinfo(adjusted_mode, 0);
@@ -458,34 +487,20 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
458 struct drm_device *dev = encoder->dev; 487 struct drm_device *dev = encoder->dev;
459 struct radeon_device *rdev = dev->dev_private; 488 struct radeon_device *rdev = dev->dev_private;
460 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 489 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
490 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
491 struct radeon_connector_atom_dig *dig_connector =
492 radeon_get_atom_connector_priv_from_encoder(encoder);
461 union lvds_encoder_control args; 493 union lvds_encoder_control args;
462 int index = 0; 494 int index = 0;
463 int hdmi_detected = 0; 495 int hdmi_detected = 0;
464 uint8_t frev, crev; 496 uint8_t frev, crev;
465 struct radeon_encoder_atom_dig *dig;
466 struct drm_connector *connector;
467 struct radeon_connector *radeon_connector;
468 struct radeon_connector_atom_dig *dig_connector;
469 497
470 connector = radeon_get_connector_for_encoder(encoder); 498 if (!dig || !dig_connector)
471 if (!connector)
472 return; 499 return;
473 500
474 radeon_connector = to_radeon_connector(connector); 501 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
475
476 if (!radeon_encoder->enc_priv)
477 return;
478
479 dig = radeon_encoder->enc_priv;
480
481 if (!radeon_connector->con_priv)
482 return;
483
484 if (drm_detect_hdmi_monitor(radeon_connector->edid))
485 hdmi_detected = 1; 502 hdmi_detected = 1;
486 503
487 dig_connector = radeon_connector->con_priv;
488
489 memset(&args, 0, sizeof(args)); 504 memset(&args, 0, sizeof(args));
490 505
491 switch (radeon_encoder->encoder_id) { 506 switch (radeon_encoder->encoder_id) {
@@ -586,7 +601,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
586{ 601{
587 struct drm_connector *connector; 602 struct drm_connector *connector;
588 struct radeon_connector *radeon_connector; 603 struct radeon_connector *radeon_connector;
589 struct radeon_connector_atom_dig *radeon_dig_connector; 604 struct radeon_connector_atom_dig *dig_connector;
590 605
591 connector = radeon_get_connector_for_encoder(encoder); 606 connector = radeon_get_connector_for_encoder(encoder);
592 if (!connector) 607 if (!connector)
@@ -617,9 +632,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
617 break; 632 break;
618 case DRM_MODE_CONNECTOR_DisplayPort: 633 case DRM_MODE_CONNECTOR_DisplayPort:
619 case DRM_MODE_CONNECTOR_eDP: 634 case DRM_MODE_CONNECTOR_eDP:
620 radeon_dig_connector = radeon_connector->con_priv; 635 dig_connector = radeon_connector->con_priv;
621 if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 636 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
622 (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 637 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
623 return ATOM_ENCODER_MODE_DP; 638 return ATOM_ENCODER_MODE_DP;
624 else if (drm_detect_hdmi_monitor(radeon_connector->edid)) 639 else if (drm_detect_hdmi_monitor(radeon_connector->edid))
625 return ATOM_ENCODER_MODE_HDMI; 640 return ATOM_ENCODER_MODE_HDMI;
@@ -656,6 +671,18 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
656 * - 2 DIG encoder blocks. 671 * - 2 DIG encoder blocks.
657 * DIG1/2 can drive UNIPHY0/1/2 link A or link B 672 * DIG1/2 can drive UNIPHY0/1/2 link A or link B
658 * 673 *
674 * DCE 4.0
 675 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
676 * Supports up to 6 digital outputs
677 * - 6 DIG encoder blocks.
678 * - DIG to PHY mapping is hardcoded
679 * DIG1 drives UNIPHY0 link A, A+B
680 * DIG2 drives UNIPHY0 link B
681 * DIG3 drives UNIPHY1 link A, A+B
682 * DIG4 drives UNIPHY1 link B
683 * DIG5 drives UNIPHY2 link A, A+B
684 * DIG6 drives UNIPHY2 link B
685 *
659 * Routing 686 * Routing
660 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) 687 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
661 * Examples: 688 * Examples:
@@ -664,88 +691,78 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
664 * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS 691 * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
665 * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI 692 * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
666 */ 693 */
667static void 694
695union dig_encoder_control {
696 DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
697 DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
698 DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
699};
700
701void
668atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) 702atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
669{ 703{
670 struct drm_device *dev = encoder->dev; 704 struct drm_device *dev = encoder->dev;
671 struct radeon_device *rdev = dev->dev_private; 705 struct radeon_device *rdev = dev->dev_private;
672 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 706 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
673 DIG_ENCODER_CONTROL_PS_ALLOCATION args; 707 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
708 struct radeon_connector_atom_dig *dig_connector =
709 radeon_get_atom_connector_priv_from_encoder(encoder);
710 union dig_encoder_control args;
674 int index = 0, num = 0; 711 int index = 0, num = 0;
675 uint8_t frev, crev; 712 uint8_t frev, crev;
676 struct radeon_encoder_atom_dig *dig;
677 struct drm_connector *connector;
678 struct radeon_connector *radeon_connector;
679 struct radeon_connector_atom_dig *dig_connector;
680 713
681 connector = radeon_get_connector_for_encoder(encoder); 714 if (!dig || !dig_connector)
682 if (!connector)
683 return; 715 return;
684 716
685 radeon_connector = to_radeon_connector(connector);
686
687 if (!radeon_connector->con_priv)
688 return;
689
690 dig_connector = radeon_connector->con_priv;
691
692 if (!radeon_encoder->enc_priv)
693 return;
694
695 dig = radeon_encoder->enc_priv;
696
697 memset(&args, 0, sizeof(args)); 717 memset(&args, 0, sizeof(args));
698 718
699 if (dig->dig_encoder) 719 if (ASIC_IS_DCE4(rdev))
700 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); 720 index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
701 else 721 else {
702 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); 722 if (dig->dig_encoder)
723 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
724 else
725 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
726 }
703 num = dig->dig_encoder + 1; 727 num = dig->dig_encoder + 1;
704 728
705 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 729 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
706 730
707 args.ucAction = action; 731 args.v1.ucAction = action;
708 args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 732 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
733 args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
709 734
710 if (ASIC_IS_DCE32(rdev)) { 735 if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
736 if (dig_connector->dp_clock == 270000)
737 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
738 args.v1.ucLaneNum = dig_connector->dp_lane_count;
739 } else if (radeon_encoder->pixel_clock > 165000)
740 args.v1.ucLaneNum = 8;
741 else
742 args.v1.ucLaneNum = 4;
743
744 if (ASIC_IS_DCE4(rdev)) {
745 args.v3.acConfig.ucDigSel = dig->dig_encoder;
746 args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
747 } else {
711 switch (radeon_encoder->encoder_id) { 748 switch (radeon_encoder->encoder_id) {
712 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 749 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
713 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; 750 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
714 break; 751 break;
715 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 752 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
716 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2; 753 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
754 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
717 break; 755 break;
718 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 756 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
719 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; 757 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
720 break;
721 }
722 } else {
723 switch (radeon_encoder->encoder_id) {
724 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
725 args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1;
726 break;
727 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
728 args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2;
729 break; 758 break;
730 } 759 }
760 if (dig_connector->linkb)
761 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
762 else
763 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
731 } 764 }
732 765
733 args.ucEncoderMode = atombios_get_encoder_mode(encoder);
734
735 if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
736 if (dig_connector->dp_clock == 270000)
737 args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
738 args.ucLaneNum = dig_connector->dp_lane_count;
739 } else if (radeon_encoder->pixel_clock > 165000)
740 args.ucLaneNum = 8;
741 else
742 args.ucLaneNum = 4;
743
744 if (dig_connector->linkb)
745 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
746 else
747 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
748
749 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 766 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
750 767
751} 768}
@@ -753,6 +770,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
753union dig_transmitter_control { 770union dig_transmitter_control {
754 DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; 771 DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
755 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; 772 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
773 DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
756}; 774};
757 775
758void 776void
@@ -761,37 +779,29 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
761 struct drm_device *dev = encoder->dev; 779 struct drm_device *dev = encoder->dev;
762 struct radeon_device *rdev = dev->dev_private; 780 struct radeon_device *rdev = dev->dev_private;
763 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 781 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
782 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
783 struct radeon_connector_atom_dig *dig_connector =
784 radeon_get_atom_connector_priv_from_encoder(encoder);
785 struct drm_connector *connector;
786 struct radeon_connector *radeon_connector;
764 union dig_transmitter_control args; 787 union dig_transmitter_control args;
765 int index = 0, num = 0; 788 int index = 0, num = 0;
766 uint8_t frev, crev; 789 uint8_t frev, crev;
767 struct radeon_encoder_atom_dig *dig;
768 struct drm_connector *connector;
769 struct radeon_connector *radeon_connector;
770 struct radeon_connector_atom_dig *dig_connector;
771 bool is_dp = false; 790 bool is_dp = false;
791 int pll_id = 0;
772 792
773 connector = radeon_get_connector_for_encoder(encoder); 793 if (!dig || !dig_connector)
774 if (!connector)
775 return; 794 return;
776 795
796 connector = radeon_get_connector_for_encoder(encoder);
777 radeon_connector = to_radeon_connector(connector); 797 radeon_connector = to_radeon_connector(connector);
778 798
779 if (!radeon_encoder->enc_priv)
780 return;
781
782 dig = radeon_encoder->enc_priv;
783
784 if (!radeon_connector->con_priv)
785 return;
786
787 dig_connector = radeon_connector->con_priv;
788
789 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) 799 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
790 is_dp = true; 800 is_dp = true;
791 801
792 memset(&args, 0, sizeof(args)); 802 memset(&args, 0, sizeof(args));
793 803
794 if (ASIC_IS_DCE32(rdev)) 804 if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
795 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); 805 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
796 else { 806 else {
797 switch (radeon_encoder->encoder_id) { 807 switch (radeon_encoder->encoder_id) {
@@ -821,7 +831,54 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
821 else 831 else
822 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 832 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
823 } 833 }
824 if (ASIC_IS_DCE32(rdev)) { 834 if (ASIC_IS_DCE4(rdev)) {
835 if (is_dp)
836 args.v3.ucLaneNum = dig_connector->dp_lane_count;
837 else if (radeon_encoder->pixel_clock > 165000)
838 args.v3.ucLaneNum = 8;
839 else
840 args.v3.ucLaneNum = 4;
841
842 if (dig_connector->linkb) {
843 args.v3.acConfig.ucLinkSel = 1;
844 args.v3.acConfig.ucEncoderSel = 1;
845 }
846
847 /* Select the PLL for the PHY
848 * DP PHY should be clocked from external src if there is
849 * one.
850 */
851 if (encoder->crtc) {
852 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
853 pll_id = radeon_crtc->pll_id;
854 }
855 if (is_dp && rdev->clock.dp_extclk)
856 args.v3.acConfig.ucRefClkSource = 2; /* external src */
857 else
858 args.v3.acConfig.ucRefClkSource = pll_id;
859
860 switch (radeon_encoder->encoder_id) {
861 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
862 args.v3.acConfig.ucTransmitterSel = 0;
863 num = 0;
864 break;
865 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
866 args.v3.acConfig.ucTransmitterSel = 1;
867 num = 1;
868 break;
869 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
870 args.v3.acConfig.ucTransmitterSel = 2;
871 num = 2;
872 break;
873 }
874
875 if (is_dp)
876 args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
877 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
878 if (dig->coherent_mode)
879 args.v3.acConfig.fCoherentMode = 1;
880 }
881 } else if (ASIC_IS_DCE32(rdev)) {
825 if (dig->dig_encoder == 1) 882 if (dig->dig_encoder == 1)
826 args.v2.acConfig.ucEncoderSel = 1; 883 args.v2.acConfig.ucEncoderSel = 1;
827 if (dig_connector->linkb) 884 if (dig_connector->linkb)
@@ -849,7 +906,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
849 args.v2.acConfig.fCoherentMode = 1; 906 args.v2.acConfig.fCoherentMode = 1;
850 } 907 }
851 } else { 908 } else {
852
853 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; 909 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
854 910
855 if (dig->dig_encoder) 911 if (dig->dig_encoder)
@@ -1024,9 +1080,12 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1024 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1080 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1025 } 1081 }
1026 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 1082 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
1083
1084 /* adjust pm to dpms change */
1085 radeon_pm_compute_clocks(rdev);
1027} 1086}
1028 1087
1029union crtc_sourc_param { 1088union crtc_source_param {
1030 SELECT_CRTC_SOURCE_PS_ALLOCATION v1; 1089 SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
1031 SELECT_CRTC_SOURCE_PARAMETERS_V2 v2; 1090 SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
1032}; 1091};
@@ -1038,7 +1097,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1038 struct radeon_device *rdev = dev->dev_private; 1097 struct radeon_device *rdev = dev->dev_private;
1039 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1098 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1040 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 1099 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1041 union crtc_sourc_param args; 1100 union crtc_source_param args;
1042 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); 1101 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
1043 uint8_t frev, crev; 1102 uint8_t frev, crev;
1044 struct radeon_encoder_atom_dig *dig; 1103 struct radeon_encoder_atom_dig *dig;
@@ -1107,10 +1166,26 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1107 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1166 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1108 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1167 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1109 dig = radeon_encoder->enc_priv; 1168 dig = radeon_encoder->enc_priv;
1110 if (dig->dig_encoder) 1169 switch (dig->dig_encoder) {
1111 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; 1170 case 0:
1112 else
1113 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; 1171 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1172 break;
1173 case 1:
1174 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1175 break;
1176 case 2:
1177 args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
1178 break;
1179 case 3:
1180 args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
1181 break;
1182 case 4:
1183 args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
1184 break;
1185 case 5:
1186 args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
1187 break;
1188 }
1114 break; 1189 break;
1115 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1190 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1116 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; 1191 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
@@ -1167,6 +1242,7 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
1167 } 1242 }
1168 1243
1169 /* set scaler clears this on some chips */ 1244 /* set scaler clears this on some chips */
1245 /* XXX check DCE4 */
1170 if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) { 1246 if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
1171 if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) 1247 if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
1172 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 1248 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
@@ -1183,6 +1259,33 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1183 struct drm_encoder *test_encoder; 1259 struct drm_encoder *test_encoder;
1184 struct radeon_encoder_atom_dig *dig; 1260 struct radeon_encoder_atom_dig *dig;
1185 uint32_t dig_enc_in_use = 0; 1261 uint32_t dig_enc_in_use = 0;
1262
1263 if (ASIC_IS_DCE4(rdev)) {
1264 struct radeon_connector_atom_dig *dig_connector =
1265 radeon_get_atom_connector_priv_from_encoder(encoder);
1266
1267 switch (radeon_encoder->encoder_id) {
1268 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1269 if (dig_connector->linkb)
1270 return 1;
1271 else
1272 return 0;
1273 break;
1274 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1275 if (dig_connector->linkb)
1276 return 3;
1277 else
1278 return 2;
1279 break;
1280 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1281 if (dig_connector->linkb)
1282 return 5;
1283 else
1284 return 4;
1285 break;
1286 }
1287 }
1288
 1186 /* on DCE32 an encoder can drive any block so just use the crtc id */ 1289 /* on DCE32 an encoder can drive any block so just use the crtc id */
1187 if (ASIC_IS_DCE32(rdev)) { 1290 if (ASIC_IS_DCE32(rdev)) {
1188 return radeon_crtc->crtc_id; 1291 return radeon_crtc->crtc_id;
@@ -1254,15 +1357,26 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1254 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1357 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1255 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1358 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1256 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1359 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1257 /* disable the encoder and transmitter */ 1360 if (ASIC_IS_DCE4(rdev)) {
1258 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); 1361 /* disable the transmitter */
1259 atombios_dig_encoder_setup(encoder, ATOM_DISABLE); 1362 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1260 1363 /* setup and enable the encoder */
1261 /* setup and enable the encoder and transmitter */ 1364 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP);
1262 atombios_dig_encoder_setup(encoder, ATOM_ENABLE); 1365
1263 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); 1366 /* init and enable the transmitter */
1264 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); 1367 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1265 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1368 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1369 } else {
1370 /* disable the encoder and transmitter */
1371 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1372 atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
1373
1374 /* setup and enable the encoder and transmitter */
1375 atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
1376 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1377 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1378 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1379 }
1266 break; 1380 break;
1267 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1381 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1268 atombios_ddia_setup(encoder, ATOM_ENABLE); 1382 atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1282,7 +1396,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1282 } 1396 }
1283 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1397 atombios_apply_encoder_quirks(encoder, adjusted_mode);
1284 1398
1285 r600_hdmi_setmode(encoder, adjusted_mode); 1399 /* XXX */
1400 if (!ASIC_IS_DCE4(rdev))
1401 r600_hdmi_setmode(encoder, adjusted_mode);
1286} 1402}
1287 1403
1288static bool 1404static bool
@@ -1480,10 +1596,18 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1480 return; 1596 return;
1481 1597
1482 encoder = &radeon_encoder->base; 1598 encoder = &radeon_encoder->base;
1483 if (rdev->flags & RADEON_SINGLE_CRTC) 1599 switch (rdev->num_crtc) {
1600 case 1:
1484 encoder->possible_crtcs = 0x1; 1601 encoder->possible_crtcs = 0x1;
1485 else 1602 break;
1603 case 2:
1604 default:
1486 encoder->possible_crtcs = 0x3; 1605 encoder->possible_crtcs = 0x3;
1606 break;
1607 case 6:
1608 encoder->possible_crtcs = 0x3f;
1609 break;
1610 }
1487 1611
1488 radeon_encoder->enc_priv = NULL; 1612 radeon_encoder->enc_priv = NULL;
1489 1613
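On DCE4 the DIG encoder choice is fully determined by the transmitter and the link, as stated in the routing comment and implemented in the radeon_atom_pick_dig_encoder() hunk above. A compact restatement of that hardcoded mapping, purely illustrative:

/* Illustrative only: DCE4 hardcoded DIG selection.
 * uniphy = 0/1/2 for UNIPHY0/1/2, linkb selects link B.
 * Yields DIG1..DIG6 as 0..5, matching the switch in
 * radeon_atom_pick_dig_encoder(). */
static int dce4_dig_for_uniphy(int uniphy, bool linkb)
{
	return uniphy * 2 + (linkb ? 1 : 0);
}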
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 797972e344a6..93c7d5d41914 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -75,6 +75,11 @@ enum radeon_family {
75 CHIP_RV730, 75 CHIP_RV730,
76 CHIP_RV710, 76 CHIP_RV710,
77 CHIP_RV740, 77 CHIP_RV740,
78 CHIP_CEDAR,
79 CHIP_REDWOOD,
80 CHIP_JUNIPER,
81 CHIP_CYPRESS,
82 CHIP_HEMLOCK,
78 CHIP_LAST, 83 CHIP_LAST,
79}; 84};
80 85
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d71e346e9ab5..8fccbf29235e 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -39,6 +39,8 @@
39 39
40#include "drm_fb_helper.h" 40#include "drm_fb_helper.h"
41 41
42#include <linux/vga_switcheroo.h>
43
42struct radeon_fb_device { 44struct radeon_fb_device {
43 struct drm_fb_helper helper; 45 struct drm_fb_helper helper;
44 struct radeon_framebuffer *rfb; 46 struct radeon_framebuffer *rfb;
@@ -148,7 +150,6 @@ int radeonfb_create(struct drm_device *dev,
148 unsigned long tmp; 150 unsigned long tmp;
149 bool fb_tiled = false; /* useful for testing */ 151 bool fb_tiled = false; /* useful for testing */
150 u32 tiling_flags = 0; 152 u32 tiling_flags = 0;
151 int crtc_count;
152 153
153 mode_cmd.width = surface_width; 154 mode_cmd.width = surface_width;
154 mode_cmd.height = surface_height; 155 mode_cmd.height = surface_height;
@@ -239,11 +240,7 @@ int radeonfb_create(struct drm_device *dev,
239 rfbdev = info->par; 240 rfbdev = info->par;
240 rfbdev->helper.funcs = &radeon_fb_helper_funcs; 241 rfbdev->helper.funcs = &radeon_fb_helper_funcs;
241 rfbdev->helper.dev = dev; 242 rfbdev->helper.dev = dev;
242 if (rdev->flags & RADEON_SINGLE_CRTC) 243 ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
243 crtc_count = 1;
244 else
245 crtc_count = 2;
246 ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
247 RADEONFB_CONN_LIMIT); 244 RADEONFB_CONN_LIMIT);
248 if (ret) 245 if (ret)
249 goto out_unref; 246 goto out_unref;
@@ -257,7 +254,7 @@ int radeonfb_create(struct drm_device *dev,
257 info->flags = FBINFO_DEFAULT; 254 info->flags = FBINFO_DEFAULT;
258 info->fbops = &radeonfb_ops; 255 info->fbops = &radeonfb_ops;
259 256
260 tmp = fb_gpuaddr - rdev->mc.vram_location; 257 tmp = fb_gpuaddr - rdev->mc.vram_start;
261 info->fix.smem_start = rdev->mc.aper_base + tmp; 258 info->fix.smem_start = rdev->mc.aper_base + tmp;
262 info->fix.smem_len = size; 259 info->fix.smem_len = size;
263 info->screen_base = fbptr; 260 info->screen_base = fbptr;
@@ -291,6 +288,7 @@ int radeonfb_create(struct drm_device *dev,
291 rfbdev->rdev = rdev; 288 rfbdev->rdev = rdev;
292 289
293 mutex_unlock(&rdev->ddev->struct_mutex); 290 mutex_unlock(&rdev->ddev->struct_mutex);
291 vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
294 return 0; 292 return 0;
295 293
296out_unref: 294out_unref:
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e73d56e83fa6..1770d3c07fd0 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -139,6 +139,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
139 unsigned t; 139 unsigned t;
140 unsigned p; 140 unsigned p;
141 int i, j; 141 int i, j;
142 u64 page_base;
142 143
143 if (!rdev->gart.ready) { 144 if (!rdev->gart.ready) {
 144 WARN(1, "trying to unbind memory from uninitialized GART !\n"); 145 WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -151,9 +152,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
151 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], 152 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
152 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 153 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
153 rdev->gart.pages[p] = NULL; 154 rdev->gart.pages[p] = NULL;
154 rdev->gart.pages_addr[p] = 0; 155 rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
156 page_base = rdev->gart.pages_addr[p];
155 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { 157 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
156 radeon_gart_set_page(rdev, t, 0); 158 radeon_gart_set_page(rdev, t, page_base);
159 page_base += RADEON_GPU_PAGE_SIZE;
157 } 160 }
158 } 161 }
159 } 162 }
@@ -199,8 +202,26 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
199 return 0; 202 return 0;
200} 203}
201 204
205void radeon_gart_restore(struct radeon_device *rdev)
206{
207 int i, j, t;
208 u64 page_base;
209
210 for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
211 page_base = rdev->gart.pages_addr[i];
212 for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
213 radeon_gart_set_page(rdev, t, page_base);
214 page_base += RADEON_GPU_PAGE_SIZE;
215 }
216 }
217 mb();
218 radeon_gart_tlb_flush(rdev);
219}
220
202int radeon_gart_init(struct radeon_device *rdev) 221int radeon_gart_init(struct radeon_device *rdev)
203{ 222{
223 int r, i;
224
204 if (rdev->gart.pages) { 225 if (rdev->gart.pages) {
205 return 0; 226 return 0;
206 } 227 }
@@ -209,6 +230,9 @@ int radeon_gart_init(struct radeon_device *rdev)
209 DRM_ERROR("Page size is smaller than GPU page size!\n"); 230 DRM_ERROR("Page size is smaller than GPU page size!\n");
210 return -EINVAL; 231 return -EINVAL;
211 } 232 }
233 r = radeon_dummy_page_init(rdev);
234 if (r)
235 return r;
212 /* Compute table size */ 236 /* Compute table size */
213 rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE; 237 rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
214 rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE; 238 rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
@@ -227,6 +251,10 @@ int radeon_gart_init(struct radeon_device *rdev)
227 radeon_gart_fini(rdev); 251 radeon_gart_fini(rdev);
228 return -ENOMEM; 252 return -ENOMEM;
229 } 253 }
254 /* set GART entry to point to the dummy page by default */
255 for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
256 rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
257 }
230 return 0; 258 return 0;
231} 259}
232 260
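With this change every GART entry always points at a real page (the dummy page when nothing is bound), and the new radeon_gart_restore() can rebuild the whole table from gart.pages_addr[]. A minimal sketch of where a resume path would call it; the surrounding function is hypothetical, only radeon_gart_restore() comes from the patch:

/* Hypothetical ASIC resume path: after the GART aperture registers have
 * been reprogrammed, rewrite every PTE from the saved pages_addr[] so the
 * table again matches what was bound before suspend or reset. */
static int example_gart_resume(struct radeon_device *rdev)
{
	/* ... asic-specific aperture/register setup elided ... */
	radeon_gart_restore(rdev);
	return 0;
}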
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index db8e9a355a01..ef92d147d8f0 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -69,9 +69,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
69 if (r != -ERESTARTSYS) 69 if (r != -ERESTARTSYS)
70 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", 70 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
71 size, initial_domain, alignment, r); 71 size, initial_domain, alignment, r);
72 mutex_lock(&rdev->ddev->struct_mutex); 72 drm_gem_object_unreference_unlocked(gobj);
73 drm_gem_object_unreference(gobj);
74 mutex_unlock(&rdev->ddev->struct_mutex);
75 return r; 73 return r;
76 } 74 }
77 gobj->driver_private = robj; 75 gobj->driver_private = robj;
@@ -202,14 +200,10 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
202 } 200 }
203 r = drm_gem_handle_create(filp, gobj, &handle); 201 r = drm_gem_handle_create(filp, gobj, &handle);
204 if (r) { 202 if (r) {
205 mutex_lock(&dev->struct_mutex); 203 drm_gem_object_unreference_unlocked(gobj);
206 drm_gem_object_unreference(gobj);
207 mutex_unlock(&dev->struct_mutex);
208 return r; 204 return r;
209 } 205 }
210 mutex_lock(&dev->struct_mutex); 206 drm_gem_object_handle_unreference_unlocked(gobj);
211 drm_gem_object_handle_unreference(gobj);
212 mutex_unlock(&dev->struct_mutex);
213 args->handle = handle; 207 args->handle = handle;
214 return 0; 208 return 0;
215} 209}
@@ -236,9 +230,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
236 230
237 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); 231 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
238 232
239 mutex_lock(&dev->struct_mutex); 233 drm_gem_object_unreference_unlocked(gobj);
240 drm_gem_object_unreference(gobj);
241 mutex_unlock(&dev->struct_mutex);
242 return r; 234 return r;
243} 235}
244 236
@@ -255,9 +247,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
255 } 247 }
256 robj = gobj->driver_private; 248 robj = gobj->driver_private;
257 args->addr_ptr = radeon_bo_mmap_offset(robj); 249 args->addr_ptr = radeon_bo_mmap_offset(robj);
258 mutex_lock(&dev->struct_mutex); 250 drm_gem_object_unreference_unlocked(gobj);
259 drm_gem_object_unreference(gobj);
260 mutex_unlock(&dev->struct_mutex);
261 return 0; 251 return 0;
262} 252}
263 253
@@ -288,9 +278,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
288 default: 278 default:
289 break; 279 break;
290 } 280 }
291 mutex_lock(&dev->struct_mutex); 281 drm_gem_object_unreference_unlocked(gobj);
292 drm_gem_object_unreference(gobj);
293 mutex_unlock(&dev->struct_mutex);
294 return r; 282 return r;
295} 283}
296 284
@@ -311,9 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
311 /* callback hw specific functions if any */ 299 /* callback hw specific functions if any */
312 if (robj->rdev->asic->ioctl_wait_idle) 300 if (robj->rdev->asic->ioctl_wait_idle)
313 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); 301 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
314 mutex_lock(&dev->struct_mutex); 302 drm_gem_object_unreference_unlocked(gobj);
315 drm_gem_object_unreference(gobj);
316 mutex_unlock(&dev->struct_mutex);
317 return r; 303 return r;
318} 304}
319 305
@@ -331,9 +317,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
331 return -EINVAL; 317 return -EINVAL;
332 robj = gobj->driver_private; 318 robj = gobj->driver_private;
333 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); 319 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
334 mutex_lock(&dev->struct_mutex); 320 drm_gem_object_unreference_unlocked(gobj);
335 drm_gem_object_unreference(gobj);
336 mutex_unlock(&dev->struct_mutex);
337 return r; 321 return r;
338} 322}
339 323
@@ -356,8 +340,6 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
356 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); 340 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
357 radeon_bo_unreserve(rbo); 341 radeon_bo_unreserve(rbo);
358out: 342out:
359 mutex_lock(&dev->struct_mutex); 343 drm_gem_object_unreference_unlocked(gobj);
360 drm_gem_object_unreference(gobj);
361 mutex_unlock(&dev->struct_mutex);
362 return r; 344 return r;
363} 345}
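The GEM ioctls above all drop the explicit struct_mutex lock/unlock around drm_gem_object_unreference() in favour of drm_gem_object_unreference_unlocked(), which does not require the caller to hold dev->struct_mutex. The resulting pattern, shown on a hypothetical ioctl (the ioctl itself is illustrative, the reference calls are the real DRM helpers used above):

static int example_gem_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -EINVAL;

	/* ... use gobj->driver_private ... */

	drm_gem_object_unreference_unlocked(gobj);	/* no struct_mutex needed */
	return 0;
}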
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index da3da1e89d00..4ae50c19589f 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -26,6 +26,7 @@
26#include "drmP.h" 26#include "drmP.h"
27#include "radeon_drm.h" 27#include "radeon_drm.h"
28#include "radeon.h" 28#include "radeon.h"
29#include "atom.h"
29 30
30/** 31/**
31 * radeon_ddc_probe 32 * radeon_ddc_probe
@@ -59,7 +60,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
59} 60}
60 61
61 62
62void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) 63static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
63{ 64{
64 struct radeon_device *rdev = i2c->dev->dev_private; 65 struct radeon_device *rdev = i2c->dev->dev_private;
65 struct radeon_i2c_bus_rec *rec = &i2c->rec; 66 struct radeon_i2c_bus_rec *rec = &i2c->rec;
@@ -71,13 +72,25 @@ void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
71 */ 72 */
72 if (rec->hw_capable) { 73 if (rec->hw_capable) {
73 if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) { 74 if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
74 if (rec->a_clk_reg == RADEON_GPIO_MONID) { 75 u32 reg;
76
77 if (rdev->family >= CHIP_RV350)
78 reg = RADEON_GPIO_MONID;
79 else if ((rdev->family == CHIP_R300) ||
80 (rdev->family == CHIP_R350))
81 reg = RADEON_GPIO_DVI_DDC;
82 else
83 reg = RADEON_GPIO_CRT2_DDC;
84
85 mutex_lock(&rdev->dc_hw_i2c_mutex);
86 if (rec->a_clk_reg == reg) {
75 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | 87 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
76 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1))); 88 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
77 } else { 89 } else {
78 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | 90 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
79 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3))); 91 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
80 } 92 }
93 mutex_unlock(&rdev->dc_hw_i2c_mutex);
81 } 94 }
82 } 95 }
83 96
@@ -168,6 +181,692 @@ static void set_data(void *i2c_priv, int data)
168 WREG32(rec->en_data_reg, val); 181 WREG32(rec->en_data_reg, val);
169} 182}
170 183
184static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
185{
186 struct radeon_pll *spll = &rdev->clock.spll;
187 u32 sclk = radeon_get_engine_clock(rdev);
188 u32 prescale = 0;
189 u32 n, m;
190 u8 loop;
191 int i2c_clock;
192
193 switch (rdev->family) {
194 case CHIP_R100:
195 case CHIP_RV100:
196 case CHIP_RS100:
197 case CHIP_RV200:
198 case CHIP_RS200:
199 case CHIP_R200:
200 case CHIP_RV250:
201 case CHIP_RS300:
202 case CHIP_RV280:
203 case CHIP_R300:
204 case CHIP_R350:
205 case CHIP_RV350:
206 n = (spll->reference_freq) / (4 * 6);
207 for (loop = 1; loop < 255; loop++) {
208 if ((loop * (loop - 1)) > n)
209 break;
210 }
211 m = loop - 1;
212 prescale = m | (loop << 8);
213 break;
214 case CHIP_RV380:
215 case CHIP_RS400:
216 case CHIP_RS480:
217 case CHIP_R420:
218 case CHIP_R423:
219 case CHIP_RV410:
220 sclk = radeon_get_engine_clock(rdev);
221 prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
222 break;
223 case CHIP_RS600:
224 case CHIP_RS690:
225 case CHIP_RS740:
226 /* todo */
227 break;
228 case CHIP_RV515:
229 case CHIP_R520:
230 case CHIP_RV530:
231 case CHIP_RV560:
232 case CHIP_RV570:
233 case CHIP_R580:
234 i2c_clock = 50;
235 sclk = radeon_get_engine_clock(rdev);
236 if (rdev->family == CHIP_R520)
237 prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
238 else
239 prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
240 break;
241 case CHIP_R600:
242 case CHIP_RV610:
243 case CHIP_RV630:
244 case CHIP_RV670:
245 /* todo */
246 break;
247 case CHIP_RV620:
248 case CHIP_RV635:
249 case CHIP_RS780:
250 case CHIP_RS880:
251 case CHIP_RV770:
252 case CHIP_RV730:
253 case CHIP_RV710:
254 case CHIP_RV740:
255 /* todo */
256 break;
257 case CHIP_CEDAR:
258 case CHIP_REDWOOD:
259 case CHIP_JUNIPER:
260 case CHIP_CYPRESS:
261 case CHIP_HEMLOCK:
262 /* todo */
263 break;
264 default:
265 DRM_ERROR("i2c: unhandled radeon chip\n");
266 break;
267 }
268 return prescale;
269}
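/* Worked example of the prescale formula above (illustrative, assuming sclk
 * is reported in 10 kHz units as elsewhere in the driver): for an R420-class
 * part at a 300 MHz engine clock, sclk = 30000, so
 *   prescale = (((30000 * 10) / (4 * 128 * 100) + 1) << 8) + 128
 *            = ((5 + 1) << 8) + 128 = 0x680,
 * i.e. a divider of 6 in the upper byte with the fixed low-byte term of 128. */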
270
271
272/* hw i2c engine for r1xx-4xx hardware
273 * hw can buffer up to 15 bytes
274 */
275static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
276 struct i2c_msg *msgs, int num)
277{
278 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
279 struct radeon_device *rdev = i2c->dev->dev_private;
280 struct radeon_i2c_bus_rec *rec = &i2c->rec;
281 struct i2c_msg *p;
282 int i, j, k, ret = num;
283 u32 prescale;
284 u32 i2c_cntl_0, i2c_cntl_1, i2c_data;
285 u32 tmp, reg;
286
287 mutex_lock(&rdev->dc_hw_i2c_mutex);
288 /* take the pm lock since we need a constant sclk */
289 mutex_lock(&rdev->pm.mutex);
290
291 prescale = radeon_get_i2c_prescale(rdev);
292
293 reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
294 RADEON_I2C_START |
295 RADEON_I2C_STOP |
296 RADEON_I2C_GO);
297
298 if (rdev->is_atom_bios) {
299 tmp = RREG32(RADEON_BIOS_6_SCRATCH);
300 WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
301 }
302
303 if (rec->mm_i2c) {
304 i2c_cntl_0 = RADEON_I2C_CNTL_0;
305 i2c_cntl_1 = RADEON_I2C_CNTL_1;
306 i2c_data = RADEON_I2C_DATA;
307 } else {
308 i2c_cntl_0 = RADEON_DVI_I2C_CNTL_0;
309 i2c_cntl_1 = RADEON_DVI_I2C_CNTL_1;
310 i2c_data = RADEON_DVI_I2C_DATA;
311
312 switch (rdev->family) {
313 case CHIP_R100:
314 case CHIP_RV100:
315 case CHIP_RS100:
316 case CHIP_RV200:
317 case CHIP_RS200:
318 case CHIP_RS300:
319 switch (rec->mask_clk_reg) {
320 case RADEON_GPIO_DVI_DDC:
321 /* no gpio select bit */
322 break;
323 default:
324 DRM_ERROR("gpio not supported with hw i2c\n");
325 ret = -EINVAL;
326 goto done;
327 }
328 break;
329 case CHIP_R200:
330 /* only bit 4 on r200 */
331 switch (rec->mask_clk_reg) {
332 case RADEON_GPIO_DVI_DDC:
333 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
334 break;
335 case RADEON_GPIO_MONID:
336 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
337 break;
338 default:
339 DRM_ERROR("gpio not supported with hw i2c\n");
340 ret = -EINVAL;
341 goto done;
342 }
343 break;
344 case CHIP_RV250:
345 case CHIP_RV280:
346 /* bits 3 and 4 */
347 switch (rec->mask_clk_reg) {
348 case RADEON_GPIO_DVI_DDC:
349 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
350 break;
351 case RADEON_GPIO_VGA_DDC:
352 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
353 break;
354 case RADEON_GPIO_CRT2_DDC:
355 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
356 break;
357 default:
358 DRM_ERROR("gpio not supported with hw i2c\n");
359 ret = -EINVAL;
360 goto done;
361 }
362 break;
363 case CHIP_R300:
364 case CHIP_R350:
365 /* only bit 4 on r300/r350 */
366 switch (rec->mask_clk_reg) {
367 case RADEON_GPIO_VGA_DDC:
368 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
369 break;
370 case RADEON_GPIO_DVI_DDC:
371 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
372 break;
373 default:
374 DRM_ERROR("gpio not supported with hw i2c\n");
375 ret = -EINVAL;
376 goto done;
377 }
378 break;
379 case CHIP_RV350:
380 case CHIP_RV380:
381 case CHIP_R420:
382 case CHIP_R423:
383 case CHIP_RV410:
384 case CHIP_RS400:
385 case CHIP_RS480:
386 /* bits 3 and 4 */
387 switch (rec->mask_clk_reg) {
388 case RADEON_GPIO_VGA_DDC:
389 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
390 break;
391 case RADEON_GPIO_DVI_DDC:
392 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
393 break;
394 case RADEON_GPIO_MONID:
395 reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
396 break;
397 default:
398 DRM_ERROR("gpio not supported with hw i2c\n");
399 ret = -EINVAL;
400 goto done;
401 }
402 break;
403 default:
404 DRM_ERROR("unsupported asic\n");
405 ret = -EINVAL;
406 goto done;
407 break;
408 }
409 }
410
411 /* check for bus probe */
412 p = &msgs[0];
413 if ((num == 1) && (p->len == 0)) {
414 WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
415 RADEON_I2C_NACK |
416 RADEON_I2C_HALT |
417 RADEON_I2C_SOFT_RST));
418 WREG32(i2c_data, (p->addr << 1) & 0xff);
419 WREG32(i2c_data, 0);
420 WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
421 (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
422 RADEON_I2C_EN |
423 (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
424 WREG32(i2c_cntl_0, reg);
425 for (k = 0; k < 32; k++) {
426 udelay(10);
427 tmp = RREG32(i2c_cntl_0);
428 if (tmp & RADEON_I2C_GO)
429 continue;
430 tmp = RREG32(i2c_cntl_0);
431 if (tmp & RADEON_I2C_DONE)
432 break;
433 else {
434 DRM_DEBUG("i2c write error 0x%08x\n", tmp);
435 WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
436 ret = -EIO;
437 goto done;
438 }
439 }
440 goto done;
441 }
442
443 for (i = 0; i < num; i++) {
444 p = &msgs[i];
445 for (j = 0; j < p->len; j++) {
446 if (p->flags & I2C_M_RD) {
447 WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
448 RADEON_I2C_NACK |
449 RADEON_I2C_HALT |
450 RADEON_I2C_SOFT_RST));
451 WREG32(i2c_data, ((p->addr << 1) & 0xff) | 0x1);
452 WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
453 (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
454 RADEON_I2C_EN |
455 (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
456 WREG32(i2c_cntl_0, reg | RADEON_I2C_RECEIVE);
457 for (k = 0; k < 32; k++) {
458 udelay(10);
459 tmp = RREG32(i2c_cntl_0);
460 if (tmp & RADEON_I2C_GO)
461 continue;
462 tmp = RREG32(i2c_cntl_0);
463 if (tmp & RADEON_I2C_DONE)
464 break;
465 else {
466 DRM_DEBUG("i2c read error 0x%08x\n", tmp);
467 WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
468 ret = -EIO;
469 goto done;
470 }
471 }
472 p->buf[j] = RREG32(i2c_data) & 0xff;
473 } else {
474 WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
475 RADEON_I2C_NACK |
476 RADEON_I2C_HALT |
477 RADEON_I2C_SOFT_RST));
478 WREG32(i2c_data, (p->addr << 1) & 0xff);
479 WREG32(i2c_data, p->buf[j]);
480 WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
481 (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
482 RADEON_I2C_EN |
483 (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
484 WREG32(i2c_cntl_0, reg);
485 for (k = 0; k < 32; k++) {
486 udelay(10);
487 tmp = RREG32(i2c_cntl_0);
488 if (tmp & RADEON_I2C_GO)
489 continue;
490 tmp = RREG32(i2c_cntl_0);
491 if (tmp & RADEON_I2C_DONE)
492 break;
493 else {
494 DRM_DEBUG("i2c write error 0x%08x\n", tmp);
495 WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
496 ret = -EIO;
497 goto done;
498 }
499 }
500 }
501 }
502 }
503
504done:
505 WREG32(i2c_cntl_0, 0);
506 WREG32(i2c_cntl_1, 0);
507 WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
508 RADEON_I2C_NACK |
509 RADEON_I2C_HALT |
510 RADEON_I2C_SOFT_RST));
511
512 if (rdev->is_atom_bios) {
513 tmp = RREG32(RADEON_BIOS_6_SCRATCH);
514 tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
515 WREG32(RADEON_BIOS_6_SCRATCH, tmp);
516 }
517
518 mutex_unlock(&rdev->pm.mutex);
519 mutex_unlock(&rdev->dc_hw_i2c_mutex);
520
521 return ret;
522}
523
524/* hw i2c engine for r5xx hardware
525 * hw can buffer up to 15 bytes
526 */
527static int r500_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
528 struct i2c_msg *msgs, int num)
529{
530 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
531 struct radeon_device *rdev = i2c->dev->dev_private;
532 struct radeon_i2c_bus_rec *rec = &i2c->rec;
533 struct i2c_msg *p;
534 int i, j, remaining, current_count, buffer_offset, ret = num;
535 u32 prescale;
536 u32 tmp, reg;
537 u32 saved1, saved2;
538
539 mutex_lock(&rdev->dc_hw_i2c_mutex);
540 /* take the pm lock since we need a constant sclk */
541 mutex_lock(&rdev->pm.mutex);
542
543 prescale = radeon_get_i2c_prescale(rdev);
544
545 /* clear gpio mask bits */
546 tmp = RREG32(rec->mask_clk_reg);
547 tmp &= ~rec->mask_clk_mask;
548 WREG32(rec->mask_clk_reg, tmp);
549 tmp = RREG32(rec->mask_clk_reg);
550
551 tmp = RREG32(rec->mask_data_reg);
552 tmp &= ~rec->mask_data_mask;
553 WREG32(rec->mask_data_reg, tmp);
554 tmp = RREG32(rec->mask_data_reg);
555
556 /* clear pin values */
557 tmp = RREG32(rec->a_clk_reg);
558 tmp &= ~rec->a_clk_mask;
559 WREG32(rec->a_clk_reg, tmp);
560 tmp = RREG32(rec->a_clk_reg);
561
562 tmp = RREG32(rec->a_data_reg);
563 tmp &= ~rec->a_data_mask;
564 WREG32(rec->a_data_reg, tmp);
565 tmp = RREG32(rec->a_data_reg);
566
567 /* set the pins to input */
568 tmp = RREG32(rec->en_clk_reg);
569 tmp &= ~rec->en_clk_mask;
570 WREG32(rec->en_clk_reg, tmp);
571 tmp = RREG32(rec->en_clk_reg);
572
573 tmp = RREG32(rec->en_data_reg);
574 tmp &= ~rec->en_data_mask;
575 WREG32(rec->en_data_reg, tmp);
576 tmp = RREG32(rec->en_data_reg);
577
578 /* */
579 tmp = RREG32(RADEON_BIOS_6_SCRATCH);
580 WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
581 saved1 = RREG32(AVIVO_DC_I2C_CONTROL1);
582 saved2 = RREG32(0x494);
583 WREG32(0x494, saved2 | 0x1);
584
585 WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
586 for (i = 0; i < 50; i++) {
587 udelay(1);
588 if (RREG32(AVIVO_DC_I2C_ARBITRATION) & AVIVO_DC_I2C_SW_CAN_USE_I2C)
589 break;
590 }
591 if (i == 50) {
592 DRM_ERROR("failed to get i2c bus\n");
593 ret = -EBUSY;
594 goto done;
595 }
596
597 reg = AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP | AVIVO_DC_I2C_EN;
598 switch (rec->mask_clk_reg) {
599 case AVIVO_DC_GPIO_DDC1_MASK:
600 reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1);
601 break;
602 case AVIVO_DC_GPIO_DDC2_MASK:
603 reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC2);
604 break;
605 case AVIVO_DC_GPIO_DDC3_MASK:
606 reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC3);
607 break;
608 default:
609 DRM_ERROR("gpio not supported with hw i2c\n");
610 ret = -EINVAL;
611 goto done;
612 }
613
614 /* check for bus probe */
615 p = &msgs[0];
616 if ((num == 1) && (p->len == 0)) {
617 WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
618 AVIVO_DC_I2C_NACK |
619 AVIVO_DC_I2C_HALT));
620 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
621 udelay(1);
622 WREG32(AVIVO_DC_I2C_RESET, 0);
623
624 WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
625 WREG32(AVIVO_DC_I2C_DATA, 0);
626
627 WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
628 WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
629 AVIVO_DC_I2C_DATA_COUNT(1) |
630 (prescale << 16)));
631 WREG32(AVIVO_DC_I2C_CONTROL1, reg);
632 WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
633 for (j = 0; j < 200; j++) {
634 udelay(50);
635 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
636 if (tmp & AVIVO_DC_I2C_GO)
637 continue;
638 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
639 if (tmp & AVIVO_DC_I2C_DONE)
640 break;
641 else {
642 DRM_DEBUG("i2c write error 0x%08x\n", tmp);
643 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
644 ret = -EIO;
645 goto done;
646 }
647 }
648 goto done;
649 }
650
651 for (i = 0; i < num; i++) {
652 p = &msgs[i];
653 remaining = p->len;
654 buffer_offset = 0;
655 if (p->flags & I2C_M_RD) {
656 while (remaining) {
657 if (remaining > 15)
658 current_count = 15;
659 else
660 current_count = remaining;
661 WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
662 AVIVO_DC_I2C_NACK |
663 AVIVO_DC_I2C_HALT));
664 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
665 udelay(1);
666 WREG32(AVIVO_DC_I2C_RESET, 0);
667
668 WREG32(AVIVO_DC_I2C_DATA, ((p->addr << 1) & 0xff) | 0x1);
669 WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
670 WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
671 AVIVO_DC_I2C_DATA_COUNT(current_count) |
672 (prescale << 16)));
673 WREG32(AVIVO_DC_I2C_CONTROL1, reg | AVIVO_DC_I2C_RECEIVE);
674 WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
675 for (j = 0; j < 200; j++) {
676 udelay(50);
677 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
678 if (tmp & AVIVO_DC_I2C_GO)
679 continue;
680 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
681 if (tmp & AVIVO_DC_I2C_DONE)
682 break;
683 else {
684 DRM_DEBUG("i2c read error 0x%08x\n", tmp);
685 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
686 ret = -EIO;
687 goto done;
688 }
689 }
690 for (j = 0; j < current_count; j++)
691 p->buf[buffer_offset + j] = RREG32(AVIVO_DC_I2C_DATA) & 0xff;
692 remaining -= current_count;
693 buffer_offset += current_count;
694 }
695 } else {
696 while (remaining) {
697 if (remaining > 15)
698 current_count = 15;
699 else
700 current_count = remaining;
701 WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
702 AVIVO_DC_I2C_NACK |
703 AVIVO_DC_I2C_HALT));
704 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
705 udelay(1);
706 WREG32(AVIVO_DC_I2C_RESET, 0);
707
708 WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
709 for (j = 0; j < current_count; j++)
710 WREG32(AVIVO_DC_I2C_DATA, p->buf[buffer_offset + j]);
711
712 WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
713 WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
714 AVIVO_DC_I2C_DATA_COUNT(current_count) |
715 (prescale << 16)));
716 WREG32(AVIVO_DC_I2C_CONTROL1, reg);
717 WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
718 for (j = 0; j < 200; j++) {
719 udelay(50);
720 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
721 if (tmp & AVIVO_DC_I2C_GO)
722 continue;
723 tmp = RREG32(AVIVO_DC_I2C_STATUS1);
724 if (tmp & AVIVO_DC_I2C_DONE)
725 break;
726 else {
727 DRM_DEBUG("i2c write error 0x%08x\n", tmp);
728 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
729 ret = -EIO;
730 goto done;
731 }
732 }
733 remaining -= current_count;
734 buffer_offset += current_count;
735 }
736 }
737 }
738
739done:
740 WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
741 AVIVO_DC_I2C_NACK |
742 AVIVO_DC_I2C_HALT));
743 WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
744 udelay(1);
745 WREG32(AVIVO_DC_I2C_RESET, 0);
746
747 WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
748 WREG32(AVIVO_DC_I2C_CONTROL1, saved1);
749 WREG32(0x494, saved2);
750 tmp = RREG32(RADEON_BIOS_6_SCRATCH);
751 tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
752 WREG32(RADEON_BIOS_6_SCRATCH, tmp);
753
754 mutex_unlock(&rdev->pm.mutex);
755 mutex_unlock(&rdev->dc_hw_i2c_mutex);
756
757 return ret;
758}
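
The engine programmed above moves at most 15 bytes per transaction (the DATA_COUNT field), so longer messages are looped in chunks while a 200-iteration poll (udelay(50) each, roughly 10 ms total) waits for GO to clear and DONE to assert. Below is a standalone sketch of just the chunking arithmetic; the stub function and its names are the editor's, not the driver's:

	#include <stdio.h>
	#include <string.h>

	#define CHUNK_MAX 15	/* matches the 15-byte data count used above */

	/* stand-in for one hardware transaction; illustrative only */
	static int xfer_chunk(const unsigned char *buf, int count)
	{
		printf("chunk of %d byte(s)\n", count);
		return 0;
	}

	static int xfer_msg(const unsigned char *buf, int len)
	{
		int remaining = len, offset = 0, current_count;

		while (remaining) {
			current_count = remaining > CHUNK_MAX ? CHUNK_MAX : remaining;
			if (xfer_chunk(buf + offset, current_count))
				return -1;	/* abort on error, as the driver does */
			remaining -= current_count;
			offset += current_count;
		}
		return 0;
	}

	int main(void)
	{
		unsigned char edid[128];

		memset(edid, 0, sizeof(edid));
		return xfer_msg(edid, sizeof(edid));	/* 128 bytes -> 9 chunks */
	}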
759
760static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap,
761 struct i2c_msg *msgs, int num)
762{
763 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
764 int ret;
765
766 radeon_i2c_do_lock(i2c, 1);
767 ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num);
768 radeon_i2c_do_lock(i2c, 0);
769
770 return ret;
771}
772
773static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
774 struct i2c_msg *msgs, int num)
775{
776 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
777 struct radeon_device *rdev = i2c->dev->dev_private;
778 struct radeon_i2c_bus_rec *rec = &i2c->rec;
779 int ret;
780
781 switch (rdev->family) {
782 case CHIP_R100:
783 case CHIP_RV100:
784 case CHIP_RS100:
785 case CHIP_RV200:
786 case CHIP_RS200:
787 case CHIP_R200:
788 case CHIP_RV250:
789 case CHIP_RS300:
790 case CHIP_RV280:
791 case CHIP_R300:
792 case CHIP_R350:
793 case CHIP_RV350:
794 case CHIP_RV380:
795 case CHIP_R420:
796 case CHIP_R423:
797 case CHIP_RV410:
798 case CHIP_RS400:
799 case CHIP_RS480:
800 if (rec->hw_capable)
801 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
802 else
803 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
804 break;
805 case CHIP_RS600:
806 case CHIP_RS690:
807 case CHIP_RS740:
808 /* XXX fill in hw i2c implementation */
809 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
810 break;
811 case CHIP_RV515:
812 case CHIP_R520:
813 case CHIP_RV530:
814 case CHIP_RV560:
815 case CHIP_RV570:
816 case CHIP_R580:
817 if (rec->hw_capable) {
818 if (rec->mm_i2c)
819 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
820 else
821 ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
822 } else
823 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
824 break;
825 case CHIP_R600:
826 case CHIP_RV610:
827 case CHIP_RV630:
828 case CHIP_RV670:
829 /* XXX fill in hw i2c implementation */
830 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
831 break;
832 case CHIP_RV620:
833 case CHIP_RV635:
834 case CHIP_RS780:
835 case CHIP_RS880:
836 case CHIP_RV770:
837 case CHIP_RV730:
838 case CHIP_RV710:
839 case CHIP_RV740:
840 /* XXX fill in hw i2c implementation */
841 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
842 break;
843 case CHIP_CEDAR:
844 case CHIP_REDWOOD:
845 case CHIP_JUNIPER:
846 case CHIP_CYPRESS:
847 case CHIP_HEMLOCK:
848 /* XXX fill in hw i2c implementation */
849 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
850 break;
851 default:
852 DRM_ERROR("i2c: unhandled radeon chip\n");
853 ret = -EIO;
854 break;
855 }
856
857 return ret;
858}
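
The dispatcher above reduces to a small decision: a bus without the hw_capable flag is bit-banged, pre-AVIVO families use the multimedia I2C engine (r100_hw_i2c_xfer), RV515-class parts use the AVIVO engine unless rec->mm_i2c forces the old one, and newer families always take the software path in this revision. A standalone model of that decision, using invented enum names rather than the driver's types:

	#include <stdio.h>

	/* illustrative stand-ins, not the kernel's enums */
	enum engine { ENGINE_SW, ENGINE_R100_HW, ENGINE_R500_HW };

	static enum engine pick_engine(int is_r500_class, int hw_capable, int mm_i2c)
	{
		if (!hw_capable)
			return ENGINE_SW;	/* bit-banged fallback */
		if (!is_r500_class)
			return ENGINE_R100_HW;	/* multimedia engine */
		return mm_i2c ? ENGINE_R100_HW : ENGINE_R500_HW;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       pick_engine(0, 1, 0),	/* pre-AVIVO, hw capable -> R100 engine */
		       pick_engine(1, 1, 0),	/* RV515-class           -> R500 engine */
		       pick_engine(1, 0, 0));	/* not hw capable        -> software   */
		return 0;
	}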
859
860static u32 radeon_i2c_func(struct i2c_adapter *adap)
861{
862 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
863}
864
865static const struct i2c_algorithm radeon_i2c_algo = {
866 .master_xfer = radeon_i2c_xfer,
867 .functionality = radeon_i2c_func,
868};
869
171struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, 870struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
172 struct radeon_i2c_bus_rec *rec, 871 struct radeon_i2c_bus_rec *rec,
173 const char *name) 872 const char *name)
@@ -179,23 +878,36 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
179 if (i2c == NULL) 878 if (i2c == NULL)
180 return NULL; 879 return NULL;
181 880
182 i2c->adapter.owner = THIS_MODULE; 881 /* set the internal bit adapter */
183 i2c->dev = dev; 882 i2c->algo.radeon.bit_adapter.owner = THIS_MODULE;
184 i2c_set_adapdata(&i2c->adapter, i2c); 883 i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c);
185 i2c->adapter.algo_data = &i2c->algo.bit; 884 sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name);
186 i2c->algo.bit.setsda = set_data; 885 i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data;
187 i2c->algo.bit.setscl = set_clock; 886 i2c->algo.radeon.bit_data.setsda = set_data;
188 i2c->algo.bit.getsda = get_data; 887 i2c->algo.radeon.bit_data.setscl = set_clock;
189 i2c->algo.bit.getscl = get_clock; 888 i2c->algo.radeon.bit_data.getsda = get_data;
190 i2c->algo.bit.udelay = 20; 889 i2c->algo.radeon.bit_data.getscl = get_clock;
890 i2c->algo.radeon.bit_data.udelay = 20;
191 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always 891 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
192 * make this, 2 jiffies is a lot more reliable */ 892 * make this, 2 jiffies is a lot more reliable */
193 i2c->algo.bit.timeout = 2; 893 i2c->algo.radeon.bit_data.timeout = 2;
194 i2c->algo.bit.data = i2c; 894 i2c->algo.radeon.bit_data.data = i2c;
895 ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter);
896 if (ret) {
897 DRM_ERROR("Failed to register internal bit i2c %s\n", name);
898 goto out_free;
899 }
900 /* set the radeon i2c adapter */
901 i2c->dev = dev;
195 i2c->rec = *rec; 902 i2c->rec = *rec;
196 ret = i2c_bit_add_bus(&i2c->adapter); 903 i2c->adapter.owner = THIS_MODULE;
904 i2c_set_adapdata(&i2c->adapter, i2c);
905 sprintf(i2c->adapter.name, "Radeon i2c %s", name);
906 i2c->adapter.algo_data = &i2c->algo.radeon;
907 i2c->adapter.algo = &radeon_i2c_algo;
908 ret = i2c_add_adapter(&i2c->adapter);
197 if (ret) { 909 if (ret) {
198 DRM_INFO("Failed to register i2c %s\n", name); 910 DRM_ERROR("Failed to register i2c %s\n", name);
199 goto out_free; 911 goto out_free;
200 } 912 }
201 913
@@ -237,11 +949,19 @@ out_free:
237 949
238} 950}
239 951
240
241void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) 952void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
242{ 953{
243 if (!i2c) 954 if (!i2c)
244 return; 955 return;
956 i2c_del_adapter(&i2c->algo.radeon.bit_adapter);
957 i2c_del_adapter(&i2c->adapter);
958 kfree(i2c);
959}
960
961void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c)
962{
963 if (!i2c)
964 return;
245 965
246 i2c_del_adapter(&i2c->adapter); 966 i2c_del_adapter(&i2c->adapter);
247 kfree(i2c); 967 kfree(i2c);
@@ -252,10 +972,10 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
252 return NULL; 972 return NULL;
253} 973}
254 974
255void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, 975void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
256 u8 slave_addr, 976 u8 slave_addr,
257 u8 addr, 977 u8 addr,
258 u8 *val) 978 u8 *val)
259{ 979{
260 u8 out_buf[2]; 980 u8 out_buf[2];
261 u8 in_buf[2]; 981 u8 in_buf[2];
@@ -286,10 +1006,10 @@ void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
286 } 1006 }
287} 1007}
288 1008
289void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus, 1009void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
290 u8 slave_addr, 1010 u8 slave_addr,
291 u8 addr, 1011 u8 addr,
292 u8 val) 1012 u8 val)
293{ 1013{
294 uint8_t out_buf[2]; 1014 uint8_t out_buf[2];
295 struct i2c_msg msg = { 1015 struct i2c_msg msg = {
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f23b05606eb5..20ec276e7596 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -30,6 +30,8 @@
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_drm.h" 31#include "radeon_drm.h"
32 32
33#include <linux/vga_switcheroo.h>
34
33int radeon_driver_unload_kms(struct drm_device *dev) 35int radeon_driver_unload_kms(struct drm_device *dev)
34{ 36{
35 struct radeon_device *rdev = dev->dev_private; 37 struct radeon_device *rdev = dev->dev_private;
@@ -136,6 +138,7 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
136 138
137void radeon_driver_lastclose_kms(struct drm_device *dev) 139void radeon_driver_lastclose_kms(struct drm_device *dev)
138{ 140{
141 vga_switcheroo_process_delayed_switch();
139} 142}
140 143
141int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) 144int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
@@ -276,17 +279,17 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
276 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), 279 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
277 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), 280 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
278 /* KMS */ 281 /* KMS */
279 DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH), 282 DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
280 DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH), 283 DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
281 DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH), 284 DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
282 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH), 285 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
283 DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH), 286 DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
284 DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH), 287 DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
285 DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), 288 DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
286 DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), 289 DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
287 DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), 290 DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
288 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH), 291 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
289 DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH), 292 DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
290 DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH), 293 DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
291}; 294};
292int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); 295int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index b6d8081e1246..df23d6a01d02 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -403,7 +403,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
403 403
404 /* if scanout was in GTT this really wouldn't work */ 404 /* if scanout was in GTT this really wouldn't work */
405 /* crtc offset is from display base addr not FB location */ 405 /* crtc offset is from display base addr not FB location */
406 radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; 406 radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start;
407 407
408 base -= radeon_crtc->legacy_display_base_addr; 408 base -= radeon_crtc->legacy_display_base_addr;
409 409
@@ -582,29 +582,6 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
582 ? RADEON_CRTC_V_SYNC_POL 582 ? RADEON_CRTC_V_SYNC_POL
583 : 0)); 583 : 0));
584 584
585 /* TODO -> Dell Server */
586 if (0) {
587 uint32_t disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
588 uint32_t tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
589 uint32_t dac2_cntl = RREG32(RADEON_DAC_CNTL2);
590 uint32_t crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
591
592 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
593 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
594
595 /* For CRT on DAC2, don't turn it on if BIOS didn't
596 enable it, even it's detected.
597 */
598 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
599 tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16));
600 tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16));
601
602 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
603 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
604 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
605 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
606 }
607
608 if (radeon_crtc->crtc_id) { 585 if (radeon_crtc->crtc_id) {
609 uint32_t crtc2_gen_cntl; 586 uint32_t crtc2_gen_cntl;
610 uint32_t disp2_merge_cntl; 587 uint32_t disp2_merge_cntl;
@@ -726,6 +703,10 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
726 pll = &rdev->clock.p1pll; 703 pll = &rdev->clock.p1pll;
727 704
728 pll->flags = RADEON_PLL_LEGACY; 705 pll->flags = RADEON_PLL_LEGACY;
706 if (radeon_new_pll == 1)
707 pll->algo = PLL_ALGO_NEW;
708 else
709 pll->algo = PLL_ALGO_LEGACY;
729 710
730 if (mode->clock > 200000) /* range limits??? */ 711 if (mode->clock > 200000) /* range limits??? */
731 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 712 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 38e45e231ef5..cf389ce50a8a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -115,6 +115,9 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
115 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 115 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
116 else 116 else
117 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 117 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
118
119 /* adjust pm to dpms change */
120 radeon_pm_compute_clocks(rdev);
118} 121}
119 122
120static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) 123static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
@@ -214,6 +217,11 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
214 struct drm_display_mode *adjusted_mode) 217 struct drm_display_mode *adjusted_mode)
215{ 218{
216 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 219 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
220 struct drm_device *dev = encoder->dev;
221 struct radeon_device *rdev = dev->dev_private;
222
223 /* adjust pm to upcoming mode change */
224 radeon_pm_compute_clocks(rdev);
217 225
218 /* set the active encoder to connector routing */ 226 /* set the active encoder to connector routing */
219 radeon_encoder_set_active_device(encoder); 227 radeon_encoder_set_active_device(encoder);
@@ -285,6 +293,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
285 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 293 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
286 else 294 else
287 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 295 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
296
297 /* adjust pm to dpms change */
298 radeon_pm_compute_clocks(rdev);
288} 299}
289 300
290static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) 301static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
@@ -470,6 +481,9 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
470 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 481 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
471 else 482 else
472 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 483 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
484
485 /* adjust pm to dpms change */
486 radeon_pm_compute_clocks(rdev);
473} 487}
474 488
475static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) 489static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
@@ -635,6 +649,9 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
635 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 649 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
636 else 650 else
637 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 651 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
652
653 /* adjust pm to dpms change */
654 radeon_pm_compute_clocks(rdev);
638} 655}
639 656
640static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) 657static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
@@ -842,6 +859,9 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
842 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 859 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
843 else 860 else
844 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 861 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
862
863 /* adjust pm to dpms change */
864 radeon_pm_compute_clocks(rdev);
845} 865}
846 866
847static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) 867static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e81b2aeb6a8f..1702b820aa4d 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -83,6 +83,8 @@ struct radeon_i2c_bus_rec {
83 bool valid; 83 bool valid;
84 /* id used by atom */ 84 /* id used by atom */
85 uint8_t i2c_id; 85 uint8_t i2c_id;
86 /* id used by atom */
87 uint8_t hpd_id;
86 /* can be used with hw i2c engine */ 88 /* can be used with hw i2c engine */
87 bool hw_capable; 89 bool hw_capable;
88 /* uses multi-media i2c engine */ 90 /* uses multi-media i2c engine */
@@ -113,6 +115,7 @@ struct radeon_tmds_pll {
113 115
114#define RADEON_MAX_BIOS_CONNECTOR 16 116#define RADEON_MAX_BIOS_CONNECTOR 16
115 117
118/* pll flags */
116#define RADEON_PLL_USE_BIOS_DIVS (1 << 0) 119#define RADEON_PLL_USE_BIOS_DIVS (1 << 0)
117#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1) 120#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1)
118#define RADEON_PLL_USE_REF_DIV (1 << 2) 121#define RADEON_PLL_USE_REF_DIV (1 << 2)
@@ -127,6 +130,12 @@ struct radeon_tmds_pll {
127#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 130#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
128#define RADEON_PLL_USE_POST_DIV (1 << 12) 131#define RADEON_PLL_USE_POST_DIV (1 << 12)
129 132
133/* pll algo */
134enum radeon_pll_algo {
135 PLL_ALGO_LEGACY,
136 PLL_ALGO_NEW
137};
138
130struct radeon_pll { 139struct radeon_pll {
131 /* reference frequency */ 140 /* reference frequency */
132 uint32_t reference_freq; 141 uint32_t reference_freq;
@@ -157,6 +166,13 @@ struct radeon_pll {
157 166
158 /* pll id */ 167 /* pll id */
159 uint32_t id; 168 uint32_t id;
169 /* pll algo */
170 enum radeon_pll_algo algo;
171};
172
173struct i2c_algo_radeon_data {
174 struct i2c_adapter bit_adapter;
175 struct i2c_algo_bit_data bit_data;
160}; 176};
161 177
162struct radeon_i2c_chan { 178struct radeon_i2c_chan {
@@ -164,7 +180,7 @@ struct radeon_i2c_chan {
164 struct drm_device *dev; 180 struct drm_device *dev;
165 union { 181 union {
166 struct i2c_algo_dp_aux_data dp; 182 struct i2c_algo_dp_aux_data dp;
167 struct i2c_algo_bit_data bit; 183 struct i2c_algo_radeon_data radeon;
168 } algo; 184 } algo;
169 struct radeon_i2c_bus_rec rec; 185 struct radeon_i2c_bus_rec rec;
170}; 186};
@@ -193,7 +209,7 @@ struct radeon_mode_info {
193 struct card_info *atom_card_info; 209 struct card_info *atom_card_info;
194 enum radeon_connector_table connector_table; 210 enum radeon_connector_table connector_table;
195 bool mode_config_initialized; 211 bool mode_config_initialized;
196 struct radeon_crtc *crtcs[2]; 212 struct radeon_crtc *crtcs[6];
197 /* DVI-I properties */ 213 /* DVI-I properties */
198 struct drm_property *coherent_mode_property; 214 struct drm_property *coherent_mode_property;
199 /* DAC enable load detect */ 215 /* DAC enable load detect */
@@ -202,7 +218,8 @@ struct radeon_mode_info {
202 struct drm_property *tv_std_property; 218 struct drm_property *tv_std_property;
203 /* legacy TMDS PLL detect */ 219 /* legacy TMDS PLL detect */
204 struct drm_property *tmds_pll_property; 220 struct drm_property *tmds_pll_property;
205 221 /* hardcoded DFP edid from BIOS */
222 struct edid *bios_hardcoded_edid;
206}; 223};
207 224
208#define MAX_H_CODE_TIMING_LEN 32 225#define MAX_H_CODE_TIMING_LEN 32
@@ -237,6 +254,7 @@ struct radeon_crtc {
237 fixed20_12 vsc; 254 fixed20_12 vsc;
238 fixed20_12 hsc; 255 fixed20_12 hsc;
239 struct drm_display_mode native_mode; 256 struct drm_display_mode native_mode;
257 int pll_id;
240}; 258};
241 259
242struct radeon_encoder_primary_dac { 260struct radeon_encoder_primary_dac {
@@ -303,6 +321,7 @@ struct radeon_encoder_atom_dig {
303 /* atom lvds */ 321 /* atom lvds */
304 uint32_t lvds_misc; 322 uint32_t lvds_misc;
305 uint16_t panel_pwr_delay; 323 uint16_t panel_pwr_delay;
324 enum radeon_pll_algo pll_algo;
306 struct radeon_atom_ss *ss; 325 struct radeon_atom_ss *ss;
307 /* panel mode */ 326 /* panel mode */
308 struct drm_display_mode native_mode; 327 struct drm_display_mode native_mode;
@@ -398,6 +417,7 @@ extern void dp_link_train(struct drm_encoder *encoder,
398 struct drm_connector *connector); 417 struct drm_connector *connector);
399extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); 418extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
400extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); 419extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
420extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action);
401extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, 421extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
402 int action, uint8_t lane_num, 422 int action, uint8_t lane_num,
403 uint8_t lane_set); 423 uint8_t lane_set);
@@ -411,14 +431,15 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
411 struct radeon_i2c_bus_rec *rec, 431 struct radeon_i2c_bus_rec *rec,
412 const char *name); 432 const char *name);
413extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); 433extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
414extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, 434extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c);
415 u8 slave_addr, 435extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
416 u8 addr, 436 u8 slave_addr,
417 u8 *val); 437 u8 addr,
418extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c, 438 u8 *val);
419 u8 slave_addr, 439extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
420 u8 addr, 440 u8 slave_addr,
421 u8 val); 441 u8 addr,
442 u8 val);
422extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); 443extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
423extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); 444extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
424 445
@@ -432,14 +453,6 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
432 uint32_t *ref_div_p, 453 uint32_t *ref_div_p,
433 uint32_t *post_div_p); 454 uint32_t *post_div_p);
434 455
435extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
436 uint64_t freq,
437 uint32_t *dot_clock_p,
438 uint32_t *fb_div_p,
439 uint32_t *frac_fb_div_p,
440 uint32_t *ref_div_p,
441 uint32_t *post_div_p);
442
443extern void radeon_setup_encoder_clones(struct drm_device *dev); 456extern void radeon_setup_encoder_clones(struct drm_device *dev);
444 457
445struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); 458struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
@@ -473,6 +486,9 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
473extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, 486extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
474 int x, int y); 487 int x, int y);
475 488
489extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
490extern struct edid *
491radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
476extern bool radeon_atom_get_clock_info(struct drm_device *dev); 492extern bool radeon_atom_get_clock_info(struct drm_device *dev);
477extern bool radeon_combios_get_clock_info(struct drm_device *dev); 493extern bool radeon_combios_get_clock_info(struct drm_device *dev);
478extern struct radeon_encoder_atom_dig * 494extern struct radeon_encoder_atom_dig *
@@ -531,7 +547,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
531 struct radeon_crtc *radeon_crtc); 547 struct radeon_crtc *radeon_crtc);
532void radeon_legacy_init_crtc(struct drm_device *dev, 548void radeon_legacy_init_crtc(struct drm_device *dev,
533 struct radeon_crtc *radeon_crtc); 549 struct radeon_crtc *radeon_crtc);
534extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
535 550
536void radeon_get_clock_info(struct drm_device *dev); 551void radeon_get_clock_info(struct drm_device *dev);
537 552
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index f1da370928eb..fc9d00ac6b15 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -178,7 +178,6 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
178{ 178{
179 int r, i; 179 int r, i;
180 180
181 radeon_ttm_placement_from_domain(bo, domain);
182 if (bo->pin_count) { 181 if (bo->pin_count) {
183 bo->pin_count++; 182 bo->pin_count++;
184 if (gpu_addr) 183 if (gpu_addr)
@@ -186,6 +185,8 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
186 return 0; 185 return 0;
187 } 186 }
188 radeon_ttm_placement_from_domain(bo, domain); 187 radeon_ttm_placement_from_domain(bo, domain);
188 /* force to pin into visible video ram */
189 bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
189 for (i = 0; i < bo->placement.num_placement; i++) 190 for (i = 0; i < bo->placement.num_placement; i++)
190 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 191 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
191 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 192 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8bce64cdc320..d4d1c39a0e99 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -18,21 +18,413 @@
18 * OTHER DEALINGS IN THE SOFTWARE. 18 * OTHER DEALINGS IN THE SOFTWARE.
19 * 19 *
20 * Authors: Rafał Miłecki <zajec5@gmail.com> 20 * Authors: Rafał Miłecki <zajec5@gmail.com>
21 * Alex Deucher <alexdeucher@gmail.com>
21 */ 22 */
22#include "drmP.h" 23#include "drmP.h"
23#include "radeon.h" 24#include "radeon.h"
25#include "avivod.h"
24 26
25int radeon_debugfs_pm_init(struct radeon_device *rdev); 27#define RADEON_IDLE_LOOP_MS 100
28#define RADEON_RECLOCK_DELAY_MS 200
29#define RADEON_WAIT_VBLANK_TIMEOUT 200
30
31static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
32static void radeon_pm_set_clocks(struct radeon_device *rdev);
33static void radeon_pm_idle_work_handler(struct work_struct *work);
34static int radeon_debugfs_pm_init(struct radeon_device *rdev);
35
36static const char *pm_state_names[4] = {
37 "PM_STATE_DISABLED",
38 "PM_STATE_MINIMUM",
39 "PM_STATE_PAUSED",
40 "PM_STATE_ACTIVE"
41};
42
43static const char *pm_state_types[5] = {
44 "Default",
45 "Powersave",
46 "Battery",
47 "Balanced",
48 "Performance",
49};
50
51static void radeon_print_power_mode_info(struct radeon_device *rdev)
52{
53 int i, j;
54 bool is_default;
55
56 DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
57 for (i = 0; i < rdev->pm.num_power_states; i++) {
58 if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
59 is_default = true;
60 else
61 is_default = false;
62 DRM_INFO("State %d %s %s\n", i,
63 pm_state_types[rdev->pm.power_state[i].type],
64 is_default ? "(default)" : "");
65 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
66 DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
67 DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
68 for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
69 if (rdev->flags & RADEON_IS_IGP)
70 DRM_INFO("\t\t%d engine: %d\n",
71 j,
72 rdev->pm.power_state[i].clock_info[j].sclk * 10);
73 else
74 DRM_INFO("\t\t%d engine/memory: %d/%d\n",
75 j,
76 rdev->pm.power_state[i].clock_info[j].sclk * 10,
77 rdev->pm.power_state[i].clock_info[j].mclk * 10);
78 }
79 }
80}
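
The sclk/mclk values printed above appear to be stored in units of 10 kHz, hence the "* 10" (the debugfs code further down reports clocks the same way by appending a literal 0 to the format). A trivial illustration of that unit conversion, with an example value chosen by the editor:

	#include <stdio.h>

	int main(void)
	{
		unsigned int sclk = 68000;	/* example value in 10 kHz units (editor's choice) */

		/* 68000 * 10 kHz = 680000 kHz = 680 MHz */
		printf("engine: %u kHz (%u MHz)\n", sclk * 10, sclk / 100);
		return 0;
	}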
81
82static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
83 enum radeon_pm_state_type type)
84{
85 int i, j;
86 enum radeon_pm_state_type wanted_types[2];
87 int wanted_count;
88
89 switch (type) {
90 case POWER_STATE_TYPE_DEFAULT:
91 default:
92 return rdev->pm.default_power_state;
93 case POWER_STATE_TYPE_POWERSAVE:
94 if (rdev->flags & RADEON_IS_MOBILITY) {
95 wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
96 wanted_types[1] = POWER_STATE_TYPE_BATTERY;
97 wanted_count = 2;
98 } else {
99 wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
100 wanted_count = 1;
101 }
102 break;
103 case POWER_STATE_TYPE_BATTERY:
104 if (rdev->flags & RADEON_IS_MOBILITY) {
105 wanted_types[0] = POWER_STATE_TYPE_BATTERY;
106 wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
107 wanted_count = 2;
108 } else {
109 wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
110 wanted_count = 1;
111 }
112 break;
113 case POWER_STATE_TYPE_BALANCED:
114 case POWER_STATE_TYPE_PERFORMANCE:
115 wanted_types[0] = type;
116 wanted_count = 1;
117 break;
118 }
119
120 for (i = 0; i < wanted_count; i++) {
121 for (j = 0; j < rdev->pm.num_power_states; j++) {
122 if (rdev->pm.power_state[j].type == wanted_types[i])
123 return &rdev->pm.power_state[j];
124 }
125 }
126
127 return rdev->pm.default_power_state;
128}
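
radeon_pick_power_state builds a short preference list (e.g. POWERSAVE then BATTERY on mobility parts) and returns the first power state whose type matches, falling back to the default state when nothing does. A minimal userspace model of that two-level search, with editor-invented types and data rather than the driver's structures:

	#include <stdio.h>

	/* editor-invented stand-ins for the driver's enums/structs */
	enum type { T_DEFAULT, T_POWERSAVE, T_BATTERY, T_PERFORMANCE };
	struct state { enum type type; const char *name; };

	static const struct state *pick(const struct state *states, int n,
					const enum type *wanted, int wanted_count,
					const struct state *fallback)
	{
		int i, j;

		for (i = 0; i < wanted_count; i++)	/* most preferred type first */
			for (j = 0; j < n; j++)
				if (states[j].type == wanted[i])
					return &states[j];
		return fallback;			/* nothing matched: default state */
	}

	int main(void)
	{
		struct state states[] = {
			{ T_DEFAULT, "default" },
			{ T_BATTERY, "battery" },
		};
		enum type wanted[] = { T_POWERSAVE, T_BATTERY };	/* mobility preference order */

		printf("%s\n", pick(states, 2, wanted, 2, &states[0])->name);	/* -> battery */
		return 0;
	}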
129
130static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
131 struct radeon_power_state *power_state,
132 enum radeon_pm_clock_mode_type type)
133{
134 switch (type) {
135 case POWER_MODE_TYPE_DEFAULT:
136 default:
137 return power_state->default_clock_mode;
138 case POWER_MODE_TYPE_LOW:
139 return &power_state->clock_info[0];
140 case POWER_MODE_TYPE_MID:
141 if (power_state->num_clock_modes > 2)
142 return &power_state->clock_info[1];
143 else
144 return &power_state->clock_info[0];
145 break;
146 case POWER_MODE_TYPE_HIGH:
147 return &power_state->clock_info[power_state->num_clock_modes - 1];
148 }
149
150}
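
Clock-mode selection is purely positional: LOW is the first entry, HIGH is the last, and MID is the second entry only when a state carries more than two modes. A one-function model of the MID rule, index math only:

	#include <stdio.h>

	/* MID picks clock_info[1] only if there are more than two modes, else clock_info[0] */
	static int mid_index(int num_clock_modes)
	{
		return num_clock_modes > 2 ? 1 : 0;
	}

	int main(void)
	{
		printf("%d %d %d\n", mid_index(1), mid_index(2), mid_index(3));	/* 0 0 1 */
		return 0;
	}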
151
152static void radeon_get_power_state(struct radeon_device *rdev,
153 enum radeon_pm_action action)
154{
155 switch (action) {
156 case PM_ACTION_MINIMUM:
157 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
158 rdev->pm.requested_clock_mode =
159 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
160 break;
161 case PM_ACTION_DOWNCLOCK:
162 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
163 rdev->pm.requested_clock_mode =
164 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
165 break;
166 case PM_ACTION_UPCLOCK:
167 rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
168 rdev->pm.requested_clock_mode =
169 radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
170 break;
171 case PM_ACTION_NONE:
172 default:
 173		DRM_ERROR("Requested mode for undefined action\n");
174 return;
175 }
176 DRM_INFO("Requested: e: %d m: %d p: %d\n",
177 rdev->pm.requested_clock_mode->sclk,
178 rdev->pm.requested_clock_mode->mclk,
179 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
180}
181
182static void radeon_set_power_state(struct radeon_device *rdev)
183{
184 /* if *_clock_mode are the same, *_power_state are as well */
185 if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
186 return;
187
188 DRM_INFO("Setting: e: %d m: %d p: %d\n",
189 rdev->pm.requested_clock_mode->sclk,
190 rdev->pm.requested_clock_mode->mclk,
191 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
192 /* set pcie lanes */
193 /* set voltage */
194 /* set engine clock */
195 radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
196 /* set memory clock */
197
198 rdev->pm.current_power_state = rdev->pm.requested_power_state;
199 rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
200}
26 201
27int radeon_pm_init(struct radeon_device *rdev) 202int radeon_pm_init(struct radeon_device *rdev)
28{ 203{
204 rdev->pm.state = PM_STATE_DISABLED;
205 rdev->pm.planned_action = PM_ACTION_NONE;
206 rdev->pm.downclocked = false;
207
208 if (rdev->bios) {
209 if (rdev->is_atom_bios)
210 radeon_atombios_get_power_modes(rdev);
211 else
212 radeon_combios_get_power_modes(rdev);
213 radeon_print_power_mode_info(rdev);
214 }
215
29 if (radeon_debugfs_pm_init(rdev)) { 216 if (radeon_debugfs_pm_init(rdev)) {
30 DRM_ERROR("Failed to register debugfs file for PM!\n"); 217 DRM_ERROR("Failed to register debugfs file for PM!\n");
31 } 218 }
32 219
220 INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
221
222 if (radeon_dynpm != -1 && radeon_dynpm) {
223 rdev->pm.state = PM_STATE_PAUSED;
224 DRM_INFO("radeon: dynamic power management enabled\n");
225 }
226
227 DRM_INFO("radeon: power management initialized\n");
228
33 return 0; 229 return 0;
34} 230}
35 231
232void radeon_pm_compute_clocks(struct radeon_device *rdev)
233{
234 struct drm_device *ddev = rdev->ddev;
235 struct drm_connector *connector;
236 struct radeon_crtc *radeon_crtc;
237 int count = 0;
238
239 if (rdev->pm.state == PM_STATE_DISABLED)
240 return;
241
242 mutex_lock(&rdev->pm.mutex);
243
244 rdev->pm.active_crtcs = 0;
245 list_for_each_entry(connector,
246 &ddev->mode_config.connector_list, head) {
247 if (connector->encoder &&
248 connector->dpms != DRM_MODE_DPMS_OFF) {
249 radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
250 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
251 ++count;
252 }
253 }
254
255 if (count > 1) {
256 if (rdev->pm.state == PM_STATE_ACTIVE) {
257 cancel_delayed_work(&rdev->pm.idle_work);
258
259 rdev->pm.state = PM_STATE_PAUSED;
260 rdev->pm.planned_action = PM_ACTION_UPCLOCK;
261 if (rdev->pm.downclocked)
262 radeon_pm_set_clocks(rdev);
263
264 DRM_DEBUG("radeon: dynamic power management deactivated\n");
265 }
266 } else if (count == 1) {
267 /* TODO: Increase clocks if needed for current mode */
268
269 if (rdev->pm.state == PM_STATE_MINIMUM) {
270 rdev->pm.state = PM_STATE_ACTIVE;
271 rdev->pm.planned_action = PM_ACTION_UPCLOCK;
272 radeon_pm_set_clocks(rdev);
273
274 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
275 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
276 }
277 else if (rdev->pm.state == PM_STATE_PAUSED) {
278 rdev->pm.state = PM_STATE_ACTIVE;
279 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
280 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
281 DRM_DEBUG("radeon: dynamic power management activated\n");
282 }
283 }
284 else { /* count == 0 */
285 if (rdev->pm.state != PM_STATE_MINIMUM) {
286 cancel_delayed_work(&rdev->pm.idle_work);
287
288 rdev->pm.state = PM_STATE_MINIMUM;
289 rdev->pm.planned_action = PM_ACTION_MINIMUM;
290 radeon_pm_set_clocks(rdev);
291 }
292 }
293
294 mutex_unlock(&rdev->pm.mutex);
295}
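
radeon_pm_compute_clocks drives a small state machine off the number of active CRTCs: more than one head pauses dynamic reclocking and forces an upclock, exactly one head (re)activates it, and zero heads drops to the minimum state. A compact model of the target state for each head count; the real code also checks the current state before transitioning, and the names here are the editor's:

	#include <stdio.h>

	enum pm_state { DISABLED, MINIMUM, PAUSED, ACTIVE };	/* editor's model */

	static enum pm_state next_state(enum pm_state cur, int active_crtcs)
	{
		if (cur == DISABLED)
			return DISABLED;	/* dynpm off: nothing to do */
		if (active_crtcs > 1)
			return PAUSED;		/* multi-head: pause and upclock */
		if (active_crtcs == 1)
			return ACTIVE;		/* single head: reclock dynamically */
		return MINIMUM;			/* no heads: lowest clocks */
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       next_state(ACTIVE, 2),	/* -> PAUSED  */
		       next_state(MINIMUM, 1),	/* -> ACTIVE  */
		       next_state(ACTIVE, 0));	/* -> MINIMUM */
		return 0;
	}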
296
297static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
298{
299 u32 stat_crtc1 = 0, stat_crtc2 = 0;
300 bool in_vbl = true;
301
302 if (ASIC_IS_AVIVO(rdev)) {
303 if (rdev->pm.active_crtcs & (1 << 0)) {
304 stat_crtc1 = RREG32(D1CRTC_STATUS);
305 if (!(stat_crtc1 & 1))
306 in_vbl = false;
307 }
308 if (rdev->pm.active_crtcs & (1 << 1)) {
309 stat_crtc2 = RREG32(D2CRTC_STATUS);
310 if (!(stat_crtc2 & 1))
311 in_vbl = false;
312 }
313 }
314 if (in_vbl == false)
315 DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
316 stat_crtc2, finish ? "exit" : "entry");
317 return in_vbl;
318}
319static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
320{
321 /*radeon_fence_wait_last(rdev);*/
322 switch (rdev->pm.planned_action) {
323 case PM_ACTION_UPCLOCK:
324 rdev->pm.downclocked = false;
325 break;
326 case PM_ACTION_DOWNCLOCK:
327 rdev->pm.downclocked = true;
328 break;
329 case PM_ACTION_MINIMUM:
330 break;
331 case PM_ACTION_NONE:
332 DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
333 break;
334 }
335
336 /* check if we are in vblank */
337 radeon_pm_debug_check_in_vbl(rdev, false);
338 radeon_set_power_state(rdev);
339 radeon_pm_debug_check_in_vbl(rdev, true);
340 rdev->pm.planned_action = PM_ACTION_NONE;
341}
342
343static void radeon_pm_set_clocks(struct radeon_device *rdev)
344{
345 radeon_get_power_state(rdev, rdev->pm.planned_action);
346 mutex_lock(&rdev->cp.mutex);
347
348 if (rdev->pm.active_crtcs & (1 << 0)) {
349 rdev->pm.req_vblank |= (1 << 0);
350 drm_vblank_get(rdev->ddev, 0);
351 }
352 if (rdev->pm.active_crtcs & (1 << 1)) {
353 rdev->pm.req_vblank |= (1 << 1);
354 drm_vblank_get(rdev->ddev, 1);
355 }
356 if (rdev->pm.active_crtcs)
357 wait_event_interruptible_timeout(
358 rdev->irq.vblank_queue, 0,
359 msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
360 if (rdev->pm.req_vblank & (1 << 0)) {
361 rdev->pm.req_vblank &= ~(1 << 0);
362 drm_vblank_put(rdev->ddev, 0);
363 }
364 if (rdev->pm.req_vblank & (1 << 1)) {
365 rdev->pm.req_vblank &= ~(1 << 1);
366 drm_vblank_put(rdev->ddev, 1);
367 }
368
369 radeon_pm_set_clocks_locked(rdev);
370 mutex_unlock(&rdev->cp.mutex);
371}
372
373static void radeon_pm_idle_work_handler(struct work_struct *work)
374{
375 struct radeon_device *rdev;
376 rdev = container_of(work, struct radeon_device,
377 pm.idle_work.work);
378
379 mutex_lock(&rdev->pm.mutex);
380 if (rdev->pm.state == PM_STATE_ACTIVE) {
381 unsigned long irq_flags;
382 int not_processed = 0;
383
384 read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
385 if (!list_empty(&rdev->fence_drv.emited)) {
386 struct list_head *ptr;
387 list_for_each(ptr, &rdev->fence_drv.emited) {
 388				/* count up to 3, that's enough info */
389 if (++not_processed >= 3)
390 break;
391 }
392 }
393 read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
394
395 if (not_processed >= 3) { /* should upclock */
396 if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
397 rdev->pm.planned_action = PM_ACTION_NONE;
398 } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
399 rdev->pm.downclocked) {
400 rdev->pm.planned_action =
401 PM_ACTION_UPCLOCK;
402 rdev->pm.action_timeout = jiffies +
403 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
404 }
405 } else if (not_processed == 0) { /* should downclock */
406 if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
407 rdev->pm.planned_action = PM_ACTION_NONE;
408 } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
409 !rdev->pm.downclocked) {
410 rdev->pm.planned_action =
411 PM_ACTION_DOWNCLOCK;
412 rdev->pm.action_timeout = jiffies +
413 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
414 }
415 }
416
417 if (rdev->pm.planned_action != PM_ACTION_NONE &&
 418		    time_after(jiffies, rdev->pm.action_timeout)) {
419 radeon_pm_set_clocks(rdev);
420 }
421 }
422 mutex_unlock(&rdev->pm.mutex);
423
424 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
425 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
426}
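
The idle worker above samples the emitted-fence list: three or more unprocessed fences counts as busy (request an upclock), zero counts as idle (request a downclock), and either request is only committed once it has been pending for RADEON_RECLOCK_DELAY_MS, which debounces short spikes. The pending-action deadline is compared against jiffies; time_after() is the wraparound-safe way to express that comparison. A simplified userspace model of just the downclock debounce, using milliseconds in place of jiffies:

	#include <stdio.h>

	#define RECLOCK_DELAY_MS 200

	struct pm {
		int planned_downclock;	/* 0 = none, 1 = downclock pending */
		long deadline_ms;	/* when the pending action may be applied */
	};

	/* busy==0 requests a downclock; the action only fires after the delay elapses */
	static int tick(struct pm *pm, int busy, long now_ms)
	{
		if (busy) {
			pm->planned_downclock = 0;	/* load came back: cancel request */
		} else if (!pm->planned_downclock) {
			pm->planned_downclock = 1;
			pm->deadline_ms = now_ms + RECLOCK_DELAY_MS;
		}
		if (pm->planned_downclock && now_ms >= pm->deadline_ms) {
			pm->planned_downclock = 0;
			return 1;			/* commit the downclock now */
		}
		return 0;
	}

	int main(void)
	{
		struct pm pm = { 0, 0 };

		printf("%d %d %d\n",
		       tick(&pm, 0, 0),		/* idle: request queued, not yet due -> 0 */
		       tick(&pm, 0, 100),	/* still pending                     -> 0 */
		       tick(&pm, 0, 250));	/* past the 200 ms deadline          -> 1 */
		return 0;
	}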
427
36/* 428/*
37 * Debugfs info 429 * Debugfs info
38 */ 430 */
@@ -44,11 +436,14 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
44 struct drm_device *dev = node->minor->dev; 436 struct drm_device *dev = node->minor->dev;
45 struct radeon_device *rdev = dev->dev_private; 437 struct radeon_device *rdev = dev->dev_private;
46 438
439 seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
47 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); 440 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
48 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 441 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
49 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); 442 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
50 if (rdev->asic->get_memory_clock) 443 if (rdev->asic->get_memory_clock)
51 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 444 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
445 if (rdev->asic->get_pcie_lanes)
446 seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
52 447
53 return 0; 448 return 0;
54} 449}
@@ -58,7 +453,7 @@ static struct drm_info_list radeon_pm_info_list[] = {
58}; 453};
59#endif 454#endif
60 455
61int radeon_debugfs_pm_init(struct radeon_device *rdev) 456static int radeon_debugfs_pm_init(struct radeon_device *rdev)
62{ 457{
63#if defined(CONFIG_DEBUG_FS) 458#if defined(CONFIG_DEBUG_FS)
64 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list)); 459 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 6d0a009dd4a1..5c0dc082d330 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -54,7 +54,7 @@
54#include "r300_reg.h" 54#include "r300_reg.h"
55#include "r500_reg.h" 55#include "r500_reg.h"
56#include "r600_reg.h" 56#include "r600_reg.h"
57 57#include "evergreen_reg.h"
58 58
59#define RADEON_MC_AGP_LOCATION 0x014c 59#define RADEON_MC_AGP_LOCATION 0x014c
60#define RADEON_MC_AGP_START_MASK 0x0000FFFF 60#define RADEON_MC_AGP_START_MASK 0x0000FFFF
@@ -1060,32 +1060,38 @@
1060 1060
1061 /* Multimedia I2C bus */ 1061 /* Multimedia I2C bus */
1062#define RADEON_I2C_CNTL_0 0x0090 1062#define RADEON_I2C_CNTL_0 0x0090
1063#define RADEON_I2C_DONE (1 << 0) 1063# define RADEON_I2C_DONE (1 << 0)
1064#define RADEON_I2C_NACK (1 << 1) 1064# define RADEON_I2C_NACK (1 << 1)
1065#define RADEON_I2C_HALT (1 << 2) 1065# define RADEON_I2C_HALT (1 << 2)
1066#define RADEON_I2C_SOFT_RST (1 << 5) 1066# define RADEON_I2C_SOFT_RST (1 << 5)
1067#define RADEON_I2C_DRIVE_EN (1 << 6) 1067# define RADEON_I2C_DRIVE_EN (1 << 6)
1068#define RADEON_I2C_DRIVE_SEL (1 << 7) 1068# define RADEON_I2C_DRIVE_SEL (1 << 7)
1069#define RADEON_I2C_START (1 << 8) 1069# define RADEON_I2C_START (1 << 8)
1070#define RADEON_I2C_STOP (1 << 9) 1070# define RADEON_I2C_STOP (1 << 9)
1071#define RADEON_I2C_RECEIVE (1 << 10) 1071# define RADEON_I2C_RECEIVE (1 << 10)
1072#define RADEON_I2C_ABORT (1 << 11) 1072# define RADEON_I2C_ABORT (1 << 11)
1073#define RADEON_I2C_GO (1 << 12) 1073# define RADEON_I2C_GO (1 << 12)
1074#define RADEON_I2C_PRESCALE_SHIFT 16 1074# define RADEON_I2C_PRESCALE_SHIFT 16
1075#define RADEON_I2C_CNTL_1 0x0094 1075#define RADEON_I2C_CNTL_1 0x0094
1076#define RADEON_I2C_DATA_COUNT_SHIFT 0 1076# define RADEON_I2C_DATA_COUNT_SHIFT 0
1077#define RADEON_I2C_ADDR_COUNT_SHIFT 4 1077# define RADEON_I2C_ADDR_COUNT_SHIFT 4
1078#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8 1078# define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
1079#define RADEON_I2C_SEL (1 << 16) 1079# define RADEON_I2C_SEL (1 << 16)
1080#define RADEON_I2C_EN (1 << 17) 1080# define RADEON_I2C_EN (1 << 17)
1081#define RADEON_I2C_TIME_LIMIT_SHIFT 24 1081# define RADEON_I2C_TIME_LIMIT_SHIFT 24
1082#define RADEON_I2C_DATA 0x0098 1082#define RADEON_I2C_DATA 0x0098
1083 1083
1084#define RADEON_DVI_I2C_CNTL_0 0x02e0 1084#define RADEON_DVI_I2C_CNTL_0 0x02e0
1085# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3) 1085# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3)
1086# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */ 1086# define R200_SEL_DDC1 0 /* depends on asic */
1087# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */ 1087# define R200_SEL_DDC2 1 /* depends on asic */
1088# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */ 1088# define R200_SEL_DDC3 2 /* depends on asic */
1089# define RADEON_SW_WANTS_TO_USE_DVI_I2C (1 << 13)
1090# define RADEON_SW_CAN_USE_DVI_I2C (1 << 13)
1091# define RADEON_SW_DONE_USING_DVI_I2C (1 << 14)
1092# define RADEON_HW_NEEDS_DVI_I2C (1 << 14)
1093# define RADEON_ABORT_HW_DVI_I2C (1 << 15)
1094# define RADEON_HW_USING_DVI_I2C (1 << 15)
1089#define RADEON_DVI_I2C_CNTL_1 0x02e4 1095#define RADEON_DVI_I2C_CNTL_1 0x02e4
1090#define RADEON_DVI_I2C_DATA 0x02e8 1096#define RADEON_DVI_I2C_DATA 0x02e8
1091 1097
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 6579eb4c1f28..e50513a62735 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -34,6 +34,36 @@
34 34
35int radeon_debugfs_ib_init(struct radeon_device *rdev); 35int radeon_debugfs_ib_init(struct radeon_device *rdev);
36 36
37void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
38{
39 struct radeon_ib *ib, *n;
40
41 list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
42 list_del(&ib->list);
43 vfree(ib->ptr);
44 kfree(ib);
45 }
46}
47
48void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
49{
50 struct radeon_ib *bib;
51
52 bib = kmalloc(sizeof(*bib), GFP_KERNEL);
53 if (bib == NULL)
54 return;
55 bib->ptr = vmalloc(ib->length_dw * 4);
56 if (bib->ptr == NULL) {
57 kfree(bib);
58 return;
59 }
60 memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
61 bib->length_dw = ib->length_dw;
62 mutex_lock(&rdev->ib_pool.mutex);
63 list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
64 mutex_unlock(&rdev->ib_pool.mutex);
65}
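
radeon_ib_bogus_add snapshots a failing indirect buffer (a heap copy of length_dw dwords) onto a list so it can be dumped later from debugfs; allocation failures are silently dropped. A hedged userspace sketch of the same capture idea, using malloc and a singly linked list in place of the kernel's vmalloc and list_head:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <stdint.h>

	struct snapshot {
		uint32_t *ptr;
		unsigned length_dw;
		struct snapshot *next;
	};

	static struct snapshot *bogus_list;

	/* best effort: silently give up on allocation failure, as the driver does */
	static void bogus_add(const uint32_t *ib, unsigned length_dw)
	{
		struct snapshot *s = malloc(sizeof(*s));

		if (!s)
			return;
		s->ptr = malloc(length_dw * sizeof(uint32_t));
		if (!s->ptr) {
			free(s);
			return;
		}
		memcpy(s->ptr, ib, length_dw * sizeof(uint32_t));
		s->length_dw = length_dw;
		s->next = bogus_list;
		bogus_list = s;
	}

	int main(void)
	{
		uint32_t ib[4] = { 0xC0001000, 0, 1, 2 };

		bogus_add(ib, 4);
		printf("captured %u dwords, first 0x%08X\n",
		       bogus_list->length_dw, bogus_list->ptr[0]);
		return 0;
	}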
66
37/* 67/*
38 * IB. 68 * IB.
39 */ 69 */
@@ -142,6 +172,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
142 172
143 if (rdev->ib_pool.robj) 173 if (rdev->ib_pool.robj)
144 return 0; 174 return 0;
175 INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
145 /* Allocate 1M object buffer */ 176 /* Allocate 1M object buffer */
146 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, 177 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
147 true, RADEON_GEM_DOMAIN_GTT, 178 true, RADEON_GEM_DOMAIN_GTT,
@@ -192,6 +223,8 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
192 return; 223 return;
193 } 224 }
194 mutex_lock(&rdev->ib_pool.mutex); 225 mutex_lock(&rdev->ib_pool.mutex);
226 radeon_ib_bogus_cleanup(rdev);
227
195 if (rdev->ib_pool.robj) { 228 if (rdev->ib_pool.robj) {
196 r = radeon_bo_reserve(rdev->ib_pool.robj, false); 229 r = radeon_bo_reserve(rdev->ib_pool.robj, false);
197 if (likely(r == 0)) { 230 if (likely(r == 0)) {
@@ -349,15 +382,49 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
349 return 0; 382 return 0;
350} 383}
351 384
385static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
386{
387 struct drm_info_node *node = (struct drm_info_node *) m->private;
388 struct radeon_device *rdev = node->info_ent->data;
389 struct radeon_ib *ib;
390 unsigned i;
391
392 mutex_lock(&rdev->ib_pool.mutex);
393 if (list_empty(&rdev->ib_pool.bogus_ib)) {
394 mutex_unlock(&rdev->ib_pool.mutex);
395 seq_printf(m, "no bogus IB recorded\n");
396 return 0;
397 }
398 ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
399 list_del_init(&ib->list);
400 mutex_unlock(&rdev->ib_pool.mutex);
401 seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
402 for (i = 0; i < ib->length_dw; i++) {
403 seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
404 }
405 vfree(ib->ptr);
406 kfree(ib);
407 return 0;
408}
409
352static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE]; 410static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
353static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32]; 411static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
412
413static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
414 {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
415};
354#endif 416#endif
355 417
356int radeon_debugfs_ib_init(struct radeon_device *rdev) 418int radeon_debugfs_ib_init(struct radeon_device *rdev)
357{ 419{
358#if defined(CONFIG_DEBUG_FS) 420#if defined(CONFIG_DEBUG_FS)
359 unsigned i; 421 unsigned i;
422 int r;
360 423
424 radeon_debugfs_ib_bogus_info_list[0].data = rdev;
425 r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
426 if (r)
427 return r;
361 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { 428 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
362 sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i); 429 sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
363 radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i]; 430 radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 067167cb39ca..3c32f840dcd2 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -29,6 +29,7 @@
29 29
30#include "drmP.h" 30#include "drmP.h"
31#include "drm.h" 31#include "drm.h"
32#include "drm_buffer.h"
32#include "drm_sarea.h" 33#include "drm_sarea.h"
33#include "radeon_drm.h" 34#include "radeon_drm.h"
34#include "radeon_drv.h" 35#include "radeon_drv.h"
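
The hunks that follow replace raw u32 indexing into the command stream with drm_buffer accessors (drm_buffer_pointer_to_dword and friends from the newly added drm_buffer.c), so dword offsets are resolved against the buffer's internal paging rather than a flat user array. The exact drm_buffer layout is outside this diff; the sketch below is only an editor's toy model of a paged dword buffer, not the real API:

	#include <stdio.h>
	#include <stdint.h>

	/* editor's toy model: a buffer split into fixed-size pages of dwords */
	#define PAGE_DWORDS 4

	struct toy_buffer {
		uint32_t pages[3][PAGE_DWORDS];
		unsigned iterator;	/* current dword position, like a read cursor */
	};

	/* return a pointer to the dword at (iterator + offset), crossing pages as needed */
	static uint32_t *toy_pointer_to_dword(struct toy_buffer *buf, unsigned offset)
	{
		unsigned pos = buf->iterator + offset;

		return &buf->pages[pos / PAGE_DWORDS][pos % PAGE_DWORDS];
	}

	int main(void)
	{
		struct toy_buffer buf = { .iterator = 3 };

		buf.pages[1][0] = 0xdeadbeef;				/* dword index 4 overall */
		printf("0x%08x\n", *toy_pointer_to_dword(&buf, 1));	/* -> 0xdeadbeef */
		return 0;
	}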
@@ -91,21 +92,27 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
91static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * 92static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
92 dev_priv, 93 dev_priv,
93 struct drm_file *file_priv, 94 struct drm_file *file_priv,
94 int id, u32 *data) 95 int id, struct drm_buffer *buf)
95{ 96{
97 u32 *data;
96 switch (id) { 98 switch (id) {
97 99
98 case RADEON_EMIT_PP_MISC: 100 case RADEON_EMIT_PP_MISC:
99 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 101 data = drm_buffer_pointer_to_dword(buf,
100 &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { 102 (RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4);
103
104 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
101 DRM_ERROR("Invalid depth buffer offset\n"); 105 DRM_ERROR("Invalid depth buffer offset\n");
102 return -EINVAL; 106 return -EINVAL;
103 } 107 }
108 dev_priv->have_z_offset = 1;
104 break; 109 break;
105 110
106 case RADEON_EMIT_PP_CNTL: 111 case RADEON_EMIT_PP_CNTL:
107 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 112 data = drm_buffer_pointer_to_dword(buf,
108 &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { 113 (RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4);
114
115 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
109 DRM_ERROR("Invalid colour buffer offset\n"); 116 DRM_ERROR("Invalid colour buffer offset\n");
110 return -EINVAL; 117 return -EINVAL;
111 } 118 }
@@ -117,8 +124,8 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
117 case R200_EMIT_PP_TXOFFSET_3: 124 case R200_EMIT_PP_TXOFFSET_3:
118 case R200_EMIT_PP_TXOFFSET_4: 125 case R200_EMIT_PP_TXOFFSET_4:
119 case R200_EMIT_PP_TXOFFSET_5: 126 case R200_EMIT_PP_TXOFFSET_5:
120 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 127 data = drm_buffer_pointer_to_dword(buf, 0);
121 &data[0])) { 128 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
122 DRM_ERROR("Invalid R200 texture offset\n"); 129 DRM_ERROR("Invalid R200 texture offset\n");
123 return -EINVAL; 130 return -EINVAL;
124 } 131 }
@@ -127,8 +134,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
127 case RADEON_EMIT_PP_TXFILTER_0: 134 case RADEON_EMIT_PP_TXFILTER_0:
128 case RADEON_EMIT_PP_TXFILTER_1: 135 case RADEON_EMIT_PP_TXFILTER_1:
129 case RADEON_EMIT_PP_TXFILTER_2: 136 case RADEON_EMIT_PP_TXFILTER_2:
130 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 137 data = drm_buffer_pointer_to_dword(buf,
131 &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { 138 (RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4);
139 if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
132 DRM_ERROR("Invalid R100 texture offset\n"); 140 DRM_ERROR("Invalid R100 texture offset\n");
133 return -EINVAL; 141 return -EINVAL;
134 } 142 }
@@ -142,9 +150,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
142 case R200_EMIT_PP_CUBIC_OFFSETS_5:{ 150 case R200_EMIT_PP_CUBIC_OFFSETS_5:{
143 int i; 151 int i;
144 for (i = 0; i < 5; i++) { 152 for (i = 0; i < 5; i++) {
153 data = drm_buffer_pointer_to_dword(buf, i);
145 if (radeon_check_and_fixup_offset(dev_priv, 154 if (radeon_check_and_fixup_offset(dev_priv,
146 file_priv, 155 file_priv,
147 &data[i])) { 156 data)) {
148 DRM_ERROR 157 DRM_ERROR
149 ("Invalid R200 cubic texture offset\n"); 158 ("Invalid R200 cubic texture offset\n");
150 return -EINVAL; 159 return -EINVAL;
@@ -158,9 +167,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
158 case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{ 167 case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
159 int i; 168 int i;
160 for (i = 0; i < 5; i++) { 169 for (i = 0; i < 5; i++) {
170 data = drm_buffer_pointer_to_dword(buf, i);
161 if (radeon_check_and_fixup_offset(dev_priv, 171 if (radeon_check_and_fixup_offset(dev_priv,
162 file_priv, 172 file_priv,
163 &data[i])) { 173 data)) {
164 DRM_ERROR 174 DRM_ERROR
165 ("Invalid R100 cubic texture offset\n"); 175 ("Invalid R100 cubic texture offset\n");
166 return -EINVAL; 176 return -EINVAL;
@@ -269,23 +279,24 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
269 cmdbuf, 279 cmdbuf,
270 unsigned int *cmdsz) 280 unsigned int *cmdsz)
271{ 281{
272 u32 *cmd = (u32 *) cmdbuf->buf; 282 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
273 u32 offset, narrays; 283 u32 offset, narrays;
274 int count, i, k; 284 int count, i, k;
275 285
276 *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16); 286 count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16);
287 *cmdsz = 2 + count;
277 288
278 if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { 289 if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) {
279 DRM_ERROR("Not a type 3 packet\n"); 290 DRM_ERROR("Not a type 3 packet\n");
280 return -EINVAL; 291 return -EINVAL;
281 } 292 }
282 293
283 if (4 * *cmdsz > cmdbuf->bufsz) { 294 if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
284 DRM_ERROR("Packet size larger than size of data provided\n"); 295 DRM_ERROR("Packet size larger than size of data provided\n");
285 return -EINVAL; 296 return -EINVAL;
286 } 297 }
287 298
288 switch(cmd[0] & 0xff00) { 299 switch (*cmd & 0xff00) {
289 /* XXX Are there old drivers needing other packets? */ 300 /* XXX Are there old drivers needing other packets? */
290 301
291 case RADEON_3D_DRAW_IMMD: 302 case RADEON_3D_DRAW_IMMD:
@@ -312,7 +323,6 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
312 break; 323 break;
313 324
314 case RADEON_3D_LOAD_VBPNTR: 325 case RADEON_3D_LOAD_VBPNTR:
315 count = (cmd[0] >> 16) & 0x3fff;
316 326
317 if (count > 18) { /* 12 arrays max */ 327 if (count > 18) { /* 12 arrays max */
318 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", 328 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
@@ -321,13 +331,16 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
321 } 331 }
322 332
323 /* carefully check packet contents */ 333 /* carefully check packet contents */
324 narrays = cmd[1] & ~0xc000; 334 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
335
336 narrays = *cmd & ~0xc000;
325 k = 0; 337 k = 0;
326 i = 2; 338 i = 2;
327 while ((k < narrays) && (i < (count + 2))) { 339 while ((k < narrays) && (i < (count + 2))) {
328 i++; /* skip attribute field */ 340 i++; /* skip attribute field */
341 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
329 if (radeon_check_and_fixup_offset(dev_priv, file_priv, 342 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
330 &cmd[i])) { 343 cmd)) {
331 DRM_ERROR 344 DRM_ERROR
332 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 345 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
333 k, i); 346 k, i);
@@ -338,8 +351,10 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
338 if (k == narrays) 351 if (k == narrays)
339 break; 352 break;
340 /* have one more to process, they come in pairs */ 353 /* have one more to process, they come in pairs */
354 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
355
341 if (radeon_check_and_fixup_offset(dev_priv, 356 if (radeon_check_and_fixup_offset(dev_priv,
342 file_priv, &cmd[i])) 357 file_priv, cmd))
343 { 358 {
344 DRM_ERROR 359 DRM_ERROR
345 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", 360 ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
@@ -363,7 +378,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
363 DRM_ERROR("Invalid 3d packet for r200-class chip\n"); 378 DRM_ERROR("Invalid 3d packet for r200-class chip\n");
364 return -EINVAL; 379 return -EINVAL;
365 } 380 }
366 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) { 381
382 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
383 if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
367 DRM_ERROR("Invalid rndr_gen_indx offset\n"); 384 DRM_ERROR("Invalid rndr_gen_indx offset\n");
368 return -EINVAL; 385 return -EINVAL;
369 } 386 }
@@ -374,12 +391,15 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
374 DRM_ERROR("Invalid 3d packet for r100-class chip\n"); 391 DRM_ERROR("Invalid 3d packet for r100-class chip\n");
375 return -EINVAL; 392 return -EINVAL;
376 } 393 }
377 if ((cmd[1] & 0x8000ffff) != 0x80000810) { 394
378 DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); 395 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
396 if ((*cmd & 0x8000ffff) != 0x80000810) {
397 DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);
379 return -EINVAL; 398 return -EINVAL;
380 } 399 }
381 if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) { 400 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
382 DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); 401 if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
402 DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);
383 return -EINVAL; 403 return -EINVAL;
384 } 404 }
385 break; 405 break;
@@ -388,31 +408,34 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
388 case RADEON_CNTL_PAINT_MULTI: 408 case RADEON_CNTL_PAINT_MULTI:
389 case RADEON_CNTL_BITBLT_MULTI: 409 case RADEON_CNTL_BITBLT_MULTI:
390 /* MSB of opcode: next DWORD GUI_CNTL */ 410 /* MSB of opcode: next DWORD GUI_CNTL */
391 if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL 411 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
412 if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
392 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 413 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
393 offset = cmd[2] << 10; 414 u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
415 offset = *cmd2 << 10;
394 if (radeon_check_and_fixup_offset 416 if (radeon_check_and_fixup_offset
395 (dev_priv, file_priv, &offset)) { 417 (dev_priv, file_priv, &offset)) {
396 DRM_ERROR("Invalid first packet offset\n"); 418 DRM_ERROR("Invalid first packet offset\n");
397 return -EINVAL; 419 return -EINVAL;
398 } 420 }
399 cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10; 421 *cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;
400 } 422 }
401 423
402 if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && 424 if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
403 (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { 425 (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
404 offset = cmd[3] << 10; 426 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
 427 offset = *cmd3 << 10;
405 if (radeon_check_and_fixup_offset 428 if (radeon_check_and_fixup_offset
406 (dev_priv, file_priv, &offset)) { 429 (dev_priv, file_priv, &offset)) {
407 DRM_ERROR("Invalid second packet offset\n"); 430 DRM_ERROR("Invalid second packet offset\n");
408 return -EINVAL; 431 return -EINVAL;
409 } 432 }
410 cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; 433 *cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;
411 } 434 }
412 break; 435 break;
413 436
414 default: 437 default:
415 DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); 438 DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
416 return -EINVAL; 439 return -EINVAL;
417 } 440 }
418 441
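The pattern in the hunks above, where cmd is re-fetched with drm_buffer_pointer_to_dword() before every access instead of the stream being indexed as a flat u32 array, follows from how struct drm_buffer appears to hold the copied-in command stream: not as one contiguous allocation but as a series of page-sized chunks, so neighbouring dwords are not guaranteed to be adjacent in kernel memory. A minimal sketch of such an accessor, assuming an iterator byte position plus a data[] array of page-sized chunks (the real helper lives with drivers/gpu/drm/drm_buffer.c; this is a reconstruction from the usage above, not a copy):

static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer,
						int offset)
{
	/* byte position of the requested dword, counted from the start of
	 * the buffer; iterator marks how far parsing has advanced */
	int iter = buffer->iterator + offset * 4;

	/* first index selects the page-sized chunk, second the byte within
	 * it, so a naturally aligned dword never spans two chunks */
	return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
}

This is also why the size checks above compare against drm_buffer_unprocessed() rather than a hand-maintained bufsz field: the remaining length can be derived from the buffer's own size and iterator.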
@@ -876,6 +899,11 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
876 if (tmp & RADEON_BACK) 899 if (tmp & RADEON_BACK)
877 flags |= RADEON_FRONT; 900 flags |= RADEON_FRONT;
878 } 901 }
902 if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
903 if (!dev_priv->have_z_offset)
904 printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
905 flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
906 }
879 907
880 if (flags & (RADEON_FRONT | RADEON_BACK)) { 908 if (flags & (RADEON_FRONT | RADEON_BACK)) {
881 909
@@ -2611,7 +2639,6 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2611{ 2639{
2612 int id = (int)header.packet.packet_id; 2640 int id = (int)header.packet.packet_id;
2613 int sz, reg; 2641 int sz, reg;
2614 int *data = (int *)cmdbuf->buf;
2615 RING_LOCALS; 2642 RING_LOCALS;
2616 2643
2617 if (id >= RADEON_MAX_STATE_PACKETS) 2644 if (id >= RADEON_MAX_STATE_PACKETS)
@@ -2620,23 +2647,22 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
2620 sz = packet[id].len; 2647 sz = packet[id].len;
2621 reg = packet[id].start; 2648 reg = packet[id].start;
2622 2649
2623 if (sz * sizeof(int) > cmdbuf->bufsz) { 2650 if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
2624 DRM_ERROR("Packet size provided larger than data provided\n"); 2651 DRM_ERROR("Packet size provided larger than data provided\n");
2625 return -EINVAL; 2652 return -EINVAL;
2626 } 2653 }
2627 2654
2628 if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) { 2655 if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
2656 cmdbuf->buffer)) {
2629 DRM_ERROR("Packet verification failed\n"); 2657 DRM_ERROR("Packet verification failed\n");
2630 return -EINVAL; 2658 return -EINVAL;
2631 } 2659 }
2632 2660
2633 BEGIN_RING(sz + 1); 2661 BEGIN_RING(sz + 1);
2634 OUT_RING(CP_PACKET0(reg, (sz - 1))); 2662 OUT_RING(CP_PACKET0(reg, (sz - 1)));
2635 OUT_RING_TABLE(data, sz); 2663 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2636 ADVANCE_RING(); 2664 ADVANCE_RING();
2637 2665
2638 cmdbuf->buf += sz * sizeof(int);
2639 cmdbuf->bufsz -= sz * sizeof(int);
2640 return 0; 2666 return 0;
2641} 2667}
2642 2668
@@ -2653,10 +2679,8 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
2653 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); 2679 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
2654 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); 2680 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2655 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); 2681 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
2656 OUT_RING_TABLE(cmdbuf->buf, sz); 2682 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2657 ADVANCE_RING(); 2683 ADVANCE_RING();
2658 cmdbuf->buf += sz * sizeof(int);
2659 cmdbuf->bufsz -= sz * sizeof(int);
2660 return 0; 2684 return 0;
2661} 2685}
2662 2686
@@ -2675,10 +2699,8 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
2675 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); 2699 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
2676 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); 2700 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2677 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); 2701 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
2678 OUT_RING_TABLE(cmdbuf->buf, sz); 2702 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2679 ADVANCE_RING(); 2703 ADVANCE_RING();
2680 cmdbuf->buf += sz * sizeof(int);
2681 cmdbuf->bufsz -= sz * sizeof(int);
2682 return 0; 2704 return 0;
2683} 2705}
2684 2706
@@ -2696,11 +2718,9 @@ static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
2696 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); 2718 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
2697 OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); 2719 OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
2698 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); 2720 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
2699 OUT_RING_TABLE(cmdbuf->buf, sz); 2721 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2700 ADVANCE_RING(); 2722 ADVANCE_RING();
2701 2723
2702 cmdbuf->buf += sz * sizeof(int);
2703 cmdbuf->bufsz -= sz * sizeof(int);
2704 return 0; 2724 return 0;
2705} 2725}
2706 2726
@@ -2714,7 +2734,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
2714 2734
2715 if (!sz) 2735 if (!sz)
2716 return 0; 2736 return 0;
2717 if (sz * 4 > cmdbuf->bufsz) 2737 if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
2718 return -EINVAL; 2738 return -EINVAL;
2719 2739
2720 BEGIN_RING(5 + sz); 2740 BEGIN_RING(5 + sz);
@@ -2722,11 +2742,9 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
2722 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); 2742 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
2723 OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); 2743 OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
2724 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); 2744 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
2725 OUT_RING_TABLE(cmdbuf->buf, sz); 2745 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
2726 ADVANCE_RING(); 2746 ADVANCE_RING();
2727 2747
2728 cmdbuf->buf += sz * sizeof(int);
2729 cmdbuf->bufsz -= sz * sizeof(int);
2730 return 0; 2748 return 0;
2731} 2749}
2732 2750
@@ -2748,11 +2766,9 @@ static int radeon_emit_packet3(struct drm_device * dev,
2748 } 2766 }
2749 2767
2750 BEGIN_RING(cmdsz); 2768 BEGIN_RING(cmdsz);
2751 OUT_RING_TABLE(cmdbuf->buf, cmdsz); 2769 OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
2752 ADVANCE_RING(); 2770 ADVANCE_RING();
2753 2771
2754 cmdbuf->buf += cmdsz * 4;
2755 cmdbuf->bufsz -= cmdsz * 4;
2756 return 0; 2772 return 0;
2757} 2773}
2758 2774
@@ -2805,16 +2821,16 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
2805 } 2821 }
2806 2822
2807 BEGIN_RING(cmdsz); 2823 BEGIN_RING(cmdsz);
2808 OUT_RING_TABLE(cmdbuf->buf, cmdsz); 2824 OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
2809 ADVANCE_RING(); 2825 ADVANCE_RING();
2810 2826
2811 } while (++i < cmdbuf->nbox); 2827 } while (++i < cmdbuf->nbox);
2812 if (cmdbuf->nbox == 1) 2828 if (cmdbuf->nbox == 1)
2813 cmdbuf->nbox = 0; 2829 cmdbuf->nbox = 0;
2814 2830
2831 return 0;
2815 out: 2832 out:
2816 cmdbuf->buf += cmdsz * 4; 2833 drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
2817 cmdbuf->bufsz -= cmdsz * 4;
2818 return 0; 2834 return 0;
2819} 2835}
2820 2836
@@ -2847,16 +2863,16 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)
2847 return 0; 2863 return 0;
2848} 2864}
2849 2865
2850static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) 2866static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
2867 struct drm_file *file_priv)
2851{ 2868{
2852 drm_radeon_private_t *dev_priv = dev->dev_private; 2869 drm_radeon_private_t *dev_priv = dev->dev_private;
2853 struct drm_device_dma *dma = dev->dma; 2870 struct drm_device_dma *dma = dev->dma;
2854 struct drm_buf *buf = NULL; 2871 struct drm_buf *buf = NULL;
2872 drm_radeon_cmd_header_t stack_header;
2855 int idx; 2873 int idx;
2856 drm_radeon_kcmd_buffer_t *cmdbuf = data; 2874 drm_radeon_kcmd_buffer_t *cmdbuf = data;
2857 drm_radeon_cmd_header_t header; 2875 int orig_nbox;
2858 int orig_nbox, orig_bufsz;
2859 char *kbuf = NULL;
2860 2876
2861 LOCK_TEST_WITH_RETURN(dev, file_priv); 2877 LOCK_TEST_WITH_RETURN(dev, file_priv);
2862 2878
@@ -2871,17 +2887,16 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2871 * races between checking values and using those values in other code, 2887 * races between checking values and using those values in other code,
2872 * and simply to avoid a lot of function calls to copy in data. 2888 * and simply to avoid a lot of function calls to copy in data.
2873 */ 2889 */
2874 orig_bufsz = cmdbuf->bufsz; 2890 if (cmdbuf->bufsz != 0) {
2875 if (orig_bufsz != 0) { 2891 int rv;
2876 kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL); 2892 void __user *buffer = cmdbuf->buffer;
2877 if (kbuf == NULL) 2893 rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
2878 return -ENOMEM; 2894 if (rv)
2879 if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf, 2895 return rv;
2880 cmdbuf->bufsz)) { 2896 rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
2881 kfree(kbuf); 2897 cmdbuf->bufsz);
2882 return -EFAULT; 2898 if (rv)
2883 } 2899 return rv;
2884 cmdbuf->buf = kbuf;
2885 } 2900 }
2886 2901
2887 orig_nbox = cmdbuf->nbox; 2902 orig_nbox = cmdbuf->nbox;
@@ -2890,24 +2905,24 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2890 int temp; 2905 int temp;
2891 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); 2906 temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
2892 2907
2893 if (orig_bufsz != 0) 2908 if (cmdbuf->bufsz != 0)
2894 kfree(kbuf); 2909 drm_buffer_free(cmdbuf->buffer);
2895 2910
2896 return temp; 2911 return temp;
2897 } 2912 }
2898 2913
2899 /* microcode_version != r300 */ 2914 /* microcode_version != r300 */
2900 while (cmdbuf->bufsz >= sizeof(header)) { 2915 while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {
2901 2916
2902 header.i = *(int *)cmdbuf->buf; 2917 drm_radeon_cmd_header_t *header;
2903 cmdbuf->buf += sizeof(header); 2918 header = drm_buffer_read_object(cmdbuf->buffer,
2904 cmdbuf->bufsz -= sizeof(header); 2919 sizeof(stack_header), &stack_header);
2905 2920
2906 switch (header.header.cmd_type) { 2921 switch (header->header.cmd_type) {
2907 case RADEON_CMD_PACKET: 2922 case RADEON_CMD_PACKET:
2908 DRM_DEBUG("RADEON_CMD_PACKET\n"); 2923 DRM_DEBUG("RADEON_CMD_PACKET\n");
2909 if (radeon_emit_packets 2924 if (radeon_emit_packets
2910 (dev_priv, file_priv, header, cmdbuf)) { 2925 (dev_priv, file_priv, *header, cmdbuf)) {
2911 DRM_ERROR("radeon_emit_packets failed\n"); 2926 DRM_ERROR("radeon_emit_packets failed\n");
2912 goto err; 2927 goto err;
2913 } 2928 }
@@ -2915,7 +2930,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2915 2930
2916 case RADEON_CMD_SCALARS: 2931 case RADEON_CMD_SCALARS:
2917 DRM_DEBUG("RADEON_CMD_SCALARS\n"); 2932 DRM_DEBUG("RADEON_CMD_SCALARS\n");
2918 if (radeon_emit_scalars(dev_priv, header, cmdbuf)) { 2933 if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
2919 DRM_ERROR("radeon_emit_scalars failed\n"); 2934 DRM_ERROR("radeon_emit_scalars failed\n");
2920 goto err; 2935 goto err;
2921 } 2936 }
@@ -2923,7 +2938,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2923 2938
2924 case RADEON_CMD_VECTORS: 2939 case RADEON_CMD_VECTORS:
2925 DRM_DEBUG("RADEON_CMD_VECTORS\n"); 2940 DRM_DEBUG("RADEON_CMD_VECTORS\n");
2926 if (radeon_emit_vectors(dev_priv, header, cmdbuf)) { 2941 if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
2927 DRM_ERROR("radeon_emit_vectors failed\n"); 2942 DRM_ERROR("radeon_emit_vectors failed\n");
2928 goto err; 2943 goto err;
2929 } 2944 }
@@ -2931,7 +2946,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2931 2946
2932 case RADEON_CMD_DMA_DISCARD: 2947 case RADEON_CMD_DMA_DISCARD:
2933 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); 2948 DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
2934 idx = header.dma.buf_idx; 2949 idx = header->dma.buf_idx;
2935 if (idx < 0 || idx >= dma->buf_count) { 2950 if (idx < 0 || idx >= dma->buf_count) {
2936 DRM_ERROR("buffer index %d (of %d max)\n", 2951 DRM_ERROR("buffer index %d (of %d max)\n",
2937 idx, dma->buf_count - 1); 2952 idx, dma->buf_count - 1);
@@ -2968,7 +2983,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2968 2983
2969 case RADEON_CMD_SCALARS2: 2984 case RADEON_CMD_SCALARS2:
2970 DRM_DEBUG("RADEON_CMD_SCALARS2\n"); 2985 DRM_DEBUG("RADEON_CMD_SCALARS2\n");
2971 if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) { 2986 if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
2972 DRM_ERROR("radeon_emit_scalars2 failed\n"); 2987 DRM_ERROR("radeon_emit_scalars2 failed\n");
2973 goto err; 2988 goto err;
2974 } 2989 }
@@ -2976,37 +2991,37 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
2976 2991
2977 case RADEON_CMD_WAIT: 2992 case RADEON_CMD_WAIT:
2978 DRM_DEBUG("RADEON_CMD_WAIT\n"); 2993 DRM_DEBUG("RADEON_CMD_WAIT\n");
2979 if (radeon_emit_wait(dev, header.wait.flags)) { 2994 if (radeon_emit_wait(dev, header->wait.flags)) {
2980 DRM_ERROR("radeon_emit_wait failed\n"); 2995 DRM_ERROR("radeon_emit_wait failed\n");
2981 goto err; 2996 goto err;
2982 } 2997 }
2983 break; 2998 break;
2984 case RADEON_CMD_VECLINEAR: 2999 case RADEON_CMD_VECLINEAR:
2985 DRM_DEBUG("RADEON_CMD_VECLINEAR\n"); 3000 DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
2986 if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) { 3001 if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
2987 DRM_ERROR("radeon_emit_veclinear failed\n"); 3002 DRM_ERROR("radeon_emit_veclinear failed\n");
2988 goto err; 3003 goto err;
2989 } 3004 }
2990 break; 3005 break;
2991 3006
2992 default: 3007 default:
2993 DRM_ERROR("bad cmd_type %d at %p\n", 3008 DRM_ERROR("bad cmd_type %d at byte %d\n",
2994 header.header.cmd_type, 3009 header->header.cmd_type,
2995 cmdbuf->buf - sizeof(header)); 3010 cmdbuf->buffer->iterator);
2996 goto err; 3011 goto err;
2997 } 3012 }
2998 } 3013 }
2999 3014
3000 if (orig_bufsz != 0) 3015 if (cmdbuf->bufsz != 0)
3001 kfree(kbuf); 3016 drm_buffer_free(cmdbuf->buffer);
3002 3017
3003 DRM_DEBUG("DONE\n"); 3018 DRM_DEBUG("DONE\n");
3004 COMMIT_RING(); 3019 COMMIT_RING();
3005 return 0; 3020 return 0;
3006 3021
3007 err: 3022 err:
3008 if (orig_bufsz != 0) 3023 if (cmdbuf->bufsz != 0)
3009 kfree(kbuf); 3024 drm_buffer_free(cmdbuf->buffer);
3010 return -EINVAL; 3025 return -EINVAL;
3011} 3026}
3012 3027
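Taken together, the radeon_cp_cmdbuf() changes above replace the hand-rolled kbuf/bufsz bookkeeping with the drm_buffer iterator. Reduced to its essentials, the consumption loop now has roughly the following shape; this is a sketch only, handle_command() is a hypothetical stand-in for the radeon_emit_*() dispatch seen in the diff, the include names follow the file's usual convention, and error reporting is trimmed. Each emitter is expected to move the iterator past its own payload, as the OUT_RING_DRM_BUFFER()/drm_buffer_advance() calls above do.

#include "drmP.h"
#include "drm_buffer.h"

/* hypothetical stand-in for the per-command radeon_emit_*() helpers */
static int handle_command(drm_radeon_cmd_header_t *header,
			  struct drm_buffer *buf);

static int consume_cmd_stream(struct drm_buffer **buf,
			      void __user *user, int bufsz)
{
	drm_radeon_cmd_header_t stack_header, *header;
	int rv;

	rv = drm_buffer_alloc(buf, bufsz);		/* replaces kmalloc() */
	if (rv)
		return rv;
	rv = drm_buffer_copy_from_user(*buf, user, bufsz);
	if (rv)
		goto out;

	/* read_object() hands back a pointer into the buffer, falling back
	 * to the stack copy when the header straddles a chunk boundary,
	 * and advances the iterator past the header either way */
	while (drm_buffer_unprocessed(*buf) >= sizeof(stack_header)) {
		header = drm_buffer_read_object(*buf, sizeof(stack_header),
						&stack_header);
		rv = handle_command(header, *buf);
		if (rv)
			break;
	}
out:
	drm_buffer_free(*buf);
	return rv;
}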
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 9f5e2f929da9..313c96bc09da 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -186,7 +186,7 @@ void radeon_test_moves(struct radeon_device *rdev)
186 radeon_bo_kunmap(gtt_obj[i]); 186 radeon_bo_kunmap(gtt_obj[i]);
187 187
188 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", 188 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
189 gtt_addr - rdev->mc.gtt_location); 189 gtt_addr - rdev->mc.gtt_start);
190 } 190 }
191 191
192out_cleanup: 192out_cleanup:
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 58b5adf974ca..43c5ab34b634 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
150 man->default_caching = TTM_PL_FLAG_CACHED; 150 man->default_caching = TTM_PL_FLAG_CACHED;
151 break; 151 break;
152 case TTM_PL_TT: 152 case TTM_PL_TT:
153 man->gpu_offset = rdev->mc.gtt_location; 153 man->gpu_offset = rdev->mc.gtt_start;
154 man->available_caching = TTM_PL_MASK_CACHING; 154 man->available_caching = TTM_PL_MASK_CACHING;
155 man->default_caching = TTM_PL_FLAG_CACHED; 155 man->default_caching = TTM_PL_FLAG_CACHED;
156 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; 156 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
180 break; 180 break;
181 case TTM_PL_VRAM: 181 case TTM_PL_VRAM:
182 /* "On-card" video ram */ 182 /* "On-card" video ram */
183 man->gpu_offset = rdev->mc.vram_location; 183 man->gpu_offset = rdev->mc.vram_start;
184 man->flags = TTM_MEMTYPE_FLAG_FIXED | 184 man->flags = TTM_MEMTYPE_FLAG_FIXED |
185 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | 185 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
186 TTM_MEMTYPE_FLAG_MAPPABLE; 186 TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -262,10 +262,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
262 262
263 switch (old_mem->mem_type) { 263 switch (old_mem->mem_type) {
264 case TTM_PL_VRAM: 264 case TTM_PL_VRAM:
265 old_start += rdev->mc.vram_location; 265 old_start += rdev->mc.vram_start;
266 break; 266 break;
267 case TTM_PL_TT: 267 case TTM_PL_TT:
268 old_start += rdev->mc.gtt_location; 268 old_start += rdev->mc.gtt_start;
269 break; 269 break;
270 default: 270 default:
271 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); 271 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -273,10 +273,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
273 } 273 }
274 switch (new_mem->mem_type) { 274 switch (new_mem->mem_type) {
275 case TTM_PL_VRAM: 275 case TTM_PL_VRAM:
276 new_start += rdev->mc.vram_location; 276 new_start += rdev->mc.vram_start;
277 break; 277 break;
278 case TTM_PL_TT: 278 case TTM_PL_TT:
279 new_start += rdev->mc.gtt_location; 279 new_start += rdev->mc.gtt_start;
280 break; 280 break;
281 default: 281 default:
282 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); 282 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
new file mode 100644
index 000000000000..8f414a5f520f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -0,0 +1,837 @@
1r600 0x9400
20x000287A0 R7xx_CB_SHADER_CONTROL
30x00028230 R7xx_PA_SC_EDGERULE
40x000286C8 R7xx_SPI_THREAD_GROUPING
50x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
60x000088C4 VGT_CACHE_INVALIDATION
70x00028A50 VGT_ENHANCE
80x000088CC VGT_ES_PER_GS
90x00028A2C VGT_GROUP_DECR
100x00028A28 VGT_GROUP_FIRST_DECR
110x00028A24 VGT_GROUP_PRIM_TYPE
120x00028A30 VGT_GROUP_VECT_0_CNTL
130x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
140x00028A34 VGT_GROUP_VECT_1_CNTL
150x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
160x00028A40 VGT_GS_MODE
170x00028A6C VGT_GS_OUT_PRIM_TYPE
180x000088C8 VGT_GS_PER_ES
190x000088E8 VGT_GS_PER_VS
200x000088D4 VGT_GS_VERTEX_REUSE
210x00028A14 VGT_HOS_CNTL
220x00028A18 VGT_HOS_MAX_TESS_LEVEL
230x00028A1C VGT_HOS_MIN_TESS_LEVEL
240x00028A20 VGT_HOS_REUSE_DEPTH
250x0000895C VGT_INDEX_TYPE
260x00028408 VGT_INDX_OFFSET
270x00028AA0 VGT_INSTANCE_STEP_RATE_0
280x00028AA4 VGT_INSTANCE_STEP_RATE_1
290x000088C0 VGT_LAST_COPY_STATE
300x00028400 VGT_MAX_VTX_INDX
310x000088D8 VGT_MC_LAT_CNTL
320x00028404 VGT_MIN_VTX_INDX
330x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
340x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
350x00008970 VGT_NUM_INDICES
360x00008974 VGT_NUM_INSTANCES
370x00028A10 VGT_OUTPUT_PATH_CNTL
380x00028C5C VGT_OUT_DEALLOC_CNTL
390x00028A84 VGT_PRIMITIVEID_EN
400x00008958 VGT_PRIMITIVE_TYPE
410x00028AB4 VGT_REUSE_OFF
420x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL
430x00028AB8 VGT_VTX_CNT_EN
440x000088B0 VGT_VTX_VECT_EJECT_REG
450x00028810 PA_CL_CLIP_CNTL
460x00008A14 PA_CL_ENHANCE
470x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
480x00028C18 PA_CL_GB_HORZ_DISC_ADJ
490x00028C0C PA_CL_GB_VERT_CLIP_ADJ
500x00028C10 PA_CL_GB_VERT_DISC_ADJ
510x00028820 PA_CL_NANINF_CNTL
520x00028E1C PA_CL_POINT_CULL_RAD
530x00028E18 PA_CL_POINT_SIZE
540x00028E10 PA_CL_POINT_X_RAD
550x00028E14 PA_CL_POINT_Y_RAD
560x00028E2C PA_CL_UCP_0_W
570x00028E3C PA_CL_UCP_1_W
580x00028E4C PA_CL_UCP_2_W
590x00028E5C PA_CL_UCP_3_W
600x00028E6C PA_CL_UCP_4_W
610x00028E7C PA_CL_UCP_5_W
620x00028E20 PA_CL_UCP_0_X
630x00028E30 PA_CL_UCP_1_X
640x00028E40 PA_CL_UCP_2_X
650x00028E50 PA_CL_UCP_3_X
660x00028E60 PA_CL_UCP_4_X
670x00028E70 PA_CL_UCP_5_X
680x00028E24 PA_CL_UCP_0_Y
690x00028E34 PA_CL_UCP_1_Y
700x00028E44 PA_CL_UCP_2_Y
710x00028E54 PA_CL_UCP_3_Y
720x00028E64 PA_CL_UCP_4_Y
730x00028E74 PA_CL_UCP_5_Y
740x00028E28 PA_CL_UCP_0_Z
750x00028E38 PA_CL_UCP_1_Z
760x00028E48 PA_CL_UCP_2_Z
770x00028E58 PA_CL_UCP_3_Z
780x00028E68 PA_CL_UCP_4_Z
790x00028E78 PA_CL_UCP_5_Z
800x00028440 PA_CL_VPORT_XOFFSET_0
810x00028458 PA_CL_VPORT_XOFFSET_1
820x00028470 PA_CL_VPORT_XOFFSET_2
830x00028488 PA_CL_VPORT_XOFFSET_3
840x000284A0 PA_CL_VPORT_XOFFSET_4
850x000284B8 PA_CL_VPORT_XOFFSET_5
860x000284D0 PA_CL_VPORT_XOFFSET_6
870x000284E8 PA_CL_VPORT_XOFFSET_7
880x00028500 PA_CL_VPORT_XOFFSET_8
890x00028518 PA_CL_VPORT_XOFFSET_9
900x00028530 PA_CL_VPORT_XOFFSET_10
910x00028548 PA_CL_VPORT_XOFFSET_11
920x00028560 PA_CL_VPORT_XOFFSET_12
930x00028578 PA_CL_VPORT_XOFFSET_13
940x00028590 PA_CL_VPORT_XOFFSET_14
950x000285A8 PA_CL_VPORT_XOFFSET_15
960x0002843C PA_CL_VPORT_XSCALE_0
970x00028454 PA_CL_VPORT_XSCALE_1
980x0002846C PA_CL_VPORT_XSCALE_2
990x00028484 PA_CL_VPORT_XSCALE_3
1000x0002849C PA_CL_VPORT_XSCALE_4
1010x000284B4 PA_CL_VPORT_XSCALE_5
1020x000284CC PA_CL_VPORT_XSCALE_6
1030x000284E4 PA_CL_VPORT_XSCALE_7
1040x000284FC PA_CL_VPORT_XSCALE_8
1050x00028514 PA_CL_VPORT_XSCALE_9
1060x0002852C PA_CL_VPORT_XSCALE_10
1070x00028544 PA_CL_VPORT_XSCALE_11
1080x0002855C PA_CL_VPORT_XSCALE_12
1090x00028574 PA_CL_VPORT_XSCALE_13
1100x0002858C PA_CL_VPORT_XSCALE_14
1110x000285A4 PA_CL_VPORT_XSCALE_15
1120x00028448 PA_CL_VPORT_YOFFSET_0
1130x00028460 PA_CL_VPORT_YOFFSET_1
1140x00028478 PA_CL_VPORT_YOFFSET_2
1150x00028490 PA_CL_VPORT_YOFFSET_3
1160x000284A8 PA_CL_VPORT_YOFFSET_4
1170x000284C0 PA_CL_VPORT_YOFFSET_5
1180x000284D8 PA_CL_VPORT_YOFFSET_6
1190x000284F0 PA_CL_VPORT_YOFFSET_7
1200x00028508 PA_CL_VPORT_YOFFSET_8
1210x00028520 PA_CL_VPORT_YOFFSET_9
1220x00028538 PA_CL_VPORT_YOFFSET_10
1230x00028550 PA_CL_VPORT_YOFFSET_11
1240x00028568 PA_CL_VPORT_YOFFSET_12
1250x00028580 PA_CL_VPORT_YOFFSET_13
1260x00028598 PA_CL_VPORT_YOFFSET_14
1270x000285B0 PA_CL_VPORT_YOFFSET_15
1280x00028444 PA_CL_VPORT_YSCALE_0
1290x0002845C PA_CL_VPORT_YSCALE_1
1300x00028474 PA_CL_VPORT_YSCALE_2
1310x0002848C PA_CL_VPORT_YSCALE_3
1320x000284A4 PA_CL_VPORT_YSCALE_4
1330x000284BC PA_CL_VPORT_YSCALE_5
1340x000284D4 PA_CL_VPORT_YSCALE_6
1350x000284EC PA_CL_VPORT_YSCALE_7
1360x00028504 PA_CL_VPORT_YSCALE_8
1370x0002851C PA_CL_VPORT_YSCALE_9
1380x00028534 PA_CL_VPORT_YSCALE_10
1390x0002854C PA_CL_VPORT_YSCALE_11
1400x00028564 PA_CL_VPORT_YSCALE_12
1410x0002857C PA_CL_VPORT_YSCALE_13
1420x00028594 PA_CL_VPORT_YSCALE_14
1430x000285AC PA_CL_VPORT_YSCALE_15
1440x00028450 PA_CL_VPORT_ZOFFSET_0
1450x00028468 PA_CL_VPORT_ZOFFSET_1
1460x00028480 PA_CL_VPORT_ZOFFSET_2
1470x00028498 PA_CL_VPORT_ZOFFSET_3
1480x000284B0 PA_CL_VPORT_ZOFFSET_4
1490x000284C8 PA_CL_VPORT_ZOFFSET_5
1500x000284E0 PA_CL_VPORT_ZOFFSET_6
1510x000284F8 PA_CL_VPORT_ZOFFSET_7
1520x00028510 PA_CL_VPORT_ZOFFSET_8
1530x00028528 PA_CL_VPORT_ZOFFSET_9
1540x00028540 PA_CL_VPORT_ZOFFSET_10
1550x00028558 PA_CL_VPORT_ZOFFSET_11
1560x00028570 PA_CL_VPORT_ZOFFSET_12
1570x00028588 PA_CL_VPORT_ZOFFSET_13
1580x000285A0 PA_CL_VPORT_ZOFFSET_14
1590x000285B8 PA_CL_VPORT_ZOFFSET_15
1600x0002844C PA_CL_VPORT_ZSCALE_0
1610x00028464 PA_CL_VPORT_ZSCALE_1
1620x0002847C PA_CL_VPORT_ZSCALE_2
1630x00028494 PA_CL_VPORT_ZSCALE_3
1640x000284AC PA_CL_VPORT_ZSCALE_4
1650x000284C4 PA_CL_VPORT_ZSCALE_5
1660x000284DC PA_CL_VPORT_ZSCALE_6
1670x000284F4 PA_CL_VPORT_ZSCALE_7
1680x0002850C PA_CL_VPORT_ZSCALE_8
1690x00028524 PA_CL_VPORT_ZSCALE_9
1700x0002853C PA_CL_VPORT_ZSCALE_10
1710x00028554 PA_CL_VPORT_ZSCALE_11
1720x0002856C PA_CL_VPORT_ZSCALE_12
1730x00028584 PA_CL_VPORT_ZSCALE_13
1740x0002859C PA_CL_VPORT_ZSCALE_14
1750x000285B4 PA_CL_VPORT_ZSCALE_15
1760x0002881C PA_CL_VS_OUT_CNTL
1770x00028818 PA_CL_VTE_CNTL
1780x00028C48 PA_SC_AA_MASK
1790x00008B40 PA_SC_AA_SAMPLE_LOCS_2S
1800x00008B44 PA_SC_AA_SAMPLE_LOCS_4S
1810x00008B48 PA_SC_AA_SAMPLE_LOCS_8S_WD0
1820x00008B4C PA_SC_AA_SAMPLE_LOCS_8S_WD1
1830x00028C20 PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX
1840x00028C1C PA_SC_AA_SAMPLE_LOCS_MCTX
1850x00028214 PA_SC_CLIPRECT_0_BR
1860x0002821C PA_SC_CLIPRECT_1_BR
1870x00028224 PA_SC_CLIPRECT_2_BR
1880x0002822C PA_SC_CLIPRECT_3_BR
1890x00028210 PA_SC_CLIPRECT_0_TL
1900x00028218 PA_SC_CLIPRECT_1_TL
1910x00028220 PA_SC_CLIPRECT_2_TL
1920x00028228 PA_SC_CLIPRECT_3_TL
1930x0002820C PA_SC_CLIPRECT_RULE
1940x00008BF0 PA_SC_ENHANCE
1950x00028244 PA_SC_GENERIC_SCISSOR_BR
1960x00028240 PA_SC_GENERIC_SCISSOR_TL
1970x00028C00 PA_SC_LINE_CNTL
1980x00028A0C PA_SC_LINE_STIPPLE
1990x00008B10 PA_SC_LINE_STIPPLE_STATE
2000x00028A4C PA_SC_MODE_CNTL
2010x00028A48 PA_SC_MPASS_PS_CNTL
2020x00008B20 PA_SC_MULTI_CHIP_CNTL
2030x00028034 PA_SC_SCREEN_SCISSOR_BR
2040x00028030 PA_SC_SCREEN_SCISSOR_TL
2050x00028254 PA_SC_VPORT_SCISSOR_0_BR
2060x0002825C PA_SC_VPORT_SCISSOR_1_BR
2070x00028264 PA_SC_VPORT_SCISSOR_2_BR
2080x0002826C PA_SC_VPORT_SCISSOR_3_BR
2090x00028274 PA_SC_VPORT_SCISSOR_4_BR
2100x0002827C PA_SC_VPORT_SCISSOR_5_BR
2110x00028284 PA_SC_VPORT_SCISSOR_6_BR
2120x0002828C PA_SC_VPORT_SCISSOR_7_BR
2130x00028294 PA_SC_VPORT_SCISSOR_8_BR
2140x0002829C PA_SC_VPORT_SCISSOR_9_BR
2150x000282A4 PA_SC_VPORT_SCISSOR_10_BR
2160x000282AC PA_SC_VPORT_SCISSOR_11_BR
2170x000282B4 PA_SC_VPORT_SCISSOR_12_BR
2180x000282BC PA_SC_VPORT_SCISSOR_13_BR
2190x000282C4 PA_SC_VPORT_SCISSOR_14_BR
2200x000282CC PA_SC_VPORT_SCISSOR_15_BR
2210x00028250 PA_SC_VPORT_SCISSOR_0_TL
2220x00028258 PA_SC_VPORT_SCISSOR_1_TL
2230x00028260 PA_SC_VPORT_SCISSOR_2_TL
2240x00028268 PA_SC_VPORT_SCISSOR_3_TL
2250x00028270 PA_SC_VPORT_SCISSOR_4_TL
2260x00028278 PA_SC_VPORT_SCISSOR_5_TL
2270x00028280 PA_SC_VPORT_SCISSOR_6_TL
2280x00028288 PA_SC_VPORT_SCISSOR_7_TL
2290x00028290 PA_SC_VPORT_SCISSOR_8_TL
2300x00028298 PA_SC_VPORT_SCISSOR_9_TL
2310x000282A0 PA_SC_VPORT_SCISSOR_10_TL
2320x000282A8 PA_SC_VPORT_SCISSOR_11_TL
2330x000282B0 PA_SC_VPORT_SCISSOR_12_TL
2340x000282B8 PA_SC_VPORT_SCISSOR_13_TL
2350x000282C0 PA_SC_VPORT_SCISSOR_14_TL
2360x000282C8 PA_SC_VPORT_SCISSOR_15_TL
2370x000282D4 PA_SC_VPORT_ZMAX_0
2380x000282DC PA_SC_VPORT_ZMAX_1
2390x000282E4 PA_SC_VPORT_ZMAX_2
2400x000282EC PA_SC_VPORT_ZMAX_3
2410x000282F4 PA_SC_VPORT_ZMAX_4
2420x000282FC PA_SC_VPORT_ZMAX_5
2430x00028304 PA_SC_VPORT_ZMAX_6
2440x0002830C PA_SC_VPORT_ZMAX_7
2450x00028314 PA_SC_VPORT_ZMAX_8
2460x0002831C PA_SC_VPORT_ZMAX_9
2470x00028324 PA_SC_VPORT_ZMAX_10
2480x0002832C PA_SC_VPORT_ZMAX_11
2490x00028334 PA_SC_VPORT_ZMAX_12
2500x0002833C PA_SC_VPORT_ZMAX_13
2510x00028344 PA_SC_VPORT_ZMAX_14
2520x0002834C PA_SC_VPORT_ZMAX_15
2530x000282D0 PA_SC_VPORT_ZMIN_0
2540x000282D8 PA_SC_VPORT_ZMIN_1
2550x000282E0 PA_SC_VPORT_ZMIN_2
2560x000282E8 PA_SC_VPORT_ZMIN_3
2570x000282F0 PA_SC_VPORT_ZMIN_4
2580x000282F8 PA_SC_VPORT_ZMIN_5
2590x00028300 PA_SC_VPORT_ZMIN_6
2600x00028308 PA_SC_VPORT_ZMIN_7
2610x00028310 PA_SC_VPORT_ZMIN_8
2620x00028318 PA_SC_VPORT_ZMIN_9
2630x00028320 PA_SC_VPORT_ZMIN_10
2640x00028328 PA_SC_VPORT_ZMIN_11
2650x00028330 PA_SC_VPORT_ZMIN_12
2660x00028338 PA_SC_VPORT_ZMIN_13
2670x00028340 PA_SC_VPORT_ZMIN_14
2680x00028348 PA_SC_VPORT_ZMIN_15
2690x00028200 PA_SC_WINDOW_OFFSET
2700x00028208 PA_SC_WINDOW_SCISSOR_BR
2710x00028204 PA_SC_WINDOW_SCISSOR_TL
2720x00028A08 PA_SU_LINE_CNTL
2730x00028A04 PA_SU_POINT_MINMAX
2740x00028A00 PA_SU_POINT_SIZE
2750x00028E0C PA_SU_POLY_OFFSET_BACK_OFFSET
2760x00028E08 PA_SU_POLY_OFFSET_BACK_SCALE
2770x00028DFC PA_SU_POLY_OFFSET_CLAMP
2780x00028DF8 PA_SU_POLY_OFFSET_DB_FMT_CNTL
2790x00028E04 PA_SU_POLY_OFFSET_FRONT_OFFSET
2800x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
2810x00028814 PA_SU_SC_MODE_CNTL
2820x00028C08 PA_SU_VTX_CNTL
2830x00008C00 SQ_CONFIG
2840x00008C04 SQ_GPR_RESOURCE_MGMT_1
2850x00008C08 SQ_GPR_RESOURCE_MGMT_2
2860x00008C10 SQ_STACK_RESOURCE_MGMT_1
2870x00008C14 SQ_STACK_RESOURCE_MGMT_2
2880x00008C0C SQ_THREAD_RESOURCE_MGMT
2890x00028380 SQ_VTX_SEMANTIC_0
2900x00028384 SQ_VTX_SEMANTIC_1
2910x00028388 SQ_VTX_SEMANTIC_2
2920x0002838C SQ_VTX_SEMANTIC_3
2930x00028390 SQ_VTX_SEMANTIC_4
2940x00028394 SQ_VTX_SEMANTIC_5
2950x00028398 SQ_VTX_SEMANTIC_6
2960x0002839C SQ_VTX_SEMANTIC_7
2970x000283A0 SQ_VTX_SEMANTIC_8
2980x000283A4 SQ_VTX_SEMANTIC_9
2990x000283A8 SQ_VTX_SEMANTIC_10
3000x000283AC SQ_VTX_SEMANTIC_11
3010x000283B0 SQ_VTX_SEMANTIC_12
3020x000283B4 SQ_VTX_SEMANTIC_13
3030x000283B8 SQ_VTX_SEMANTIC_14
3040x000283BC SQ_VTX_SEMANTIC_15
3050x000283C0 SQ_VTX_SEMANTIC_16
3060x000283C4 SQ_VTX_SEMANTIC_17
3070x000283C8 SQ_VTX_SEMANTIC_18
3080x000283CC SQ_VTX_SEMANTIC_19
3090x000283D0 SQ_VTX_SEMANTIC_20
3100x000283D4 SQ_VTX_SEMANTIC_21
3110x000283D8 SQ_VTX_SEMANTIC_22
3120x000283DC SQ_VTX_SEMANTIC_23
3130x000283E0 SQ_VTX_SEMANTIC_24
3140x000283E4 SQ_VTX_SEMANTIC_25
3150x000283E8 SQ_VTX_SEMANTIC_26
3160x000283EC SQ_VTX_SEMANTIC_27
3170x000283F0 SQ_VTX_SEMANTIC_28
3180x000283F4 SQ_VTX_SEMANTIC_29
3190x000283F8 SQ_VTX_SEMANTIC_30
3200x000283FC SQ_VTX_SEMANTIC_31
3210x000288E0 SQ_VTX_SEMANTIC_CLEAR
3220x0003CFF4 SQ_VTX_START_INST_LOC
3230x0003C000 SQ_TEX_SAMPLER_WORD0_0
3240x0003C004 SQ_TEX_SAMPLER_WORD1_0
3250x0003C008 SQ_TEX_SAMPLER_WORD2_0
3260x00030000 SQ_ALU_CONSTANT0_0
3270x00030004 SQ_ALU_CONSTANT1_0
3280x00030008 SQ_ALU_CONSTANT2_0
3290x0003000C SQ_ALU_CONSTANT3_0
3300x0003E380 SQ_BOOL_CONST_0
3310x0003E384 SQ_BOOL_CONST_1
3320x0003E388 SQ_BOOL_CONST_2
3330x0003E200 SQ_LOOP_CONST_0
3340x0003E200 SQ_LOOP_CONST_DX10_0
3350x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
3360x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
3370x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
3380x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
3390x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
3400x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
3410x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
3420x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
3430x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
3440x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
3450x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
3460x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
3470x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
3480x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
3490x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
3500x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
3510x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
3520x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
3530x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
3540x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
3550x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
3560x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
3570x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
3580x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
3590x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
3600x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
3610x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
3620x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
3630x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
3640x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
3650x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
3660x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
3670x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
3680x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
3690x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
3700x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
3710x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
3720x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
3730x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
3740x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
3750x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
3760x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
3770x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
3780x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
3790x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
3800x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
3810x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
3820x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
3830x000289C0 SQ_ALU_CONST_CACHE_GS_0
3840x000289C4 SQ_ALU_CONST_CACHE_GS_1
3850x000289C8 SQ_ALU_CONST_CACHE_GS_2
3860x000289CC SQ_ALU_CONST_CACHE_GS_3
3870x000289D0 SQ_ALU_CONST_CACHE_GS_4
3880x000289D4 SQ_ALU_CONST_CACHE_GS_5
3890x000289D8 SQ_ALU_CONST_CACHE_GS_6
3900x000289DC SQ_ALU_CONST_CACHE_GS_7
3910x000289E0 SQ_ALU_CONST_CACHE_GS_8
3920x000289E4 SQ_ALU_CONST_CACHE_GS_9
3930x000289E8 SQ_ALU_CONST_CACHE_GS_10
3940x000289EC SQ_ALU_CONST_CACHE_GS_11
3950x000289F0 SQ_ALU_CONST_CACHE_GS_12
3960x000289F4 SQ_ALU_CONST_CACHE_GS_13
3970x000289F8 SQ_ALU_CONST_CACHE_GS_14
3980x000289FC SQ_ALU_CONST_CACHE_GS_15
3990x00028940 SQ_ALU_CONST_CACHE_PS_0
4000x00028944 SQ_ALU_CONST_CACHE_PS_1
4010x00028948 SQ_ALU_CONST_CACHE_PS_2
4020x0002894C SQ_ALU_CONST_CACHE_PS_3
4030x00028950 SQ_ALU_CONST_CACHE_PS_4
4040x00028954 SQ_ALU_CONST_CACHE_PS_5
4050x00028958 SQ_ALU_CONST_CACHE_PS_6
4060x0002895C SQ_ALU_CONST_CACHE_PS_7
4070x00028960 SQ_ALU_CONST_CACHE_PS_8
4080x00028964 SQ_ALU_CONST_CACHE_PS_9
4090x00028968 SQ_ALU_CONST_CACHE_PS_10
4100x0002896C SQ_ALU_CONST_CACHE_PS_11
4110x00028970 SQ_ALU_CONST_CACHE_PS_12
4120x00028974 SQ_ALU_CONST_CACHE_PS_13
4130x00028978 SQ_ALU_CONST_CACHE_PS_14
4140x0002897C SQ_ALU_CONST_CACHE_PS_15
4150x00028980 SQ_ALU_CONST_CACHE_VS_0
4160x00028984 SQ_ALU_CONST_CACHE_VS_1
4170x00028988 SQ_ALU_CONST_CACHE_VS_2
4180x0002898C SQ_ALU_CONST_CACHE_VS_3
4190x00028990 SQ_ALU_CONST_CACHE_VS_4
4200x00028994 SQ_ALU_CONST_CACHE_VS_5
4210x00028998 SQ_ALU_CONST_CACHE_VS_6
4220x0002899C SQ_ALU_CONST_CACHE_VS_7
4230x000289A0 SQ_ALU_CONST_CACHE_VS_8
4240x000289A4 SQ_ALU_CONST_CACHE_VS_9
4250x000289A8 SQ_ALU_CONST_CACHE_VS_10
4260x000289AC SQ_ALU_CONST_CACHE_VS_11
4270x000289B0 SQ_ALU_CONST_CACHE_VS_12
4280x000289B4 SQ_ALU_CONST_CACHE_VS_13
4290x000289B8 SQ_ALU_CONST_CACHE_VS_14
4300x000289BC SQ_ALU_CONST_CACHE_VS_15
4310x000288D8 SQ_PGM_CF_OFFSET_ES
4320x000288DC SQ_PGM_CF_OFFSET_FS
4330x000288D4 SQ_PGM_CF_OFFSET_GS
4340x000288CC SQ_PGM_CF_OFFSET_PS
4350x000288D0 SQ_PGM_CF_OFFSET_VS
4360x00028854 SQ_PGM_EXPORTS_PS
4370x00028890 SQ_PGM_RESOURCES_ES
4380x000288A4 SQ_PGM_RESOURCES_FS
4390x0002887C SQ_PGM_RESOURCES_GS
4400x00028850 SQ_PGM_RESOURCES_PS
4410x00028868 SQ_PGM_RESOURCES_VS
4420x00009100 SPI_CONFIG_CNTL
4430x0000913C SPI_CONFIG_CNTL_1
4440x000286DC SPI_FOG_CNTL
4450x000286E4 SPI_FOG_FUNC_BIAS
4460x000286E0 SPI_FOG_FUNC_SCALE
4470x000286D8 SPI_INPUT_Z
4480x000286D4 SPI_INTERP_CONTROL_0
4490x00028644 SPI_PS_INPUT_CNTL_0
4500x00028648 SPI_PS_INPUT_CNTL_1
4510x0002864C SPI_PS_INPUT_CNTL_2
4520x00028650 SPI_PS_INPUT_CNTL_3
4530x00028654 SPI_PS_INPUT_CNTL_4
4540x00028658 SPI_PS_INPUT_CNTL_5
4550x0002865C SPI_PS_INPUT_CNTL_6
4560x00028660 SPI_PS_INPUT_CNTL_7
4570x00028664 SPI_PS_INPUT_CNTL_8
4580x00028668 SPI_PS_INPUT_CNTL_9
4590x0002866C SPI_PS_INPUT_CNTL_10
4600x00028670 SPI_PS_INPUT_CNTL_11
4610x00028674 SPI_PS_INPUT_CNTL_12
4620x00028678 SPI_PS_INPUT_CNTL_13
4630x0002867C SPI_PS_INPUT_CNTL_14
4640x00028680 SPI_PS_INPUT_CNTL_15
4650x00028684 SPI_PS_INPUT_CNTL_16
4660x00028688 SPI_PS_INPUT_CNTL_17
4670x0002868C SPI_PS_INPUT_CNTL_18
4680x00028690 SPI_PS_INPUT_CNTL_19
4690x00028694 SPI_PS_INPUT_CNTL_20
4700x00028698 SPI_PS_INPUT_CNTL_21
4710x0002869C SPI_PS_INPUT_CNTL_22
4720x000286A0 SPI_PS_INPUT_CNTL_23
4730x000286A4 SPI_PS_INPUT_CNTL_24
4740x000286A8 SPI_PS_INPUT_CNTL_25
4750x000286AC SPI_PS_INPUT_CNTL_26
4760x000286B0 SPI_PS_INPUT_CNTL_27
4770x000286B4 SPI_PS_INPUT_CNTL_28
4780x000286B8 SPI_PS_INPUT_CNTL_29
4790x000286BC SPI_PS_INPUT_CNTL_30
4800x000286C0 SPI_PS_INPUT_CNTL_31
4810x000286CC SPI_PS_IN_CONTROL_0
4820x000286D0 SPI_PS_IN_CONTROL_1
4830x000286C4 SPI_VS_OUT_CONFIG
4840x00028614 SPI_VS_OUT_ID_0
4850x00028618 SPI_VS_OUT_ID_1
4860x0002861C SPI_VS_OUT_ID_2
4870x00028620 SPI_VS_OUT_ID_3
4880x00028624 SPI_VS_OUT_ID_4
4890x00028628 SPI_VS_OUT_ID_5
4900x0002862C SPI_VS_OUT_ID_6
4910x00028630 SPI_VS_OUT_ID_7
4920x00028634 SPI_VS_OUT_ID_8
4930x00028638 SPI_VS_OUT_ID_9
4940x00028438 SX_ALPHA_REF
4950x00028410 SX_ALPHA_TEST_CONTROL
4960x00028350 SX_MISC
4970x0000A020 SMX_DC_CTL0
4980x0000A024 SMX_DC_CTL1
4990x0000A028 SMX_DC_CTL2
5000x00009608 TC_CNTL
5010x00009604 TC_INVALIDATE
5020x00009490 TD_CNTL
5030x00009400 TD_FILTER4
5040x00009404 TD_FILTER4_1
5050x00009408 TD_FILTER4_2
5060x0000940C TD_FILTER4_3
5070x00009410 TD_FILTER4_4
5080x00009414 TD_FILTER4_5
5090x00009418 TD_FILTER4_6
5100x0000941C TD_FILTER4_7
5110x00009420 TD_FILTER4_8
5120x00009424 TD_FILTER4_9
5130x00009428 TD_FILTER4_10
5140x0000942C TD_FILTER4_11
5150x00009430 TD_FILTER4_12
5160x00009434 TD_FILTER4_13
5170x00009438 TD_FILTER4_14
5180x0000943C TD_FILTER4_15
5190x00009440 TD_FILTER4_16
5200x00009444 TD_FILTER4_17
5210x00009448 TD_FILTER4_18
5220x0000944C TD_FILTER4_19
5230x00009450 TD_FILTER4_20
5240x00009454 TD_FILTER4_21
5250x00009458 TD_FILTER4_22
5260x0000945C TD_FILTER4_23
5270x00009460 TD_FILTER4_24
5280x00009464 TD_FILTER4_25
5290x00009468 TD_FILTER4_26
5300x0000946C TD_FILTER4_27
5310x00009470 TD_FILTER4_28
5320x00009474 TD_FILTER4_29
5330x00009478 TD_FILTER4_30
5340x0000947C TD_FILTER4_31
5350x00009480 TD_FILTER4_32
5360x00009484 TD_FILTER4_33
5370x00009488 TD_FILTER4_34
5380x0000948C TD_FILTER4_35
5390x0000A80C TD_GS_SAMPLER0_BORDER_ALPHA
5400x0000A81C TD_GS_SAMPLER1_BORDER_ALPHA
5410x0000A82C TD_GS_SAMPLER2_BORDER_ALPHA
5420x0000A83C TD_GS_SAMPLER3_BORDER_ALPHA
5430x0000A84C TD_GS_SAMPLER4_BORDER_ALPHA
5440x0000A85C TD_GS_SAMPLER5_BORDER_ALPHA
5450x0000A86C TD_GS_SAMPLER6_BORDER_ALPHA
5460x0000A87C TD_GS_SAMPLER7_BORDER_ALPHA
5470x0000A88C TD_GS_SAMPLER8_BORDER_ALPHA
5480x0000A89C TD_GS_SAMPLER9_BORDER_ALPHA
5490x0000A8AC TD_GS_SAMPLER10_BORDER_ALPHA
5500x0000A8BC TD_GS_SAMPLER11_BORDER_ALPHA
5510x0000A8CC TD_GS_SAMPLER12_BORDER_ALPHA
5520x0000A8DC TD_GS_SAMPLER13_BORDER_ALPHA
5530x0000A8EC TD_GS_SAMPLER14_BORDER_ALPHA
5540x0000A8FC TD_GS_SAMPLER15_BORDER_ALPHA
5550x0000A90C TD_GS_SAMPLER16_BORDER_ALPHA
5560x0000A91C TD_GS_SAMPLER17_BORDER_ALPHA
5570x0000A808 TD_GS_SAMPLER0_BORDER_BLUE
5580x0000A818 TD_GS_SAMPLER1_BORDER_BLUE
5590x0000A828 TD_GS_SAMPLER2_BORDER_BLUE
5600x0000A838 TD_GS_SAMPLER3_BORDER_BLUE
5610x0000A848 TD_GS_SAMPLER4_BORDER_BLUE
5620x0000A858 TD_GS_SAMPLER5_BORDER_BLUE
5630x0000A868 TD_GS_SAMPLER6_BORDER_BLUE
5640x0000A878 TD_GS_SAMPLER7_BORDER_BLUE
5650x0000A888 TD_GS_SAMPLER8_BORDER_BLUE
5660x0000A898 TD_GS_SAMPLER9_BORDER_BLUE
5670x0000A8A8 TD_GS_SAMPLER10_BORDER_BLUE
5680x0000A8B8 TD_GS_SAMPLER11_BORDER_BLUE
5690x0000A8C8 TD_GS_SAMPLER12_BORDER_BLUE
5700x0000A8D8 TD_GS_SAMPLER13_BORDER_BLUE
5710x0000A8E8 TD_GS_SAMPLER14_BORDER_BLUE
5720x0000A8F8 TD_GS_SAMPLER15_BORDER_BLUE
5730x0000A908 TD_GS_SAMPLER16_BORDER_BLUE
5740x0000A918 TD_GS_SAMPLER17_BORDER_BLUE
5750x0000A804 TD_GS_SAMPLER0_BORDER_GREEN
5760x0000A814 TD_GS_SAMPLER1_BORDER_GREEN
5770x0000A824 TD_GS_SAMPLER2_BORDER_GREEN
5780x0000A834 TD_GS_SAMPLER3_BORDER_GREEN
5790x0000A844 TD_GS_SAMPLER4_BORDER_GREEN
5800x0000A854 TD_GS_SAMPLER5_BORDER_GREEN
5810x0000A864 TD_GS_SAMPLER6_BORDER_GREEN
5820x0000A874 TD_GS_SAMPLER7_BORDER_GREEN
5830x0000A884 TD_GS_SAMPLER8_BORDER_GREEN
5840x0000A894 TD_GS_SAMPLER9_BORDER_GREEN
5850x0000A8A4 TD_GS_SAMPLER10_BORDER_GREEN
5860x0000A8B4 TD_GS_SAMPLER11_BORDER_GREEN
5870x0000A8C4 TD_GS_SAMPLER12_BORDER_GREEN
5880x0000A8D4 TD_GS_SAMPLER13_BORDER_GREEN
5890x0000A8E4 TD_GS_SAMPLER14_BORDER_GREEN
5900x0000A8F4 TD_GS_SAMPLER15_BORDER_GREEN
5910x0000A904 TD_GS_SAMPLER16_BORDER_GREEN
5920x0000A914 TD_GS_SAMPLER17_BORDER_GREEN
5930x0000A800 TD_GS_SAMPLER0_BORDER_RED
5940x0000A810 TD_GS_SAMPLER1_BORDER_RED
5950x0000A820 TD_GS_SAMPLER2_BORDER_RED
5960x0000A830 TD_GS_SAMPLER3_BORDER_RED
5970x0000A840 TD_GS_SAMPLER4_BORDER_RED
5980x0000A850 TD_GS_SAMPLER5_BORDER_RED
5990x0000A860 TD_GS_SAMPLER6_BORDER_RED
6000x0000A870 TD_GS_SAMPLER7_BORDER_RED
6010x0000A880 TD_GS_SAMPLER8_BORDER_RED
6020x0000A890 TD_GS_SAMPLER9_BORDER_RED
6030x0000A8A0 TD_GS_SAMPLER10_BORDER_RED
6040x0000A8B0 TD_GS_SAMPLER11_BORDER_RED
6050x0000A8C0 TD_GS_SAMPLER12_BORDER_RED
6060x0000A8D0 TD_GS_SAMPLER13_BORDER_RED
6070x0000A8E0 TD_GS_SAMPLER14_BORDER_RED
6080x0000A8F0 TD_GS_SAMPLER15_BORDER_RED
6090x0000A900 TD_GS_SAMPLER16_BORDER_RED
6100x0000A910 TD_GS_SAMPLER17_BORDER_RED
6110x0000A40C TD_PS_SAMPLER0_BORDER_ALPHA
6120x0000A41C TD_PS_SAMPLER1_BORDER_ALPHA
6130x0000A42C TD_PS_SAMPLER2_BORDER_ALPHA
6140x0000A43C TD_PS_SAMPLER3_BORDER_ALPHA
6150x0000A44C TD_PS_SAMPLER4_BORDER_ALPHA
6160x0000A45C TD_PS_SAMPLER5_BORDER_ALPHA
6170x0000A46C TD_PS_SAMPLER6_BORDER_ALPHA
6180x0000A47C TD_PS_SAMPLER7_BORDER_ALPHA
6190x0000A48C TD_PS_SAMPLER8_BORDER_ALPHA
6200x0000A49C TD_PS_SAMPLER9_BORDER_ALPHA
6210x0000A4AC TD_PS_SAMPLER10_BORDER_ALPHA
6220x0000A4BC TD_PS_SAMPLER11_BORDER_ALPHA
6230x0000A4CC TD_PS_SAMPLER12_BORDER_ALPHA
6240x0000A4DC TD_PS_SAMPLER13_BORDER_ALPHA
6250x0000A4EC TD_PS_SAMPLER14_BORDER_ALPHA
6260x0000A4FC TD_PS_SAMPLER15_BORDER_ALPHA
6270x0000A50C TD_PS_SAMPLER16_BORDER_ALPHA
6280x0000A51C TD_PS_SAMPLER17_BORDER_ALPHA
6290x0000A408 TD_PS_SAMPLER0_BORDER_BLUE
6300x0000A418 TD_PS_SAMPLER1_BORDER_BLUE
6310x0000A428 TD_PS_SAMPLER2_BORDER_BLUE
6320x0000A438 TD_PS_SAMPLER3_BORDER_BLUE
6330x0000A448 TD_PS_SAMPLER4_BORDER_BLUE
6340x0000A458 TD_PS_SAMPLER5_BORDER_BLUE
6350x0000A468 TD_PS_SAMPLER6_BORDER_BLUE
6360x0000A478 TD_PS_SAMPLER7_BORDER_BLUE
6370x0000A488 TD_PS_SAMPLER8_BORDER_BLUE
6380x0000A498 TD_PS_SAMPLER9_BORDER_BLUE
6390x0000A4A8 TD_PS_SAMPLER10_BORDER_BLUE
6400x0000A4B8 TD_PS_SAMPLER11_BORDER_BLUE
6410x0000A4C8 TD_PS_SAMPLER12_BORDER_BLUE
6420x0000A4D8 TD_PS_SAMPLER13_BORDER_BLUE
6430x0000A4E8 TD_PS_SAMPLER14_BORDER_BLUE
6440x0000A4F8 TD_PS_SAMPLER15_BORDER_BLUE
6450x0000A508 TD_PS_SAMPLER16_BORDER_BLUE
6460x0000A518 TD_PS_SAMPLER17_BORDER_BLUE
6470x0000A404 TD_PS_SAMPLER0_BORDER_GREEN
6480x0000A414 TD_PS_SAMPLER1_BORDER_GREEN
6490x0000A424 TD_PS_SAMPLER2_BORDER_GREEN
6500x0000A434 TD_PS_SAMPLER3_BORDER_GREEN
6510x0000A444 TD_PS_SAMPLER4_BORDER_GREEN
6520x0000A454 TD_PS_SAMPLER5_BORDER_GREEN
6530x0000A464 TD_PS_SAMPLER6_BORDER_GREEN
6540x0000A474 TD_PS_SAMPLER7_BORDER_GREEN
6550x0000A484 TD_PS_SAMPLER8_BORDER_GREEN
6560x0000A494 TD_PS_SAMPLER9_BORDER_GREEN
6570x0000A4A4 TD_PS_SAMPLER10_BORDER_GREEN
6580x0000A4B4 TD_PS_SAMPLER11_BORDER_GREEN
6590x0000A4C4 TD_PS_SAMPLER12_BORDER_GREEN
6600x0000A4D4 TD_PS_SAMPLER13_BORDER_GREEN
6610x0000A4E4 TD_PS_SAMPLER14_BORDER_GREEN
6620x0000A4F4 TD_PS_SAMPLER15_BORDER_GREEN
6630x0000A504 TD_PS_SAMPLER16_BORDER_GREEN
6640x0000A514 TD_PS_SAMPLER17_BORDER_GREEN
6650x0000A400 TD_PS_SAMPLER0_BORDER_RED
6660x0000A410 TD_PS_SAMPLER1_BORDER_RED
6670x0000A420 TD_PS_SAMPLER2_BORDER_RED
6680x0000A430 TD_PS_SAMPLER3_BORDER_RED
6690x0000A440 TD_PS_SAMPLER4_BORDER_RED
6700x0000A450 TD_PS_SAMPLER5_BORDER_RED
6710x0000A460 TD_PS_SAMPLER6_BORDER_RED
6720x0000A470 TD_PS_SAMPLER7_BORDER_RED
6730x0000A480 TD_PS_SAMPLER8_BORDER_RED
6740x0000A490 TD_PS_SAMPLER9_BORDER_RED
6750x0000A4A0 TD_PS_SAMPLER10_BORDER_RED
6760x0000A4B0 TD_PS_SAMPLER11_BORDER_RED
6770x0000A4C0 TD_PS_SAMPLER12_BORDER_RED
6780x0000A4D0 TD_PS_SAMPLER13_BORDER_RED
6790x0000A4E0 TD_PS_SAMPLER14_BORDER_RED
6800x0000A4F0 TD_PS_SAMPLER15_BORDER_RED
6810x0000A500 TD_PS_SAMPLER16_BORDER_RED
6820x0000A510 TD_PS_SAMPLER17_BORDER_RED
6830x0000AA00 TD_PS_SAMPLER0_CLEARTYPE_KERNEL
6840x0000AA04 TD_PS_SAMPLER1_CLEARTYPE_KERNEL
6850x0000AA08 TD_PS_SAMPLER2_CLEARTYPE_KERNEL
6860x0000AA0C TD_PS_SAMPLER3_CLEARTYPE_KERNEL
6870x0000AA10 TD_PS_SAMPLER4_CLEARTYPE_KERNEL
6880x0000AA14 TD_PS_SAMPLER5_CLEARTYPE_KERNEL
6890x0000AA18 TD_PS_SAMPLER6_CLEARTYPE_KERNEL
6900x0000AA1C TD_PS_SAMPLER7_CLEARTYPE_KERNEL
6910x0000AA20 TD_PS_SAMPLER8_CLEARTYPE_KERNEL
6920x0000AA24 TD_PS_SAMPLER9_CLEARTYPE_KERNEL
6930x0000AA28 TD_PS_SAMPLER10_CLEARTYPE_KERNEL
6940x0000AA2C TD_PS_SAMPLER11_CLEARTYPE_KERNEL
6950x0000AA30 TD_PS_SAMPLER12_CLEARTYPE_KERNEL
6960x0000AA34 TD_PS_SAMPLER13_CLEARTYPE_KERNEL
6970x0000AA38 TD_PS_SAMPLER14_CLEARTYPE_KERNEL
6980x0000AA3C TD_PS_SAMPLER15_CLEARTYPE_KERNEL
6990x0000AA40 TD_PS_SAMPLER16_CLEARTYPE_KERNEL
7000x0000AA44 TD_PS_SAMPLER17_CLEARTYPE_KERNEL
7010x0000A60C TD_VS_SAMPLER0_BORDER_ALPHA
7020x0000A61C TD_VS_SAMPLER1_BORDER_ALPHA
7030x0000A62C TD_VS_SAMPLER2_BORDER_ALPHA
7040x0000A63C TD_VS_SAMPLER3_BORDER_ALPHA
7050x0000A64C TD_VS_SAMPLER4_BORDER_ALPHA
7060x0000A65C TD_VS_SAMPLER5_BORDER_ALPHA
7070x0000A66C TD_VS_SAMPLER6_BORDER_ALPHA
7080x0000A67C TD_VS_SAMPLER7_BORDER_ALPHA
7090x0000A68C TD_VS_SAMPLER8_BORDER_ALPHA
7100x0000A69C TD_VS_SAMPLER9_BORDER_ALPHA
7110x0000A6AC TD_VS_SAMPLER10_BORDER_ALPHA
7120x0000A6BC TD_VS_SAMPLER11_BORDER_ALPHA
7130x0000A6CC TD_VS_SAMPLER12_BORDER_ALPHA
7140x0000A6DC TD_VS_SAMPLER13_BORDER_ALPHA
7150x0000A6EC TD_VS_SAMPLER14_BORDER_ALPHA
7160x0000A6FC TD_VS_SAMPLER15_BORDER_ALPHA
7170x0000A70C TD_VS_SAMPLER16_BORDER_ALPHA
7180x0000A71C TD_VS_SAMPLER17_BORDER_ALPHA
7190x0000A608 TD_VS_SAMPLER0_BORDER_BLUE
7200x0000A618 TD_VS_SAMPLER1_BORDER_BLUE
7210x0000A628 TD_VS_SAMPLER2_BORDER_BLUE
7220x0000A638 TD_VS_SAMPLER3_BORDER_BLUE
7230x0000A648 TD_VS_SAMPLER4_BORDER_BLUE
7240x0000A658 TD_VS_SAMPLER5_BORDER_BLUE
7250x0000A668 TD_VS_SAMPLER6_BORDER_BLUE
7260x0000A678 TD_VS_SAMPLER7_BORDER_BLUE
7270x0000A688 TD_VS_SAMPLER8_BORDER_BLUE
7280x0000A698 TD_VS_SAMPLER9_BORDER_BLUE
7290x0000A6A8 TD_VS_SAMPLER10_BORDER_BLUE
7300x0000A6B8 TD_VS_SAMPLER11_BORDER_BLUE
7310x0000A6C8 TD_VS_SAMPLER12_BORDER_BLUE
7320x0000A6D8 TD_VS_SAMPLER13_BORDER_BLUE
7330x0000A6E8 TD_VS_SAMPLER14_BORDER_BLUE
7340x0000A6F8 TD_VS_SAMPLER15_BORDER_BLUE
7350x0000A708 TD_VS_SAMPLER16_BORDER_BLUE
7360x0000A718 TD_VS_SAMPLER17_BORDER_BLUE
7370x0000A604 TD_VS_SAMPLER0_BORDER_GREEN
7380x0000A614 TD_VS_SAMPLER1_BORDER_GREEN
7390x0000A624 TD_VS_SAMPLER2_BORDER_GREEN
7400x0000A634 TD_VS_SAMPLER3_BORDER_GREEN
7410x0000A644 TD_VS_SAMPLER4_BORDER_GREEN
7420x0000A654 TD_VS_SAMPLER5_BORDER_GREEN
7430x0000A664 TD_VS_SAMPLER6_BORDER_GREEN
7440x0000A674 TD_VS_SAMPLER7_BORDER_GREEN
7450x0000A684 TD_VS_SAMPLER8_BORDER_GREEN
7460x0000A694 TD_VS_SAMPLER9_BORDER_GREEN
7470x0000A6A4 TD_VS_SAMPLER10_BORDER_GREEN
7480x0000A6B4 TD_VS_SAMPLER11_BORDER_GREEN
7490x0000A6C4 TD_VS_SAMPLER12_BORDER_GREEN
7500x0000A6D4 TD_VS_SAMPLER13_BORDER_GREEN
7510x0000A6E4 TD_VS_SAMPLER14_BORDER_GREEN
7520x0000A6F4 TD_VS_SAMPLER15_BORDER_GREEN
7530x0000A704 TD_VS_SAMPLER16_BORDER_GREEN
7540x0000A714 TD_VS_SAMPLER17_BORDER_GREEN
7550x0000A600 TD_VS_SAMPLER0_BORDER_RED
7560x0000A610 TD_VS_SAMPLER1_BORDER_RED
7570x0000A620 TD_VS_SAMPLER2_BORDER_RED
7580x0000A630 TD_VS_SAMPLER3_BORDER_RED
7590x0000A640 TD_VS_SAMPLER4_BORDER_RED
7600x0000A650 TD_VS_SAMPLER5_BORDER_RED
7610x0000A660 TD_VS_SAMPLER6_BORDER_RED
7620x0000A670 TD_VS_SAMPLER7_BORDER_RED
7630x0000A680 TD_VS_SAMPLER8_BORDER_RED
7640x0000A690 TD_VS_SAMPLER9_BORDER_RED
7650x0000A6A0 TD_VS_SAMPLER10_BORDER_RED
7660x0000A6B0 TD_VS_SAMPLER11_BORDER_RED
7670x0000A6C0 TD_VS_SAMPLER12_BORDER_RED
7680x0000A6D0 TD_VS_SAMPLER13_BORDER_RED
7690x0000A6E0 TD_VS_SAMPLER14_BORDER_RED
7700x0000A6F0 TD_VS_SAMPLER15_BORDER_RED
7710x0000A700 TD_VS_SAMPLER16_BORDER_RED
7720x0000A710 TD_VS_SAMPLER17_BORDER_RED
7730x00009508 TA_CNTL_AUX
7740x0002802C DB_DEPTH_CLEAR
7750x00028D24 DB_HTILE_SURFACE
7760x00028D34 DB_PREFETCH_LIMIT
7770x00028D30 DB_PRELOAD_CONTROL
7780x00028D0C DB_RENDER_CONTROL
7790x00028D10 DB_RENDER_OVERRIDE
7800x0002880C DB_SHADER_CONTROL
7810x00028D2C DB_SRESULTS_COMPARE_STATE1
7820x00028430 DB_STENCILREFMASK
7830x00028434 DB_STENCILREFMASK_BF
7840x00028028 DB_STENCIL_CLEAR
7850x00028780 CB_BLEND0_CONTROL
7860x00028784 CB_BLEND1_CONTROL
7870x00028788 CB_BLEND2_CONTROL
7880x0002878C CB_BLEND3_CONTROL
7890x00028790 CB_BLEND4_CONTROL
7900x00028794 CB_BLEND5_CONTROL
7910x00028798 CB_BLEND6_CONTROL
7920x0002879C CB_BLEND7_CONTROL
7930x00028804 CB_BLEND_CONTROL
7940x00028420 CB_BLEND_ALPHA
7950x0002841C CB_BLEND_BLUE
7960x00028418 CB_BLEND_GREEN
7970x00028414 CB_BLEND_RED
7980x0002812C CB_CLEAR_ALPHA
7990x00028128 CB_CLEAR_BLUE
8000x00028124 CB_CLEAR_GREEN
8010x00028120 CB_CLEAR_RED
8020x00028C30 CB_CLRCMP_CONTROL
8030x00028C38 CB_CLRCMP_DST
8040x00028C3C CB_CLRCMP_MSK
8050x00028C34 CB_CLRCMP_SRC
8060x00028100 CB_COLOR0_MASK
8070x00028104 CB_COLOR1_MASK
8080x00028108 CB_COLOR2_MASK
8090x0002810C CB_COLOR3_MASK
8100x00028110 CB_COLOR4_MASK
8110x00028114 CB_COLOR5_MASK
8120x00028118 CB_COLOR6_MASK
8130x0002811C CB_COLOR7_MASK
8140x00028080 CB_COLOR0_VIEW
8150x00028084 CB_COLOR1_VIEW
8160x00028088 CB_COLOR2_VIEW
8170x0002808C CB_COLOR3_VIEW
8180x00028090 CB_COLOR4_VIEW
8190x00028094 CB_COLOR5_VIEW
8200x00028098 CB_COLOR6_VIEW
8210x0002809C CB_COLOR7_VIEW
8220x00028808 CB_COLOR_CONTROL
8230x0002842C CB_FOG_BLUE
8240x00028428 CB_FOG_GREEN
8250x00028424 CB_FOG_RED
8260x00008040 WAIT_UNTIL
8270x00008950 CC_GC_SHADER_PIPE_CONFIG
8280x00008954 GC_USER_SHADER_PIPE_CONFIG
8290x00009714 VC_ENHANCE
8300x00009830 DB_DEBUG
8310x00009838 DB_WATERMARKS
8320x00028D28 DB_SRESULTS_COMPARE_STATE0
8330x00028D44 DB_ALPHA_TO_MASK
8340x00009504 TA_CNTL
8350x00009700 VC_CNTL
8360x00009718 VC_CONFIG
8370x0000A02C SMX_DC_MC_INTF_CTL
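The new r600 list follows the same format as the existing reg_srcs files: a table name and an upper register bound on the first line, then one permitted register offset and name per line. These lists are not parsed at runtime; the mkregtable host tool in drivers/gpu/drm/radeon turns each one into a generated bitmap header that the command-stream checker consults. Conceptually the lookup amounts to something like the following; the helper and array names here are hypothetical, and the real index math lives in the generated header and its callers.

/* Illustrative lookup against a generated safe-register bitmap. */
static bool reg_write_is_allowed(const u32 *safe_bm, unsigned int bm_words,
				 u32 reg)
{
	unsigned int word = reg >> 7;		/* 32 dword registers per bitmap word */
	u32 bit = 1u << ((reg >> 2) & 31);	/* registers are 4-byte aligned */

	return word < bm_words && (safe_bm[word] & bit);
}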
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 287fcebfb4e6..626d51891ee9 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -113,6 +113,7 @@ int rs400_gart_enable(struct radeon_device *rdev)
113 uint32_t size_reg; 113 uint32_t size_reg;
114 uint32_t tmp; 114 uint32_t tmp;
115 115
116 radeon_gart_restore(rdev);
116 tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH); 117 tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
117 tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS; 118 tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
118 WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp); 119 WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
@@ -150,9 +151,8 @@ int rs400_gart_enable(struct radeon_device *rdev)
150 WREG32(RADEON_AGP_BASE, 0xFFFFFFFF); 151 WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
151 WREG32(RS480_AGP_BASE_2, 0); 152 WREG32(RS480_AGP_BASE_2, 0);
152 } 153 }
153 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 154 tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
154 tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16); 155 tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
155 tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
156 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) { 156 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
157 WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp); 157 WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
158 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; 158 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
@@ -251,14 +251,19 @@ void rs400_gpu_init(struct radeon_device *rdev)
251 } 251 }
252} 252}
253 253
254void rs400_vram_info(struct radeon_device *rdev) 254void rs400_mc_init(struct radeon_device *rdev)
255{ 255{
256 u64 base;
257
256 rs400_gart_adjust_size(rdev); 258 rs400_gart_adjust_size(rdev);
259 rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
257 /* DDR for all card after R300 & IGP */ 260 /* DDR for all card after R300 & IGP */
258 rdev->mc.vram_is_ddr = true; 261 rdev->mc.vram_is_ddr = true;
259 rdev->mc.vram_width = 128; 262 rdev->mc.vram_width = 128;
260
261 r100_vram_init_sizes(rdev); 263 r100_vram_init_sizes(rdev);
264 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
265 radeon_vram_location(rdev, &rdev->mc, base);
266 radeon_gtt_location(rdev, &rdev->mc);
262} 267}
263 268
264uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) 269uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -362,22 +367,6 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
362#endif 367#endif
363} 368}
364 369
365static int rs400_mc_init(struct radeon_device *rdev)
366{
367 int r;
368 u32 tmp;
369
370 /* Setup GPU memory space */
371 tmp = RREG32(R_00015C_NB_TOM);
372 rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
373 rdev->mc.gtt_location = 0xFFFFFFFFUL;
374 r = radeon_mc_setup(rdev);
375 rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
376 if (r)
377 return r;
378 return 0;
379}
380
381void rs400_mc_program(struct radeon_device *rdev) 370void rs400_mc_program(struct radeon_device *rdev)
382{ 371{
383 struct r100_mc_save save; 372 struct r100_mc_save save;
@@ -516,12 +505,8 @@ int rs400_init(struct radeon_device *rdev)
516 radeon_get_clock_info(rdev->ddev); 505 radeon_get_clock_info(rdev->ddev);
517 /* Initialize power management */ 506 /* Initialize power management */
518 radeon_pm_init(rdev); 507 radeon_pm_init(rdev);
519 /* Get vram informations */ 508 /* initialize memory controller */
520 rs400_vram_info(rdev); 509 rs400_mc_init(rdev);
521 /* Initialize memory controller (also test AGP) */
522 r = rs400_mc_init(rdev);
523 if (r)
524 return r;
525 /* Fence driver */ 510 /* Fence driver */
526 r = radeon_fence_driver_init(rdev); 511 r = radeon_fence_driver_init(rdev);
527 if (r) 512 if (r)
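The rs400 change above is part of the wider reorganisation visible in this series: each ASIC's *_vram_info()/*_mc_init() pair collapses into a single *_mc_init() that sizes VRAM and then delegates placement to the shared radeon_vram_location()/radeon_gtt_location() helpers, which fill in the new vram_start/vram_end and gtt_start/gtt_end fields in place of the old vram_location/gtt_location pair. As a rough, simplified sketch of what those helpers compute (the struct below is a stand-in carrying only the fields visible in the diff, and the real helpers additionally handle the cases where VRAM or GTT do not fit and report what they chose):

#include <linux/types.h>

struct mc_sketch {		/* stand-in for the radeon_mc fields used here */
	u64 vram_start, vram_end, mc_vram_size;
	u64 gtt_start, gtt_end, gtt_size;
};

static void vram_location_sketch(struct mc_sketch *mc, u64 base)
{
	/* VRAM occupies [base, base + size) in the GPU's address space */
	mc->vram_start = base;
	mc->vram_end = base + mc->mc_vram_size - 1;
}

static void gtt_location_sketch(struct mc_sketch *mc)
{
	/* place the GTT in whichever gap around VRAM is larger, keeping
	 * everything below the 32-bit MC limit */
	u64 size_af = 0xFFFFFFFFULL - mc->vram_end;	/* space after VRAM */
	u64 size_bf = mc->vram_start;			/* space before VRAM */

	if (size_bf > size_af)
		mc->gtt_start = mc->vram_start - mc->gtt_size;
	else
		mc->gtt_start = mc->vram_end + 1;
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
}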
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index c3818562a13e..47f046b78c6b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -45,23 +45,6 @@
45void rs600_gpu_init(struct radeon_device *rdev); 45void rs600_gpu_init(struct radeon_device *rdev);
46int rs600_mc_wait_for_idle(struct radeon_device *rdev); 46int rs600_mc_wait_for_idle(struct radeon_device *rdev);
47 47
48int rs600_mc_init(struct radeon_device *rdev)
49{
50 /* read back the MC value from the hw */
51 int r;
52 u32 tmp;
53
54 /* Setup GPU memory space */
55 tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
56 rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
57 rdev->mc.gtt_location = 0xffffffffUL;
58 r = radeon_mc_setup(rdev);
59 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
60 if (r)
61 return r;
62 return 0;
63}
64
65/* hpd for digital panel detect/disconnect */ 48/* hpd for digital panel detect/disconnect */
66bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 49bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
67{ 50{
@@ -213,6 +196,7 @@ int rs600_gart_enable(struct radeon_device *rdev)
213 r = radeon_gart_table_vram_pin(rdev); 196 r = radeon_gart_table_vram_pin(rdev);
214 if (r) 197 if (r)
215 return r; 198 return r;
199 radeon_gart_restore(rdev);
216 /* Enable bus master */ 200 /* Enable bus master */
217 tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS; 201 tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
218 WREG32(R_00004C_BUS_CNTL, tmp); 202 WREG32(R_00004C_BUS_CNTL, tmp);
@@ -406,10 +390,14 @@ int rs600_irq_process(struct radeon_device *rdev)
406 if (G_000044_SW_INT(status)) 390 if (G_000044_SW_INT(status))
407 radeon_fence_process(rdev); 391 radeon_fence_process(rdev);
408 /* Vertical blank interrupts */ 392 /* Vertical blank interrupts */
409 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) 393 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
410 drm_handle_vblank(rdev->ddev, 0); 394 drm_handle_vblank(rdev->ddev, 0);
411 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) 395 wake_up(&rdev->irq.vblank_queue);
396 }
397 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
412 drm_handle_vblank(rdev->ddev, 1); 398 drm_handle_vblank(rdev->ddev, 1);
399 wake_up(&rdev->irq.vblank_queue);
400 }
413 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { 401 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
414 queue_hotplug = true; 402 queue_hotplug = true;
415 DRM_DEBUG("HPD1\n"); 403 DRM_DEBUG("HPD1\n");
@@ -470,22 +458,22 @@ void rs600_gpu_init(struct radeon_device *rdev)
470 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); 458 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
471} 459}
472 460
473void rs600_vram_info(struct radeon_device *rdev) 461void rs600_mc_init(struct radeon_device *rdev)
474{ 462{
463 u64 base;
464
465 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
466 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
475 rdev->mc.vram_is_ddr = true; 467 rdev->mc.vram_is_ddr = true;
476 rdev->mc.vram_width = 128; 468 rdev->mc.vram_width = 128;
477
478 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 469 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
479 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 470 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
480 471 rdev->mc.visible_vram_size = rdev->mc.aper_size;
481 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 472 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
482 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 473 base = RREG32_MC(R_000004_MC_FB_LOCATION);
483 474 base = G_000004_MC_FB_START(base) << 16;
484 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) 475 radeon_vram_location(rdev, &rdev->mc, base);
485 rdev->mc.mc_vram_size = rdev->mc.aper_size; 476 radeon_gtt_location(rdev, &rdev->mc);
486
487 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
488 rdev->mc.real_vram_size = rdev->mc.aper_size;
489} 477}
490 478
491void rs600_bandwidth_update(struct radeon_device *rdev) 479void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -661,12 +649,8 @@ int rs600_init(struct radeon_device *rdev)
661 radeon_get_clock_info(rdev->ddev); 649 radeon_get_clock_info(rdev->ddev);
662 /* Initialize power management */ 650 /* Initialize power management */
663 radeon_pm_init(rdev); 651 radeon_pm_init(rdev);
664 /* Get vram informations */ 652 /* initialize memory controller */
665 rs600_vram_info(rdev); 653 rs600_mc_init(rdev);
666 /* Initialize memory controller (also test AGP) */
667 r = rs600_mc_init(rdev);
668 if (r)
669 return r;
670 rs600_debugfs(rdev); 654 rs600_debugfs(rdev);
671 /* Fence driver */ 655 /* Fence driver */
672 r = radeon_fence_driver_init(rdev); 656 r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 06e2771aee5a..83b9174f76f2 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -129,27 +129,21 @@ void rs690_pm_info(struct radeon_device *rdev)
129 rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); 129 rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
130} 130}
131 131
132void rs690_vram_info(struct radeon_device *rdev) 132void rs690_mc_init(struct radeon_device *rdev)
133{ 133{
134 fixed20_12 a; 134 fixed20_12 a;
135 u64 base;
135 136
136 rs400_gart_adjust_size(rdev); 137 rs400_gart_adjust_size(rdev);
137
138 rdev->mc.vram_is_ddr = true; 138 rdev->mc.vram_is_ddr = true;
139 rdev->mc.vram_width = 128; 139 rdev->mc.vram_width = 128;
140
141 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 140 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
142 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 141 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
143
144 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 142 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
145 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 143 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
146 144 rdev->mc.visible_vram_size = rdev->mc.aper_size;
147 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) 145 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
148 rdev->mc.mc_vram_size = rdev->mc.aper_size; 146 base = G_000100_MC_FB_START(base) << 16;
149
150 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
151 rdev->mc.real_vram_size = rdev->mc.aper_size;
152
153 rs690_pm_info(rdev); 147 rs690_pm_info(rdev);
154 /* FIXME: we should enforce default clock in case GPU is not in 148 /* FIXME: we should enforce default clock in case GPU is not in
155 * default setup 149 * default setup
@@ -160,22 +154,9 @@ void rs690_vram_info(struct radeon_device *rdev)
160 a.full = rfixed_const(16); 154 a.full = rfixed_const(16);
161 /* core_bandwidth = sclk(Mhz) * 16 */ 155 /* core_bandwidth = sclk(Mhz) * 16 */
162 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); 156 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
163}
164
165static int rs690_mc_init(struct radeon_device *rdev)
166{
167 int r;
168 u32 tmp;
169
170 /* Setup GPU memory space */
171 tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
172 rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
173 rdev->mc.gtt_location = 0xFFFFFFFFUL;
174 r = radeon_mc_setup(rdev);
175 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 157 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
176 if (r) 158 radeon_vram_location(rdev, &rdev->mc, base);
177 return r; 159 radeon_gtt_location(rdev, &rdev->mc);
178 return 0;
179} 160}
180 161
181void rs690_line_buffer_adjust(struct radeon_device *rdev, 162void rs690_line_buffer_adjust(struct radeon_device *rdev,
@@ -728,12 +709,8 @@ int rs690_init(struct radeon_device *rdev)
728 radeon_get_clock_info(rdev->ddev); 709 radeon_get_clock_info(rdev->ddev);
729 /* Initialize power management */ 710 /* Initialize power management */
730 radeon_pm_init(rdev); 711 radeon_pm_init(rdev);
731 /* Get vram informations */ 712 /* initialize memory controller */
732 rs690_vram_info(rdev); 713 rs690_mc_init(rdev);
733 /* Initialize memory controller (also test AGP) */
734 r = rs690_mc_init(rdev);
735 if (r)
736 return r;
737 rv515_debugfs(rdev); 714 rv515_debugfs(rdev);
738 /* Fence driver */ 715 /* Fence driver */
739 r = radeon_fence_driver_init(rdev); 716 r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 0e1e6b8632b8..bea747da123f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -277,13 +277,15 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
277 } 277 }
278} 278}
279 279
280void rv515_vram_info(struct radeon_device *rdev) 280void rv515_mc_init(struct radeon_device *rdev)
281{ 281{
282 fixed20_12 a; 282 fixed20_12 a;
283 283
284 rv515_vram_get_type(rdev); 284 rv515_vram_get_type(rdev);
285
286 r100_vram_init_sizes(rdev); 285 r100_vram_init_sizes(rdev);
286 radeon_vram_location(rdev, &rdev->mc, 0);
287 if (!(rdev->flags & RADEON_IS_AGP))
288 radeon_gtt_location(rdev, &rdev->mc);
287 /* FIXME: we should enforce default clock in case GPU is not in 289 /* FIXME: we should enforce default clock in case GPU is not in
288 * default setup 290 * default setup
289 */ 291 */
@@ -587,12 +589,15 @@ int rv515_init(struct radeon_device *rdev)
587 radeon_get_clock_info(rdev->ddev); 589 radeon_get_clock_info(rdev->ddev);
588 /* Initialize power management */ 590 /* Initialize power management */
589 radeon_pm_init(rdev); 591 radeon_pm_init(rdev);
590 /* Get vram informations */ 592 /* initialize AGP */
591 rv515_vram_info(rdev); 593 if (rdev->flags & RADEON_IS_AGP) {
592 /* Initialize memory controller (also test AGP) */ 594 r = radeon_agp_init(rdev);
593 r = r420_mc_init(rdev); 595 if (r) {
594 if (r) 596 radeon_agp_disable(rdev);
595 return r; 597 }
598 }
599 /* initialize memory controller */
600 rv515_mc_init(rdev);
596 rv515_debugfs(rdev); 601 rv515_debugfs(rdev);
597 /* Fence driver */ 602 /* Fence driver */
598 r = radeon_fence_driver_init(rdev); 603 r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 03021674d097..37887dee12af 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -56,6 +56,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
56 r = radeon_gart_table_vram_pin(rdev); 56 r = radeon_gart_table_vram_pin(rdev);
57 if (r) 57 if (r)
58 return r; 58 return r;
59 radeon_gart_restore(rdev);
59 /* Setup L2 cache */ 60 /* Setup L2 cache */
60 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 61 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
61 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 62 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@@ -273,9 +274,10 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
273/* 274/*
274 * Core functions 275 * Core functions
275 */ 276 */
276static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, 277static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
277 u32 num_backends, 278 u32 num_tile_pipes,
278 u32 backend_disable_mask) 279 u32 num_backends,
280 u32 backend_disable_mask)
279{ 281{
280 u32 backend_map = 0; 282 u32 backend_map = 0;
281 u32 enabled_backends_mask; 283 u32 enabled_backends_mask;
@@ -284,6 +286,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
284 u32 swizzle_pipe[R7XX_MAX_PIPES]; 286 u32 swizzle_pipe[R7XX_MAX_PIPES];
285 u32 cur_backend; 287 u32 cur_backend;
286 u32 i; 288 u32 i;
289 bool force_no_swizzle;
287 290
288 if (num_tile_pipes > R7XX_MAX_PIPES) 291 if (num_tile_pipes > R7XX_MAX_PIPES)
289 num_tile_pipes = R7XX_MAX_PIPES; 292 num_tile_pipes = R7XX_MAX_PIPES;
@@ -313,6 +316,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
313 if (enabled_backends_count != num_backends) 316 if (enabled_backends_count != num_backends)
314 num_backends = enabled_backends_count; 317 num_backends = enabled_backends_count;
315 318
319 switch (rdev->family) {
320 case CHIP_RV770:
321 case CHIP_RV730:
322 force_no_swizzle = false;
323 break;
324 case CHIP_RV710:
325 case CHIP_RV740:
326 default:
327 force_no_swizzle = true;
328 break;
329 }
330
316 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); 331 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
317 switch (num_tile_pipes) { 332 switch (num_tile_pipes) {
318 case 1: 333 case 1:
@@ -323,49 +338,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
323 swizzle_pipe[1] = 1; 338 swizzle_pipe[1] = 1;
324 break; 339 break;
325 case 3: 340 case 3:
326 swizzle_pipe[0] = 0; 341 if (force_no_swizzle) {
327 swizzle_pipe[1] = 2; 342 swizzle_pipe[0] = 0;
328 swizzle_pipe[2] = 1; 343 swizzle_pipe[1] = 1;
344 swizzle_pipe[2] = 2;
345 } else {
346 swizzle_pipe[0] = 0;
347 swizzle_pipe[1] = 2;
348 swizzle_pipe[2] = 1;
349 }
329 break; 350 break;
330 case 4: 351 case 4:
331 swizzle_pipe[0] = 0; 352 if (force_no_swizzle) {
332 swizzle_pipe[1] = 2; 353 swizzle_pipe[0] = 0;
333 swizzle_pipe[2] = 3; 354 swizzle_pipe[1] = 1;
334 swizzle_pipe[3] = 1; 355 swizzle_pipe[2] = 2;
356 swizzle_pipe[3] = 3;
357 } else {
358 swizzle_pipe[0] = 0;
359 swizzle_pipe[1] = 2;
360 swizzle_pipe[2] = 3;
361 swizzle_pipe[3] = 1;
362 }
335 break; 363 break;
336 case 5: 364 case 5:
337 swizzle_pipe[0] = 0; 365 if (force_no_swizzle) {
338 swizzle_pipe[1] = 2; 366 swizzle_pipe[0] = 0;
339 swizzle_pipe[2] = 4; 367 swizzle_pipe[1] = 1;
340 swizzle_pipe[3] = 1; 368 swizzle_pipe[2] = 2;
341 swizzle_pipe[4] = 3; 369 swizzle_pipe[3] = 3;
370 swizzle_pipe[4] = 4;
371 } else {
372 swizzle_pipe[0] = 0;
373 swizzle_pipe[1] = 2;
374 swizzle_pipe[2] = 4;
375 swizzle_pipe[3] = 1;
376 swizzle_pipe[4] = 3;
377 }
342 break; 378 break;
343 case 6: 379 case 6:
344 swizzle_pipe[0] = 0; 380 if (force_no_swizzle) {
345 swizzle_pipe[1] = 2; 381 swizzle_pipe[0] = 0;
346 swizzle_pipe[2] = 4; 382 swizzle_pipe[1] = 1;
347 swizzle_pipe[3] = 5; 383 swizzle_pipe[2] = 2;
348 swizzle_pipe[4] = 3; 384 swizzle_pipe[3] = 3;
349 swizzle_pipe[5] = 1; 385 swizzle_pipe[4] = 4;
386 swizzle_pipe[5] = 5;
387 } else {
388 swizzle_pipe[0] = 0;
389 swizzle_pipe[1] = 2;
390 swizzle_pipe[2] = 4;
391 swizzle_pipe[3] = 5;
392 swizzle_pipe[4] = 3;
393 swizzle_pipe[5] = 1;
394 }
350 break; 395 break;
351 case 7: 396 case 7:
352 swizzle_pipe[0] = 0; 397 if (force_no_swizzle) {
353 swizzle_pipe[1] = 2; 398 swizzle_pipe[0] = 0;
354 swizzle_pipe[2] = 4; 399 swizzle_pipe[1] = 1;
355 swizzle_pipe[3] = 6; 400 swizzle_pipe[2] = 2;
356 swizzle_pipe[4] = 3; 401 swizzle_pipe[3] = 3;
357 swizzle_pipe[5] = 1; 402 swizzle_pipe[4] = 4;
358 swizzle_pipe[6] = 5; 403 swizzle_pipe[5] = 5;
404 swizzle_pipe[6] = 6;
405 } else {
406 swizzle_pipe[0] = 0;
407 swizzle_pipe[1] = 2;
408 swizzle_pipe[2] = 4;
409 swizzle_pipe[3] = 6;
410 swizzle_pipe[4] = 3;
411 swizzle_pipe[5] = 1;
412 swizzle_pipe[6] = 5;
413 }
359 break; 414 break;
360 case 8: 415 case 8:
361 swizzle_pipe[0] = 0; 416 if (force_no_swizzle) {
362 swizzle_pipe[1] = 2; 417 swizzle_pipe[0] = 0;
363 swizzle_pipe[2] = 4; 418 swizzle_pipe[1] = 1;
364 swizzle_pipe[3] = 6; 419 swizzle_pipe[2] = 2;
365 swizzle_pipe[4] = 3; 420 swizzle_pipe[3] = 3;
366 swizzle_pipe[5] = 1; 421 swizzle_pipe[4] = 4;
367 swizzle_pipe[6] = 7; 422 swizzle_pipe[5] = 5;
368 swizzle_pipe[7] = 5; 423 swizzle_pipe[6] = 6;
424 swizzle_pipe[7] = 7;
425 } else {
426 swizzle_pipe[0] = 0;
427 swizzle_pipe[1] = 2;
428 swizzle_pipe[2] = 4;
429 swizzle_pipe[3] = 6;
430 swizzle_pipe[4] = 3;
431 swizzle_pipe[5] = 1;
432 swizzle_pipe[6] = 7;
433 swizzle_pipe[7] = 5;
434 }
369 break; 435 break;
370 } 436 }
371 437
@@ -385,8 +451,10 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
385static void rv770_gpu_init(struct radeon_device *rdev) 451static void rv770_gpu_init(struct radeon_device *rdev)
386{ 452{
387 int i, j, num_qd_pipes; 453 int i, j, num_qd_pipes;
454 u32 ta_aux_cntl;
388 u32 sx_debug_1; 455 u32 sx_debug_1;
389 u32 smx_dc_ctl0; 456 u32 smx_dc_ctl0;
457 u32 db_debug3;
390 u32 num_gs_verts_per_thread; 458 u32 num_gs_verts_per_thread;
391 u32 vgt_gs_per_es; 459 u32 vgt_gs_per_es;
392 u32 gs_prim_buffer_depth = 0; 460 u32 gs_prim_buffer_depth = 0;
@@ -515,6 +583,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
515 583
516 switch (rdev->config.rv770.max_tile_pipes) { 584 switch (rdev->config.rv770.max_tile_pipes) {
517 case 1: 585 case 1:
586 default:
518 gb_tiling_config |= PIPE_TILING(0); 587 gb_tiling_config |= PIPE_TILING(0);
519 break; 588 break;
520 case 2: 589 case 2:
@@ -526,16 +595,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
526 case 8: 595 case 8:
527 gb_tiling_config |= PIPE_TILING(3); 596 gb_tiling_config |= PIPE_TILING(3);
528 break; 597 break;
529 default:
530 break;
531 } 598 }
599 rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
532 600
533 if (rdev->family == CHIP_RV770) 601 if (rdev->family == CHIP_RV770)
534 gb_tiling_config |= BANK_TILING(1); 602 gb_tiling_config |= BANK_TILING(1);
535 else 603 else
536 gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 604 gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
605 rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
537 606
538 gb_tiling_config |= GROUP_SIZE(0); 607 gb_tiling_config |= GROUP_SIZE(0);
608 rdev->config.rv770.tiling_group_size = 256;
539 609
540 if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { 610 if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
541 gb_tiling_config |= ROW_TILING(3); 611 gb_tiling_config |= ROW_TILING(3);
@@ -549,21 +619,27 @@ static void rv770_gpu_init(struct radeon_device *rdev)
549 619
550 gb_tiling_config |= BANK_SWAPS(1); 620 gb_tiling_config |= BANK_SWAPS(1);
551 621
552 if (rdev->family == CHIP_RV740) 622 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
553 backend_map = 0x28; 623 cc_rb_backend_disable |=
554 else 624 BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
555 backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
556 rdev->config.rv770.max_backends,
557 (0xff << rdev->config.rv770.max_backends) & 0xff);
558 gb_tiling_config |= BACKEND_MAP(backend_map);
559 625
560 cc_gc_shader_pipe_config = 626 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
627 cc_gc_shader_pipe_config |=
561 INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK); 628 INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
562 cc_gc_shader_pipe_config |= 629 cc_gc_shader_pipe_config |=
563 INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK); 630 INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
564 631
565 cc_rb_backend_disable = 632 if (rdev->family == CHIP_RV740)
566 BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK); 633 backend_map = 0x28;
634 else
635 backend_map = r700_get_tile_pipe_to_backend_map(rdev,
636 rdev->config.rv770.max_tile_pipes,
637 (R7XX_MAX_BACKENDS -
638 r600_count_pipe_bits((cc_rb_backend_disable &
639 R7XX_MAX_BACKENDS_MASK) >> 16)),
640 (cc_rb_backend_disable >> 16));
641 gb_tiling_config |= BACKEND_MAP(backend_map);
642
567 643
568 WREG32(GB_TILING_CONFIG, gb_tiling_config); 644 WREG32(GB_TILING_CONFIG, gb_tiling_config);
569 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 645 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
@@ -571,16 +647,13 @@ static void rv770_gpu_init(struct radeon_device *rdev)
571 647
572 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 648 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
573 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 649 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
574 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 650 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
575 651
576 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
577 WREG32(CGTS_SYS_TCC_DISABLE, 0); 652 WREG32(CGTS_SYS_TCC_DISABLE, 0);
578 WREG32(CGTS_TCC_DISABLE, 0); 653 WREG32(CGTS_TCC_DISABLE, 0);
579 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
580 WREG32(CGTS_USER_TCC_DISABLE, 0);
581 654
582 num_qd_pipes = 655 num_qd_pipes =
583 R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK); 656 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
584 WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK); 657 WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
585 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK); 658 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
586 659
@@ -590,10 +663,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
590 663
591 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); 664 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
592 665
593 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | 666 ta_aux_cntl = RREG32(TA_CNTL_AUX);
594 SYNC_GRADIENT | 667 WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);
595 SYNC_WALKER |
596 SYNC_ALIGNER));
597 668
598 sx_debug_1 = RREG32(SX_DEBUG_1); 669 sx_debug_1 = RREG32(SX_DEBUG_1);
599 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; 670 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
@@ -604,14 +675,28 @@ static void rv770_gpu_init(struct radeon_device *rdev)
604 smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1); 675 smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
605 WREG32(SMX_DC_CTL0, smx_dc_ctl0); 676 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
606 677
607 WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) | 678 if (rdev->family != CHIP_RV740)
608 GS_FLUSH_CTL(4) | 679 WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
609 ACK_FLUSH_CTL(3) | 680 GS_FLUSH_CTL(4) |
610 SYNC_FLUSH_CTL)); 681 ACK_FLUSH_CTL(3) |
682 SYNC_FLUSH_CTL));
611 683
612 if (rdev->family == CHIP_RV770) 684 db_debug3 = RREG32(DB_DEBUG3);
613 WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f)); 685 db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
614 else { 686 switch (rdev->family) {
687 case CHIP_RV770:
688 case CHIP_RV740:
689 db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
690 break;
691 case CHIP_RV710:
692 case CHIP_RV730:
693 default:
694 db_debug3 |= DB_CLK_OFF_DELAY(2);
695 break;
696 }
697 WREG32(DB_DEBUG3, db_debug3);
698
699 if (rdev->family != CHIP_RV770) {
615 db_debug4 = RREG32(DB_DEBUG4); 700 db_debug4 = RREG32(DB_DEBUG4);
616 db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER; 701 db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
617 WREG32(DB_DEBUG4, db_debug4); 702 WREG32(DB_DEBUG4, db_debug4);
@@ -640,10 +725,10 @@ static void rv770_gpu_init(struct radeon_device *rdev)
640 ALU_UPDATE_FIFO_HIWATER(0x8)); 725 ALU_UPDATE_FIFO_HIWATER(0x8));
641 switch (rdev->family) { 726 switch (rdev->family) {
642 case CHIP_RV770: 727 case CHIP_RV770:
643 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
644 break;
645 case CHIP_RV730: 728 case CHIP_RV730:
646 case CHIP_RV710: 729 case CHIP_RV710:
730 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
731 break;
647 case CHIP_RV740: 732 case CHIP_RV740:
648 default: 733 default:
649 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4); 734 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
@@ -816,45 +901,13 @@ int rv770_mc_init(struct radeon_device *rdev)
816 /* Setup GPU memory space */ 901 /* Setup GPU memory space */
817 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 902 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
818 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 903 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
819 904 rdev->mc.visible_vram_size = rdev->mc.aper_size;
820 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) 905 /* FIXME remove this once we support unmappable VRAM */
906 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
821 rdev->mc.mc_vram_size = rdev->mc.aper_size; 907 rdev->mc.mc_vram_size = rdev->mc.aper_size;
822
823 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
824 rdev->mc.real_vram_size = rdev->mc.aper_size; 908 rdev->mc.real_vram_size = rdev->mc.aper_size;
825
826 if (rdev->flags & RADEON_IS_AGP) {
827 /* gtt_size is setup by radeon_agp_init */
828 rdev->mc.gtt_location = rdev->mc.agp_base;
829 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
830 /* Try to put vram before or after AGP because we
831 * we want SYSTEM_APERTURE to cover both VRAM and
832 * AGP so that GPU can catch out of VRAM/AGP access
833 */
834 if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
835 /* Enough place before */
836 rdev->mc.vram_location = rdev->mc.gtt_location -
837 rdev->mc.mc_vram_size;
838 } else if (tmp > rdev->mc.mc_vram_size) {
839 /* Enough place after */
840 rdev->mc.vram_location = rdev->mc.gtt_location +
841 rdev->mc.gtt_size;
842 } else {
843 /* Try to setup VRAM then AGP might not
844 * not work on some card
845 */
846 rdev->mc.vram_location = 0x00000000UL;
847 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
848 }
849 } else {
850 rdev->mc.vram_location = 0x00000000UL;
851 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
852 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
853 } 909 }
854 rdev->mc.vram_start = rdev->mc.vram_location; 910 r600_vram_gtt_location(rdev, &rdev->mc);
855 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
856 rdev->mc.gtt_start = rdev->mc.gtt_location;
857 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
858 /* FIXME: we should enforce default clock in case GPU is not in 911 /* FIXME: we should enforce default clock in case GPU is not in
859 * default setup 912 * default setup
860 */ 913 */
@@ -863,6 +916,7 @@ int rv770_mc_init(struct radeon_device *rdev)
863 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 916 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
864 return 0; 917 return 0;
865} 918}
919
866int rv770_gpu_reset(struct radeon_device *rdev) 920int rv770_gpu_reset(struct radeon_device *rdev)
867{ 921{
868 /* FIXME: implement any rv770 specific bits */ 922 /* FIXME: implement any rv770 specific bits */
@@ -1038,6 +1092,7 @@ int rv770_init(struct radeon_device *rdev)
1038 r = radeon_fence_driver_init(rdev); 1092 r = radeon_fence_driver_init(rdev);
1039 if (r) 1093 if (r)
1040 return r; 1094 return r;
1095 /* initialize AGP */
1041 if (rdev->flags & RADEON_IS_AGP) { 1096 if (rdev->flags & RADEON_IS_AGP) {
1042 r = radeon_agp_init(rdev); 1097 r = radeon_agp_init(rdev);
1043 if (r) 1098 if (r)
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index a1367ab6f261..9506f8cb99e0 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -343,4 +343,6 @@
343 343
344#define WAIT_UNTIL 0x8040 344#define WAIT_UNTIL 0x8040
345 345
346#define SRBM_STATUS 0x0E50
347
346#endif 348#endif
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3d47a2c12322..a759170763bb 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -480,7 +480,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
480 void *from_virtual; 480 void *from_virtual;
481 void *to_virtual; 481 void *to_virtual;
482 int i; 482 int i;
483 int ret; 483 int ret = -ENOMEM;
484 484
485 if (ttm->page_flags & TTM_PAGE_FLAG_USER) { 485 if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
486 ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start, 486 ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
@@ -499,8 +499,10 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
499 499
500 for (i = 0; i < ttm->num_pages; ++i) { 500 for (i = 0; i < ttm->num_pages; ++i) {
501 from_page = read_mapping_page(swap_space, i, NULL); 501 from_page = read_mapping_page(swap_space, i, NULL);
502 if (IS_ERR(from_page)) 502 if (IS_ERR(from_page)) {
503 ret = PTR_ERR(from_page);
503 goto out_err; 504 goto out_err;
505 }
504 to_page = __ttm_tt_get_page(ttm, i); 506 to_page = __ttm_tt_get_page(ttm, i);
505 if (unlikely(to_page == NULL)) 507 if (unlikely(to_page == NULL))
506 goto out_err; 508 goto out_err;
@@ -523,7 +525,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
523 return 0; 525 return 0;
524out_err: 526out_err:
525 ttm_tt_free_alloced_pages(ttm); 527 ttm_tt_free_alloced_pages(ttm);
526 return -ENOMEM; 528 return ret;
527} 529}
528 530
529int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) 531int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
@@ -535,6 +537,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
535 void *from_virtual; 537 void *from_virtual;
536 void *to_virtual; 538 void *to_virtual;
537 int i; 539 int i;
540 int ret = -ENOMEM;
538 541
539 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated); 542 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
540 BUG_ON(ttm->caching_state != tt_cached); 543 BUG_ON(ttm->caching_state != tt_cached);
@@ -557,7 +560,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
557 0); 560 0);
558 if (unlikely(IS_ERR(swap_storage))) { 561 if (unlikely(IS_ERR(swap_storage))) {
559 printk(KERN_ERR "Failed allocating swap storage.\n"); 562 printk(KERN_ERR "Failed allocating swap storage.\n");
560 return -ENOMEM; 563 return PTR_ERR(swap_storage);
561 } 564 }
562 } else 565 } else
563 swap_storage = persistant_swap_storage; 566 swap_storage = persistant_swap_storage;
@@ -569,9 +572,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
569 if (unlikely(from_page == NULL)) 572 if (unlikely(from_page == NULL))
570 continue; 573 continue;
571 to_page = read_mapping_page(swap_space, i, NULL); 574 to_page = read_mapping_page(swap_space, i, NULL);
572 if (unlikely(to_page == NULL)) 575 if (unlikely(IS_ERR(to_page))) {
576 ret = PTR_ERR(to_page);
573 goto out_err; 577 goto out_err;
574 578 }
575 preempt_disable(); 579 preempt_disable();
576 from_virtual = kmap_atomic(from_page, KM_USER0); 580 from_virtual = kmap_atomic(from_page, KM_USER0);
577 to_virtual = kmap_atomic(to_page, KM_USER1); 581 to_virtual = kmap_atomic(to_page, KM_USER1);
@@ -595,5 +599,5 @@ out_err:
595 if (!persistant_swap_storage) 599 if (!persistant_swap_storage)
596 fput(swap_storage); 600 fput(swap_storage);
597 601
598 return -ENOMEM; 602 return ret;
599} 603}
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 0920492cea0a..61ab4daf0bbb 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -16,3 +16,14 @@ config VGA_ARB_MAX_GPUS
16 help 16 help
17 Reserves space in the kernel to maintain resource locking for 17 Reserves space in the kernel to maintain resource locking for
18 multiple GPUS. The overhead for each GPU is very small. 18 multiple GPUS. The overhead for each GPU is very small.
19
20config VGA_SWITCHEROO
21	bool "Laptop Hybrid Graphics - GPU switching support"
22 depends on X86
23 depends on ACPI
24 help
25	  Many laptops released in 2008/9/10 have two GPUs with a multiplexer
26 to switch between them. This adds support for dynamic switching when
27 X isn't running and delayed switching until the next logoff. This
28	  feature is called hybrid graphics, ATI PowerXpress, and Nvidia
29 HybridPower.
diff --git a/drivers/gpu/vga/Makefile b/drivers/gpu/vga/Makefile
index 7cc8c1ed645b..14ca30b75d0a 100644
--- a/drivers/gpu/vga/Makefile
+++ b/drivers/gpu/vga/Makefile
@@ -1 +1,2 @@
1obj-$(CONFIG_VGA_ARB) += vgaarb.o 1obj-$(CONFIG_VGA_ARB) += vgaarb.o
2obj-$(CONFIG_VGA_SWITCHEROO) += vga_switcheroo.o
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
new file mode 100644
index 000000000000..d6d1149d525d
--- /dev/null
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -0,0 +1,450 @@
1/*
2 * Copyright (c) 2010 Red Hat Inc.
3 * Author : Dave Airlie <airlied@redhat.com>
4 *
5 *
6 * Licensed under GPLv2
7 *
8 * vga_switcheroo.c - Support for laptops with dual GPUs using one set of outputs
9
10 Switcher interface - methods required for ATPX and DCM
11 - switchto - this throws the output MUX switch
12 - discrete_set_power - sets the power state for the discrete card
13
14 GPU driver interface
15 - set_gpu_state - this should do the equivalent of suspend/resume for the card
16 - this should *not* set the discrete power state
17 - can_switch - check if the device is in a position to switch now
18 */
19
20#include <linux/module.h>
21#include <linux/dmi.h>
22#include <linux/seq_file.h>
23#include <linux/uaccess.h>
24#include <linux/fs.h>
25#include <linux/debugfs.h>
26#include <linux/fb.h>
27
28#include <linux/pci.h>
29#include <linux/vga_switcheroo.h>
30
31struct vga_switcheroo_client {
32 struct pci_dev *pdev;
33 struct fb_info *fb_info;
34 int pwr_state;
35 void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
36 bool (*can_switch)(struct pci_dev *pdev);
37 int id;
38 bool active;
39};
40
41static DEFINE_MUTEX(vgasr_mutex);
42
43struct vgasr_priv {
44
45 bool active;
46 bool delayed_switch_active;
47 enum vga_switcheroo_client_id delayed_client_id;
48
49 struct dentry *debugfs_root;
50 struct dentry *switch_file;
51
52 int registered_clients;
53 struct vga_switcheroo_client clients[VGA_SWITCHEROO_MAX_CLIENTS];
54
55 struct vga_switcheroo_handler *handler;
56};
57
58static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
59static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
60
61/* only one switcheroo per system */
62static struct vgasr_priv vgasr_priv;
63
64int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
65{
66 mutex_lock(&vgasr_mutex);
67 if (vgasr_priv.handler) {
68 mutex_unlock(&vgasr_mutex);
69 return -EINVAL;
70 }
71
72 vgasr_priv.handler = handler;
73 mutex_unlock(&vgasr_mutex);
74 return 0;
75}
76EXPORT_SYMBOL(vga_switcheroo_register_handler);
77
78void vga_switcheroo_unregister_handler(void)
79{
80 mutex_lock(&vgasr_mutex);
81 vgasr_priv.handler = NULL;
82 mutex_unlock(&vgasr_mutex);
83}
84EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
85
86static void vga_switcheroo_enable(void)
87{
88 int i;
89 int ret;
90 /* call the handler to init */
91 vgasr_priv.handler->init();
92
93 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
94 ret = vgasr_priv.handler->get_client_id(vgasr_priv.clients[i].pdev);
95 if (ret < 0)
96 return;
97
98 vgasr_priv.clients[i].id = ret;
99 }
100 vga_switcheroo_debugfs_init(&vgasr_priv);
101 vgasr_priv.active = true;
102}
103
104int vga_switcheroo_register_client(struct pci_dev *pdev,
105 void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
106 bool (*can_switch)(struct pci_dev *pdev))
107{
108 int index;
109
110 mutex_lock(&vgasr_mutex);
111 /* don't do IGD vs DIS here */
112 if (vgasr_priv.registered_clients & 1)
113 index = 1;
114 else
115 index = 0;
116
117 vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
118 vgasr_priv.clients[index].pdev = pdev;
119 vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
120 vgasr_priv.clients[index].can_switch = can_switch;
121 vgasr_priv.clients[index].id = -1;
122 if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
123 vgasr_priv.clients[index].active = true;
124
125 vgasr_priv.registered_clients |= (1 << index);
126
127 /* if we get two clients + handler */
128 if (vgasr_priv.registered_clients == 0x3 && vgasr_priv.handler) {
129 printk(KERN_INFO "vga_switcheroo: enabled\n");
130 vga_switcheroo_enable();
131 }
132 mutex_unlock(&vgasr_mutex);
133 return 0;
134}
135EXPORT_SYMBOL(vga_switcheroo_register_client);
136
137void vga_switcheroo_unregister_client(struct pci_dev *pdev)
138{
139 int i;
140
141 mutex_lock(&vgasr_mutex);
142 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
143 if (vgasr_priv.clients[i].pdev == pdev) {
144 vgasr_priv.registered_clients &= ~(1 << i);
145 break;
146 }
147 }
148
149 printk(KERN_INFO "vga_switcheroo: disabled\n");
150 vga_switcheroo_debugfs_fini(&vgasr_priv);
151 vgasr_priv.active = false;
152 mutex_unlock(&vgasr_mutex);
153}
154EXPORT_SYMBOL(vga_switcheroo_unregister_client);
155
156void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
157 struct fb_info *info)
158{
159 int i;
160
161 mutex_lock(&vgasr_mutex);
162 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
163 if (vgasr_priv.clients[i].pdev == pdev) {
164 vgasr_priv.clients[i].fb_info = info;
165 break;
166 }
167 }
168 mutex_unlock(&vgasr_mutex);
169}
170EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
171
172static int vga_switcheroo_show(struct seq_file *m, void *v)
173{
174 int i;
175 mutex_lock(&vgasr_mutex);
176 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
177 seq_printf(m, "%d:%c:%s:%s\n", i,
178 vgasr_priv.clients[i].active ? '+' : ' ',
179 vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
180 pci_name(vgasr_priv.clients[i].pdev));
181 }
182 mutex_unlock(&vgasr_mutex);
183 return 0;
184}
185
186static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
187{
188 return single_open(file, vga_switcheroo_show, NULL);
189}
190
191static int vga_switchon(struct vga_switcheroo_client *client)
192{
193 int ret;
194
195 ret = vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
196 /* call the driver callback to turn on device */
197 client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
198 client->pwr_state = VGA_SWITCHEROO_ON;
199 return 0;
200}
201
202static int vga_switchoff(struct vga_switcheroo_client *client)
203{
204 /* call the driver callback to turn off device */
205 client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
206 vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
207 client->pwr_state = VGA_SWITCHEROO_OFF;
208 return 0;
209}
210
211static int vga_switchto(struct vga_switcheroo_client *new_client)
212{
213 int ret;
214 int i;
215 struct vga_switcheroo_client *active = NULL;
216
217 if (new_client->active == true)
218 return 0;
219
220 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
221 if (vgasr_priv.clients[i].active == true) {
222 active = &vgasr_priv.clients[i];
223 break;
224 }
225 }
226 if (!active)
227 return 0;
228
229 /* power up the first device */
230 ret = pci_enable_device(new_client->pdev);
231 if (ret)
232 return ret;
233
234 if (new_client->pwr_state == VGA_SWITCHEROO_OFF)
235 vga_switchon(new_client);
236
237 /* swap shadow resource to denote boot VGA device has changed so X starts on new device */
238 active->active = false;
239
240 active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
241 new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
242
243 if (new_client->fb_info) {
244 struct fb_event event;
245 event.info = new_client->fb_info;
246 fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
247 }
248
249 ret = vgasr_priv.handler->switchto(new_client->id);
250 if (ret)
251 return ret;
252
253 if (active->pwr_state == VGA_SWITCHEROO_ON)
254 vga_switchoff(active);
255
256 new_client->active = true;
257 return 0;
258}
259
260static ssize_t
261vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
262 size_t cnt, loff_t *ppos)
263{
264 char usercmd[64];
265 const char *pdev_name;
266 int i, ret;
267 bool delay = false, can_switch;
268 int client_id = -1;
269 struct vga_switcheroo_client *client = NULL;
270
271 if (cnt > 63)
272 cnt = 63;
273
274 if (copy_from_user(usercmd, ubuf, cnt))
275 return -EFAULT;
276
277 mutex_lock(&vgasr_mutex);
278
279 if (!vgasr_priv.active)
280 return -EINVAL;
281
282 /* pwr off the device not in use */
283 if (strncmp(usercmd, "OFF", 3) == 0) {
284 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
285 if (vgasr_priv.clients[i].active)
286 continue;
287 if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_ON)
288 vga_switchoff(&vgasr_priv.clients[i]);
289 }
290 goto out;
291 }
292 /* pwr on the device not in use */
293 if (strncmp(usercmd, "ON", 2) == 0) {
294 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
295 if (vgasr_priv.clients[i].active)
296 continue;
297 if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_OFF)
298 vga_switchon(&vgasr_priv.clients[i]);
299 }
300 goto out;
301 }
302
303 /* request a delayed switch - test can we switch now */
304 if (strncmp(usercmd, "DIGD", 4) == 0) {
305 client_id = VGA_SWITCHEROO_IGD;
306 delay = true;
307 }
308
309 if (strncmp(usercmd, "DDIS", 4) == 0) {
310 client_id = VGA_SWITCHEROO_DIS;
311 delay = true;
312 }
313
314 if (strncmp(usercmd, "IGD", 3) == 0)
315 client_id = VGA_SWITCHEROO_IGD;
316
317 if (strncmp(usercmd, "DIS", 3) == 0)
318 client_id = VGA_SWITCHEROO_DIS;
319
320 if (client_id == -1)
321 goto out;
322
323 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
324 if (vgasr_priv.clients[i].id == client_id) {
325 client = &vgasr_priv.clients[i];
326 break;
327 }
328 }
329
330 vgasr_priv.delayed_switch_active = false;
331 /* okay we want a switch - test if devices are willing to switch */
332 can_switch = true;
333 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
334 can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
335 if (can_switch == false) {
336 printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
337 break;
338 }
339 }
340
341 if (can_switch == false && delay == false)
342 goto out;
343
344 if (can_switch == true) {
345 pdev_name = pci_name(client->pdev);
346 ret = vga_switchto(client);
347 if (ret)
348 printk(KERN_ERR "vga_switcheroo: switching failed %d\n", ret);
349 } else {
350 printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
351 vgasr_priv.delayed_switch_active = true;
352 vgasr_priv.delayed_client_id = client_id;
353
354 /* we should at least power up the card to
355 make the switch faster */
356 if (client->pwr_state == VGA_SWITCHEROO_OFF)
357 vga_switchon(client);
358 }
359
360out:
361 mutex_unlock(&vgasr_mutex);
362 return cnt;
363}
364
365static const struct file_operations vga_switcheroo_debugfs_fops = {
366 .owner = THIS_MODULE,
367 .open = vga_switcheroo_debugfs_open,
368 .write = vga_switcheroo_debugfs_write,
369 .read = seq_read,
370 .llseek = seq_lseek,
371 .release = single_release,
372};
373
374static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
375{
376 if (priv->switch_file) {
377 debugfs_remove(priv->switch_file);
378 priv->switch_file = NULL;
379 }
380 if (priv->debugfs_root) {
381 debugfs_remove(priv->debugfs_root);
382 priv->debugfs_root = NULL;
383 }
384}
385
386static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
387{
388 /* already initialised */
389 if (priv->debugfs_root)
390 return 0;
391 priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
392
393 if (!priv->debugfs_root) {
394 printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n");
395 goto fail;
396 }
397
398 priv->switch_file = debugfs_create_file("switch", 0644,
399 priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops);
400 if (!priv->switch_file) {
401 printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n");
402 goto fail;
403 }
404 return 0;
405fail:
406 vga_switcheroo_debugfs_fini(priv);
407 return -1;
408}
409
410int vga_switcheroo_process_delayed_switch(void)
411{
412 struct vga_switcheroo_client *client = NULL;
413 const char *pdev_name;
414 bool can_switch = true;
415 int i;
416 int ret;
417 int err = -EINVAL;
418
419 mutex_lock(&vgasr_mutex);
420 if (!vgasr_priv.delayed_switch_active)
421 goto err;
422
423 printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
424
425 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
426 if (vgasr_priv.clients[i].id == vgasr_priv.delayed_client_id)
427 client = &vgasr_priv.clients[i];
428 can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
429 if (can_switch == false) {
430 printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
431 break;
432 }
433 }
434
435 if (can_switch == false || client == NULL)
436 goto err;
437
438 pdev_name = pci_name(client->pdev);
439 ret = vga_switchto(client);
440 if (ret)
441 printk(KERN_ERR "vga_switcheroo: delayed switching failed %d\n", ret);
442
443 vgasr_priv.delayed_switch_active = false;
444 err = 0;
445err:
446 mutex_unlock(&vgasr_mutex);
447 return err;
448}
449EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
450
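The header comment above describes the two halves of the new interface: a platform handler (switchto, power_state) and per-GPU client callbacks (set_gpu_state, can_switch). A minimal sketch of how a GPU driver might hook into it is shown below; the exported vga_switcheroo_register_client() signature is taken from the file above, while the my_gpu_* helpers and their bodies are hypothetical placeholders, not part of this patch.

/* Minimal sketch, not part of the patch: a GPU driver registering as a
 * vga_switcheroo client. Only the functions exported above are assumed;
 * the my_gpu_* names are hypothetical. */
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static void my_gpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	/* Suspend or resume the device itself; the discrete power mux is
	 * driven by the platform handler, not by this callback. */
	if (state == VGA_SWITCHEROO_ON)
		dev_info(&pdev->dev, "switcheroo: resuming GPU\n");
	else
		dev_info(&pdev->dev, "switcheroo: suspending GPU\n");
}

static bool my_gpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	/* Refuse the switch while the device is still in use. */
	return true;
}

static int my_gpu_register_switcheroo(struct pci_dev *pdev)
{
	return vga_switcheroo_register_client(pdev,
					      my_gpu_switcheroo_set_state,
					      my_gpu_switcheroo_can_switch);
}

Once both clients and a handler are registered, switching is driven from userspace through the debugfs file created above, e.g. by writing IGD, DIS, DIGD or DDIS to /sys/kernel/debug/vgaswitcheroo/switch; a driver supporting delayed switching would call vga_switcheroo_process_delayed_switch() once it is safe to switch.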
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 867e08433e4b..433602aed468 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -265,9 +265,10 @@ static int hiddev_release(struct inode * inode, struct file * file)
265static int hiddev_open(struct inode *inode, struct file *file) 265static int hiddev_open(struct inode *inode, struct file *file)
266{ 266{
267 struct hiddev_list *list; 267 struct hiddev_list *list;
268 int res; 268 int res, i;
269 269
270 int i = iminor(inode) - HIDDEV_MINOR_BASE; 270 lock_kernel();
271 i = iminor(inode) - HIDDEV_MINOR_BASE;
271 272
272 if (i >= HIDDEV_MINORS || i < 0 || !hiddev_table[i]) 273 if (i >= HIDDEV_MINORS || i < 0 || !hiddev_table[i])
273 return -ENODEV; 274 return -ENODEV;
@@ -313,10 +314,12 @@ static int hiddev_open(struct inode *inode, struct file *file)
313 usbhid_open(hid); 314 usbhid_open(hid);
314 } 315 }
315 316
317 unlock_kernel();
316 return 0; 318 return 0;
317bail: 319bail:
318 file->private_data = NULL; 320 file->private_data = NULL;
319 kfree(list); 321 kfree(list);
322 unlock_kernel();
320 return res; 323 return res;
321} 324}
322 325
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 8d8a00e5a30e..02ce9cff5fcf 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -61,6 +61,16 @@ config I2C_HELPER_AUTO
61 61
62 In doubt, say Y. 62 In doubt, say Y.
63 63
64config I2C_SMBUS
65 tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO
66 help
67 Say Y here if you want support for SMBus extensions to the I2C
68 specification. At the moment, the only supported extension is
69 the SMBus alert protocol.
70
71 This support is also available as a module. If so, the module
72 will be called i2c-smbus.
73
64source drivers/i2c/algos/Kconfig 74source drivers/i2c/algos/Kconfig
65source drivers/i2c/busses/Kconfig 75source drivers/i2c/busses/Kconfig
66source drivers/i2c/chips/Kconfig 76source drivers/i2c/chips/Kconfig
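The I2C_SMBUS option added above builds the SMBus alert helper as a separate module (hooked into the build in the Makefile hunk that follows). As a rough sketch of the intended usage — assuming the companion i2c-smbus helper, which is not part of this excerpt, exports an i2c_setup_smbus_alert() call consuming the i2c_smbus_alert_setup structure visible in the i2c-parport-light hunk further below — a bus driver with an SMBALERT# line might do:

/* Hedged sketch: enabling SMBus alert handling from a bus driver.
 * i2c_setup_smbus_alert() and the irq field are assumptions about the
 * companion i2c-smbus helper, which is not shown in this diff. */
#include <linux/i2c.h>
#include <linux/i2c-smbus.h>

static struct i2c_client *ara;		/* Alert Response Address client */
static struct i2c_smbus_alert_setup alert_data = {
	.alert_edge_triggered = 1,	/* ALERT# wired as edge triggered */
	.irq = 0,			/* no dedicated ALERT# interrupt line */
};

static int my_bus_enable_alert(struct i2c_adapter *adap)
{
	ara = i2c_setup_smbus_alert(adap, &alert_data);
	return ara ? 0 : -ENODEV;
}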
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index ba26e6cbe74e..acd0250c16a0 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o 5obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o
6obj-$(CONFIG_I2C) += i2c-core.o 6obj-$(CONFIG_I2C) += i2c-core.o
7obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o
7obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o 8obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
8obj-y += busses/ chips/ algos/ 9obj-y += busses/ chips/ algos/
9 10
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 78d42aae0089..dcdaf8e675bf 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -453,8 +453,6 @@ static int pca_init(struct i2c_adapter *adap)
453 */ 453 */
454 int raise_fall_time; 454 int raise_fall_time;
455 455
456 struct i2c_algo_pca_data *pca_data = adap->algo_data;
457
458 /* Ignore the reset function from the module, 456 /* Ignore the reset function from the module,
459 * we can use the parallel bus reset 457 * we can use the parallel bus reset
460 */ 458 */
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 737f05200b1d..4cc3807bd31c 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -77,7 +77,7 @@ config I2C_AMD8111
77 will be called i2c-amd8111. 77 will be called i2c-amd8111.
78 78
79config I2C_I801 79config I2C_I801
80 tristate "Intel 82801 (ICH)" 80 tristate "Intel 82801 (ICH/PCH)"
81 depends on PCI 81 depends on PCI
82 help 82 help
83 If you say yes to this option, support will be included for the Intel 83 If you say yes to this option, support will be included for the Intel
@@ -97,7 +97,8 @@ config I2C_I801
97 ICH9 97 ICH9
98 Tolapai 98 Tolapai
99 ICH10 99 ICH10
100 PCH 100 3400/5 Series (PCH)
101 Cougar Point (PCH)
101 102
102 This driver can also be built as a module. If so, the module 103 This driver can also be built as a module. If so, the module
103 will be called i2c-i801. 104 will be called i2c-i801.
@@ -580,6 +581,7 @@ config I2C_PARPORT
580 tristate "Parallel port adapter" 581 tristate "Parallel port adapter"
581 depends on PARPORT 582 depends on PARPORT
582 select I2C_ALGOBIT 583 select I2C_ALGOBIT
584 select I2C_SMBUS
583 help 585 help
584 This supports parallel port I2C adapters such as the ones made by 586 This supports parallel port I2C adapters such as the ones made by
585 Philips or Velleman, Analog Devices evaluation boards, and more. 587 Philips or Velleman, Analog Devices evaluation boards, and more.
@@ -603,6 +605,7 @@ config I2C_PARPORT
603config I2C_PARPORT_LIGHT 605config I2C_PARPORT_LIGHT
604 tristate "Parallel port adapter (light)" 606 tristate "Parallel port adapter (light)"
605 select I2C_ALGOBIT 607 select I2C_ALGOBIT
608 select I2C_SMBUS
606 help 609 help
607 This supports parallel port I2C adapters such as the ones made by 610 This supports parallel port I2C adapters such as the ones made by
608 Philips or Velleman, Analog Devices evaluation boards, and more. 611 Philips or Velleman, Analog Devices evaluation boards, and more.
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 8de7d7b87bb0..bd8f1e4d9e6c 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -480,7 +480,7 @@ static struct i2c_adapter ali1535_adapter = {
480 .algo = &smbus_algorithm, 480 .algo = &smbus_algorithm,
481}; 481};
482 482
483static struct pci_device_id ali1535_ids[] = { 483static const struct pci_device_id ali1535_ids[] = {
484 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, 484 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
485 { }, 485 { },
486}; 486};
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 4687af40dd50..a409cfcf0629 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -417,7 +417,7 @@ static void __devexit ali1563_remove(struct pci_dev * dev)
417 ali1563_shutdown(dev); 417 ali1563_shutdown(dev);
418} 418}
419 419
420static struct pci_device_id __devinitdata ali1563_id_table[] = { 420static const struct pci_device_id ali1563_id_table[] __devinitconst = {
421 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) }, 421 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) },
422 {}, 422 {},
423}; 423};
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index e7e3205f1286..659f63f5e4af 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -477,7 +477,7 @@ static struct i2c_adapter ali15x3_adapter = {
477 .algo = &smbus_algorithm, 477 .algo = &smbus_algorithm,
478}; 478};
479 479
480static struct pci_device_id ali15x3_ids[] = { 480static const struct pci_device_id ali15x3_ids[] = {
481 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, 481 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
482 { 0, } 482 { 0, }
483}; 483};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 8f0b90ef8c76..c5a9fa488e7f 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -308,7 +308,7 @@ static const char* chipname[] = {
308 "nVidia nForce", "AMD8111", 308 "nVidia nForce", "AMD8111",
309}; 309};
310 310
311static struct pci_device_id amd756_ids[] = { 311static const struct pci_device_id amd756_ids[] = {
312 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B), 312 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B),
313 .driver_data = AMD756 }, 313 .driver_data = AMD756 },
314 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413), 314 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413),
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 5b4ad86ca166..d0dc970d7370 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -351,7 +351,7 @@ static const struct i2c_algorithm smbus_algorithm = {
351}; 351};
352 352
353 353
354static struct pci_device_id amd8111_ids[] = { 354static const struct pci_device_id amd8111_ids[] = {
355 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS2) }, 355 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS2) },
356 { 0, } 356 { 0, }
357}; 357};
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index bec9b845dd16..c767295ad1fb 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -105,7 +105,7 @@ static struct i2c_adapter hydra_adap = {
105 .algo_data = &hydra_bit_data, 105 .algo_data = &hydra_bit_data,
106}; 106};
107 107
108static struct pci_device_id hydra_ids[] = { 108static const struct pci_device_id hydra_ids[] = {
109 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) }, 109 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) },
110 { 0, } 110 { 0, }
111}; 111};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index df6ab553f975..9da5b05cdb52 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -41,7 +41,8 @@
41 Tolapai 0x5032 32 hard yes yes yes 41 Tolapai 0x5032 32 hard yes yes yes
42 ICH10 0x3a30 32 hard yes yes yes 42 ICH10 0x3a30 32 hard yes yes yes
43 ICH10 0x3a60 32 hard yes yes yes 43 ICH10 0x3a60 32 hard yes yes yes
44 PCH 0x3b30 32 hard yes yes yes 44 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
45 Cougar Point (PCH) 0x1c22 32 hard yes yes yes
45 46
46 Features supported by this driver: 47 Features supported by this driver:
47 Software PEC no 48 Software PEC no
@@ -561,7 +562,7 @@ static struct i2c_adapter i801_adapter = {
561 .algo = &smbus_algorithm, 562 .algo = &smbus_algorithm,
562}; 563};
563 564
564static struct pci_device_id i801_ids[] = { 565static const struct pci_device_id i801_ids[] = {
565 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) }, 566 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
566 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) }, 567 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
567 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) }, 568 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) },
@@ -578,6 +579,7 @@ static struct pci_device_id i801_ids[] = {
578 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) }, 579 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
579 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) }, 580 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
580 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) }, 581 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
582 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
581 { 0, } 583 { 0, }
582}; 584};
583 585
@@ -707,6 +709,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
707 case PCI_DEVICE_ID_INTEL_ICH10_4: 709 case PCI_DEVICE_ID_INTEL_ICH10_4:
708 case PCI_DEVICE_ID_INTEL_ICH10_5: 710 case PCI_DEVICE_ID_INTEL_ICH10_5:
709 case PCI_DEVICE_ID_INTEL_PCH_SMBUS: 711 case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
712 case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
710 i801_features |= FEATURE_I2C_BLOCK_READ; 713 i801_features |= FEATURE_I2C_BLOCK_READ;
711 /* fall through */ 714 /* fall through */
712 case PCI_DEVICE_ID_INTEL_82801DB_3: 715 case PCI_DEVICE_ID_INTEL_82801DB_3:
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index dba6eb053e2f..69c22f79f231 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -256,7 +256,7 @@ static struct i2c_adapter sch_adapter = {
256 .algo = &smbus_algorithm, 256 .algo = &smbus_algorithm,
257}; 257};
258 258
259static struct pci_device_id sch_ids[] = { 259static const struct pci_device_id sch_ids[] = {
260 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) }, 260 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
261 { 0, } 261 { 0, }
262}; 262};
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index ec11d1c4e77b..4a700587ef18 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -308,7 +308,7 @@ static struct i2c_algorithm smbus_algorithm = {
308}; 308};
309 309
310 310
311static struct pci_device_id nforce2_ids[] = { 311static const struct pci_device_id nforce2_ids[] = {
312 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) }, 312 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) },
313 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) }, 313 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) },
314 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) }, 314 { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) },
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 322c5691e38e..5f41ec0f72d2 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -1,7 +1,7 @@
1/* ------------------------------------------------------------------------ * 1/* ------------------------------------------------------------------------ *
2 * i2c-parport-light.c I2C bus over parallel port * 2 * i2c-parport-light.c I2C bus over parallel port *
3 * ------------------------------------------------------------------------ * 3 * ------------------------------------------------------------------------ *
4 Copyright (C) 2003-2007 Jean Delvare <khali@linux-fr.org> 4 Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
5 5
6 Based on older i2c-velleman.c driver 6 Based on older i2c-velleman.c driver
7 Copyright (C) 1995-2000 Simon G. Vogl 7 Copyright (C) 1995-2000 Simon G. Vogl
@@ -27,10 +27,12 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/delay.h>
30#include <linux/platform_device.h> 31#include <linux/platform_device.h>
31#include <linux/ioport.h> 32#include <linux/ioport.h>
32#include <linux/i2c.h> 33#include <linux/i2c.h>
33#include <linux/i2c-algo-bit.h> 34#include <linux/i2c-algo-bit.h>
35#include <linux/i2c-smbus.h>
34#include <asm/io.h> 36#include <asm/io.h>
35#include "i2c-parport.h" 37#include "i2c-parport.h"
36 38
@@ -43,6 +45,10 @@ static u16 base;
43module_param(base, ushort, 0); 45module_param(base, ushort, 0);
44MODULE_PARM_DESC(base, "Base I/O address"); 46MODULE_PARM_DESC(base, "Base I/O address");
45 47
48static int irq;
49module_param(irq, int, 0);
50MODULE_PARM_DESC(irq, "IRQ (optional)");
51
46/* ----- Low-level parallel port access ----------------------------------- */ 52/* ----- Low-level parallel port access ----------------------------------- */
47 53
48static inline void port_write(unsigned char p, unsigned char d) 54static inline void port_write(unsigned char p, unsigned char d)
@@ -119,6 +125,16 @@ static struct i2c_adapter parport_adapter = {
119 .name = "Parallel port adapter (light)", 125 .name = "Parallel port adapter (light)",
120}; 126};
121 127
128/* SMBus alert support */
129static struct i2c_smbus_alert_setup alert_data = {
130 .alert_edge_triggered = 1,
131};
132static struct i2c_client *ara;
133static struct lineop parport_ctrl_irq = {
134 .val = (1 << 4),
135 .port = CTRL,
136};
137
122static int __devinit i2c_parport_probe(struct platform_device *pdev) 138static int __devinit i2c_parport_probe(struct platform_device *pdev)
123{ 139{
124 int err; 140 int err;
@@ -127,18 +143,39 @@ static int __devinit i2c_parport_probe(struct platform_device *pdev)
127 parport_setsda(NULL, 1); 143 parport_setsda(NULL, 1);
128 parport_setscl(NULL, 1); 144 parport_setscl(NULL, 1);
129 /* Other init if needed (power on...) */ 145 /* Other init if needed (power on...) */
130 if (adapter_parm[type].init.val) 146 if (adapter_parm[type].init.val) {
131 line_set(1, &adapter_parm[type].init); 147 line_set(1, &adapter_parm[type].init);
148 /* Give powered devices some time to settle */
149 msleep(100);
150 }
132 151
133 parport_adapter.dev.parent = &pdev->dev; 152 parport_adapter.dev.parent = &pdev->dev;
134 err = i2c_bit_add_bus(&parport_adapter); 153 err = i2c_bit_add_bus(&parport_adapter);
135 if (err) 154 if (err) {
136 dev_err(&pdev->dev, "Unable to register with I2C\n"); 155 dev_err(&pdev->dev, "Unable to register with I2C\n");
137 return err; 156 return err;
157 }
158
159 /* Setup SMBus alert if supported */
160 if (adapter_parm[type].smbus_alert && irq) {
161 alert_data.irq = irq;
162 ara = i2c_setup_smbus_alert(&parport_adapter, &alert_data);
163 if (ara)
164 line_set(1, &parport_ctrl_irq);
165 else
166 dev_warn(&pdev->dev, "Failed to register ARA client\n");
167 }
168
169 return 0;
138} 170}
139 171
140static int __devexit i2c_parport_remove(struct platform_device *pdev) 172static int __devexit i2c_parport_remove(struct platform_device *pdev)
141{ 173{
174 if (ara) {
175 line_set(0, &parport_ctrl_irq);
176 i2c_unregister_device(ara);
177 ara = NULL;
178 }
142 i2c_del_adapter(&parport_adapter); 179 i2c_del_adapter(&parport_adapter);
143 180
144 /* Un-init if needed (power off...) */ 181 /* Un-init if needed (power off...) */
@@ -205,6 +242,9 @@ static int __init i2c_parport_init(void)
205 if (!request_region(base, 3, DRVNAME)) 242 if (!request_region(base, 3, DRVNAME))
206 return -EBUSY; 243 return -EBUSY;
207 244
245 if (irq != 0)
246 pr_info(DRVNAME ": using irq %d\n", irq);
247
208 if (!adapter_parm[type].getscl.val) 248 if (!adapter_parm[type].getscl.val)
209 parport_algo_data.getscl = NULL; 249 parport_algo_data.getscl = NULL;
210 250
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 0d8998610c74..220fca7f23a6 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -1,7 +1,7 @@
1/* ------------------------------------------------------------------------ * 1/* ------------------------------------------------------------------------ *
2 * i2c-parport.c I2C bus over parallel port * 2 * i2c-parport.c I2C bus over parallel port *
3 * ------------------------------------------------------------------------ * 3 * ------------------------------------------------------------------------ *
4 Copyright (C) 2003-2007 Jean Delvare <khali@linux-fr.org> 4 Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
5 5
6 Based on older i2c-philips-par.c driver 6 Based on older i2c-philips-par.c driver
7 Copyright (C) 1995-2000 Simon G. Vogl 7 Copyright (C) 1995-2000 Simon G. Vogl
@@ -27,9 +27,11 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/delay.h>
30#include <linux/parport.h> 31#include <linux/parport.h>
31#include <linux/i2c.h> 32#include <linux/i2c.h>
32#include <linux/i2c-algo-bit.h> 33#include <linux/i2c-algo-bit.h>
34#include <linux/i2c-smbus.h>
33#include "i2c-parport.h" 35#include "i2c-parport.h"
34 36
35/* ----- Device list ------------------------------------------------------ */ 37/* ----- Device list ------------------------------------------------------ */
@@ -38,6 +40,8 @@ struct i2c_par {
38 struct pardevice *pdev; 40 struct pardevice *pdev;
39 struct i2c_adapter adapter; 41 struct i2c_adapter adapter;
40 struct i2c_algo_bit_data algo_data; 42 struct i2c_algo_bit_data algo_data;
43 struct i2c_smbus_alert_setup alert_data;
44 struct i2c_client *ara;
41 struct i2c_par *next; 45 struct i2c_par *next;
42}; 46};
43 47
@@ -143,6 +147,19 @@ static struct i2c_algo_bit_data parport_algo_data = {
143 147
144/* ----- I2c and parallel port call-back functions and structures --------- */ 148/* ----- I2c and parallel port call-back functions and structures --------- */
145 149
150void i2c_parport_irq(void *data)
151{
152 struct i2c_par *adapter = data;
153 struct i2c_client *ara = adapter->ara;
154
155 if (ara) {
156 dev_dbg(&ara->dev, "SMBus alert received\n");
157 i2c_handle_smbus_alert(ara);
158 } else
159 dev_dbg(&adapter->adapter.dev,
160 "SMBus alert received but no ARA client!\n");
161}
162
146static void i2c_parport_attach (struct parport *port) 163static void i2c_parport_attach (struct parport *port)
147{ 164{
148 struct i2c_par *adapter; 165 struct i2c_par *adapter;
@@ -154,8 +171,9 @@ static void i2c_parport_attach (struct parport *port)
154 } 171 }
155 172
156 pr_debug("i2c-parport: attaching to %s\n", port->name); 173 pr_debug("i2c-parport: attaching to %s\n", port->name);
174 parport_disable_irq(port);
157 adapter->pdev = parport_register_device(port, "i2c-parport", 175 adapter->pdev = parport_register_device(port, "i2c-parport",
158 NULL, NULL, NULL, PARPORT_FLAG_EXCL, NULL); 176 NULL, NULL, i2c_parport_irq, PARPORT_FLAG_EXCL, adapter);
159 if (!adapter->pdev) { 177 if (!adapter->pdev) {
160 printk(KERN_ERR "i2c-parport: Unable to register with parport\n"); 178 printk(KERN_ERR "i2c-parport: Unable to register with parport\n");
161 goto ERROR0; 179 goto ERROR0;
@@ -185,14 +203,29 @@ static void i2c_parport_attach (struct parport *port)
185 parport_setsda(port, 1); 203 parport_setsda(port, 1);
186 parport_setscl(port, 1); 204 parport_setscl(port, 1);
187 /* Other init if needed (power on...) */ 205 /* Other init if needed (power on...) */
188 if (adapter_parm[type].init.val) 206 if (adapter_parm[type].init.val) {
189 line_set(port, 1, &adapter_parm[type].init); 207 line_set(port, 1, &adapter_parm[type].init);
208 /* Give powered devices some time to settle */
209 msleep(100);
210 }
190 211
191 if (i2c_bit_add_bus(&adapter->adapter) < 0) { 212 if (i2c_bit_add_bus(&adapter->adapter) < 0) {
192 printk(KERN_ERR "i2c-parport: Unable to register with I2C\n"); 213 printk(KERN_ERR "i2c-parport: Unable to register with I2C\n");
193 goto ERROR1; 214 goto ERROR1;
194 } 215 }
195 216
217 /* Setup SMBus alert if supported */
218 if (adapter_parm[type].smbus_alert) {
219 adapter->alert_data.alert_edge_triggered = 1;
220 adapter->ara = i2c_setup_smbus_alert(&adapter->adapter,
221 &adapter->alert_data);
222 if (adapter->ara)
223 parport_enable_irq(port);
224 else
225 printk(KERN_WARNING "i2c-parport: Failed to register "
226 "ARA client\n");
227 }
228
196 /* Add the new adapter to the list */ 229 /* Add the new adapter to the list */
197 adapter->next = adapter_list; 230 adapter->next = adapter_list;
198 adapter_list = adapter; 231 adapter_list = adapter;
@@ -213,6 +246,10 @@ static void i2c_parport_detach (struct parport *port)
213 for (prev = NULL, adapter = adapter_list; adapter; 246 for (prev = NULL, adapter = adapter_list; adapter;
214 prev = adapter, adapter = adapter->next) { 247 prev = adapter, adapter = adapter->next) {
215 if (adapter->pdev->port == port) { 248 if (adapter->pdev->port == port) {
249 if (adapter->ara) {
250 parport_disable_irq(port);
251 i2c_unregister_device(adapter->ara);
252 }
216 i2c_del_adapter(&adapter->adapter); 253 i2c_del_adapter(&adapter->adapter);
217 254
218 /* Un-init if needed (power off...) */ 255 /* Un-init if needed (power off...) */
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
index ed69d846cb95..a9f66816546c 100644
--- a/drivers/i2c/busses/i2c-parport.h
+++ b/drivers/i2c/busses/i2c-parport.h
@@ -1,7 +1,7 @@
1/* ------------------------------------------------------------------------ * 1/* ------------------------------------------------------------------------ *
2 * i2c-parport.h I2C bus over parallel port * 2 * i2c-parport.h I2C bus over parallel port *
3 * ------------------------------------------------------------------------ * 3 * ------------------------------------------------------------------------ *
4 Copyright (C) 2003-2004 Jean Delvare <khali@linux-fr.org> 4 Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
@@ -38,6 +38,7 @@ struct adapter_parm {
38 struct lineop getsda; 38 struct lineop getsda;
39 struct lineop getscl; 39 struct lineop getscl;
40 struct lineop init; 40 struct lineop init;
41 unsigned int smbus_alert:1;
41}; 42};
42 43
43static struct adapter_parm adapter_parm[] = { 44static struct adapter_parm adapter_parm[] = {
@@ -73,6 +74,7 @@ static struct adapter_parm adapter_parm[] = {
73 .setscl = { 0x01, DATA, 1 }, 74 .setscl = { 0x01, DATA, 1 },
74 .getsda = { 0x10, STAT, 1 }, 75 .getsda = { 0x10, STAT, 1 },
75 .init = { 0xf0, DATA, 0 }, 76 .init = { 0xf0, DATA, 0 },
77 .smbus_alert = 1,
76 }, 78 },
77 /* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */ 79 /* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */
78 { 80 {
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index adf0fbb902f0..0d20ff46a518 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -400,7 +400,7 @@ static void __devexit pasemi_smb_remove(struct pci_dev *dev)
400 kfree(smbus); 400 kfree(smbus);
401} 401}
402 402
403static struct pci_device_id pasemi_smb_ids[] = { 403static const struct pci_device_id pasemi_smb_ids[] = {
404 { PCI_DEVICE(0x1959, 0xa003) }, 404 { PCI_DEVICE(0x1959, 0xa003) },
405 { 0, } 405 { 0, }
406}; 406};
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index e56e4b6823ca..ee9da6fcf69a 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -472,7 +472,7 @@ static struct i2c_adapter piix4_adapter = {
472 .algo = &smbus_algorithm, 472 .algo = &smbus_algorithm,
473}; 473};
474 474
475static struct pci_device_id piix4_ids[] = { 475static const struct pci_device_id piix4_ids[] = {
476 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) }, 476 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
477 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) }, 477 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
478 { PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) }, 478 { PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) },
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 844569f7d8b7..55a71370c79b 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -369,7 +369,7 @@ static struct i2c_adapter sis5595_adapter = {
369 .algo = &smbus_algorithm, 369 .algo = &smbus_algorithm,
370}; 370};
371 371
372static struct pci_device_id sis5595_ids[] __devinitdata = { 372static const struct pci_device_id sis5595_ids[] __devinitconst = {
373 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, 373 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
374 { 0, } 374 { 0, }
375}; 375};
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 68cff7af7013..2309c7f1bde2 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -468,7 +468,7 @@ static struct i2c_adapter sis630_adapter = {
468 .algo = &smbus_algorithm, 468 .algo = &smbus_algorithm,
469}; 469};
470 470
471static struct pci_device_id sis630_ids[] __devinitdata = { 471static const struct pci_device_id sis630_ids[] __devinitconst = {
472 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, 472 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
473 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) }, 473 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
474 { 0, } 474 { 0, }
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 1649963b00dc..d43d8f8943dd 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -245,7 +245,7 @@ static struct i2c_adapter sis96x_adapter = {
245 .algo = &smbus_algorithm, 245 .algo = &smbus_algorithm,
246}; 246};
247 247
248static struct pci_device_id sis96x_ids[] = { 248static const struct pci_device_id sis96x_ids[] = {
249 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) }, 249 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
250 { 0, } 250 { 0, }
251}; 251};
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index e29b6d5ba8ef..b5b1bbf37d3c 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -31,11 +31,13 @@
31#define CMD_I2C_IO_BEGIN (1<<0) 31#define CMD_I2C_IO_BEGIN (1<<0)
32#define CMD_I2C_IO_END (1<<1) 32#define CMD_I2C_IO_END (1<<1)
33 33
34/* i2c bit delay, default is 10us -> 100kHz */ 34/* i2c bit delay, default is 10us -> 100kHz max
35 (in practice, due to additional delays in the i2c bitbanging
 36 code this results in an i2c clock of about 50kHz) */
35static unsigned short delay = 10; 37static unsigned short delay = 10;
36module_param(delay, ushort, 0); 38module_param(delay, ushort, 0);
37MODULE_PARM_DESC(delay, "bit delay in microseconds, " 39MODULE_PARM_DESC(delay, "bit delay in microseconds "
38 "e.g. 10 for 100kHz (default is 100kHz)"); 40 "(default is 10us for 100kHz max)");
39 41
40static int usb_read(struct i2c_adapter *adapter, int cmd, 42static int usb_read(struct i2c_adapter *adapter, int cmd,
41 int value, int index, void *data, int len); 43 int value, int index, void *data, int len);
@@ -137,7 +139,7 @@ static const struct i2c_algorithm usb_algorithm = {
137 * Future Technology Devices International Ltd., later a pair was 139 * Future Technology Devices International Ltd., later a pair was
138 * bought from EZPrototypes 140 * bought from EZPrototypes
139 */ 141 */
140static struct usb_device_id i2c_tiny_usb_table [] = { 142static const struct usb_device_id i2c_tiny_usb_table[] = {
141 { USB_DEVICE(0x0403, 0xc631) }, /* FTDI */ 143 { USB_DEVICE(0x0403, 0xc631) }, /* FTDI */
142 { USB_DEVICE(0x1c40, 0x0534) }, /* EZPrototypes */ 144 { USB_DEVICE(0x1c40, 0x0534) }, /* EZPrototypes */
143 { } /* Terminating entry */ 145 { } /* Terminating entry */
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 8b24f192103a..de78283bddbe 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -89,7 +89,7 @@ static struct i2c_adapter vt586b_adapter = {
89}; 89};
90 90
91 91
92static struct pci_device_id vt586b_ids[] __devinitdata = { 92static const struct pci_device_id vt586b_ids[] __devinitconst = {
93 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) }, 93 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) },
94 { 0, } 94 { 0, }
95}; 95};
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index a84a909e1234..d57292e5dae0 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -444,7 +444,7 @@ release_region:
444 return error; 444 return error;
445} 445}
446 446
447static struct pci_device_id vt596_ids[] = { 447static const struct pci_device_id vt596_ids[] = {
448 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596_3), 448 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596_3),
449 .driver_data = SMBBA1 }, 449 .driver_data = SMBBA1 },
450 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596B_3), 450 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596B_3),
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 10be7b5fbe97..3202a86f420e 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -34,6 +34,7 @@
34#include <linux/hardirq.h> 34#include <linux/hardirq.h>
35#include <linux/irqflags.h> 35#include <linux/irqflags.h>
36#include <linux/rwsem.h> 36#include <linux/rwsem.h>
37#include <linux/pm_runtime.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
38 39
39#include "i2c-core.h" 40#include "i2c-core.h"
@@ -184,6 +185,52 @@ static int i2c_device_pm_resume(struct device *dev)
184#define i2c_device_pm_resume NULL 185#define i2c_device_pm_resume NULL
185#endif 186#endif
186 187
188#ifdef CONFIG_PM_RUNTIME
189static int i2c_device_runtime_suspend(struct device *dev)
190{
191 const struct dev_pm_ops *pm;
192
193 if (!dev->driver)
194 return 0;
195 pm = dev->driver->pm;
196 if (!pm || !pm->runtime_suspend)
197 return 0;
198 return pm->runtime_suspend(dev);
199}
200
201static int i2c_device_runtime_resume(struct device *dev)
202{
203 const struct dev_pm_ops *pm;
204
205 if (!dev->driver)
206 return 0;
207 pm = dev->driver->pm;
208 if (!pm || !pm->runtime_resume)
209 return 0;
210 return pm->runtime_resume(dev);
211}
212
213static int i2c_device_runtime_idle(struct device *dev)
214{
215 const struct dev_pm_ops *pm = NULL;
216 int ret;
217
218 if (dev->driver)
219 pm = dev->driver->pm;
220 if (pm && pm->runtime_idle) {
221 ret = pm->runtime_idle(dev);
222 if (ret)
223 return ret;
224 }
225
226 return pm_runtime_suspend(dev);
227}
228#else
229#define i2c_device_runtime_suspend NULL
230#define i2c_device_runtime_resume NULL
231#define i2c_device_runtime_idle NULL
232#endif
233
187static int i2c_device_suspend(struct device *dev, pm_message_t mesg) 234static int i2c_device_suspend(struct device *dev, pm_message_t mesg)
188{ 235{
189 struct i2c_client *client = i2c_verify_client(dev); 236 struct i2c_client *client = i2c_verify_client(dev);
@@ -251,6 +298,9 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
251static const struct dev_pm_ops i2c_device_pm_ops = { 298static const struct dev_pm_ops i2c_device_pm_ops = {
252 .suspend = i2c_device_pm_suspend, 299 .suspend = i2c_device_pm_suspend,
253 .resume = i2c_device_pm_resume, 300 .resume = i2c_device_pm_resume,
301 .runtime_suspend = i2c_device_runtime_suspend,
302 .runtime_resume = i2c_device_runtime_resume,
303 .runtime_idle = i2c_device_runtime_idle,
254}; 304};
255 305
256struct bus_type i2c_bus_type = { 306struct bus_type i2c_bus_type = {
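
For context on how the new bus-level runtime PM hooks get used: they simply dereference dev->driver->pm, so an I2C client driver opts in by filling in runtime callbacks in its dev_pm_ops and enabling runtime PM on its device. A minimal sketch, assuming a hypothetical client driver "foo" that is not part of this patch:

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* put the chip into its low-power state here */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* restore full power here */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume  = foo_runtime_resume,
};

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	/* allow the core to call the runtime callbacks above */
	pm_runtime_enable(&client->dev);
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	pm_runtime_disable(&client->dev);
	return 0;
}

static const struct i2c_device_id foo_ids[] = {
	{ "foo", 0 },
	{ }
};

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm   = &foo_pm_ops,	/* reached via i2c_device_runtime_* */
	},
	.probe    = foo_probe,
	.remove   = foo_remove,
	.id_table = foo_ids,
};

static int __init foo_init(void)
{
	return i2c_add_driver(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	i2c_del_driver(&foo_driver);
}
module_exit(foo_exit);
MODULE_LICENSE("GPL");
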
@@ -1133,7 +1183,7 @@ EXPORT_SYMBOL(i2c_transfer);
1133 * i2c_master_send - issue a single I2C message in master transmit mode 1183 * i2c_master_send - issue a single I2C message in master transmit mode
1134 * @client: Handle to slave device 1184 * @client: Handle to slave device
1135 * @buf: Data that will be written to the slave 1185 * @buf: Data that will be written to the slave
1136 * @count: How many bytes to write 1186 * @count: How many bytes to write, must be less than 64k since msg.len is u16
1137 * 1187 *
1138 * Returns negative errno, or else the number of bytes written. 1188 * Returns negative errno, or else the number of bytes written.
1139 */ 1189 */
@@ -1160,7 +1210,7 @@ EXPORT_SYMBOL(i2c_master_send);
1160 * i2c_master_recv - issue a single I2C message in master receive mode 1210 * i2c_master_recv - issue a single I2C message in master receive mode
1161 * @client: Handle to slave device 1211 * @client: Handle to slave device
1162 * @buf: Where to store data read from slave 1212 * @buf: Where to store data read from slave
1163 * @count: How many bytes to read 1213 * @count: How many bytes to read, must be less than 64k since msg.len is u16
1164 * 1214 *
1165 * Returns negative errno, or else the number of bytes read. 1215 * Returns negative errno, or else the number of bytes read.
1166 */ 1216 */
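
The new note that @count must stay below 64k exists because msg.len in struct i2c_msg is a u16, so a caller with a larger buffer has to split the transfer itself. A minimal sketch of such a wrapper follows; the helper name is hypothetical, and note that each i2c_master_send() call is its own transaction with its own START/STOP, so this only suits devices that accept the data in independent chunks:

#include <linux/i2c.h>
#include <linux/kernel.h>

static int foo_send_large(struct i2c_client *client, const char *buf,
			  size_t count)
{
	while (count) {
		/* keep each message below the u16 msg.len limit */
		int chunk = min_t(size_t, count, 0xffff);
		int ret = i2c_master_send(client, buf, chunk);

		if (ret < 0)
			return ret;
		if (ret == 0)
			return -EIO;	/* no progress, avoid looping forever */
		buf += ret;
		count -= ret;
	}
	return 0;
}
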
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
new file mode 100644
index 000000000000..421278221243
--- /dev/null
+++ b/drivers/i2c/i2c-smbus.c
@@ -0,0 +1,263 @@
1/*
2 * i2c-smbus.c - SMBus extensions to the I2C protocol
3 *
4 * Copyright (C) 2008 David Brownell
5 * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/device.h>
25#include <linux/semaphore.h>
26#include <linux/interrupt.h>
27#include <linux/workqueue.h>
28#include <linux/i2c.h>
29#include <linux/i2c-smbus.h>
30
31struct i2c_smbus_alert {
32 unsigned int alert_edge_triggered:1;
33 int irq;
34 struct work_struct alert;
35 struct i2c_client *ara; /* Alert response address */
36};
37
38struct alert_data {
39 unsigned short addr;
40 u8 flag:1;
41};
42
43/* If this is the alerting device, notify its driver */
44static int smbus_do_alert(struct device *dev, void *addrp)
45{
46 struct i2c_client *client = i2c_verify_client(dev);
47 struct alert_data *data = addrp;
48
49 if (!client || client->addr != data->addr)
50 return 0;
51 if (client->flags & I2C_CLIENT_TEN)
52 return 0;
53
54 /*
55 * Drivers should either disable alerts, or provide at least
56 * a minimal handler. Lock so client->driver won't change.
57 */
58 down(&dev->sem);
59 if (client->driver) {
60 if (client->driver->alert)
61 client->driver->alert(client, data->flag);
62 else
63 dev_warn(&client->dev, "no driver alert()!\n");
64 } else
65 dev_dbg(&client->dev, "alert with no driver\n");
66 up(&dev->sem);
67
68 /* Stop iterating after we find the device */
69 return -EBUSY;
70}
71
72/*
73 * The alert IRQ handler needs to hand work off to a task which can issue
74 * SMBus calls, because those sleeping calls can't be made in IRQ context.
75 */
76static void smbus_alert(struct work_struct *work)
77{
78 struct i2c_smbus_alert *alert;
79 struct i2c_client *ara;
80 unsigned short prev_addr = 0; /* Not a valid address */
81
82 alert = container_of(work, struct i2c_smbus_alert, alert);
83 ara = alert->ara;
84
85 for (;;) {
86 s32 status;
87 struct alert_data data;
88
89 /*
90 * Devices with pending alerts reply in address order, low
91 * to high, because of slave transmit arbitration. After
92 * responding, an SMBus device stops asserting SMBALERT#.
93 *
 94 * Note that SMBus 2.0 reserves 10-bit addresses for future
95 * use. We neither handle them, nor try to use PEC here.
96 */
97 status = i2c_smbus_read_byte(ara);
98 if (status < 0)
99 break;
100
101 data.flag = status & 1;
102 data.addr = status >> 1;
103
104 if (data.addr == prev_addr) {
105 dev_warn(&ara->dev, "Duplicate SMBALERT# from dev "
106 "0x%02x, skipping\n", data.addr);
107 break;
108 }
109 dev_dbg(&ara->dev, "SMBALERT# from dev 0x%02x, flag %d\n",
110 data.addr, data.flag);
111
112 /* Notify driver for the device which issued the alert */
113 device_for_each_child(&ara->adapter->dev, &data,
114 smbus_do_alert);
115 prev_addr = data.addr;
116 }
117
118 /* We handled all alerts; re-enable level-triggered IRQs */
119 if (!alert->alert_edge_triggered)
120 enable_irq(alert->irq);
121}
122
123static irqreturn_t smbalert_irq(int irq, void *d)
124{
125 struct i2c_smbus_alert *alert = d;
126
127 /* Disable level-triggered IRQs until we handle them */
128 if (!alert->alert_edge_triggered)
129 disable_irq_nosync(irq);
130
131 schedule_work(&alert->alert);
132 return IRQ_HANDLED;
133}
134
135/* Setup SMBALERT# infrastructure */
136static int smbalert_probe(struct i2c_client *ara,
137 const struct i2c_device_id *id)
138{
139 struct i2c_smbus_alert_setup *setup = ara->dev.platform_data;
140 struct i2c_smbus_alert *alert;
141 struct i2c_adapter *adapter = ara->adapter;
142 int res;
143
144 alert = kzalloc(sizeof(struct i2c_smbus_alert), GFP_KERNEL);
145 if (!alert)
146 return -ENOMEM;
147
148 alert->alert_edge_triggered = setup->alert_edge_triggered;
149 alert->irq = setup->irq;
150 INIT_WORK(&alert->alert, smbus_alert);
151 alert->ara = ara;
152
153 if (setup->irq > 0) {
154 res = devm_request_irq(&ara->dev, setup->irq, smbalert_irq,
155 0, "smbus_alert", alert);
156 if (res) {
157 kfree(alert);
158 return res;
159 }
160 }
161
162 i2c_set_clientdata(ara, alert);
163 dev_info(&adapter->dev, "supports SMBALERT#, %s trigger\n",
164 setup->alert_edge_triggered ? "edge" : "level");
165
166 return 0;
167}
168
169/* IRQ resource is managed so it is freed automatically */
170static int smbalert_remove(struct i2c_client *ara)
171{
172 struct i2c_smbus_alert *alert = i2c_get_clientdata(ara);
173
174 cancel_work_sync(&alert->alert);
175
176 i2c_set_clientdata(ara, NULL);
177 kfree(alert);
178 return 0;
179}
180
181static const struct i2c_device_id smbalert_ids[] = {
182 { "smbus_alert", 0 },
183 { /* LIST END */ }
184};
185MODULE_DEVICE_TABLE(i2c, smbalert_ids);
186
187static struct i2c_driver smbalert_driver = {
188 .driver = {
189 .name = "smbus_alert",
190 },
191 .probe = smbalert_probe,
192 .remove = smbalert_remove,
193 .id_table = smbalert_ids,
194};
195
196/**
197 * i2c_setup_smbus_alert - Setup SMBus alert support
198 * @adapter: the target adapter
199 * @setup: setup data for the SMBus alert handler
200 * Context: can sleep
201 *
202 * Setup handling of the SMBus alert protocol on a given I2C bus segment.
203 *
204 * Handling can be done either through our IRQ handler, or by the
205 * adapter (from its handler, periodic polling, or whatever).
206 *
207 * NOTE that if we manage the IRQ, we *MUST* know if it's level or
208 * edge triggered in order to hand it to the workqueue correctly.
209 * If triggering the alert seems to wedge the system, you probably
210 * should have said it's level triggered.
211 *
212 * This returns the ara client, which should be saved for later use with
213 * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or NULL
214 * to indicate an error.
215 */
216struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
217 struct i2c_smbus_alert_setup *setup)
218{
219 struct i2c_board_info ara_board_info = {
220 I2C_BOARD_INFO("smbus_alert", 0x0c),
221 .platform_data = setup,
222 };
223
224 return i2c_new_device(adapter, &ara_board_info);
225}
226EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert);
227
228/**
229 * i2c_handle_smbus_alert - Handle an SMBus alert
230 * @ara: the ARA client on the relevant adapter
231 * Context: can't sleep
232 *
233 * Helper function to be called from an I2C bus driver's interrupt
234 * handler. It will schedule the alert work, in turn calling the
235 * corresponding I2C device driver's alert function.
236 *
237 * It is assumed that ara is a valid i2c client previously returned by
238 * i2c_setup_smbus_alert().
239 */
240int i2c_handle_smbus_alert(struct i2c_client *ara)
241{
242 struct i2c_smbus_alert *alert = i2c_get_clientdata(ara);
243
244 return schedule_work(&alert->alert);
245}
246EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
247
248static int __init i2c_smbus_init(void)
249{
250 return i2c_add_driver(&smbalert_driver);
251}
252
253static void __exit i2c_smbus_exit(void)
254{
255 i2c_del_driver(&smbalert_driver);
256}
257
258module_init(i2c_smbus_init);
259module_exit(i2c_smbus_exit);
260
261MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
262MODULE_DESCRIPTION("SMBus protocol extensions support");
263MODULE_LICENSE("GPL");
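
The new file has two consumers: a bus/adapter driver registers the ARA client and forwards its SMBALERT# interrupt, and a client driver supplies the .alert() callback that smbus_do_alert() invokes. A minimal sketch of both sides, assuming a hypothetical adapter and client named "foo" (the parport drivers above are the real in-tree users):

#include <linux/i2c.h>
#include <linux/i2c-smbus.h>
#include <linux/interrupt.h>

/* Adapter side: with .irq = 0 the helper does not request an interrupt,
 * so the adapter driver forwards SMBALERT# itself. Unregister the ARA
 * client with i2c_unregister_device() on teardown. */
static struct i2c_smbus_alert_setup foo_alert_setup = {
	.alert_edge_triggered = 0,		/* level triggered */
	.irq = 0,
};
static struct i2c_client *foo_ara;

static void foo_enable_alert(struct i2c_adapter *adap)
{
	foo_ara = i2c_setup_smbus_alert(adap, &foo_alert_setup);
}

static irqreturn_t foo_smbalert_irq(int irq, void *dev_id)
{
	if (foo_ara)
		i2c_handle_smbus_alert(foo_ara);	/* schedules the ARA read */
	return IRQ_HANDLED;
}

/* Client side: the core calls .alert() on the device that asserted
 * SMBALERT#, passing the event flag from the alert response byte. */
static void foo_alert(struct i2c_client *client, unsigned int flag)
{
	dev_info(&client->dev, "SMBALERT# received, flag %u\n", flag);
}

static struct i2c_driver foo_client_driver = {
	.driver = {
		.name = "foo",
	},
	.alert = foo_alert,
	/* .probe, .remove and .id_table omitted for brevity */
};
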
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index dd0db67bf8d7..975adce5f40c 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -20,6 +20,7 @@ config INFINIBAND_USER_MAD
20 20
21config INFINIBAND_USER_ACCESS 21config INFINIBAND_USER_ACCESS
22 tristate "InfiniBand userspace access (verbs and CM)" 22 tristate "InfiniBand userspace access (verbs and CM)"
23 select ANON_INODES
23 ---help--- 24 ---help---
24 Userspace InfiniBand access support. This enables the 25 Userspace InfiniBand access support. This enables the
25 kernel side of userspace verbs and the userspace 26 kernel side of userspace verbs and the userspace
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f504c9b00c1b..1b09b735c5a8 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1215,15 +1215,18 @@ static void ib_ucm_release_dev(struct device *dev)
1215 1215
1216 ucm_dev = container_of(dev, struct ib_ucm_device, dev); 1216 ucm_dev = container_of(dev, struct ib_ucm_device, dev);
1217 cdev_del(&ucm_dev->cdev); 1217 cdev_del(&ucm_dev->cdev);
1218 clear_bit(ucm_dev->devnum, dev_map); 1218 if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
1219 clear_bit(ucm_dev->devnum, dev_map);
1220 else
1221 clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map);
1219 kfree(ucm_dev); 1222 kfree(ucm_dev);
1220} 1223}
1221 1224
1222static const struct file_operations ucm_fops = { 1225static const struct file_operations ucm_fops = {
1223 .owner = THIS_MODULE, 1226 .owner = THIS_MODULE,
1224 .open = ib_ucm_open, 1227 .open = ib_ucm_open,
1225 .release = ib_ucm_close, 1228 .release = ib_ucm_close,
1226 .write = ib_ucm_write, 1229 .write = ib_ucm_write,
1227 .poll = ib_ucm_poll, 1230 .poll = ib_ucm_poll,
1228}; 1231};
1229 1232
@@ -1237,8 +1240,32 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
1237} 1240}
1238static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); 1241static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
1239 1242
1243static dev_t overflow_maj;
1244static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
1245static int find_overflow_devnum(void)
1246{
1247 int ret;
1248
1249 if (!overflow_maj) {
1250 ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
1251 "infiniband_cm");
1252 if (ret) {
1253 printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
1254 return ret;
1255 }
1256 }
1257
1258 ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES);
1259 if (ret >= IB_UCM_MAX_DEVICES)
1260 return -1;
1261
1262 return ret;
1263}
1264
1240static void ib_ucm_add_one(struct ib_device *device) 1265static void ib_ucm_add_one(struct ib_device *device)
1241{ 1266{
1267 int devnum;
1268 dev_t base;
1242 struct ib_ucm_device *ucm_dev; 1269 struct ib_ucm_device *ucm_dev;
1243 1270
1244 if (!device->alloc_ucontext || 1271 if (!device->alloc_ucontext ||
@@ -1251,16 +1278,25 @@ static void ib_ucm_add_one(struct ib_device *device)
1251 1278
1252 ucm_dev->ib_dev = device; 1279 ucm_dev->ib_dev = device;
1253 1280
1254 ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES); 1281 devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
1255 if (ucm_dev->devnum >= IB_UCM_MAX_DEVICES) 1282 if (devnum >= IB_UCM_MAX_DEVICES) {
1256 goto err; 1283 devnum = find_overflow_devnum();
1257 1284 if (devnum < 0)
1258 set_bit(ucm_dev->devnum, dev_map); 1285 goto err;
1286
1287 ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES;
1288 base = devnum + overflow_maj;
1289 set_bit(devnum, overflow_map);
1290 } else {
1291 ucm_dev->devnum = devnum;
1292 base = devnum + IB_UCM_BASE_DEV;
1293 set_bit(devnum, dev_map);
1294 }
1259 1295
1260 cdev_init(&ucm_dev->cdev, &ucm_fops); 1296 cdev_init(&ucm_dev->cdev, &ucm_fops);
1261 ucm_dev->cdev.owner = THIS_MODULE; 1297 ucm_dev->cdev.owner = THIS_MODULE;
1262 kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum); 1298 kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum);
1263 if (cdev_add(&ucm_dev->cdev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1)) 1299 if (cdev_add(&ucm_dev->cdev, base, 1))
1264 goto err; 1300 goto err;
1265 1301
1266 ucm_dev->dev.class = &cm_class; 1302 ucm_dev->dev.class = &cm_class;
@@ -1281,7 +1317,10 @@ err_dev:
1281 device_unregister(&ucm_dev->dev); 1317 device_unregister(&ucm_dev->dev);
1282err_cdev: 1318err_cdev:
1283 cdev_del(&ucm_dev->cdev); 1319 cdev_del(&ucm_dev->cdev);
1284 clear_bit(ucm_dev->devnum, dev_map); 1320 if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
1321 clear_bit(devnum, dev_map);
1322 else
1323 clear_bit(devnum, overflow_map);
1285err: 1324err:
1286 kfree(ucm_dev); 1325 kfree(ucm_dev);
1287 return; 1326 return;
@@ -1340,6 +1379,8 @@ static void __exit ib_ucm_cleanup(void)
1340 ib_unregister_client(&ucm_client); 1379 ib_unregister_client(&ucm_client);
1341 class_remove_file(&cm_class, &class_attr_abi_version); 1380 class_remove_file(&cm_class, &class_attr_abi_version);
1342 unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES); 1381 unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
1382 if (overflow_maj)
1383 unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES);
1343 idr_destroy(&ctx_id_table); 1384 idr_destroy(&ctx_id_table);
1344} 1385}
1345 1386
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 8ec7876bedcf..650b501eb142 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -181,6 +181,7 @@ static const struct ib_field deth_table[] = {
181 * ib_ud_header_init - Initialize UD header structure 181 * ib_ud_header_init - Initialize UD header structure
182 * @payload_bytes:Length of packet payload 182 * @payload_bytes:Length of packet payload
183 * @grh_present:GRH flag (if non-zero, GRH will be included) 183 * @grh_present:GRH flag (if non-zero, GRH will be included)
184 * @immediate_present: specify if immediate data should be used
184 * @header:Structure to initialize 185 * @header:Structure to initialize
185 * 186 *
186 * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header, 187 * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header,
@@ -191,21 +192,13 @@ static const struct ib_field deth_table[] = {
191 */ 192 */
192void ib_ud_header_init(int payload_bytes, 193void ib_ud_header_init(int payload_bytes,
193 int grh_present, 194 int grh_present,
195 int immediate_present,
194 struct ib_ud_header *header) 196 struct ib_ud_header *header)
195{ 197{
196 int header_len;
197 u16 packet_length; 198 u16 packet_length;
198 199
199 memset(header, 0, sizeof *header); 200 memset(header, 0, sizeof *header);
200 201
201 header_len =
202 IB_LRH_BYTES +
203 IB_BTH_BYTES +
204 IB_DETH_BYTES;
205 if (grh_present) {
206 header_len += IB_GRH_BYTES;
207 }
208
209 header->lrh.link_version = 0; 202 header->lrh.link_version = 0;
210 header->lrh.link_next_header = 203 header->lrh.link_next_header =
211 grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL; 204 grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
@@ -231,7 +224,8 @@ void ib_ud_header_init(int payload_bytes,
231 224
232 header->lrh.packet_length = cpu_to_be16(packet_length); 225 header->lrh.packet_length = cpu_to_be16(packet_length);
233 226
234 if (header->immediate_present) 227 header->immediate_present = immediate_present;
228 if (immediate_present)
235 header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; 229 header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
236 else 230 else
237 header->bth.opcode = IB_OPCODE_UD_SEND_ONLY; 231 header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
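
Since ib_ud_header_init() now takes the immediate flag as an argument instead of reading a field the caller set beforehand, every call site has to pass it explicitly. A minimal sketch of an updated caller, with hypothetical names for everything except the verbs types:

#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>

static void foo_build_ud_header(struct ib_send_wr *wr, int payload_len,
				int grh_present, struct ib_ud_header *hdr)
{
	int immediate = (wr->opcode == IB_WR_SEND_WITH_IMM);

	/* immediate_present is now set by ib_ud_header_init() itself */
	ib_ud_header_init(payload_len, grh_present, immediate, hdr);
	if (immediate)
		hdr->immediate_data = wr->ex.imm_data;
}
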
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 6f7c096abf13..4f906f0614f0 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -136,7 +136,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
136 down_write(&current->mm->mmap_sem); 136 down_write(&current->mm->mmap_sem);
137 137
138 locked = npages + current->mm->locked_vm; 138 locked = npages + current->mm->locked_vm;
139 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; 139 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
140 140
141 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { 141 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
142 ret = -ENOMEM; 142 ret = -ENOMEM;
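
rlimit() here is the accessor introduced for exactly this pattern: it reads the calling task's current (soft) limit, so the change is equivalent to the open-coded version it replaces. A one-function sketch for comparison, with a hypothetical wrapper name:

#include <linux/sched.h>
#include <linux/mm.h>

static unsigned long memlock_limit_pages(void)
{
	/* same value as current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur,
	 * expressed in pages */
	return rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
}
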
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 7de02969ed7d..02d360cfc2f7 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -65,12 +65,9 @@ enum {
65}; 65};
66 66
67/* 67/*
68 * Our lifetime rules for these structs are the following: each time a 68 * Our lifetime rules for these structs are the following:
69 * device special file is opened, we look up the corresponding struct 69 * device special file is opened, we take a reference on the
70 * ib_umad_port by minor in the umad_port[] table while holding the 70 * ib_umad_port's struct ib_umad_device. We drop these
71 * port_lock. If this lookup succeeds, we take a reference on the
72 * ib_umad_port's struct ib_umad_device while still holding the
73 * port_lock; if the lookup fails, we fail the open(). We drop these
74 * references in the corresponding close(). 71 * references in the corresponding close().
75 * 72 *
76 * In addition to references coming from open character devices, there 73 * In addition to references coming from open character devices, there
@@ -78,19 +75,14 @@ enum {
78 * module's reference taken when allocating the ib_umad_device in 75 * module's reference taken when allocating the ib_umad_device in
79 * ib_umad_add_one(). 76 * ib_umad_add_one().
80 * 77 *
81 * When destroying an ib_umad_device, we clear all of its 78 * When destroying an ib_umad_device, we drop the module's reference.
82 * ib_umad_ports from umad_port[] while holding port_lock before
83 * dropping the module's reference to the ib_umad_device. This is
84 * always safe because any open() calls will either succeed and obtain
85 * a reference before we clear the umad_port[] entries, or fail after
86 * we clear the umad_port[] entries.
87 */ 79 */
88 80
89struct ib_umad_port { 81struct ib_umad_port {
90 struct cdev *cdev; 82 struct cdev cdev;
91 struct device *dev; 83 struct device *dev;
92 84
93 struct cdev *sm_cdev; 85 struct cdev sm_cdev;
94 struct device *sm_dev; 86 struct device *sm_dev;
95 struct semaphore sm_sem; 87 struct semaphore sm_sem;
96 88
@@ -136,7 +128,6 @@ static struct class *umad_class;
136static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE); 128static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
137 129
138static DEFINE_SPINLOCK(port_lock); 130static DEFINE_SPINLOCK(port_lock);
139static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
140static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS); 131static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
141 132
142static void ib_umad_add_one(struct ib_device *device); 133static void ib_umad_add_one(struct ib_device *device);
@@ -496,8 +487,8 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
496 ah_attr.ah_flags = IB_AH_GRH; 487 ah_attr.ah_flags = IB_AH_GRH;
497 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); 488 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
498 ah_attr.grh.sgid_index = packet->mad.hdr.gid_index; 489 ah_attr.grh.sgid_index = packet->mad.hdr.gid_index;
499 ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); 490 ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
500 ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; 491 ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
501 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; 492 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
502 } 493 }
503 494
@@ -528,9 +519,9 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
528 goto err_ah; 519 goto err_ah;
529 } 520 }
530 521
531 packet->msg->ah = ah; 522 packet->msg->ah = ah;
532 packet->msg->timeout_ms = packet->mad.hdr.timeout_ms; 523 packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
533 packet->msg->retries = packet->mad.hdr.retries; 524 packet->msg->retries = packet->mad.hdr.retries;
534 packet->msg->context[0] = packet; 525 packet->msg->context[0] = packet;
535 526
536 /* Copy MAD header. Any RMPP header is already in place. */ 527 /* Copy MAD header. Any RMPP header is already in place. */
@@ -779,15 +770,11 @@ static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
779/* 770/*
780 * ib_umad_open() does not need the BKL: 771 * ib_umad_open() does not need the BKL:
781 * 772 *
782 * - umad_port[] accesses are protected by port_lock, the 773 * - the ib_umad_port structures are properly reference counted, and
783 * ib_umad_port structures are properly reference counted, and
784 * everything else is purely local to the file being created, so 774 * everything else is purely local to the file being created, so
785 * races against other open calls are not a problem; 775 * races against other open calls are not a problem;
786 * - the ioctl method does not affect any global state outside of the 776 * - the ioctl method does not affect any global state outside of the
787 * file structure being operated on; 777 * file structure being operated on;
788 * - the port is added to umad_port[] as the last part of module
789 * initialization so the open method will either immediately run
790 * -ENXIO, or all required initialization will be done.
791 */ 778 */
792static int ib_umad_open(struct inode *inode, struct file *filp) 779static int ib_umad_open(struct inode *inode, struct file *filp)
793{ 780{
@@ -795,13 +782,10 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
795 struct ib_umad_file *file; 782 struct ib_umad_file *file;
796 int ret = 0; 783 int ret = 0;
797 784
798 spin_lock(&port_lock); 785 port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
799 port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
800 if (port) 786 if (port)
801 kref_get(&port->umad_dev->ref); 787 kref_get(&port->umad_dev->ref);
802 spin_unlock(&port_lock); 788 else
803
804 if (!port)
805 return -ENXIO; 789 return -ENXIO;
806 790
807 mutex_lock(&port->file_mutex); 791 mutex_lock(&port->file_mutex);
@@ -872,16 +856,16 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
872} 856}
873 857
874static const struct file_operations umad_fops = { 858static const struct file_operations umad_fops = {
875 .owner = THIS_MODULE, 859 .owner = THIS_MODULE,
876 .read = ib_umad_read, 860 .read = ib_umad_read,
877 .write = ib_umad_write, 861 .write = ib_umad_write,
878 .poll = ib_umad_poll, 862 .poll = ib_umad_poll,
879 .unlocked_ioctl = ib_umad_ioctl, 863 .unlocked_ioctl = ib_umad_ioctl,
880#ifdef CONFIG_COMPAT 864#ifdef CONFIG_COMPAT
881 .compat_ioctl = ib_umad_compat_ioctl, 865 .compat_ioctl = ib_umad_compat_ioctl,
882#endif 866#endif
883 .open = ib_umad_open, 867 .open = ib_umad_open,
884 .release = ib_umad_close 868 .release = ib_umad_close
885}; 869};
886 870
887static int ib_umad_sm_open(struct inode *inode, struct file *filp) 871static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -892,13 +876,10 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
892 }; 876 };
893 int ret; 877 int ret;
894 878
895 spin_lock(&port_lock); 879 port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
896 port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE - IB_UMAD_MAX_PORTS];
897 if (port) 880 if (port)
898 kref_get(&port->umad_dev->ref); 881 kref_get(&port->umad_dev->ref);
899 spin_unlock(&port_lock); 882 else
900
901 if (!port)
902 return -ENXIO; 883 return -ENXIO;
903 884
904 if (filp->f_flags & O_NONBLOCK) { 885 if (filp->f_flags & O_NONBLOCK) {
@@ -949,8 +930,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
949} 930}
950 931
951static const struct file_operations umad_sm_fops = { 932static const struct file_operations umad_sm_fops = {
952 .owner = THIS_MODULE, 933 .owner = THIS_MODULE,
953 .open = ib_umad_sm_open, 934 .open = ib_umad_sm_open,
954 .release = ib_umad_sm_close 935 .release = ib_umad_sm_close
955}; 936};
956 937
@@ -990,16 +971,51 @@ static ssize_t show_abi_version(struct class *class, char *buf)
990} 971}
991static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); 972static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
992 973
974static dev_t overflow_maj;
975static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
976static int find_overflow_devnum(void)
977{
978 int ret;
979
980 if (!overflow_maj) {
981 ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
982 "infiniband_mad");
983 if (ret) {
984 printk(KERN_ERR "user_mad: couldn't register dynamic device number\n");
985 return ret;
986 }
987 }
988
989 ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
990 if (ret >= IB_UMAD_MAX_PORTS)
991 return -1;
992
993 return ret;
994}
995
993static int ib_umad_init_port(struct ib_device *device, int port_num, 996static int ib_umad_init_port(struct ib_device *device, int port_num,
994 struct ib_umad_port *port) 997 struct ib_umad_port *port)
995{ 998{
999 int devnum;
1000 dev_t base;
1001
996 spin_lock(&port_lock); 1002 spin_lock(&port_lock);
997 port->dev_num = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS); 1003 devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
998 if (port->dev_num >= IB_UMAD_MAX_PORTS) { 1004 if (devnum >= IB_UMAD_MAX_PORTS) {
999 spin_unlock(&port_lock); 1005 spin_unlock(&port_lock);
1000 return -1; 1006 devnum = find_overflow_devnum();
1007 if (devnum < 0)
1008 return -1;
1009
1010 spin_lock(&port_lock);
1011 port->dev_num = devnum + IB_UMAD_MAX_PORTS;
1012 base = devnum + overflow_maj;
1013 set_bit(devnum, overflow_map);
1014 } else {
1015 port->dev_num = devnum;
1016 base = devnum + base_dev;
1017 set_bit(devnum, dev_map);
1001 } 1018 }
1002 set_bit(port->dev_num, dev_map);
1003 spin_unlock(&port_lock); 1019 spin_unlock(&port_lock);
1004 1020
1005 port->ib_dev = device; 1021 port->ib_dev = device;
@@ -1008,17 +1024,14 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
1008 mutex_init(&port->file_mutex); 1024 mutex_init(&port->file_mutex);
1009 INIT_LIST_HEAD(&port->file_list); 1025 INIT_LIST_HEAD(&port->file_list);
1010 1026
1011 port->cdev = cdev_alloc(); 1027 cdev_init(&port->cdev, &umad_fops);
1012 if (!port->cdev) 1028 port->cdev.owner = THIS_MODULE;
1013 return -1; 1029 kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
1014 port->cdev->owner = THIS_MODULE; 1030 if (cdev_add(&port->cdev, base, 1))
1015 port->cdev->ops = &umad_fops;
1016 kobject_set_name(&port->cdev->kobj, "umad%d", port->dev_num);
1017 if (cdev_add(port->cdev, base_dev + port->dev_num, 1))
1018 goto err_cdev; 1031 goto err_cdev;
1019 1032
1020 port->dev = device_create(umad_class, device->dma_device, 1033 port->dev = device_create(umad_class, device->dma_device,
1021 port->cdev->dev, port, 1034 port->cdev.dev, port,
1022 "umad%d", port->dev_num); 1035 "umad%d", port->dev_num);
1023 if (IS_ERR(port->dev)) 1036 if (IS_ERR(port->dev))
1024 goto err_cdev; 1037 goto err_cdev;
@@ -1028,17 +1041,15 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
1028 if (device_create_file(port->dev, &dev_attr_port)) 1041 if (device_create_file(port->dev, &dev_attr_port))
1029 goto err_dev; 1042 goto err_dev;
1030 1043
1031 port->sm_cdev = cdev_alloc(); 1044 base += IB_UMAD_MAX_PORTS;
1032 if (!port->sm_cdev) 1045 cdev_init(&port->sm_cdev, &umad_sm_fops);
1033 goto err_dev; 1046 port->sm_cdev.owner = THIS_MODULE;
1034 port->sm_cdev->owner = THIS_MODULE; 1047 kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
1035 port->sm_cdev->ops = &umad_sm_fops; 1048 if (cdev_add(&port->sm_cdev, base, 1))
1036 kobject_set_name(&port->sm_cdev->kobj, "issm%d", port->dev_num);
1037 if (cdev_add(port->sm_cdev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
1038 goto err_sm_cdev; 1049 goto err_sm_cdev;
1039 1050
1040 port->sm_dev = device_create(umad_class, device->dma_device, 1051 port->sm_dev = device_create(umad_class, device->dma_device,
1041 port->sm_cdev->dev, port, 1052 port->sm_cdev.dev, port,
1042 "issm%d", port->dev_num); 1053 "issm%d", port->dev_num);
1043 if (IS_ERR(port->sm_dev)) 1054 if (IS_ERR(port->sm_dev))
1044 goto err_sm_cdev; 1055 goto err_sm_cdev;
@@ -1048,24 +1059,23 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
1048 if (device_create_file(port->sm_dev, &dev_attr_port)) 1059 if (device_create_file(port->sm_dev, &dev_attr_port))
1049 goto err_sm_dev; 1060 goto err_sm_dev;
1050 1061
1051 spin_lock(&port_lock);
1052 umad_port[port->dev_num] = port;
1053 spin_unlock(&port_lock);
1054
1055 return 0; 1062 return 0;
1056 1063
1057err_sm_dev: 1064err_sm_dev:
1058 device_destroy(umad_class, port->sm_cdev->dev); 1065 device_destroy(umad_class, port->sm_cdev.dev);
1059 1066
1060err_sm_cdev: 1067err_sm_cdev:
1061 cdev_del(port->sm_cdev); 1068 cdev_del(&port->sm_cdev);
1062 1069
1063err_dev: 1070err_dev:
1064 device_destroy(umad_class, port->cdev->dev); 1071 device_destroy(umad_class, port->cdev.dev);
1065 1072
1066err_cdev: 1073err_cdev:
1067 cdev_del(port->cdev); 1074 cdev_del(&port->cdev);
1068 clear_bit(port->dev_num, dev_map); 1075 if (port->dev_num < IB_UMAD_MAX_PORTS)
1076 clear_bit(devnum, dev_map);
1077 else
1078 clear_bit(devnum, overflow_map);
1069 1079
1070 return -1; 1080 return -1;
1071} 1081}
@@ -1079,15 +1089,11 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
1079 dev_set_drvdata(port->dev, NULL); 1089 dev_set_drvdata(port->dev, NULL);
1080 dev_set_drvdata(port->sm_dev, NULL); 1090 dev_set_drvdata(port->sm_dev, NULL);
1081 1091
1082 device_destroy(umad_class, port->cdev->dev); 1092 device_destroy(umad_class, port->cdev.dev);
1083 device_destroy(umad_class, port->sm_cdev->dev); 1093 device_destroy(umad_class, port->sm_cdev.dev);
1084 1094
1085 cdev_del(port->cdev); 1095 cdev_del(&port->cdev);
1086 cdev_del(port->sm_cdev); 1096 cdev_del(&port->sm_cdev);
1087
1088 spin_lock(&port_lock);
1089 umad_port[port->dev_num] = NULL;
1090 spin_unlock(&port_lock);
1091 1097
1092 mutex_lock(&port->file_mutex); 1098 mutex_lock(&port->file_mutex);
1093 1099
@@ -1106,7 +1112,10 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
1106 1112
1107 mutex_unlock(&port->file_mutex); 1113 mutex_unlock(&port->file_mutex);
1108 1114
1109 clear_bit(port->dev_num, dev_map); 1115 if (port->dev_num < IB_UMAD_MAX_PORTS)
1116 clear_bit(port->dev_num, dev_map);
1117 else
1118 clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
1110} 1119}
1111 1120
1112static void ib_umad_add_one(struct ib_device *device) 1121static void ib_umad_add_one(struct ib_device *device)
@@ -1214,6 +1223,8 @@ static void __exit ib_umad_cleanup(void)
1214 ib_unregister_client(&umad_client); 1223 ib_unregister_client(&umad_client);
1215 class_destroy(umad_class); 1224 class_destroy(umad_class);
1216 unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2); 1225 unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
1226 if (overflow_maj)
1227 unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
1217} 1228}
1218 1229
1219module_init(ib_umad_init); 1230module_init(ib_umad_init);
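
The reason the umad_port[] table and its port_lock lookups could go away is the switch from cdev_alloc()'d pointers to cdevs embedded in struct ib_umad_port: open() recovers the port directly from inode->i_cdev with container_of(). A minimal sketch of that pattern in isolation, with hypothetical names:

#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/module.h>

struct foo_port {
	struct cdev cdev;		/* embedded, not a pointer */
	/* ... per-port state ... */
};

static int foo_open(struct inode *inode, struct file *filp)
{
	/* inode->i_cdev points at the embedded cdev, so the containing
	 * port is reachable without any global lookup table or lock */
	struct foo_port *port =
		container_of(inode->i_cdev, struct foo_port, cdev);

	filp->private_data = port;
	return 0;
}

static const struct file_operations foo_fops = {
	.owner = THIS_MODULE,
	.open  = foo_open,
};

static int foo_register_port(struct foo_port *port, dev_t devt)
{
	cdev_init(&port->cdev, &foo_fops);
	port->cdev.owner = THIS_MODULE;
	return cdev_add(&port->cdev, devt, 1);
}
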
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b3ea9587dc80..e54d9ac6d1ca 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -41,6 +41,7 @@
41#include <linux/idr.h> 41#include <linux/idr.h>
42#include <linux/mutex.h> 42#include <linux/mutex.h>
43#include <linux/completion.h> 43#include <linux/completion.h>
44#include <linux/cdev.h>
44 45
45#include <rdma/ib_verbs.h> 46#include <rdma/ib_verbs.h>
46#include <rdma/ib_umem.h> 47#include <rdma/ib_umem.h>
@@ -69,23 +70,23 @@
69 70
70struct ib_uverbs_device { 71struct ib_uverbs_device {
71 struct kref ref; 72 struct kref ref;
73 int num_comp_vectors;
72 struct completion comp; 74 struct completion comp;
73 int devnum;
74 struct cdev *cdev;
75 struct device *dev; 75 struct device *dev;
76 struct ib_device *ib_dev; 76 struct ib_device *ib_dev;
77 int num_comp_vectors; 77 int devnum;
78 struct cdev cdev;
78}; 79};
79 80
80struct ib_uverbs_event_file { 81struct ib_uverbs_event_file {
81 struct kref ref; 82 struct kref ref;
83 int is_async;
82 struct ib_uverbs_file *uverbs_file; 84 struct ib_uverbs_file *uverbs_file;
83 spinlock_t lock; 85 spinlock_t lock;
86 int is_closed;
84 wait_queue_head_t poll_wait; 87 wait_queue_head_t poll_wait;
85 struct fasync_struct *async_queue; 88 struct fasync_struct *async_queue;
86 struct list_head event_list; 89 struct list_head event_list;
87 int is_async;
88 int is_closed;
89}; 90};
90 91
91struct ib_uverbs_file { 92struct ib_uverbs_file {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5f284ffd430e..ff59a795e840 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -42,8 +42,8 @@
42#include <linux/poll.h> 42#include <linux/poll.h>
43#include <linux/sched.h> 43#include <linux/sched.h>
44#include <linux/file.h> 44#include <linux/file.h>
45#include <linux/mount.h>
46#include <linux/cdev.h> 45#include <linux/cdev.h>
46#include <linux/anon_inodes.h>
47 47
48#include <asm/uaccess.h> 48#include <asm/uaccess.h>
49 49
@@ -53,8 +53,6 @@ MODULE_AUTHOR("Roland Dreier");
53MODULE_DESCRIPTION("InfiniBand userspace verbs access"); 53MODULE_DESCRIPTION("InfiniBand userspace verbs access");
54MODULE_LICENSE("Dual BSD/GPL"); 54MODULE_LICENSE("Dual BSD/GPL");
55 55
56#define INFINIBANDEVENTFS_MAGIC 0x49426576 /* "IBev" */
57
58enum { 56enum {
59 IB_UVERBS_MAJOR = 231, 57 IB_UVERBS_MAJOR = 231,
60 IB_UVERBS_BASE_MINOR = 192, 58 IB_UVERBS_BASE_MINOR = 192,
@@ -75,44 +73,41 @@ DEFINE_IDR(ib_uverbs_qp_idr);
75DEFINE_IDR(ib_uverbs_srq_idr); 73DEFINE_IDR(ib_uverbs_srq_idr);
76 74
77static DEFINE_SPINLOCK(map_lock); 75static DEFINE_SPINLOCK(map_lock);
78static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
79static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); 76static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
80 77
81static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, 78static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
82 const char __user *buf, int in_len, 79 const char __user *buf, int in_len,
83 int out_len) = { 80 int out_len) = {
84 [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context, 81 [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
85 [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device, 82 [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device,
86 [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port, 83 [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port,
87 [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd, 84 [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd,
88 [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd, 85 [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
89 [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr, 86 [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
90 [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr, 87 [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
91 [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel, 88 [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
92 [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq, 89 [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
93 [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq, 90 [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq,
94 [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq, 91 [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq,
95 [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq, 92 [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq,
96 [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq, 93 [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq,
97 [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp, 94 [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp,
98 [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp, 95 [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp,
99 [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp, 96 [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp,
100 [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, 97 [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
101 [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send, 98 [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send,
102 [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv, 99 [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv,
103 [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv, 100 [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv,
104 [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah, 101 [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah,
105 [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah, 102 [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah,
106 [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, 103 [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
107 [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, 104 [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
108 [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq, 105 [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
109 [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, 106 [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
110 [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, 107 [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq,
111 [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, 108 [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
112}; 109};
113 110
114static struct vfsmount *uverbs_event_mnt;
115
116static void ib_uverbs_add_one(struct ib_device *device); 111static void ib_uverbs_add_one(struct ib_device *device);
117static void ib_uverbs_remove_one(struct ib_device *device); 112static void ib_uverbs_remove_one(struct ib_device *device);
118 113
@@ -370,7 +365,7 @@ static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
370 365
371static const struct file_operations uverbs_event_fops = { 366static const struct file_operations uverbs_event_fops = {
372 .owner = THIS_MODULE, 367 .owner = THIS_MODULE,
373 .read = ib_uverbs_event_read, 368 .read = ib_uverbs_event_read,
374 .poll = ib_uverbs_event_poll, 369 .poll = ib_uverbs_event_poll,
375 .release = ib_uverbs_event_close, 370 .release = ib_uverbs_event_close,
376 .fasync = ib_uverbs_event_fasync 371 .fasync = ib_uverbs_event_fasync
@@ -492,7 +487,6 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
492 int is_async, int *fd) 487 int is_async, int *fd)
493{ 488{
494 struct ib_uverbs_event_file *ev_file; 489 struct ib_uverbs_event_file *ev_file;
495 struct path path;
496 struct file *filp; 490 struct file *filp;
497 int ret; 491 int ret;
498 492
@@ -515,27 +509,16 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
515 goto err; 509 goto err;
516 } 510 }
517 511
518 /* 512 filp = anon_inode_getfile("[uverbs-event]", &uverbs_event_fops,
519 * fops_get() can't fail here, because we're coming from a 513 ev_file, O_RDONLY);
520 * system call on a uverbs file, which will already have a
521 * module reference.
522 */
523 path.mnt = uverbs_event_mnt;
524 path.dentry = uverbs_event_mnt->mnt_root;
525 path_get(&path);
526 filp = alloc_file(&path, FMODE_READ, fops_get(&uverbs_event_fops));
527 if (!filp) { 514 if (!filp) {
528 ret = -ENFILE; 515 ret = -ENFILE;
529 goto err_fd; 516 goto err_fd;
530 } 517 }
531 518
532 filp->private_data = ev_file;
533
534 return filp; 519 return filp;
535 520
536err_fd: 521err_fd:
537 fops_put(&uverbs_event_fops);
538 path_put(&path);
539 put_unused_fd(*fd); 522 put_unused_fd(*fd);
540 523
541err: 524err:
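
[Annotation] The hunk above drops the driver-private "infinibandeventfs" pseudo filesystem and hands the event queue to the generic anon_inodes backend instead. A rough, stand-alone sketch of that conversion, with an invented my_event_fops and my_ctx (note anon_inode_getfile() reports failure via ERR_PTR):

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>

static const struct file_operations my_event_fops;   /* read/poll/release elsewhere */

static int my_create_event_fd(void *my_ctx)
{
	struct file *filp;
	int fd;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	/*
	 * One call replaces the old pattern of mounting a private pseudo fs,
	 * taking its root dentry and building a struct file by hand.
	 */
	filp = anon_inode_getfile("[my-event]", &my_event_fops, my_ctx, O_RDONLY);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	fd_install(fd, filp);
	return fd;
}
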
@@ -617,14 +600,12 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
617/* 600/*
618 * ib_uverbs_open() does not need the BKL: 601 * ib_uverbs_open() does not need the BKL:
619 * 602 *
620 * - dev_table[] accesses are protected by map_lock, the 603 * - the ib_uverbs_device structures are properly reference counted and
621 * ib_uverbs_device structures are properly reference counted, and
622 * everything else is purely local to the file being created, so 604 * everything else is purely local to the file being created, so
623 * races against other open calls are not a problem; 605 * races against other open calls are not a problem;
624 * - there is no ioctl method to race against; 606 * - there is no ioctl method to race against;
625 * - the device is added to dev_table[] as the last part of module 607 * - the open method will either immediately run -ENXIO, or all
626 * initialization, the open method will either immediately run 608 * required initialization will be done.
627 * -ENXIO, or all required initialization will be done.
628 */ 609 */
629static int ib_uverbs_open(struct inode *inode, struct file *filp) 610static int ib_uverbs_open(struct inode *inode, struct file *filp)
630{ 611{
@@ -632,13 +613,10 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
632 struct ib_uverbs_file *file; 613 struct ib_uverbs_file *file;
633 int ret; 614 int ret;
634 615
635 spin_lock(&map_lock); 616 dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
636 dev = dev_table[iminor(inode) - IB_UVERBS_BASE_MINOR];
637 if (dev) 617 if (dev)
638 kref_get(&dev->ref); 618 kref_get(&dev->ref);
639 spin_unlock(&map_lock); 619 else
640
641 if (!dev)
642 return -ENXIO; 620 return -ENXIO;
643 621
644 if (!try_module_get(dev->ib_dev->owner)) { 622 if (!try_module_get(dev->ib_dev->owner)) {
@@ -685,17 +663,17 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
685} 663}
686 664
687static const struct file_operations uverbs_fops = { 665static const struct file_operations uverbs_fops = {
688 .owner = THIS_MODULE, 666 .owner = THIS_MODULE,
689 .write = ib_uverbs_write, 667 .write = ib_uverbs_write,
690 .open = ib_uverbs_open, 668 .open = ib_uverbs_open,
691 .release = ib_uverbs_close 669 .release = ib_uverbs_close
692}; 670};
693 671
694static const struct file_operations uverbs_mmap_fops = { 672static const struct file_operations uverbs_mmap_fops = {
695 .owner = THIS_MODULE, 673 .owner = THIS_MODULE,
696 .write = ib_uverbs_write, 674 .write = ib_uverbs_write,
697 .mmap = ib_uverbs_mmap, 675 .mmap = ib_uverbs_mmap,
698 .open = ib_uverbs_open, 676 .open = ib_uverbs_open,
699 .release = ib_uverbs_close 677 .release = ib_uverbs_close
700}; 678};
701 679
@@ -735,8 +713,38 @@ static ssize_t show_abi_version(struct class *class, char *buf)
735} 713}
736static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); 714static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
737 715
716static dev_t overflow_maj;
717static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);
718
719/*
720 * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
721 * requesting a new major number and doubling the number of max devices we
722 * support. It's stupid, but simple.
723 */
724static int find_overflow_devnum(void)
725{
726 int ret;
727
728 if (!overflow_maj) {
729 ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
730 "infiniband_verbs");
731 if (ret) {
732 printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
733 return ret;
734 }
735 }
736
737 ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
738 if (ret >= IB_UVERBS_MAX_DEVICES)
739 return -1;
740
741 return ret;
742}
743
738static void ib_uverbs_add_one(struct ib_device *device) 744static void ib_uverbs_add_one(struct ib_device *device)
739{ 745{
746 int devnum;
747 dev_t base;
740 struct ib_uverbs_device *uverbs_dev; 748 struct ib_uverbs_device *uverbs_dev;
741 749
742 if (!device->alloc_ucontext) 750 if (!device->alloc_ucontext)
@@ -750,28 +758,36 @@ static void ib_uverbs_add_one(struct ib_device *device)
750 init_completion(&uverbs_dev->comp); 758 init_completion(&uverbs_dev->comp);
751 759
752 spin_lock(&map_lock); 760 spin_lock(&map_lock);
753 uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); 761 devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
754 if (uverbs_dev->devnum >= IB_UVERBS_MAX_DEVICES) { 762 if (devnum >= IB_UVERBS_MAX_DEVICES) {
755 spin_unlock(&map_lock); 763 spin_unlock(&map_lock);
756 goto err; 764 devnum = find_overflow_devnum();
765 if (devnum < 0)
766 goto err;
767
768 spin_lock(&map_lock);
769 uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
770 base = devnum + overflow_maj;
771 set_bit(devnum, overflow_map);
772 } else {
773 uverbs_dev->devnum = devnum;
774 base = devnum + IB_UVERBS_BASE_DEV;
775 set_bit(devnum, dev_map);
757 } 776 }
758 set_bit(uverbs_dev->devnum, dev_map);
759 spin_unlock(&map_lock); 777 spin_unlock(&map_lock);
760 778
761 uverbs_dev->ib_dev = device; 779 uverbs_dev->ib_dev = device;
762 uverbs_dev->num_comp_vectors = device->num_comp_vectors; 780 uverbs_dev->num_comp_vectors = device->num_comp_vectors;
763 781
764 uverbs_dev->cdev = cdev_alloc(); 782 cdev_init(&uverbs_dev->cdev, NULL);
765 if (!uverbs_dev->cdev) 783 uverbs_dev->cdev.owner = THIS_MODULE;
766 goto err; 784 uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
767 uverbs_dev->cdev->owner = THIS_MODULE; 785 kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
768 uverbs_dev->cdev->ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops; 786 if (cdev_add(&uverbs_dev->cdev, base, 1))
769 kobject_set_name(&uverbs_dev->cdev->kobj, "uverbs%d", uverbs_dev->devnum);
770 if (cdev_add(uverbs_dev->cdev, IB_UVERBS_BASE_DEV + uverbs_dev->devnum, 1))
771 goto err_cdev; 787 goto err_cdev;
772 788
773 uverbs_dev->dev = device_create(uverbs_class, device->dma_device, 789 uverbs_dev->dev = device_create(uverbs_class, device->dma_device,
774 uverbs_dev->cdev->dev, uverbs_dev, 790 uverbs_dev->cdev.dev, uverbs_dev,
775 "uverbs%d", uverbs_dev->devnum); 791 "uverbs%d", uverbs_dev->devnum);
776 if (IS_ERR(uverbs_dev->dev)) 792 if (IS_ERR(uverbs_dev->dev))
777 goto err_cdev; 793 goto err_cdev;
@@ -781,20 +797,19 @@ static void ib_uverbs_add_one(struct ib_device *device)
781 if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version)) 797 if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
782 goto err_class; 798 goto err_class;
783 799
784 spin_lock(&map_lock);
785 dev_table[uverbs_dev->devnum] = uverbs_dev;
786 spin_unlock(&map_lock);
787
788 ib_set_client_data(device, &uverbs_client, uverbs_dev); 800 ib_set_client_data(device, &uverbs_client, uverbs_dev);
789 801
790 return; 802 return;
791 803
792err_class: 804err_class:
793 device_destroy(uverbs_class, uverbs_dev->cdev->dev); 805 device_destroy(uverbs_class, uverbs_dev->cdev.dev);
794 806
795err_cdev: 807err_cdev:
796 cdev_del(uverbs_dev->cdev); 808 cdev_del(&uverbs_dev->cdev);
797 clear_bit(uverbs_dev->devnum, dev_map); 809 if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
810 clear_bit(devnum, dev_map);
811 else
812 clear_bit(devnum, overflow_map);
798 813
799err: 814err:
800 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); 815 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
@@ -811,35 +826,19 @@ static void ib_uverbs_remove_one(struct ib_device *device)
811 return; 826 return;
812 827
813 dev_set_drvdata(uverbs_dev->dev, NULL); 828 dev_set_drvdata(uverbs_dev->dev, NULL);
814 device_destroy(uverbs_class, uverbs_dev->cdev->dev); 829 device_destroy(uverbs_class, uverbs_dev->cdev.dev);
815 cdev_del(uverbs_dev->cdev); 830 cdev_del(&uverbs_dev->cdev);
816 831
817 spin_lock(&map_lock); 832 if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
818 dev_table[uverbs_dev->devnum] = NULL; 833 clear_bit(uverbs_dev->devnum, dev_map);
819 spin_unlock(&map_lock); 834 else
820 835 clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
821 clear_bit(uverbs_dev->devnum, dev_map);
822 836
823 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); 837 kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
824 wait_for_completion(&uverbs_dev->comp); 838 wait_for_completion(&uverbs_dev->comp);
825 kfree(uverbs_dev); 839 kfree(uverbs_dev);
826} 840}
827 841
828static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
829 const char *dev_name, void *data,
830 struct vfsmount *mnt)
831{
832 return get_sb_pseudo(fs_type, "infinibandevent:", NULL,
833 INFINIBANDEVENTFS_MAGIC, mnt);
834}
835
836static struct file_system_type uverbs_event_fs = {
837 /* No owner field so module can be unloaded */
838 .name = "infinibandeventfs",
839 .get_sb = uverbs_event_get_sb,
840 .kill_sb = kill_litter_super
841};
842
843static int __init ib_uverbs_init(void) 842static int __init ib_uverbs_init(void)
844{ 843{
845 int ret; 844 int ret;
@@ -864,33 +863,14 @@ static int __init ib_uverbs_init(void)
864 goto out_class; 863 goto out_class;
865 } 864 }
866 865
867 ret = register_filesystem(&uverbs_event_fs);
868 if (ret) {
869 printk(KERN_ERR "user_verbs: couldn't register infinibandeventfs\n");
870 goto out_class;
871 }
872
873 uverbs_event_mnt = kern_mount(&uverbs_event_fs);
874 if (IS_ERR(uverbs_event_mnt)) {
875 ret = PTR_ERR(uverbs_event_mnt);
876 printk(KERN_ERR "user_verbs: couldn't mount infinibandeventfs\n");
877 goto out_fs;
878 }
879
880 ret = ib_register_client(&uverbs_client); 866 ret = ib_register_client(&uverbs_client);
881 if (ret) { 867 if (ret) {
882 printk(KERN_ERR "user_verbs: couldn't register client\n"); 868 printk(KERN_ERR "user_verbs: couldn't register client\n");
883 goto out_mnt; 869 goto out_class;
884 } 870 }
885 871
886 return 0; 872 return 0;
887 873
888out_mnt:
889 mntput(uverbs_event_mnt);
890
891out_fs:
892 unregister_filesystem(&uverbs_event_fs);
893
894out_class: 874out_class:
895 class_destroy(uverbs_class); 875 class_destroy(uverbs_class);
896 876
@@ -904,10 +884,10 @@ out:
904static void __exit ib_uverbs_cleanup(void) 884static void __exit ib_uverbs_cleanup(void)
905{ 885{
906 ib_unregister_client(&uverbs_client); 886 ib_unregister_client(&uverbs_client);
907 mntput(uverbs_event_mnt);
908 unregister_filesystem(&uverbs_event_fs);
909 class_destroy(uverbs_class); 887 class_destroy(uverbs_class);
910 unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES); 888 unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
889 if (overflow_maj)
890 unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
911 idr_destroy(&ib_uverbs_pd_idr); 891 idr_destroy(&ib_uverbs_pd_idr);
912 idr_destroy(&ib_uverbs_mr_idr); 892 idr_destroy(&ib_uverbs_mr_idr);
913 idr_destroy(&ib_uverbs_mw_idr); 893 idr_destroy(&ib_uverbs_mw_idr);
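
[Annotation] find_overflow_devnum() and the reworked ib_uverbs_add_one() above split device numbers across a fixed base range and an on-demand overflow major, each tracked by its own bitmap. A condensed sketch of that selection logic, detached from the driver and using invented names (MY_MAX_DEVS, my_base_dev, my_overflow_maj, my_pick_devt):

#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define MY_MAX_DEVS 32

static dev_t my_base_dev;       /* fixed range, registered at module init */
static dev_t my_overflow_maj;   /* dynamic range, allocated only when needed */
static DECLARE_BITMAP(dev_map, MY_MAX_DEVS);
static DECLARE_BITMAP(overflow_map, MY_MAX_DEVS);
static DEFINE_SPINLOCK(map_lock);

/* Returns the dev_t to hand to cdev_add(), or 0 on failure. */
static dev_t my_pick_devt(int *devnum_out)
{
	dev_t base = 0;
	int devnum;

	spin_lock(&map_lock);
	devnum = find_first_zero_bit(dev_map, MY_MAX_DEVS);
	if (devnum < MY_MAX_DEVS) {
		set_bit(devnum, dev_map);
		base = my_base_dev + devnum;
		*devnum_out = devnum;
	} else {
		spin_unlock(&map_lock);
		/* Fixed range exhausted: fall back to the overflow major. */
		if (!my_overflow_maj &&
		    alloc_chrdev_region(&my_overflow_maj, 0, MY_MAX_DEVS,
					"my_overflow"))
			return 0;
		spin_lock(&map_lock);
		devnum = find_first_zero_bit(overflow_map, MY_MAX_DEVS);
		if (devnum < MY_MAX_DEVS) {
			set_bit(devnum, overflow_map);
			base = my_overflow_maj + devnum;
			/* device name index continues past the base range */
			*devnum_out = devnum + MY_MAX_DEVS;
		}
	}
	spin_unlock(&map_lock);
	return base;
}
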
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 0677fc7dfd51..a28e862f2d68 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -109,7 +109,6 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
109 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) { 109 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
110 udelay(1); 110 udelay(1);
111 if (i++ > 1000000) { 111 if (i++ > 1000000) {
112 BUG_ON(1);
113 printk(KERN_ERR "%s: stalled rnic\n", 112 printk(KERN_ERR "%s: stalled rnic\n",
114 rdev_p->dev_name); 113 rdev_p->dev_name);
115 return -EIO; 114 return -EIO;
@@ -155,7 +154,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
155 return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); 154 return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
156} 155}
157 156
158int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) 157int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
159{ 158{
160 struct rdma_cq_setup setup; 159 struct rdma_cq_setup setup;
161 int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe); 160 int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
@@ -163,12 +162,12 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
163 cq->cqid = cxio_hal_get_cqid(rdev_p->rscp); 162 cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
164 if (!cq->cqid) 163 if (!cq->cqid)
165 return -ENOMEM; 164 return -ENOMEM;
166 cq->sw_queue = kzalloc(size, GFP_KERNEL); 165 if (kernel) {
167 if (!cq->sw_queue) 166 cq->sw_queue = kzalloc(size, GFP_KERNEL);
168 return -ENOMEM; 167 if (!cq->sw_queue)
169 cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), 168 return -ENOMEM;
170 (1UL << (cq->size_log2)) * 169 }
171 sizeof(struct t3_cqe), 170 cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
172 &(cq->dma_addr), GFP_KERNEL); 171 &(cq->dma_addr), GFP_KERNEL);
173 if (!cq->queue) { 172 if (!cq->queue) {
174 kfree(cq->sw_queue); 173 kfree(cq->sw_queue);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index f3d440cc68f2..073373c2c560 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
53#define T3_MAX_PBL_SIZE 256 53#define T3_MAX_PBL_SIZE 256
54#define T3_MAX_RQ_SIZE 1024 54#define T3_MAX_RQ_SIZE 1024
55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) 55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
56#define T3_MAX_CQ_DEPTH 8192 56#define T3_MAX_CQ_DEPTH 262144
57#define T3_MAX_NUM_STAG (1<<15) 57#define T3_MAX_NUM_STAG (1<<15)
58#define T3_MAX_MR_SIZE 0x100000000ULL 58#define T3_MAX_MR_SIZE 0x100000000ULL
59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ 59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
@@ -157,7 +157,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev);
157void cxio_rdev_close(struct cxio_rdev *rdev); 157void cxio_rdev_close(struct cxio_rdev *rdev);
158int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq, 158int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
159 enum t3_cq_opcode op, u32 credit); 159 enum t3_cq_opcode op, u32 credit);
160int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 160int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
161int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 161int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
162int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 162int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
163void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx); 163void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index a197a5b7ac7f..15073b2da1c5 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -730,7 +730,22 @@ struct t3_cq {
730 730
731static inline void cxio_set_wq_in_error(struct t3_wq *wq) 731static inline void cxio_set_wq_in_error(struct t3_wq *wq)
732{ 732{
733 wq->queue->wq_in_err.err = 1; 733 wq->queue->wq_in_err.err |= 1;
734}
735
736static inline void cxio_disable_wq_db(struct t3_wq *wq)
737{
738 wq->queue->wq_in_err.err |= 2;
739}
740
741static inline void cxio_enable_wq_db(struct t3_wq *wq)
742{
743 wq->queue->wq_in_err.err &= ~2;
744}
745
746static inline int cxio_wq_db_enabled(struct t3_wq *wq)
747{
748 return !(wq->queue->wq_in_err.err & 2);
734} 749}
735 750
736static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq) 751static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
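
[Annotation] cxio_wr.h above grows three helpers that overload wq_in_err as a small flag word: bit 0 still means "queue in error", bit 1 now means "doorbells suppressed". The post-send/post-receive paths later in this diff only ring the doorbell when cxio_wq_db_enabled() says so. A generic sketch of the same bit-flag accessors on an invented struct my_wq:

#include <linux/types.h>

#define MY_WQ_IN_ERROR	0x1	/* queue has been moved to error state */
#define MY_WQ_DB_OFF	0x2	/* doorbell writes are temporarily suppressed */

struct my_wq {
	u32 flags;
};

static inline void my_wq_disable_db(struct my_wq *wq)
{
	wq->flags |= MY_WQ_DB_OFF;
}

static inline void my_wq_enable_db(struct my_wq *wq)
{
	wq->flags &= ~MY_WQ_DB_OFF;
}

static inline bool my_wq_db_enabled(const struct my_wq *wq)
{
	return !(wq->flags & MY_WQ_DB_OFF);
}
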
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index b0ea0105ddf6..ee1d8b4d4541 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -65,6 +65,46 @@ struct cxgb3_client t3c_client = {
65static LIST_HEAD(dev_list); 65static LIST_HEAD(dev_list);
66static DEFINE_MUTEX(dev_mutex); 66static DEFINE_MUTEX(dev_mutex);
67 67
68static int disable_qp_db(int id, void *p, void *data)
69{
70 struct iwch_qp *qhp = p;
71
72 cxio_disable_wq_db(&qhp->wq);
73 return 0;
74}
75
76static int enable_qp_db(int id, void *p, void *data)
77{
78 struct iwch_qp *qhp = p;
79
80 if (data)
81 ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
82 cxio_enable_wq_db(&qhp->wq);
83 return 0;
84}
85
86static void disable_dbs(struct iwch_dev *rnicp)
87{
88 spin_lock_irq(&rnicp->lock);
89 idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
90 spin_unlock_irq(&rnicp->lock);
91}
92
93static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
94{
95 spin_lock_irq(&rnicp->lock);
96 idr_for_each(&rnicp->qpidr, enable_qp_db,
97 (void *)(unsigned long)ring_db);
98 spin_unlock_irq(&rnicp->lock);
99}
100
101static void iwch_db_drop_task(struct work_struct *work)
102{
103 struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
104 db_drop_task.work);
105 enable_dbs(rnicp, 1);
106}
107
68static void rnic_init(struct iwch_dev *rnicp) 108static void rnic_init(struct iwch_dev *rnicp)
69{ 109{
70 PDBG("%s iwch_dev %p\n", __func__, rnicp); 110 PDBG("%s iwch_dev %p\n", __func__, rnicp);
@@ -72,6 +112,7 @@ static void rnic_init(struct iwch_dev *rnicp)
72 idr_init(&rnicp->qpidr); 112 idr_init(&rnicp->qpidr);
73 idr_init(&rnicp->mmidr); 113 idr_init(&rnicp->mmidr);
74 spin_lock_init(&rnicp->lock); 114 spin_lock_init(&rnicp->lock);
115 INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
75 116
76 rnicp->attr.max_qps = T3_MAX_NUM_QP - 32; 117 rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
77 rnicp->attr.max_wrs = T3_MAX_QP_DEPTH; 118 rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
@@ -147,6 +188,8 @@ static void close_rnic_dev(struct t3cdev *tdev)
147 mutex_lock(&dev_mutex); 188 mutex_lock(&dev_mutex);
148 list_for_each_entry_safe(dev, tmp, &dev_list, entry) { 189 list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
149 if (dev->rdev.t3cdev_p == tdev) { 190 if (dev->rdev.t3cdev_p == tdev) {
191 dev->rdev.flags = CXIO_ERROR_FATAL;
192 cancel_delayed_work_sync(&dev->db_drop_task);
150 list_del(&dev->entry); 193 list_del(&dev->entry);
151 iwch_unregister_device(dev); 194 iwch_unregister_device(dev);
152 cxio_rdev_close(&dev->rdev); 195 cxio_rdev_close(&dev->rdev);
@@ -165,7 +208,8 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
165 struct cxio_rdev *rdev = tdev->ulp; 208 struct cxio_rdev *rdev = tdev->ulp;
166 struct iwch_dev *rnicp; 209 struct iwch_dev *rnicp;
167 struct ib_event event; 210 struct ib_event event;
168 u32 portnum = port_id + 1; 211 u32 portnum = port_id + 1;
212 int dispatch = 0;
169 213
170 if (!rdev) 214 if (!rdev)
171 return; 215 return;
@@ -174,21 +218,49 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
174 case OFFLOAD_STATUS_DOWN: { 218 case OFFLOAD_STATUS_DOWN: {
175 rdev->flags = CXIO_ERROR_FATAL; 219 rdev->flags = CXIO_ERROR_FATAL;
176 event.event = IB_EVENT_DEVICE_FATAL; 220 event.event = IB_EVENT_DEVICE_FATAL;
221 dispatch = 1;
177 break; 222 break;
178 } 223 }
179 case OFFLOAD_PORT_DOWN: { 224 case OFFLOAD_PORT_DOWN: {
180 event.event = IB_EVENT_PORT_ERR; 225 event.event = IB_EVENT_PORT_ERR;
226 dispatch = 1;
181 break; 227 break;
182 } 228 }
183 case OFFLOAD_PORT_UP: { 229 case OFFLOAD_PORT_UP: {
184 event.event = IB_EVENT_PORT_ACTIVE; 230 event.event = IB_EVENT_PORT_ACTIVE;
231 dispatch = 1;
232 break;
233 }
234 case OFFLOAD_DB_FULL: {
235 disable_dbs(rnicp);
236 break;
237 }
238 case OFFLOAD_DB_EMPTY: {
239 enable_dbs(rnicp, 1);
240 break;
241 }
242 case OFFLOAD_DB_DROP: {
243 unsigned long delay = 1000;
244 unsigned short r;
245
246 disable_dbs(rnicp);
247 get_random_bytes(&r, 2);
248 delay += r & 1023;
249
250 /*
251 * delay is between 1000-2023 usecs.
252 */
253 schedule_delayed_work(&rnicp->db_drop_task,
254 usecs_to_jiffies(delay));
185 break; 255 break;
186 } 256 }
187 } 257 }
188 258
189 event.device = &rnicp->ibdev; 259 if (dispatch) {
190 event.element.port_num = portnum; 260 event.device = &rnicp->ibdev;
191 ib_dispatch_event(&event); 261 event.element.port_num = portnum;
262 ib_dispatch_event(&event);
263 }
192 264
193 return; 265 return;
194} 266}
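
[Annotation] The OFFLOAD_DB_DROP case above disables all queue-pair doorbells and re-enables them from a delayed work item after a randomized 1000-2023 us back-off, presumably so concurrent adapters do not all retry at the same instant. A stripped-down sketch of just the back-off scheduling, assuming a hypothetical my_db_recover_work already set up with INIT_DELAYED_WORK() and a handler that re-enables the doorbells:

#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/workqueue.h>

static struct delayed_work my_db_recover_work;	/* handler re-enables doorbells */

static void my_schedule_db_recovery(void)
{
	unsigned long delay_us = 1000;
	unsigned short r;

	get_random_bytes(&r, sizeof(r));
	delay_us += r & 1023;		/* final delay: 1000..2023 microseconds */

	schedule_delayed_work(&my_db_recover_work, usecs_to_jiffies(delay_us));
}
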
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index 84735506333f..a1c44578e039 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -36,6 +36,7 @@
36#include <linux/list.h> 36#include <linux/list.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/idr.h> 38#include <linux/idr.h>
39#include <linux/workqueue.h>
39 40
40#include <rdma/ib_verbs.h> 41#include <rdma/ib_verbs.h>
41 42
@@ -110,6 +111,7 @@ struct iwch_dev {
110 struct idr mmidr; 111 struct idr mmidr;
111 spinlock_t lock; 112 spinlock_t lock;
112 struct list_head entry; 113 struct list_head entry;
114 struct delayed_work db_drop_task;
113}; 115};
114 116
115static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev) 117static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ed7175549ebd..47b35c6608d2 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -187,7 +187,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
187 entries = roundup_pow_of_two(entries); 187 entries = roundup_pow_of_two(entries);
188 chp->cq.size_log2 = ilog2(entries); 188 chp->cq.size_log2 = ilog2(entries);
189 189
190 if (cxio_create_cq(&rhp->rdev, &chp->cq)) { 190 if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
191 kfree(chp); 191 kfree(chp);
192 return ERR_PTR(-ENOMEM); 192 return ERR_PTR(-ENOMEM);
193 } 193 }
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3eb8cecf81d7..b4d893de3650 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -452,7 +452,8 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
452 ++(qhp->wq.sq_wptr); 452 ++(qhp->wq.sq_wptr);
453 } 453 }
454 spin_unlock_irqrestore(&qhp->lock, flag); 454 spin_unlock_irqrestore(&qhp->lock, flag);
455 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); 455 if (cxio_wq_db_enabled(&qhp->wq))
456 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
456 457
457out: 458out:
458 if (err) 459 if (err)
@@ -514,7 +515,8 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
514 num_wrs--; 515 num_wrs--;
515 } 516 }
516 spin_unlock_irqrestore(&qhp->lock, flag); 517 spin_unlock_irqrestore(&qhp->lock, flag);
517 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); 518 if (cxio_wq_db_enabled(&qhp->wq))
519 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
518 520
519out: 521out:
520 if (err) 522 if (err)
@@ -597,7 +599,8 @@ int iwch_bind_mw(struct ib_qp *qp,
597 ++(qhp->wq.sq_wptr); 599 ++(qhp->wq.sq_wptr);
598 spin_unlock_irqrestore(&qhp->lock, flag); 600 spin_unlock_irqrestore(&qhp->lock, flag);
599 601
600 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); 602 if (cxio_wq_db_enabled(&qhp->wq))
603 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
601 604
602 return err; 605 return err;
603} 606}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 42be0b15084b..b2b6fea2b141 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -548,11 +548,10 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
548 struct ehca_eq *eq = &shca->eq; 548 struct ehca_eq *eq = &shca->eq;
549 struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache; 549 struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
550 u64 eqe_value, ret; 550 u64 eqe_value, ret;
551 unsigned long flags;
552 int eqe_cnt, i; 551 int eqe_cnt, i;
553 int eq_empty = 0; 552 int eq_empty = 0;
554 553
555 spin_lock_irqsave(&eq->irq_spinlock, flags); 554 spin_lock(&eq->irq_spinlock);
556 if (is_irq) { 555 if (is_irq) {
557 const int max_query_cnt = 100; 556 const int max_query_cnt = 100;
558 int query_cnt = 0; 557 int query_cnt = 0;
@@ -643,7 +642,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
643 } while (1); 642 } while (1);
644 643
645unlock_irq_spinlock: 644unlock_irq_spinlock:
646 spin_unlock_irqrestore(&eq->irq_spinlock, flags); 645 spin_unlock(&eq->irq_spinlock);
647} 646}
648 647
649void ehca_tasklet_eq(unsigned long data) 648void ehca_tasklet_eq(unsigned long data)
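
[Annotation] The ehca hunk above switches ehca_process_eq() from spin_lock_irqsave() to plain spin_lock(); the context shown here does not state the rationale, but the usual trade-off is that the flags variant is only required when the lock can also be taken from an interrupt that might preempt the current path. A generic illustration of the two forms (my_lock and both callers are invented):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

/* The lock is also taken from hardirq context and this caller may run with
 * interrupts enabled: the flags variant is required to avoid self-deadlock. */
static void my_update_from_anywhere(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	/* ... touch shared state ... */
	spin_unlock_irqrestore(&my_lock, flags);
}

/* Interrupts are already disabled here, or the lock is never taken from
 * hardirq context, so the cheaper plain form is sufficient. */
static void my_update_irq_safe_context(void)
{
	spin_lock(&my_lock);
	/* ... touch shared state ... */
	spin_unlock(&my_lock);
}
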
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 0338f1fabe8a..b105f664d3ef 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -55,9 +55,7 @@ static struct kmem_cache *qp_cache;
55/* 55/*
56 * attributes not supported by query qp 56 * attributes not supported by query qp
57 */ 57 */
58#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \ 58#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
59 IB_QP_MAX_QP_RD_ATOMIC | \
60 IB_QP_ACCESS_FLAGS | \
61 IB_QP_EN_SQD_ASYNC_NOTIFY) 59 IB_QP_EN_SQD_ASYNC_NOTIFY)
62 60
63/* 61/*
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 8c1213f8916a..dba8f9f8b996 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -222,7 +222,7 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
222{ 222{
223 int ret; 223 int ret;
224 224
225 if (!port_num || port_num > ibdev->phys_port_cnt) 225 if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
226 return IB_MAD_RESULT_FAILURE; 226 return IB_MAD_RESULT_FAILURE;
227 227
228 /* accept only pma request */ 228 /* accept only pma request */
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 82878e348627..eb7d59abd12d 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -59,8 +59,7 @@ static int __get_user_pages(unsigned long start_page, size_t num_pages,
59 size_t got; 59 size_t got;
60 int ret; 60 int ret;
61 61
62 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> 62 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
63 PAGE_SHIFT;
64 63
65 if (num_pages > lock_limit) { 64 if (num_pages > lock_limit) {
66 ret = -ENOMEM; 65 ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 2a97c964b9ef..ae75389937d6 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1214,7 +1214,7 @@ out:
1214static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, 1214static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1215 void *wqe, unsigned *mlx_seg_len) 1215 void *wqe, unsigned *mlx_seg_len)
1216{ 1216{
1217 struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev; 1217 struct ib_device *ib_dev = sqp->qp.ibqp.device;
1218 struct mlx4_wqe_mlx_seg *mlx = wqe; 1218 struct mlx4_wqe_mlx_seg *mlx = wqe;
1219 struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; 1219 struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
1220 struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); 1220 struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
@@ -1228,7 +1228,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1228 for (i = 0; i < wr->num_sge; ++i) 1228 for (i = 0; i < wr->num_sge; ++i)
1229 send_size += wr->sg_list[i].length; 1229 send_size += wr->sg_list[i].length;
1230 1230
1231 ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), &sqp->ud_header); 1231 ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), 0, &sqp->ud_header);
1232 1232
1233 sqp->ud_header.lrh.service_level = 1233 sqp->ud_header.lrh.service_level =
1234 be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28; 1234 be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index c10576fa60c1..d2d172e6289c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1494,7 +1494,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1494 u16 pkey; 1494 u16 pkey;
1495 1495
1496 ib_ud_header_init(256, /* assume a MAD */ 1496 ib_ud_header_init(256, /* assume a MAD */
1497 mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 1497 mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
1498 &sqp->ud_header); 1498 &sqp->ud_header);
1499 1499
1500 err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header); 1500 err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b9d09bafd6c1..4272c52e38a4 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -110,6 +110,7 @@ static unsigned int sysfs_idx_addr;
110 110
111static struct pci_device_id nes_pci_table[] = { 111static struct pci_device_id nes_pci_table[] = {
112 {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID}, 112 {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID},
113 {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR, PCI_ANY_ID, PCI_ANY_ID},
113 {0} 114 {0}
114}; 115};
115 116
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 98840564bb2f..cc78fee1dd51 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -64,8 +64,9 @@
64 * NetEffect PCI vendor id and NE010 PCI device id. 64 * NetEffect PCI vendor id and NE010 PCI device id.
65 */ 65 */
66#ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */ 66#ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */
67#define PCI_VENDOR_ID_NETEFFECT 0x1678 67#define PCI_VENDOR_ID_NETEFFECT 0x1678
68#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100 68#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100
69#define PCI_DEVICE_ID_NETEFFECT_NE020_KR 0x0110
69#endif 70#endif
70 71
71#define NE020_REV 4 72#define NE020_REV 4
@@ -193,8 +194,8 @@ extern u32 cm_packets_created;
193extern u32 cm_packets_received; 194extern u32 cm_packets_received;
194extern u32 cm_packets_dropped; 195extern u32 cm_packets_dropped;
195extern u32 cm_packets_retrans; 196extern u32 cm_packets_retrans;
196extern u32 cm_listens_created; 197extern atomic_t cm_listens_created;
197extern u32 cm_listens_destroyed; 198extern atomic_t cm_listens_destroyed;
198extern u32 cm_backlog_drops; 199extern u32 cm_backlog_drops;
199extern atomic_t cm_loopbacks; 200extern atomic_t cm_loopbacks;
200extern atomic_t cm_nodes_created; 201extern atomic_t cm_nodes_created;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 39468c277036..2a49ee40b520 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -67,8 +67,8 @@ u32 cm_packets_dropped;
67u32 cm_packets_retrans; 67u32 cm_packets_retrans;
68u32 cm_packets_created; 68u32 cm_packets_created;
69u32 cm_packets_received; 69u32 cm_packets_received;
70u32 cm_listens_created; 70atomic_t cm_listens_created;
71u32 cm_listens_destroyed; 71atomic_t cm_listens_destroyed;
72u32 cm_backlog_drops; 72u32 cm_backlog_drops;
73atomic_t cm_loopbacks; 73atomic_t cm_loopbacks;
74atomic_t cm_nodes_created; 74atomic_t cm_nodes_created;
@@ -1011,9 +1011,10 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
1011 event.cm_info.loc_port = 1011 event.cm_info.loc_port =
1012 loopback->loc_port; 1012 loopback->loc_port;
1013 event.cm_info.cm_id = loopback->cm_id; 1013 event.cm_info.cm_id = loopback->cm_id;
1014 add_ref_cm_node(loopback);
1015 loopback->state = NES_CM_STATE_CLOSED;
1014 cm_event_connect_error(&event); 1016 cm_event_connect_error(&event);
1015 cm_node->state = NES_CM_STATE_LISTENER_DESTROYED; 1017 cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
1016 loopback->state = NES_CM_STATE_CLOSED;
1017 1018
1018 rem_ref_cm_node(cm_node->cm_core, 1019 rem_ref_cm_node(cm_node->cm_core,
1019 cm_node); 1020 cm_node);
@@ -1042,7 +1043,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
1042 kfree(listener); 1043 kfree(listener);
1043 listener = NULL; 1044 listener = NULL;
1044 ret = 0; 1045 ret = 0;
1045 cm_listens_destroyed++; 1046 atomic_inc(&cm_listens_destroyed);
1046 } else { 1047 } else {
1047 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); 1048 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1048 } 1049 }
@@ -3172,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
3172 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); 3173 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
3173 return err; 3174 return err;
3174 } 3175 }
3175 cm_listens_created++; 3176 atomic_inc(&cm_listens_created);
3176 } 3177 }
3177 3178
3178 cm_id->add_ref(cm_id); 3179 cm_id->add_ref(cm_id);
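
[Annotation] The nes changes above convert cm_listens_created/cm_listens_destroyed from plain u32 counters to atomic_t, presumably so concurrent listen create/destroy paths cannot lose increments. A generic sketch of that counter pattern with an invented my_events counter:

#include <linux/atomic.h>	/* on trees of this era: <asm/atomic.h> */

static atomic_t my_events = ATOMIC_INIT(0);

static void my_event_happened(void)
{
	/* Unlike "counter++" on a plain u32, this is safe against concurrent callers. */
	atomic_inc(&my_events);
}

static int my_events_snapshot(void)
{
	return atomic_read(&my_events);
}
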
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index b1c2cbb88f09..ce7f53833577 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -748,16 +748,28 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
748 748
749 if (hw_rev != NE020_REV) { 749 if (hw_rev != NE020_REV) {
750 /* init serdes 0 */ 750 /* init serdes 0 */
751 if (wide_ppm_offset && (nesadapter->phy_type[0] == NES_PHY_TYPE_CX4)) 751 switch (nesadapter->phy_type[0]) {
752 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA); 752 case NES_PHY_TYPE_CX4:
753 else 753 if (wide_ppm_offset)
754 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA);
755 else
756 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
757 break;
758 case NES_PHY_TYPE_KR:
759 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
760 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000);
761 break;
762 case NES_PHY_TYPE_PUMA_1G:
754 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); 763 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
755
756 if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
757 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0); 764 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0);
758 sds |= 0x00000100; 765 sds |= 0x00000100;
759 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds); 766 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds);
767 break;
768 default:
769 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
770 break;
760 } 771 }
772
761 if (!OneG_Mode) 773 if (!OneG_Mode)
762 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000); 774 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
763 775
@@ -778,6 +790,9 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
778 if (wide_ppm_offset) 790 if (wide_ppm_offset)
779 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA); 791 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA);
780 break; 792 break;
793 case NES_PHY_TYPE_KR:
794 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
795 break;
781 case NES_PHY_TYPE_PUMA_1G: 796 case NES_PHY_TYPE_PUMA_1G:
782 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1); 797 sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
783 sds |= 0x000000100; 798 sds |= 0x000000100;
@@ -1279,115 +1294,115 @@ int nes_destroy_cqp(struct nes_device *nesdev)
1279 1294
1280 1295
1281/** 1296/**
1282 * nes_init_phy 1297 * nes_init_1g_phy
1283 */ 1298 */
1284int nes_init_phy(struct nes_device *nesdev) 1299int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
1285{ 1300{
1286 struct nes_adapter *nesadapter = nesdev->nesadapter;
1287 u32 counter = 0; 1301 u32 counter = 0;
1288 u32 sds;
1289 u32 mac_index = nesdev->mac_index;
1290 u32 tx_config = 0;
1291 u16 phy_data; 1302 u16 phy_data;
1292 u32 temp_phy_data = 0; 1303 int ret = 0;
1293 u32 temp_phy_data2 = 0;
1294 u8 phy_type = nesadapter->phy_type[mac_index];
1295 u8 phy_index = nesadapter->phy_index[mac_index];
1296
1297 if ((nesadapter->OneG_Mode) &&
1298 (phy_type != NES_PHY_TYPE_PUMA_1G)) {
1299 nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
1300 if (phy_type == NES_PHY_TYPE_1G) {
1301 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
1302 tx_config &= 0xFFFFFFE3;
1303 tx_config |= 0x04;
1304 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1305 }
1306 1304
1307 nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data); 1305 nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data);
1308 nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000); 1306 nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000);
1309 1307
1310 /* Reset the PHY */ 1308 /* Reset the PHY */
1311 nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000); 1309 nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000);
1312 udelay(100); 1310 udelay(100);
1313 counter = 0; 1311 counter = 0;
1314 do { 1312 do {
1315 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1316 if (counter++ > 100)
1317 break;
1318 } while (phy_data & 0x8000);
1319
1320 /* Setting no phy loopback */
1321 phy_data &= 0xbfff;
1322 phy_data |= 0x1140;
1323 nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
1324 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); 1313 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1325 nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data); 1314 if (counter++ > 100) {
1326 nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data); 1315 ret = -1;
1327 1316 break;
1328 /* Setting the interrupt mask */ 1317 }
1329 nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); 1318 } while (phy_data & 0x8000);
1330 nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee); 1319
1331 nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); 1320 /* Setting no phy loopback */
1321 phy_data &= 0xbfff;
1322 phy_data |= 0x1140;
1323 nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
1324 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1325 nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data);
1326 nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data);
1327
1328 /* Setting the interrupt mask */
1329 nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
1330 nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee);
1331 nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
1332
1333 /* turning on flow control */
1334 nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
1335 nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00);
1336 nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
1337
1338 /* Clear Half duplex */
1339 nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
1340 nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100));
1341 nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
1342
1343 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
1344 nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300);
1345
1346 return ret;
1347}
1332 1348
1333 /* turning on flow control */
1334 nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
1335 nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00);
1336 nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
1337 1349
1338 /* Clear Half duplex */ 1350/**
1339 nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data); 1351 * nes_init_2025_phy
1340 nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100)); 1352 */
1341 nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data); 1353int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
1354{
1355 u32 temp_phy_data = 0;
1356 u32 temp_phy_data2 = 0;
1357 u32 counter = 0;
1358 u32 sds;
1359 u32 mac_index = nesdev->mac_index;
1360 int ret = 0;
1361 unsigned int first_attempt = 1;
1342 1362
1343 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); 1363 /* Check firmware heartbeat */
1344 nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300); 1364 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1365 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1366 udelay(1500);
1367 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1368 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1345 1369
1346 return 0; 1370 if (temp_phy_data != temp_phy_data2) {
1371 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
1372 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1373 if ((temp_phy_data & 0xff) > 0x20)
1374 return 0;
1375 printk(PFX "Reinitialize external PHY\n");
1347 } 1376 }
1348 1377
1349 if ((phy_type == NES_PHY_TYPE_IRIS) || 1378 /* no heartbeat, configure the PHY */
1350 (phy_type == NES_PHY_TYPE_ARGUS) || 1379 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
1351 (phy_type == NES_PHY_TYPE_SFP_D)) { 1380 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
1352 /* setup 10G MDIO operation */ 1381 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
1353 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); 1382 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
1354 tx_config &= 0xFFFFFFE3;
1355 tx_config |= 0x15;
1356 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1357 }
1358 if ((phy_type == NES_PHY_TYPE_ARGUS) ||
1359 (phy_type == NES_PHY_TYPE_SFP_D)) {
1360 u32 first_time = 1;
1361 1383
1362 /* Check firmware heartbeat */ 1384 switch (phy_type) {
1363 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); 1385 case NES_PHY_TYPE_ARGUS:
1364 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1386 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
1365 udelay(1500); 1387 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
1366 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); 1388 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
1367 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1389 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
1390 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
1391 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
1392 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
1368 1393
1369 if (temp_phy_data != temp_phy_data2) { 1394 /* setup LEDs */
1370 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd); 1395 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
1371 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1396 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
1372 if ((temp_phy_data & 0xff) > 0x20) 1397 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
1373 return 0; 1398 break;
1374 printk(PFX "Reinitializing PHY\n");
1375 }
1376 1399
1377 /* no heartbeat, configure the PHY */ 1400 case NES_PHY_TYPE_SFP_D:
1378 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
1379 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
1380 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A); 1401 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
1381 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052); 1402 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
1382 if (phy_type == NES_PHY_TYPE_ARGUS) { 1403 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
1383 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C); 1404 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
1384 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008); 1405 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
1385 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
1386 } else {
1387 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
1388 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
1389 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
1390 }
1391 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098); 1406 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
1392 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00); 1407 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
1393 1408
@@ -1395,71 +1410,136 @@ int nes_init_phy(struct nes_device *nesdev)
1395 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007); 1410 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
1396 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A); 1411 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
1397 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009); 1412 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
1413 break;
1414
1415 case NES_PHY_TYPE_KR:
1416 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
1417 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
1418 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
1419 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0010);
1420 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
1421 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0080);
1422 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
1423
1424 /* setup LEDs */
1425 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x000B);
1426 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x0003);
1427 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0004);
1398 1428
1399 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528); 1429 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0022, 0x406D);
1430 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0023, 0x0020);
1431 break;
1432 }
1433
1434 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528);
1400 1435
1401 /* Bring PHY out of reset */ 1436 /* Bring PHY out of reset */
1402 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002); 1437 nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002);
1403 1438
1404 /* Check for heartbeat */ 1439 /* Check for heartbeat */
1405 counter = 0; 1440 counter = 0;
1406 mdelay(690); 1441 mdelay(690);
1442 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1443 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1444 do {
1445 if (counter++ > 150) {
1446 printk(PFX "No PHY heartbeat\n");
1447 break;
1448 }
1449 mdelay(1);
1407 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); 1450 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
1451 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1452 } while ((temp_phy_data2 == temp_phy_data));
1453
1454 /* wait for tracking */
1455 counter = 0;
1456 do {
1457 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
1408 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1458 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
1409 do { 1459 if (counter++ > 300) {
1410 if (counter++ > 150) { 1460 if (((temp_phy_data & 0xff) == 0x0) && first_attempt) {
1411 printk(PFX "No PHY heartbeat\n"); 1461 first_attempt = 0;
1462 counter = 0;
1463 /* reset AMCC PHY and try again */
1464 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
1465 nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
1466 continue;
1467 } else {
1468 ret = 1;
1412 break; 1469 break;
1413 } 1470 }
1414 mdelay(1); 1471 }
1415 nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); 1472 mdelay(10);
1416 temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); 1473 } while ((temp_phy_data & 0xff) < 0x30);
old version, lines 1417-1465:

	} while ((temp_phy_data2 == temp_phy_data));

	/* wait for tracking */
	counter = 0;
	do {
		nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
		temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
		if (counter++ > 300) {
			if (((temp_phy_data & 0xff) == 0x0) && first_time) {
				first_time = 0;
				counter = 0;
				/* reset AMCC PHY and try again */
				nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
				nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
				continue;
			} else {
				printk(PFX "PHY did not track\n");
				break;
			}
		}
		mdelay(10);
	} while ((temp_phy_data & 0xff) < 0x30);

	/* setup signal integrity */
	nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
	nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE);
	nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032);
	nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002);
	nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063);

	/* reset serdes */
	sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
			       mac_index * 0x200);
	sds |= 0x1;
	nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
			  mac_index * 0x200, sds);
	sds &= 0xfffffffe;
	nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
			  mac_index * 0x200, sds);

	counter = 0;
	while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
			&& (counter++ < 5000))
		;
	}
	return 0;
}

new version, lines 1474-1545:

	/* setup signal integrity */
	nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
	nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE);
	nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032);
	if (phy_type == NES_PHY_TYPE_KR) {
		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x000C);
	} else {
		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002);
		nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063);
	}

	/* reset serdes */
	sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200);
	sds |= 0x1;
	nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);
	sds &= 0xfffffffe;
	nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);

	counter = 0;
	while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
			&& (counter++ < 5000))
		;

	return ret;
}


/**
 * nes_init_phy
 */
int nes_init_phy(struct nes_device *nesdev)
{
	struct nes_adapter *nesadapter = nesdev->nesadapter;
	u32 mac_index = nesdev->mac_index;
	u32 tx_config = 0;
	unsigned long flags;
	u8 phy_type = nesadapter->phy_type[mac_index];
	u8 phy_index = nesadapter->phy_index[mac_index];
	int ret = 0;

	tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
	if (phy_type == NES_PHY_TYPE_1G) {
		/* setup 1G MDIO operation */
		tx_config &= 0xFFFFFFE3;
		tx_config |= 0x04;
	} else {
		/* setup 10G MDIO operation */
		tx_config &= 0xFFFFFFE3;
		tx_config |= 0x15;
	}
	nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);

	spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);

	switch (phy_type) {
	case NES_PHY_TYPE_1G:
		ret = nes_init_1g_phy(nesdev, phy_type, phy_index);
		break;
	case NES_PHY_TYPE_ARGUS:
	case NES_PHY_TYPE_SFP_D:
	case NES_PHY_TYPE_KR:
		ret = nes_init_2025_phy(nesdev, phy_type, phy_index);
		break;
	}

	spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);

	return ret;
}

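For reference, the serdes reset idiom that appears in both the old and the new code above reduces to the following sketch. The helper name is hypothetical and not part of the patch; register names and the 5000-iteration poll bound are taken from the diff.

static void nes_reset_serdes(struct nes_device *nesdev, u32 mac_index)
{
	u32 sds;
	u32 counter = 0;

	/* pulse bit 0 of the per-MAC SERDES control register */
	sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200);
	sds |= 0x1;
	nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);
	sds &= 0xfffffffe;
	nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);

	/* bounded spin until the software-reset status bit comes up */
	while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) &&
	       (counter++ < 5000))
		;
}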
@@ -2460,23 +2540,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2460 } 2540 }
2461 } else { 2541 } else {
2462 switch (nesadapter->phy_type[mac_index]) { 2542 switch (nesadapter->phy_type[mac_index]) {
2463 case NES_PHY_TYPE_IRIS:
2464 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
2465 temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2466 u32temp = 20;
2467 do {
2468 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
2469 phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
2470 if ((phy_data == temp_phy_data) || (!(--u32temp)))
2471 break;
2472 temp_phy_data = phy_data;
2473 } while (1);
2474 nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
2475 __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
2476 break;
2477
2478 case NES_PHY_TYPE_ARGUS: 2543 case NES_PHY_TYPE_ARGUS:
2479 case NES_PHY_TYPE_SFP_D: 2544 case NES_PHY_TYPE_SFP_D:
2545 case NES_PHY_TYPE_KR:
2480 /* clear the alarms */ 2546 /* clear the alarms */
2481 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008); 2547 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008);
2482 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001); 2548 nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001);
@@ -3352,8 +3418,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3352 u16 async_event_id; 3418 u16 async_event_id;
3353 u8 tcp_state; 3419 u8 tcp_state;
3354 u8 iwarp_state; 3420 u8 iwarp_state;
3355 int must_disconn = 1;
3356 int must_terminate = 0;
3357 struct ib_event ibevent; 3421 struct ib_event ibevent;
3358 3422
3359 nes_debug(NES_DBG_AEQ, "\n"); 3423 nes_debug(NES_DBG_AEQ, "\n");
@@ -3367,6 +3431,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3367 BUG_ON(!context); 3431 BUG_ON(!context);
3368 } 3432 }
3369 3433
3434 /* context is nesqp unless async_event_id == CQ ERROR */
3435 nesqp = (struct nes_qp *)(unsigned long)context;
3370 async_event_id = (u16)aeq_info; 3436 async_event_id = (u16)aeq_info;
3371 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; 3437 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
3372 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; 3438 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
@@ -3378,8 +3444,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3378 3444
3379 switch (async_event_id) { 3445 switch (async_event_id) {
3380 case NES_AEQE_AEID_LLP_FIN_RECEIVED: 3446 case NES_AEQE_AEID_LLP_FIN_RECEIVED:
3381 nesqp = (struct nes_qp *)(unsigned long)context;
3382
3383 if (nesqp->term_flags) 3447 if (nesqp->term_flags)
3384 return; /* Ignore it, wait for close complete */ 3448 return; /* Ignore it, wait for close complete */
3385 3449
@@ -3394,79 +3458,48 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3394 async_event_id, nesqp->last_aeq, tcp_state); 3458 async_event_id, nesqp->last_aeq, tcp_state);
3395 } 3459 }
3396 3460
3397 if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) || 3461 break;
3398 (nesqp->ibqp_state != IB_QPS_RTS)) {
3399 /* FIN Received but tcp state or IB state moved on,
3400 should expect a close complete */
3401 return;
3402 }
3403
3404 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: 3462 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
3405 nesqp = (struct nes_qp *)(unsigned long)context;
3406 if (nesqp->term_flags) { 3463 if (nesqp->term_flags) {
3407 nes_terminate_done(nesqp, 0); 3464 nes_terminate_done(nesqp, 0);
3408 return; 3465 return;
3409 } 3466 }
3467 spin_lock_irqsave(&nesqp->lock, flags);
3468 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
3469 spin_unlock_irqrestore(&nesqp->lock, flags);
3470 nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_CLOSING, 0, 0);
3471 nes_cm_disconn(nesqp);
3472 break;
3410 3473
3411 case NES_AEQE_AEID_LLP_CONNECTION_RESET:
3412 case NES_AEQE_AEID_RESET_SENT: 3474 case NES_AEQE_AEID_RESET_SENT:
3413 nesqp = (struct nes_qp *)(unsigned long)context; 3475 tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3414 if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
3415 tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3416 }
3417 spin_lock_irqsave(&nesqp->lock, flags); 3476 spin_lock_irqsave(&nesqp->lock, flags);
3418 nesqp->hw_iwarp_state = iwarp_state; 3477 nesqp->hw_iwarp_state = iwarp_state;
3419 nesqp->hw_tcp_state = tcp_state; 3478 nesqp->hw_tcp_state = tcp_state;
3420 nesqp->last_aeq = async_event_id; 3479 nesqp->last_aeq = async_event_id;
3421 3480 nesqp->hte_added = 0;
3422 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
3423 (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
3424 nesqp->hte_added = 0;
3425 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
3426 }
3427
3428 if ((nesqp->ibqp_state == IB_QPS_RTS) &&
3429 ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
3430 (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
3431 switch (nesqp->hw_iwarp_state) {
3432 case NES_AEQE_IWARP_STATE_RTS:
3433 next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
3434 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
3435 break;
3436 case NES_AEQE_IWARP_STATE_TERMINATE:
3437 must_disconn = 0; /* terminate path takes care of disconn */
3438 if (nesqp->term_flags == 0)
3439 must_terminate = 1;
3440 break;
3441 }
3442 } else {
3443 if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
3444 /* FIN Received but ib state not RTS,
3445 close complete will be on its way */
3446 must_disconn = 0;
3447 }
3448 }
3449 spin_unlock_irqrestore(&nesqp->lock, flags); 3481 spin_unlock_irqrestore(&nesqp->lock, flags);
3482 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
3483 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
3484 nes_cm_disconn(nesqp);
3485 break;
3450 3486
3451 if (must_terminate) 3487 case NES_AEQE_AEID_LLP_CONNECTION_RESET:
3452 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); 3488 if (atomic_read(&nesqp->close_timer_started))
3453 else if (must_disconn) { 3489 return;
3454 if (next_iwarp_state) { 3490 spin_lock_irqsave(&nesqp->lock, flags);
3455 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n", 3491 nesqp->hw_iwarp_state = iwarp_state;
3456 nesqp->hwqp.qp_id, next_iwarp_state); 3492 nesqp->hw_tcp_state = tcp_state;
3457 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); 3493 nesqp->last_aeq = async_event_id;
3458 } 3494 spin_unlock_irqrestore(&nesqp->lock, flags);
3459 nes_cm_disconn(nesqp); 3495 nes_cm_disconn(nesqp);
3460 }
3461 break; 3496 break;
3462 3497
3463 case NES_AEQE_AEID_TERMINATE_SENT: 3498 case NES_AEQE_AEID_TERMINATE_SENT:
3464 nesqp = (struct nes_qp *)(unsigned long)context;
3465 nes_terminate_send_fin(nesdev, nesqp, aeqe); 3499 nes_terminate_send_fin(nesdev, nesqp, aeqe);
3466 break; 3500 break;
3467 3501
3468 case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED: 3502 case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
3469 nesqp = (struct nes_qp *)(unsigned long)context;
3470 nes_terminate_received(nesdev, nesqp, aeqe); 3503 nes_terminate_received(nesdev, nesqp, aeqe);
3471 break; 3504 break;
3472 3505
@@ -3480,7 +3513,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3480 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: 3513 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
3481 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION: 3514 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
3482 case NES_AEQE_AEID_AMP_TO_WRAP: 3515 case NES_AEQE_AEID_AMP_TO_WRAP:
3483 nesqp = (struct nes_qp *)(unsigned long)context; 3516 printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_ACCESS_ERR\n",
3517 nesqp->hwqp.qp_id, async_event_id);
3484 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR); 3518 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR);
3485 break; 3519 break;
3486 3520
@@ -3488,7 +3522,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3488 case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL: 3522 case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
3489 case NES_AEQE_AEID_DDP_UBE_INVALID_MO: 3523 case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
3490 case NES_AEQE_AEID_DDP_UBE_INVALID_QN: 3524 case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
3491 nesqp = (struct nes_qp *)(unsigned long)context;
3492 if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) { 3525 if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) {
3493 aeq_info &= 0xffff0000; 3526 aeq_info &= 0xffff0000;
3494 aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE; 3527 aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
@@ -3530,7 +3563,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3530 case NES_AEQE_AEID_STAG_ZERO_INVALID: 3563 case NES_AEQE_AEID_STAG_ZERO_INVALID:
3531 case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST: 3564 case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST:
3532 case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: 3565 case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
3533 nesqp = (struct nes_qp *)(unsigned long)context; 3566 printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_FATAL\n",
3567 nesqp->hwqp.qp_id, async_event_id);
3534 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); 3568 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
3535 break; 3569 break;
3536 3570
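The AEQE hunks above hoist the nesqp = (struct nes_qp *)(unsigned long)context cast out of the individual case labels, relying on the new comment that the context is the QP pointer for everything except CQ errors. A rough sketch of that round trip, with hypothetical helper names that are not part of the patch, looks like this; casting through unsigned long keeps the conversion clean on 32-bit builds.

static inline u64 nes_qp_to_aeq_context(struct nes_qp *nesqp)
{
	/* stored into the 64-bit AEQE context when the QP is set up */
	return (u64)(unsigned long)nesqp;
}

static inline struct nes_qp *nes_aeq_context_to_qp(u64 context)
{
	/* recovered in the async event handler, as in the diff above */
	return (struct nes_qp *)(unsigned long)context;
}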
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 084be0ee689b..9b1e7f869d83 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -37,12 +37,12 @@
37 37
38#define NES_PHY_TYPE_CX4 1 38#define NES_PHY_TYPE_CX4 1
39#define NES_PHY_TYPE_1G 2 39#define NES_PHY_TYPE_1G 2
40#define NES_PHY_TYPE_IRIS 3
41#define NES_PHY_TYPE_ARGUS 4 40#define NES_PHY_TYPE_ARGUS 4
42#define NES_PHY_TYPE_PUMA_1G 5 41#define NES_PHY_TYPE_PUMA_1G 5
43#define NES_PHY_TYPE_PUMA_10G 6 42#define NES_PHY_TYPE_PUMA_10G 6
44#define NES_PHY_TYPE_GLADIUS 7 43#define NES_PHY_TYPE_GLADIUS 7
45#define NES_PHY_TYPE_SFP_D 8 44#define NES_PHY_TYPE_SFP_D 8
45#define NES_PHY_TYPE_KR 9
46 46
47#define NES_MULTICAST_PF_MAX 8 47#define NES_MULTICAST_PF_MAX 8
48 48
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 9384f5d3d33b..a1d79b6856ac 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1243,8 +1243,8 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
1243 target_stat_values[++index] = cm_packets_received; 1243 target_stat_values[++index] = cm_packets_received;
1244 target_stat_values[++index] = cm_packets_dropped; 1244 target_stat_values[++index] = cm_packets_dropped;
1245 target_stat_values[++index] = cm_packets_retrans; 1245 target_stat_values[++index] = cm_packets_retrans;
1246 target_stat_values[++index] = cm_listens_created; 1246 target_stat_values[++index] = atomic_read(&cm_listens_created);
1247 target_stat_values[++index] = cm_listens_destroyed; 1247 target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
1248 target_stat_values[++index] = cm_backlog_drops; 1248 target_stat_values[++index] = cm_backlog_drops;
1249 target_stat_values[++index] = atomic_read(&cm_loopbacks); 1249 target_stat_values[++index] = atomic_read(&cm_loopbacks);
1250 target_stat_values[++index] = atomic_read(&cm_nodes_created); 1250 target_stat_values[++index] = atomic_read(&cm_nodes_created);
@@ -1474,9 +1474,9 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
1474 } 1474 }
1475 return 0; 1475 return 0;
1476 } 1476 }
1477 if ((phy_type == NES_PHY_TYPE_IRIS) || 1477 if ((phy_type == NES_PHY_TYPE_ARGUS) ||
1478 (phy_type == NES_PHY_TYPE_ARGUS) || 1478 (phy_type == NES_PHY_TYPE_SFP_D) ||
1479 (phy_type == NES_PHY_TYPE_SFP_D)) { 1479 (phy_type == NES_PHY_TYPE_KR)) {
1480 et_cmd->transceiver = XCVR_EXTERNAL; 1480 et_cmd->transceiver = XCVR_EXTERNAL;
1481 et_cmd->port = PORT_FIBRE; 1481 et_cmd->port = PORT_FIBRE;
1482 et_cmd->supported = SUPPORTED_FIBRE; 1482 et_cmd->supported = SUPPORTED_FIBRE;
@@ -1596,8 +1596,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1596 struct net_device *netdev; 1596 struct net_device *netdev;
1597 struct nic_qp_map *curr_qp_map; 1597 struct nic_qp_map *curr_qp_map;
1598 u32 u32temp; 1598 u32 u32temp;
1599 u16 phy_data; 1599 u8 phy_type = nesdev->nesadapter->phy_type[nesdev->mac_index];
1600 u16 temp_phy_data;
1601 1600
1602 netdev = alloc_etherdev(sizeof(struct nes_vnic)); 1601 netdev = alloc_etherdev(sizeof(struct nes_vnic));
1603 if (!netdev) { 1602 if (!netdev) {
@@ -1705,65 +1704,23 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1705 1704
1706 if ((nesdev->netdev_count == 0) && 1705 if ((nesdev->netdev_count == 0) &&
1707 ((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) || 1706 ((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) ||
1708 ((nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) && 1707 ((phy_type == NES_PHY_TYPE_PUMA_1G) &&
1709 (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) || 1708 (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
1710 ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) { 1709 ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
1711 /*
1712 * nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n",
1713 * NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesvnic->logical_port & 1)));
1714 */
1715 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 1710 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1716 (0x200 * (nesdev->mac_index & 1))); 1711 (0x200 * (nesdev->mac_index & 1)));
1717 if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G) { 1712 if (phy_type != NES_PHY_TYPE_PUMA_1G) {
1718 u32temp |= 0x00200000; 1713 u32temp |= 0x00200000;
1719 nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 1714 nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1720 (0x200 * (nesdev->mac_index & 1)), u32temp); 1715 (0x200 * (nesdev->mac_index & 1)), u32temp);
1721 } 1716 }
1722 1717
1723 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1724 (0x200 * (nesdev->mac_index & 1)));
1725
1726 if ((u32temp&0x0f1f0000) == 0x0f0f0000) {
1727 if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) {
1728 nes_init_phy(nesdev);
1729 nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
1730 temp_phy_data = (u16)nes_read_indexed(nesdev,
1731 NES_IDX_MAC_MDIO_CONTROL);
1732 u32temp = 20;
1733 do {
1734 nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
1735 phy_data = (u16)nes_read_indexed(nesdev,
1736 NES_IDX_MAC_MDIO_CONTROL);
1737 if ((phy_data == temp_phy_data) || (!(--u32temp)))
1738 break;
1739 temp_phy_data = phy_data;
1740 } while (1);
1741 if (phy_data & 4) {
1742 nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
1743 nesvnic->linkup = 1;
1744 } else {
1745 nes_debug(NES_DBG_INIT, "The Link is DOWN!!.\n");
1746 }
1747 } else {
1748 nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
1749 nesvnic->linkup = 1;
1750 }
1751 } else if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) {
1752 nes_debug(NES_DBG_INIT, "mac_index=%d, logical_port=%d, u32temp=0x%04X, PCI_FUNC=%d\n",
1753 nesdev->mac_index, nesvnic->logical_port, u32temp, PCI_FUNC(nesdev->pcidev->devfn));
1754 if (((nesdev->mac_index < 2) && ((u32temp&0x01010000) == 0x01010000)) ||
1755 ((nesdev->mac_index > 1) && ((u32temp&0x02020000) == 0x02020000))) {
1756 nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
1757 nesvnic->linkup = 1;
1758 }
1759 }
1760 /* clear the MAC interrupt status, assumes direct logical to physical mapping */ 1718 /* clear the MAC interrupt status, assumes direct logical to physical mapping */
1761 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index)); 1719 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
1762 nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp); 1720 nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp);
1763 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp); 1721 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp);
1764 1722
1765 if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_IRIS) 1723 nes_init_phy(nesdev);
1766 nes_init_phy(nesdev);
1767 1724
1768 } 1725 }
1769 1726
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 64d3136e3747..815725f886c4 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -228,7 +228,7 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
228 /* Check for SQ overflow */ 228 /* Check for SQ overflow */
229 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { 229 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
230 spin_unlock_irqrestore(&nesqp->lock, flags); 230 spin_unlock_irqrestore(&nesqp->lock, flags);
231 return -EINVAL; 231 return -ENOMEM;
232 } 232 }
233 233
234 wqe = &nesqp->hwqp.sq_vbase[head]; 234 wqe = &nesqp->hwqp.sq_vbase[head];
@@ -3294,7 +3294,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3294 3294
3295 /* Check for SQ overflow */ 3295 /* Check for SQ overflow */
3296 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { 3296 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
3297 err = -EINVAL; 3297 err = -ENOMEM;
3298 break; 3298 break;
3299 } 3299 }
3300 3300
@@ -3577,7 +3577,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
3577 } 3577 }
3578 /* Check for RQ overflow */ 3578 /* Check for RQ overflow */
3579 if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) { 3579 if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) {
3580 err = -EINVAL; 3580 err = -ENOMEM;
3581 break; 3581 break;
3582 } 3582 }
3583 3583
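The three nes_verbs.c hunks above change only the errno returned when the work-queue ring is full, from -EINVAL to -ENOMEM, which is the conventional "queue full" result for post_send/post_recv. The fullness test itself can be read as the sketch below (hypothetical helper, not in the patch): adding 2 * qsize before the subtraction keeps the unsigned arithmetic non-negative, and the ring reports full once qsize - 1 slots are in use, so one slot always stays empty to distinguish full from empty.

static inline int nes_queue_full(u32 head, u32 tail, u32 qsize)
{
	/* number of used slots, modulo the ring size */
	return ((head + (2 * qsize) - tail) % qsize) == (qsize - 1);
}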
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index e9795f60e5d6..d10b4ec68d28 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -55,9 +55,7 @@ static int ipoib_get_coalesce(struct net_device *dev,
55 struct ipoib_dev_priv *priv = netdev_priv(dev); 55 struct ipoib_dev_priv *priv = netdev_priv(dev);
56 56
57 coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs; 57 coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
58 coal->tx_coalesce_usecs = priv->ethtool.coalesce_usecs;
59 coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames; 58 coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
60 coal->tx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
61 59
62 return 0; 60 return 0;
63} 61}
@@ -69,10 +67,8 @@ static int ipoib_set_coalesce(struct net_device *dev,
69 int ret; 67 int ret;
70 68
71 /* 69 /*
72 * Since IPoIB uses a single CQ for both rx and tx, we assume 70 * These values are saved in the private data and returned
73 * that rx params dictate the configuration. These values are 71 * when ipoib_get_coalesce() is called
74 * saved in the private data and returned when ipoib_get_coalesce()
75 * is called.
76 */ 72 */
77 if (coal->rx_coalesce_usecs > 0xffff || 73 if (coal->rx_coalesce_usecs > 0xffff ||
78 coal->rx_max_coalesced_frames > 0xffff) 74 coal->rx_max_coalesced_frames > 0xffff)
@@ -85,8 +81,6 @@ static int ipoib_set_coalesce(struct net_device *dev,
85 return ret; 81 return ret;
86 } 82 }
87 83
88 coal->tx_coalesce_usecs = coal->rx_coalesce_usecs;
89 coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames;
90 priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs; 84 priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs;
91 priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames; 85 priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames;
92 86
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5f7a6fca0a4d..71237f8f78f7 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -128,6 +128,28 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
128 return 0; 128 return 0;
129} 129}
130 130
131int iser_initialize_task_headers(struct iscsi_task *task,
132 struct iser_tx_desc *tx_desc)
133{
134 struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
135 struct iser_device *device = iser_conn->ib_conn->device;
136 struct iscsi_iser_task *iser_task = task->dd_data;
137 u64 dma_addr;
138
139 dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
140 ISER_HEADERS_LEN, DMA_TO_DEVICE);
141 if (ib_dma_mapping_error(device->ib_device, dma_addr))
142 return -ENOMEM;
143
144 tx_desc->dma_addr = dma_addr;
145 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
146 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
147 tx_desc->tx_sg[0].lkey = device->mr->lkey;
148
149 iser_task->headers_initialized = 1;
150 iser_task->iser_conn = iser_conn;
151 return 0;
152}
131/** 153/**
132 * iscsi_iser_task_init - Initialize task 154 * iscsi_iser_task_init - Initialize task
133 * @task: iscsi task 155 * @task: iscsi task
@@ -137,17 +159,17 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
137static int 159static int
138iscsi_iser_task_init(struct iscsi_task *task) 160iscsi_iser_task_init(struct iscsi_task *task)
139{ 161{
140 struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
141 struct iscsi_iser_task *iser_task = task->dd_data; 162 struct iscsi_iser_task *iser_task = task->dd_data;
142 163
164 if (!iser_task->headers_initialized)
165 if (iser_initialize_task_headers(task, &iser_task->desc))
166 return -ENOMEM;
167
143 /* mgmt task */ 168 /* mgmt task */
144 if (!task->sc) { 169 if (!task->sc)
145 iser_task->desc.data = task->data;
146 return 0; 170 return 0;
147 }
148 171
149 iser_task->command_sent = 0; 172 iser_task->command_sent = 0;
150 iser_task->iser_conn = iser_conn;
151 iser_task_rdma_init(iser_task); 173 iser_task_rdma_init(iser_task);
152 return 0; 174 return 0;
153} 175}
@@ -168,7 +190,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
168{ 190{
169 int error = 0; 191 int error = 0;
170 192
171 iser_dbg("task deq [cid %d itt 0x%x]\n", conn->id, task->itt); 193 iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt);
172 194
173 error = iser_send_control(conn, task); 195 error = iser_send_control(conn, task);
174 196
@@ -178,9 +200,6 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
178 * - if yes, the task is recycled at iscsi_complete_pdu 200 * - if yes, the task is recycled at iscsi_complete_pdu
179 * - if no, the task is recycled at iser_snd_completion 201 * - if no, the task is recycled at iser_snd_completion
180 */ 202 */
181 if (error && error != -ENOBUFS)
182 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
183
184 return error; 203 return error;
185} 204}
186 205
@@ -232,7 +251,7 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
232 task->imm_count, task->unsol_r2t.data_length); 251 task->imm_count, task->unsol_r2t.data_length);
233 } 252 }
234 253
235 iser_dbg("task deq [cid %d itt 0x%x]\n", 254 iser_dbg("ctask xmit [cid %d itt 0x%x]\n",
236 conn->id, task->itt); 255 conn->id, task->itt);
237 256
238 /* Send the cmd PDU */ 257 /* Send the cmd PDU */
@@ -248,8 +267,6 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
248 error = iscsi_iser_task_xmit_unsol_data(conn, task); 267 error = iscsi_iser_task_xmit_unsol_data(conn, task);
249 268
250 iscsi_iser_task_xmit_exit: 269 iscsi_iser_task_xmit_exit:
251 if (error && error != -ENOBUFS)
252 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
253 return error; 270 return error;
254} 271}
255 272
@@ -283,7 +300,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
283 * due to issues with the login code re iser sematics 300 * due to issues with the login code re iser sematics
284 * this not set in iscsi_conn_setup - FIXME 301 * this not set in iscsi_conn_setup - FIXME
285 */ 302 */
286 conn->max_recv_dlength = 128; 303 conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN;
287 304
288 iser_conn = conn->dd_data; 305 iser_conn = conn->dd_data;
289 conn->dd_data = iser_conn; 306 conn->dd_data = iser_conn;
@@ -401,7 +418,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
401 struct Scsi_Host *shost; 418 struct Scsi_Host *shost;
402 struct iser_conn *ib_conn; 419 struct iser_conn *ib_conn;
403 420
404 shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 1); 421 shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
405 if (!shost) 422 if (!shost)
406 return NULL; 423 return NULL;
407 shost->transportt = iscsi_iser_scsi_transport; 424 shost->transportt = iscsi_iser_scsi_transport;
@@ -675,7 +692,7 @@ static int __init iser_init(void)
675 memset(&ig, 0, sizeof(struct iser_global)); 692 memset(&ig, 0, sizeof(struct iser_global));
676 693
677 ig.desc_cache = kmem_cache_create("iser_descriptors", 694 ig.desc_cache = kmem_cache_create("iser_descriptors",
678 sizeof (struct iser_desc), 695 sizeof(struct iser_tx_desc),
679 0, SLAB_HWCACHE_ALIGN, 696 0, SLAB_HWCACHE_ALIGN,
680 NULL); 697 NULL);
681 if (ig.desc_cache == NULL) 698 if (ig.desc_cache == NULL)
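The new iser_initialize_task_headers() above maps ISER_HEADERS_LEN bytes of the tx descriptor with ib_dma_map_single() and records the handle in tx_desc->dma_addr. A mapping made this way eventually needs a matching ib_dma_unmap_single() with the same length and direction; a minimal sketch of such a counterpart (hypothetical helper, the corresponding cleanup path is not shown in this excerpt) would be:

static void iser_release_task_headers(struct iser_device *device,
				      struct iser_tx_desc *tx_desc)
{
	ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
			    ISER_HEADERS_LEN, DMA_TO_DEVICE);
}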
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9d529cae1f0d..036934cdcb92 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -102,9 +102,9 @@
102#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), * 102#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
103 * SCSI_TMFUNC(2), LOGOUT(1) */ 103 * SCSI_TMFUNC(2), LOGOUT(1) */
104 104
105#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \ 105#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX)
106 ISER_MAX_RX_MISC_PDUS + \ 106
107 ISER_MAX_TX_MISC_PDUS) 107#define ISER_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
108 108
109/* the max TX (send) WR supported by the iSER QP is defined by * 109/* the max TX (send) WR supported by the iSER QP is defined by *
110 * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect * 110 * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
@@ -132,6 +132,12 @@ struct iser_hdr {
132 __be64 read_va; 132 __be64 read_va;
133} __attribute__((packed)); 133} __attribute__((packed));
134 134
135/* Constant PDU lengths calculations */
136#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
137
138#define ISER_RECV_DATA_SEG_LEN 128
139#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
140#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
135 141
136/* Length of an object name string */ 142/* Length of an object name string */
137#define ISER_OBJECT_NAME_SIZE 64 143#define ISER_OBJECT_NAME_SIZE 64
@@ -187,51 +193,43 @@ struct iser_regd_buf {
187 struct iser_mem_reg reg; /* memory registration info */ 193 struct iser_mem_reg reg; /* memory registration info */
188 void *virt_addr; 194 void *virt_addr;
189 struct iser_device *device; /* device->device for dma_unmap */ 195 struct iser_device *device; /* device->device for dma_unmap */
190 u64 dma_addr; /* if non zero, addr for dma_unmap */
191 enum dma_data_direction direction; /* direction for dma_unmap */ 196 enum dma_data_direction direction; /* direction for dma_unmap */
192 unsigned int data_size; 197 unsigned int data_size;
193 atomic_t ref_count; /* refcount, freed when dec to 0 */
194};
195
196#define MAX_REGD_BUF_VECTOR_LEN 2
197
198struct iser_dto {
199 struct iscsi_iser_task *task;
200 struct iser_conn *ib_conn;
201 int notify_enable;
202
203 /* vector of registered buffers */
204 unsigned int regd_vector_len;
205 struct iser_regd_buf *regd[MAX_REGD_BUF_VECTOR_LEN];
206
207 /* offset into the registered buffer may be specified */
208 unsigned int offset[MAX_REGD_BUF_VECTOR_LEN];
209
210 /* a smaller size may be specified, if 0, then full size is used */
211 unsigned int used_sz[MAX_REGD_BUF_VECTOR_LEN];
212}; 198};
213 199
214enum iser_desc_type { 200enum iser_desc_type {
215 ISCSI_RX,
216 ISCSI_TX_CONTROL , 201 ISCSI_TX_CONTROL ,
217 ISCSI_TX_SCSI_COMMAND, 202 ISCSI_TX_SCSI_COMMAND,
218 ISCSI_TX_DATAOUT 203 ISCSI_TX_DATAOUT
219}; 204};
220 205
221struct iser_desc { 206struct iser_tx_desc {
222 struct iser_hdr iser_header; 207 struct iser_hdr iser_header;
223 struct iscsi_hdr iscsi_header; 208 struct iscsi_hdr iscsi_header;
224 struct iser_regd_buf hdr_regd_buf;
225 void *data; /* used by RX & TX_CONTROL */
226 struct iser_regd_buf data_regd_buf; /* used by RX & TX_CONTROL */
227 enum iser_desc_type type; 209 enum iser_desc_type type;
228 struct iser_dto dto; 210 u64 dma_addr;
211 /* sg[0] points to iser/iscsi headers, sg[1] optionally points to either
212 of immediate data, unsolicited data-out or control (login,text) */
213 struct ib_sge tx_sg[2];
214 int num_sge;
229}; 215};
230 216
217#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
218 sizeof(u64) + sizeof(struct ib_sge)))
219struct iser_rx_desc {
220 struct iser_hdr iser_header;
221 struct iscsi_hdr iscsi_header;
222 char data[ISER_RECV_DATA_SEG_LEN];
223 u64 dma_addr;
224 struct ib_sge rx_sg;
225 char pad[ISER_RX_PAD_SIZE];
226} __attribute__((packed));
227
231struct iser_device { 228struct iser_device {
232 struct ib_device *ib_device; 229 struct ib_device *ib_device;
233 struct ib_pd *pd; 230 struct ib_pd *pd;
234 struct ib_cq *cq; 231 struct ib_cq *rx_cq;
232 struct ib_cq *tx_cq;
235 struct ib_mr *mr; 233 struct ib_mr *mr;
236 struct tasklet_struct cq_tasklet; 234 struct tasklet_struct cq_tasklet;
237 struct list_head ig_list; /* entry in ig devices list */ 235 struct list_head ig_list; /* entry in ig devices list */
@@ -250,15 +248,18 @@ struct iser_conn {
250 struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */ 248 struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
251 int disc_evt_flag; /* disconn event delivered */ 249 int disc_evt_flag; /* disconn event delivered */
252 wait_queue_head_t wait; /* waitq for conn/disconn */ 250 wait_queue_head_t wait; /* waitq for conn/disconn */
253 atomic_t post_recv_buf_count; /* posted rx count */ 251 int post_recv_buf_count; /* posted rx count */
254 atomic_t post_send_buf_count; /* posted tx count */ 252 atomic_t post_send_buf_count; /* posted tx count */
255 atomic_t unexpected_pdu_count;/* count of received *
256 * unexpected pdus *
257 * not yet retired */
258 char name[ISER_OBJECT_NAME_SIZE]; 253 char name[ISER_OBJECT_NAME_SIZE];
259 struct iser_page_vec *page_vec; /* represents SG to fmr maps* 254 struct iser_page_vec *page_vec; /* represents SG to fmr maps*
260 * maps serialized as tx is*/ 255 * maps serialized as tx is*/
261 struct list_head conn_list; /* entry in ig conn list */ 256 struct list_head conn_list; /* entry in ig conn list */
257
258 char *login_buf;
259 u64 login_dma;
260 unsigned int rx_desc_head;
261 struct iser_rx_desc *rx_descs;
262 struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
262}; 263};
263 264
264struct iscsi_iser_conn { 265struct iscsi_iser_conn {
@@ -267,7 +268,7 @@ struct iscsi_iser_conn {
267}; 268};
268 269
269struct iscsi_iser_task { 270struct iscsi_iser_task {
270 struct iser_desc desc; 271 struct iser_tx_desc desc;
271 struct iscsi_iser_conn *iser_conn; 272 struct iscsi_iser_conn *iser_conn;
272 enum iser_task_status status; 273 enum iser_task_status status;
273 int command_sent; /* set if command sent */ 274 int command_sent; /* set if command sent */
@@ -275,6 +276,7 @@ struct iscsi_iser_task {
275 struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */ 276 struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */
276 struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/ 277 struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/
277 struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */ 278 struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */
279 int headers_initialized;
278}; 280};
279 281
280struct iser_page_vec { 282struct iser_page_vec {
@@ -322,22 +324,17 @@ void iser_conn_put(struct iser_conn *ib_conn);
322 324
323void iser_conn_terminate(struct iser_conn *ib_conn); 325void iser_conn_terminate(struct iser_conn *ib_conn);
324 326
325void iser_rcv_completion(struct iser_desc *desc, 327void iser_rcv_completion(struct iser_rx_desc *desc,
326 unsigned long dto_xfer_len); 328 unsigned long dto_xfer_len,
329 struct iser_conn *ib_conn);
327 330
328void iser_snd_completion(struct iser_desc *desc); 331void iser_snd_completion(struct iser_tx_desc *desc, struct iser_conn *ib_conn);
329 332
330void iser_task_rdma_init(struct iscsi_iser_task *task); 333void iser_task_rdma_init(struct iscsi_iser_task *task);
331 334
332void iser_task_rdma_finalize(struct iscsi_iser_task *task); 335void iser_task_rdma_finalize(struct iscsi_iser_task *task);
333 336
334void iser_dto_buffs_release(struct iser_dto *dto); 337void iser_free_rx_descriptors(struct iser_conn *ib_conn);
335
336int iser_regd_buff_release(struct iser_regd_buf *regd_buf);
337
338void iser_reg_single(struct iser_device *device,
339 struct iser_regd_buf *regd_buf,
340 enum dma_data_direction direction);
341 338
342void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task, 339void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
343 enum iser_data_dir cmd_dir); 340 enum iser_data_dir cmd_dir);
@@ -356,11 +353,9 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
356 353
357void iser_unreg_mem(struct iser_mem_reg *mem_reg); 354void iser_unreg_mem(struct iser_mem_reg *mem_reg);
358 355
359int iser_post_recv(struct iser_desc *rx_desc); 356int iser_post_recvl(struct iser_conn *ib_conn);
360int iser_post_send(struct iser_desc *tx_desc); 357int iser_post_recvm(struct iser_conn *ib_conn, int count);
361 358int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc);
362int iser_conn_state_comp(struct iser_conn *ib_conn,
363 enum iser_ib_conn_state comp);
364 359
365int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, 360int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
366 struct iser_data_buf *data, 361 struct iser_data_buf *data,
@@ -368,4 +363,6 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
368 enum dma_data_direction dma_dir); 363 enum dma_data_direction dma_dir);
369 364
370void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task); 365void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
366int iser_initialize_task_headers(struct iscsi_task *task,
367 struct iser_tx_desc *tx_desc);
371#endif 368#endif
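The new iser_rx_desc layout above pads each receive descriptor so that the iSER/iSCSI headers, the 128-byte data segment, the DMA address, the SGE and the explicit pad add up to a fixed 256-byte slot, which keeps the rx_descs array laid out in uniform chunks. A compile-time check along these lines (hypothetical, not part of the patch; assumes "iscsi_iser.h" is included for the struct definition) makes that intent explicit:

static inline void iser_rx_desc_layout_check(void)
{
	/* ISER_RX_PAD_SIZE is defined so the packed struct is exactly 256 bytes */
	BUILD_BUG_ON(sizeof(struct iser_rx_desc) != 256);
}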
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 9de640200ad3..0b9ef0716588 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -39,29 +39,6 @@
39 39
40#include "iscsi_iser.h" 40#include "iscsi_iser.h"
41 41
42/* Constant PDU lengths calculations */
43#define ISER_TOTAL_HEADERS_LEN (sizeof (struct iser_hdr) + \
44 sizeof (struct iscsi_hdr))
45
46/* iser_dto_add_regd_buff - increments the reference count for *
47 * the registered buffer & adds it to the DTO object */
48static void iser_dto_add_regd_buff(struct iser_dto *dto,
49 struct iser_regd_buf *regd_buf,
50 unsigned long use_offset,
51 unsigned long use_size)
52{
53 int add_idx;
54
55 atomic_inc(&regd_buf->ref_count);
56
57 add_idx = dto->regd_vector_len;
58 dto->regd[add_idx] = regd_buf;
59 dto->used_sz[add_idx] = use_size;
60 dto->offset[add_idx] = use_offset;
61
62 dto->regd_vector_len++;
63}
64
65/* Register user buffer memory and initialize passive rdma 42/* Register user buffer memory and initialize passive rdma
66 * dto descriptor. Total data size is stored in 43 * dto descriptor. Total data size is stored in
67 * iser_task->data[ISER_DIR_IN].data_len 44 * iser_task->data[ISER_DIR_IN].data_len
@@ -122,9 +99,9 @@ iser_prepare_write_cmd(struct iscsi_task *task,
122 struct iscsi_iser_task *iser_task = task->dd_data; 99 struct iscsi_iser_task *iser_task = task->dd_data;
123 struct iser_regd_buf *regd_buf; 100 struct iser_regd_buf *regd_buf;
124 int err; 101 int err;
125 struct iser_dto *send_dto = &iser_task->desc.dto;
126 struct iser_hdr *hdr = &iser_task->desc.iser_header; 102 struct iser_hdr *hdr = &iser_task->desc.iser_header;
127 struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT]; 103 struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
104 struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
128 105
129 err = iser_dma_map_task_data(iser_task, 106 err = iser_dma_map_task_data(iser_task,
130 buf_out, 107 buf_out,
@@ -163,135 +140,100 @@ iser_prepare_write_cmd(struct iscsi_task *task,
163 if (imm_sz > 0) { 140 if (imm_sz > 0) {
164 iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n", 141 iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
165 task->itt, imm_sz); 142 task->itt, imm_sz);
166 iser_dto_add_regd_buff(send_dto, 143 tx_dsg->addr = regd_buf->reg.va;
167 regd_buf, 144 tx_dsg->length = imm_sz;
168 0, 145 tx_dsg->lkey = regd_buf->reg.lkey;
169 imm_sz); 146 iser_task->desc.num_sge = 2;
170 } 147 }
171 148
172 return 0; 149 return 0;
173} 150}
174 151
175/** 152/* creates a new tx descriptor and adds header regd buffer */
176 * iser_post_receive_control - allocates, initializes and posts receive DTO. 153static void iser_create_send_desc(struct iser_conn *ib_conn,
177 */ 154 struct iser_tx_desc *tx_desc)
178static int iser_post_receive_control(struct iscsi_conn *conn)
179{ 155{
180 struct iscsi_iser_conn *iser_conn = conn->dd_data; 156 struct iser_device *device = ib_conn->device;
181 struct iser_desc *rx_desc;
182 struct iser_regd_buf *regd_hdr;
183 struct iser_regd_buf *regd_data;
184 struct iser_dto *recv_dto = NULL;
185 struct iser_device *device = iser_conn->ib_conn->device;
186 int rx_data_size, err;
187 int posts, outstanding_unexp_pdus;
188
189 /* for the login sequence we must support rx of upto 8K; login is done
190 * after conn create/bind (connect) and conn stop/bind (reconnect),
191 * what's common for both schemes is that the connection is not started
192 */
193 if (conn->c_stage != ISCSI_CONN_STARTED)
194 rx_data_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
195 else /* FIXME till user space sets conn->max_recv_dlength correctly */
196 rx_data_size = 128;
197
198 outstanding_unexp_pdus =
199 atomic_xchg(&iser_conn->ib_conn->unexpected_pdu_count, 0);
200
201 /*
202 * in addition to the response buffer, replace those consumed by
203 * unexpected pdus.
204 */
205 for (posts = 0; posts < 1 + outstanding_unexp_pdus; posts++) {
206 rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
207 if (rx_desc == NULL) {
208 iser_err("Failed to alloc desc for post recv %d\n",
209 posts);
210 err = -ENOMEM;
211 goto post_rx_cache_alloc_failure;
212 }
213 rx_desc->type = ISCSI_RX;
214 rx_desc->data = kmalloc(rx_data_size, GFP_NOIO);
215 if (rx_desc->data == NULL) {
216 iser_err("Failed to alloc data buf for post recv %d\n",
217 posts);
218 err = -ENOMEM;
219 goto post_rx_kmalloc_failure;
220 }
221
222 recv_dto = &rx_desc->dto;
223 recv_dto->ib_conn = iser_conn->ib_conn;
224 recv_dto->regd_vector_len = 0;
225 157
226 regd_hdr = &rx_desc->hdr_regd_buf; 158 ib_dma_sync_single_for_cpu(device->ib_device,
227 memset(regd_hdr, 0, sizeof(struct iser_regd_buf)); 159 tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
228 regd_hdr->device = device;
229 regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */
230 regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;
231 160
232 iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE); 161 memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
233 162 tx_desc->iser_header.flags = ISER_VER;
234 iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0);
235 163
236 regd_data = &rx_desc->data_regd_buf; 164 tx_desc->num_sge = 1;
237 memset(regd_data, 0, sizeof(struct iser_regd_buf));
238 regd_data->device = device;
239 regd_data->virt_addr = rx_desc->data;
240 regd_data->data_size = rx_data_size;
241 165
242 iser_reg_single(device, regd_data, DMA_FROM_DEVICE); 166 if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
167 tx_desc->tx_sg[0].lkey = device->mr->lkey;
168 iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
169 }
170}
243 171
244 iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0);
245 172
246 err = iser_post_recv(rx_desc); 173int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
247 if (err) { 174{
248 iser_err("Failed iser_post_recv for post %d\n", posts); 175 int i, j;
249 goto post_rx_post_recv_failure; 176 u64 dma_addr;
250 } 177 struct iser_rx_desc *rx_desc;
178 struct ib_sge *rx_sg;
179 struct iser_device *device = ib_conn->device;
180
181 ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
182 sizeof(struct iser_rx_desc), GFP_KERNEL);
183 if (!ib_conn->rx_descs)
184 goto rx_desc_alloc_fail;
185
186 rx_desc = ib_conn->rx_descs;
187
188 for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++) {
189 dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
190 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
191 if (ib_dma_mapping_error(device->ib_device, dma_addr))
192 goto rx_desc_dma_map_failed;
193
194 rx_desc->dma_addr = dma_addr;
195
196 rx_sg = &rx_desc->rx_sg;
197 rx_sg->addr = rx_desc->dma_addr;
198 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
199 rx_sg->lkey = device->mr->lkey;
251 } 200 }
252 /* all posts successful */
253 return 0;
254 201
255post_rx_post_recv_failure: 202 ib_conn->rx_desc_head = 0;
256 iser_dto_buffs_release(recv_dto); 203 return 0;
257 kfree(rx_desc->data);
258post_rx_kmalloc_failure:
259 kmem_cache_free(ig.desc_cache, rx_desc);
260post_rx_cache_alloc_failure:
261 if (posts > 0) {
262 /*
263 * response buffer posted, but did not replace all unexpected
264 * pdu recv bufs. Ignore error, retry occurs next send
265 */
266 outstanding_unexp_pdus -= (posts - 1);
267 err = 0;
268 }
269 atomic_add(outstanding_unexp_pdus,
270 &iser_conn->ib_conn->unexpected_pdu_count);
271 204
272 return err; 205rx_desc_dma_map_failed:
206 rx_desc = ib_conn->rx_descs;
207 for (j = 0; j < i; j++, rx_desc++)
208 ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
209 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
210 kfree(ib_conn->rx_descs);
211 ib_conn->rx_descs = NULL;
212rx_desc_alloc_fail:
213 iser_err("failed allocating rx descriptors / data buffers\n");
214 return -ENOMEM;
273} 215}
274 216
275/* creates a new tx descriptor and adds header regd buffer */ 217void iser_free_rx_descriptors(struct iser_conn *ib_conn)
276static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn,
277 struct iser_desc *tx_desc)
278{ 218{
279 struct iser_regd_buf *regd_hdr = &tx_desc->hdr_regd_buf; 219 int i;
280 struct iser_dto *send_dto = &tx_desc->dto; 220 struct iser_rx_desc *rx_desc;
221 struct iser_device *device = ib_conn->device;
281 222
282 memset(regd_hdr, 0, sizeof(struct iser_regd_buf)); 223 if (ib_conn->login_buf) {
283 regd_hdr->device = iser_conn->ib_conn->device; 224 ib_dma_unmap_single(device->ib_device, ib_conn->login_dma,
284 regd_hdr->virt_addr = tx_desc; /* == &tx_desc->iser_header */ 225 ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
285 regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN; 226 kfree(ib_conn->login_buf);
227 }
286 228
287 send_dto->ib_conn = iser_conn->ib_conn; 229 if (!ib_conn->rx_descs)
288 send_dto->notify_enable = 1; 230 return;
289 send_dto->regd_vector_len = 0;
290 231
291 memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr)); 232 rx_desc = ib_conn->rx_descs;
292 tx_desc->iser_header.flags = ISER_VER; 233 for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
293 234 ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
294 iser_dto_add_regd_buff(send_dto, regd_hdr, 0, 0); 235 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
236 kfree(ib_conn->rx_descs);
295} 237}
296 238
297/** 239/**
@@ -301,46 +243,23 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
301{ 243{
302 struct iscsi_iser_conn *iser_conn = conn->dd_data; 244 struct iscsi_iser_conn *iser_conn = conn->dd_data;
303 245
304 int i; 246 iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
305 /*
306 * FIXME this value should be declared to the target during login with
307 * the MaxOutstandingUnexpectedPDUs key when supported
308 */
309 int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS;
310
311 iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num);
312 247
313 /* Check that there is no posted recv or send buffers left - */ 248 /* Check that there is no posted recv or send buffers left - */
314 /* they must be consumed during the login phase */ 249 /* they must be consumed during the login phase */
315 BUG_ON(atomic_read(&iser_conn->ib_conn->post_recv_buf_count) != 0); 250 BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0);
316 BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0); 251 BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
317 252
318 /* Initial post receive buffers */ 253 if (iser_alloc_rx_descriptors(iser_conn->ib_conn))
319 for (i = 0; i < initial_post_recv_bufs_num; i++) { 254 return -ENOMEM;
320 if (iser_post_receive_control(conn) != 0) {
321 iser_err("Failed to post recv bufs at:%d conn:0x%p\n",
322 i, conn);
323 return -ENOMEM;
324 }
325 }
326 iser_dbg("Posted %d post recv bufs, conn:0x%p\n", i, conn);
327 return 0;
328}
329 255
330static int 256 /* Initial post receive buffers */
331iser_check_xmit(struct iscsi_conn *conn, void *task) 257 if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
332{ 258 return -ENOMEM;
333 struct iscsi_iser_conn *iser_conn = conn->dd_data;
334 259
335 if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
336 ISER_QP_MAX_REQ_DTOS) {
337 iser_dbg("%ld can't xmit task %p\n",jiffies,task);
338 return -ENOBUFS;
339 }
340 return 0; 260 return 0;
341} 261}
342 262
343
344/** 263/**
345 * iser_send_command - send command PDU 264 * iser_send_command - send command PDU
346 */ 265 */
@@ -349,27 +268,18 @@ int iser_send_command(struct iscsi_conn *conn,
349{ 268{
350 struct iscsi_iser_conn *iser_conn = conn->dd_data; 269 struct iscsi_iser_conn *iser_conn = conn->dd_data;
351 struct iscsi_iser_task *iser_task = task->dd_data; 270 struct iscsi_iser_task *iser_task = task->dd_data;
352 struct iser_dto *send_dto = NULL;
353 unsigned long edtl; 271 unsigned long edtl;
354 int err = 0; 272 int err;
355 struct iser_data_buf *data_buf; 273 struct iser_data_buf *data_buf;
356 struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr; 274 struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
357 struct scsi_cmnd *sc = task->sc; 275 struct scsi_cmnd *sc = task->sc;
358 276 struct iser_tx_desc *tx_desc = &iser_task->desc;
359 if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
360 iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
361 return -EPERM;
362 }
363 if (iser_check_xmit(conn, task))
364 return -ENOBUFS;
365 277
366 edtl = ntohl(hdr->data_length); 278 edtl = ntohl(hdr->data_length);
367 279
368 /* build the tx desc regd header and add it to the tx desc dto */ 280 /* build the tx desc regd header and add it to the tx desc dto */
369 iser_task->desc.type = ISCSI_TX_SCSI_COMMAND; 281 tx_desc->type = ISCSI_TX_SCSI_COMMAND;
370 send_dto = &iser_task->desc.dto; 282 iser_create_send_desc(iser_conn->ib_conn, tx_desc);
371 send_dto->task = iser_task;
372 iser_create_send_desc(iser_conn, &iser_task->desc);
373 283
374 if (hdr->flags & ISCSI_FLAG_CMD_READ) 284 if (hdr->flags & ISCSI_FLAG_CMD_READ)
375 data_buf = &iser_task->data[ISER_DIR_IN]; 285 data_buf = &iser_task->data[ISER_DIR_IN];
@@ -398,23 +308,13 @@ int iser_send_command(struct iscsi_conn *conn,
398 goto send_command_error; 308 goto send_command_error;
399 } 309 }
400 310
401 iser_reg_single(iser_conn->ib_conn->device,
402 send_dto->regd[0], DMA_TO_DEVICE);
403
404 if (iser_post_receive_control(conn) != 0) {
405 iser_err("post_recv failed!\n");
406 err = -ENOMEM;
407 goto send_command_error;
408 }
409
410 iser_task->status = ISER_TASK_STATUS_STARTED; 311 iser_task->status = ISER_TASK_STATUS_STARTED;
411 312
412 err = iser_post_send(&iser_task->desc); 313 err = iser_post_send(iser_conn->ib_conn, tx_desc);
413 if (!err) 314 if (!err)
414 return 0; 315 return 0;
415 316
416send_command_error: 317send_command_error:
417 iser_dto_buffs_release(send_dto);
418 iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err); 318 iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
419 return err; 319 return err;
420} 320}
@@ -428,20 +328,13 @@ int iser_send_data_out(struct iscsi_conn *conn,
428{ 328{
429 struct iscsi_iser_conn *iser_conn = conn->dd_data; 329 struct iscsi_iser_conn *iser_conn = conn->dd_data;
430 struct iscsi_iser_task *iser_task = task->dd_data; 330 struct iscsi_iser_task *iser_task = task->dd_data;
431 struct iser_desc *tx_desc = NULL; 331 struct iser_tx_desc *tx_desc = NULL;
432 struct iser_dto *send_dto = NULL; 332 struct iser_regd_buf *regd_buf;
433 unsigned long buf_offset; 333 unsigned long buf_offset;
434 unsigned long data_seg_len; 334 unsigned long data_seg_len;
435 uint32_t itt; 335 uint32_t itt;
436 int err = 0; 336 int err = 0;
437 337 struct ib_sge *tx_dsg;
438 if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
439 iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
440 return -EPERM;
441 }
442
443 if (iser_check_xmit(conn, task))
444 return -ENOBUFS;
445 338
446 itt = (__force uint32_t)hdr->itt; 339 itt = (__force uint32_t)hdr->itt;
447 data_seg_len = ntoh24(hdr->dlength); 340 data_seg_len = ntoh24(hdr->dlength);
@@ -450,28 +343,25 @@ int iser_send_data_out(struct iscsi_conn *conn,
450 iser_dbg("%s itt %d dseg_len %d offset %d\n", 343 iser_dbg("%s itt %d dseg_len %d offset %d\n",
451 __func__,(int)itt,(int)data_seg_len,(int)buf_offset); 344 __func__,(int)itt,(int)data_seg_len,(int)buf_offset);
452 345
453 tx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO); 346 tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
454 if (tx_desc == NULL) { 347 if (tx_desc == NULL) {
455 iser_err("Failed to alloc desc for post dataout\n"); 348 iser_err("Failed to alloc desc for post dataout\n");
456 return -ENOMEM; 349 return -ENOMEM;
457 } 350 }
458 351
459 tx_desc->type = ISCSI_TX_DATAOUT; 352 tx_desc->type = ISCSI_TX_DATAOUT;
353 tx_desc->iser_header.flags = ISER_VER;
460 memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr)); 354 memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
461 355
462 /* build the tx desc regd header and add it to the tx desc dto */ 356 /* build the tx desc */
463 send_dto = &tx_desc->dto; 357 iser_initialize_task_headers(task, tx_desc);
464 send_dto->task = iser_task;
465 iser_create_send_desc(iser_conn, tx_desc);
466
467 iser_reg_single(iser_conn->ib_conn->device,
468 send_dto->regd[0], DMA_TO_DEVICE);
469 358
470 /* all data was registered for RDMA, we can use the lkey */ 359 regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
471 iser_dto_add_regd_buff(send_dto, 360 tx_dsg = &tx_desc->tx_sg[1];
472 &iser_task->rdma_regd[ISER_DIR_OUT], 361 tx_dsg->addr = regd_buf->reg.va + buf_offset;
473 buf_offset, 362 tx_dsg->length = data_seg_len;
474 data_seg_len); 363 tx_dsg->lkey = regd_buf->reg.lkey;
364 tx_desc->num_sge = 2;
475 365
476 if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) { 366 if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
477 iser_err("Offset:%ld & DSL:%ld in Data-Out " 367 iser_err("Offset:%ld & DSL:%ld in Data-Out "
@@ -485,12 +375,11 @@ int iser_send_data_out(struct iscsi_conn *conn,
485 itt, buf_offset, data_seg_len); 375 itt, buf_offset, data_seg_len);
486 376
487 377
488 err = iser_post_send(tx_desc); 378 err = iser_post_send(iser_conn->ib_conn, tx_desc);
489 if (!err) 379 if (!err)
490 return 0; 380 return 0;
491 381
492send_data_out_error: 382send_data_out_error:
493 iser_dto_buffs_release(send_dto);
494 kmem_cache_free(ig.desc_cache, tx_desc); 383 kmem_cache_free(ig.desc_cache, tx_desc);
495 iser_err("conn %p failed err %d\n",conn, err); 384 iser_err("conn %p failed err %d\n",conn, err);
496 return err; 385 return err;
@@ -501,64 +390,44 @@ int iser_send_control(struct iscsi_conn *conn,
501{ 390{
502 struct iscsi_iser_conn *iser_conn = conn->dd_data; 391 struct iscsi_iser_conn *iser_conn = conn->dd_data;
503 struct iscsi_iser_task *iser_task = task->dd_data; 392 struct iscsi_iser_task *iser_task = task->dd_data;
504 struct iser_desc *mdesc = &iser_task->desc; 393 struct iser_tx_desc *mdesc = &iser_task->desc;
505 struct iser_dto *send_dto = NULL;
506 unsigned long data_seg_len; 394 unsigned long data_seg_len;
507 int err = 0; 395 int err = 0;
508 struct iser_regd_buf *regd_buf;
509 struct iser_device *device; 396 struct iser_device *device;
510 unsigned char opcode;
511
512 if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
513 iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
514 return -EPERM;
515 }
516
517 if (iser_check_xmit(conn, task))
518 return -ENOBUFS;
519 397
520 /* build the tx desc regd header and add it to the tx desc dto */ 398 /* build the tx desc regd header and add it to the tx desc dto */
521 mdesc->type = ISCSI_TX_CONTROL; 399 mdesc->type = ISCSI_TX_CONTROL;
522 send_dto = &mdesc->dto; 400 iser_create_send_desc(iser_conn->ib_conn, mdesc);
523 send_dto->task = NULL;
524 iser_create_send_desc(iser_conn, mdesc);
525 401
526 device = iser_conn->ib_conn->device; 402 device = iser_conn->ib_conn->device;
527 403
528 iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
529
530 data_seg_len = ntoh24(task->hdr->dlength); 404 data_seg_len = ntoh24(task->hdr->dlength);
531 405
532 if (data_seg_len > 0) { 406 if (data_seg_len > 0) {
533 regd_buf = &mdesc->data_regd_buf; 407 struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
534 memset(regd_buf, 0, sizeof(struct iser_regd_buf)); 408 if (task != conn->login_task) {
535 regd_buf->device = device; 409 iser_err("data present on non login task!!!\n");
536 regd_buf->virt_addr = task->data; 410 goto send_control_error;
537 regd_buf->data_size = task->data_count; 411 }
538 iser_reg_single(device, regd_buf, 412 memcpy(iser_conn->ib_conn->login_buf, task->data,
539 DMA_TO_DEVICE); 413 task->data_count);
540 iser_dto_add_regd_buff(send_dto, regd_buf, 414 tx_dsg->addr = iser_conn->ib_conn->login_dma;
541 0, 415 tx_dsg->length = data_seg_len;
542 data_seg_len); 416 tx_dsg->lkey = device->mr->lkey;
417 mdesc->num_sge = 2;
543 } 418 }
544 419
545 opcode = task->hdr->opcode & ISCSI_OPCODE_MASK; 420 if (task == conn->login_task) {
546 421 err = iser_post_recvl(iser_conn->ib_conn);
547 /* post recv buffer for response if one is expected */ 422 if (err)
548 if (!(opcode == ISCSI_OP_NOOP_OUT && task->hdr->itt == RESERVED_ITT)) {
549 if (iser_post_receive_control(conn) != 0) {
550 iser_err("post_rcv_buff failed!\n");
551 err = -ENOMEM;
552 goto send_control_error; 423 goto send_control_error;
553 }
554 } 424 }
555 425
556 err = iser_post_send(mdesc); 426 err = iser_post_send(iser_conn->ib_conn, mdesc);
557 if (!err) 427 if (!err)
558 return 0; 428 return 0;
559 429
560send_control_error: 430send_control_error:
561 iser_dto_buffs_release(send_dto);
562 iser_err("conn %p failed err %d\n",conn, err); 431 iser_err("conn %p failed err %d\n",conn, err);
563 return err; 432 return err;
564} 433}
@@ -566,104 +435,71 @@ send_control_error:
566/** 435/**
567 * iser_rcv_dto_completion - recv DTO completion 436 * iser_rcv_dto_completion - recv DTO completion
568 */ 437 */
569void iser_rcv_completion(struct iser_desc *rx_desc, 438void iser_rcv_completion(struct iser_rx_desc *rx_desc,
570 unsigned long dto_xfer_len) 439 unsigned long rx_xfer_len,
440 struct iser_conn *ib_conn)
571{ 441{
572 struct iser_dto *dto = &rx_desc->dto; 442 struct iscsi_iser_conn *conn = ib_conn->iser_conn;
573 struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
574 struct iscsi_task *task;
575 struct iscsi_iser_task *iser_task;
576 struct iscsi_hdr *hdr; 443 struct iscsi_hdr *hdr;
577 char *rx_data = NULL; 444 u64 rx_dma;
578 int rx_data_len = 0; 445 int rx_buflen, outstanding, count, err;
579 unsigned char opcode; 446
580 447 /* differentiate between login to all other PDUs */
581 hdr = &rx_desc->iscsi_header; 448 if ((char *)rx_desc == ib_conn->login_buf) {
449 rx_dma = ib_conn->login_dma;
450 rx_buflen = ISER_RX_LOGIN_SIZE;
451 } else {
452 rx_dma = rx_desc->dma_addr;
453 rx_buflen = ISER_RX_PAYLOAD_SIZE;
454 }
582 455
583 iser_dbg("op 0x%x itt 0x%x\n", hdr->opcode,hdr->itt); 456 ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
457 rx_buflen, DMA_FROM_DEVICE);
584 458
585 if (dto_xfer_len > ISER_TOTAL_HEADERS_LEN) { /* we have data */ 459 hdr = &rx_desc->iscsi_header;
586 rx_data_len = dto_xfer_len - ISER_TOTAL_HEADERS_LEN;
587 rx_data = dto->regd[1]->virt_addr;
588 rx_data += dto->offset[1];
589 }
590 460
591 opcode = hdr->opcode & ISCSI_OPCODE_MASK; 461 iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
592 462 hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));
593 if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
594 spin_lock(&conn->iscsi_conn->session->lock);
595 task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
596 if (task)
597 __iscsi_get_task(task);
598 spin_unlock(&conn->iscsi_conn->session->lock);
599
600 if (!task)
601 iser_err("itt can't be matched to task!!! "
602 "conn %p opcode %d itt %d\n",
603 conn->iscsi_conn, opcode, hdr->itt);
604 else {
605 iser_task = task->dd_data;
606 iser_dbg("itt %d task %p\n",hdr->itt, task);
607 iser_task->status = ISER_TASK_STATUS_COMPLETED;
608 iser_task_rdma_finalize(iser_task);
609 iscsi_put_task(task);
610 }
611 }
612 iser_dto_buffs_release(dto);
613 463
614 iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len); 464 iscsi_iser_recv(conn->iscsi_conn, hdr,
465 rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN);
615 466
616 kfree(rx_desc->data); 467 ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
617 kmem_cache_free(ig.desc_cache, rx_desc); 468 rx_buflen, DMA_FROM_DEVICE);
618 469
619 /* decrementing conn->post_recv_buf_count only --after-- freeing the * 470 /* decrementing conn->post_recv_buf_count only --after-- freeing the *
620 * task eliminates the need to worry on tasks which are completed in * 471 * task eliminates the need to worry on tasks which are completed in *
621 * parallel to the execution of iser_conn_term. So the code that waits * 472 * parallel to the execution of iser_conn_term. So the code that waits *
622 * for the posted rx bufs refcount to become zero handles everything */ 473 * for the posted rx bufs refcount to become zero handles everything */
623 atomic_dec(&conn->ib_conn->post_recv_buf_count); 474 conn->ib_conn->post_recv_buf_count--;
624 475
625 /* 476 if (rx_dma == ib_conn->login_dma)
626 * if an unexpected PDU was received then the recv wr consumed must 477 return;
627 * be replaced, this is done in the next send of a control-type PDU 478
628 */ 479 outstanding = ib_conn->post_recv_buf_count;
629 if (opcode == ISCSI_OP_NOOP_IN && hdr->itt == RESERVED_ITT) { 480 if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) {
630 /* nop-in with itt = 0xffffffff */ 481 count = min(ISER_QP_MAX_RECV_DTOS - outstanding,
631 atomic_inc(&conn->ib_conn->unexpected_pdu_count); 482 ISER_MIN_POSTED_RX);
632 } 483 err = iser_post_recvm(ib_conn, count);
633 else if (opcode == ISCSI_OP_ASYNC_EVENT) { 484 if (err)
634 /* asyncronous message */ 485 iser_err("posting %d rx bufs err %d\n", count, err);
635 atomic_inc(&conn->ib_conn->unexpected_pdu_count);
636 } 486 }
637 /* a reject PDU consumes the recv buf posted for the response */
638} 487}
639 488
640void iser_snd_completion(struct iser_desc *tx_desc) 489void iser_snd_completion(struct iser_tx_desc *tx_desc,
490 struct iser_conn *ib_conn)
641{ 491{
642 struct iser_dto *dto = &tx_desc->dto;
643 struct iser_conn *ib_conn = dto->ib_conn;
644 struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
645 struct iscsi_conn *conn = iser_conn->iscsi_conn;
646 struct iscsi_task *task; 492 struct iscsi_task *task;
647 int resume_tx = 0; 493 struct iser_device *device = ib_conn->device;
648
649 iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
650
651 iser_dto_buffs_release(dto);
652 494
653 if (tx_desc->type == ISCSI_TX_DATAOUT) 495 if (tx_desc->type == ISCSI_TX_DATAOUT) {
496 ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
497 ISER_HEADERS_LEN, DMA_TO_DEVICE);
654 kmem_cache_free(ig.desc_cache, tx_desc); 498 kmem_cache_free(ig.desc_cache, tx_desc);
655 499 }
656 if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
657 ISER_QP_MAX_REQ_DTOS)
658 resume_tx = 1;
659 500
660 atomic_dec(&ib_conn->post_send_buf_count); 501 atomic_dec(&ib_conn->post_send_buf_count);
661 502
662 if (resume_tx) {
663 iser_dbg("%ld resuming tx\n",jiffies);
664 iscsi_conn_queue_work(conn);
665 }
666
667 if (tx_desc->type == ISCSI_TX_CONTROL) { 503 if (tx_desc->type == ISCSI_TX_CONTROL) {
668 /* this arithmetic is legal by libiscsi dd_data allocation */ 504 /* this arithmetic is legal by libiscsi dd_data allocation */
669 task = (void *) ((long)(void *)tx_desc - 505 task = (void *) ((long)(void *)tx_desc -
@@ -692,7 +528,6 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
692 528
693void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) 529void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
694{ 530{
695 int deferred;
696 int is_rdma_aligned = 1; 531 int is_rdma_aligned = 1;
697 struct iser_regd_buf *regd; 532 struct iser_regd_buf *regd;
698 533
@@ -710,32 +545,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
710 545
711 if (iser_task->dir[ISER_DIR_IN]) { 546 if (iser_task->dir[ISER_DIR_IN]) {
712 regd = &iser_task->rdma_regd[ISER_DIR_IN]; 547 regd = &iser_task->rdma_regd[ISER_DIR_IN];
713 deferred = iser_regd_buff_release(regd); 548 if (regd->reg.is_fmr)
714 if (deferred) { 549 iser_unreg_mem(&regd->reg);
715 iser_err("%d references remain for BUF-IN rdma reg\n",
716 atomic_read(&regd->ref_count));
717 }
718 } 550 }
719 551
720 if (iser_task->dir[ISER_DIR_OUT]) { 552 if (iser_task->dir[ISER_DIR_OUT]) {
721 regd = &iser_task->rdma_regd[ISER_DIR_OUT]; 553 regd = &iser_task->rdma_regd[ISER_DIR_OUT];
722 deferred = iser_regd_buff_release(regd); 554 if (regd->reg.is_fmr)
723 if (deferred) { 555 iser_unreg_mem(&regd->reg);
724 iser_err("%d references remain for BUF-OUT rdma reg\n",
725 atomic_read(&regd->ref_count));
726 }
727 } 556 }
728 557
729 /* if the data was unaligned, it was already unmapped and then copied */ 558 /* if the data was unaligned, it was already unmapped and then copied */
730 if (is_rdma_aligned) 559 if (is_rdma_aligned)
731 iser_dma_unmap_task_data(iser_task); 560 iser_dma_unmap_task_data(iser_task);
732} 561}
733
734void iser_dto_buffs_release(struct iser_dto *dto)
735{
736 int i;
737
738 for (i = 0; i < dto->regd_vector_len; i++)
739 iser_regd_buff_release(dto->regd[i]);
740}
741
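The rewritten receive path above reposts buffers in batches rather than one per completion: each completion decrements post_recv_buf_count, and once at least ISER_MIN_POSTED_RX slots are free, iser_post_recvm() posts that many as a chained list of work requests. A minimal user-space sketch of just the watermark arithmetic follows; MAX_RECV and MIN_POSTED are illustrative stand-ins, not the driver's constants.

/*
 * Hedged sketch (user-space analogue, not the kernel code): refill a
 * receive ring in batches once enough outstanding buffers have completed
 * that a full batch of MIN_POSTED can be posted again.
 */
#include <stdio.h>

#define MAX_RECV   16   /* ring capacity (assumption for the demo) */
#define MIN_POSTED  4   /* refill batch / low-water mark */

static int outstanding;          /* buffers currently posted */

static int min_int(int a, int b) { return a < b ? a : b; }

/* Called after each completion; returns how many buffers were reposted. */
static int maybe_refill(void)
{
        int count = 0;

        if (outstanding + MIN_POSTED <= MAX_RECV) {
                count = min_int(MAX_RECV - outstanding, MIN_POSTED);
                outstanding += count;        /* "post" the batch */
        }
        return count;
}

int main(void)
{
        int i;

        outstanding = MAX_RECV;              /* ring starts full */
        for (i = 0; i < 10; i++) {
                outstanding--;               /* one receive completes */
                printf("completion %2d: outstanding=%2d reposted=%d\n",
                       i, outstanding, maybe_refill());
        }
        return 0;
}

Batching the reposts keeps ib_post_recv calls (and their doorbells) amortized over several completions instead of one per received PDU.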
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 274c883ef3ea..fb88d6896b67 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -41,62 +41,6 @@
41#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */ 41#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
42 42
43/** 43/**
44 * Decrements the reference count for the
45 * registered buffer & releases it
46 *
47 * returns 0 if released, 1 if deferred
48 */
49int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
50{
51 struct ib_device *dev;
52
53 if ((atomic_read(&regd_buf->ref_count) == 0) ||
54 atomic_dec_and_test(&regd_buf->ref_count)) {
55 /* if we used the dma mr, unreg is just NOP */
56 if (regd_buf->reg.is_fmr)
57 iser_unreg_mem(&regd_buf->reg);
58
59 if (regd_buf->dma_addr) {
60 dev = regd_buf->device->ib_device;
61 ib_dma_unmap_single(dev,
62 regd_buf->dma_addr,
63 regd_buf->data_size,
64 regd_buf->direction);
65 }
66 /* else this regd buf is associated with task which we */
67 /* dma_unmap_single/sg later */
68 return 0;
69 } else {
70 iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
71 return 1;
72 }
73}
74
75/**
76 * iser_reg_single - fills registered buffer descriptor with
77 * registration information
78 */
79void iser_reg_single(struct iser_device *device,
80 struct iser_regd_buf *regd_buf,
81 enum dma_data_direction direction)
82{
83 u64 dma_addr;
84
85 dma_addr = ib_dma_map_single(device->ib_device,
86 regd_buf->virt_addr,
87 regd_buf->data_size, direction);
88 BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
89
90 regd_buf->reg.lkey = device->mr->lkey;
91 regd_buf->reg.len = regd_buf->data_size;
92 regd_buf->reg.va = dma_addr;
93 regd_buf->reg.is_fmr = 0;
94
95 regd_buf->dma_addr = dma_addr;
96 regd_buf->direction = direction;
97}
98
99/**
100 * iser_start_rdma_unaligned_sg 44 * iser_start_rdma_unaligned_sg
101 */ 45 */
102static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, 46static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
@@ -109,10 +53,10 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
109 unsigned long cmd_data_len = data->data_len; 53 unsigned long cmd_data_len = data->data_len;
110 54
111 if (cmd_data_len > ISER_KMALLOC_THRESHOLD) 55 if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
112 mem = (void *)__get_free_pages(GFP_NOIO, 56 mem = (void *)__get_free_pages(GFP_ATOMIC,
113 ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); 57 ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
114 else 58 else
115 mem = kmalloc(cmd_data_len, GFP_NOIO); 59 mem = kmalloc(cmd_data_len, GFP_ATOMIC);
116 60
117 if (mem == NULL) { 61 if (mem == NULL) {
118 iser_err("Failed to allocate mem size %d %d for copying sglist\n", 62 iser_err("Failed to allocate mem size %d %d for copying sglist\n",
@@ -474,9 +418,5 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
474 return err; 418 return err;
475 } 419 }
476 } 420 }
477
478 /* take a reference on this regd buf such that it will not be released *
479 * (eg in send dto completion) before we get the scsi response */
480 atomic_inc(&regd_buf->ref_count);
481 return 0; 421 return 0;
482} 422}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 8579f32ce38e..308d17bb5146 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -37,9 +37,8 @@
37#include "iscsi_iser.h" 37#include "iscsi_iser.h"
38 38
39#define ISCSI_ISER_MAX_CONN 8 39#define ISCSI_ISER_MAX_CONN 8
40#define ISER_MAX_CQ_LEN ((ISER_QP_MAX_RECV_DTOS + \ 40#define ISER_MAX_RX_CQ_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
41 ISER_QP_MAX_REQ_DTOS) * \ 41#define ISER_MAX_TX_CQ_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)
42 ISCSI_ISER_MAX_CONN)
43 42
44static void iser_cq_tasklet_fn(unsigned long data); 43static void iser_cq_tasklet_fn(unsigned long data);
45static void iser_cq_callback(struct ib_cq *cq, void *cq_context); 44static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
@@ -67,15 +66,23 @@ static int iser_create_device_ib_res(struct iser_device *device)
67 if (IS_ERR(device->pd)) 66 if (IS_ERR(device->pd))
68 goto pd_err; 67 goto pd_err;
69 68
70 device->cq = ib_create_cq(device->ib_device, 69 device->rx_cq = ib_create_cq(device->ib_device,
71 iser_cq_callback, 70 iser_cq_callback,
72 iser_cq_event_callback, 71 iser_cq_event_callback,
73 (void *)device, 72 (void *)device,
74 ISER_MAX_CQ_LEN, 0); 73 ISER_MAX_RX_CQ_LEN, 0);
75 if (IS_ERR(device->cq)) 74 if (IS_ERR(device->rx_cq))
76 goto cq_err; 75 goto rx_cq_err;
77 76
78 if (ib_req_notify_cq(device->cq, IB_CQ_NEXT_COMP)) 77 device->tx_cq = ib_create_cq(device->ib_device,
78 NULL, iser_cq_event_callback,
79 (void *)device,
80 ISER_MAX_TX_CQ_LEN, 0);
81
82 if (IS_ERR(device->tx_cq))
83 goto tx_cq_err;
84
85 if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
79 goto cq_arm_err; 86 goto cq_arm_err;
80 87
81 tasklet_init(&device->cq_tasklet, 88 tasklet_init(&device->cq_tasklet,
@@ -93,8 +100,10 @@ static int iser_create_device_ib_res(struct iser_device *device)
93dma_mr_err: 100dma_mr_err:
94 tasklet_kill(&device->cq_tasklet); 101 tasklet_kill(&device->cq_tasklet);
95cq_arm_err: 102cq_arm_err:
96 ib_destroy_cq(device->cq); 103 ib_destroy_cq(device->tx_cq);
97cq_err: 104tx_cq_err:
105 ib_destroy_cq(device->rx_cq);
106rx_cq_err:
98 ib_dealloc_pd(device->pd); 107 ib_dealloc_pd(device->pd);
99pd_err: 108pd_err:
100 iser_err("failed to allocate an IB resource\n"); 109 iser_err("failed to allocate an IB resource\n");
@@ -112,11 +121,13 @@ static void iser_free_device_ib_res(struct iser_device *device)
112 tasklet_kill(&device->cq_tasklet); 121 tasklet_kill(&device->cq_tasklet);
113 122
114 (void)ib_dereg_mr(device->mr); 123 (void)ib_dereg_mr(device->mr);
115 (void)ib_destroy_cq(device->cq); 124 (void)ib_destroy_cq(device->tx_cq);
125 (void)ib_destroy_cq(device->rx_cq);
116 (void)ib_dealloc_pd(device->pd); 126 (void)ib_dealloc_pd(device->pd);
117 127
118 device->mr = NULL; 128 device->mr = NULL;
119 device->cq = NULL; 129 device->tx_cq = NULL;
130 device->rx_cq = NULL;
120 device->pd = NULL; 131 device->pd = NULL;
121} 132}
122 133
@@ -129,13 +140,23 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
129{ 140{
130 struct iser_device *device; 141 struct iser_device *device;
131 struct ib_qp_init_attr init_attr; 142 struct ib_qp_init_attr init_attr;
132 int ret; 143 int ret = -ENOMEM;
133 struct ib_fmr_pool_param params; 144 struct ib_fmr_pool_param params;
134 145
135 BUG_ON(ib_conn->device == NULL); 146 BUG_ON(ib_conn->device == NULL);
136 147
137 device = ib_conn->device; 148 device = ib_conn->device;
138 149
150 ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
151 if (!ib_conn->login_buf) {
152	ret = -ENOMEM;
153	goto alloc_err;
154 }
155
156 ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
157 (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
158 DMA_FROM_DEVICE);
159
139 ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) + 160 ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
140 (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)), 161 (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
141 GFP_KERNEL); 162 GFP_KERNEL);
@@ -169,12 +190,12 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
169 190
170 init_attr.event_handler = iser_qp_event_callback; 191 init_attr.event_handler = iser_qp_event_callback;
171 init_attr.qp_context = (void *)ib_conn; 192 init_attr.qp_context = (void *)ib_conn;
172 init_attr.send_cq = device->cq; 193 init_attr.send_cq = device->tx_cq;
173 init_attr.recv_cq = device->cq; 194 init_attr.recv_cq = device->rx_cq;
174 init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS; 195 init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
175 init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; 196 init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
176 init_attr.cap.max_send_sge = MAX_REGD_BUF_VECTOR_LEN; 197 init_attr.cap.max_send_sge = 2;
177 init_attr.cap.max_recv_sge = 2; 198 init_attr.cap.max_recv_sge = 1;
178 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 199 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
179 init_attr.qp_type = IB_QPT_RC; 200 init_attr.qp_type = IB_QPT_RC;
180 201
@@ -192,6 +213,7 @@ qp_err:
192 (void)ib_destroy_fmr_pool(ib_conn->fmr_pool); 213 (void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
193fmr_pool_err: 214fmr_pool_err:
194 kfree(ib_conn->page_vec); 215 kfree(ib_conn->page_vec);
216 kfree(ib_conn->login_buf);
195alloc_err: 217alloc_err:
196 iser_err("unable to alloc mem or create resource, err %d\n", ret); 218 iser_err("unable to alloc mem or create resource, err %d\n", ret);
197 return ret; 219 return ret;
@@ -278,17 +300,6 @@ static void iser_device_try_release(struct iser_device *device)
278 mutex_unlock(&ig.device_list_mutex); 300 mutex_unlock(&ig.device_list_mutex);
279} 301}
280 302
281int iser_conn_state_comp(struct iser_conn *ib_conn,
282 enum iser_ib_conn_state comp)
283{
284 int ret;
285
286 spin_lock_bh(&ib_conn->lock);
287 ret = (ib_conn->state == comp);
288 spin_unlock_bh(&ib_conn->lock);
289 return ret;
290}
291
292static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, 303static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
293 enum iser_ib_conn_state comp, 304 enum iser_ib_conn_state comp,
294 enum iser_ib_conn_state exch) 305 enum iser_ib_conn_state exch)
@@ -314,7 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
314 mutex_lock(&ig.connlist_mutex); 325 mutex_lock(&ig.connlist_mutex);
315 list_del(&ib_conn->conn_list); 326 list_del(&ib_conn->conn_list);
316 mutex_unlock(&ig.connlist_mutex); 327 mutex_unlock(&ig.connlist_mutex);
317 328 iser_free_rx_descriptors(ib_conn);
318 iser_free_ib_conn_res(ib_conn); 329 iser_free_ib_conn_res(ib_conn);
319 ib_conn->device = NULL; 330 ib_conn->device = NULL;
320 /* on EVENT_ADDR_ERROR there's no device yet for this conn */ 331 /* on EVENT_ADDR_ERROR there's no device yet for this conn */
@@ -442,7 +453,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
442 ISCSI_ERR_CONN_FAILED); 453 ISCSI_ERR_CONN_FAILED);
443 454
444 /* Complete the termination process if no posts are pending */ 455 /* Complete the termination process if no posts are pending */
445 if ((atomic_read(&ib_conn->post_recv_buf_count) == 0) && 456 if (ib_conn->post_recv_buf_count == 0 &&
446 (atomic_read(&ib_conn->post_send_buf_count) == 0)) { 457 (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
447 ib_conn->state = ISER_CONN_DOWN; 458 ib_conn->state = ISER_CONN_DOWN;
448 wake_up_interruptible(&ib_conn->wait); 459 wake_up_interruptible(&ib_conn->wait);
@@ -489,9 +500,8 @@ void iser_conn_init(struct iser_conn *ib_conn)
489{ 500{
490 ib_conn->state = ISER_CONN_INIT; 501 ib_conn->state = ISER_CONN_INIT;
491 init_waitqueue_head(&ib_conn->wait); 502 init_waitqueue_head(&ib_conn->wait);
492 atomic_set(&ib_conn->post_recv_buf_count, 0); 503 ib_conn->post_recv_buf_count = 0;
493 atomic_set(&ib_conn->post_send_buf_count, 0); 504 atomic_set(&ib_conn->post_send_buf_count, 0);
494 atomic_set(&ib_conn->unexpected_pdu_count, 0);
495 atomic_set(&ib_conn->refcount, 1); 505 atomic_set(&ib_conn->refcount, 1);
496 INIT_LIST_HEAD(&ib_conn->conn_list); 506 INIT_LIST_HEAD(&ib_conn->conn_list);
497 spin_lock_init(&ib_conn->lock); 507 spin_lock_init(&ib_conn->lock);
@@ -626,136 +636,97 @@ void iser_unreg_mem(struct iser_mem_reg *reg)
626 reg->mem_h = NULL; 636 reg->mem_h = NULL;
627} 637}
628 638
629/** 639int iser_post_recvl(struct iser_conn *ib_conn)
630 * iser_dto_to_iov - builds IOV from a dto descriptor
631 */
632static void iser_dto_to_iov(struct iser_dto *dto, struct ib_sge *iov, int iov_len)
633{ 640{
634 int i; 641 struct ib_recv_wr rx_wr, *rx_wr_failed;
635 struct ib_sge *sge; 642 struct ib_sge sge;
636 struct iser_regd_buf *regd_buf; 643 int ib_ret;
637
638 if (dto->regd_vector_len > iov_len) {
639 iser_err("iov size %d too small for posting dto of len %d\n",
640 iov_len, dto->regd_vector_len);
641 BUG();
642 }
643 644
644 for (i = 0; i < dto->regd_vector_len; i++) { 645 sge.addr = ib_conn->login_dma;
645 sge = &iov[i]; 646 sge.length = ISER_RX_LOGIN_SIZE;
646 regd_buf = dto->regd[i]; 647 sge.lkey = ib_conn->device->mr->lkey;
647
648 sge->addr = regd_buf->reg.va;
649 sge->length = regd_buf->reg.len;
650 sge->lkey = regd_buf->reg.lkey;
651
652 if (dto->used_sz[i] > 0) /* Adjust size */
653 sge->length = dto->used_sz[i];
654
655 /* offset and length should not exceed the regd buf length */
656 if (sge->length + dto->offset[i] > regd_buf->reg.len) {
657 iser_err("Used len:%ld + offset:%d, exceed reg.buf.len:"
658 "%ld in dto:0x%p [%d], va:0x%08lX\n",
659 (unsigned long)sge->length, dto->offset[i],
660 (unsigned long)regd_buf->reg.len, dto, i,
661 (unsigned long)sge->addr);
662 BUG();
663 }
664 648
665 sge->addr += dto->offset[i]; /* Adjust offset */ 649 rx_wr.wr_id = (unsigned long)ib_conn->login_buf;
650 rx_wr.sg_list = &sge;
651 rx_wr.num_sge = 1;
652 rx_wr.next = NULL;
653
654 ib_conn->post_recv_buf_count++;
655 ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
656 if (ib_ret) {
657 iser_err("ib_post_recv failed ret=%d\n", ib_ret);
658 ib_conn->post_recv_buf_count--;
666 } 659 }
660 return ib_ret;
667} 661}
668 662
669/** 663int iser_post_recvm(struct iser_conn *ib_conn, int count)
670 * iser_post_recv - Posts a receive buffer.
671 *
672 * returns 0 on success, -1 on failure
673 */
674int iser_post_recv(struct iser_desc *rx_desc)
675{ 664{
676 int ib_ret, ret_val = 0; 665 struct ib_recv_wr *rx_wr, *rx_wr_failed;
677 struct ib_recv_wr recv_wr, *recv_wr_failed; 666 int i, ib_ret;
678 struct ib_sge iov[2]; 667 unsigned int my_rx_head = ib_conn->rx_desc_head;
679 struct iser_conn *ib_conn; 668 struct iser_rx_desc *rx_desc;
680 struct iser_dto *recv_dto = &rx_desc->dto; 669
681 670 for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
682 /* Retrieve conn */ 671 rx_desc = &ib_conn->rx_descs[my_rx_head];
683 ib_conn = recv_dto->ib_conn; 672 rx_wr->wr_id = (unsigned long)rx_desc;
684 673 rx_wr->sg_list = &rx_desc->rx_sg;
685 iser_dto_to_iov(recv_dto, iov, 2); 674 rx_wr->num_sge = 1;
675 rx_wr->next = rx_wr + 1;
676 my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
677 }
686 678
687 recv_wr.next = NULL; 679 rx_wr--;
688 recv_wr.sg_list = iov; 680 rx_wr->next = NULL; /* mark end of work requests list */
689 recv_wr.num_sge = recv_dto->regd_vector_len;
690 recv_wr.wr_id = (unsigned long)rx_desc;
691 681
692 atomic_inc(&ib_conn->post_recv_buf_count); 682 ib_conn->post_recv_buf_count += count;
693 ib_ret = ib_post_recv(ib_conn->qp, &recv_wr, &recv_wr_failed); 683 ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
694 if (ib_ret) { 684 if (ib_ret) {
695 iser_err("ib_post_recv failed ret=%d\n", ib_ret); 685 iser_err("ib_post_recv failed ret=%d\n", ib_ret);
696 atomic_dec(&ib_conn->post_recv_buf_count); 686 ib_conn->post_recv_buf_count -= count;
697 ret_val = -1; 687 } else
698 } 688 ib_conn->rx_desc_head = my_rx_head;
699 689 return ib_ret;
700 return ret_val;
701} 690}
702 691
692
703/** 693/**
704 * iser_start_send - Initiate a Send DTO operation 694 * iser_start_send - Initiate a Send DTO operation
705 * 695 *
706 * returns 0 on success, -1 on failure 696 * returns 0 on success, -1 on failure
707 */ 697 */
708int iser_post_send(struct iser_desc *tx_desc) 698int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
709{ 699{
710 int ib_ret, ret_val = 0; 700 int ib_ret;
711 struct ib_send_wr send_wr, *send_wr_failed; 701 struct ib_send_wr send_wr, *send_wr_failed;
712 struct ib_sge iov[MAX_REGD_BUF_VECTOR_LEN];
713 struct iser_conn *ib_conn;
714 struct iser_dto *dto = &tx_desc->dto;
715 702
716 ib_conn = dto->ib_conn; 703 ib_dma_sync_single_for_device(ib_conn->device->ib_device,
717 704 tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
718 iser_dto_to_iov(dto, iov, MAX_REGD_BUF_VECTOR_LEN);
719 705
720 send_wr.next = NULL; 706 send_wr.next = NULL;
721 send_wr.wr_id = (unsigned long)tx_desc; 707 send_wr.wr_id = (unsigned long)tx_desc;
722 send_wr.sg_list = iov; 708 send_wr.sg_list = tx_desc->tx_sg;
723 send_wr.num_sge = dto->regd_vector_len; 709 send_wr.num_sge = tx_desc->num_sge;
724 send_wr.opcode = IB_WR_SEND; 710 send_wr.opcode = IB_WR_SEND;
725 send_wr.send_flags = dto->notify_enable ? IB_SEND_SIGNALED : 0; 711 send_wr.send_flags = IB_SEND_SIGNALED;
726 712
727 atomic_inc(&ib_conn->post_send_buf_count); 713 atomic_inc(&ib_conn->post_send_buf_count);
728 714
729 ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed); 715 ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
730 if (ib_ret) { 716 if (ib_ret) {
731 iser_err("Failed to start SEND DTO, dto: 0x%p, IOV len: %d\n",
732 dto, dto->regd_vector_len);
733 iser_err("ib_post_send failed, ret:%d\n", ib_ret); 717 iser_err("ib_post_send failed, ret:%d\n", ib_ret);
734 atomic_dec(&ib_conn->post_send_buf_count); 718 atomic_dec(&ib_conn->post_send_buf_count);
735 ret_val = -1;
736 } 719 }
737 720 return ib_ret;
738 return ret_val;
739} 721}
740 722
741static void iser_handle_comp_error(struct iser_desc *desc) 723static void iser_handle_comp_error(struct iser_tx_desc *desc,
724 struct iser_conn *ib_conn)
742{ 725{
743 struct iser_dto *dto = &desc->dto; 726 if (desc && desc->type == ISCSI_TX_DATAOUT)
744 struct iser_conn *ib_conn = dto->ib_conn;
745
746 iser_dto_buffs_release(dto);
747
748 if (desc->type == ISCSI_RX) {
749 kfree(desc->data);
750 kmem_cache_free(ig.desc_cache, desc); 727 kmem_cache_free(ig.desc_cache, desc);
751 atomic_dec(&ib_conn->post_recv_buf_count);
752 } else { /* type is TX control/command/dataout */
753 if (desc->type == ISCSI_TX_DATAOUT)
754 kmem_cache_free(ig.desc_cache, desc);
755 atomic_dec(&ib_conn->post_send_buf_count);
756 }
757 728
758 if (atomic_read(&ib_conn->post_recv_buf_count) == 0 && 729 if (ib_conn->post_recv_buf_count == 0 &&
759 atomic_read(&ib_conn->post_send_buf_count) == 0) { 730 atomic_read(&ib_conn->post_send_buf_count) == 0) {
760 /* getting here when the state is UP means that the conn is * 731 /* getting here when the state is UP means that the conn is *
761 * being terminated asynchronously from the iSCSI layer's * 732 * being terminated asynchronously from the iSCSI layer's *
@@ -774,32 +745,74 @@ static void iser_handle_comp_error(struct iser_desc *desc)
774 } 745 }
775} 746}
776 747
748static int iser_drain_tx_cq(struct iser_device *device)
749{
750 struct ib_cq *cq = device->tx_cq;
751 struct ib_wc wc;
752 struct iser_tx_desc *tx_desc;
753 struct iser_conn *ib_conn;
754 int completed_tx = 0;
755
756 while (ib_poll_cq(cq, 1, &wc) == 1) {
757 tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id;
758 ib_conn = wc.qp->qp_context;
759 if (wc.status == IB_WC_SUCCESS) {
760 if (wc.opcode == IB_WC_SEND)
761 iser_snd_completion(tx_desc, ib_conn);
762 else
763 iser_err("expected opcode %d got %d\n",
764 IB_WC_SEND, wc.opcode);
765 } else {
766 iser_err("tx id %llx status %d vend_err %x\n",
767 wc.wr_id, wc.status, wc.vendor_err);
768 atomic_dec(&ib_conn->post_send_buf_count);
769 iser_handle_comp_error(tx_desc, ib_conn);
770 }
771 completed_tx++;
772 }
773 return completed_tx;
774}
775
776
777static void iser_cq_tasklet_fn(unsigned long data) 777static void iser_cq_tasklet_fn(unsigned long data)
778{ 778{
779 struct iser_device *device = (struct iser_device *)data; 779 struct iser_device *device = (struct iser_device *)data;
780 struct ib_cq *cq = device->cq; 780 struct ib_cq *cq = device->rx_cq;
781 struct ib_wc wc; 781 struct ib_wc wc;
782 struct iser_desc *desc; 782 struct iser_rx_desc *desc;
783 unsigned long xfer_len; 783 unsigned long xfer_len;
784 struct iser_conn *ib_conn;
785 int completed_tx, completed_rx;
786 completed_tx = completed_rx = 0;
784 787
785 while (ib_poll_cq(cq, 1, &wc) == 1) { 788 while (ib_poll_cq(cq, 1, &wc) == 1) {
786 desc = (struct iser_desc *) (unsigned long) wc.wr_id; 789 desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
787 BUG_ON(desc == NULL); 790 BUG_ON(desc == NULL);
788 791 ib_conn = wc.qp->qp_context;
789 if (wc.status == IB_WC_SUCCESS) { 792 if (wc.status == IB_WC_SUCCESS) {
790 if (desc->type == ISCSI_RX) { 793 if (wc.opcode == IB_WC_RECV) {
791 xfer_len = (unsigned long)wc.byte_len; 794 xfer_len = (unsigned long)wc.byte_len;
792 iser_rcv_completion(desc, xfer_len); 795 iser_rcv_completion(desc, xfer_len, ib_conn);
793 } else /* type == ISCSI_TX_CONTROL/SCSI_CMD/DOUT */ 796 } else
794 iser_snd_completion(desc); 797 iser_err("expected opcode %d got %d\n",
798 IB_WC_RECV, wc.opcode);
795 } else { 799 } else {
796 iser_err("comp w. error op %d status %d\n",desc->type,wc.status); 800 if (wc.status != IB_WC_WR_FLUSH_ERR)
797 iser_handle_comp_error(desc); 801 iser_err("rx id %llx status %d vend_err %x\n",
802 wc.wr_id, wc.status, wc.vendor_err);
803 ib_conn->post_recv_buf_count--;
804 iser_handle_comp_error(NULL, ib_conn);
798 } 805 }
806 completed_rx++;
807 if (!(completed_rx & 63))
808 completed_tx += iser_drain_tx_cq(device);
799 } 809 }
800 /* #warning "it is assumed here that arming CQ only once its empty" * 810 /* #warning "it is assumed here that arming CQ only once its empty" *
801 * " would not cause interrupts to be missed" */ 811 * " would not cause interrupts to be missed" */
802 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 812 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
813
814 completed_tx += iser_drain_tx_cq(device);
815 iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
803} 816}
804 817
805static void iser_cq_callback(struct ib_cq *cq, void *cq_context) 818static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
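With the CQ split above, the rx tasklet drains the tx CQ opportunistically: once per 64 rx completions inside the poll loop ("if (!(completed_rx & 63))") and once more after the loop. A tiny stand-alone demo of that power-of-two mask idiom, assuming nothing beyond the arithmetic itself:

/*
 * Hedged sketch: the branch fires on every 64th iteration because 64 is a
 * power of two, so (n & 63) == 0 exactly when n is a multiple of 64.
 */
#include <stdio.h>

int main(void)
{
        int completed_rx, drained = 0;

        for (completed_rx = 1; completed_rx <= 256; completed_rx++)
                if (!(completed_rx & 63))    /* completed_rx % 64 == 0 */
                        drained++;

        printf("drained tx cq %d times over 256 rx completions\n", drained);
        return 0;
}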
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 54c8fe25c423..ed3f9ebae882 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -80,7 +80,8 @@ MODULE_PARM_DESC(mellanox_workarounds,
80 80
81static void srp_add_one(struct ib_device *device); 81static void srp_add_one(struct ib_device *device);
82static void srp_remove_one(struct ib_device *device); 82static void srp_remove_one(struct ib_device *device);
83static void srp_completion(struct ib_cq *cq, void *target_ptr); 83static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
84static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
84static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); 85static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
85 86
86static struct scsi_transport_template *ib_srp_transport_template; 87static struct scsi_transport_template *ib_srp_transport_template;
@@ -227,14 +228,21 @@ static int srp_create_target_ib(struct srp_target_port *target)
227 if (!init_attr) 228 if (!init_attr)
228 return -ENOMEM; 229 return -ENOMEM;
229 230
230 target->cq = ib_create_cq(target->srp_host->srp_dev->dev, 231 target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
231 srp_completion, NULL, target, SRP_CQ_SIZE, 0); 232 srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
232 if (IS_ERR(target->cq)) { 233 if (IS_ERR(target->recv_cq)) {
233 ret = PTR_ERR(target->cq); 234 ret = PTR_ERR(target->recv_cq);
234 goto out; 235 goto err;
235 } 236 }
236 237
237 ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP); 238 target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
239 srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
240 if (IS_ERR(target->send_cq)) {
241 ret = PTR_ERR(target->send_cq);
242 goto err_recv_cq;
243 }
244
245 ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
238 246
239 init_attr->event_handler = srp_qp_event; 247 init_attr->event_handler = srp_qp_event;
240 init_attr->cap.max_send_wr = SRP_SQ_SIZE; 248 init_attr->cap.max_send_wr = SRP_SQ_SIZE;
@@ -243,24 +251,32 @@ static int srp_create_target_ib(struct srp_target_port *target)
243 init_attr->cap.max_send_sge = 1; 251 init_attr->cap.max_send_sge = 1;
244 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 252 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
245 init_attr->qp_type = IB_QPT_RC; 253 init_attr->qp_type = IB_QPT_RC;
246 init_attr->send_cq = target->cq; 254 init_attr->send_cq = target->send_cq;
247 init_attr->recv_cq = target->cq; 255 init_attr->recv_cq = target->recv_cq;
248 256
249 target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr); 257 target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
250 if (IS_ERR(target->qp)) { 258 if (IS_ERR(target->qp)) {
251 ret = PTR_ERR(target->qp); 259 ret = PTR_ERR(target->qp);
252 ib_destroy_cq(target->cq); 260 goto err_send_cq;
253 goto out;
254 } 261 }
255 262
256 ret = srp_init_qp(target, target->qp); 263 ret = srp_init_qp(target, target->qp);
257 if (ret) { 264 if (ret)
258 ib_destroy_qp(target->qp); 265 goto err_qp;
259 ib_destroy_cq(target->cq);
260 goto out;
261 }
262 266
263out: 267 kfree(init_attr);
268 return 0;
269
270err_qp:
271 ib_destroy_qp(target->qp);
272
273err_send_cq:
274 ib_destroy_cq(target->send_cq);
275
276err_recv_cq:
277 ib_destroy_cq(target->recv_cq);
278
279err:
264 kfree(init_attr); 280 kfree(init_attr);
265 return ret; 281 return ret;
266} 282}
@@ -270,7 +286,8 @@ static void srp_free_target_ib(struct srp_target_port *target)
270 int i; 286 int i;
271 287
272 ib_destroy_qp(target->qp); 288 ib_destroy_qp(target->qp);
273 ib_destroy_cq(target->cq); 289 ib_destroy_cq(target->send_cq);
290 ib_destroy_cq(target->recv_cq);
274 291
275 for (i = 0; i < SRP_RQ_SIZE; ++i) 292 for (i = 0; i < SRP_RQ_SIZE; ++i)
276 srp_free_iu(target->srp_host, target->rx_ring[i]); 293 srp_free_iu(target->srp_host, target->rx_ring[i]);
@@ -568,7 +585,9 @@ static int srp_reconnect_target(struct srp_target_port *target)
568 if (ret) 585 if (ret)
569 goto err; 586 goto err;
570 587
571 while (ib_poll_cq(target->cq, 1, &wc) > 0) 588 while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
589 ; /* nothing */
590 while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
572 ; /* nothing */ 591 ; /* nothing */
573 592
574 spin_lock_irq(target->scsi_host->host_lock); 593 spin_lock_irq(target->scsi_host->host_lock);
@@ -851,7 +870,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
851 struct srp_iu *iu; 870 struct srp_iu *iu;
852 u8 opcode; 871 u8 opcode;
853 872
854 iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV]; 873 iu = target->rx_ring[wc->wr_id];
855 874
856 dev = target->srp_host->srp_dev->dev; 875 dev = target->srp_host->srp_dev->dev;
857 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len, 876 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
@@ -898,7 +917,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
898 DMA_FROM_DEVICE); 917 DMA_FROM_DEVICE);
899} 918}
900 919
901static void srp_completion(struct ib_cq *cq, void *target_ptr) 920static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
902{ 921{
903 struct srp_target_port *target = target_ptr; 922 struct srp_target_port *target = target_ptr;
904 struct ib_wc wc; 923 struct ib_wc wc;
@@ -907,17 +926,31 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
907 while (ib_poll_cq(cq, 1, &wc) > 0) { 926 while (ib_poll_cq(cq, 1, &wc) > 0) {
908 if (wc.status) { 927 if (wc.status) {
909 shost_printk(KERN_ERR, target->scsi_host, 928 shost_printk(KERN_ERR, target->scsi_host,
910 PFX "failed %s status %d\n", 929 PFX "failed receive status %d\n",
911 wc.wr_id & SRP_OP_RECV ? "receive" : "send",
912 wc.status); 930 wc.status);
913 target->qp_in_error = 1; 931 target->qp_in_error = 1;
914 break; 932 break;
915 } 933 }
916 934
917 if (wc.wr_id & SRP_OP_RECV) 935 srp_handle_recv(target, &wc);
918 srp_handle_recv(target, &wc); 936 }
919 else 937}
920 ++target->tx_tail; 938
939static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
940{
941 struct srp_target_port *target = target_ptr;
942 struct ib_wc wc;
943
944 while (ib_poll_cq(cq, 1, &wc) > 0) {
945 if (wc.status) {
946 shost_printk(KERN_ERR, target->scsi_host,
947 PFX "failed send status %d\n",
948 wc.status);
949 target->qp_in_error = 1;
950 break;
951 }
952
953 ++target->tx_tail;
921 } 954 }
922} 955}
923 956
@@ -930,7 +963,7 @@ static int __srp_post_recv(struct srp_target_port *target)
930 int ret; 963 int ret;
931 964
932 next = target->rx_head & (SRP_RQ_SIZE - 1); 965 next = target->rx_head & (SRP_RQ_SIZE - 1);
933 wr.wr_id = next | SRP_OP_RECV; 966 wr.wr_id = next;
934 iu = target->rx_ring[next]; 967 iu = target->rx_ring[next];
935 968
936 list.addr = iu->dma; 969 list.addr = iu->dma;
@@ -970,6 +1003,8 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
970{ 1003{
971 s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2; 1004 s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
972 1005
1006 srp_send_completion(target->send_cq, target);
1007
973 if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) 1008 if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
974 return NULL; 1009 return NULL;
975 1010
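The SRP change above stops accounting for send completions in the shared handler and instead reclaims them lazily: __srp_get_tx_iu() first polls the send CQ (advancing tx_tail), then compares tx_head - tx_tail against the queue depth. A user-space analogue of that credit scheme, with SQ_SIZE standing in for SRP_SQ_SIZE and a counter standing in for the HCA:

/*
 * Hedged sketch (not the driver code): free-running head/tail counters,
 * where unsigned subtraction gives the number of in-flight sends even
 * across wraparound.
 */
#include <stdio.h>

#define SQ_SIZE 8

static unsigned int tx_head, tx_tail;   /* free-running counters */
static unsigned int hw_completed;       /* sends the "HCA" has finished */

/* Analogue of polling the send CQ: advance tx_tail per completion. */
static void reclaim_send_credits(void)
{
        while (tx_tail != hw_completed)
                tx_tail++;
}

/* Analogue of __srp_get_tx_iu(): -1 plays the role of NULL when full. */
static int get_tx_slot(void)
{
        reclaim_send_credits();
        if (tx_head - tx_tail >= SQ_SIZE)
                return -1;
        return (int)(tx_head++ % SQ_SIZE);
}

int main(void)
{
        int i;

        for (i = 0; i < 10; i++) {
                if (i == 9)
                        hw_completed = 3;   /* three sends complete late */
                printf("request %d -> slot %d\n", i, get_tx_slot());
        }
        return 0;
}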
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index e185b907fc12..5a80eac6fdaa 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -60,7 +60,6 @@ enum {
60 SRP_RQ_SHIFT = 6, 60 SRP_RQ_SHIFT = 6,
61 SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT, 61 SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
62 SRP_SQ_SIZE = SRP_RQ_SIZE - 1, 62 SRP_SQ_SIZE = SRP_RQ_SIZE - 1,
63 SRP_CQ_SIZE = SRP_SQ_SIZE + SRP_RQ_SIZE,
64 63
65 SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1), 64 SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
66 65
@@ -69,8 +68,6 @@ enum {
69 SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4 68 SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4
70}; 69};
71 70
72#define SRP_OP_RECV (1 << 31)
73
74enum srp_target_state { 71enum srp_target_state {
75 SRP_TARGET_LIVE, 72 SRP_TARGET_LIVE,
76 SRP_TARGET_CONNECTING, 73 SRP_TARGET_CONNECTING,
@@ -133,7 +130,8 @@ struct srp_target_port {
133 int path_query_id; 130 int path_query_id;
134 131
135 struct ib_cm_id *cm_id; 132 struct ib_cm_id *cm_id;
136 struct ib_cq *cq; 133 struct ib_cq *recv_cq;
134 struct ib_cq *send_cq;
137 struct ib_qp *qp; 135 struct ib_qp *qp;
138 136
139 int max_ti_iu_len; 137 int max_ti_iu_len;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 509c8f3dd9a5..70ffbd071b2e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4680,7 +4680,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
4680{ 4680{
4681 unsigned long cpu; 4681 unsigned long cpu;
4682 struct page *spare_page; 4682 struct page *spare_page;
4683 struct raid5_percpu *allcpus; 4683 struct raid5_percpu __percpu *allcpus;
4684 void *scribble; 4684 void *scribble;
4685 int err; 4685 int err;
4686 4686
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index dd708359b451..0f86f5e36724 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -405,7 +405,7 @@ struct raid5_private_data {
405 * lists and performing address 405 * lists and performing address
406 * conversions 406 * conversions
407 */ 407 */
408 } *percpu; 408 } __percpu *percpu;
409 size_t scribble_len; /* size of scribble region must be 409 size_t scribble_len; /* size of scribble region must be
410 * associated with conf to handle 410 * associated with conf to handle
411 * cpu hotplug while reshaping 411 * cpu hotplug while reshaping
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c
index 7a3de16fba06..75afe4f81e33 100644
--- a/drivers/media/dvb/firewire/firedtv-fw.c
+++ b/drivers/media/dvb/firewire/firedtv-fw.c
@@ -239,47 +239,18 @@ static const struct fw_address_region fcp_region = {
239}; 239};
240 240
241/* Adjust the template string if models with longer names appear. */ 241/* Adjust the template string if models with longer names appear. */
242#define MAX_MODEL_NAME_LEN ((int)DIV_ROUND_UP(sizeof("FireDTV ????"), 4)) 242#define MAX_MODEL_NAME_LEN sizeof("FireDTV ????")
243
244static size_t model_name(u32 *directory, __be32 *buffer)
245{
246 struct fw_csr_iterator ci;
247 int i, length, key, value, last_key = 0;
248 u32 *block = NULL;
249
250 fw_csr_iterator_init(&ci, directory);
251 while (fw_csr_iterator_next(&ci, &key, &value)) {
252 if (last_key == CSR_MODEL &&
253 key == (CSR_DESCRIPTOR | CSR_LEAF))
254 block = ci.p - 1 + value;
255 last_key = key;
256 }
257
258 if (block == NULL)
259 return 0;
260
261 length = min((int)(block[0] >> 16) - 2, MAX_MODEL_NAME_LEN);
262 if (length <= 0)
263 return 0;
264
265 /* fast-forward to text string */
266 block += 3;
267
268 for (i = 0; i < length; i++)
269 buffer[i] = cpu_to_be32(block[i]);
270
271 return length * 4;
272}
273 243
274static int node_probe(struct device *dev) 244static int node_probe(struct device *dev)
275{ 245{
276 struct firedtv *fdtv; 246 struct firedtv *fdtv;
277 __be32 name[MAX_MODEL_NAME_LEN]; 247 char name[MAX_MODEL_NAME_LEN];
278 int name_len, err; 248 int name_len, err;
279 249
280 name_len = model_name(fw_unit(dev)->directory, name); 250 name_len = fw_csr_string(fw_unit(dev)->directory, CSR_MODEL,
251 name, sizeof(name));
281 252
282 fdtv = fdtv_alloc(dev, &backend, (char *)name, name_len); 253 fdtv = fdtv_alloc(dev, &backend, name, name_len >= 0 ? name_len : 0);
283 if (!fdtv) 254 if (!fdtv)
284 return -ENOMEM; 255 return -ENOMEM;
285 256
diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c
index 9b413a35e048..0f505086774c 100644
--- a/drivers/media/video/dabusb.c
+++ b/drivers/media/video/dabusb.c
@@ -616,10 +616,12 @@ static int dabusb_open (struct inode *inode, struct file *file)
616{ 616{
617 int devnum = iminor(inode); 617 int devnum = iminor(inode);
618 pdabusb_t s; 618 pdabusb_t s;
619 int r;
619 620
620 if (devnum < DABUSB_MINOR || devnum >= (DABUSB_MINOR + NRDABUSB)) 621 if (devnum < DABUSB_MINOR || devnum >= (DABUSB_MINOR + NRDABUSB))
621 return -EIO; 622 return -EIO;
622 623
624 lock_kernel();
623 s = &dabusb[devnum - DABUSB_MINOR]; 625 s = &dabusb[devnum - DABUSB_MINOR];
624 626
625 dbg("dabusb_open"); 627 dbg("dabusb_open");
@@ -634,6 +636,7 @@ static int dabusb_open (struct inode *inode, struct file *file)
634 msleep_interruptible(500); 636 msleep_interruptible(500);
635 637
636 if (signal_pending (current)) { 638 if (signal_pending (current)) {
639 unlock_kernel();
637 return -EAGAIN; 640 return -EAGAIN;
638 } 641 }
639 mutex_lock(&s->mutex); 642 mutex_lock(&s->mutex);
@@ -641,6 +644,7 @@ static int dabusb_open (struct inode *inode, struct file *file)
641 if (usb_set_interface (s->usbdev, _DABUSB_IF, 1) < 0) { 644 if (usb_set_interface (s->usbdev, _DABUSB_IF, 1) < 0) {
642 mutex_unlock(&s->mutex); 645 mutex_unlock(&s->mutex);
643 dev_err(&s->usbdev->dev, "set_interface failed\n"); 646 dev_err(&s->usbdev->dev, "set_interface failed\n");
647 unlock_kernel();
644 return -EINVAL; 648 return -EINVAL;
645 } 649 }
646 s->opened = 1; 650 s->opened = 1;
@@ -649,7 +653,9 @@ static int dabusb_open (struct inode *inode, struct file *file)
649 file->f_pos = 0; 653 file->f_pos = 0;
650 file->private_data = s; 654 file->private_data = s;
651 655
652 return nonseekable_open(inode, file); 656 r = nonseekable_open(inode, file);
657 unlock_kernel();
658 return r;
653} 659}
654 660
655static int dabusb_release (struct inode *inode, struct file *file) 661static int dabusb_release (struct inode *inode, struct file *file)
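The BKL pushdown above has to pair lock_kernel() with an unlock_kernel() on every early return from open(). One common way to keep such paths balanced is a single exit point; the sketch below is a hedged user-space analogue using a pthread mutex, with the error conditions and names purely illustrative, not the driver's logic.

/*
 * Hedged sketch: all early exits funnel through one "out:" label so the
 * lock is dropped exactly once on every path.
 */
#include <pthread.h>
#include <stdio.h>
#include <errno.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static int device_open(int minor, int simulate_failure)
{
        int ret = 0;

        pthread_mutex_lock(&big_lock);

        if (minor < 0) {                 /* analogue of the -EIO check */
                ret = -EIO;
                goto out;
        }
        if (simulate_failure) {          /* analogue of set_interface failing */
                ret = -EINVAL;
                goto out;
        }
        /* ... the rest of the open path would run here ... */
out:
        pthread_mutex_unlock(&big_lock);
        return ret;
}

int main(void)
{
        printf("open(0, 0) = %d\n", device_open(0, 0));
        printf("open(0, 1) = %d\n", device_open(0, 1));
        return 0;
}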
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index f53755533e7e..3fab78ba8952 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -37,6 +37,7 @@
37#include <linux/gfp.h> 37#include <linux/gfp.h>
38#include <linux/tty.h> 38#include <linux/tty.h>
39#include <linux/tty_flip.h> 39#include <linux/tty_flip.h>
40#include <linux/kfifo.h>
40 41
41#include <linux/mmc/core.h> 42#include <linux/mmc/core.h>
42#include <linux/mmc/card.h> 43#include <linux/mmc/card.h>
@@ -47,19 +48,9 @@
47#define UART_NR 8 /* Number of UARTs this driver can handle */ 48#define UART_NR 8 /* Number of UARTs this driver can handle */
48 49
49 50
50#define UART_XMIT_SIZE PAGE_SIZE 51#define FIFO_SIZE PAGE_SIZE
51#define WAKEUP_CHARS 256 52#define WAKEUP_CHARS 256
52 53
53#define circ_empty(circ) ((circ)->head == (circ)->tail)
54#define circ_clear(circ) ((circ)->head = (circ)->tail = 0)
55
56#define circ_chars_pending(circ) \
57 (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE))
58
59#define circ_chars_free(circ) \
60 (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE))
61
62
63struct uart_icount { 54struct uart_icount {
64 __u32 cts; 55 __u32 cts;
65 __u32 dsr; 56 __u32 dsr;
@@ -82,7 +73,7 @@ struct sdio_uart_port {
82 struct mutex func_lock; 73 struct mutex func_lock;
83 struct task_struct *in_sdio_uart_irq; 74 struct task_struct *in_sdio_uart_irq;
84 unsigned int regs_offset; 75 unsigned int regs_offset;
85 struct circ_buf xmit; 76 struct kfifo xmit_fifo;
86 spinlock_t write_lock; 77 spinlock_t write_lock;
87 struct uart_icount icount; 78 struct uart_icount icount;
88 unsigned int uartclk; 79 unsigned int uartclk;
@@ -105,6 +96,8 @@ static int sdio_uart_add_port(struct sdio_uart_port *port)
105 kref_init(&port->kref); 96 kref_init(&port->kref);
106 mutex_init(&port->func_lock); 97 mutex_init(&port->func_lock);
107 spin_lock_init(&port->write_lock); 98 spin_lock_init(&port->write_lock);
99 if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL))
100 return -ENOMEM;
108 101
109 spin_lock(&sdio_uart_table_lock); 102 spin_lock(&sdio_uart_table_lock);
110 for (index = 0; index < UART_NR; index++) { 103 for (index = 0; index < UART_NR; index++) {
@@ -140,6 +133,7 @@ static void sdio_uart_port_destroy(struct kref *kref)
140{ 133{
141 struct sdio_uart_port *port = 134 struct sdio_uart_port *port =
142 container_of(kref, struct sdio_uart_port, kref); 135 container_of(kref, struct sdio_uart_port, kref);
136 kfifo_free(&port->xmit_fifo);
143 kfree(port); 137 kfree(port);
144} 138}
145 139
@@ -456,9 +450,11 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port,
456 450
457static void sdio_uart_transmit_chars(struct sdio_uart_port *port) 451static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
458{ 452{
459 struct circ_buf *xmit = &port->xmit; 453 struct kfifo *xmit = &port->xmit_fifo;
460 int count; 454 int count;
461 struct tty_struct *tty; 455 struct tty_struct *tty;
456 u8 iobuf[16];
457 int len;
462 458
463 if (port->x_char) { 459 if (port->x_char) {
464 sdio_out(port, UART_TX, port->x_char); 460 sdio_out(port, UART_TX, port->x_char);
@@ -469,27 +465,25 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
469 465
470 tty = tty_port_tty_get(&port->port); 466 tty = tty_port_tty_get(&port->port);
471 467
472 if (tty == NULL || circ_empty(xmit) || 468 if (tty == NULL || !kfifo_len(xmit) ||
473 tty->stopped || tty->hw_stopped) { 469 tty->stopped || tty->hw_stopped) {
474 sdio_uart_stop_tx(port); 470 sdio_uart_stop_tx(port);
475 tty_kref_put(tty); 471 tty_kref_put(tty);
476 return; 472 return;
477 } 473 }
478 474
479 count = 16; 475 len = kfifo_out_locked(xmit, iobuf, 16, &port->write_lock);
480 do { 476 for (count = 0; count < len; count++) {
481 sdio_out(port, UART_TX, xmit->buf[xmit->tail]); 477 sdio_out(port, UART_TX, iobuf[count]);
482 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
483 port->icount.tx++; 478 port->icount.tx++;
484 if (circ_empty(xmit)) 479 }
485 break;
486 } while (--count > 0);
487 480
488 if (circ_chars_pending(xmit) < WAKEUP_CHARS) 481 len = kfifo_len(xmit);
482 if (len < WAKEUP_CHARS) {
489 tty_wakeup(tty); 483 tty_wakeup(tty);
490 484 if (len == 0)
491 if (circ_empty(xmit)) 485 sdio_uart_stop_tx(port);
492 sdio_uart_stop_tx(port); 486 }
493 tty_kref_put(tty); 487 tty_kref_put(tty);
494} 488}
495 489
@@ -632,7 +626,6 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
632{ 626{
633 struct sdio_uart_port *port = 627 struct sdio_uart_port *port =
634 container_of(tport, struct sdio_uart_port, port); 628 container_of(tport, struct sdio_uart_port, port);
635 unsigned long page;
636 int ret; 629 int ret;
637 630
638 /* 631 /*
@@ -641,22 +634,17 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
641 */ 634 */
642 set_bit(TTY_IO_ERROR, &tty->flags); 635 set_bit(TTY_IO_ERROR, &tty->flags);
643 636
644 /* Initialise and allocate the transmit buffer. */ 637 kfifo_reset(&port->xmit_fifo);
645 page = __get_free_page(GFP_KERNEL);
646 if (!page)
647 return -ENOMEM;
648 port->xmit.buf = (unsigned char *)page;
649 circ_clear(&port->xmit);
650 638
651 ret = sdio_uart_claim_func(port); 639 ret = sdio_uart_claim_func(port);
652 if (ret) 640 if (ret)
653 goto err1; 641 return ret;
654 ret = sdio_enable_func(port->func); 642 ret = sdio_enable_func(port->func);
655 if (ret) 643 if (ret)
656 goto err2; 644 goto err1;
657 ret = sdio_claim_irq(port->func, sdio_uart_irq); 645 ret = sdio_claim_irq(port->func, sdio_uart_irq);
658 if (ret) 646 if (ret)
659 goto err3; 647 goto err2;
660 648
661 /* 649 /*
662 * Clear the FIFO buffers and disable them. 650 * Clear the FIFO buffers and disable them.
@@ -700,12 +688,10 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
700 sdio_uart_release_func(port); 688 sdio_uart_release_func(port);
701 return 0; 689 return 0;
702 690
703err3:
704 sdio_disable_func(port->func);
705err2: 691err2:
706 sdio_uart_release_func(port); 692 sdio_disable_func(port->func);
707err1: 693err1:
708 free_page((unsigned long)port->xmit.buf); 694 sdio_uart_release_func(port);
709 return ret; 695 return ret;
710} 696}
711 697
@@ -727,7 +713,7 @@ static void sdio_uart_shutdown(struct tty_port *tport)
727 713
728 ret = sdio_uart_claim_func(port); 714 ret = sdio_uart_claim_func(port);
729 if (ret) 715 if (ret)
730 goto skip; 716 return;
731 717
732 sdio_uart_stop_rx(port); 718 sdio_uart_stop_rx(port);
733 719
@@ -749,10 +735,6 @@ static void sdio_uart_shutdown(struct tty_port *tport)
749 sdio_disable_func(port->func); 735 sdio_disable_func(port->func);
750 736
751 sdio_uart_release_func(port); 737 sdio_uart_release_func(port);
752
753skip:
754 /* Free the transmit buffer page. */
755 free_page((unsigned long)port->xmit.buf);
756} 738}
757 739
758/** 740/**
@@ -822,27 +804,12 @@ static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
822 int count) 804 int count)
823{ 805{
824 struct sdio_uart_port *port = tty->driver_data; 806 struct sdio_uart_port *port = tty->driver_data;
825 struct circ_buf *circ = &port->xmit; 807 int ret;
826 int c, ret = 0;
827 808
828 if (!port->func) 809 if (!port->func)
829 return -ENODEV; 810 return -ENODEV;
830 811
831 spin_lock(&port->write_lock); 812 ret = kfifo_in_locked(&port->xmit_fifo, buf, count, &port->write_lock);
832 while (1) {
833 c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
834 if (count < c)
835 c = count;
836 if (c <= 0)
837 break;
838 memcpy(circ->buf + circ->head, buf, c);
839 circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
840 buf += c;
841 count -= c;
842 ret += c;
843 }
844 spin_unlock(&port->write_lock);
845
846 if (!(port->ier & UART_IER_THRI)) { 813 if (!(port->ier & UART_IER_THRI)) {
847 int err = sdio_uart_claim_func(port); 814 int err = sdio_uart_claim_func(port);
848 if (!err) { 815 if (!err) {
@@ -859,13 +826,13 @@ static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
859static int sdio_uart_write_room(struct tty_struct *tty) 826static int sdio_uart_write_room(struct tty_struct *tty)
860{ 827{
861 struct sdio_uart_port *port = tty->driver_data; 828 struct sdio_uart_port *port = tty->driver_data;
862 return port ? circ_chars_free(&port->xmit) : 0; 829 return FIFO_SIZE - kfifo_len(&port->xmit_fifo);
863} 830}
864 831
865static int sdio_uart_chars_in_buffer(struct tty_struct *tty) 832static int sdio_uart_chars_in_buffer(struct tty_struct *tty)
866{ 833{
867 struct sdio_uart_port *port = tty->driver_data; 834 struct sdio_uart_port *port = tty->driver_data;
868 return port ? circ_chars_pending(&port->xmit) : 0; 835 return kfifo_len(&port->xmit_fifo);
869} 836}
870 837
871static void sdio_uart_send_xchar(struct tty_struct *tty, char ch) 838static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
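The sdio_uart conversion above replaces a hand-rolled circ_buf with the kernel's kfifo (kfifo_alloc, kfifo_in_locked, kfifo_out_locked, kfifo_len), passing the existing write_lock into the locked variants. The sketch below is a minimal user-space analogue of the underlying ring arithmetic only, assuming a power-of-two size; it is not the kfifo API.

/*
 * Hedged sketch: a byte FIFO over a power-of-two buffer, using free-running
 * in/out counters and masking, so "len = in - out" and no modulo is needed.
 */
#include <stdio.h>

#define FIFO_SIZE 16                      /* must be a power of two */

struct fifo {
        unsigned char buf[FIFO_SIZE];
        unsigned int in, out;             /* free-running counters */
};

static unsigned int fifo_len(const struct fifo *f)  { return f->in - f->out; }
static unsigned int fifo_room(const struct fifo *f) { return FIFO_SIZE - fifo_len(f); }

static unsigned int fifo_in(struct fifo *f, const unsigned char *src, unsigned int n)
{
        unsigned int i;

        if (n > fifo_room(f))
                n = fifo_room(f);
        for (i = 0; i < n; i++)
                f->buf[(f->in + i) & (FIFO_SIZE - 1)] = src[i];
        f->in += n;
        return n;
}

static unsigned int fifo_out(struct fifo *f, unsigned char *dst, unsigned int n)
{
        unsigned int i;

        if (n > fifo_len(f))
                n = fifo_len(f);
        for (i = 0; i < n; i++)
                dst[i] = f->buf[(f->out + i) & (FIFO_SIZE - 1)];
        f->out += n;
        return n;
}

int main(void)
{
        struct fifo f = { .in = 0, .out = 0 };
        unsigned char chunk[8];
        unsigned int n;

        fifo_in(&f, (const unsigned char *)"hello, sdio uart", 16);
        n = fifo_out(&f, chunk, sizeof(chunk));
        printf("got %u bytes, %u still queued\n", n, fifo_len(&f));
        return 0;
}

Using a library FIFO removes the open-coded head/tail masking from the transmit and write paths and lets the driver drop the per-port transmit page allocation entirely.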
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 3e8618b4efbc..4cd7f420766a 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -264,6 +264,10 @@ struct adapter {
264 struct work_struct fatal_error_handler_task; 264 struct work_struct fatal_error_handler_task;
265 struct work_struct link_fault_handler_task; 265 struct work_struct link_fault_handler_task;
266 266
267 struct work_struct db_full_task;
268 struct work_struct db_empty_task;
269 struct work_struct db_drop_task;
270
267 struct dentry *debugfs_root; 271 struct dentry *debugfs_root;
268 272
269 struct mutex mdio_lock; 273 struct mutex mdio_lock;
@@ -335,6 +339,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
335int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, 339int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
336 unsigned char *data); 340 unsigned char *data);
337irqreturn_t t3_sge_intr_msix(int irq, void *cookie); 341irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
342extern struct workqueue_struct *cxgb3_wq;
338 343
339int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size); 344int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
340 345
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 6fd968abb073..3e453e1d97e7 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -45,6 +45,7 @@
45#include <linux/firmware.h> 45#include <linux/firmware.h>
46#include <linux/log2.h> 46#include <linux/log2.h>
47#include <linux/stringify.h> 47#include <linux/stringify.h>
48#include <linux/sched.h>
48#include <asm/uaccess.h> 49#include <asm/uaccess.h>
49 50
50#include "common.h" 51#include "common.h"
@@ -140,7 +141,7 @@ MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
140 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting 141 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
141 * for our work to complete. Get our own work queue to solve this. 142 * for our work to complete. Get our own work queue to solve this.
142 */ 143 */
143static struct workqueue_struct *cxgb3_wq; 144struct workqueue_struct *cxgb3_wq;
144 145
145/** 146/**
146 * link_report - show link status and link speed/duplex 147 * link_report - show link status and link speed/duplex
@@ -586,6 +587,19 @@ static void setup_rss(struct adapter *adap)
586 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map); 587 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
587} 588}
588 589
590static void ring_dbs(struct adapter *adap)
591{
592 int i, j;
593
594 for (i = 0; i < SGE_QSETS; i++) {
595 struct sge_qset *qs = &adap->sge.qs[i];
596
597 if (qs->adap)
598 for (j = 0; j < SGE_TXQ_PER_SET; j++)
599 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
600 }
601}
602
589static void init_napi(struct adapter *adap) 603static void init_napi(struct adapter *adap)
590{ 604{
591 int i; 605 int i;
@@ -2750,6 +2764,42 @@ static void t3_adap_check_task(struct work_struct *work)
2750 spin_unlock_irq(&adapter->work_lock); 2764 spin_unlock_irq(&adapter->work_lock);
2751} 2765}
2752 2766
2767static void db_full_task(struct work_struct *work)
2768{
2769 struct adapter *adapter = container_of(work, struct adapter,
2770 db_full_task);
2771
2772 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2773}
2774
2775static void db_empty_task(struct work_struct *work)
2776{
2777 struct adapter *adapter = container_of(work, struct adapter,
2778 db_empty_task);
2779
2780 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2781}
2782
2783static void db_drop_task(struct work_struct *work)
2784{
2785 struct adapter *adapter = container_of(work, struct adapter,
2786 db_drop_task);
2787 unsigned long delay = 1000;
2788 unsigned short r;
2789
2790 cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2791
2792 /*
2793 * Sleep a while before ringing the driver qset dbs.
2794 * The delay is between 1000-2023 usecs.
2795 */
2796 get_random_bytes(&r, 2);
2797 delay += r & 1023;
2798 set_current_state(TASK_UNINTERRUPTIBLE);
2799 schedule_timeout(usecs_to_jiffies(delay));
2800 ring_dbs(adapter);
2801}
2802
2753/* 2803/*
2754 * Processes external (PHY) interrupts in process context. 2804 * Processes external (PHY) interrupts in process context.
2755 */ 2805 */
@@ -3218,6 +3268,11 @@ static int __devinit init_one(struct pci_dev *pdev,
3218 INIT_LIST_HEAD(&adapter->adapter_list); 3268 INIT_LIST_HEAD(&adapter->adapter_list);
3219 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); 3269 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3220 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); 3270 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3271
3272 INIT_WORK(&adapter->db_full_task, db_full_task);
3273 INIT_WORK(&adapter->db_empty_task, db_empty_task);
3274 INIT_WORK(&adapter->db_drop_task, db_drop_task);
3275
3221 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); 3276 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3222 3277
3223 for (i = 0; i < ai->nports0 + ai->nports1; ++i) { 3278 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 670aa62042da..929c298115ca 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -73,7 +73,10 @@ enum {
73 OFFLOAD_STATUS_UP, 73 OFFLOAD_STATUS_UP,
74 OFFLOAD_STATUS_DOWN, 74 OFFLOAD_STATUS_DOWN,
75 OFFLOAD_PORT_DOWN, 75 OFFLOAD_PORT_DOWN,
76 OFFLOAD_PORT_UP 76 OFFLOAD_PORT_UP,
77 OFFLOAD_DB_FULL,
78 OFFLOAD_DB_EMPTY,
79 OFFLOAD_DB_DROP
77}; 80};
78 81
79struct cxgb3_client { 82struct cxgb3_client {
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 1b5327b5a965..cb42353c9fdd 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -254,6 +254,22 @@
254#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR) 254#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
255#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U) 255#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
256 256
257#define S_HIPRIORITYDBFULL 7
258#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
259#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
260
261#define S_HIPRIORITYDBEMPTY 6
262#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
263#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
264
265#define S_LOPRIORITYDBFULL 5
266#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
267#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
268
269#define S_LOPRIORITYDBEMPTY 4
270#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
271#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
272
257#define S_RSPQDISABLED 3 273#define S_RSPQDISABLED 3
258#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED) 274#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
259#define F_RSPQDISABLED V_RSPQDISABLED(1U) 275#define F_RSPQDISABLED V_RSPQDISABLED(1U)
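The doorbell status bits added to regs.h above follow the driver's usual S_/V_/F_ macro triplet: S_x is the bit position, V_x(v) shifts a value into that field, and F_x is the single-bit mask V_x(1U). A small stand-alone sketch of testing a status word against such masks (the macro definitions are copied from the hunk above; the status value itself is made up for illustration):

#include <stdio.h>

#define S_HIPRIORITYDBFULL	7
#define V_HIPRIORITYDBFULL(x)	((x) << S_HIPRIORITYDBFULL)
#define F_HIPRIORITYDBFULL	V_HIPRIORITYDBFULL(1U)

#define S_LOPRIORITYDBFULL	5
#define V_LOPRIORITYDBFULL(x)	((x) << S_LOPRIORITYDBFULL)
#define F_LOPRIORITYDBFULL	V_LOPRIORITYDBFULL(1U)

int main(void)
{
	/* Hypothetical SG_INT_CAUSE value with bit 7 (high-priority
	 * doorbell full) set. */
	unsigned int status = 0x80;

	if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
		printf("doorbell full: queue db_full_task\n");
	return 0;
}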
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 048205903741..78e265b484b6 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -42,6 +42,7 @@
42#include "sge_defs.h" 42#include "sge_defs.h"
43#include "t3_cpl.h" 43#include "t3_cpl.h"
44#include "firmware_exports.h" 44#include "firmware_exports.h"
45#include "cxgb3_offload.h"
45 46
46#define USE_GTS 0 47#define USE_GTS 0
47 48
@@ -2841,8 +2842,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
2841 } 2842 }
2842 2843
2843 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) 2844 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2844 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n", 2845 queue_work(cxgb3_wq, &adapter->db_drop_task);
2845 status & F_HIPIODRBDROPERR ? "high" : "lo"); 2846
2847 if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2848 queue_work(cxgb3_wq, &adapter->db_full_task);
2849
2850 if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2851 queue_work(cxgb3_wq, &adapter->db_empty_task);
2846 2852
2847 t3_write_reg(adapter, A_SG_INT_CAUSE, status); 2853 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2848 if (status & SGE_FATALERR) 2854 if (status & SGE_FATALERR)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 3ab9f51918aa..95a8ba0759f1 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1433,7 +1433,10 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1433 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ 1433 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1434 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ 1434 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1435 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ 1435 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1436 F_HIRCQPARITYERROR) 1436 F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
1437 F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
1438 F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
1439 F_LOPIODRBDROPERR)
1437#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \ 1440#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1438 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \ 1441 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1439 F_NFASRCHFAIL) 1442 F_NFASRCHFAIL)
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index ad113b0f62db..0950fa40684f 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2908,6 +2908,7 @@ enum parport_pc_pci_cards {
2908 netmos_9805, 2908 netmos_9805,
2909 netmos_9815, 2909 netmos_9815,
2910 netmos_9901, 2910 netmos_9901,
2911 netmos_9865,
2911 quatech_sppxp100, 2912 quatech_sppxp100,
2912}; 2913};
2913 2914
@@ -2989,6 +2990,7 @@ static struct parport_pc_pci {
2989 /* netmos_9805 */ { 1, { { 0, -1 }, } }, 2990 /* netmos_9805 */ { 1, { { 0, -1 }, } },
2990 /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, 2991 /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
2991 /* netmos_9901 */ { 1, { { 0, -1 }, } }, 2992 /* netmos_9901 */ { 1, { { 0, -1 }, } },
2993 /* netmos_9865 */ { 1, { { 0, -1 }, } },
2992 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, 2994 /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
2993}; 2995};
2994 2996
@@ -3092,6 +3094,10 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
3092 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 }, 3094 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
3093 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901, 3095 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
3094 0xA000, 0x2000, 0, 0, netmos_9901 }, 3096 0xA000, 0x2000, 0, 0, netmos_9901 },
3097 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
3098 0xA000, 0x1000, 0, 0, netmos_9865 },
3099 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
3100 0xA000, 0x2000, 0, 0, netmos_9865 },
3095 /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ 3101 /* Quatech SPPXP-100 Parallel port PCI ExpressCard */
3096 { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, 3102 { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
3097 PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, 3103 PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index ec73294d1fa6..e2dc289f767c 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -40,7 +40,7 @@ static void update_resources (struct bus_node *bus_cur, int type, int rangeno);
40static int once_over (void); 40static int once_over (void);
41static int remove_ranges (struct bus_node *, struct bus_node *); 41static int remove_ranges (struct bus_node *, struct bus_node *);
42static int update_bridge_ranges (struct bus_node **); 42static int update_bridge_ranges (struct bus_node **);
43static int add_range (int type, struct range_node *, struct bus_node *); 43static int add_bus_range (int type, struct range_node *, struct bus_node *);
44static void fix_resources (struct bus_node *); 44static void fix_resources (struct bus_node *);
45static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8); 45static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8);
46 46
@@ -133,7 +133,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
133 newrange->rangeno = 1; 133 newrange->rangeno = 1;
134 else { 134 else {
135 /* need to insert our range */ 135 /* need to insert our range */
136 add_range (flag, newrange, newbus); 136 add_bus_range (flag, newrange, newbus);
137 debug ("%d resource Primary Bus inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end); 137 debug ("%d resource Primary Bus inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end);
138 } 138 }
139 139
@@ -384,7 +384,7 @@ int __init ibmphp_rsrc_init (void)
384 * Input: type of the resource, range to add, current bus 384 * Input: type of the resource, range to add, current bus
385 * Output: 0 or -1, bus and range ptrs 385 * Output: 0 or -1, bus and range ptrs
386 ********************************************************************************/ 386 ********************************************************************************/
387static int add_range (int type, struct range_node *range, struct bus_node *bus_cur) 387static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur)
388{ 388{
389 struct range_node *range_cur = NULL; 389 struct range_node *range_cur = NULL;
390 struct range_node *range_prev; 390 struct range_node *range_prev;
@@ -455,7 +455,7 @@ static int add_range (int type, struct range_node *range, struct bus_node *bus_c
455 455
456/******************************************************************************* 456/*******************************************************************************
457 * This routine goes through the list of resources of type 'type' and updates 457 * This routine goes through the list of resources of type 'type' and updates
458 * the range numbers that they correspond to. It was called from add_range fnc 458 * the range numbers that they correspond to. It was called from add_bus_range fnc
459 * 459 *
460 * Input: bus, type of the resource, the rangeno starting from which to update 460 * Input: bus, type of the resource, the rangeno starting from which to update
461 ******************************************************************************/ 461 ******************************************************************************/
@@ -1999,7 +1999,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
1999 1999
2000 if (bus_sec->noIORanges > 0) { 2000 if (bus_sec->noIORanges > 0) {
2001 if (!range_exists_already (range, bus_sec, IO)) { 2001 if (!range_exists_already (range, bus_sec, IO)) {
2002 add_range (IO, range, bus_sec); 2002 add_bus_range (IO, range, bus_sec);
2003 ++bus_sec->noIORanges; 2003 ++bus_sec->noIORanges;
2004 } else { 2004 } else {
2005 kfree (range); 2005 kfree (range);
@@ -2048,7 +2048,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
2048 2048
2049 if (bus_sec->noMemRanges > 0) { 2049 if (bus_sec->noMemRanges > 0) {
2050 if (!range_exists_already (range, bus_sec, MEM)) { 2050 if (!range_exists_already (range, bus_sec, MEM)) {
2051 add_range (MEM, range, bus_sec); 2051 add_bus_range (MEM, range, bus_sec);
2052 ++bus_sec->noMemRanges; 2052 ++bus_sec->noMemRanges;
2053 } else { 2053 } else {
2054 kfree (range); 2054 kfree (range);
@@ -2102,7 +2102,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
2102 2102
2103 if (bus_sec->noPFMemRanges > 0) { 2103 if (bus_sec->noPFMemRanges > 0) {
2104 if (!range_exists_already (range, bus_sec, PFMEM)) { 2104 if (!range_exists_already (range, bus_sec, PFMEM)) {
2105 add_range (PFMEM, range, bus_sec); 2105 add_bus_range (PFMEM, range, bus_sec);
2106 ++bus_sec->noPFMemRanges; 2106 ++bus_sec->noPFMemRanges;
2107 } else { 2107 } else {
2108 kfree (range); 2108 kfree (range);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 0a6601c76809..d189e4743e69 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -51,17 +51,23 @@ config PCMCIA_LOAD_CIS
51 51
52config PCMCIA_IOCTL 52config PCMCIA_IOCTL
53 bool "PCMCIA control ioctl (obsolete)" 53 bool "PCMCIA control ioctl (obsolete)"
54 depends on PCMCIA 54 depends on PCMCIA && ARM && !SMP && !PREEMPT
55 default y 55 default y
56 help 56 help
57 If you say Y here, the deprecated ioctl interface to the PCMCIA 57 If you say Y here, the deprecated ioctl interface to the PCMCIA
58 subsystem will be built. It is needed by cardmgr and cardctl 58 subsystem will be built. It is needed by the deprecated pcmcia-cs
59 (pcmcia-cs) to function properly. 59 tools (cardmgr, cardctl) to function properly.
60 60
61 You should use the new pcmciautils package instead (see 61 You should use the new pcmciautils package instead (see
62 <file:Documentation/Changes> for location and details). 62 <file:Documentation/Changes> for location and details).
63 63
64 If unsure, say Y. 64 This config option will most likely be removed from kernel 2.6.35,
65 the associated code from kernel 2.6.36.
66
67 As the PCMCIA ioctl is not locking safe, it depends on !SMP and
68 !PREEMPT.
69
70 If unsure, say N.
65 71
66config CARDBUS 72config CARDBUS
67 bool "32-bit CardBus support" 73 bool "32-bit CardBus support"
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index ac0686efbf75..e6ab2a47d8cb 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -71,7 +71,7 @@ int __ref cb_alloc(struct pcmcia_socket *s)
71 unsigned int max, pass; 71 unsigned int max, pass;
72 72
73 s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); 73 s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
74 pci_fixup_cardbus(bus); 74 pci_fixup_cardbus(bus);
75 75
76 max = bus->secondary; 76 max = bus->secondary;
77 for (pass = 0; pass < 2; pass++) 77 for (pass = 0; pass < 2; pass++)
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 2f3622dd4b69..f230f6543bff 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -54,46 +54,44 @@ static const u_int exponent[] = {
54/* Upper limit on reasonable # of tuples */ 54/* Upper limit on reasonable # of tuples */
55#define MAX_TUPLES 200 55#define MAX_TUPLES 200
56 56
57/*====================================================================*/
58
59/* Parameters that can be set with 'insmod' */
60
61/* 16-bit CIS? */ 57/* 16-bit CIS? */
62static int cis_width; 58static int cis_width;
63module_param(cis_width, int, 0444); 59module_param(cis_width, int, 0444);
64 60
65void release_cis_mem(struct pcmcia_socket *s) 61void release_cis_mem(struct pcmcia_socket *s)
66{ 62{
67 mutex_lock(&s->ops_mutex); 63 mutex_lock(&s->ops_mutex);
68 if (s->cis_mem.flags & MAP_ACTIVE) { 64 if (s->cis_mem.flags & MAP_ACTIVE) {
69 s->cis_mem.flags &= ~MAP_ACTIVE; 65 s->cis_mem.flags &= ~MAP_ACTIVE;
70 s->ops->set_mem_map(s, &s->cis_mem); 66 s->ops->set_mem_map(s, &s->cis_mem);
71 if (s->cis_mem.res) { 67 if (s->cis_mem.res) {
72 release_resource(s->cis_mem.res); 68 release_resource(s->cis_mem.res);
73 kfree(s->cis_mem.res); 69 kfree(s->cis_mem.res);
74 s->cis_mem.res = NULL; 70 s->cis_mem.res = NULL;
71 }
72 iounmap(s->cis_virt);
73 s->cis_virt = NULL;
75 } 74 }
76 iounmap(s->cis_virt); 75 mutex_unlock(&s->ops_mutex);
77 s->cis_virt = NULL;
78 }
79 mutex_unlock(&s->ops_mutex);
80} 76}
81 77
82/* 78/**
83 * Map the card memory at "card_offset" into virtual space. 79 * set_cis_map() - map the card memory at "card_offset" into virtual space.
80 *
84 * If flags & MAP_ATTRIB, map the attribute space, otherwise 81 * If flags & MAP_ATTRIB, map the attribute space, otherwise
85 * map the memory space. 82 * map the memory space.
86 * 83 *
87 * Must be called with ops_mutex held. 84 * Must be called with ops_mutex held.
88 */ 85 */
89static void __iomem * 86static void __iomem *set_cis_map(struct pcmcia_socket *s,
90set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flags) 87 unsigned int card_offset, unsigned int flags)
91{ 88{
92 pccard_mem_map *mem = &s->cis_mem; 89 pccard_mem_map *mem = &s->cis_mem;
93 int ret; 90 int ret;
94 91
95 if (!(s->features & SS_CAP_STATIC_MAP) && (mem->res == NULL)) { 92 if (!(s->features & SS_CAP_STATIC_MAP) && (mem->res == NULL)) {
96 mem->res = pcmcia_find_mem_region(0, s->map_size, s->map_size, 0, s); 93 mem->res = pcmcia_find_mem_region(0, s->map_size,
94 s->map_size, 0, s);
97 if (mem->res == NULL) { 95 if (mem->res == NULL) {
98 dev_printk(KERN_NOTICE, &s->dev, 96 dev_printk(KERN_NOTICE, &s->dev,
99 "cs: unable to map card memory!\n"); 97 "cs: unable to map card memory!\n");
@@ -124,165 +122,170 @@ set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flag
124 return s->cis_virt; 122 return s->cis_virt;
125} 123}
126 124
127/*======================================================================
128
129 Low-level functions to read and write CIS memory. I think the
130 write routine is only useful for writing one-byte registers.
131
132======================================================================*/
133 125
134/* Bits in attr field */ 126/* Bits in attr field */
135#define IS_ATTR 1 127#define IS_ATTR 1
136#define IS_INDIRECT 8 128#define IS_INDIRECT 8
137 129
130/**
131 * pcmcia_read_cis_mem() - low-level function to read CIS memory
132 */
138int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, 133int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
139 u_int len, void *ptr) 134 u_int len, void *ptr)
140{ 135{
141 void __iomem *sys, *end; 136 void __iomem *sys, *end;
142 unsigned char *buf = ptr; 137 unsigned char *buf = ptr;
143
144 dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
145
146 mutex_lock(&s->ops_mutex);
147 if (attr & IS_INDIRECT) {
148 /* Indirect accesses use a bunch of special registers at fixed
149 locations in common memory */
150 u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
151 if (attr & IS_ATTR) {
152 addr *= 2;
153 flags = ICTRL0_AUTOINC;
154 }
155 138
156 sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0)); 139 dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
157 if (!sys) {
158 dev_dbg(&s->dev, "could not map memory\n");
159 memset(ptr, 0xff, len);
160 mutex_unlock(&s->ops_mutex);
161 return -1;
162 }
163 140
164 writeb(flags, sys+CISREG_ICTRL0); 141 mutex_lock(&s->ops_mutex);
165 writeb(addr & 0xff, sys+CISREG_IADDR0); 142 if (attr & IS_INDIRECT) {
166 writeb((addr>>8) & 0xff, sys+CISREG_IADDR1); 143 /* Indirect accesses use a bunch of special registers at fixed
167 writeb((addr>>16) & 0xff, sys+CISREG_IADDR2); 144 locations in common memory */
168 writeb((addr>>24) & 0xff, sys+CISREG_IADDR3); 145 u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
169 for ( ; len > 0; len--, buf++) 146 if (attr & IS_ATTR) {
170 *buf = readb(sys+CISREG_IDATA0); 147 addr *= 2;
171 } else { 148 flags = ICTRL0_AUTOINC;
172 u_int inc = 1, card_offset, flags; 149 }
173
174 if (addr > CISTPL_MAX_CIS_SIZE)
175 dev_dbg(&s->dev, "attempt to read CIS mem at addr %#x", addr);
176
177 flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
178 if (attr) {
179 flags |= MAP_ATTRIB;
180 inc++;
181 addr *= 2;
182 }
183 150
184 card_offset = addr & ~(s->map_size-1); 151 sys = set_cis_map(s, 0, MAP_ACTIVE |
185 while (len) { 152 ((cis_width) ? MAP_16BIT : 0));
186 sys = set_cis_map(s, card_offset, flags); 153 if (!sys) {
187 if (!sys) { 154 dev_dbg(&s->dev, "could not map memory\n");
188 dev_dbg(&s->dev, "could not map memory\n"); 155 memset(ptr, 0xff, len);
189 memset(ptr, 0xff, len); 156 mutex_unlock(&s->ops_mutex);
190 mutex_unlock(&s->ops_mutex); 157 return -1;
191 return -1; 158 }
192 } 159
193 end = sys + s->map_size; 160 writeb(flags, sys+CISREG_ICTRL0);
194 sys = sys + (addr & (s->map_size-1)); 161 writeb(addr & 0xff, sys+CISREG_IADDR0);
195 for ( ; len > 0; len--, buf++, sys += inc) { 162 writeb((addr>>8) & 0xff, sys+CISREG_IADDR1);
196 if (sys == end) 163 writeb((addr>>16) & 0xff, sys+CISREG_IADDR2);
197 break; 164 writeb((addr>>24) & 0xff, sys+CISREG_IADDR3);
198 *buf = readb(sys); 165 for ( ; len > 0; len--, buf++)
199 } 166 *buf = readb(sys+CISREG_IDATA0);
200 card_offset += s->map_size; 167 } else {
201 addr = 0; 168 u_int inc = 1, card_offset, flags;
169
170 if (addr > CISTPL_MAX_CIS_SIZE)
171 dev_dbg(&s->dev,
172 "attempt to read CIS mem at addr %#x", addr);
173
174 flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
175 if (attr) {
176 flags |= MAP_ATTRIB;
177 inc++;
178 addr *= 2;
179 }
180
181 card_offset = addr & ~(s->map_size-1);
182 while (len) {
183 sys = set_cis_map(s, card_offset, flags);
184 if (!sys) {
185 dev_dbg(&s->dev, "could not map memory\n");
186 memset(ptr, 0xff, len);
187 mutex_unlock(&s->ops_mutex);
188 return -1;
189 }
190 end = sys + s->map_size;
191 sys = sys + (addr & (s->map_size-1));
192 for ( ; len > 0; len--, buf++, sys += inc) {
193 if (sys == end)
194 break;
195 *buf = readb(sys);
196 }
197 card_offset += s->map_size;
198 addr = 0;
199 }
202 } 200 }
203 } 201 mutex_unlock(&s->ops_mutex);
204 mutex_unlock(&s->ops_mutex); 202 dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
205 dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n", 203 *(u_char *)(ptr+0), *(u_char *)(ptr+1),
206 *(u_char *)(ptr+0), *(u_char *)(ptr+1), 204 *(u_char *)(ptr+2), *(u_char *)(ptr+3));
207 *(u_char *)(ptr+2), *(u_char *)(ptr+3)); 205 return 0;
208 return 0;
209} 206}
210 207
211 208
209/**
210 * pcmcia_write_cis_mem() - low-level function to write CIS memory
211 *
212 * Probably only useful for writing one-byte registers.
213 */
212void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, 214void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
213 u_int len, void *ptr) 215 u_int len, void *ptr)
214{ 216{
215 void __iomem *sys, *end; 217 void __iomem *sys, *end;
216 unsigned char *buf = ptr; 218 unsigned char *buf = ptr;
217
218 dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
219
220 mutex_lock(&s->ops_mutex);
221 if (attr & IS_INDIRECT) {
222 /* Indirect accesses use a bunch of special registers at fixed
223 locations in common memory */
224 u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
225 if (attr & IS_ATTR) {
226 addr *= 2;
227 flags = ICTRL0_AUTOINC;
228 }
229 219
230 sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0)); 220 dev_dbg(&s->dev,
231 if (!sys) { 221 "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
232 dev_dbg(&s->dev, "could not map memory\n");
233 mutex_unlock(&s->ops_mutex);
234 return; /* FIXME: Error */
235 }
236 222
237 writeb(flags, sys+CISREG_ICTRL0); 223 mutex_lock(&s->ops_mutex);
238 writeb(addr & 0xff, sys+CISREG_IADDR0); 224 if (attr & IS_INDIRECT) {
239 writeb((addr>>8) & 0xff, sys+CISREG_IADDR1); 225 /* Indirect accesses use a bunch of special registers at fixed
240 writeb((addr>>16) & 0xff, sys+CISREG_IADDR2); 226 locations in common memory */
241 writeb((addr>>24) & 0xff, sys+CISREG_IADDR3); 227 u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
242 for ( ; len > 0; len--, buf++) 228 if (attr & IS_ATTR) {
243 writeb(*buf, sys+CISREG_IDATA0); 229 addr *= 2;
244 } else { 230 flags = ICTRL0_AUTOINC;
245 u_int inc = 1, card_offset, flags; 231 }
246
247 flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
248 if (attr & IS_ATTR) {
249 flags |= MAP_ATTRIB;
250 inc++;
251 addr *= 2;
252 }
253 232
254 card_offset = addr & ~(s->map_size-1); 233 sys = set_cis_map(s, 0, MAP_ACTIVE |
255 while (len) { 234 ((cis_width) ? MAP_16BIT : 0));
256 sys = set_cis_map(s, card_offset, flags); 235 if (!sys) {
257 if (!sys) { 236 dev_dbg(&s->dev, "could not map memory\n");
258 dev_dbg(&s->dev, "could not map memory\n"); 237 mutex_unlock(&s->ops_mutex);
259 mutex_unlock(&s->ops_mutex); 238 return; /* FIXME: Error */
260 return; /* FIXME: error */ 239 }
261 } 240
262 241 writeb(flags, sys+CISREG_ICTRL0);
263 end = sys + s->map_size; 242 writeb(addr & 0xff, sys+CISREG_IADDR0);
264 sys = sys + (addr & (s->map_size-1)); 243 writeb((addr>>8) & 0xff, sys+CISREG_IADDR1);
265 for ( ; len > 0; len--, buf++, sys += inc) { 244 writeb((addr>>16) & 0xff, sys+CISREG_IADDR2);
266 if (sys == end) 245 writeb((addr>>24) & 0xff, sys+CISREG_IADDR3);
267 break; 246 for ( ; len > 0; len--, buf++)
268 writeb(*buf, sys); 247 writeb(*buf, sys+CISREG_IDATA0);
269 } 248 } else {
270 card_offset += s->map_size; 249 u_int inc = 1, card_offset, flags;
271 addr = 0;
272 }
273 }
274 mutex_unlock(&s->ops_mutex);
275}
276 250
251 flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
252 if (attr & IS_ATTR) {
253 flags |= MAP_ATTRIB;
254 inc++;
255 addr *= 2;
256 }
277 257
278/*====================================================================== 258 card_offset = addr & ~(s->map_size-1);
259 while (len) {
260 sys = set_cis_map(s, card_offset, flags);
261 if (!sys) {
262 dev_dbg(&s->dev, "could not map memory\n");
263 mutex_unlock(&s->ops_mutex);
264 return; /* FIXME: error */
265 }
279 266
280 This is a wrapper around read_cis_mem, with the same interface, 267 end = sys + s->map_size;
281 but which caches information, for cards whose CIS may not be 268 sys = sys + (addr & (s->map_size-1));
282 readable all the time. 269 for ( ; len > 0; len--, buf++, sys += inc) {
270 if (sys == end)
271 break;
272 writeb(*buf, sys);
273 }
274 card_offset += s->map_size;
275 addr = 0;
276 }
277 }
278 mutex_unlock(&s->ops_mutex);
279}
283 280
284======================================================================*/
285 281
282/**
283 * read_cis_cache() - read CIS memory or its associated cache
284 *
285 * This is a wrapper around read_cis_mem, with the same interface,
286 * but which caches information, for cards whose CIS may not be
287 * readable all the time.
288 */
286static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, 289static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
287 size_t len, void *ptr) 290 size_t len, void *ptr)
288{ 291{
@@ -353,7 +356,6 @@ remove_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, u_int len)
353 * This destroys the CIS cache but keeps any fake CIS alive. Must be 356 * This destroys the CIS cache but keeps any fake CIS alive. Must be
354 * called with ops_mutex held. 357 * called with ops_mutex held.
355 */ 358 */
356
357void destroy_cis_cache(struct pcmcia_socket *s) 359void destroy_cis_cache(struct pcmcia_socket *s)
358{ 360{
359 struct list_head *l, *n; 361 struct list_head *l, *n;
@@ -366,13 +368,9 @@ void destroy_cis_cache(struct pcmcia_socket *s)
366 } 368 }
367} 369}
368 370
369/*====================================================================== 371/**
370 372 * verify_cis_cache() - does the CIS match what is in the CIS cache?
371 This verifies if the CIS of a card matches what is in the CIS 373 */
372 cache.
373
374======================================================================*/
375
376int verify_cis_cache(struct pcmcia_socket *s) 374int verify_cis_cache(struct pcmcia_socket *s)
377{ 375{
378 struct cis_cache_entry *cis; 376 struct cis_cache_entry *cis;
@@ -404,13 +402,12 @@ int verify_cis_cache(struct pcmcia_socket *s)
404 return 0; 402 return 0;
405} 403}
406 404
407/*====================================================================== 405/**
408 406 * pcmcia_replace_cis() - use a replacement CIS instead of the card's CIS
409 For really bad cards, we provide a facility for uploading a 407 *
410 replacement CIS. 408 * For really bad cards, we provide a facility for uploading a
411 409 * replacement CIS.
412======================================================================*/ 410 */
413
414int pcmcia_replace_cis(struct pcmcia_socket *s, 411int pcmcia_replace_cis(struct pcmcia_socket *s,
415 const u8 *data, const size_t len) 412 const u8 *data, const size_t len)
416{ 413{
@@ -433,17 +430,13 @@ int pcmcia_replace_cis(struct pcmcia_socket *s,
433 return 0; 430 return 0;
434} 431}
435 432
436/*====================================================================== 433/* The high-level CIS tuple services */
437
438 The high-level CIS tuple services
439
440======================================================================*/
441 434
442typedef struct tuple_flags { 435typedef struct tuple_flags {
443 u_int link_space:4; 436 u_int link_space:4;
444 u_int has_link:1; 437 u_int has_link:1;
445 u_int mfc_fn:3; 438 u_int mfc_fn:3;
446 u_int space:4; 439 u_int space:4;
447} tuple_flags; 440} tuple_flags;
448 441
449#define LINK_SPACE(f) (((tuple_flags *)(&(f)))->link_space) 442#define LINK_SPACE(f) (((tuple_flags *)(&(f)))->link_space)
@@ -451,982 +444,961 @@ typedef struct tuple_flags {
451#define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn) 444#define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn)
452#define SPACE(f) (((tuple_flags *)(&(f)))->space) 445#define SPACE(f) (((tuple_flags *)(&(f)))->space)
453 446
454int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple) 447int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function,
448 tuple_t *tuple)
455{ 449{
456 if (!s) 450 if (!s)
457 return -EINVAL; 451 return -EINVAL;
458 452
459 if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS)) 453 if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
460 return -ENODEV; 454 return -ENODEV;
461 tuple->TupleLink = tuple->Flags = 0; 455 tuple->TupleLink = tuple->Flags = 0;
462 456
463 /* Assume presence of a LONGLINK_C to address 0 */ 457 /* Assume presence of a LONGLINK_C to address 0 */
464 tuple->CISOffset = tuple->LinkOffset = 0; 458 tuple->CISOffset = tuple->LinkOffset = 0;
465 SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1; 459 SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1;
466 460
467 if ((s->functions > 1) && !(tuple->Attributes & TUPLE_RETURN_COMMON)) { 461 if ((s->functions > 1) && !(tuple->Attributes & TUPLE_RETURN_COMMON)) {
468 cisdata_t req = tuple->DesiredTuple; 462 cisdata_t req = tuple->DesiredTuple;
469 tuple->DesiredTuple = CISTPL_LONGLINK_MFC; 463 tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
470 if (pccard_get_next_tuple(s, function, tuple) == 0) { 464 if (pccard_get_next_tuple(s, function, tuple) == 0) {
471 tuple->DesiredTuple = CISTPL_LINKTARGET; 465 tuple->DesiredTuple = CISTPL_LINKTARGET;
472 if (pccard_get_next_tuple(s, function, tuple) != 0) 466 if (pccard_get_next_tuple(s, function, tuple) != 0)
473 return -ENOSPC; 467 return -ENOSPC;
474 } else 468 } else
475 tuple->CISOffset = tuple->TupleLink = 0; 469 tuple->CISOffset = tuple->TupleLink = 0;
476 tuple->DesiredTuple = req; 470 tuple->DesiredTuple = req;
477 } 471 }
478 return pccard_get_next_tuple(s, function, tuple); 472 return pccard_get_next_tuple(s, function, tuple);
479} 473}
480 474
481static int follow_link(struct pcmcia_socket *s, tuple_t *tuple) 475static int follow_link(struct pcmcia_socket *s, tuple_t *tuple)
482{ 476{
483 u_char link[5]; 477 u_char link[5];
484 u_int ofs; 478 u_int ofs;
485 int ret; 479 int ret;
486 480
487 if (MFC_FN(tuple->Flags)) { 481 if (MFC_FN(tuple->Flags)) {
488 /* Get indirect link from the MFC tuple */ 482 /* Get indirect link from the MFC tuple */
489 ret = read_cis_cache(s, LINK_SPACE(tuple->Flags), 483 ret = read_cis_cache(s, LINK_SPACE(tuple->Flags),
490 tuple->LinkOffset, 5, link); 484 tuple->LinkOffset, 5, link);
491 if (ret) 485 if (ret)
486 return -1;
487 ofs = get_unaligned_le32(link + 1);
488 SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR);
489 /* Move to the next indirect link */
490 tuple->LinkOffset += 5;
491 MFC_FN(tuple->Flags)--;
492 } else if (HAS_LINK(tuple->Flags)) {
493 ofs = tuple->LinkOffset;
494 SPACE(tuple->Flags) = LINK_SPACE(tuple->Flags);
495 HAS_LINK(tuple->Flags) = 0;
496 } else
492 return -1; 497 return -1;
493 ofs = get_unaligned_le32(link + 1); 498
494 SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR); 499 if (SPACE(tuple->Flags)) {
495 /* Move to the next indirect link */ 500 /* This is ugly, but a common CIS error is to code the long
496 tuple->LinkOffset += 5; 501 link offset incorrectly, so we check the right spot... */
497 MFC_FN(tuple->Flags)--; 502 ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
498 } else if (HAS_LINK(tuple->Flags)) { 503 if (ret)
499 ofs = tuple->LinkOffset; 504 return -1;
500 SPACE(tuple->Flags) = LINK_SPACE(tuple->Flags); 505 if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
501 HAS_LINK(tuple->Flags) = 0; 506 (strncmp(link+2, "CIS", 3) == 0))
502 } else { 507 return ofs;
503 return -1; 508 remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5);
504 } 509 /* Then, we try the wrong spot... */
505 if (SPACE(tuple->Flags)) { 510 ofs = ofs >> 1;
506 /* This is ugly, but a common CIS error is to code the long 511 }
507 link offset incorrectly, so we check the right spot... */
508 ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link); 512 ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
509 if (ret) 513 if (ret)
510 return -1; 514 return -1;
511 if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) && 515 if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
512 (strncmp(link+2, "CIS", 3) == 0)) 516 (strncmp(link+2, "CIS", 3) == 0))
513 return ofs; 517 return ofs;
514 remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5); 518 remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5);
515 /* Then, we try the wrong spot... */ 519 return -1;
516 ofs = ofs >> 1;
517 }
518 ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
519 if (ret)
520 return -1;
521 if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
522 (strncmp(link+2, "CIS", 3) == 0))
523 return ofs;
524 remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5);
525 return -1;
526} 520}
527 521
528int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple) 522int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function,
523 tuple_t *tuple)
529{ 524{
530 u_char link[2], tmp; 525 u_char link[2], tmp;
531 int ofs, i, attr; 526 int ofs, i, attr;
532 int ret; 527 int ret;
533
534 if (!s)
535 return -EINVAL;
536 if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
537 return -ENODEV;
538
539 link[1] = tuple->TupleLink;
540 ofs = tuple->CISOffset + tuple->TupleLink;
541 attr = SPACE(tuple->Flags);
542
543 for (i = 0; i < MAX_TUPLES; i++) {
544 if (link[1] == 0xff) {
545 link[0] = CISTPL_END;
546 } else {
547 ret = read_cis_cache(s, attr, ofs, 2, link);
548 if (ret)
549 return -1;
550 if (link[0] == CISTPL_NULL) {
551 ofs++; continue;
552 }
553 }
554 528
555 /* End of chain? Follow long link if possible */ 529 if (!s)
556 if (link[0] == CISTPL_END) { 530 return -EINVAL;
557 ofs = follow_link(s, tuple); 531 if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
558 if (ofs < 0) 532 return -ENODEV;
559 return -ENOSPC;
560 attr = SPACE(tuple->Flags);
561 ret = read_cis_cache(s, attr, ofs, 2, link);
562 if (ret)
563 return -1;
564 }
565 533
566 /* Is this a link tuple? Make a note of it */ 534 link[1] = tuple->TupleLink;
567 if ((link[0] == CISTPL_LONGLINK_A) || 535 ofs = tuple->CISOffset + tuple->TupleLink;
568 (link[0] == CISTPL_LONGLINK_C) || 536 attr = SPACE(tuple->Flags);
569 (link[0] == CISTPL_LONGLINK_MFC) || 537
570 (link[0] == CISTPL_LINKTARGET) || 538 for (i = 0; i < MAX_TUPLES; i++) {
571 (link[0] == CISTPL_INDIRECT) || 539 if (link[1] == 0xff)
572 (link[0] == CISTPL_NO_LINK)) { 540 link[0] = CISTPL_END;
573 switch (link[0]) { 541 else {
574 case CISTPL_LONGLINK_A: 542 ret = read_cis_cache(s, attr, ofs, 2, link);
575 HAS_LINK(tuple->Flags) = 1; 543 if (ret)
576 LINK_SPACE(tuple->Flags) = attr | IS_ATTR; 544 return -1;
577 ret = read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset); 545 if (link[0] == CISTPL_NULL) {
578 if (ret) 546 ofs++;
579 return -1; 547 continue;
580 break; 548 }
581 case CISTPL_LONGLINK_C:
582 HAS_LINK(tuple->Flags) = 1;
583 LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR;
584 ret = read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset);
585 if (ret)
586 return -1;
587 break;
588 case CISTPL_INDIRECT:
589 HAS_LINK(tuple->Flags) = 1;
590 LINK_SPACE(tuple->Flags) = IS_ATTR | IS_INDIRECT;
591 tuple->LinkOffset = 0;
592 break;
593 case CISTPL_LONGLINK_MFC:
594 tuple->LinkOffset = ofs + 3;
595 LINK_SPACE(tuple->Flags) = attr;
596 if (function == BIND_FN_ALL) {
597 /* Follow all the MFC links */
598 ret = read_cis_cache(s, attr, ofs+2, 1, &tmp);
599 if (ret)
600 return -1;
601 MFC_FN(tuple->Flags) = tmp;
602 } else {
603 /* Follow exactly one of the links */
604 MFC_FN(tuple->Flags) = 1;
605 tuple->LinkOffset += function * 5;
606 } 549 }
607 break;
608 case CISTPL_NO_LINK:
609 HAS_LINK(tuple->Flags) = 0;
610 break;
611 }
612 if ((tuple->Attributes & TUPLE_RETURN_LINK) &&
613 (tuple->DesiredTuple == RETURN_FIRST_TUPLE))
614 break;
615 } else
616 if (tuple->DesiredTuple == RETURN_FIRST_TUPLE)
617 break;
618 550
619 if (link[0] == tuple->DesiredTuple) 551 /* End of chain? Follow long link if possible */
620 break; 552 if (link[0] == CISTPL_END) {
621 ofs += link[1] + 2; 553 ofs = follow_link(s, tuple);
622 } 554 if (ofs < 0)
623 if (i == MAX_TUPLES) { 555 return -ENOSPC;
624 dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n"); 556 attr = SPACE(tuple->Flags);
625 return -ENOSPC; 557 ret = read_cis_cache(s, attr, ofs, 2, link);
626 } 558 if (ret)
627 559 return -1;
628 tuple->TupleCode = link[0]; 560 }
629 tuple->TupleLink = link[1];
630 tuple->CISOffset = ofs + 2;
631 return 0;
632}
633 561
634/*====================================================================*/ 562 /* Is this a link tuple? Make a note of it */
563 if ((link[0] == CISTPL_LONGLINK_A) ||
564 (link[0] == CISTPL_LONGLINK_C) ||
565 (link[0] == CISTPL_LONGLINK_MFC) ||
566 (link[0] == CISTPL_LINKTARGET) ||
567 (link[0] == CISTPL_INDIRECT) ||
568 (link[0] == CISTPL_NO_LINK)) {
569 switch (link[0]) {
570 case CISTPL_LONGLINK_A:
571 HAS_LINK(tuple->Flags) = 1;
572 LINK_SPACE(tuple->Flags) = attr | IS_ATTR;
573 ret = read_cis_cache(s, attr, ofs+2, 4,
574 &tuple->LinkOffset);
575 if (ret)
576 return -1;
577 break;
578 case CISTPL_LONGLINK_C:
579 HAS_LINK(tuple->Flags) = 1;
580 LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR;
581 ret = read_cis_cache(s, attr, ofs+2, 4,
582 &tuple->LinkOffset);
583 if (ret)
584 return -1;
585 break;
586 case CISTPL_INDIRECT:
587 HAS_LINK(tuple->Flags) = 1;
588 LINK_SPACE(tuple->Flags) = IS_ATTR |
589 IS_INDIRECT;
590 tuple->LinkOffset = 0;
591 break;
592 case CISTPL_LONGLINK_MFC:
593 tuple->LinkOffset = ofs + 3;
594 LINK_SPACE(tuple->Flags) = attr;
595 if (function == BIND_FN_ALL) {
596 /* Follow all the MFC links */
597 ret = read_cis_cache(s, attr, ofs+2,
598 1, &tmp);
599 if (ret)
600 return -1;
601 MFC_FN(tuple->Flags) = tmp;
602 } else {
603 /* Follow exactly one of the links */
604 MFC_FN(tuple->Flags) = 1;
605 tuple->LinkOffset += function * 5;
606 }
607 break;
608 case CISTPL_NO_LINK:
609 HAS_LINK(tuple->Flags) = 0;
610 break;
611 }
612 if ((tuple->Attributes & TUPLE_RETURN_LINK) &&
613 (tuple->DesiredTuple == RETURN_FIRST_TUPLE))
614 break;
615 } else
616 if (tuple->DesiredTuple == RETURN_FIRST_TUPLE)
617 break;
618
619 if (link[0] == tuple->DesiredTuple)
620 break;
621 ofs += link[1] + 2;
622 }
623 if (i == MAX_TUPLES) {
624 dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n");
625 return -ENOSPC;
626 }
635 627
636#define _MIN(a, b) (((a) < (b)) ? (a) : (b)) 628 tuple->TupleCode = link[0];
629 tuple->TupleLink = link[1];
630 tuple->CISOffset = ofs + 2;
631 return 0;
632}
637 633
638int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple) 634int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple)
639{ 635{
640 u_int len; 636 u_int len;
641 int ret; 637 int ret;
642 638
643 if (!s) 639 if (!s)
644 return -EINVAL; 640 return -EINVAL;
645 641
646 if (tuple->TupleLink < tuple->TupleOffset) 642 if (tuple->TupleLink < tuple->TupleOffset)
647 return -ENOSPC; 643 return -ENOSPC;
648 len = tuple->TupleLink - tuple->TupleOffset; 644 len = tuple->TupleLink - tuple->TupleOffset;
649 tuple->TupleDataLen = tuple->TupleLink; 645 tuple->TupleDataLen = tuple->TupleLink;
650 if (len == 0) 646 if (len == 0)
647 return 0;
648 ret = read_cis_cache(s, SPACE(tuple->Flags),
649 tuple->CISOffset + tuple->TupleOffset,
650 min(len, (u_int) tuple->TupleDataMax),
651 tuple->TupleData);
652 if (ret)
653 return -1;
651 return 0; 654 return 0;
652 ret = read_cis_cache(s, SPACE(tuple->Flags),
653 tuple->CISOffset + tuple->TupleOffset,
654 _MIN(len, tuple->TupleDataMax), tuple->TupleData);
655 if (ret)
656 return -1;
657 return 0;
658} 655}
659 656
660 657
661/*====================================================================== 658/* Parsing routines for individual tuples */
662
663 Parsing routines for individual tuples
664
665======================================================================*/
666 659
667static int parse_device(tuple_t *tuple, cistpl_device_t *device) 660static int parse_device(tuple_t *tuple, cistpl_device_t *device)
668{ 661{
669 int i; 662 int i;
670 u_char scale; 663 u_char scale;
671 u_char *p, *q; 664 u_char *p, *q;
672 665
673 p = (u_char *)tuple->TupleData; 666 p = (u_char *)tuple->TupleData;
674 q = p + tuple->TupleDataLen; 667 q = p + tuple->TupleDataLen;
675 668
676 device->ndev = 0; 669 device->ndev = 0;
677 for (i = 0; i < CISTPL_MAX_DEVICES; i++) { 670 for (i = 0; i < CISTPL_MAX_DEVICES; i++) {
678 671
679 if (*p == 0xff) 672 if (*p == 0xff)
680 break; 673 break;
681 device->dev[i].type = (*p >> 4); 674 device->dev[i].type = (*p >> 4);
682 device->dev[i].wp = (*p & 0x08) ? 1 : 0; 675 device->dev[i].wp = (*p & 0x08) ? 1 : 0;
683 switch (*p & 0x07) { 676 switch (*p & 0x07) {
684 case 0: 677 case 0:
685 device->dev[i].speed = 0; 678 device->dev[i].speed = 0;
686 break; 679 break;
687 case 1: 680 case 1:
688 device->dev[i].speed = 250; 681 device->dev[i].speed = 250;
689 break; 682 break;
690 case 2: 683 case 2:
691 device->dev[i].speed = 200; 684 device->dev[i].speed = 200;
692 break; 685 break;
693 case 3: 686 case 3:
694 device->dev[i].speed = 150; 687 device->dev[i].speed = 150;
695 break; 688 break;
696 case 4: 689 case 4:
697 device->dev[i].speed = 100; 690 device->dev[i].speed = 100;
698 break; 691 break;
699 case 7: 692 case 7:
700 if (++p == q)
701 return -EINVAL;
702 device->dev[i].speed = SPEED_CVT(*p);
703 while (*p & 0x80)
704 if (++p == q) 693 if (++p == q)
705 return -EINVAL; 694 return -EINVAL;
706 break; 695 device->dev[i].speed = SPEED_CVT(*p);
707 default: 696 while (*p & 0x80)
708 return -EINVAL; 697 if (++p == q)
709 } 698 return -EINVAL;
699 break;
700 default:
701 return -EINVAL;
702 }
710 703
711 if (++p == q) 704 if (++p == q)
712 return -EINVAL; 705 return -EINVAL;
713 if (*p == 0xff) 706 if (*p == 0xff)
714 break; 707 break;
715 scale = *p & 7; 708 scale = *p & 7;
716 if (scale == 7) 709 if (scale == 7)
717 return -EINVAL; 710 return -EINVAL;
718 device->dev[i].size = ((*p >> 3) + 1) * (512 << (scale*2)); 711 device->dev[i].size = ((*p >> 3) + 1) * (512 << (scale*2));
719 device->ndev++; 712 device->ndev++;
720 if (++p == q) 713 if (++p == q)
721 break; 714 break;
722 } 715 }
723 716
724 return 0; 717 return 0;
725} 718}
726 719
727/*====================================================================*/
728 720
729static int parse_checksum(tuple_t *tuple, cistpl_checksum_t *csum) 721static int parse_checksum(tuple_t *tuple, cistpl_checksum_t *csum)
730{ 722{
731 u_char *p; 723 u_char *p;
732 if (tuple->TupleDataLen < 5) 724 if (tuple->TupleDataLen < 5)
733 return -EINVAL; 725 return -EINVAL;
734 p = (u_char *) tuple->TupleData; 726 p = (u_char *) tuple->TupleData;
735 csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2; 727 csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2;
736 csum->len = get_unaligned_le16(p + 2); 728 csum->len = get_unaligned_le16(p + 2);
737 csum->sum = *(p + 4); 729 csum->sum = *(p + 4);
738 return 0; 730 return 0;
739} 731}
740 732
741/*====================================================================*/
742 733
743static int parse_longlink(tuple_t *tuple, cistpl_longlink_t *link) 734static int parse_longlink(tuple_t *tuple, cistpl_longlink_t *link)
744{ 735{
745 if (tuple->TupleDataLen < 4) 736 if (tuple->TupleDataLen < 4)
746 return -EINVAL; 737 return -EINVAL;
747 link->addr = get_unaligned_le32(tuple->TupleData); 738 link->addr = get_unaligned_le32(tuple->TupleData);
748 return 0; 739 return 0;
749} 740}
750 741
751/*====================================================================*/
752 742
753static int parse_longlink_mfc(tuple_t *tuple, 743static int parse_longlink_mfc(tuple_t *tuple, cistpl_longlink_mfc_t *link)
754 cistpl_longlink_mfc_t *link)
755{ 744{
756 u_char *p; 745 u_char *p;
757 int i; 746 int i;
758 747
759 p = (u_char *)tuple->TupleData; 748 p = (u_char *)tuple->TupleData;
760 749
761 link->nfn = *p; p++; 750 link->nfn = *p; p++;
762 if (tuple->TupleDataLen <= link->nfn*5) 751 if (tuple->TupleDataLen <= link->nfn*5)
763 return -EINVAL; 752 return -EINVAL;
764 for (i = 0; i < link->nfn; i++) { 753 for (i = 0; i < link->nfn; i++) {
765 link->fn[i].space = *p; p++; 754 link->fn[i].space = *p; p++;
766 link->fn[i].addr = get_unaligned_le32(p); 755 link->fn[i].addr = get_unaligned_le32(p);
767 p += 4; 756 p += 4;
768 } 757 }
769 return 0; 758 return 0;
770} 759}
771 760
772/*====================================================================*/
773 761
774static int parse_strings(u_char *p, u_char *q, int max, 762static int parse_strings(u_char *p, u_char *q, int max,
775 char *s, u_char *ofs, u_char *found) 763 char *s, u_char *ofs, u_char *found)
776{ 764{
777 int i, j, ns; 765 int i, j, ns;
778 766
779 if (p == q) 767 if (p == q)
780 return -EINVAL; 768 return -EINVAL;
781 ns = 0; j = 0; 769 ns = 0; j = 0;
782 for (i = 0; i < max; i++) { 770 for (i = 0; i < max; i++) {
783 if (*p == 0xff) 771 if (*p == 0xff)
784 break; 772 break;
785 ofs[i] = j; 773 ofs[i] = j;
786 ns++; 774 ns++;
787 for (;;) { 775 for (;;) {
788 s[j++] = (*p == 0xff) ? '\0' : *p; 776 s[j++] = (*p == 0xff) ? '\0' : *p;
789 if ((*p == '\0') || (*p == 0xff)) 777 if ((*p == '\0') || (*p == 0xff))
790 break; 778 break;
791 if (++p == q) 779 if (++p == q)
792 return -EINVAL; 780 return -EINVAL;
781 }
782 if ((*p == 0xff) || (++p == q))
783 break;
793 } 784 }
794 if ((*p == 0xff) || (++p == q)) 785 if (found) {
795 break; 786 *found = ns;
796 } 787 return 0;
797 if (found) { 788 }
798 *found = ns; 789
799 return 0;
800 } else {
801 return (ns == max) ? 0 : -EINVAL; 790 return (ns == max) ? 0 : -EINVAL;
802 }
803} 791}
804 792
805/*====================================================================*/
806 793
807static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1) 794static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1)
808{ 795{
809 u_char *p, *q; 796 u_char *p, *q;
810 797
811 p = (u_char *)tuple->TupleData; 798 p = (u_char *)tuple->TupleData;
812 q = p + tuple->TupleDataLen; 799 q = p + tuple->TupleDataLen;
813 800
814 vers_1->major = *p; p++; 801 vers_1->major = *p; p++;
815 vers_1->minor = *p; p++; 802 vers_1->minor = *p; p++;
816 if (p >= q) 803 if (p >= q)
817 return -EINVAL; 804 return -EINVAL;
818 805
819 return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS, 806 return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS,
820 vers_1->str, vers_1->ofs, &vers_1->ns); 807 vers_1->str, vers_1->ofs, &vers_1->ns);
821} 808}
822 809
823/*====================================================================*/
824 810
825static int parse_altstr(tuple_t *tuple, cistpl_altstr_t *altstr) 811static int parse_altstr(tuple_t *tuple, cistpl_altstr_t *altstr)
826{ 812{
827 u_char *p, *q; 813 u_char *p, *q;
828 814
829 p = (u_char *)tuple->TupleData; 815 p = (u_char *)tuple->TupleData;
830 q = p + tuple->TupleDataLen; 816 q = p + tuple->TupleDataLen;
831 817
832 return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS, 818 return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS,
833 altstr->str, altstr->ofs, &altstr->ns); 819 altstr->str, altstr->ofs, &altstr->ns);
834} 820}
835 821
836/*====================================================================*/
837 822
838static int parse_jedec(tuple_t *tuple, cistpl_jedec_t *jedec) 823static int parse_jedec(tuple_t *tuple, cistpl_jedec_t *jedec)
839{ 824{
840 u_char *p, *q; 825 u_char *p, *q;
841 int nid; 826 int nid;
842 827
843 p = (u_char *)tuple->TupleData; 828 p = (u_char *)tuple->TupleData;
844 q = p + tuple->TupleDataLen; 829 q = p + tuple->TupleDataLen;
845 830
846 for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) { 831 for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) {
847 if (p > q-2) 832 if (p > q-2)
848 break; 833 break;
849 jedec->id[nid].mfr = p[0]; 834 jedec->id[nid].mfr = p[0];
850 jedec->id[nid].info = p[1]; 835 jedec->id[nid].info = p[1];
851 p += 2; 836 p += 2;
852 } 837 }
853 jedec->nid = nid; 838 jedec->nid = nid;
854 return 0; 839 return 0;
855} 840}
856 841
857/*====================================================================*/
858 842
859static int parse_manfid(tuple_t *tuple, cistpl_manfid_t *m) 843static int parse_manfid(tuple_t *tuple, cistpl_manfid_t *m)
860{ 844{
861 if (tuple->TupleDataLen < 4) 845 if (tuple->TupleDataLen < 4)
862 return -EINVAL; 846 return -EINVAL;
863 m->manf = get_unaligned_le16(tuple->TupleData); 847 m->manf = get_unaligned_le16(tuple->TupleData);
864 m->card = get_unaligned_le16(tuple->TupleData + 2); 848 m->card = get_unaligned_le16(tuple->TupleData + 2);
865 return 0; 849 return 0;
866} 850}
867 851
868/*====================================================================*/
869 852
870static int parse_funcid(tuple_t *tuple, cistpl_funcid_t *f) 853static int parse_funcid(tuple_t *tuple, cistpl_funcid_t *f)
871{ 854{
872 u_char *p; 855 u_char *p;
873 if (tuple->TupleDataLen < 2) 856 if (tuple->TupleDataLen < 2)
874 return -EINVAL; 857 return -EINVAL;
875 p = (u_char *)tuple->TupleData; 858 p = (u_char *)tuple->TupleData;
876 f->func = p[0]; 859 f->func = p[0];
877 f->sysinit = p[1]; 860 f->sysinit = p[1];
878 return 0; 861 return 0;
879} 862}
880 863
881/*====================================================================*/
882 864
883static int parse_funce(tuple_t *tuple, cistpl_funce_t *f) 865static int parse_funce(tuple_t *tuple, cistpl_funce_t *f)
884{ 866{
885 u_char *p; 867 u_char *p;
886 int i; 868 int i;
887 if (tuple->TupleDataLen < 1) 869 if (tuple->TupleDataLen < 1)
888 return -EINVAL; 870 return -EINVAL;
889 p = (u_char *)tuple->TupleData; 871 p = (u_char *)tuple->TupleData;
890 f->type = p[0]; 872 f->type = p[0];
891 for (i = 1; i < tuple->TupleDataLen; i++) 873 for (i = 1; i < tuple->TupleDataLen; i++)
892 f->data[i-1] = p[i]; 874 f->data[i-1] = p[i];
893 return 0; 875 return 0;
894} 876}
895 877
896/*====================================================================*/
897 878
898static int parse_config(tuple_t *tuple, cistpl_config_t *config) 879static int parse_config(tuple_t *tuple, cistpl_config_t *config)
899{ 880{
900 int rasz, rmsz, i; 881 int rasz, rmsz, i;
901 u_char *p; 882 u_char *p;
902 883
903 p = (u_char *)tuple->TupleData; 884 p = (u_char *)tuple->TupleData;
904 rasz = *p & 0x03; 885 rasz = *p & 0x03;
905 rmsz = (*p & 0x3c) >> 2; 886 rmsz = (*p & 0x3c) >> 2;
906 if (tuple->TupleDataLen < rasz+rmsz+4) 887 if (tuple->TupleDataLen < rasz+rmsz+4)
907 return -EINVAL; 888 return -EINVAL;
908 config->last_idx = *(++p); 889 config->last_idx = *(++p);
909 p++; 890 p++;
910 config->base = 0; 891 config->base = 0;
911 for (i = 0; i <= rasz; i++) 892 for (i = 0; i <= rasz; i++)
912 config->base += p[i] << (8*i); 893 config->base += p[i] << (8*i);
913 p += rasz+1; 894 p += rasz+1;
914 for (i = 0; i < 4; i++) 895 for (i = 0; i < 4; i++)
915 config->rmask[i] = 0; 896 config->rmask[i] = 0;
916 for (i = 0; i <= rmsz; i++) 897 for (i = 0; i <= rmsz; i++)
917 config->rmask[i>>2] += p[i] << (8*(i%4)); 898 config->rmask[i>>2] += p[i] << (8*(i%4));
918 config->subtuples = tuple->TupleDataLen - (rasz+rmsz+4); 899 config->subtuples = tuple->TupleDataLen - (rasz+rmsz+4);
919 return 0; 900 return 0;
920} 901}
921 902
922/*====================================================================== 903/* The following routines are all used to parse the nightmarish
904 * config table entries.
905 */
906
907static u_char *parse_power(u_char *p, u_char *q, cistpl_power_t *pwr)
908{
909 int i;
910 u_int scale;
923 911
924 The following routines are all used to parse the nightmarish 912 if (p == q)
925 config table entries. 913 return NULL;
914 pwr->present = *p;
915 pwr->flags = 0;
916 p++;
917 for (i = 0; i < 7; i++)
918 if (pwr->present & (1<<i)) {
919 if (p == q)
920 return NULL;
921 pwr->param[i] = POWER_CVT(*p);
922 scale = POWER_SCALE(*p);
923 while (*p & 0x80) {
924 if (++p == q)
925 return NULL;
926 if ((*p & 0x7f) < 100)
927 pwr->param[i] +=
928 (*p & 0x7f) * scale / 100;
929 else if (*p == 0x7d)
930 pwr->flags |= CISTPL_POWER_HIGHZ_OK;
931 else if (*p == 0x7e)
932 pwr->param[i] = 0;
933 else if (*p == 0x7f)
934 pwr->flags |= CISTPL_POWER_HIGHZ_REQ;
935 else
936 return NULL;
937 }
938 p++;
939 }
940 return p;
941}
926 942
927======================================================================*/
928 943
929static u_char *parse_power(u_char *p, u_char *q, 944static u_char *parse_timing(u_char *p, u_char *q, cistpl_timing_t *timing)
930 cistpl_power_t *pwr)
931{ 945{
932 int i; 946 u_char scale;
933 u_int scale; 947
934 948 if (p == q)
935 if (p == q) 949 return NULL;
936 return NULL; 950 scale = *p;
937 pwr->present = *p; 951 if ((scale & 3) != 3) {
938 pwr->flags = 0;
939 p++;
940 for (i = 0; i < 7; i++)
941 if (pwr->present & (1<<i)) {
942 if (p == q)
943 return NULL;
944 pwr->param[i] = POWER_CVT(*p);
945 scale = POWER_SCALE(*p);
946 while (*p & 0x80) {
947 if (++p == q) 952 if (++p == q)
948 return NULL; 953 return NULL;
949 if ((*p & 0x7f) < 100) 954 timing->wait = SPEED_CVT(*p);
950 pwr->param[i] += (*p & 0x7f) * scale / 100; 955 timing->waitscale = exponent[scale & 3];
951 else if (*p == 0x7d) 956 } else
952 pwr->flags |= CISTPL_POWER_HIGHZ_OK; 957 timing->wait = 0;
953 else if (*p == 0x7e) 958 scale >>= 2;
954 pwr->param[i] = 0; 959 if ((scale & 7) != 7) {
955 else if (*p == 0x7f) 960 if (++p == q)
956 pwr->flags |= CISTPL_POWER_HIGHZ_REQ; 961 return NULL;
957 else 962 timing->ready = SPEED_CVT(*p);
958 return NULL; 963 timing->rdyscale = exponent[scale & 7];
959 } 964 } else
960 p++; 965 timing->ready = 0;
961 } 966 scale >>= 3;
962 return p; 967 if (scale != 7) {
968 if (++p == q)
969 return NULL;
970 timing->reserved = SPEED_CVT(*p);
971 timing->rsvscale = exponent[scale];
972 } else
973 timing->reserved = 0;
974 p++;
975 return p;
963} 976}
964 977
965/*====================================================================*/
966 978
967static u_char *parse_timing(u_char *p, u_char *q, 979static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
968 cistpl_timing_t *timing)
969{ 980{
970 u_char scale; 981 int i, j, bsz, lsz;
971 982
972 if (p == q) 983 if (p == q)
973 return NULL;
974 scale = *p;
975 if ((scale & 3) != 3) {
976 if (++p == q)
977 return NULL;
978 timing->wait = SPEED_CVT(*p);
979 timing->waitscale = exponent[scale & 3];
980 } else
981 timing->wait = 0;
982 scale >>= 2;
983 if ((scale & 7) != 7) {
984 if (++p == q)
985 return NULL; 984 return NULL;
986 timing->ready = SPEED_CVT(*p); 985 io->flags = *p;
987 timing->rdyscale = exponent[scale & 7]; 986
988 } else 987 if (!(*p & 0x80)) {
989 timing->ready = 0; 988 io->nwin = 1;
990 scale >>= 3; 989 io->win[0].base = 0;
991 if (scale != 7) { 990 io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK));
991 return p+1;
992 }
993
992 if (++p == q) 994 if (++p == q)
993 return NULL; 995 return NULL;
994 timing->reserved = SPEED_CVT(*p); 996 io->nwin = (*p & 0x0f) + 1;
995 timing->rsvscale = exponent[scale]; 997 bsz = (*p & 0x30) >> 4;
996 } else 998 if (bsz == 3)
997 timing->reserved = 0; 999 bsz++;
998 p++; 1000 lsz = (*p & 0xc0) >> 6;
999 return p; 1001 if (lsz == 3)
1000} 1002 lsz++;
1001 1003 p++;
1002/*====================================================================*/
1003 1004
1004static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io) 1005 for (i = 0; i < io->nwin; i++) {
1005{ 1006 io->win[i].base = 0;
1006 int i, j, bsz, lsz; 1007 io->win[i].len = 1;
1007 1008 for (j = 0; j < bsz; j++, p++) {
1008 if (p == q) 1009 if (p == q)
1009 return NULL; 1010 return NULL;
1010 io->flags = *p; 1011 io->win[i].base += *p << (j*8);
1011 1012 }
1012 if (!(*p & 0x80)) { 1013 for (j = 0; j < lsz; j++, p++) {
1013 io->nwin = 1; 1014 if (p == q)
1014 io->win[0].base = 0; 1015 return NULL;
1015 io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK)); 1016 io->win[i].len += *p << (j*8);
1016 return p+1; 1017 }
1017 }
1018
1019 if (++p == q)
1020 return NULL;
1021 io->nwin = (*p & 0x0f) + 1;
1022 bsz = (*p & 0x30) >> 4;
1023 if (bsz == 3)
1024 bsz++;
1025 lsz = (*p & 0xc0) >> 6;
1026 if (lsz == 3)
1027 lsz++;
1028 p++;
1029
1030 for (i = 0; i < io->nwin; i++) {
1031 io->win[i].base = 0;
1032 io->win[i].len = 1;
1033 for (j = 0; j < bsz; j++, p++) {
1034 if (p == q)
1035 return NULL;
1036 io->win[i].base += *p << (j*8);
1037 }
1038 for (j = 0; j < lsz; j++, p++) {
1039 if (p == q)
1040 return NULL;
1041 io->win[i].len += *p << (j*8);
1042 } 1018 }
1043 } 1019 return p;
1044 return p;
1045} 1020}
1046 1021
1047/*====================================================================*/
1048 1022
static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
{
	int i, j, asz, lsz, has_ha;
	u_int len, ca, ha;

	if (p == q)
		return NULL;

	mem->nwin = (*p & 0x07) + 1;
	lsz = (*p & 0x18) >> 3;
	asz = (*p & 0x60) >> 5;
	has_ha = (*p & 0x80);
	if (++p == q)
		return NULL;

	for (i = 0; i < mem->nwin; i++) {
		len = ca = ha = 0;
		for (j = 0; j < lsz; j++, p++) {
			if (p == q)
				return NULL;
			len += *p << (j*8);
		}
		for (j = 0; j < asz; j++, p++) {
			if (p == q)
				return NULL;
			ca += *p << (j*8);
		}
		if (has_ha)
			for (j = 0; j < asz; j++, p++) {
				if (p == q)
					return NULL;
				ha += *p << (j*8);
			}
		mem->win[i].len = len << 8;
		mem->win[i].card_addr = ca << 8;
		mem->win[i].host_addr = ha << 8;
	}
	return p;
}

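parse_mem() packs the window count and the field widths into the first byte and scales every decoded value by 256. A minimal standalone sketch with an invented three-byte tuple body:

#include <stdio.h>

/* Decode a one-window CISTPL_MEM body, mirroring parse_mem() above. */
int main(void)
{
	/* hypothetical bytes: descriptor, length field, card-address field */
	unsigned char buf[] = { 0x28, 0x40, 0x10 };
	unsigned char *p = buf;
	unsigned int len = 0, ca = 0, ha = 0;
	int j, nwin, lsz, asz, has_ha;

	nwin   = (*p & 0x07) + 1;	/* number of memory windows: 1 */
	lsz    = (*p & 0x18) >> 3;	/* bytes in each length field: 1 */
	asz    = (*p & 0x60) >> 5;	/* bytes in each address field: 1 */
	has_ha = (*p & 0x80);		/* host-address fields present: no */
	p++;

	for (j = 0; j < lsz; j++, p++)
		len += *p << (j * 8);
	for (j = 0; j < asz; j++, p++)
		ca += *p << (j * 8);
	if (has_ha)			/* host address would follow here */
		for (j = 0; j < asz; j++, p++)
			ha += *p << (j * 8);

	/* all three values are stored in 256-byte units */
	printf("%d window(s): len=0x%x card_addr=0x%x host_addr=0x%x\n",
	       nwin, len << 8, ca << 8, ha << 8);
	return 0;
}

Here the single window comes out as 0x4000 bytes at card address 0x1000.
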
static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq)
{
	if (p == q)
		return NULL;
	irq->IRQInfo1 = *p; p++;
	if (irq->IRQInfo1 & IRQ_INFO2_VALID) {
		if (p+2 > q)
			return NULL;
		irq->IRQInfo2 = (p[1]<<8) + p[0];
		p += 2;
	}
	return p;
}

static int parse_cftable_entry(tuple_t *tuple,
			       cistpl_cftable_entry_t *entry)
{
	u_char *p, *q, features;

	p = tuple->TupleData;
	q = p + tuple->TupleDataLen;
	entry->index = *p & 0x3f;
	entry->flags = 0;
	if (*p & 0x40)
		entry->flags |= CISTPL_CFTABLE_DEFAULT;
	if (*p & 0x80) {
		if (++p == q)
			return -EINVAL;
		if (*p & 0x10)
			entry->flags |= CISTPL_CFTABLE_BVDS;
		if (*p & 0x20)
			entry->flags |= CISTPL_CFTABLE_WP;
		if (*p & 0x40)
			entry->flags |= CISTPL_CFTABLE_RDYBSY;
		if (*p & 0x80)
			entry->flags |= CISTPL_CFTABLE_MWAIT;
		entry->interface = *p & 0x0f;
	} else
		entry->interface = 0;

	/* Process optional features */
	if (++p == q)
		return -EINVAL;
	features = *p; p++;

	/* Power options */
	if ((features & 3) > 0) {
		p = parse_power(p, q, &entry->vcc);
		if (p == NULL)
			return -EINVAL;
	} else
		entry->vcc.present = 0;
	if ((features & 3) > 1) {
		p = parse_power(p, q, &entry->vpp1);
		if (p == NULL)
			return -EINVAL;
	} else
		entry->vpp1.present = 0;
	if ((features & 3) > 2) {
		p = parse_power(p, q, &entry->vpp2);
		if (p == NULL)
			return -EINVAL;
	} else
		entry->vpp2.present = 0;

	/* Timing options */
	if (features & 0x04) {
		p = parse_timing(p, q, &entry->timing);
		if (p == NULL)
			return -EINVAL;
	} else {
		entry->timing.wait = 0;
		entry->timing.ready = 0;
		entry->timing.reserved = 0;
	}

	/* I/O window options */
	if (features & 0x08) {
		p = parse_io(p, q, &entry->io);
		if (p == NULL)
			return -EINVAL;
	} else
		entry->io.nwin = 0;

	/* Interrupt options */
	if (features & 0x10) {
		p = parse_irq(p, q, &entry->irq);
		if (p == NULL)
			return -EINVAL;
	} else
		entry->irq.IRQInfo1 = 0;

	switch (features & 0x60) {
	case 0x00:
		entry->mem.nwin = 0;
		break;
	case 0x20:
		entry->mem.nwin = 1;
		entry->mem.win[0].len = get_unaligned_le16(p) << 8;
		entry->mem.win[0].card_addr = 0;
		entry->mem.win[0].host_addr = 0;
		p += 2;
		if (p > q)
			return -EINVAL;
		break;
	case 0x40:
		entry->mem.nwin = 1;
		entry->mem.win[0].len = get_unaligned_le16(p) << 8;
		entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8;
		entry->mem.win[0].host_addr = 0;
		p += 4;
		if (p > q)
			return -EINVAL;
		break;
	case 0x60:
		p = parse_mem(p, q, &entry->mem);
		if (p == NULL)
			return -EINVAL;
		break;
	}

	/* Misc features */
	if (features & 0x80) {
		if (p == q)
			return -EINVAL;
		entry->flags |= (*p << 8);
		while (*p & 0x80)
			if (++p == q)
				return -EINVAL;
		p++;
	}

	entry->subtuples = q-p;

	return 0;
}

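The feature byte read at the top of the optional section drives everything that follows: bits 0-1 give the number of power descriptors, bit 2 a timing block, bit 3 an I/O block, bit 4 an IRQ block, bits 5-6 the memory-descriptor form, and bit 7 trailing misc byte(s). A small standalone sketch, with a made-up feature byte, that spells this out:

#include <stdio.h>

/* Report which optional blocks a config-table feature byte announces,
 * following the bit tests in parse_cftable_entry() above. */
int main(void)
{
	unsigned char features = 0x7a;	/* hypothetical feature byte */

	printf("power descriptors: %d\n", features & 3);
	printf("timing block:      %s\n", (features & 0x04) ? "yes" : "no");
	printf("I/O block:         %s\n", (features & 0x08) ? "yes" : "no");
	printf("IRQ block:         %s\n", (features & 0x10) ? "yes" : "no");

	switch (features & 0x60) {
	case 0x00:
		printf("memory block:      none\n");
		break;
	case 0x20:
		printf("memory block:      single length field\n");
		break;
	case 0x40:
		printf("memory block:      length + card address\n");
		break;
	case 0x60:
		printf("memory block:      full window descriptor\n");
		break;
	}
	printf("misc byte(s):      %s\n", (features & 0x80) ? "yes" : "no");
	return 0;
}
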
static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo)
{
	u_char *p, *q;
	int n;

	p = (u_char *)tuple->TupleData;
	q = p + tuple->TupleDataLen;

	for (n = 0; n < CISTPL_MAX_DEVICES; n++) {
		if (p > q-6)
			break;
		geo->geo[n].buswidth = p[0];
		geo->geo[n].erase_block = 1 << (p[1]-1);
		geo->geo[n].read_block = 1 << (p[2]-1);
		geo->geo[n].write_block = 1 << (p[3]-1);
		geo->geo[n].partition = 1 << (p[4]-1);
		geo->geo[n].interleave = 1 << (p[5]-1);
		p += 6;
	}
	geo->ngeo = n;
	return 0;
}

static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2)
{
	u_char *p, *q;

	if (tuple->TupleDataLen < 10)
		return -EINVAL;

	p = tuple->TupleData;
	q = p + tuple->TupleDataLen;

	v2->vers = p[0];
	v2->comply = p[1];
	v2->dindex = get_unaligned_le16(p + 2);
	v2->vspec8 = p[6];
	v2->vspec9 = p[7];
	v2->nhdr = p[8];
	p += 9;
	return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL);
}

static int parse_org(tuple_t *tuple, cistpl_org_t *org)
{
	u_char *p, *q;
	int i;

	p = tuple->TupleData;
	q = p + tuple->TupleDataLen;
	if (p == q)
		return -EINVAL;
	org->data_org = *p;
	if (++p == q)
		return -EINVAL;
	for (i = 0; i < 30; i++) {
		org->desc[i] = *p;
		if (*p == '\0')
			break;
		if (++p == q)
			return -EINVAL;
	}
	return 0;
}

static int parse_format(tuple_t *tuple, cistpl_format_t *fmt)
{
	u_char *p;

	if (tuple->TupleDataLen < 10)
		return -EINVAL;

	p = tuple->TupleData;

	fmt->type = p[0];
	fmt->edc = p[1];
	fmt->offset = get_unaligned_le32(p + 2);
	fmt->length = get_unaligned_le32(p + 6);

	return 0;
}

int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse)
{
	int ret = 0;

	if (tuple->TupleDataLen > tuple->TupleDataMax)
		return -EINVAL;
	switch (tuple->TupleCode) {
	case CISTPL_DEVICE:
	case CISTPL_DEVICE_A:
		ret = parse_device(tuple, &parse->device);
		break;
	case CISTPL_CHECKSUM:
		ret = parse_checksum(tuple, &parse->checksum);
		break;
	case CISTPL_LONGLINK_A:
	case CISTPL_LONGLINK_C:
		ret = parse_longlink(tuple, &parse->longlink);
		break;
	case CISTPL_LONGLINK_MFC:
		ret = parse_longlink_mfc(tuple, &parse->longlink_mfc);
		break;
	case CISTPL_VERS_1:
		ret = parse_vers_1(tuple, &parse->version_1);
		break;
	case CISTPL_ALTSTR:
		ret = parse_altstr(tuple, &parse->altstr);
		break;
	case CISTPL_JEDEC_A:
	case CISTPL_JEDEC_C:
		ret = parse_jedec(tuple, &parse->jedec);
		break;
	case CISTPL_MANFID:
		ret = parse_manfid(tuple, &parse->manfid);
		break;
	case CISTPL_FUNCID:
		ret = parse_funcid(tuple, &parse->funcid);
		break;
	case CISTPL_FUNCE:
		ret = parse_funce(tuple, &parse->funce);
		break;
	case CISTPL_CONFIG:
		ret = parse_config(tuple, &parse->config);
		break;
	case CISTPL_CFTABLE_ENTRY:
		ret = parse_cftable_entry(tuple, &parse->cftable_entry);
		break;
	case CISTPL_DEVICE_GEO:
	case CISTPL_DEVICE_GEO_A:
		ret = parse_device_geo(tuple, &parse->device_geo);
		break;
	case CISTPL_VERS_2:
		ret = parse_vers_2(tuple, &parse->vers_2);
		break;
	case CISTPL_ORG:
		ret = parse_org(tuple, &parse->org);
		break;
	case CISTPL_FORMAT:
	case CISTPL_FORMAT_A:
		ret = parse_format(tuple, &parse->format);
		break;
	case CISTPL_NO_LINK:
	case CISTPL_LINKTARGET:
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		pr_debug("parse_tuple failed %d\n", ret);
	return ret;
}
EXPORT_SYMBOL(pcmcia_parse_tuple);

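Callers normally fill in a tuple_t with the raw bytes and let pcmcia_parse_tuple() dispatch to the matching parser. The sketch below pushes an invented CISTPL_FORMAT payload through it; the sample bytes, the helper name, and the <pcmcia/cistpl.h> include are assumptions made for illustration, not something taken from this patch:

#include <linux/kernel.h>
#include <pcmcia/cistpl.h>

/* Parse a made-up CISTPL_FORMAT tuple body and report the result. */
static int demo_parse_format_tuple(void)
{
	/* type, edc, 32-bit offset (LE), 32-bit length (LE) */
	u8 raw[10] = { 0x00, 0x00,
		       0x00, 0x02, 0x00, 0x00,	/* offset 0x200 */
		       0x00, 0x10, 0x00, 0x00 };/* length 0x1000 */
	cisparse_t parse;
	tuple_t tuple;
	int ret;

	tuple.TupleCode = CISTPL_FORMAT;
	tuple.TupleData = raw;
	tuple.TupleDataLen = sizeof(raw);
	tuple.TupleDataMax = sizeof(raw);

	ret = pcmcia_parse_tuple(&tuple, &parse);
	if (ret)
		return ret;

	pr_info("format tuple: type %u, data at 0x%x, 0x%x bytes\n",
		parse.format.type, parse.format.offset, parse.format.length);
	return 0;
}
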
/**
 * pccard_read_tuple() - internal CIS tuple access
 * @s: the struct pcmcia_socket where the card is inserted
 * @function: the device function we loop for
 * @code: which CIS code shall we look for?
 * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
 *
 * pccard_read_tuple() reads out one tuple and attempts to parse it
 */
int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
		      cisdata_t code, void *parse)
{
	tuple_t tuple;
	cisdata_t *buf;
	int ret;

	buf = kmalloc(256, GFP_KERNEL);
	if (buf == NULL) {
		dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
		return -ENOMEM;
	}
	tuple.DesiredTuple = code;
	tuple.Attributes = 0;
	if (function == BIND_FN_ALL)
		tuple.Attributes = TUPLE_RETURN_COMMON;
	ret = pccard_get_first_tuple(s, function, &tuple);
	if (ret != 0)
		goto done;
	tuple.TupleData = buf;
	tuple.TupleOffset = 0;
	tuple.TupleDataMax = 255;
	ret = pccard_get_tuple_data(s, &tuple);
	if (ret != 0)
		goto done;
	ret = pcmcia_parse_tuple(&tuple, parse);
done:
	kfree(buf);
	return ret;
}

diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 3889cf07d6ce..9254ab0b29b1 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -42,7 +42,6 @@ struct db1x_pcmcia_sock {
42 int nr; /* socket number */ 42 int nr; /* socket number */
43 void *virt_io; 43 void *virt_io;
44 44
45 /* the "pseudo" addresses of the PCMCIA space. */
46 phys_addr_t phys_io; 45 phys_addr_t phys_io;
47 phys_addr_t phys_attr; 46 phys_addr_t phys_attr;
48 phys_addr_t phys_mem; 47 phys_addr_t phys_mem;
@@ -437,7 +436,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
437 * This includes IRQs for Carddetection/ejection, the card 436 * This includes IRQs for Carddetection/ejection, the card
438 * itself and optional status change detection. 437 * itself and optional status change detection.
439 * Also, the memory areas covered by a socket. For these 438 * Also, the memory areas covered by a socket. For these
440 * we require the 32bit "pseudo" addresses (see the au1000.h 439 * we require the real 36bit addresses (see the au1000.h
441 * header for more information). 440 * header for more information).
442 */ 441 */
443 442
@@ -459,11 +458,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
459 458
460 ret = -ENODEV; 459 ret = -ENODEV;
461 460
462 /* 461 /* 36bit PCMCIA Attribute area address */
463 * pseudo-attr: The 32bit address of the PCMCIA attribute space
464 * for this socket (usually the 36bit address shifted 4 to the
465 * right).
466 */
467 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr"); 462 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");
468 if (!r) { 463 if (!r) {
469 printk(KERN_ERR "pcmcia%d has no 'pseudo-attr' resource!\n", 464 printk(KERN_ERR "pcmcia%d has no 'pseudo-attr' resource!\n",
@@ -472,10 +467,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
472 } 467 }
473 sock->phys_attr = r->start; 468 sock->phys_attr = r->start;
474 469
475 /* 470 /* 36bit PCMCIA Memory area address */
476 * pseudo-mem: The 32bit address of the PCMCIA memory space for
477 * this socket (usually the 36bit address shifted 4 to the right)
478 */
479 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem"); 471 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem");
480 if (!r) { 472 if (!r) {
481 printk(KERN_ERR "pcmcia%d has no 'pseudo-mem' resource!\n", 473 printk(KERN_ERR "pcmcia%d has no 'pseudo-mem' resource!\n",
@@ -484,10 +476,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
484 } 476 }
485 sock->phys_mem = r->start; 477 sock->phys_mem = r->start;
486 478
487 /* 479 /* 36bit PCMCIA IO area address */
488 * pseudo-io: The 32bit address of the PCMCIA IO space for this
489 * socket (usually the 36bit address shifted 4 to the right).
490 */
491 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io"); 480 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io");
492 if (!r) { 481 if (!r) {
493 printk(KERN_ERR "pcmcia%d has no 'pseudo-io' resource!\n", 482 printk(KERN_ERR "pcmcia%d has no 'pseudo-io' resource!\n",
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index e1741cd875aa..7c204910a777 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -48,23 +48,13 @@ MODULE_AUTHOR("Jun Komuro <komurojun-mbn@nifty.com>");
48 * Specifies the interrupt delivery mode. The default (1) is to use PCI 48 * Specifies the interrupt delivery mode. The default (1) is to use PCI
49 * interrupts; a value of 0 selects ISA interrupts. This must be set for 49 * interrupts; a value of 0 selects ISA interrupts. This must be set for
50 * correct operation of PCI card readers. 50 * correct operation of PCI card readers.
51 *
52 * irq_list=i,j,...
53 * This list limits the set of interrupts that can be used by PCMCIA
54 * cards.
55 * The default list is 3,4,5,7,9,10,11.
56 * (irq_list parameter is not used, if irq_mode = 1)
57 */ 51 */
58 52
59static int irq_mode = 1; /* 0 = ISA interrupt, 1 = PCI interrupt */ 53static int irq_mode = 1; /* 0 = ISA interrupt, 1 = PCI interrupt */
60static int irq_list[16];
61static unsigned int irq_list_count = 0;
62 54
63module_param(irq_mode, int, 0444); 55module_param(irq_mode, int, 0444);
64module_param_array(irq_list, int, &irq_list_count, 0444);
65MODULE_PARM_DESC(irq_mode, 56MODULE_PARM_DESC(irq_mode,
66 "interrupt delivery mode. 0 = ISA, 1 = PCI. default is 1"); 57 "interrupt delivery mode. 0 = ISA, 1 = PCI. default is 1");
67MODULE_PARM_DESC(irq_list, "interrupts that can be used by PCMCIA cards");
68 58
69static DEFINE_SPINLOCK(port_lock); 59static DEFINE_SPINLOCK(port_lock);
70 60
@@ -605,13 +595,7 @@ static u_int __devinit pd6729_isa_scan(void)
605 return 0; 595 return 0;
606 } 596 }
607 597
608 if (irq_list_count == 0) 598 mask0 = PD67_MASK;
609 mask0 = 0xffff;
610 else
611 for (i = mask0 = 0; i < irq_list_count; i++)
612 mask0 |= (1<<irq_list[i]);
613
614 mask0 &= PD67_MASK;
615 599
616 /* just find interrupts that aren't in use */ 600 /* just find interrupts that aren't in use */
617 for (i = 0; i < 16; i++) 601 for (i = 0; i < 16; i++)
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index e6f7d410aed6..452c83b512c4 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -79,9 +79,8 @@ static resource_size_t pcmcia_align(void *align_data,
 
 #ifdef CONFIG_X86
 	if (res->flags & IORESOURCE_IO) {
-		if (start & 0x300) {
+		if (start & 0x300)
 			start = (start + 0x3ff) & ~0x3ff;
-		}
 	}
 #endif
 
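The test above fires whenever bits 8 or 9 of the start address are set and rounds the start up to the next 0x400 boundary. A standalone check of that arithmetic on a few made-up start addresses:

#include <stdio.h>

/* Exercise the x86 I/O alignment rule used above on sample starts. */
int main(void)
{
	unsigned long samples[] = { 0x0c0, 0x100, 0x3e8, 0x400, 0x4100 };
	int i;

	for (i = 0; i < (int)(sizeof(samples) / sizeof(samples[0])); i++) {
		unsigned long start = samples[i];

		if (start & 0x300)		/* bits 8-9 set? */
			start = (start + 0x3ff) & ~0x3ff;
		printf("0x%04lx -> 0x%04lx\n", samples[i], start);
	}
	return 0;
}

With these inputs only 0x100, 0x3e8 and 0x4100 get bumped (to 0x400, 0x400 and 0x4400 respectively).
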
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c
index 61560cd6e287..f9009d34254b 100644
--- a/drivers/pcmcia/xxs1500_ss.c
+++ b/drivers/pcmcia/xxs1500_ss.c
@@ -218,11 +218,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
218 218
219 ret = -ENODEV; 219 ret = -ENODEV;
220 220
221 /* 221 /* 36bit PCMCIA Attribute area address */
222 * pseudo-attr: The 32bit address of the PCMCIA attribute space
223 * for this socket (usually the 36bit address shifted 4 to the
224 * right).
225 */
226 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr"); 222 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");
227 if (!r) { 223 if (!r) {
228 dev_err(&pdev->dev, "missing 'pcmcia-attr' resource!\n"); 224 dev_err(&pdev->dev, "missing 'pcmcia-attr' resource!\n");
@@ -230,10 +226,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
230 } 226 }
231 sock->phys_attr = r->start; 227 sock->phys_attr = r->start;
232 228
233 /* 229 /* 36bit PCMCIA Memory area address */
234 * pseudo-mem: The 32bit address of the PCMCIA memory space for
235 * this socket (usually the 36bit address shifted 4 to the right)
236 */
237 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem"); 230 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem");
238 if (!r) { 231 if (!r) {
239 dev_err(&pdev->dev, "missing 'pcmcia-mem' resource!\n"); 232 dev_err(&pdev->dev, "missing 'pcmcia-mem' resource!\n");
@@ -241,10 +234,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
241 } 234 }
242 sock->phys_mem = r->start; 235 sock->phys_mem = r->start;
243 236
244 /* 237 /* 36bit PCMCIA IO area address */
245 * pseudo-io: The 32bit address of the PCMCIA IO space for this
246 * socket (usually the 36bit address shifted 4 to the right).
247 */
248 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io"); 238 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io");
249 if (!r) { 239 if (!r) {
250 dev_err(&pdev->dev, "missing 'pcmcia-io' resource!\n"); 240 dev_err(&pdev->dev, "missing 'pcmcia-io' resource!\n");
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index b85375f87622..967c766f53ba 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1408,10 +1408,10 @@ static struct pci_device_id yenta_table[] = {
 	CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7510, TI12XX),
 	CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7610, TI12XX),
 
-	CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, TI12XX),
-	CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, TI12XX),
-	CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, TI12XX),
-	CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, TI12XX),
+	CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, ENE),
+	CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, ENE),
+	CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, ENE),
+	CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, ENE),
 	CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, ENE),
 	CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, ENE),
 	CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, ENE),
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 6848f213eb53..cd2ee6fce1b4 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -59,6 +59,8 @@ config ASUS_LAPTOP
 	select NEW_LEDS
 	select BACKLIGHT_CLASS_DEVICE
 	depends on INPUT
+	depends on RFKILL || RFKILL = n
+	select INPUT_SPARSEKMAP
 	---help---
 	  This is the new Linux driver for Asus laptops. It may also support some
 	  MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
@@ -177,6 +179,7 @@ config COMPAL_LAPTOP
 	tristate "Compal Laptop Extras"
 	depends on ACPI
 	depends on BACKLIGHT_CLASS_DEVICE
+	depends on RFKILL
 	---help---
 	  This is a driver for laptops built by Compal:
 
@@ -320,9 +323,15 @@ config THINKPAD_ACPI_VIDEO
 	  server running, phase of the moon, and the current mood of
 	  Schroedinger's cat. If you can use X.org's RandR to control
 	  your ThinkPad's video output ports instead of this feature,
-	  don't think twice: do it and say N here to save some memory.
+	  don't think twice: do it and say N here to save memory and avoid
+	  bad interactions with X.org.
+
+	  NOTE: access to this feature is limited to processes with the
+	  CAP_SYS_ADMIN capability, to avoid local DoS issues in platforms
+	  where it interacts badly with X.org.
 
-	  If you are not sure, say Y here.
+	  If you are not sure, say Y here but do try to check if you could
+	  be using X.org RandR instead.
 
 config THINKPAD_ACPI_HOTKEY_POLL
 	bool "Support NVRAM polling for hot keys"
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 61a1c7503658..791fcf321506 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -45,58 +45,23 @@
45#include <linux/fb.h> 45#include <linux/fb.h>
46#include <linux/leds.h> 46#include <linux/leds.h>
47#include <linux/platform_device.h> 47#include <linux/platform_device.h>
48#include <linux/uaccess.h>
49#include <linux/input.h>
50#include <linux/input/sparse-keymap.h>
51#include <linux/rfkill.h>
48#include <acpi/acpi_drivers.h> 52#include <acpi/acpi_drivers.h>
49#include <acpi/acpi_bus.h> 53#include <acpi/acpi_bus.h>
50#include <asm/uaccess.h>
51#include <linux/input.h>
52
53#define ASUS_LAPTOP_VERSION "0.42"
54
55#define ASUS_HOTK_NAME "Asus Laptop Support"
56#define ASUS_HOTK_CLASS "hotkey"
57#define ASUS_HOTK_DEVICE_NAME "Hotkey"
58#define ASUS_HOTK_FILE KBUILD_MODNAME
59#define ASUS_HOTK_PREFIX "\\_SB.ATKD."
60 54
55#define ASUS_LAPTOP_VERSION "0.42"
61 56
62/* 57#define ASUS_LAPTOP_NAME "Asus Laptop Support"
63 * Some events we use, same for all Asus 58#define ASUS_LAPTOP_CLASS "hotkey"
64 */ 59#define ASUS_LAPTOP_DEVICE_NAME "Hotkey"
65#define ATKD_BR_UP 0x10 60#define ASUS_LAPTOP_FILE KBUILD_MODNAME
66#define ATKD_BR_DOWN 0x20 61#define ASUS_LAPTOP_PREFIX "\\_SB.ATKD."
67#define ATKD_LCD_ON 0x33
68#define ATKD_LCD_OFF 0x34
69
70/*
71 * Known bits returned by \_SB.ATKD.HWRS
72 */
73#define WL_HWRS 0x80
74#define BT_HWRS 0x100
75
76/*
77 * Flags for hotk status
78 * WL_ON and BT_ON are also used for wireless_status()
79 */
80#define WL_ON 0x01 /* internal Wifi */
81#define BT_ON 0x02 /* internal Bluetooth */
82#define MLED_ON 0x04 /* mail LED */
83#define TLED_ON 0x08 /* touchpad LED */
84#define RLED_ON 0x10 /* Record LED */
85#define PLED_ON 0x20 /* Phone LED */
86#define GLED_ON 0x40 /* Gaming LED */
87#define LCD_ON 0x80 /* LCD backlight */
88#define GPS_ON 0x100 /* GPS */
89#define KEY_ON 0x200 /* Keyboard backlight */
90
91#define ASUS_LOG ASUS_HOTK_FILE ": "
92#define ASUS_ERR KERN_ERR ASUS_LOG
93#define ASUS_WARNING KERN_WARNING ASUS_LOG
94#define ASUS_NOTICE KERN_NOTICE ASUS_LOG
95#define ASUS_INFO KERN_INFO ASUS_LOG
96#define ASUS_DEBUG KERN_DEBUG ASUS_LOG
97 62
98MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary"); 63MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary");
99MODULE_DESCRIPTION(ASUS_HOTK_NAME); 64MODULE_DESCRIPTION(ASUS_LAPTOP_NAME);
100MODULE_LICENSE("GPL"); 65MODULE_LICENSE("GPL");
101 66
102/* 67/*
@@ -113,225 +78,209 @@ static uint wapf = 1;
113module_param(wapf, uint, 0644); 78module_param(wapf, uint, 0644);
114MODULE_PARM_DESC(wapf, "WAPF value"); 79MODULE_PARM_DESC(wapf, "WAPF value");
115 80
116#define ASUS_HANDLE(object, paths...) \ 81static uint wlan_status = 1;
117 static acpi_handle object##_handle = NULL; \ 82static uint bluetooth_status = 1;
118 static char *object##_paths[] = { paths } 83
84module_param(wlan_status, uint, 0644);
85MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot "
86 "(0 = disabled, 1 = enabled, -1 = don't do anything). "
87 "default is 1");
88
89module_param(bluetooth_status, uint, 0644);
90MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
91 "(0 = disabled, 1 = enabled, -1 = don't do anything). "
92 "default is 1");
93
94/*
95 * Some events we use, same for all Asus
96 */
97#define ATKD_BR_UP 0x10 /* (event & ~ATKD_BR_UP) = brightness level */
 66#define ATKD_BR_DOWN 0x20 98#define ATKD_BR_DOWN 0x20 /* (event & ~ATKD_BR_DOWN) = brightness level */
99#define ATKD_BR_MIN ATKD_BR_UP
100#define ATKD_BR_MAX (ATKD_BR_DOWN | 0xF) /* 0x2f */
101#define ATKD_LCD_ON 0x33
102#define ATKD_LCD_OFF 0x34
103
104/*
105 * Known bits returned by \_SB.ATKD.HWRS
106 */
107#define WL_HWRS 0x80
108#define BT_HWRS 0x100
109
110/*
111 * Flags for hotk status
112 * WL_ON and BT_ON are also used for wireless_status()
113 */
114#define WL_RSTS 0x01 /* internal Wifi */
115#define BT_RSTS 0x02 /* internal Bluetooth */
119 116
120/* LED */ 117/* LED */
121ASUS_HANDLE(mled_set, ASUS_HOTK_PREFIX "MLED"); 118#define METHOD_MLED "MLED"
122ASUS_HANDLE(tled_set, ASUS_HOTK_PREFIX "TLED"); 119#define METHOD_TLED "TLED"
123ASUS_HANDLE(rled_set, ASUS_HOTK_PREFIX "RLED"); /* W1JC */ 120#define METHOD_RLED "RLED" /* W1JC */
124ASUS_HANDLE(pled_set, ASUS_HOTK_PREFIX "PLED"); /* A7J */ 121#define METHOD_PLED "PLED" /* A7J */
125ASUS_HANDLE(gled_set, ASUS_HOTK_PREFIX "GLED"); /* G1, G2 (probably) */ 122#define METHOD_GLED "GLED" /* G1, G2 (probably) */
126 123
127/* LEDD */ 124/* LEDD */
128ASUS_HANDLE(ledd_set, ASUS_HOTK_PREFIX "SLCM"); 125#define METHOD_LEDD "SLCM"
129 126
130/* 127/*
131 * Bluetooth and WLAN 128 * Bluetooth and WLAN
132 * WLED and BLED are not handled like other XLED, because in some dsdt 129 * WLED and BLED are not handled like other XLED, because in some dsdt
133 * they also control the WLAN/Bluetooth device. 130 * they also control the WLAN/Bluetooth device.
134 */ 131 */
135ASUS_HANDLE(wl_switch, ASUS_HOTK_PREFIX "WLED"); 132#define METHOD_WLAN "WLED"
136ASUS_HANDLE(bt_switch, ASUS_HOTK_PREFIX "BLED"); 133#define METHOD_BLUETOOTH "BLED"
137ASUS_HANDLE(wireless_status, ASUS_HOTK_PREFIX "RSTS"); /* All new models */ 134#define METHOD_WL_STATUS "RSTS"
138 135
139/* Brightness */ 136/* Brightness */
140ASUS_HANDLE(brightness_set, ASUS_HOTK_PREFIX "SPLV"); 137#define METHOD_BRIGHTNESS_SET "SPLV"
141ASUS_HANDLE(brightness_get, ASUS_HOTK_PREFIX "GPLV"); 138#define METHOD_BRIGHTNESS_GET "GPLV"
142 139
143/* Backlight */ 140/* Backlight */
144ASUS_HANDLE(lcd_switch, "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */ 141static acpi_handle lcd_switch_handle;
145 "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */ 142static const char *lcd_switch_paths[] = {
146 "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */ 143 "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
147 "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */ 144 "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
148 "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */ 145 "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
149 "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */ 146 "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
150 "\\_SB.PCI0.PX40.Q10", /* S1x */ 147 "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
151 "\\Q10"); /* A2x, L2D, L3D, M2E */ 148 "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */
149 "\\_SB.PCI0.PX40.Q10", /* S1x */
150 "\\Q10"}; /* A2x, L2D, L3D, M2E */
152 151
153/* Display */ 152/* Display */
154ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP"); 153#define METHOD_SWITCH_DISPLAY "SDSP"
155ASUS_HANDLE(display_get, 154
156 /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */ 155static acpi_handle display_get_handle;
157 "\\_SB.PCI0.P0P1.VGA.GETD", 156static const char *display_get_paths[] = {
158 /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */ 157 /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
159 "\\_SB.PCI0.P0P2.VGA.GETD", 158 "\\_SB.PCI0.P0P1.VGA.GETD",
160 /* A6V A6Q */ 159 /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
161 "\\_SB.PCI0.P0P3.VGA.GETD", 160 "\\_SB.PCI0.P0P2.VGA.GETD",
162 /* A6T, A6M */ 161 /* A6V A6Q */
163 "\\_SB.PCI0.P0PA.VGA.GETD", 162 "\\_SB.PCI0.P0P3.VGA.GETD",
164 /* L3C */ 163 /* A6T, A6M */
165 "\\_SB.PCI0.PCI1.VGAC.NMAP", 164 "\\_SB.PCI0.P0PA.VGA.GETD",
166 /* Z96F */ 165 /* L3C */
167 "\\_SB.PCI0.VGA.GETD", 166 "\\_SB.PCI0.PCI1.VGAC.NMAP",
168 /* A2D */ 167 /* Z96F */
169 "\\ACTD", 168 "\\_SB.PCI0.VGA.GETD",
170 /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */ 169 /* A2D */
171 "\\ADVG", 170 "\\ACTD",
172 /* P30 */ 171 /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
173 "\\DNXT", 172 "\\ADVG",
174 /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */ 173 /* P30 */
175 "\\INFB", 174 "\\DNXT",
176 /* A3F A6F A3N A3L M6N W3N W6A */ 175 /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
177 "\\SSTE"); 176 "\\INFB",
178 177 /* A3F A6F A3N A3L M6N W3N W6A */
179ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */ 178 "\\SSTE"};
180ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */ 179
180#define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */
181#define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */
181 182
182/* GPS */ 183/* GPS */
183/* R2H use different handle for GPS on/off */ 184/* R2H use different handle for GPS on/off */
184ASUS_HANDLE(gps_on, ASUS_HOTK_PREFIX "SDON"); /* R2H */ 185#define METHOD_GPS_ON "SDON"
185ASUS_HANDLE(gps_off, ASUS_HOTK_PREFIX "SDOF"); /* R2H */ 186#define METHOD_GPS_OFF "SDOF"
186ASUS_HANDLE(gps_status, ASUS_HOTK_PREFIX "GPST"); 187#define METHOD_GPS_STATUS "GPST"
187 188
188/* Keyboard light */ 189/* Keyboard light */
189ASUS_HANDLE(kled_set, ASUS_HOTK_PREFIX "SLKB"); 190#define METHOD_KBD_LIGHT_SET "SLKB"
190ASUS_HANDLE(kled_get, ASUS_HOTK_PREFIX "GLKB"); 191#define METHOD_KBD_LIGHT_GET "GLKB"
191 192
192/* 193/*
193 * This is the main structure, we can use it to store anything interesting 194 * Define a specific led structure to keep the main structure clean
194 * about the hotk device
195 */ 195 */
196struct asus_hotk { 196struct asus_led {
197 char *name; /* laptop name */ 197 int wk;
198 struct acpi_device *device; /* the device we are in */ 198 struct work_struct work;
199 acpi_handle handle; /* the handle of the hotk device */ 199 struct led_classdev led;
200 char status; /* status of the hotk, for LEDs, ... */ 200 struct asus_laptop *asus;
201 u32 ledd_status; /* status of the LED display */ 201 const char *method;
202 u8 light_level; /* light sensor level */
203 u8 light_switch; /* light sensor switch value */
204 u16 event_count[128]; /* count for each event TODO make this better */
205 struct input_dev *inputdev;
206 u16 *keycode_map;
207}; 202};
208 203
209/* 204/*
210 * This header is made available to allow proper configuration given model, 205 * This is the main structure, we can use it to store anything interesting
211 * revision number , ... this info cannot go in struct asus_hotk because it is 206 * about the hotk device
212 * available before the hotk
213 */
214static struct acpi_table_header *asus_info;
215
216/* The actual device the driver binds to */
217static struct asus_hotk *hotk;
218
219/*
220 * The hotkey driver declaration
221 */ 207 */
222static const struct acpi_device_id asus_device_ids[] = { 208struct asus_laptop {
223 {"ATK0100", 0}, 209 char *name; /* laptop name */
224 {"ATK0101", 0},
225 {"", 0},
226};
227MODULE_DEVICE_TABLE(acpi, asus_device_ids);
228 210
229static int asus_hotk_add(struct acpi_device *device); 211 struct acpi_table_header *dsdt_info;
230static int asus_hotk_remove(struct acpi_device *device, int type); 212 struct platform_device *platform_device;
231static void asus_hotk_notify(struct acpi_device *device, u32 event); 213 struct acpi_device *device; /* the device we are in */
214 struct backlight_device *backlight_device;
232 215
233static struct acpi_driver asus_hotk_driver = { 216 struct input_dev *inputdev;
234 .name = ASUS_HOTK_NAME, 217 struct key_entry *keymap;
235 .class = ASUS_HOTK_CLASS,
236 .owner = THIS_MODULE,
237 .ids = asus_device_ids,
238 .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
239 .ops = {
240 .add = asus_hotk_add,
241 .remove = asus_hotk_remove,
242 .notify = asus_hotk_notify,
243 },
244};
245 218
246/* The backlight device /sys/class/backlight */ 219 struct asus_led mled;
247static struct backlight_device *asus_backlight_device; 220 struct asus_led tled;
221 struct asus_led rled;
222 struct asus_led pled;
223 struct asus_led gled;
224 struct asus_led kled;
225 struct workqueue_struct *led_workqueue;
248 226
249/* 227 int wireless_status;
250 * The backlight class declaration 228 bool have_rsts;
251 */ 229 int lcd_state;
252static int read_brightness(struct backlight_device *bd);
253static int update_bl_status(struct backlight_device *bd);
254static struct backlight_ops asusbl_ops = {
255 .get_brightness = read_brightness,
256 .update_status = update_bl_status,
257};
258 230
259/* 231 struct rfkill *gps_rfkill;
260 * These functions actually update the LED's, and are called from a
261 * workqueue. By doing this as separate work rather than when the LED
262 * subsystem asks, we avoid messing with the Asus ACPI stuff during a
263 * potentially bad time, such as a timer interrupt.
264 */
265static struct workqueue_struct *led_workqueue;
266
267#define ASUS_LED(object, ledname, max) \
268 static void object##_led_set(struct led_classdev *led_cdev, \
269 enum led_brightness value); \
270 static enum led_brightness object##_led_get( \
271 struct led_classdev *led_cdev); \
272 static void object##_led_update(struct work_struct *ignored); \
273 static int object##_led_wk; \
274 static DECLARE_WORK(object##_led_work, object##_led_update); \
275 static struct led_classdev object##_led = { \
276 .name = "asus::" ledname, \
277 .brightness_set = object##_led_set, \
278 .brightness_get = object##_led_get, \
279 .max_brightness = max \
280 }
281 232
282ASUS_LED(mled, "mail", 1); 233 acpi_handle handle; /* the handle of the hotk device */
283ASUS_LED(tled, "touchpad", 1); 234 u32 ledd_status; /* status of the LED display */
284ASUS_LED(rled, "record", 1); 235 u8 light_level; /* light sensor level */
285ASUS_LED(pled, "phone", 1); 236 u8 light_switch; /* light sensor switch value */
286ASUS_LED(gled, "gaming", 1); 237 u16 event_count[128]; /* count for each event TODO make this better */
287ASUS_LED(kled, "kbd_backlight", 3); 238 u16 *keycode_map;
288
289struct key_entry {
290 char type;
291 u8 code;
292 u16 keycode;
293}; 239};
294 240
295enum { KE_KEY, KE_END }; 241static const struct key_entry asus_keymap[] = {
296 242 /* Lenovo SL Specific keycodes */
297static struct key_entry asus_keymap[] = { 243 {KE_KEY, 0x02, { KEY_SCREENLOCK } },
298 {KE_KEY, 0x02, KEY_SCREENLOCK}, 244 {KE_KEY, 0x05, { KEY_WLAN } },
299 {KE_KEY, 0x05, KEY_WLAN}, 245 {KE_KEY, 0x08, { KEY_F13 } },
300 {KE_KEY, 0x08, KEY_F13}, 246 {KE_KEY, 0x17, { KEY_ZOOM } },
301 {KE_KEY, 0x17, KEY_ZOOM}, 247 {KE_KEY, 0x1f, { KEY_BATTERY } },
302 {KE_KEY, 0x1f, KEY_BATTERY}, 248 /* End of Lenovo SL Specific keycodes */
303 {KE_KEY, 0x30, KEY_VOLUMEUP}, 249 {KE_KEY, 0x30, { KEY_VOLUMEUP } },
304 {KE_KEY, 0x31, KEY_VOLUMEDOWN}, 250 {KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
305 {KE_KEY, 0x32, KEY_MUTE}, 251 {KE_KEY, 0x32, { KEY_MUTE } },
306 {KE_KEY, 0x33, KEY_SWITCHVIDEOMODE}, 252 {KE_KEY, 0x33, { KEY_SWITCHVIDEOMODE } },
307 {KE_KEY, 0x34, KEY_SWITCHVIDEOMODE}, 253 {KE_KEY, 0x34, { KEY_SWITCHVIDEOMODE } },
308 {KE_KEY, 0x40, KEY_PREVIOUSSONG}, 254 {KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
309 {KE_KEY, 0x41, KEY_NEXTSONG}, 255 {KE_KEY, 0x41, { KEY_NEXTSONG } },
310 {KE_KEY, 0x43, KEY_STOPCD}, 256 {KE_KEY, 0x43, { KEY_STOPCD } },
311 {KE_KEY, 0x45, KEY_PLAYPAUSE}, 257 {KE_KEY, 0x45, { KEY_PLAYPAUSE } },
312 {KE_KEY, 0x4c, KEY_MEDIA}, 258 {KE_KEY, 0x4c, { KEY_MEDIA } },
313 {KE_KEY, 0x50, KEY_EMAIL}, 259 {KE_KEY, 0x50, { KEY_EMAIL } },
314 {KE_KEY, 0x51, KEY_WWW}, 260 {KE_KEY, 0x51, { KEY_WWW } },
315 {KE_KEY, 0x55, KEY_CALC}, 261 {KE_KEY, 0x55, { KEY_CALC } },
316 {KE_KEY, 0x5C, KEY_SCREENLOCK}, /* Screenlock */ 262 {KE_KEY, 0x5C, { KEY_SCREENLOCK } }, /* Screenlock */
317 {KE_KEY, 0x5D, KEY_WLAN}, 263 {KE_KEY, 0x5D, { KEY_WLAN } },
318 {KE_KEY, 0x5E, KEY_WLAN}, 264 {KE_KEY, 0x5E, { KEY_WLAN } },
319 {KE_KEY, 0x5F, KEY_WLAN}, 265 {KE_KEY, 0x5F, { KEY_WLAN } },
320 {KE_KEY, 0x60, KEY_SWITCHVIDEOMODE}, 266 {KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
321 {KE_KEY, 0x61, KEY_SWITCHVIDEOMODE}, 267 {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
322 {KE_KEY, 0x62, KEY_SWITCHVIDEOMODE}, 268 {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
323 {KE_KEY, 0x63, KEY_SWITCHVIDEOMODE}, 269 {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
324 {KE_KEY, 0x6B, KEY_F13}, /* Lock Touchpad */ 270 {KE_KEY, 0x6B, { KEY_F13 } }, /* Lock Touchpad */
325 {KE_KEY, 0x82, KEY_CAMERA}, 271 {KE_KEY, 0x7E, { KEY_BLUETOOTH } },
326 {KE_KEY, 0x88, KEY_WLAN }, 272 {KE_KEY, 0x7D, { KEY_BLUETOOTH } },
327 {KE_KEY, 0x8A, KEY_PROG1}, 273 {KE_KEY, 0x82, { KEY_CAMERA } },
328 {KE_KEY, 0x95, KEY_MEDIA}, 274 {KE_KEY, 0x88, { KEY_WLAN } },
329 {KE_KEY, 0x99, KEY_PHONE}, 275 {KE_KEY, 0x8A, { KEY_PROG1 } },
330 {KE_KEY, 0xc4, KEY_KBDILLUMUP}, 276 {KE_KEY, 0x95, { KEY_MEDIA } },
331 {KE_KEY, 0xc5, KEY_KBDILLUMDOWN}, 277 {KE_KEY, 0x99, { KEY_PHONE } },
278 {KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
279 {KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
332 {KE_END, 0}, 280 {KE_END, 0},
333}; 281};
334 282
283
335/* 284/*
336 * This function evaluates an ACPI method, given an int as parameter, the 285 * This function evaluates an ACPI method, given an int as parameter, the
337 * method is searched within the scope of the handle, can be NULL. The output 286 * method is searched within the scope of the handle, can be NULL. The output
@@ -339,8 +288,8 @@ static struct key_entry asus_keymap[] = {
339 * 288 *
340 * returns 0 if write is successful, -1 else. 289 * returns 0 if write is successful, -1 else.
341 */ 290 */
342static int write_acpi_int(acpi_handle handle, const char *method, int val, 291static int write_acpi_int_ret(acpi_handle handle, const char *method, int val,
343 struct acpi_buffer *output) 292 struct acpi_buffer *output)
344{ 293{
345 struct acpi_object_list params; /* list of input parameters (an int) */ 294 struct acpi_object_list params; /* list of input parameters (an int) */
346 union acpi_object in_obj; /* the only param we use */ 295 union acpi_object in_obj; /* the only param we use */
@@ -361,102 +310,82 @@ static int write_acpi_int(acpi_handle handle, const char *method, int val,
361 return -1; 310 return -1;
362} 311}
363 312
364static int read_wireless_status(int mask) 313static int write_acpi_int(acpi_handle handle, const char *method, int val)
365{ 314{
366 unsigned long long status; 315 return write_acpi_int_ret(handle, method, val, NULL);
367 acpi_status rv = AE_OK; 316}
317
318static int acpi_check_handle(acpi_handle handle, const char *method,
319 acpi_handle *ret)
320{
321 acpi_status status;
368 322
369 if (!wireless_status_handle) 323 if (method == NULL)
370 return (hotk->status & mask) ? 1 : 0; 324 return -ENODEV;
371 325
372 rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status); 326 if (ret)
373 if (ACPI_FAILURE(rv)) 327 status = acpi_get_handle(handle, (char *)method,
374 pr_warning("Error reading Wireless status\n"); 328 ret);
375 else 329 else {
376 return (status & mask) ? 1 : 0; 330 acpi_handle dummy;
377 331
378 return (hotk->status & mask) ? 1 : 0; 332 status = acpi_get_handle(handle, (char *)method,
333 &dummy);
334 }
335
336 if (status != AE_OK) {
337 if (ret)
338 pr_warning("Error finding %s\n", method);
339 return -ENODEV;
340 }
341 return 0;
379} 342}
380 343
381static int read_gps_status(void) 344/* Generic LED function */
345static int asus_led_set(struct asus_laptop *asus, const char *method,
346 int value)
382{ 347{
383 unsigned long long status; 348 if (!strcmp(method, METHOD_MLED))
384 acpi_status rv = AE_OK; 349 value = !value;
385 350 else if (!strcmp(method, METHOD_GLED))
386 rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status); 351 value = !value + 1;
387 if (ACPI_FAILURE(rv))
388 pr_warning("Error reading GPS status\n");
389 else 352 else
390 return status ? 1 : 0; 353 value = !!value;
391 354
392 return (hotk->status & GPS_ON) ? 1 : 0; 355 return write_acpi_int(asus->handle, method, value);
393} 356}
394 357
395/* Generic LED functions */ 358/*
396static int read_status(int mask) 359 * LEDs
360 */
361/* /sys/class/led handlers */
362static void asus_led_cdev_set(struct led_classdev *led_cdev,
363 enum led_brightness value)
397{ 364{
398 /* There is a special method for both wireless devices */ 365 struct asus_led *led = container_of(led_cdev, struct asus_led, led);
399 if (mask == BT_ON || mask == WL_ON) 366 struct asus_laptop *asus = led->asus;
400 return read_wireless_status(mask);
401 else if (mask == GPS_ON)
402 return read_gps_status();
403 367
404 return (hotk->status & mask) ? 1 : 0; 368 led->wk = !!value;
369 queue_work(asus->led_workqueue, &led->work);
405} 370}
406 371
407static void write_status(acpi_handle handle, int out, int mask) 372static void asus_led_cdev_update(struct work_struct *work)
408{ 373{
409 hotk->status = (out) ? (hotk->status | mask) : (hotk->status & ~mask); 374 struct asus_led *led = container_of(work, struct asus_led, work);
410 375 struct asus_laptop *asus = led->asus;
411 switch (mask) {
412 case MLED_ON:
413 out = !(out & 0x1);
414 break;
415 case GLED_ON:
416 out = (out & 0x1) + 1;
417 break;
418 case GPS_ON:
419 handle = (out) ? gps_on_handle : gps_off_handle;
420 out = 0x02;
421 break;
422 default:
423 out &= 0x1;
424 break;
425 }
426 376
427 if (write_acpi_int(handle, NULL, out, NULL)) 377 asus_led_set(asus, led->method, led->wk);
428 pr_warning(" write failed %x\n", mask);
429} 378}
430 379
431/* /sys/class/led handlers */ 380static enum led_brightness asus_led_cdev_get(struct led_classdev *led_cdev)
432#define ASUS_LED_HANDLER(object, mask) \ 381{
433 static void object##_led_set(struct led_classdev *led_cdev, \ 382 return led_cdev->brightness;
434 enum led_brightness value) \ 383}
435 { \
436 object##_led_wk = (value > 0) ? 1 : 0; \
437 queue_work(led_workqueue, &object##_led_work); \
438 } \
439 static void object##_led_update(struct work_struct *ignored) \
440 { \
441 int value = object##_led_wk; \
442 write_status(object##_set_handle, value, (mask)); \
443 } \
444 static enum led_brightness object##_led_get( \
445 struct led_classdev *led_cdev) \
446 { \
447 return led_cdev->brightness; \
448 }
449
450ASUS_LED_HANDLER(mled, MLED_ON);
451ASUS_LED_HANDLER(pled, PLED_ON);
452ASUS_LED_HANDLER(rled, RLED_ON);
453ASUS_LED_HANDLER(tled, TLED_ON);
454ASUS_LED_HANDLER(gled, GLED_ON);
455 384
456/* 385/*
457 * Keyboard backlight 386 * Keyboard backlight (also a LED)
458 */ 387 */
459static int get_kled_lvl(void) 388static int asus_kled_lvl(struct asus_laptop *asus)
460{ 389{
461 unsigned long long kblv; 390 unsigned long long kblv;
462 struct acpi_object_list params; 391 struct acpi_object_list params;
@@ -468,75 +397,183 @@ static int get_kled_lvl(void)
468 in_obj.type = ACPI_TYPE_INTEGER; 397 in_obj.type = ACPI_TYPE_INTEGER;
469 in_obj.integer.value = 2; 398 in_obj.integer.value = 2;
470 399
471 rv = acpi_evaluate_integer(kled_get_handle, NULL, &params, &kblv); 400 rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET,
401 &params, &kblv);
472 if (ACPI_FAILURE(rv)) { 402 if (ACPI_FAILURE(rv)) {
473 pr_warning("Error reading kled level\n"); 403 pr_warning("Error reading kled level\n");
474 return 0; 404 return -ENODEV;
475 } 405 }
476 return kblv; 406 return kblv;
477} 407}
478 408
479static int set_kled_lvl(int kblv) 409static int asus_kled_set(struct asus_laptop *asus, int kblv)
480{ 410{
481 if (kblv > 0) 411 if (kblv > 0)
482 kblv = (1 << 7) | (kblv & 0x7F); 412 kblv = (1 << 7) | (kblv & 0x7F);
483 else 413 else
484 kblv = 0; 414 kblv = 0;
485 415
486 if (write_acpi_int(kled_set_handle, NULL, kblv, NULL)) { 416 if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) {
487 pr_warning("Keyboard LED display write failed\n"); 417 pr_warning("Keyboard LED display write failed\n");
488 return -EINVAL; 418 return -EINVAL;
489 } 419 }
490 return 0; 420 return 0;
491} 421}
492 422
493static void kled_led_set(struct led_classdev *led_cdev, 423static void asus_kled_cdev_set(struct led_classdev *led_cdev,
494 enum led_brightness value) 424 enum led_brightness value)
495{ 425{
496 kled_led_wk = value; 426 struct asus_led *led = container_of(led_cdev, struct asus_led, led);
497 queue_work(led_workqueue, &kled_led_work); 427 struct asus_laptop *asus = led->asus;
428
429 led->wk = value;
430 queue_work(asus->led_workqueue, &led->work);
498} 431}
499 432
500static void kled_led_update(struct work_struct *ignored) 433static void asus_kled_cdev_update(struct work_struct *work)
501{ 434{
502 set_kled_lvl(kled_led_wk); 435 struct asus_led *led = container_of(work, struct asus_led, work);
436 struct asus_laptop *asus = led->asus;
437
438 asus_kled_set(asus, led->wk);
503} 439}
504 440
505static enum led_brightness kled_led_get(struct led_classdev *led_cdev) 441static enum led_brightness asus_kled_cdev_get(struct led_classdev *led_cdev)
506{ 442{
507 return get_kled_lvl(); 443 struct asus_led *led = container_of(led_cdev, struct asus_led, led);
444 struct asus_laptop *asus = led->asus;
445
446 return asus_kled_lvl(asus);
508} 447}
509 448
510static int get_lcd_state(void) 449static void asus_led_exit(struct asus_laptop *asus)
511{ 450{
512 return read_status(LCD_ON); 451 if (asus->mled.led.dev)
452 led_classdev_unregister(&asus->mled.led);
453 if (asus->tled.led.dev)
454 led_classdev_unregister(&asus->tled.led);
455 if (asus->pled.led.dev)
456 led_classdev_unregister(&asus->pled.led);
457 if (asus->rled.led.dev)
458 led_classdev_unregister(&asus->rled.led);
459 if (asus->gled.led.dev)
460 led_classdev_unregister(&asus->gled.led);
461 if (asus->kled.led.dev)
462 led_classdev_unregister(&asus->kled.led);
463 if (asus->led_workqueue) {
464 destroy_workqueue(asus->led_workqueue);
465 asus->led_workqueue = NULL;
466 }
513} 467}
514 468
515static int set_lcd_state(int value) 469/* Ugly macro, need to fix that later */
470static int asus_led_register(struct asus_laptop *asus,
471 struct asus_led *led,
472 const char *name, const char *method)
473{
474 struct led_classdev *led_cdev = &led->led;
475
476 if (!method || acpi_check_handle(asus->handle, method, NULL))
477 return 0; /* Led not present */
478
479 led->asus = asus;
480 led->method = method;
481
482 INIT_WORK(&led->work, asus_led_cdev_update);
483 led_cdev->name = name;
484 led_cdev->brightness_set = asus_led_cdev_set;
485 led_cdev->brightness_get = asus_led_cdev_get;
486 led_cdev->max_brightness = 1;
487 return led_classdev_register(&asus->platform_device->dev, led_cdev);
488}
489
490static int asus_led_init(struct asus_laptop *asus)
491{
492 int r;
493
494 /*
495 * Functions that actually update the LED's are called from a
496 * workqueue. By doing this as separate work rather than when the LED
497 * subsystem asks, we avoid messing with the Asus ACPI stuff during a
498 * potentially bad time, such as a timer interrupt.
499 */
500 asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
501 if (!asus->led_workqueue)
502 return -ENOMEM;
503
504 r = asus_led_register(asus, &asus->mled, "asus::mail", METHOD_MLED);
505 if (r)
506 goto error;
507 r = asus_led_register(asus, &asus->tled, "asus::touchpad", METHOD_TLED);
508 if (r)
509 goto error;
510 r = asus_led_register(asus, &asus->rled, "asus::record", METHOD_RLED);
511 if (r)
512 goto error;
513 r = asus_led_register(asus, &asus->pled, "asus::phone", METHOD_PLED);
514 if (r)
515 goto error;
516 r = asus_led_register(asus, &asus->gled, "asus::gaming", METHOD_GLED);
517 if (r)
518 goto error;
519 if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL) &&
520 !acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_GET, NULL)) {
521 struct asus_led *led = &asus->kled;
522 struct led_classdev *cdev = &led->led;
523
524 led->asus = asus;
525
526 INIT_WORK(&led->work, asus_kled_cdev_update);
527 cdev->name = "asus::kbd_backlight";
528 cdev->brightness_set = asus_kled_cdev_set;
529 cdev->brightness_get = asus_kled_cdev_get;
530 cdev->max_brightness = 3;
531 r = led_classdev_register(&asus->platform_device->dev, cdev);
532 }
533error:
534 if (r)
535 asus_led_exit(asus);
536 return r;
537}
538
539/*
540 * Backlight device
541 */
542static int asus_lcd_status(struct asus_laptop *asus)
543{
544 return asus->lcd_state;
545}
546
547static int asus_lcd_set(struct asus_laptop *asus, int value)
516{ 548{
517 int lcd = 0; 549 int lcd = 0;
518 acpi_status status = 0; 550 acpi_status status = 0;
519 551
520 lcd = value ? 1 : 0; 552 lcd = !!value;
521 553
522 if (lcd == get_lcd_state()) 554 if (lcd == asus_lcd_status(asus))
523 return 0; 555 return 0;
524 556
525 if (lcd_switch_handle) { 557 if (!lcd_switch_handle)
526 status = acpi_evaluate_object(lcd_switch_handle, 558 return -ENODEV;
527 NULL, NULL, NULL); 559
560 status = acpi_evaluate_object(lcd_switch_handle,
561 NULL, NULL, NULL);
528 562
529 if (ACPI_FAILURE(status)) 563 if (ACPI_FAILURE(status)) {
530 pr_warning("Error switching LCD\n"); 564 pr_warning("Error switching LCD\n");
565 return -ENODEV;
531 } 566 }
532 567
533 write_status(NULL, lcd, LCD_ON); 568 asus->lcd_state = lcd;
534 return 0; 569 return 0;
535} 570}
536 571
537static void lcd_blank(int blank) 572static void lcd_blank(struct asus_laptop *asus, int blank)
538{ 573{
539 struct backlight_device *bd = asus_backlight_device; 574 struct backlight_device *bd = asus->backlight_device;
575
576 asus->lcd_state = (blank == FB_BLANK_UNBLANK);
540 577
541 if (bd) { 578 if (bd) {
542 bd->props.power = blank; 579 bd->props.power = blank;
@@ -544,44 +581,91 @@ static void lcd_blank(int blank)
544 } 581 }
545} 582}
546 583
547static int read_brightness(struct backlight_device *bd) 584static int asus_read_brightness(struct backlight_device *bd)
548{ 585{
586 struct asus_laptop *asus = bl_get_data(bd);
549 unsigned long long value; 587 unsigned long long value;
550 acpi_status rv = AE_OK; 588 acpi_status rv = AE_OK;
551 589
552 rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value); 590 rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
591 NULL, &value);
553 if (ACPI_FAILURE(rv)) 592 if (ACPI_FAILURE(rv))
554 pr_warning("Error reading brightness\n"); 593 pr_warning("Error reading brightness\n");
555 594
556 return value; 595 return value;
557} 596}
558 597
559static int set_brightness(struct backlight_device *bd, int value) 598static int asus_set_brightness(struct backlight_device *bd, int value)
560{ 599{
561 int ret = 0; 600 struct asus_laptop *asus = bl_get_data(bd);
562
563 value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
564 /* 0 <= value <= 15 */
565 601
566 if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) { 602 if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) {
567 pr_warning("Error changing brightness\n"); 603 pr_warning("Error changing brightness\n");
568 ret = -EIO; 604 return -EIO;
569 } 605 }
570 606 return 0;
571 return ret;
572} 607}
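/*
 * Illustrative sketch, not part of the diff: the write_acpi_int() and
 * write_acpi_int_ret() helpers called throughout these hunks are not shown
 * here.  A typical implementation wraps acpi_evaluate_object() with a
 * single integer argument; the exact bodies in the driver may differ.
 */
static int write_acpi_int_ret(acpi_handle handle, const char *method, int val,
			      struct acpi_buffer *output)
{
	struct acpi_object_list params;	/* one integer input parameter */
	union acpi_object in_obj;
	acpi_status status;

	if (!handle)
		return -1;

	params.count = 1;
	params.pointer = &in_obj;
	in_obj.type = ACPI_TYPE_INTEGER;
	in_obj.integer.value = val;

	status = acpi_evaluate_object(handle, (char *)method, &params, output);
	return ACPI_FAILURE(status) ? -1 : 0;
}

static int write_acpi_int(acpi_handle handle, const char *method, int val)
{
	return write_acpi_int_ret(handle, method, val, NULL);
}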
573 608
574static int update_bl_status(struct backlight_device *bd) 609static int update_bl_status(struct backlight_device *bd)
575{ 610{
611 struct asus_laptop *asus = bl_get_data(bd);
576 int rv; 612 int rv;
577 int value = bd->props.brightness; 613 int value = bd->props.brightness;
578 614
579 rv = set_brightness(bd, value); 615 rv = asus_set_brightness(bd, value);
580 if (rv) 616 if (rv)
581 return rv; 617 return rv;
582 618
583 value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0; 619 value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0;
584 return set_lcd_state(value); 620 return asus_lcd_set(asus, value);
621}
622
623static struct backlight_ops asusbl_ops = {
624 .get_brightness = asus_read_brightness,
625 .update_status = update_bl_status,
626};
627
628static int asus_backlight_notify(struct asus_laptop *asus)
629{
630 struct backlight_device *bd = asus->backlight_device;
631 int old = bd->props.brightness;
632
633 backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
634
635 return old;
636}
637
638static int asus_backlight_init(struct asus_laptop *asus)
639{
640 struct backlight_device *bd;
641 struct device *dev = &asus->platform_device->dev;
642
643 if (!acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) &&
644 !acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) &&
645 lcd_switch_handle) {
646 bd = backlight_device_register(ASUS_LAPTOP_FILE, dev,
647 asus, &asusbl_ops);
648 if (IS_ERR(bd)) {
649 pr_err("Could not register asus backlight device\n");
650 asus->backlight_device = NULL;
651 return PTR_ERR(bd);
652 }
653
654 asus->backlight_device = bd;
655
656 bd->props.max_brightness = 15;
657 bd->props.power = FB_BLANK_UNBLANK;
658 bd->props.brightness = asus_read_brightness(bd);
659 backlight_update_status(bd);
660 }
661 return 0;
662}
663
664static void asus_backlight_exit(struct asus_laptop *asus)
665{
666 if (asus->backlight_device)
667 backlight_device_unregister(asus->backlight_device);
668 asus->backlight_device = NULL;
585} 669}
586 670
587/* 671/*
@@ -596,25 +680,26 @@ static int update_bl_status(struct backlight_device *bd)
596static ssize_t show_infos(struct device *dev, 680static ssize_t show_infos(struct device *dev,
597 struct device_attribute *attr, char *page) 681 struct device_attribute *attr, char *page)
598{ 682{
683 struct asus_laptop *asus = dev_get_drvdata(dev);
599 int len = 0; 684 int len = 0;
600 unsigned long long temp; 685 unsigned long long temp;
601 char buf[16]; /* enough for all info */ 686 char buf[16]; /* enough for all info */
602 acpi_status rv = AE_OK; 687 acpi_status rv = AE_OK;
603 688
604 /* 689 /*
605 * We use the easy way, we don't care about off and count, so we don't set eof 690 * We use the easy way, we don't care about off and count,
606 * to 1 691 * so we don't set eof to 1
607 */ 692 */
608 693
609 len += sprintf(page, ASUS_HOTK_NAME " " ASUS_LAPTOP_VERSION "\n"); 694 len += sprintf(page, ASUS_LAPTOP_NAME " " ASUS_LAPTOP_VERSION "\n");
610 len += sprintf(page + len, "Model reference : %s\n", hotk->name); 695 len += sprintf(page + len, "Model reference : %s\n", asus->name);
611 /* 696 /*
612 * The SFUN method probably allows the original driver to get the list 697 * The SFUN method probably allows the original driver to get the list
613 * of features supported by a given model. For now, 0x0100 or 0x0800 698 * of features supported by a given model. For now, 0x0100 or 0x0800
614 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card. 699 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
615 * The significance of others is yet to be found. 700 * The significance of others is yet to be found.
616 */ 701 */
617 rv = acpi_evaluate_integer(hotk->handle, "SFUN", NULL, &temp); 702 rv = acpi_evaluate_integer(asus->handle, "SFUN", NULL, &temp);
618 if (!ACPI_FAILURE(rv)) 703 if (!ACPI_FAILURE(rv))
619 len += sprintf(page + len, "SFUN value : %#x\n", 704 len += sprintf(page + len, "SFUN value : %#x\n",
620 (uint) temp); 705 (uint) temp);
@@ -624,7 +709,7 @@ static ssize_t show_infos(struct device *dev,
624 * The significance of others is yet to be found. 709 * The significance of others is yet to be found.
625 * If we don't find the method, we assume the devices are present. 710 * If we don't find the method, we assume the devices are present.
626 */ 711 */
627 rv = acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &temp); 712 rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp);
628 if (!ACPI_FAILURE(rv)) 713 if (!ACPI_FAILURE(rv))
629 len += sprintf(page + len, "HRWS value : %#x\n", 714 len += sprintf(page + len, "HRWS value : %#x\n",
630 (uint) temp); 715 (uint) temp);
@@ -635,26 +720,26 @@ static ssize_t show_infos(struct device *dev,
635 * Note: since not all the laptops provide this method, errors are 720 * Note: since not all the laptops provide this method, errors are
636 * silently ignored. 721 * silently ignored.
637 */ 722 */
638 rv = acpi_evaluate_integer(hotk->handle, "ASYM", NULL, &temp); 723 rv = acpi_evaluate_integer(asus->handle, "ASYM", NULL, &temp);
639 if (!ACPI_FAILURE(rv)) 724 if (!ACPI_FAILURE(rv))
640 len += sprintf(page + len, "ASYM value : %#x\n", 725 len += sprintf(page + len, "ASYM value : %#x\n",
641 (uint) temp); 726 (uint) temp);
642 if (asus_info) { 727 if (asus->dsdt_info) {
643 snprintf(buf, 16, "%d", asus_info->length); 728 snprintf(buf, 16, "%d", asus->dsdt_info->length);
644 len += sprintf(page + len, "DSDT length : %s\n", buf); 729 len += sprintf(page + len, "DSDT length : %s\n", buf);
645 snprintf(buf, 16, "%d", asus_info->checksum); 730 snprintf(buf, 16, "%d", asus->dsdt_info->checksum);
646 len += sprintf(page + len, "DSDT checksum : %s\n", buf); 731 len += sprintf(page + len, "DSDT checksum : %s\n", buf);
647 snprintf(buf, 16, "%d", asus_info->revision); 732 snprintf(buf, 16, "%d", asus->dsdt_info->revision);
648 len += sprintf(page + len, "DSDT revision : %s\n", buf); 733 len += sprintf(page + len, "DSDT revision : %s\n", buf);
649 snprintf(buf, 7, "%s", asus_info->oem_id); 734 snprintf(buf, 7, "%s", asus->dsdt_info->oem_id);
650 len += sprintf(page + len, "OEM id : %s\n", buf); 735 len += sprintf(page + len, "OEM id : %s\n", buf);
651 snprintf(buf, 9, "%s", asus_info->oem_table_id); 736 snprintf(buf, 9, "%s", asus->dsdt_info->oem_table_id);
652 len += sprintf(page + len, "OEM table id : %s\n", buf); 737 len += sprintf(page + len, "OEM table id : %s\n", buf);
653 snprintf(buf, 16, "%x", asus_info->oem_revision); 738 snprintf(buf, 16, "%x", asus->dsdt_info->oem_revision);
654 len += sprintf(page + len, "OEM revision : 0x%s\n", buf); 739 len += sprintf(page + len, "OEM revision : 0x%s\n", buf);
655 snprintf(buf, 5, "%s", asus_info->asl_compiler_id); 740 snprintf(buf, 5, "%s", asus->dsdt_info->asl_compiler_id);
656 len += sprintf(page + len, "ASL comp vendor id : %s\n", buf); 741 len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
657 snprintf(buf, 16, "%x", asus_info->asl_compiler_revision); 742 snprintf(buf, 16, "%x", asus->dsdt_info->asl_compiler_revision);
658 len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf); 743 len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf);
659 } 744 }
660 745
@@ -672,8 +757,9 @@ static int parse_arg(const char *buf, unsigned long count, int *val)
672 return count; 757 return count;
673} 758}
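/*
 * Illustrative sketch, not part of the diff: parse_arg() is only visible
 * through its hunk context line above.  The callers rely on the contract
 * that a return value > 0 means "an integer was parsed into *val";
 * something along these lines satisfies it (the real body and its limits
 * may differ).
 */
static int parse_arg(const char *buf, unsigned long count, int *val)
{
	if (!count)
		return 0;			/* nothing written, nothing parsed */
	if (count > 31)
		return -EINVAL;
	if (sscanf(buf, "%i", val) != 1)
		return -EINVAL;
	return count;			/* > 0: tell sysfs the buffer was consumed */
}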
674 759
675static ssize_t store_status(const char *buf, size_t count, 760static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
676 acpi_handle handle, int mask) 761 const char *buf, size_t count,
762 const char *method)
677{ 763{
678 int rv, value; 764 int rv, value;
679 int out = 0; 765 int out = 0;
@@ -682,8 +768,8 @@ static ssize_t store_status(const char *buf, size_t count,
682 if (rv > 0) 768 if (rv > 0)
683 out = value ? 1 : 0; 769 out = value ? 1 : 0;
684 770
685 write_status(handle, out, mask); 771 if (write_acpi_int(asus->handle, method, out))
686 772 return -ENODEV;
687 return rv; 773 return rv;
688} 774}
689 775
@@ -693,67 +779,116 @@ static ssize_t store_status(const char *buf, size_t count,
693static ssize_t show_ledd(struct device *dev, 779static ssize_t show_ledd(struct device *dev,
694 struct device_attribute *attr, char *buf) 780 struct device_attribute *attr, char *buf)
695{ 781{
696 return sprintf(buf, "0x%08x\n", hotk->ledd_status); 782 struct asus_laptop *asus = dev_get_drvdata(dev);
783
784 return sprintf(buf, "0x%08x\n", asus->ledd_status);
697} 785}
698 786
699static ssize_t store_ledd(struct device *dev, struct device_attribute *attr, 787static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
700 const char *buf, size_t count) 788 const char *buf, size_t count)
701{ 789{
790 struct asus_laptop *asus = dev_get_drvdata(dev);
702 int rv, value; 791 int rv, value;
703 792
704 rv = parse_arg(buf, count, &value); 793 rv = parse_arg(buf, count, &value);
705 if (rv > 0) { 794 if (rv > 0) {
706 if (write_acpi_int(ledd_set_handle, NULL, value, NULL)) 795 if (write_acpi_int(asus->handle, METHOD_LEDD, value))
707 pr_warning("LED display write failed\n"); 796 pr_warning("LED display write failed\n");
708 else 797 else
709 hotk->ledd_status = (u32) value; 798 asus->ledd_status = (u32) value;
710 } 799 }
711 return rv; 800 return rv;
712} 801}
713 802
714/* 803/*
804 * Wireless
805 */
806static int asus_wireless_status(struct asus_laptop *asus, int mask)
807{
808 unsigned long long status;
809 acpi_status rv = AE_OK;
810
811 if (!asus->have_rsts)
812 return (asus->wireless_status & mask) ? 1 : 0;
813
814 rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS,
815 NULL, &status);
816 if (ACPI_FAILURE(rv)) {
817 pr_warning("Error reading Wireless status\n");
818 return -EINVAL;
819 }
820 return !!(status & mask);
821}
822
823/*
715 * WLAN 824 * WLAN
716 */ 825 */
826static int asus_wlan_set(struct asus_laptop *asus, int status)
827{
828 if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) {
829 pr_warning("Error setting wlan status to %d\n", status);
830 return -EIO;
831 }
832 return 0;
833}
834
717static ssize_t show_wlan(struct device *dev, 835static ssize_t show_wlan(struct device *dev,
718 struct device_attribute *attr, char *buf) 836 struct device_attribute *attr, char *buf)
719{ 837{
720 return sprintf(buf, "%d\n", read_status(WL_ON)); 838 struct asus_laptop *asus = dev_get_drvdata(dev);
839
840 return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS));
721} 841}
722 842
723static ssize_t store_wlan(struct device *dev, struct device_attribute *attr, 843static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
724 const char *buf, size_t count) 844 const char *buf, size_t count)
725{ 845{
726 return store_status(buf, count, wl_switch_handle, WL_ON); 846 struct asus_laptop *asus = dev_get_drvdata(dev);
847
848 return sysfs_acpi_set(asus, buf, count, METHOD_WLAN);
727} 849}
728 850
729/* 851/*
730 * Bluetooth 852 * Bluetooth
731 */ 853 */
854static int asus_bluetooth_set(struct asus_laptop *asus, int status)
855{
856 if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) {
857 pr_warning("Error setting bluetooth status to %d\n", status);
858 return -EIO;
859 }
860 return 0;
861}
862
732static ssize_t show_bluetooth(struct device *dev, 863static ssize_t show_bluetooth(struct device *dev,
733 struct device_attribute *attr, char *buf) 864 struct device_attribute *attr, char *buf)
734{ 865{
735 return sprintf(buf, "%d\n", read_status(BT_ON)); 866 struct asus_laptop *asus = dev_get_drvdata(dev);
867
868 return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS));
736} 869}
737 870
738static ssize_t store_bluetooth(struct device *dev, 871static ssize_t store_bluetooth(struct device *dev,
739 struct device_attribute *attr, const char *buf, 872 struct device_attribute *attr, const char *buf,
740 size_t count) 873 size_t count)
741{ 874{
742 return store_status(buf, count, bt_switch_handle, BT_ON); 875 struct asus_laptop *asus = dev_get_drvdata(dev);
876
877 return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH);
743} 878}
744 879
745/* 880/*
746 * Display 881 * Display
747 */ 882 */
748static void set_display(int value) 883static void asus_set_display(struct asus_laptop *asus, int value)
749{ 884{
750 /* no sanity check needed for now */ 885 /* no sanity check needed for now */
751 if (write_acpi_int(display_set_handle, NULL, value, NULL)) 886 if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value))
752 pr_warning("Error setting display\n"); 887 pr_warning("Error setting display\n");
753 return; 888 return;
754} 889}
755 890
756static int read_display(void) 891static int read_display(struct asus_laptop *asus)
757{ 892{
758 unsigned long long value = 0; 893 unsigned long long value = 0;
759 acpi_status rv = AE_OK; 894 acpi_status rv = AE_OK;
@@ -769,7 +904,7 @@ static int read_display(void)
769 pr_warning("Error reading display status\n"); 904 pr_warning("Error reading display status\n");
770 } 905 }
771 906
772 value &= 0x0F; /* needed for some models, shouldn't hurt others */ 907 value &= 0x0F; /* needed for some models, shouldn't hurt others */
773 908
774 return value; 909 return value;
775} 910}
@@ -781,7 +916,11 @@ static int read_display(void)
781static ssize_t show_disp(struct device *dev, 916static ssize_t show_disp(struct device *dev,
782 struct device_attribute *attr, char *buf) 917 struct device_attribute *attr, char *buf)
783{ 918{
784 return sprintf(buf, "%d\n", read_display()); 919 struct asus_laptop *asus = dev_get_drvdata(dev);
920
921 if (!display_get_handle)
922 return -ENODEV;
923 return sprintf(buf, "%d\n", read_display(asus));
785} 924}
786 925
787/* 926/*
@@ -794,65 +933,72 @@ static ssize_t show_disp(struct device *dev,
794static ssize_t store_disp(struct device *dev, struct device_attribute *attr, 933static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
795 const char *buf, size_t count) 934 const char *buf, size_t count)
796{ 935{
936 struct asus_laptop *asus = dev_get_drvdata(dev);
797 int rv, value; 937 int rv, value;
798 938
799 rv = parse_arg(buf, count, &value); 939 rv = parse_arg(buf, count, &value);
800 if (rv > 0) 940 if (rv > 0)
801 set_display(value); 941 asus_set_display(asus, value);
802 return rv; 942 return rv;
803} 943}
804 944
805/* 945/*
806 * Light Sens 946 * Light Sens
807 */ 947 */
808static void set_light_sens_switch(int value) 948static void asus_als_switch(struct asus_laptop *asus, int value)
809{ 949{
810 if (write_acpi_int(ls_switch_handle, NULL, value, NULL)) 950 if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value))
811 pr_warning("Error setting light sensor switch\n"); 951 pr_warning("Error setting light sensor switch\n");
812 hotk->light_switch = value; 952 asus->light_switch = value;
813} 953}
814 954
815static ssize_t show_lssw(struct device *dev, 955static ssize_t show_lssw(struct device *dev,
816 struct device_attribute *attr, char *buf) 956 struct device_attribute *attr, char *buf)
817{ 957{
818 return sprintf(buf, "%d\n", hotk->light_switch); 958 struct asus_laptop *asus = dev_get_drvdata(dev);
959
960 return sprintf(buf, "%d\n", asus->light_switch);
819} 961}
820 962
821static ssize_t store_lssw(struct device *dev, struct device_attribute *attr, 963static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
822 const char *buf, size_t count) 964 const char *buf, size_t count)
823{ 965{
966 struct asus_laptop *asus = dev_get_drvdata(dev);
824 int rv, value; 967 int rv, value;
825 968
826 rv = parse_arg(buf, count, &value); 969 rv = parse_arg(buf, count, &value);
827 if (rv > 0) 970 if (rv > 0)
828 set_light_sens_switch(value ? 1 : 0); 971 asus_als_switch(asus, value ? 1 : 0);
829 972
830 return rv; 973 return rv;
831} 974}
832 975
833static void set_light_sens_level(int value) 976static void asus_als_level(struct asus_laptop *asus, int value)
834{ 977{
835 if (write_acpi_int(ls_level_handle, NULL, value, NULL)) 978 if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value))
836 pr_warning("Error setting light sensor level\n"); 979 pr_warning("Error setting light sensor level\n");
837 hotk->light_level = value; 980 asus->light_level = value;
838} 981}
839 982
840static ssize_t show_lslvl(struct device *dev, 983static ssize_t show_lslvl(struct device *dev,
841 struct device_attribute *attr, char *buf) 984 struct device_attribute *attr, char *buf)
842{ 985{
843 return sprintf(buf, "%d\n", hotk->light_level); 986 struct asus_laptop *asus = dev_get_drvdata(dev);
987
988 return sprintf(buf, "%d\n", asus->light_level);
844} 989}
845 990
846static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr, 991static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
847 const char *buf, size_t count) 992 const char *buf, size_t count)
848{ 993{
994 struct asus_laptop *asus = dev_get_drvdata(dev);
849 int rv, value; 995 int rv, value;
850 996
851 rv = parse_arg(buf, count, &value); 997 rv = parse_arg(buf, count, &value);
852 if (rv > 0) { 998 if (rv > 0) {
853 value = (0 < value) ? ((15 < value) ? 15 : value) : 0; 999 value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
854 /* 0 <= value <= 15 */ 1000 /* 0 <= value <= 15 */
855 set_light_sens_level(value); 1001 asus_als_level(asus, value);
856 } 1002 }
857 1003
858 return rv; 1004 return rv;
@@ -861,197 +1007,309 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
861/* 1007/*
862 * GPS 1008 * GPS
863 */ 1009 */
1010static int asus_gps_status(struct asus_laptop *asus)
1011{
1012 unsigned long long status;
1013 acpi_status rv = AE_OK;
1014
1015 rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
1016 NULL, &status);
1017 if (ACPI_FAILURE(rv)) {
1018 pr_warning("Error reading GPS status\n");
1019 return -ENODEV;
1020 }
1021 return !!status;
1022}
1023
1024static int asus_gps_switch(struct asus_laptop *asus, int status)
1025{
1026 const char *meth = status ? METHOD_GPS_ON : METHOD_GPS_OFF;
1027
1028 if (write_acpi_int(asus->handle, meth, 0x02))
1029 return -ENODEV;
1030 return 0;
1031}
1032
864static ssize_t show_gps(struct device *dev, 1033static ssize_t show_gps(struct device *dev,
865 struct device_attribute *attr, char *buf) 1034 struct device_attribute *attr, char *buf)
866{ 1035{
867 return sprintf(buf, "%d\n", read_status(GPS_ON)); 1036 struct asus_laptop *asus = dev_get_drvdata(dev);
1037
1038 return sprintf(buf, "%d\n", asus_gps_status(asus));
868} 1039}
869 1040
870static ssize_t store_gps(struct device *dev, struct device_attribute *attr, 1041static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
871 const char *buf, size_t count) 1042 const char *buf, size_t count)
872{ 1043{
873 return store_status(buf, count, NULL, GPS_ON); 1044 struct asus_laptop *asus = dev_get_drvdata(dev);
1045 int rv, value;
1046 int ret;
1047
1048 rv = parse_arg(buf, count, &value);
1049 if (rv <= 0)
1050 return -EINVAL;
1051 ret = asus_gps_switch(asus, !!value);
1052 if (ret)
1053 return ret;
1054 rfkill_set_sw_state(asus->gps_rfkill, !value);
1055 return rv;
874} 1056}
875 1057
876/* 1058/*
877 * Hotkey functions 1059 * rfkill
878 */ 1060 */
879static struct key_entry *asus_get_entry_by_scancode(int code) 1061static int asus_gps_rfkill_set(void *data, bool blocked)
880{ 1062{
881 struct key_entry *key; 1063 struct asus_laptop *asus = data;
882
883 for (key = asus_keymap; key->type != KE_END; key++)
884 if (code == key->code)
885 return key;
886 1064
887 return NULL; 1065 return asus_gps_switch(asus, !blocked);
888} 1066}
889 1067
890static struct key_entry *asus_get_entry_by_keycode(int code) 1068static const struct rfkill_ops asus_gps_rfkill_ops = {
891{ 1069 .set_block = asus_gps_rfkill_set,
892 struct key_entry *key; 1070};
893
894 for (key = asus_keymap; key->type != KE_END; key++)
895 if (code == key->keycode && key->type == KE_KEY)
896 return key;
897 1071
898 return NULL; 1072static void asus_rfkill_exit(struct asus_laptop *asus)
1073{
1074 if (asus->gps_rfkill) {
1075 rfkill_unregister(asus->gps_rfkill);
1076 rfkill_destroy(asus->gps_rfkill);
1077 asus->gps_rfkill = NULL;
1078 }
899} 1079}
900 1080
901static int asus_getkeycode(struct input_dev *dev, int scancode, int *keycode) 1081static int asus_rfkill_init(struct asus_laptop *asus)
902{ 1082{
903 struct key_entry *key = asus_get_entry_by_scancode(scancode); 1083 int result;
904 1084
905 if (key && key->type == KE_KEY) { 1085 if (acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) ||
906 *keycode = key->keycode; 1086 acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) ||
1087 acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL))
907 return 0; 1088 return 0;
1089
1090 asus->gps_rfkill = rfkill_alloc("asus-gps", &asus->platform_device->dev,
1091 RFKILL_TYPE_GPS,
1092 &asus_gps_rfkill_ops, asus);
1093 if (!asus->gps_rfkill)
1094 return -EINVAL;
1095
1096 result = rfkill_register(asus->gps_rfkill);
1097 if (result) {
1098 rfkill_destroy(asus->gps_rfkill);
1099 asus->gps_rfkill = NULL;
908 } 1100 }
909 1101
910 return -EINVAL; 1102 return result;
911} 1103}
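/*
 * Illustrative sketch, not part of the diff: how the rfkill pieces above
 * fit together at runtime.  When userspace soft-blocks the GPS (for
 * instance through /dev/rfkill), the rfkill core invokes ops->set_block()
 * with the data pointer handed to rfkill_alloc(); when the driver flips
 * the state itself (store_gps() above) it reports that back with
 * rfkill_set_sw_state() so the core's view stays in sync.  The helper
 * name below is made up for the example.
 */
static int asus_gps_example_toggle(struct asus_laptop *asus, bool block)
{
	int err = asus_gps_switch(asus, !block);	/* drive the ACPI method */

	if (!err)
		rfkill_set_sw_state(asus->gps_rfkill, block);	/* mirror sw state */
	return err;
}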
912 1104
913static int asus_setkeycode(struct input_dev *dev, int scancode, int keycode) 1105/*
1106 * Input device (i.e. hotkeys)
1107 */
1108static void asus_input_notify(struct asus_laptop *asus, int event)
914{ 1109{
915 struct key_entry *key; 1110 if (asus->inputdev)
916 int old_keycode; 1111 sparse_keymap_report_event(asus->inputdev, event, 1, true);
1112}
917 1113
918 if (keycode < 0 || keycode > KEY_MAX) 1114static int asus_input_init(struct asus_laptop *asus)
919 return -EINVAL; 1115{
1116 struct input_dev *input;
1117 int error;
920 1118
921 key = asus_get_entry_by_scancode(scancode); 1119 input = input_allocate_device();
922 if (key && key->type == KE_KEY) { 1120 if (!input) {
923 old_keycode = key->keycode; 1121 pr_info("Unable to allocate input device\n");
924 key->keycode = keycode;
925 set_bit(keycode, dev->keybit);
926 if (!asus_get_entry_by_keycode(old_keycode))
927 clear_bit(old_keycode, dev->keybit);
928 return 0; 1122 return 0;
929 } 1123 }
1124 input->name = "Asus Laptop extra buttons";
1125 input->phys = ASUS_LAPTOP_FILE "/input0";
1126 input->id.bustype = BUS_HOST;
1127 input->dev.parent = &asus->platform_device->dev;
1128 input_set_drvdata(input, asus);
1129
1130 error = sparse_keymap_setup(input, asus_keymap, NULL);
1131 if (error) {
1132 pr_err("Unable to setup input device keymap\n");
1133 goto err_keymap;
1134 }
1135 error = input_register_device(input);
1136 if (error) {
1137 pr_info("Unable to register input device\n");
1138 goto err_device;
1139 }
1140
1141 asus->inputdev = input;
1142 return 0;
930 1143
931 return -EINVAL; 1144err_keymap:
1145 sparse_keymap_free(input);
1146err_device:
1147 input_free_device(input);
1148 return error;
932} 1149}
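/*
 * Illustrative sketch, not part of the diff: the asus_keymap[] table passed
 * to sparse_keymap_setup() above is defined elsewhere in the file.  With
 * the sparse-keymap helpers it is simply an array of scancode/keycode
 * pairs terminated by KE_END; the entries below are made-up examples, not
 * the driver's real mappings.
 */
static const struct key_entry asus_keymap_example[] = {
	{ KE_KEY, 0x32, { KEY_MUTE } },		/* hypothetical scancode 0x32 */
	{ KE_KEY, 0x41, { KEY_NEXTSONG } },	/* hypothetical scancode 0x41 */
	{ KE_END, 0 },
};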
933 1150
934static void asus_hotk_notify(struct acpi_device *device, u32 event) 1151static void asus_input_exit(struct asus_laptop *asus)
935{ 1152{
936 static struct key_entry *key; 1153 if (asus->inputdev) {
937 u16 count; 1154 sparse_keymap_free(asus->inputdev);
1155 input_unregister_device(asus->inputdev);
1156 }
1157}
938 1158
939 /* TODO Find a better way to handle events count. */ 1159/*
940 if (!hotk) 1160 * ACPI driver
941 return; 1161 */
1162static void asus_acpi_notify(struct acpi_device *device, u32 event)
1163{
1164 struct asus_laptop *asus = acpi_driver_data(device);
1165 u16 count;
942 1166
943 /* 1167 /*
944 * We need to tell the backlight device when the backlight power is 1168 * We need to tell the backlight device when the backlight power is
945 * switched 1169 * switched
946 */ 1170 */
947 if (event == ATKD_LCD_ON) { 1171 if (event == ATKD_LCD_ON)
948 write_status(NULL, 1, LCD_ON); 1172 lcd_blank(asus, FB_BLANK_UNBLANK);
949 lcd_blank(FB_BLANK_UNBLANK); 1173 else if (event == ATKD_LCD_OFF)
950 } else if (event == ATKD_LCD_OFF) { 1174 lcd_blank(asus, FB_BLANK_POWERDOWN);
951 write_status(NULL, 0, LCD_ON);
952 lcd_blank(FB_BLANK_POWERDOWN);
953 }
954 1175
955 count = hotk->event_count[event % 128]++; 1176 /* TODO Find a better way to handle the event count. */
956 acpi_bus_generate_proc_event(hotk->device, event, count); 1177 count = asus->event_count[event % 128]++;
957 acpi_bus_generate_netlink_event(hotk->device->pnp.device_class, 1178 acpi_bus_generate_proc_event(asus->device, event, count);
958 dev_name(&hotk->device->dev), event, 1179 acpi_bus_generate_netlink_event(asus->device->pnp.device_class,
1180 dev_name(&asus->device->dev), event,
959 count); 1181 count);
960 1182
961 if (hotk->inputdev) { 1183 /* Brightness events are special */
962 key = asus_get_entry_by_scancode(event); 1184 if (event >= ATKD_BR_MIN && event <= ATKD_BR_MAX) {
963 if (!key) 1185
964 return ; 1186 /* Ignore them completely if the acpi video driver is used */
965 1187 if (asus->backlight_device != NULL) {
966 switch (key->type) { 1188 /* Update the backlight device. */
967 case KE_KEY: 1189 asus_backlight_notify(asus);
968 input_report_key(hotk->inputdev, key->keycode, 1);
969 input_sync(hotk->inputdev);
970 input_report_key(hotk->inputdev, key->keycode, 0);
971 input_sync(hotk->inputdev);
972 break;
973 } 1190 }
1191 return ;
974 } 1192 }
1193 asus_input_notify(asus, event);
975} 1194}
976 1195
977#define ASUS_CREATE_DEVICE_ATTR(_name) \ 1196static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL);
978 struct device_attribute dev_attr_##_name = { \ 1197static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan);
979 .attr = { \ 1198static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, show_bluetooth,
980 .name = __stringify(_name), \ 1199 store_bluetooth);
981 .mode = 0 }, \ 1200static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp);
982 .show = NULL, \ 1201static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
983 .store = NULL, \ 1202static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
1203static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw);
1204static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps);
1205
1206static void asus_sysfs_exit(struct asus_laptop *asus)
1207{
1208 struct platform_device *device = asus->platform_device;
1209
1210 device_remove_file(&device->dev, &dev_attr_infos);
1211 device_remove_file(&device->dev, &dev_attr_wlan);
1212 device_remove_file(&device->dev, &dev_attr_bluetooth);
1213 device_remove_file(&device->dev, &dev_attr_display);
1214 device_remove_file(&device->dev, &dev_attr_ledd);
1215 device_remove_file(&device->dev, &dev_attr_ls_switch);
1216 device_remove_file(&device->dev, &dev_attr_ls_level);
1217 device_remove_file(&device->dev, &dev_attr_gps);
1218}
1219
1220static int asus_sysfs_init(struct asus_laptop *asus)
1221{
1222 struct platform_device *device = asus->platform_device;
1223 int err;
1224
1225 err = device_create_file(&device->dev, &dev_attr_infos);
1226 if (err)
1227 return err;
1228
1229 if (!acpi_check_handle(asus->handle, METHOD_WLAN, NULL)) {
1230 err = device_create_file(&device->dev, &dev_attr_wlan);
1231 if (err)
1232 return err;
984 } 1233 }
985 1234
986#define ASUS_SET_DEVICE_ATTR(_name, _mode, _show, _store) \ 1235 if (!acpi_check_handle(asus->handle, METHOD_BLUETOOTH, NULL)) {
987 do { \ 1236 err = device_create_file(&device->dev, &dev_attr_bluetooth);
988 dev_attr_##_name.attr.mode = _mode; \ 1237 if (err)
989 dev_attr_##_name.show = _show; \ 1238 return err;
990 dev_attr_##_name.store = _store; \ 1239 }
991 } while(0)
992
993static ASUS_CREATE_DEVICE_ATTR(infos);
994static ASUS_CREATE_DEVICE_ATTR(wlan);
995static ASUS_CREATE_DEVICE_ATTR(bluetooth);
996static ASUS_CREATE_DEVICE_ATTR(display);
997static ASUS_CREATE_DEVICE_ATTR(ledd);
998static ASUS_CREATE_DEVICE_ATTR(ls_switch);
999static ASUS_CREATE_DEVICE_ATTR(ls_level);
1000static ASUS_CREATE_DEVICE_ATTR(gps);
1001
1002static struct attribute *asuspf_attributes[] = {
1003 &dev_attr_infos.attr,
1004 &dev_attr_wlan.attr,
1005 &dev_attr_bluetooth.attr,
1006 &dev_attr_display.attr,
1007 &dev_attr_ledd.attr,
1008 &dev_attr_ls_switch.attr,
1009 &dev_attr_ls_level.attr,
1010 &dev_attr_gps.attr,
1011 NULL
1012};
1013 1240
1014static struct attribute_group asuspf_attribute_group = { 1241 if (!acpi_check_handle(asus->handle, METHOD_SWITCH_DISPLAY, NULL)) {
1015 .attrs = asuspf_attributes 1242 err = device_create_file(&device->dev, &dev_attr_display);
1016}; 1243 if (err)
1244 return err;
1245 }
1017 1246
1018static struct platform_driver asuspf_driver = { 1247 if (!acpi_check_handle(asus->handle, METHOD_LEDD, NULL)) {
1019 .driver = { 1248 err = device_create_file(&device->dev, &dev_attr_ledd);
1020 .name = ASUS_HOTK_FILE, 1249 if (err)
1021 .owner = THIS_MODULE, 1250 return err;
1022 } 1251 }
1023};
1024 1252
1025static struct platform_device *asuspf_device; 1253 if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
1254 !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
1255 err = device_create_file(&device->dev, &dev_attr_ls_switch);
1256 if (err)
1257 return err;
1258 err = device_create_file(&device->dev, &dev_attr_ls_level);
1259 if (err)
1260 return err;
1261 }
1026 1262
1027static void asus_hotk_add_fs(void) 1263 if (!acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) &&
1028{ 1264 !acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) &&
1029 ASUS_SET_DEVICE_ATTR(infos, 0444, show_infos, NULL); 1265 !acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL)) {
1266 err = device_create_file(&device->dev, &dev_attr_gps);
1267 if (err)
1268 return err;
1269 }
1030 1270
1031 if (wl_switch_handle) 1271 return err;
1032 ASUS_SET_DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan); 1272}
1273
1274static int asus_platform_init(struct asus_laptop *asus)
1275{
1276 int err;
1033 1277
1034 if (bt_switch_handle) 1278 asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, -1);
1035 ASUS_SET_DEVICE_ATTR(bluetooth, 0644, 1279 if (!asus->platform_device)
1036 show_bluetooth, store_bluetooth); 1280 return -ENOMEM;
1281 platform_set_drvdata(asus->platform_device, asus);
1037 1282
1038 if (display_set_handle && display_get_handle) 1283 err = platform_device_add(asus->platform_device);
1039 ASUS_SET_DEVICE_ATTR(display, 0644, show_disp, store_disp); 1284 if (err)
1040 else if (display_set_handle) 1285 goto fail_platform_device;
1041 ASUS_SET_DEVICE_ATTR(display, 0200, NULL, store_disp);
1042 1286
1043 if (ledd_set_handle) 1287 err = asus_sysfs_init(asus);
1044 ASUS_SET_DEVICE_ATTR(ledd, 0644, show_ledd, store_ledd); 1288 if (err)
1289 goto fail_sysfs;
1290 return 0;
1045 1291
1046 if (ls_switch_handle && ls_level_handle) { 1292fail_sysfs:
1047 ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl); 1293 asus_sysfs_exit(asus);
1048 ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw); 1294 platform_device_del(asus->platform_device);
1049 } 1295fail_platform_device:
1296 platform_device_put(asus->platform_device);
1297 return err;
1298}
1050 1299
1051 if (gps_status_handle && gps_on_handle && gps_off_handle) 1300static void asus_platform_exit(struct asus_laptop *asus)
1052 ASUS_SET_DEVICE_ATTR(gps, 0644, show_gps, store_gps); 1301{
1302 asus_sysfs_exit(asus);
1303 platform_device_unregister(asus->platform_device);
1053} 1304}
1054 1305
1306static struct platform_driver platform_driver = {
1307 .driver = {
1308 .name = ASUS_LAPTOP_FILE,
1309 .owner = THIS_MODULE,
1310 }
1311};
1312
1055static int asus_handle_init(char *name, acpi_handle * handle, 1313static int asus_handle_init(char *name, acpi_handle * handle,
1056 char **paths, int num_paths) 1314 char **paths, int num_paths)
1057{ 1315{
@@ -1073,10 +1331,11 @@ static int asus_handle_init(char *name, acpi_handle * handle,
1073 ARRAY_SIZE(object##_paths)) 1331 ARRAY_SIZE(object##_paths))
1074 1332
1075/* 1333/*
1076 * This function is used to initialize the hotk with the right values. In this 1334 * This function is used to initialize the context with the right values. In this
1077 * method, we can do all the detection we want, and modify the hotk struct 1335 * method, we can do all the detection we want, and modify the asus_laptop
1336 * struct
1078 */ 1337 */
1079static int asus_hotk_get_info(void) 1338static int asus_laptop_get_info(struct asus_laptop *asus)
1080{ 1339{
1081 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 1340 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1082 union acpi_object *model = NULL; 1341 union acpi_object *model = NULL;
@@ -1089,22 +1348,21 @@ static int asus_hotk_get_info(void)
1089 * models, but late enough to allow acpi_bus_register_driver() to fail 1348 * models, but late enough to allow acpi_bus_register_driver() to fail
1090 * before doing anything ACPI-specific. Should we encounter a machine, 1349 * before doing anything ACPI-specific. Should we encounter a machine,
1091 * which needs special handling (i.e. its hotkey device has a different 1350 * which needs special handling (i.e. its hotkey device has a different
1092 * HID), this bit will be moved. A global variable asus_info contains 1351 * HID), this bit will be moved.
1093 * the DSDT header.
1094 */ 1352 */
1095 status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info); 1353 status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info);
1096 if (ACPI_FAILURE(status)) 1354 if (ACPI_FAILURE(status))
1097 pr_warning("Couldn't get the DSDT table header\n"); 1355 pr_warning("Couldn't get the DSDT table header\n");
1098 1356
1099 /* We have to write 0 on init this far for all ASUS models */ 1357 /* We have to write 0 on init this far for all ASUS models */
1100 if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) { 1358 if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) {
1101 pr_err("Hotkey initialization failed\n"); 1359 pr_err("Hotkey initialization failed\n");
1102 return -ENODEV; 1360 return -ENODEV;
1103 } 1361 }
1104 1362
1105 /* This needs to be called for some laptops to init properly */ 1363 /* This needs to be called for some laptops to init properly */
1106 status = 1364 status =
1107 acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result); 1365 acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result);
1108 if (ACPI_FAILURE(status)) 1366 if (ACPI_FAILURE(status))
1109 pr_warning("Error calling BSTS\n"); 1367 pr_warning("Error calling BSTS\n");
1110 else if (bsts_result) 1368 else if (bsts_result)
@@ -1112,8 +1370,8 @@ static int asus_hotk_get_info(void)
1112 (uint) bsts_result); 1370 (uint) bsts_result);
1113 1371
1114 /* This too ... */ 1372 /* This too ... */
1115 write_acpi_int(hotk->handle, "CWAP", wapf, NULL); 1373 if (write_acpi_int(asus->handle, "CWAP", wapf))
1116 1374 pr_err("Error calling CWAP(%d)\n", wapf);
1117 /* 1375 /*
1118 * Try to match the object returned by INIT to the specific model. 1376 * Try to match the object returned by INIT to the specific model.
1119 * Handle every possible object (or the lack thereof) the DSDT 1377
@@ -1134,397 +1392,210 @@ static int asus_hotk_get_info(void)
1134 break; 1392 break;
1135 } 1393 }
1136 } 1394 }
1137 hotk->name = kstrdup(string, GFP_KERNEL); 1395 asus->name = kstrdup(string, GFP_KERNEL);
1138 if (!hotk->name) 1396 if (!asus->name)
1139 return -ENOMEM; 1397 return -ENOMEM;
1140 1398
1141 if (*string) 1399 if (*string)
1142 pr_notice(" %s model detected\n", string); 1400 pr_notice(" %s model detected\n", string);
1143 1401
1144 ASUS_HANDLE_INIT(mled_set);
1145 ASUS_HANDLE_INIT(tled_set);
1146 ASUS_HANDLE_INIT(rled_set);
1147 ASUS_HANDLE_INIT(pled_set);
1148 ASUS_HANDLE_INIT(gled_set);
1149
1150 ASUS_HANDLE_INIT(ledd_set);
1151
1152 ASUS_HANDLE_INIT(kled_set);
1153 ASUS_HANDLE_INIT(kled_get);
1154
1155 /* 1402 /*
1156 * The HWRS method returns information about the hardware. 1403 * The HWRS method returns information about the hardware.
1157 * 0x80 bit is for WLAN, 0x100 for Bluetooth. 1404 * 0x80 bit is for WLAN, 0x100 for Bluetooth.
1158 * The significance of others is yet to be found. 1405 * The significance of others is yet to be found.
1159 * If we don't find the method, we assume the devices are present.
1160 */ 1406 */
1161 status = 1407 status =
1162 acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &hwrs_result); 1408 acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result);
1163 if (ACPI_FAILURE(status)) 1409 if (!ACPI_FAILURE(status))
1164 hwrs_result = WL_HWRS | BT_HWRS; 1410 pr_notice(" HRWS returned %x", (int)hwrs_result);
1165
1166 if (hwrs_result & WL_HWRS)
1167 ASUS_HANDLE_INIT(wl_switch);
1168 if (hwrs_result & BT_HWRS)
1169 ASUS_HANDLE_INIT(bt_switch);
1170
1171 ASUS_HANDLE_INIT(wireless_status);
1172 1411
1173 ASUS_HANDLE_INIT(brightness_set); 1412 if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
1174 ASUS_HANDLE_INIT(brightness_get); 1413 asus->have_rsts = true;
1175 1414
1415 /* Scheduled for removal */
1176 ASUS_HANDLE_INIT(lcd_switch); 1416 ASUS_HANDLE_INIT(lcd_switch);
1177
1178 ASUS_HANDLE_INIT(display_set);
1179 ASUS_HANDLE_INIT(display_get); 1417 ASUS_HANDLE_INIT(display_get);
1180 1418
1181 /*
1182 * There are a lot of models with "ALSL", but only a few get
1183 * a real light sensor, so we need to check it.
1184 */
1185 if (!ASUS_HANDLE_INIT(ls_switch))
1186 ASUS_HANDLE_INIT(ls_level);
1187
1188 ASUS_HANDLE_INIT(gps_on);
1189 ASUS_HANDLE_INIT(gps_off);
1190 ASUS_HANDLE_INIT(gps_status);
1191
1192 kfree(model); 1419 kfree(model);
1193 1420
1194 return AE_OK; 1421 return AE_OK;
1195} 1422}
1196 1423
1197static int asus_input_init(void) 1424static bool asus_device_present;
1198{
1199 const struct key_entry *key;
1200 int result;
1201 1425
1202 hotk->inputdev = input_allocate_device(); 1426static int __devinit asus_acpi_init(struct asus_laptop *asus)
1203 if (!hotk->inputdev) {
1204 pr_info("Unable to allocate input device\n");
1205 return 0;
1206 }
1207 hotk->inputdev->name = "Asus Laptop extra buttons";
1208 hotk->inputdev->phys = ASUS_HOTK_FILE "/input0";
1209 hotk->inputdev->id.bustype = BUS_HOST;
1210 hotk->inputdev->getkeycode = asus_getkeycode;
1211 hotk->inputdev->setkeycode = asus_setkeycode;
1212
1213 for (key = asus_keymap; key->type != KE_END; key++) {
1214 switch (key->type) {
1215 case KE_KEY:
1216 set_bit(EV_KEY, hotk->inputdev->evbit);
1217 set_bit(key->keycode, hotk->inputdev->keybit);
1218 break;
1219 }
1220 }
1221 result = input_register_device(hotk->inputdev);
1222 if (result) {
1223 pr_info("Unable to register input device\n");
1224 input_free_device(hotk->inputdev);
1225 }
1226 return result;
1227}
1228
1229static int asus_hotk_check(void)
1230{ 1427{
1231 int result = 0; 1428 int result = 0;
1232 1429
1233 result = acpi_bus_get_status(hotk->device); 1430 result = acpi_bus_get_status(asus->device);
1234 if (result) 1431 if (result)
1235 return result; 1432 return result;
1236 1433 if (!asus->device->status.present) {
1237 if (hotk->device->status.present) {
1238 result = asus_hotk_get_info();
1239 } else {
1240 pr_err("Hotkey device not present, aborting\n"); 1434 pr_err("Hotkey device not present, aborting\n");
1241 return -EINVAL; 1435 return -ENODEV;
1242 } 1436 }
1243 1437
1244 return result; 1438 result = asus_laptop_get_info(asus);
1245}
1246
1247static int asus_hotk_found;
1248
1249static int asus_hotk_add(struct acpi_device *device)
1250{
1251 int result;
1252
1253 pr_notice("Asus Laptop Support version %s\n",
1254 ASUS_LAPTOP_VERSION);
1255
1256 hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
1257 if (!hotk)
1258 return -ENOMEM;
1259
1260 hotk->handle = device->handle;
1261 strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME);
1262 strcpy(acpi_device_class(device), ASUS_HOTK_CLASS);
1263 device->driver_data = hotk;
1264 hotk->device = device;
1265
1266 result = asus_hotk_check();
1267 if (result) 1439 if (result)
1268 goto end; 1440 return result;
1269
1270 asus_hotk_add_fs();
1271
1272 asus_hotk_found = 1;
1273 1441
1274 /* WLED and BLED are on by default */ 1442 /* WLED and BLED are on by default */
1275 write_status(bt_switch_handle, 1, BT_ON); 1443 if (bluetooth_status >= 0)
1276 write_status(wl_switch_handle, 1, WL_ON); 1444 asus_bluetooth_set(asus, !!bluetooth_status);
1277
1278 /* If the h/w switch is off, we need to check the real status */
1279 write_status(NULL, read_status(BT_ON), BT_ON);
1280 write_status(NULL, read_status(WL_ON), WL_ON);
1281 1445
1282 /* LCD Backlight is on by default */ 1446 if (wlan_status >= 0)
1283 write_status(NULL, 1, LCD_ON); 1447 asus_wlan_set(asus, !!wlan_status);
1284 1448
1285 /* Keyboard Backlight is on by default */ 1449 /* Keyboard Backlight is on by default */
1286 if (kled_set_handle) 1450 if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL))
1287 set_kled_lvl(1); 1451 asus_kled_set(asus, 1);
1288 1452
1289 /* LED display is off by default */ 1453 /* LED display is off by default */
1290 hotk->ledd_status = 0xFFF; 1454 asus->ledd_status = 0xFFF;
1291 1455
1292 /* Set initial values of light sensor and level */ 1456 /* Set initial values of light sensor and level */
1293 hotk->light_switch = 0; /* Default to light sensor disabled */ 1457 asus->light_switch = 0; /* Default to light sensor disabled */
1294 hotk->light_level = 5; /* level 5 for sensor sensitivity */ 1458 asus->light_level = 5; /* level 5 for sensor sensitivity */
1295 1459
1296 if (ls_switch_handle) 1460 if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
1297 set_light_sens_switch(hotk->light_switch); 1461 !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
1298 1462 asus_als_switch(asus, asus->light_switch);
1299 if (ls_level_handle) 1463 asus_als_level(asus, asus->light_level);
1300 set_light_sens_level(hotk->light_level);
1301
1302 /* GPS is on by default */
1303 write_status(NULL, 1, GPS_ON);
1304
1305end:
1306 if (result) {
1307 kfree(hotk->name);
1308 kfree(hotk);
1309 } 1464 }
1310 1465
1466 asus->lcd_state = 1; /* LCD should be on when the module loads */
1311 return result; 1467 return result;
1312} 1468}
1313 1469
1314static int asus_hotk_remove(struct acpi_device *device, int type) 1470static int __devinit asus_acpi_add(struct acpi_device *device)
1315{
1316 kfree(hotk->name);
1317 kfree(hotk);
1318
1319 return 0;
1320}
1321
1322static void asus_backlight_exit(void)
1323{ 1471{
1324 if (asus_backlight_device) 1472 struct asus_laptop *asus;
1325 backlight_device_unregister(asus_backlight_device); 1473 int result;
1326}
1327
1328#define ASUS_LED_UNREGISTER(object) \
1329 if (object##_led.dev) \
1330 led_classdev_unregister(&object##_led)
1331 1474
1332static void asus_led_exit(void) 1475 pr_notice("Asus Laptop Support version %s\n",
1333{ 1476 ASUS_LAPTOP_VERSION);
1334 destroy_workqueue(led_workqueue); 1477 asus = kzalloc(sizeof(struct asus_laptop), GFP_KERNEL);
1335 ASUS_LED_UNREGISTER(mled); 1478 if (!asus)
1336 ASUS_LED_UNREGISTER(tled); 1479 return -ENOMEM;
1337 ASUS_LED_UNREGISTER(pled); 1480 asus->handle = device->handle;
1338 ASUS_LED_UNREGISTER(rled); 1481 strcpy(acpi_device_name(device), ASUS_LAPTOP_DEVICE_NAME);
1339 ASUS_LED_UNREGISTER(gled); 1482 strcpy(acpi_device_class(device), ASUS_LAPTOP_CLASS);
1340 ASUS_LED_UNREGISTER(kled); 1483 device->driver_data = asus;
1341} 1484 asus->device = device;
1342 1485
1343static void asus_input_exit(void) 1486 result = asus_acpi_init(asus);
1344{ 1487 if (result)
1345 if (hotk->inputdev) 1488 goto fail_platform;
1346 input_unregister_device(hotk->inputdev);
1347}
1348 1489
1349static void __exit asus_laptop_exit(void) 1490 /*
1350{ 1491 * Register the platform device first. It is used as a parent for the
1351 asus_backlight_exit(); 1492 * sub-devices below.
1352 asus_led_exit(); 1493 */
1353 asus_input_exit(); 1494 result = asus_platform_init(asus);
1495 if (result)
1496 goto fail_platform;
1354 1497
1355 acpi_bus_unregister_driver(&asus_hotk_driver); 1498 if (!acpi_video_backlight_support()) {
1356 sysfs_remove_group(&asuspf_device->dev.kobj, &asuspf_attribute_group); 1499 result = asus_backlight_init(asus);
1357 platform_device_unregister(asuspf_device); 1500 if (result)
1358 platform_driver_unregister(&asuspf_driver); 1501 goto fail_backlight;
1359} 1502 } else
1503 pr_info("Backlight controlled by ACPI video driver\n");
1360 1504
1361static int asus_backlight_init(struct device *dev) 1505 result = asus_input_init(asus);
1362{ 1506 if (result)
1363 struct backlight_device *bd; 1507 goto fail_input;
1364 1508
1365 if (brightness_set_handle && lcd_switch_handle) { 1509 result = asus_led_init(asus);
1366 bd = backlight_device_register(ASUS_HOTK_FILE, dev, 1510 if (result)
1367 NULL, &asusbl_ops); 1511 goto fail_led;
1368 if (IS_ERR(bd)) {
1369 pr_err("Could not register asus backlight device\n");
1370 asus_backlight_device = NULL;
1371 return PTR_ERR(bd);
1372 }
1373 1512
1374 asus_backlight_device = bd; 1513 result = asus_rfkill_init(asus);
1514 if (result)
1515 goto fail_rfkill;
1375 1516
1376 bd->props.max_brightness = 15; 1517 asus_device_present = true;
1377 bd->props.brightness = read_brightness(NULL);
1378 bd->props.power = FB_BLANK_UNBLANK;
1379 backlight_update_status(bd);
1380 }
1381 return 0; 1518 return 0;
1382}
1383 1519
1384static int asus_led_register(acpi_handle handle, 1520fail_rfkill:
1385 struct led_classdev *ldev, struct device *dev) 1521 asus_led_exit(asus);
1386{ 1522fail_led:
1387 if (!handle) 1523 asus_input_exit(asus);
1388 return 0; 1524fail_input:
1525 asus_backlight_exit(asus);
1526fail_backlight:
1527 asus_platform_exit(asus);
1528fail_platform:
1529 kfree(asus->name);
1530 kfree(asus);
1389 1531
1390 return led_classdev_register(dev, ldev); 1532 return result;
1391} 1533}
1392 1534
1393#define ASUS_LED_REGISTER(object, device) \ 1535static int asus_acpi_remove(struct acpi_device *device, int type)
1394 asus_led_register(object##_set_handle, &object##_led, device)
1395
1396static int asus_led_init(struct device *dev)
1397{ 1536{
1398 int rv; 1537 struct asus_laptop *asus = acpi_driver_data(device);
1399
1400 rv = ASUS_LED_REGISTER(mled, dev);
1401 if (rv)
1402 goto out;
1403
1404 rv = ASUS_LED_REGISTER(tled, dev);
1405 if (rv)
1406 goto out1;
1407
1408 rv = ASUS_LED_REGISTER(rled, dev);
1409 if (rv)
1410 goto out2;
1411
1412 rv = ASUS_LED_REGISTER(pled, dev);
1413 if (rv)
1414 goto out3;
1415
1416 rv = ASUS_LED_REGISTER(gled, dev);
1417 if (rv)
1418 goto out4;
1419 1538
1420 if (kled_set_handle && kled_get_handle) 1539 asus_backlight_exit(asus);
1421 rv = ASUS_LED_REGISTER(kled, dev); 1540 asus_rfkill_exit(asus);
1422 if (rv) 1541 asus_led_exit(asus);
1423 goto out5; 1542 asus_input_exit(asus);
1424 1543 asus_platform_exit(asus);
1425 led_workqueue = create_singlethread_workqueue("led_workqueue");
1426 if (!led_workqueue)
1427 goto out6;
1428 1544
1545 kfree(asus->name);
1546 kfree(asus);
1429 return 0; 1547 return 0;
1430out6:
1431 rv = -ENOMEM;
1432 ASUS_LED_UNREGISTER(kled);
1433out5:
1434 ASUS_LED_UNREGISTER(gled);
1435out4:
1436 ASUS_LED_UNREGISTER(pled);
1437out3:
1438 ASUS_LED_UNREGISTER(rled);
1439out2:
1440 ASUS_LED_UNREGISTER(tled);
1441out1:
1442 ASUS_LED_UNREGISTER(mled);
1443out:
1444 return rv;
1445} 1548}
1446 1549
1550static const struct acpi_device_id asus_device_ids[] = {
1551 {"ATK0100", 0},
1552 {"ATK0101", 0},
1553 {"", 0},
1554};
1555MODULE_DEVICE_TABLE(acpi, asus_device_ids);
1556
1557static struct acpi_driver asus_acpi_driver = {
1558 .name = ASUS_LAPTOP_NAME,
1559 .class = ASUS_LAPTOP_CLASS,
1560 .owner = THIS_MODULE,
1561 .ids = asus_device_ids,
1562 .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
1563 .ops = {
1564 .add = asus_acpi_add,
1565 .remove = asus_acpi_remove,
1566 .notify = asus_acpi_notify,
1567 },
1568};
1569
1447static int __init asus_laptop_init(void) 1570static int __init asus_laptop_init(void)
1448{ 1571{
1449 int result; 1572 int result;
1450 1573
1451 result = acpi_bus_register_driver(&asus_hotk_driver); 1574 result = platform_driver_register(&platform_driver);
1452 if (result < 0) 1575 if (result < 0)
1453 return result; 1576 return result;
1454 1577
1455 /* 1578 result = acpi_bus_register_driver(&asus_acpi_driver);
1456 * This is a bit of a kludge. We only want this module loaded 1579 if (result < 0)
1457 * for ASUS systems, but there's currently no way to probe the 1580 goto fail_acpi_driver;
1458 * ACPI namespace for ASUS HIDs. So we just return failure if 1581 if (!asus_device_present) {
1459 * we didn't find one, which will cause the module to be 1582 result = -ENODEV;
1460 * unloaded. 1583 goto fail_no_device;
1461 */
1462 if (!asus_hotk_found) {
1463 acpi_bus_unregister_driver(&asus_hotk_driver);
1464 return -ENODEV;
1465 }
1466
1467 result = asus_input_init();
1468 if (result)
1469 goto fail_input;
1470
1471 /* Register platform stuff */
1472 result = platform_driver_register(&asuspf_driver);
1473 if (result)
1474 goto fail_platform_driver;
1475
1476 asuspf_device = platform_device_alloc(ASUS_HOTK_FILE, -1);
1477 if (!asuspf_device) {
1478 result = -ENOMEM;
1479 goto fail_platform_device1;
1480 } 1584 }
1481
1482 result = platform_device_add(asuspf_device);
1483 if (result)
1484 goto fail_platform_device2;
1485
1486 result = sysfs_create_group(&asuspf_device->dev.kobj,
1487 &asuspf_attribute_group);
1488 if (result)
1489 goto fail_sysfs;
1490
1491 result = asus_led_init(&asuspf_device->dev);
1492 if (result)
1493 goto fail_led;
1494
1495 if (!acpi_video_backlight_support()) {
1496 result = asus_backlight_init(&asuspf_device->dev);
1497 if (result)
1498 goto fail_backlight;
1499 } else
1500 pr_info("Brightness ignored, must be controlled by "
1501 "ACPI video driver\n");
1502
1503 return 0; 1585 return 0;
1504 1586
1505fail_backlight: 1587fail_no_device:
1506 asus_led_exit(); 1588 acpi_bus_unregister_driver(&asus_acpi_driver);
1507 1589fail_acpi_driver:
1508fail_led: 1590 platform_driver_unregister(&platform_driver);
1509 sysfs_remove_group(&asuspf_device->dev.kobj,
1510 &asuspf_attribute_group);
1511
1512fail_sysfs:
1513 platform_device_del(asuspf_device);
1514
1515fail_platform_device2:
1516 platform_device_put(asuspf_device);
1517
1518fail_platform_device1:
1519 platform_driver_unregister(&asuspf_driver);
1520
1521fail_platform_driver:
1522 asus_input_exit();
1523
1524fail_input:
1525
1526 return result; 1591 return result;
1527} 1592}
1528 1593
1594static void __exit asus_laptop_exit(void)
1595{
1596 acpi_bus_unregister_driver(&asus_acpi_driver);
1597 platform_driver_unregister(&platform_driver);
1598}
1599
1529module_init(asus_laptop_init); 1600module_init(asus_laptop_init);
1530module_exit(asus_laptop_exit); 1601module_exit(asus_laptop_exit);
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index c1d2aeeea948..1381430e1105 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1225,9 +1225,8 @@ static int asus_model_match(char *model)
1225 else if (strncmp(model, "M2N", 3) == 0 || 1225 else if (strncmp(model, "M2N", 3) == 0 ||
1226 strncmp(model, "M3N", 3) == 0 || 1226 strncmp(model, "M3N", 3) == 0 ||
1227 strncmp(model, "M5N", 3) == 0 || 1227 strncmp(model, "M5N", 3) == 0 ||
1228 strncmp(model, "M6N", 3) == 0 ||
1229 strncmp(model, "S1N", 3) == 0 || 1228 strncmp(model, "S1N", 3) == 0 ||
1230 strncmp(model, "S5N", 3) == 0 || strncmp(model, "W1N", 3) == 0) 1229 strncmp(model, "S5N", 3) == 0)
1231 return xxN; 1230 return xxN;
1232 else if (strncmp(model, "M1", 2) == 0) 1231 else if (strncmp(model, "M1", 2) == 0)
1233 return M1A; 1232 return M1A;
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 8cb20e45bad6..035a7dd65a3f 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -507,6 +507,10 @@ static int cmpc_keys_codes[] = {
507 KEY_BRIGHTNESSDOWN, 507 KEY_BRIGHTNESSDOWN,
508 KEY_BRIGHTNESSUP, 508 KEY_BRIGHTNESSUP,
509 KEY_VENDOR, 509 KEY_VENDOR,
510 KEY_UNKNOWN,
511 KEY_CAMERA,
512 KEY_BACK,
513 KEY_FORWARD,
510 KEY_MAX 514 KEY_MAX
511}; 515};
512 516
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index b7f4d2705916..ef614979afe9 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -132,8 +132,8 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = {
132}; 132};
133 133
134static struct calling_interface_buffer *buffer; 134static struct calling_interface_buffer *buffer;
135struct page *bufferpage; 135static struct page *bufferpage;
136DEFINE_MUTEX(buffer_mutex); 136static DEFINE_MUTEX(buffer_mutex);
137 137
138static int hwswitch_state; 138static int hwswitch_state;
139 139
@@ -580,6 +580,7 @@ static int __init dell_init(void)
580 580
581fail_backlight: 581fail_backlight:
582 i8042_remove_filter(dell_laptop_i8042_filter); 582 i8042_remove_filter(dell_laptop_i8042_filter);
583 cancel_delayed_work_sync(&dell_rfkill_work);
583fail_filter: 584fail_filter:
584 dell_cleanup_rfkill(); 585 dell_cleanup_rfkill();
585fail_rfkill: 586fail_rfkill:
@@ -597,12 +598,12 @@ fail_platform_driver:
597 598
598static void __exit dell_exit(void) 599static void __exit dell_exit(void)
599{ 600{
600 cancel_delayed_work_sync(&dell_rfkill_work);
601 i8042_remove_filter(dell_laptop_i8042_filter); 601 i8042_remove_filter(dell_laptop_i8042_filter);
602 cancel_delayed_work_sync(&dell_rfkill_work);
602 backlight_device_unregister(dell_backlight_device); 603 backlight_device_unregister(dell_backlight_device);
603 dell_cleanup_rfkill(); 604 dell_cleanup_rfkill();
604 if (platform_device) { 605 if (platform_device) {
605 platform_device_del(platform_device); 606 platform_device_unregister(platform_device);
606 platform_driver_unregister(&platform_driver); 607 platform_driver_unregister(&platform_driver);
607 } 608 }
608 kfree(da_tokens); 609 kfree(da_tokens);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index e2be6bb33d92..9a844caa3756 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -578,6 +578,8 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
578 struct pci_dev *dev; 578 struct pci_dev *dev;
579 struct pci_bus *bus; 579 struct pci_bus *bus;
580 bool blocked = eeepc_wlan_rfkill_blocked(eeepc); 580 bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
581 bool absent;
582 u32 l;
581 583
582 if (eeepc->wlan_rfkill) 584 if (eeepc->wlan_rfkill)
583 rfkill_set_sw_state(eeepc->wlan_rfkill, blocked); 585 rfkill_set_sw_state(eeepc->wlan_rfkill, blocked);
@@ -591,6 +593,22 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
591 goto out_unlock; 593 goto out_unlock;
592 } 594 }
593 595
596 if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
597 pr_err("Unable to read PCI config space?\n");
598 goto out_unlock;
599 }
600 absent = (l == 0xffffffff);
601
602 if (blocked != absent) {
603 pr_warning("BIOS says wireless lan is %s, "
604 "but the pci device is %s\n",
605 blocked ? "blocked" : "unblocked",
606 absent ? "absent" : "present");
607 pr_warning("skipped wireless hotplug as probably "
608 "inappropriate for this model\n");
609 goto out_unlock;
610 }
611
594 if (!blocked) { 612 if (!blocked) {
595 dev = pci_get_slot(bus, 0); 613 dev = pci_get_slot(bus, 0);
596 if (dev) { 614 if (dev) {
@@ -1277,7 +1295,8 @@ static void eeepc_dmi_check(struct eeepc_laptop *eeepc)
1277 * hotplug code. In fact, current hotplug code seems to unplug another 1295 * hotplug code. In fact, current hotplug code seems to unplug another
1278 * device... 1296 * device...
1279 */ 1297 */
1280 if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0) { 1298 if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0 ||
1299 strcmp(model, "1005PE") == 0) {
1281 eeepc->hotplug_disabled = true; 1300 eeepc->hotplug_disabled = true;
1282 pr_info("wlan hotplug disabled\n"); 1301 pr_info("wlan hotplug disabled\n");
1283 } 1302 }
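
For reference, the absent-device test added in the eeepc hunk above leans on a PCI convention: a config-space read of the vendor/device ID dword comes back as all-ones when no function decodes that address, so disagreement between the BIOS rfkill state and actual device presence is taken as a sign that the hotplug path is inappropriate. A minimal stand-alone sketch of that check (plain C; the read result and rfkill state are hypothetical stand-ins for the driver's pci_bus_read_config_dword() result and rfkill flag):

/*
 * Sketch only: the "absent device" test in isolation, with hypothetical
 * values instead of a real PCI config-space read.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool pci_function_absent(uint32_t vendor_device_dword)
{
	/* all bits set: the read was not claimed by any device */
	return vendor_device_dword == 0xffffffff;
}

int main(void)
{
	uint32_t l = 0xffffffff;	/* hypothetical config-space read */
	bool blocked = true;		/* rfkill state reported by BIOS */
	bool absent = pci_function_absent(l);

	if (blocked != absent)
		printf("BIOS rfkill state and PCI presence disagree, skip hotplug\n");
	else
		printf("states agree, hotplug path may proceed\n");
	return 0;
}
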
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index eb603f1d55ca..e7b0c3bcef89 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -286,6 +286,7 @@ struct ibm_init_struct {
286 char param[32]; 286 char param[32];
287 287
288 int (*init) (struct ibm_init_struct *); 288 int (*init) (struct ibm_init_struct *);
289 mode_t base_procfs_mode;
289 struct ibm_struct *data; 290 struct ibm_struct *data;
290}; 291};
291 292
@@ -2082,6 +2083,7 @@ static struct attribute_set *hotkey_dev_attributes;
2082 2083
2083static void tpacpi_driver_event(const unsigned int hkey_event); 2084static void tpacpi_driver_event(const unsigned int hkey_event);
2084static void hotkey_driver_event(const unsigned int scancode); 2085static void hotkey_driver_event(const unsigned int scancode);
2086static void hotkey_poll_setup(const bool may_warn);
2085 2087
2086/* HKEY.MHKG() return bits */ 2088/* HKEY.MHKG() return bits */
2087#define TP_HOTKEY_TABLET_MASK (1 << 3) 2089#define TP_HOTKEY_TABLET_MASK (1 << 3)
@@ -2264,6 +2266,8 @@ static int tpacpi_hotkey_driver_mask_set(const u32 mask)
2264 2266
2265 rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) & 2267 rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
2266 ~hotkey_source_mask); 2268 ~hotkey_source_mask);
2269 hotkey_poll_setup(true);
2270
2267 mutex_unlock(&hotkey_mutex); 2271 mutex_unlock(&hotkey_mutex);
2268 2272
2269 return rc; 2273 return rc;
@@ -2548,7 +2552,7 @@ static void hotkey_poll_stop_sync(void)
2548} 2552}
2549 2553
2550/* call with hotkey_mutex held */ 2554/* call with hotkey_mutex held */
2551static void hotkey_poll_setup(bool may_warn) 2555static void hotkey_poll_setup(const bool may_warn)
2552{ 2556{
2553 const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask; 2557 const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
2554 const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask; 2558 const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
@@ -2579,7 +2583,7 @@ static void hotkey_poll_setup(bool may_warn)
2579 } 2583 }
2580} 2584}
2581 2585
2582static void hotkey_poll_setup_safe(bool may_warn) 2586static void hotkey_poll_setup_safe(const bool may_warn)
2583{ 2587{
2584 mutex_lock(&hotkey_mutex); 2588 mutex_lock(&hotkey_mutex);
2585 hotkey_poll_setup(may_warn); 2589 hotkey_poll_setup(may_warn);
@@ -2597,7 +2601,11 @@ static void hotkey_poll_set_freq(unsigned int freq)
2597 2601
2598#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ 2602#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
2599 2603
2600static void hotkey_poll_setup_safe(bool __unused) 2604static void hotkey_poll_setup(const bool __unused)
2605{
2606}
2607
2608static void hotkey_poll_setup_safe(const bool __unused)
2601{ 2609{
2602} 2610}
2603 2611
@@ -2607,16 +2615,11 @@ static int hotkey_inputdev_open(struct input_dev *dev)
2607{ 2615{
2608 switch (tpacpi_lifecycle) { 2616 switch (tpacpi_lifecycle) {
2609 case TPACPI_LIFE_INIT: 2617 case TPACPI_LIFE_INIT:
2610 /*
2611 * hotkey_init will call hotkey_poll_setup_safe
2612 * at the appropriate moment
2613 */
2614 return 0;
2615 case TPACPI_LIFE_EXITING:
2616 return -EBUSY;
2617 case TPACPI_LIFE_RUNNING: 2618 case TPACPI_LIFE_RUNNING:
2618 hotkey_poll_setup_safe(false); 2619 hotkey_poll_setup_safe(false);
2619 return 0; 2620 return 0;
2621 case TPACPI_LIFE_EXITING:
2622 return -EBUSY;
2620 } 2623 }
2621 2624
2622 /* Should only happen if tpacpi_lifecycle is corrupt */ 2625 /* Should only happen if tpacpi_lifecycle is corrupt */
@@ -2627,7 +2630,7 @@ static int hotkey_inputdev_open(struct input_dev *dev)
2627static void hotkey_inputdev_close(struct input_dev *dev) 2630static void hotkey_inputdev_close(struct input_dev *dev)
2628{ 2631{
2629 /* disable hotkey polling when possible */ 2632 /* disable hotkey polling when possible */
2630 if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING && 2633 if (tpacpi_lifecycle != TPACPI_LIFE_EXITING &&
2631 !(hotkey_source_mask & hotkey_driver_mask)) 2634 !(hotkey_source_mask & hotkey_driver_mask))
2632 hotkey_poll_setup_safe(false); 2635 hotkey_poll_setup_safe(false);
2633} 2636}
@@ -3655,13 +3658,19 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3655 break; 3658 break;
3656 case 3: 3659 case 3:
3657 /* 0x3000-0x3FFF: bay-related wakeups */ 3660 /* 0x3000-0x3FFF: bay-related wakeups */
3658 if (hkey == TP_HKEY_EV_BAYEJ_ACK) { 3661 switch (hkey) {
3662 case TP_HKEY_EV_BAYEJ_ACK:
3659 hotkey_autosleep_ack = 1; 3663 hotkey_autosleep_ack = 1;
3660 printk(TPACPI_INFO 3664 printk(TPACPI_INFO
3661 "bay ejected\n"); 3665 "bay ejected\n");
3662 hotkey_wakeup_hotunplug_complete_notify_change(); 3666 hotkey_wakeup_hotunplug_complete_notify_change();
3663 known_ev = true; 3667 known_ev = true;
3664 } else { 3668 break;
3669 case TP_HKEY_EV_OPTDRV_EJ:
3670 /* FIXME: kick libata if SATA link offline */
3671 known_ev = true;
3672 break;
3673 default:
3665 known_ev = false; 3674 known_ev = false;
3666 } 3675 }
3667 break; 3676 break;
@@ -3870,7 +3879,7 @@ enum {
3870 TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */ 3879 TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */
3871 TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */ 3880 TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */
3872 TP_ACPI_BLUETOOTH_RESUMECTRL = 0x04, /* Bluetooth state at resume: 3881 TP_ACPI_BLUETOOTH_RESUMECTRL = 0x04, /* Bluetooth state at resume:
3873 off / last state */ 3882 0 = disable, 1 = enable */
3874}; 3883};
3875 3884
3876enum { 3885enum {
@@ -3916,10 +3925,11 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state)
3916 } 3925 }
3917#endif 3926#endif
3918 3927
3919 /* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
3920 status = TP_ACPI_BLUETOOTH_RESUMECTRL;
3921 if (state == TPACPI_RFK_RADIO_ON) 3928 if (state == TPACPI_RFK_RADIO_ON)
3922 status |= TP_ACPI_BLUETOOTH_RADIOSSW; 3929 status = TP_ACPI_BLUETOOTH_RADIOSSW
3930 | TP_ACPI_BLUETOOTH_RESUMECTRL;
3931 else
3932 status = 0;
3923 3933
3924 if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status)) 3934 if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
3925 return -EIO; 3935 return -EIO;
@@ -4070,7 +4080,7 @@ enum {
4070 TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */ 4080 TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */
4071 TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */ 4081 TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */
4072 TP_ACPI_WANCARD_RESUMECTRL = 0x04, /* Wan state at resume: 4082 TP_ACPI_WANCARD_RESUMECTRL = 0x04, /* Wan state at resume:
4073 off / last state */ 4083 0 = disable, 1 = enable */
4074}; 4084};
4075 4085
4076#define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw" 4086#define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw"
@@ -4107,10 +4117,11 @@ static int wan_set_status(enum tpacpi_rfkill_state state)
4107 } 4117 }
4108#endif 4118#endif
4109 4119
4110 /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */
4111 status = TP_ACPI_WANCARD_RESUMECTRL;
4112 if (state == TPACPI_RFK_RADIO_ON) 4120 if (state == TPACPI_RFK_RADIO_ON)
4113 status |= TP_ACPI_WANCARD_RADIOSSW; 4121 status = TP_ACPI_WANCARD_RADIOSSW
4122 | TP_ACPI_WANCARD_RESUMECTRL;
4123 else
4124 status = 0;
4114 4125
4115 if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status)) 4126 if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
4116 return -EIO; 4127 return -EIO;
@@ -4619,6 +4630,10 @@ static int video_read(struct seq_file *m)
4619 return 0; 4630 return 0;
4620 } 4631 }
4621 4632
4633 /* Even reads can crash X.org, so... */
4634 if (!capable(CAP_SYS_ADMIN))
4635 return -EPERM;
4636
4622 status = video_outputsw_get(); 4637 status = video_outputsw_get();
4623 if (status < 0) 4638 if (status < 0)
4624 return status; 4639 return status;
@@ -4652,6 +4667,10 @@ static int video_write(char *buf)
4652 if (video_supported == TPACPI_VIDEO_NONE) 4667 if (video_supported == TPACPI_VIDEO_NONE)
4653 return -ENODEV; 4668 return -ENODEV;
4654 4669
4670 /* Even reads can crash X.org, let alone writes... */
4671 if (!capable(CAP_SYS_ADMIN))
4672 return -EPERM;
4673
4655 enable = 0; 4674 enable = 0;
4656 disable = 0; 4675 disable = 0;
4657 4676
@@ -6133,13 +6152,13 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
6133 TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */ 6152 TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */
6134 6153
6135 /* Models with ATI GPUs that can use ECNVRAM */ 6154 /* Models with ATI GPUs that can use ECNVRAM */
6136 TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC), 6155 TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC), /* R50,51 T40-42 */
6137 TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), 6156 TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
6138 TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), 6157 TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_EC), /* R52 */
6139 TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), 6158 TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
6140 6159
6141 /* Models with Intel Extreme Graphics 2 */ 6160 /* Models with Intel Extreme Graphics 2 */
6142 TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC), 6161 TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC), /* X40 */
6143 TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), 6162 TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
6144 TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), 6163 TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
6145 6164
@@ -6522,7 +6541,8 @@ static int volume_set_status(const u8 status)
6522 return volume_set_status_ec(status); 6541 return volume_set_status_ec(status);
6523} 6542}
6524 6543
6525static int volume_set_mute_ec(const bool mute) 6544/* returns < 0 on error, 0 on no change, 1 on change */
6545static int __volume_set_mute_ec(const bool mute)
6526{ 6546{
6527 int rc; 6547 int rc;
6528 u8 s, n; 6548 u8 s, n;
@@ -6537,22 +6557,37 @@ static int volume_set_mute_ec(const bool mute)
6537 n = (mute) ? s | TP_EC_AUDIO_MUTESW_MSK : 6557 n = (mute) ? s | TP_EC_AUDIO_MUTESW_MSK :
6538 s & ~TP_EC_AUDIO_MUTESW_MSK; 6558 s & ~TP_EC_AUDIO_MUTESW_MSK;
6539 6559
6540 if (n != s) 6560 if (n != s) {
6541 rc = volume_set_status_ec(n); 6561 rc = volume_set_status_ec(n);
6562 if (!rc)
6563 rc = 1;
6564 }
6542 6565
6543unlock: 6566unlock:
6544 mutex_unlock(&volume_mutex); 6567 mutex_unlock(&volume_mutex);
6545 return rc; 6568 return rc;
6546} 6569}
6547 6570
6571static int volume_alsa_set_mute(const bool mute)
6572{
6573 dbg_printk(TPACPI_DBG_MIXER, "ALSA: trying to %smute\n",
6574 (mute) ? "" : "un");
6575 return __volume_set_mute_ec(mute);
6576}
6577
6548static int volume_set_mute(const bool mute) 6578static int volume_set_mute(const bool mute)
6549{ 6579{
6580 int rc;
6581
6550 dbg_printk(TPACPI_DBG_MIXER, "trying to %smute\n", 6582 dbg_printk(TPACPI_DBG_MIXER, "trying to %smute\n",
6551 (mute) ? "" : "un"); 6583 (mute) ? "" : "un");
6552 return volume_set_mute_ec(mute); 6584
6585 rc = __volume_set_mute_ec(mute);
6586 return (rc < 0) ? rc : 0;
6553} 6587}
6554 6588
6555static int volume_set_volume_ec(const u8 vol) 6589/* returns < 0 on error, 0 on no change, 1 on change */
6590static int __volume_set_volume_ec(const u8 vol)
6556{ 6591{
6557 int rc; 6592 int rc;
6558 u8 s, n; 6593 u8 s, n;
@@ -6569,19 +6604,22 @@ static int volume_set_volume_ec(const u8 vol)
6569 6604
6570 n = (s & ~TP_EC_AUDIO_LVL_MSK) | vol; 6605 n = (s & ~TP_EC_AUDIO_LVL_MSK) | vol;
6571 6606
6572 if (n != s) 6607 if (n != s) {
6573 rc = volume_set_status_ec(n); 6608 rc = volume_set_status_ec(n);
6609 if (!rc)
6610 rc = 1;
6611 }
6574 6612
6575unlock: 6613unlock:
6576 mutex_unlock(&volume_mutex); 6614 mutex_unlock(&volume_mutex);
6577 return rc; 6615 return rc;
6578} 6616}
6579 6617
6580static int volume_set_volume(const u8 vol) 6618static int volume_alsa_set_volume(const u8 vol)
6581{ 6619{
6582 dbg_printk(TPACPI_DBG_MIXER, 6620 dbg_printk(TPACPI_DBG_MIXER,
6583 "trying to set volume level to %hu\n", vol); 6621 "ALSA: trying to set volume level to %hu\n", vol);
6584 return volume_set_volume_ec(vol); 6622 return __volume_set_volume_ec(vol);
6585} 6623}
6586 6624
6587static void volume_alsa_notify_change(void) 6625static void volume_alsa_notify_change(void)
@@ -6628,7 +6666,7 @@ static int volume_alsa_vol_get(struct snd_kcontrol *kcontrol,
6628static int volume_alsa_vol_put(struct snd_kcontrol *kcontrol, 6666static int volume_alsa_vol_put(struct snd_kcontrol *kcontrol,
6629 struct snd_ctl_elem_value *ucontrol) 6667 struct snd_ctl_elem_value *ucontrol)
6630{ 6668{
6631 return volume_set_volume(ucontrol->value.integer.value[0]); 6669 return volume_alsa_set_volume(ucontrol->value.integer.value[0]);
6632} 6670}
6633 6671
6634#define volume_alsa_mute_info snd_ctl_boolean_mono_info 6672#define volume_alsa_mute_info snd_ctl_boolean_mono_info
@@ -6651,7 +6689,7 @@ static int volume_alsa_mute_get(struct snd_kcontrol *kcontrol,
6651static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol, 6689static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol,
6652 struct snd_ctl_elem_value *ucontrol) 6690 struct snd_ctl_elem_value *ucontrol)
6653{ 6691{
6654 return volume_set_mute(!ucontrol->value.integer.value[0]); 6692 return volume_alsa_set_mute(!ucontrol->value.integer.value[0]);
6655} 6693}
6656 6694
6657static struct snd_kcontrol_new volume_alsa_control_vol __devinitdata = { 6695static struct snd_kcontrol_new volume_alsa_control_vol __devinitdata = {
@@ -8477,9 +8515,10 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
8477 "%s installed\n", ibm->name); 8515 "%s installed\n", ibm->name);
8478 8516
8479 if (ibm->read) { 8517 if (ibm->read) {
8480 mode_t mode; 8518 mode_t mode = iibm->base_procfs_mode;
8481 8519
8482 mode = S_IRUGO; 8520 if (!mode)
8521 mode = S_IRUGO;
8483 if (ibm->write) 8522 if (ibm->write)
8484 mode |= S_IWUSR; 8523 mode |= S_IWUSR;
8485 entry = proc_create_data(ibm->name, mode, proc_dir, 8524 entry = proc_create_data(ibm->name, mode, proc_dir,
@@ -8670,6 +8709,7 @@ static struct ibm_init_struct ibms_init[] __initdata = {
8670#ifdef CONFIG_THINKPAD_ACPI_VIDEO 8709#ifdef CONFIG_THINKPAD_ACPI_VIDEO
8671 { 8710 {
8672 .init = video_init, 8711 .init = video_init,
8712 .base_procfs_mode = S_IRUSR,
8673 .data = &video_driver_data, 8713 .data = &video_driver_data,
8674 }, 8714 },
8675#endif 8715#endif
@@ -9032,6 +9072,9 @@ static int __init thinkpad_acpi_module_init(void)
9032 return ret; 9072 return ret;
9033 } 9073 }
9034 } 9074 }
9075
9076 tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
9077
9035 ret = input_register_device(tpacpi_inputdev); 9078 ret = input_register_device(tpacpi_inputdev);
9036 if (ret < 0) { 9079 if (ret < 0) {
9037 printk(TPACPI_ERR "unable to register input device\n"); 9080 printk(TPACPI_ERR "unable to register input device\n");
@@ -9041,7 +9084,6 @@ static int __init thinkpad_acpi_module_init(void)
9041 tp_features.input_device_registered = 1; 9084 tp_features.input_device_registered = 1;
9042 } 9085 }
9043 9086
9044 tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
9045 return 0; 9087 return 0;
9046} 9088}
9047 9089
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 26c211724acf..405b969734d6 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -814,21 +814,23 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
814 if (hci_result == HCI_SUCCESS) { 814 if (hci_result == HCI_SUCCESS) {
815 if (value == 0x100) 815 if (value == 0x100)
816 continue; 816 continue;
817 else if (value & 0x80) { 817 /* act on key press; ignore key release */
818 key = toshiba_acpi_get_entry_by_scancode 818 if (value & 0x80)
819 (value & ~0x80); 819 continue;
820 if (!key) { 820
821 printk(MY_INFO "Unknown key %x\n", 821 key = toshiba_acpi_get_entry_by_scancode
822 value & ~0x80); 822 (value);
823 continue; 823 if (!key) {
824 } 824 printk(MY_INFO "Unknown key %x\n",
825 input_report_key(toshiba_acpi.hotkey_dev, 825 value);
826 key->keycode, 1); 826 continue;
827 input_sync(toshiba_acpi.hotkey_dev);
828 input_report_key(toshiba_acpi.hotkey_dev,
829 key->keycode, 0);
830 input_sync(toshiba_acpi.hotkey_dev);
831 } 827 }
828 input_report_key(toshiba_acpi.hotkey_dev,
829 key->keycode, 1);
830 input_sync(toshiba_acpi.hotkey_dev);
831 input_report_key(toshiba_acpi.hotkey_dev,
832 key->keycode, 0);
833 input_sync(toshiba_acpi.hotkey_dev);
832 } else if (hci_result == HCI_NOT_SUPPORTED) { 834 } else if (hci_result == HCI_NOT_SUPPORTED) {
833 /* This is a workaround for an unresolved issue on 835 /* This is a workaround for an unresolved issue on
834 * some machines where system events sporadically 836 * some machines where system events sporadically
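
The toshiba_acpi rework above hinges on the scancode convention of these events: 0x100 means the hotkey FIFO is empty and bit 7 marks a key release, so only press events are turned into an input press/release pair. A compilable sketch of just that filtering, with printf() standing in for the input-layer calls and hypothetical scancodes:

/*
 * Sketch only: press/release filtering as in the reworked notify handler.
 * printf() stands in for input_report_key()/input_sync().
 */
#include <stdio.h>

#define HCI_FIFO_EMPTY   0x100
#define KEY_RELEASE_FLAG 0x80

static void report_key(unsigned int scancode)
{
	printf("forward press+release for scancode 0x%x\n", scancode);
}

static void handle_hotkey_event(unsigned int value)
{
	if (value == HCI_FIFO_EMPTY)
		return;
	if (value & KEY_RELEASE_FLAG)	/* act on press; ignore release */
		return;
	report_key(value);
}

int main(void)
{
	handle_hotkey_event(0x3e);	/* hypothetical press */
	handle_hotkey_event(0xbe);	/* matching release, ignored */
	handle_hotkey_event(0x100);	/* FIFO-empty marker, ignored */
	return 0;
}
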
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index d935b2d04f93..ae0251ef6f4e 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -153,8 +153,6 @@ static int baud_table[] = {
153 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, 153 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
154 9600, 19200, 38400, 57600, 115200, 0 }; 154 9600, 19200, 38400, 57600, 115200, 0 };
155 155
156#define BAUD_TABLE_SIZE (sizeof(baud_table)/sizeof(baud_table[0]))
157
158/* Sets or clears DTR/RTS on the requested line */ 156/* Sets or clears DTR/RTS on the requested line */
159static inline void m68k_rtsdtr(struct m68k_serial *ss, int set) 157static inline void m68k_rtsdtr(struct m68k_serial *ss, int set)
160{ 158{
@@ -1406,10 +1404,10 @@ static void m68328_set_baud(void)
1406 USTCNT = ustcnt & ~USTCNT_TXEN; 1404 USTCNT = ustcnt & ~USTCNT_TXEN;
1407 1405
1408again: 1406again:
1409 for (i = 0; i < sizeof(baud_table) / sizeof(baud_table[0]); i++) 1407 for (i = 0; i < ARRAY_SIZE(baud_table); i++)
1410 if (baud_table[i] == m68328_console_baud) 1408 if (baud_table[i] == m68328_console_baud)
1411 break; 1409 break;
1412 if (i >= sizeof(baud_table) / sizeof(baud_table[0])) { 1410 if (i >= ARRAY_SIZE(baud_table)) {
1413 m68328_console_baud = 9600; 1411 m68328_console_baud = 9600;
1414 goto again; 1412 goto again;
1415 } 1413 }
@@ -1435,7 +1433,7 @@ int m68328_console_setup(struct console *cp, char *arg)
1435 if (arg) 1433 if (arg)
1436 n = simple_strtoul(arg,NULL,0); 1434 n = simple_strtoul(arg,NULL,0);
1437 1435
1438 for (i = 0; i < BAUD_TABLE_SIZE; i++) 1436 for (i = 0; i < ARRAY_SIZE(baud_table); i++)
1439 if (baud_table[i] == n) 1437 if (baud_table[i] == n)
1440 break; 1438 break;
1441 if (i < BAUD_TABLE_SIZE) {
1439 if (i < ARRAY_SIZE(baud_table)) {
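
The 68328serial change above is the standard ARRAY_SIZE() cleanup: derive the element count from the array itself instead of keeping a separate macro in sync with it. A stand-alone sketch of the idiom, assuming nothing beyond the baud table shown in the hunk; the kernel's ARRAY_SIZE() in <linux/kernel.h> additionally rejects pointers at compile time:

/* Sketch only: the ARRAY_SIZE() idiom in plain, compilable C. */
#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int baud_table[] = {
	0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
	9600, 19200, 38400, 57600, 115200, 0
};

int main(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(baud_table); i++)
		if (baud_table[i] == 9600)
			break;

	if (i < ARRAY_SIZE(baud_table))
		printf("9600 found at index %zu of %zu\n", i, ARRAY_SIZE(baud_table));
	return 0;
}
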
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index a81ff7bc5fa1..7c4ebe6ee18b 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2690,6 +2690,15 @@ static void __init serial8250_isa_init_ports(void)
2690 } 2690 }
2691} 2691}
2692 2692
2693static void
2694serial8250_init_fixed_type_port(struct uart_8250_port *up, unsigned int type)
2695{
2696 up->port.type = type;
2697 up->port.fifosize = uart_config[type].fifo_size;
2698 up->capabilities = uart_config[type].flags;
2699 up->tx_loadsz = uart_config[type].tx_loadsz;
2700}
2701
2693static void __init 2702static void __init
2694serial8250_register_ports(struct uart_driver *drv, struct device *dev) 2703serial8250_register_ports(struct uart_driver *drv, struct device *dev)
2695{ 2704{
@@ -2706,6 +2715,10 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
2706 struct uart_8250_port *up = &serial8250_ports[i]; 2715 struct uart_8250_port *up = &serial8250_ports[i];
2707 2716
2708 up->port.dev = dev; 2717 up->port.dev = dev;
2718
2719 if (up->port.flags & UPF_FIXED_TYPE)
2720 serial8250_init_fixed_type_port(up, up->port.type);
2721
2709 uart_add_one_port(drv, &up->port); 2722 uart_add_one_port(drv, &up->port);
2710 } 2723 }
2711} 2724}
@@ -3118,12 +3131,8 @@ int serial8250_register_port(struct uart_port *port)
3118 if (port->dev) 3131 if (port->dev)
3119 uart->port.dev = port->dev; 3132 uart->port.dev = port->dev;
3120 3133
3121 if (port->flags & UPF_FIXED_TYPE) { 3134 if (port->flags & UPF_FIXED_TYPE)
3122 uart->port.type = port->type; 3135 serial8250_init_fixed_type_port(uart, port->type);
3123 uart->port.fifosize = uart_config[port->type].fifo_size;
3124 uart->capabilities = uart_config[port->type].flags;
3125 uart->tx_loadsz = uart_config[port->type].tx_loadsz;
3126 }
3127 3136
3128 set_io_from_upio(&uart->port); 3137 set_io_from_upio(&uart->port);
3129 /* Possibly override default I/O functions. */ 3138 /* Possibly override default I/O functions. */
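
The 8250 change above only factors the UPF_FIXED_TYPE initialisation into serial8250_init_fixed_type_port() so the ISA and runtime registration paths share one table lookup. A self-contained sketch of that table-driven pattern, with a hypothetical two-entry table in place of uart_config[]:

/*
 * Sketch only: fixed-type port setup copied from a per-type config table.
 * The table entries and struct fields are hypothetical stand-ins.
 */
#include <stdio.h>

struct uart_cfg {
	const char *name;
	unsigned int fifo_size, tx_loadsz, flags;
};

struct port {
	unsigned int type, fifosize, tx_loadsz, capabilities;
};

static const struct uart_cfg uart_config[] = {
	{ "unknown", 1, 1, 0x00 },
	{ "16550A", 16, 16, 0x01 },
};

static void init_fixed_type_port(struct port *up, unsigned int type)
{
	up->type         = type;
	up->fifosize     = uart_config[type].fifo_size;
	up->tx_loadsz    = uart_config[type].tx_loadsz;
	up->capabilities = uart_config[type].flags;
}

int main(void)
{
	struct port p = { 0 };

	init_fixed_type_port(&p, 1);
	printf("%s: fifo=%u tx_loadsz=%u caps=0x%x\n",
	       uart_config[p.type].name, p.fifosize, p.tx_loadsz, p.capabilities);
	return 0;
}
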
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index b28af13c45a1..01c012da4e26 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -760,7 +760,8 @@ static int pci_netmos_init(struct pci_dev *dev)
760 /* subdevice 0x00PS means <P> parallel, <S> serial */ 760 /* subdevice 0x00PS means <P> parallel, <S> serial */
761 unsigned int num_serial = dev->subsystem_device & 0xf; 761 unsigned int num_serial = dev->subsystem_device & 0xf;
762 762
763 if (dev->device == PCI_DEVICE_ID_NETMOS_9901) 763 if ((dev->device == PCI_DEVICE_ID_NETMOS_9901) ||
764 (dev->device == PCI_DEVICE_ID_NETMOS_9865))
764 return 0; 765 return 0;
765 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && 766 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
766 dev->subsystem_device == 0x0299) 767 dev->subsystem_device == 0x0299)
@@ -1479,6 +1480,7 @@ enum pci_board_num_t {
1479 1480
1480 pbn_b0_bt_1_115200, 1481 pbn_b0_bt_1_115200,
1481 pbn_b0_bt_2_115200, 1482 pbn_b0_bt_2_115200,
1483 pbn_b0_bt_4_115200,
1482 pbn_b0_bt_8_115200, 1484 pbn_b0_bt_8_115200,
1483 1485
1484 pbn_b0_bt_1_460800, 1486 pbn_b0_bt_1_460800,
@@ -1703,6 +1705,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
1703 .base_baud = 115200, 1705 .base_baud = 115200,
1704 .uart_offset = 8, 1706 .uart_offset = 8,
1705 }, 1707 },
1708 [pbn_b0_bt_4_115200] = {
1709 .flags = FL_BASE0|FL_BASE_BARS,
1710 .num_ports = 4,
1711 .base_baud = 115200,
1712 .uart_offset = 8,
1713 },
1706 [pbn_b0_bt_8_115200] = { 1714 [pbn_b0_bt_8_115200] = {
1707 .flags = FL_BASE0|FL_BASE_BARS, 1715 .flags = FL_BASE0|FL_BASE_BARS,
1708 .num_ports = 8, 1716 .num_ports = 8,
@@ -3191,6 +3199,15 @@ static struct pci_device_id serial_pci_tbl[] = {
3191 0x1208, 0x0004, 0, 0, 3199 0x1208, 0x0004, 0, 0,
3192 pbn_b0_4_921600 }, 3200 pbn_b0_4_921600 },
3193 3201
3202 { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2,
3203 0x1204, 0x0004, 0, 0,
3204 pbn_b0_4_921600 },
3205 { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2,
3206 0x1208, 0x0004, 0, 0,
3207 pbn_b0_4_921600 },
3208 { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF3,
3209 0x1208, 0x0004, 0, 0,
3210 pbn_b0_4_921600 },
3194 /* 3211 /*
3195 * Dell Remote Access Card 4 - Tim_T_Murphy@Dell.com 3212 * Dell Remote Access Card 4 - Tim_T_Murphy@Dell.com
3196 */ 3213 */
@@ -3649,6 +3666,18 @@ static struct pci_device_id serial_pci_tbl[] = {
3649 0, 0, pbn_b0_1_115200 }, 3666 0, 0, pbn_b0_1_115200 },
3650 3667
3651 /* 3668 /*
3669 * Best Connectivity PCI Multi I/O cards
3670 */
3671
3672 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
3673 0xA000, 0x1000,
3674 0, 0, pbn_b0_1_115200 },
3675
3676 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
3677 0xA000, 0x3004,
3678 0, 0, pbn_b0_bt_4_115200 },
3679
3680 /*
3652 * These entries match devices with class COMMUNICATION_SERIAL, 3681 * These entries match devices with class COMMUNICATION_SERIAL,
3653 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL 3682 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
3654 */ 3683 */
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 888a0ce91c4b..746e07033dce 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1418,42 +1418,37 @@ config SERIAL_BFIN_SPORT
1418 To compile this driver as a module, choose M here: the 1418 To compile this driver as a module, choose M here: the
1419 module will be called bfin_sport_uart. 1419 module will be called bfin_sport_uart.
1420 1420
1421choice 1421config SERIAL_BFIN_SPORT_CONSOLE
1422 prompt "Baud rate for Blackfin SPORT UART" 1422 bool "Console on Blackfin sport emulated uart"
1423 depends on SERIAL_BFIN_SPORT 1423 depends on SERIAL_BFIN_SPORT=y
1424 default SERIAL_SPORT_BAUD_RATE_57600 1424 select SERIAL_CORE_CONSOLE
1425 help
1426 Choose a baud rate for the SPORT UART, other uart settings are
1427 8 bit, 1 stop bit, no parity, no flow control.
1428
1429config SERIAL_SPORT_BAUD_RATE_115200
1430 bool "115200"
1431
1432config SERIAL_SPORT_BAUD_RATE_57600
1433 bool "57600"
1434 1425
1435config SERIAL_SPORT_BAUD_RATE_38400 1426config SERIAL_BFIN_SPORT0_UART
1436 bool "38400" 1427 bool "Enable UART over SPORT0"
1428 depends on SERIAL_BFIN_SPORT && !(BF542 || BF542M || BF544 || BF544M)
1429 help
1430 Enable UART over SPORT0
1437 1431
1438config SERIAL_SPORT_BAUD_RATE_19200 1432config SERIAL_BFIN_SPORT1_UART
1439 bool "19200" 1433 bool "Enable UART over SPORT1"
1434 depends on SERIAL_BFIN_SPORT
1435 help
1436 Enable UART over SPORT1
1440 1437
1441config SERIAL_SPORT_BAUD_RATE_9600 1438config SERIAL_BFIN_SPORT2_UART
1442 bool "9600" 1439 bool "Enable UART over SPORT2"
1443endchoice 1440 depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
1441 help
1442 Enable UART over SPORT2
1444 1443
1445config SPORT_BAUD_RATE 1444config SERIAL_BFIN_SPORT3_UART
1446 int 1445 bool "Enable UART over SPORT3"
1447 depends on SERIAL_BFIN_SPORT 1446 depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
1448 default 115200 if (SERIAL_SPORT_BAUD_RATE_115200) 1447 help
1449 default 57600 if (SERIAL_SPORT_BAUD_RATE_57600) 1448 Enable UART over SPORT3
1450 default 38400 if (SERIAL_SPORT_BAUD_RATE_38400)
1451 default 19200 if (SERIAL_SPORT_BAUD_RATE_19200)
1452 default 9600 if (SERIAL_SPORT_BAUD_RATE_9600)
1453 1449
1454config SERIAL_TIMBERDALE 1450config SERIAL_TIMBERDALE
1455 tristate "Support for timberdale UART" 1451 tristate "Support for timberdale UART"
1456 depends on MFD_TIMBERDALE
1457 select SERIAL_CORE 1452 select SERIAL_CORE
1458 ---help--- 1453 ---help---
1459 Add support for UART controller on timberdale. 1454 Add support for UART controller on timberdale.
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 9d948bccafaf..2c9bf9b68327 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1213,6 +1213,24 @@ static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
1213 return ret; 1213 return ret;
1214} 1214}
1215 1215
1216#ifdef CONFIG_CONSOLE_POLL
1217static int atmel_poll_get_char(struct uart_port *port)
1218{
1219 while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
1220 cpu_relax();
1221
1222 return UART_GET_CHAR(port);
1223}
1224
1225static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
1226{
1227 while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
1228 cpu_relax();
1229
1230 UART_PUT_CHAR(port, ch);
1231}
1232#endif
1233
1216static struct uart_ops atmel_pops = { 1234static struct uart_ops atmel_pops = {
1217 .tx_empty = atmel_tx_empty, 1235 .tx_empty = atmel_tx_empty,
1218 .set_mctrl = atmel_set_mctrl, 1236 .set_mctrl = atmel_set_mctrl,
@@ -1232,6 +1250,10 @@ static struct uart_ops atmel_pops = {
1232 .config_port = atmel_config_port, 1250 .config_port = atmel_config_port,
1233 .verify_port = atmel_verify_port, 1251 .verify_port = atmel_verify_port,
1234 .pm = atmel_serial_pm, 1252 .pm = atmel_serial_pm,
1253#ifdef CONFIG_CONSOLE_POLL
1254 .poll_get_char = atmel_poll_get_char,
1255 .poll_put_char = atmel_poll_put_char,
1256#endif
1235}; 1257};
1236 1258
1237/* 1259/*
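
The atmel_serial addition above is the usual CONFIG_CONSOLE_POLL pair: busy-wait on the channel status register until the RX-ready or TX-ready bit is set, then read or write the data register, so KGDB can drive the port without interrupts. A self-contained sketch of the pattern with simulated registers in place of the UART_GET_CSR()/UART_GET_CHAR()/UART_PUT_CHAR() accessors:

/*
 * Sketch only: polled character I/O. The "registers" are simulated
 * variables so the example compiles and runs stand-alone.
 */
#include <stdint.h>
#include <stdio.h>

#define RXRDY (1u << 0)
#define TXRDY (1u << 1)

static volatile uint32_t csr = RXRDY | TXRDY;	/* channel status */
static volatile uint8_t rhr = 'A';		/* receive holding */
static volatile uint8_t thr;			/* transmit holding */

static int poll_get_char(void)
{
	while (!(csr & RXRDY))
		;	/* the driver calls cpu_relax() in this loop */
	return rhr;
}

static void poll_put_char(uint8_t ch)
{
	while (!(csr & TXRDY))
		;
	thr = ch;
}

int main(void)
{
	poll_put_char('B');
	printf("polled rx '%c', polled tx '%c'\n", poll_get_char(), thr);
	return 0;
}
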
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/serial/bcm63xx_uart.c
index 37ad0c449937..a1a0e55d0807 100644
--- a/drivers/serial/bcm63xx_uart.c
+++ b/drivers/serial/bcm63xx_uart.c
@@ -35,7 +35,7 @@
35#include <bcm63xx_regs.h> 35#include <bcm63xx_regs.h>
36#include <bcm63xx_io.h> 36#include <bcm63xx_io.h>
37 37
38#define BCM63XX_NR_UARTS 1 38#define BCM63XX_NR_UARTS 2
39 39
40static struct uart_port ports[BCM63XX_NR_UARTS]; 40static struct uart_port ports[BCM63XX_NR_UARTS];
41 41
@@ -784,7 +784,7 @@ static struct uart_driver bcm_uart_driver = {
784 .dev_name = "ttyS", 784 .dev_name = "ttyS",
785 .major = TTY_MAJOR, 785 .major = TTY_MAJOR,
786 .minor = 64, 786 .minor = 64,
787 .nr = 1, 787 .nr = BCM63XX_NR_UARTS,
788 .cons = BCM63XX_CONSOLE, 788 .cons = BCM63XX_CONSOLE,
789}; 789};
790 790
@@ -826,11 +826,12 @@ static int __devinit bcm_uart_probe(struct platform_device *pdev)
826 port->dev = &pdev->dev; 826 port->dev = &pdev->dev;
827 port->fifosize = 16; 827 port->fifosize = 16;
828 port->uartclk = clk_get_rate(clk) / 2; 828 port->uartclk = clk_get_rate(clk) / 2;
829 port->line = pdev->id;
829 clk_put(clk); 830 clk_put(clk);
830 831
831 ret = uart_add_one_port(&bcm_uart_driver, port); 832 ret = uart_add_one_port(&bcm_uart_driver, port);
832 if (ret) { 833 if (ret) {
833 kfree(port); 834 ports[pdev->id].membase = 0;
834 return ret; 835 return ret;
835 } 836 }
836 platform_set_drvdata(pdev, port); 837 platform_set_drvdata(pdev, port);
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 50abb7e557f4..fcf273e3f48c 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/ioport.h> 16#include <linux/ioport.h>
17#include <linux/io.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/console.h> 19#include <linux/console.h>
19#include <linux/sysrq.h> 20#include <linux/sysrq.h>
@@ -237,7 +238,8 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
237 238
238#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ 239#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
239 defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) 240 defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
240 if (kgdb_connected && kgdboc_port_line == uart->port.line) 241 if (kgdb_connected && kgdboc_port_line == uart->port.line
242 && kgdboc_break_enabled)
241 if (ch == 0x3) {/* Ctrl + C */ 243 if (ch == 0x3) {/* Ctrl + C */
242 kgdb_breakpoint(); 244 kgdb_breakpoint();
243 return; 245 return;
@@ -488,6 +490,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
488{ 490{
489 int x_pos, pos; 491 int x_pos, pos;
490 492
493 dma_disable_irq(uart->tx_dma_channel);
491 dma_disable_irq(uart->rx_dma_channel); 494 dma_disable_irq(uart->rx_dma_channel);
492 spin_lock_bh(&uart->port.lock); 495 spin_lock_bh(&uart->port.lock);
493 496
@@ -521,6 +524,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
521 } 524 }
522 525
523 spin_unlock_bh(&uart->port.lock); 526 spin_unlock_bh(&uart->port.lock);
527 dma_enable_irq(uart->tx_dma_channel);
524 dma_enable_irq(uart->rx_dma_channel); 528 dma_enable_irq(uart->rx_dma_channel);
525 529
526 mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES); 530 mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
@@ -746,15 +750,6 @@ static int bfin_serial_startup(struct uart_port *port)
746 Status interrupt.\n"); 750 Status interrupt.\n");
747 } 751 }
748 752
749 if (uart->cts_pin >= 0) {
750 gpio_request(uart->cts_pin, DRIVER_NAME);
751 gpio_direction_output(uart->cts_pin, 1);
752 }
753 if (uart->rts_pin >= 0) {
754 gpio_request(uart->rts_pin, DRIVER_NAME);
755 gpio_direction_output(uart->rts_pin, 0);
756 }
757
758 /* CTS RTS PINs are negative assertive. */ 753 /* CTS RTS PINs are negative assertive. */
759 UART_PUT_MCR(uart, ACTS); 754 UART_PUT_MCR(uart, ACTS);
760 UART_SET_IER(uart, EDSSI); 755 UART_SET_IER(uart, EDSSI);
@@ -801,10 +796,6 @@ static void bfin_serial_shutdown(struct uart_port *port)
801 gpio_free(uart->rts_pin); 796 gpio_free(uart->rts_pin);
802#endif 797#endif
803#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS 798#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
804 if (uart->cts_pin >= 0)
805 gpio_free(uart->cts_pin);
806 if (uart->rts_pin >= 0)
807 gpio_free(uart->rts_pin);
808 if (UART_GET_IER(uart) && EDSSI) 799 if (UART_GET_IER(uart) && EDSSI)
809 free_irq(uart->status_irq, uart); 800 free_irq(uart->status_irq, uart);
810#endif 801#endif
@@ -1409,8 +1400,7 @@ static int bfin_serial_remove(struct platform_device *dev)
1409 continue; 1400 continue;
1410 uart_remove_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port); 1401 uart_remove_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
1411 bfin_serial_ports[i].port.dev = NULL; 1402 bfin_serial_ports[i].port.dev = NULL;
1412#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \ 1403#if defined(CONFIG_SERIAL_BFIN_CTSRTS)
1413 defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
1414 gpio_free(bfin_serial_ports[i].cts_pin); 1404 gpio_free(bfin_serial_ports[i].cts_pin);
1415 gpio_free(bfin_serial_ports[i].rts_pin); 1405 gpio_free(bfin_serial_ports[i].rts_pin);
1416#endif 1406#endif
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index 088bb35475f1..7c72888fbf94 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -1,27 +1,11 @@
1/* 1/*
2 * File: linux/drivers/serial/bfin_sport_uart.c 2 * Blackfin On-Chip Sport Emulated UART Driver
3 * 3 *
4 * Based on: drivers/serial/bfin_5xx.c by Aubrey Li. 4 * Copyright 2006-2009 Analog Devices Inc.
5 * Author: Roy Huang <roy.huang@analog.com>
6 * 5 *
7 * Created: Nov 22, 2006 6 * Enter bugs at http://blackfin.uclinux.org/
8 * Copyright: (c) 2006-2007 Analog Devices Inc.
9 * Description: this driver enable SPORTs on Blackfin emulate UART.
10 * 7 *
11 * This program is free software; you can redistribute it and/or modify 8 * Licensed under the GPL-2 or later.
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see the file COPYING, or write
23 * to the Free Software Foundation, Inc.,
24 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */ 9 */
26 10
27/* 11/*
@@ -29,39 +13,18 @@
29 * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf 13 * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf
30 * This application note describe how to implement a UART on a Sharc DSP, 14 * This application note describe how to implement a UART on a Sharc DSP,
31 * but this driver is implemented on Blackfin Processor. 15 * but this driver is implemented on Blackfin Processor.
16 * Transmit Frame Sync is not used by this driver to transfer data out.
32 */ 17 */
33 18
34/* After reset, there is a prelude of low level pulse when transmit data first 19/* #define DEBUG */
35 * time. No addtional pulse in following transmit.
36 * According to document:
37 * The SPORTs are ready to start transmitting or receiving data no later than
38 * three serial clock cycles after they are enabled in the SPORTx_TCR1 or
39 * SPORTx_RCR1 register. No serial clock cycles are lost from this point on.
40 * The first internal frame sync will occur one frame sync delay after the
41 * SPORTs are ready. External frame syncs can occur as soon as the SPORT is
42 * ready.
43 */
44 20
45/* Thanks to Axel Alatalo <axel@rubico.se> for fixing sport rx bug. Sometimes 21#define DRV_NAME "bfin-sport-uart"
46 * sport receives data incorrectly. The following is Axel's words. 22#define DEVICE_NAME "ttySS"
47 * As EE-191, sport rx samples 3 times of the UART baudrate and takes the 23#define pr_fmt(fmt) DRV_NAME ": " fmt
48 * middle smaple of every 3 samples as the data bit. For a 8-N-1 UART setting,
49 * 30 samples will be required for a byte. If transmitter sends a 1/3 bit short
50 * byte due to buadrate drift, then the 30th sample of a byte, this sample is
51 * also the third sample of the stop bit, will happens on the immediately
52 * following start bit which will be thrown away and missed. Thus since parts
53 * of the startbit will be missed and the receiver will begin to drift, the
54 * effect accumulates over time until synchronization is lost.
55 * If only require 2 samples of the stopbit (by sampling in total 29 samples),
56 * then a to short byte as in the case above will be tolerated. Then the 1/3
57 * early startbit will trigger a framesync since the last read is complete
58 * after only 2/3 stopbit and framesync is active during the last 1/3 looking
59 * for a possible early startbit. */
60
61//#define DEBUG
62 24
63#include <linux/module.h> 25#include <linux/module.h>
64#include <linux/ioport.h> 26#include <linux/ioport.h>
27#include <linux/io.h>
65#include <linux/init.h> 28#include <linux/init.h>
66#include <linux/console.h> 29#include <linux/console.h>
67#include <linux/sysrq.h> 30#include <linux/sysrq.h>
@@ -75,23 +38,36 @@
75 38
76#include "bfin_sport_uart.h" 39#include "bfin_sport_uart.h"
77 40
41#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
78unsigned short bfin_uart_pin_req_sport0[] = 42unsigned short bfin_uart_pin_req_sport0[] =
79 {P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, \ 43 {P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, \
80 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0}; 44 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0};
81 45#endif
46#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
82unsigned short bfin_uart_pin_req_sport1[] = 47unsigned short bfin_uart_pin_req_sport1[] =
83 {P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, \ 48 {P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, \
84 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0}; 49 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0};
85 50#endif
86#define DRV_NAME "bfin-sport-uart" 51#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
52unsigned short bfin_uart_pin_req_sport2[] =
53 {P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, \
54 P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0};
55#endif
56#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
57unsigned short bfin_uart_pin_req_sport3[] =
58 {P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, \
59 P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0};
60#endif
87 61
88struct sport_uart_port { 62struct sport_uart_port {
89 struct uart_port port; 63 struct uart_port port;
90 char *name;
91
92 int tx_irq;
93 int rx_irq;
94 int err_irq; 64 int err_irq;
65 unsigned short csize;
66 unsigned short rxmask;
67 unsigned short txmask1;
68 unsigned short txmask2;
69 unsigned char stopb;
70/* unsigned char parib; */
95}; 71};
96 72
97static void sport_uart_tx_chars(struct sport_uart_port *up); 73static void sport_uart_tx_chars(struct sport_uart_port *up);
@@ -99,36 +75,42 @@ static void sport_stop_tx(struct uart_port *port);
99 75
100static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value) 76static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
101{ 77{
102 pr_debug("%s value:%x\n", __func__, value); 78 pr_debug("%s value:%x, mask1=0x%x, mask2=0x%x\n", __func__, value,
103 /* Place a Start and Stop bit */ 79 up->txmask1, up->txmask2);
80
81 /* Place Start and Stop bits */
104 __asm__ __volatile__ ( 82 __asm__ __volatile__ (
105 "R2 = b#01111111100;" 83 "%[val] <<= 1;"
106 "R3 = b#10000000001;" 84 "%[val] = %[val] & %[mask1];"
107 "%0 <<= 2;" 85 "%[val] = %[val] | %[mask2];"
108 "%0 = %0 & R2;" 86 : [val]"+d"(value)
109 "%0 = %0 | R3;" 87 : [mask1]"d"(up->txmask1), [mask2]"d"(up->txmask2)
110 : "=d"(value) 88 : "ASTAT"
111 : "d"(value)
112 : "ASTAT", "R2", "R3"
113 ); 89 );
114 pr_debug("%s value:%x\n", __func__, value); 90 pr_debug("%s value:%x\n", __func__, value);
115 91
116 SPORT_PUT_TX(up, value); 92 SPORT_PUT_TX(up, value);
117} 93}
118 94
119static inline unsigned int rx_one_byte(struct sport_uart_port *up) 95static inline unsigned char rx_one_byte(struct sport_uart_port *up)
120{ 96{
121 unsigned int value, extract; 97 unsigned int value;
98 unsigned char extract;
122 u32 tmp_mask1, tmp_mask2, tmp_shift, tmp; 99 u32 tmp_mask1, tmp_mask2, tmp_shift, tmp;
123 100
124 value = SPORT_GET_RX32(up); 101 if ((up->csize + up->stopb) > 7)
125 pr_debug("%s value:%x\n", __func__, value); 102 value = SPORT_GET_RX32(up);
103 else
104 value = SPORT_GET_RX(up);
105
106 pr_debug("%s value:%x, cs=%d, mask=0x%x\n", __func__, value,
107 up->csize, up->rxmask);
126 108
127 /* Extract 8 bits data */ 109 /* Extract data */
128 __asm__ __volatile__ ( 110 __asm__ __volatile__ (
129 "%[extr] = 0;" 111 "%[extr] = 0;"
130 "%[mask1] = 0x1801(Z);" 112 "%[mask1] = %[rxmask];"
131 "%[mask2] = 0x0300(Z);" 113 "%[mask2] = 0x0200(Z);"
132 "%[shift] = 0;" 114 "%[shift] = 0;"
133 "LSETUP(.Lloop_s, .Lloop_e) LC0 = %[lc];" 115 "LSETUP(.Lloop_s, .Lloop_e) LC0 = %[lc];"
134 ".Lloop_s:" 116 ".Lloop_s:"
@@ -138,9 +120,9 @@ static inline unsigned int rx_one_byte(struct sport_uart_port *up)
138 "%[mask1] = %[mask1] - %[mask2];" 120 "%[mask1] = %[mask1] - %[mask2];"
139 ".Lloop_e:" 121 ".Lloop_e:"
140 "%[shift] += 1;" 122 "%[shift] += 1;"
141 : [val]"=d"(value), [extr]"=d"(extract), [shift]"=d"(tmp_shift), [tmp]"=d"(tmp), 123 : [extr]"=&d"(extract), [shift]"=&d"(tmp_shift), [tmp]"=&d"(tmp),
142 [mask1]"=d"(tmp_mask1), [mask2]"=d"(tmp_mask2) 124 [mask1]"=&d"(tmp_mask1), [mask2]"=&d"(tmp_mask2)
143 : "d"(value), [lc]"a"(8) 125 : [val]"d"(value), [rxmask]"d"(up->rxmask), [lc]"a"(up->csize)
144 : "ASTAT", "LB0", "LC0", "LT0" 126 : "ASTAT", "LB0", "LC0", "LT0"
145 ); 127 );
146 128
@@ -148,29 +130,28 @@ static inline unsigned int rx_one_byte(struct sport_uart_port *up)
148 return extract; 130 return extract;
149} 131}
150 132
151static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate) 133static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
152{ 134{
153 int tclkdiv, tfsdiv, rclkdiv; 135 int tclkdiv, rclkdiv;
136 unsigned int sclk = get_sclk();
154 137
155 /* Set TCR1 and TCR2 */ 138 /* Set TCR1 and TCR2, TFSR is not enabled for uart */
156 SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK)); 139 SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK));
157 SPORT_PUT_TCR2(up, 10); 140 SPORT_PUT_TCR2(up, size + 1);
158 pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); 141 pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
159 142
160 /* Set RCR1 and RCR2 */ 143 /* Set RCR1 and RCR2 */
161 SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK)); 144 SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK));
162 SPORT_PUT_RCR2(up, 28); 145 SPORT_PUT_RCR2(up, (size + 1) * 2 - 1);
163 pr_debug("%s RCR1:%x, RCR2:%x\n", __func__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up)); 146 pr_debug("%s RCR1:%x, RCR2:%x\n", __func__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up));
164 147
165 tclkdiv = sclk/(2 * baud_rate) - 1; 148 tclkdiv = sclk / (2 * baud_rate) - 1;
166 tfsdiv = 12; 149 rclkdiv = sclk / (2 * baud_rate * 2) - 1;
167 rclkdiv = sclk/(2 * baud_rate * 3) - 1;
168 SPORT_PUT_TCLKDIV(up, tclkdiv); 150 SPORT_PUT_TCLKDIV(up, tclkdiv);
169 SPORT_PUT_TFSDIV(up, tfsdiv);
170 SPORT_PUT_RCLKDIV(up, rclkdiv); 151 SPORT_PUT_RCLKDIV(up, rclkdiv);
171 SSYNC(); 152 SSYNC();
172 pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, tfsdiv:%d, rclkdiv:%d\n", 153 pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, rclkdiv:%d\n",
173 __func__, sclk, baud_rate, tclkdiv, tfsdiv, rclkdiv); 154 __func__, sclk, baud_rate, tclkdiv, rclkdiv);
174 155
175 return 0; 156 return 0;
176} 157}
@@ -181,23 +162,29 @@ static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id)
181 struct tty_struct *tty = up->port.state->port.tty; 162 struct tty_struct *tty = up->port.state->port.tty;
182 unsigned int ch; 163 unsigned int ch;
183 164
184 do { 165 spin_lock(&up->port.lock);
166
167 while (SPORT_GET_STAT(up) & RXNE) {
185 ch = rx_one_byte(up); 168 ch = rx_one_byte(up);
186 up->port.icount.rx++; 169 up->port.icount.rx++;
187 170
188 if (uart_handle_sysrq_char(&up->port, ch)) 171 if (!uart_handle_sysrq_char(&up->port, ch))
189 ;
190 else
191 tty_insert_flip_char(tty, ch, TTY_NORMAL); 172 tty_insert_flip_char(tty, ch, TTY_NORMAL);
192 } while (SPORT_GET_STAT(up) & RXNE); 173 }
193 tty_flip_buffer_push(tty); 174 tty_flip_buffer_push(tty);
194 175
176 spin_unlock(&up->port.lock);
177
195 return IRQ_HANDLED; 178 return IRQ_HANDLED;
196} 179}
197 180
198static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id) 181static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id)
199{ 182{
200 sport_uart_tx_chars(dev_id); 183 struct sport_uart_port *up = dev_id;
184
185 spin_lock(&up->port.lock);
186 sport_uart_tx_chars(up);
187 spin_unlock(&up->port.lock);
201 188
202 return IRQ_HANDLED; 189 return IRQ_HANDLED;
203} 190}
@@ -208,6 +195,8 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
208 struct tty_struct *tty = up->port.state->port.tty; 195 struct tty_struct *tty = up->port.state->port.tty;
209 unsigned int stat = SPORT_GET_STAT(up); 196 unsigned int stat = SPORT_GET_STAT(up);
210 197
198 spin_lock(&up->port.lock);
199
211 /* Overflow in RX FIFO */ 200 /* Overflow in RX FIFO */
212 if (stat & ROVF) { 201 if (stat & ROVF) {
213 up->port.icount.overrun++; 202 up->port.icount.overrun++;
@@ -216,15 +205,16 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
216 } 205 }
217 /* These should not happen */ 206 /* These should not happen */
218 if (stat & (TOVF | TUVF | RUVF)) { 207 if (stat & (TOVF | TUVF | RUVF)) {
219 printk(KERN_ERR "SPORT Error:%s %s %s\n", 208 pr_err("SPORT Error:%s %s %s\n",
220 (stat & TOVF)?"TX overflow":"", 209 (stat & TOVF) ? "TX overflow" : "",
221 (stat & TUVF)?"TX underflow":"", 210 (stat & TUVF) ? "TX underflow" : "",
222 (stat & RUVF)?"RX underflow":""); 211 (stat & RUVF) ? "RX underflow" : "");
223 SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN); 212 SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
224 SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN); 213 SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
225 } 214 }
226 SSYNC(); 215 SSYNC();
227 216
217 spin_unlock(&up->port.lock);
228 return IRQ_HANDLED; 218 return IRQ_HANDLED;
229} 219}
230 220
@@ -232,60 +222,37 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
232static int sport_startup(struct uart_port *port) 222static int sport_startup(struct uart_port *port)
233{ 223{
234 struct sport_uart_port *up = (struct sport_uart_port *)port; 224 struct sport_uart_port *up = (struct sport_uart_port *)port;
235 char buffer[20]; 225 int ret;
236 int retval;
237 226
238 pr_debug("%s enter\n", __func__); 227 pr_debug("%s enter\n", __func__);
239 snprintf(buffer, 20, "%s rx", up->name); 228 ret = request_irq(up->port.irq, sport_uart_rx_irq, 0,
240 retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up); 229 "SPORT_UART_RX", up);
241 if (retval) { 230 if (ret) {
242 printk(KERN_ERR "Unable to request interrupt %s\n", buffer); 231 dev_err(port->dev, "unable to request SPORT RX interrupt\n");
243 return retval; 232 return ret;
244 } 233 }
245 234
246 snprintf(buffer, 20, "%s tx", up->name); 235 ret = request_irq(up->port.irq+1, sport_uart_tx_irq, 0,
247 retval = request_irq(up->tx_irq, sport_uart_tx_irq, IRQF_SAMPLE_RANDOM, buffer, up); 236 "SPORT_UART_TX", up);
248 if (retval) { 237 if (ret) {
249 printk(KERN_ERR "Unable to request interrupt %s\n", buffer); 238 dev_err(port->dev, "unable to request SPORT TX interrupt\n");
250 goto fail1; 239 goto fail1;
251 } 240 }
252 241
253 snprintf(buffer, 20, "%s err", up->name); 242 ret = request_irq(up->err_irq, sport_uart_err_irq, 0,
254 retval = request_irq(up->err_irq, sport_uart_err_irq, IRQF_SAMPLE_RANDOM, buffer, up); 243 "SPORT_UART_STATUS", up);
255 if (retval) { 244 if (ret) {
256 printk(KERN_ERR "Unable to request interrupt %s\n", buffer); 245 dev_err(port->dev, "unable to request SPORT status interrupt\n");
257 goto fail2; 246 goto fail2;
258 } 247 }
259 248
260 if (port->line) {
261 if (peripheral_request_list(bfin_uart_pin_req_sport1, DRV_NAME))
262 goto fail3;
263 } else {
264 if (peripheral_request_list(bfin_uart_pin_req_sport0, DRV_NAME))
265 goto fail3;
266 }
267
268 sport_uart_setup(up, get_sclk(), port->uartclk);
269
270 /* Enable receive interrupt */
271 SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) | RSPEN));
272 SSYNC();
273
274 return 0; 249 return 0;
250 fail2:
251 free_irq(up->port.irq+1, up);
252 fail1:
253 free_irq(up->port.irq, up);
275 254
276 255 return ret;
277fail3:
278 printk(KERN_ERR DRV_NAME
279 ": Requesting Peripherals failed\n");
280
281 free_irq(up->err_irq, up);
282fail2:
283 free_irq(up->tx_irq, up);
284fail1:
285 free_irq(up->rx_irq, up);
286
287 return retval;
288
289} 256}
290 257
291static void sport_uart_tx_chars(struct sport_uart_port *up) 258static void sport_uart_tx_chars(struct sport_uart_port *up)
@@ -344,20 +311,17 @@ static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
344static void sport_stop_tx(struct uart_port *port) 311static void sport_stop_tx(struct uart_port *port)
345{ 312{
346 struct sport_uart_port *up = (struct sport_uart_port *)port; 313 struct sport_uart_port *up = (struct sport_uart_port *)port;
347 unsigned int stat;
348 314
349 pr_debug("%s enter\n", __func__); 315 pr_debug("%s enter\n", __func__);
350 316
351 stat = SPORT_GET_STAT(up);
352 while(!(stat & TXHRE)) {
353 udelay(1);
354 stat = SPORT_GET_STAT(up);
355 }
356 /* Although the hold register is empty, last byte is still in shift 317 /* Although the hold register is empty, last byte is still in shift
357 * register and not sent out yet. If baud rate is lower than default, 318 * register and not sent out yet. So, put a dummy data into TX FIFO.
358 * delay should be longer. For example, if the baud rate is 9600, 319 * Then, sport tx stops when last byte is shift out and the dummy
359 * the delay must be at least 2ms by experience */ 320 * data is moved into the shift register.
360 udelay(500); 321 */
322 SPORT_PUT_TX(up, 0xffff);
323 while (!(SPORT_GET_STAT(up) & TXHRE))
324 cpu_relax();
361 325
362 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); 326 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
363 SSYNC(); 327 SSYNC();
@@ -370,6 +334,7 @@ static void sport_start_tx(struct uart_port *port)
370 struct sport_uart_port *up = (struct sport_uart_port *)port; 334 struct sport_uart_port *up = (struct sport_uart_port *)port;
371 335
372 pr_debug("%s enter\n", __func__); 336 pr_debug("%s enter\n", __func__);
337
373 /* Write data into SPORT FIFO before enabling SPORT to transmit */ 338 /* Write data into SPORT FIFO before enabling SPORT to transmit */
374 sport_uart_tx_chars(up); 339 sport_uart_tx_chars(up);
375 340
@@ -403,37 +368,24 @@ static void sport_shutdown(struct uart_port *port)
403{ 368{
404 struct sport_uart_port *up = (struct sport_uart_port *)port; 369 struct sport_uart_port *up = (struct sport_uart_port *)port;
405 370
406 pr_debug("%s enter\n", __func__); 371 dev_dbg(port->dev, "%s enter\n", __func__);
407 372
408 /* Disable sport */ 373 /* Disable sport */
409 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); 374 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
410 SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN)); 375 SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN));
411 SSYNC(); 376 SSYNC();
412 377
413 if (port->line) { 378 free_irq(up->port.irq, up);
414 peripheral_free_list(bfin_uart_pin_req_sport1); 379 free_irq(up->port.irq+1, up);
415 } else {
416 peripheral_free_list(bfin_uart_pin_req_sport0);
417 }
418
419 free_irq(up->rx_irq, up);
420 free_irq(up->tx_irq, up);
421 free_irq(up->err_irq, up); 380 free_irq(up->err_irq, up);
422} 381}
423 382
424static void sport_set_termios(struct uart_port *port,
425 struct ktermios *termios, struct ktermios *old)
426{
427 pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
428 uart_update_timeout(port, CS8 ,port->uartclk);
429}
430
431static const char *sport_type(struct uart_port *port) 383static const char *sport_type(struct uart_port *port)
432{ 384{
433 struct sport_uart_port *up = (struct sport_uart_port *)port; 385 struct sport_uart_port *up = (struct sport_uart_port *)port;
434 386
435 pr_debug("%s enter\n", __func__); 387 pr_debug("%s enter\n", __func__);
436 return up->name; 388 return up->port.type == PORT_BFIN_SPORT ? "BFIN-SPORT-UART" : NULL;
437} 389}
438 390
439static void sport_release_port(struct uart_port *port) 391static void sport_release_port(struct uart_port *port)
@@ -461,6 +413,110 @@ static int sport_verify_port(struct uart_port *port, struct serial_struct *ser)
461 return 0; 413 return 0;
462} 414}
463 415
416static void sport_set_termios(struct uart_port *port,
417 struct ktermios *termios, struct ktermios *old)
418{
419 struct sport_uart_port *up = (struct sport_uart_port *)port;
420 unsigned long flags;
421 int i;
422
423 pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
424
425 switch (termios->c_cflag & CSIZE) {
426 case CS8:
427 up->csize = 8;
428 break;
429 case CS7:
430 up->csize = 7;
431 break;
432 case CS6:
433 up->csize = 6;
434 break;
435 case CS5:
436 up->csize = 5;
437 break;
438 default:
439 pr_warning("requested word length not supported\n");
440 }
441
442 if (termios->c_cflag & CSTOPB) {
443 up->stopb = 1;
444 }
445 if (termios->c_cflag & PARENB) {
446 pr_warning("PAREN bits are not supported yet\n");
447 /* up->parib = 1; */
448 }
449
450 port->read_status_mask = OE;
451 if (termios->c_iflag & INPCK)
452 port->read_status_mask |= (FE | PE);
453 if (termios->c_iflag & (BRKINT | PARMRK))
454 port->read_status_mask |= BI;
455
456 /*
457 * Characters to ignore
458 */
459 port->ignore_status_mask = 0;
460 if (termios->c_iflag & IGNPAR)
461 port->ignore_status_mask |= FE | PE;
462 if (termios->c_iflag & IGNBRK) {
463 port->ignore_status_mask |= BI;
464 /*
465 * If we're ignoring parity and break indicators,
466 * ignore overruns too (for real raw support).
467 */
468 if (termios->c_iflag & IGNPAR)
469 port->ignore_status_mask |= OE;
470 }
471
472 /* RX extract mask */
473 up->rxmask = 0x01 | (((up->csize + up->stopb) * 2 - 1) << 0x8);
474 /* TX masks, 8 bit data and 1 bit stop for example:
475 * mask1 = b#0111111110
476 * mask2 = b#1000000000
477 */
478 for (i = 0, up->txmask1 = 0; i < up->csize; i++)
479 up->txmask1 |= (1<<i);
480 up->txmask2 = (1<<i);
481 if (up->stopb) {
482 ++i;
483 up->txmask2 |= (1<<i);
484 }
485 up->txmask1 <<= 1;
486 up->txmask2 <<= 1;
487 /* uart baud rate */
488 port->uartclk = uart_get_baud_rate(port, termios, old, 0, get_sclk()/16);
489
490 spin_lock_irqsave(&up->port.lock, flags);
491
492 /* Disable UART */
493 SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
494 SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
495
496 sport_uart_setup(up, up->csize + up->stopb, port->uartclk);
497
498 /* drive TX line high after config, one dummy word is
499 * necessary to stop sport after shifting one byte out
500 */
501 SPORT_PUT_TX(up, 0xffff);
502 SPORT_PUT_TX(up, 0xffff);
503 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
504 SSYNC();
505 while (!(SPORT_GET_STAT(up) & TXHRE))
506 cpu_relax();
507 SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
508 SSYNC();
509
510 /* Port speed changed, update the per-port timeout. */
511 uart_update_timeout(port, termios->c_cflag, port->uartclk);
512
513 /* Enable sport rx */
514 SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) | RSPEN);
515 SSYNC();
516
517 spin_unlock_irqrestore(&up->port.lock, flags);
518}
519
464struct uart_ops sport_uart_ops = { 520struct uart_ops sport_uart_ops = {
465 .tx_empty = sport_tx_empty, 521 .tx_empty = sport_tx_empty,
466 .set_mctrl = sport_set_mctrl, 522 .set_mctrl = sport_set_mctrl,
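
The txmask1/txmask2 computation added in the set_termios() hunk above encodes the software UART framing: shift the character left by one so bit 0 becomes the (low) start bit, mask it to the configured word length, then OR in the stop bit(s), giving ((ch << 1) & mask1) | mask2. A stand-alone sketch of that mask construction and framing; the helper and variable names here are hypothetical:

/*
 * Sketch only: start/stop-bit framing masks for a csize-bit character
 * with an optional extra stop bit, mirroring the driver's arithmetic.
 */
#include <stdio.h>

static void build_tx_masks(int csize, int extra_stopb,
			   unsigned int *mask1, unsigned int *mask2)
{
	int i;

	*mask1 = 0;
	for (i = 0; i < csize; i++)
		*mask1 |= 1u << i;
	*mask2 = 1u << i;
	if (extra_stopb) {
		++i;
		*mask2 |= 1u << i;
	}
	*mask1 <<= 1;	/* bit 0 stays clear: the start bit */
	*mask2 <<= 1;
}

int main(void)
{
	unsigned int mask1, mask2, ch = 0x55;

	build_tx_masks(8, 0, &mask1, &mask2);	/* 8 data bits, one stop bit */
	printf("mask1=0x%x mask2=0x%x framed=0x%x\n",
	       mask1, mask2, ((ch << 1) & mask1) | mask2);
	return 0;
}
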
@@ -480,138 +536,319 @@ struct uart_ops sport_uart_ops = {
480 .verify_port = sport_verify_port, 536 .verify_port = sport_verify_port,
481}; 537};
482 538
483static struct sport_uart_port sport_uart_ports[] = { 539#define BFIN_SPORT_UART_MAX_PORTS 4
484 { /* SPORT 0 */ 540
485 .name = "SPORT0", 541static struct sport_uart_port *bfin_sport_uart_ports[BFIN_SPORT_UART_MAX_PORTS];
486 .tx_irq = IRQ_SPORT0_TX, 542
487 .rx_irq = IRQ_SPORT0_RX, 543#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
488 .err_irq= IRQ_SPORT0_ERROR, 544static int __init
489 .port = { 545sport_uart_console_setup(struct console *co, char *options)
490 .type = PORT_BFIN_SPORT, 546{
491 .iotype = UPIO_MEM, 547 struct sport_uart_port *up;
492 .membase = (void __iomem *)SPORT0_TCR1, 548 int baud = 57600;
493 .mapbase = SPORT0_TCR1, 549 int bits = 8;
494 .irq = IRQ_SPORT0_RX, 550 int parity = 'n';
495 .uartclk = CONFIG_SPORT_BAUD_RATE, 551 int flow = 'n';
496 .fifosize = 8, 552
497 .ops = &sport_uart_ops, 553 /* Check whether an invalid uart number has been specified */
498 .line = 0, 554 if (co->index < 0 || co->index >= BFIN_SPORT_UART_MAX_PORTS)
499 }, 555 return -ENODEV;
500 }, { /* SPORT 1 */ 556
501 .name = "SPORT1", 557 up = bfin_sport_uart_ports[co->index];
502 .tx_irq = IRQ_SPORT1_TX, 558 if (!up)
503 .rx_irq = IRQ_SPORT1_RX, 559 return -ENODEV;
504 .err_irq= IRQ_SPORT1_ERROR, 560
505 .port = { 561 if (options)
506 .type = PORT_BFIN_SPORT, 562 uart_parse_options(options, &baud, &parity, &bits, &flow);
507 .iotype = UPIO_MEM, 563
508 .membase = (void __iomem *)SPORT1_TCR1, 564 return uart_set_options(&up->port, co, baud, parity, bits, flow);
509 .mapbase = SPORT1_TCR1, 565}
510 .irq = IRQ_SPORT1_RX, 566
511 .uartclk = CONFIG_SPORT_BAUD_RATE, 567static void sport_uart_console_putchar(struct uart_port *port, int ch)
512 .fifosize = 8, 568{
513 .ops = &sport_uart_ops, 569 struct sport_uart_port *up = (struct sport_uart_port *)port;
514 .line = 1, 570
515 }, 571 while (SPORT_GET_STAT(up) & TXF)
572 barrier();
573
574 tx_one_byte(up, ch);
575}
576
577/*
578 * Interrupts are disabled on entering
579 */
580static void
581sport_uart_console_write(struct console *co, const char *s, unsigned int count)
582{
583 struct sport_uart_port *up = bfin_sport_uart_ports[co->index];
584 unsigned long flags;
585
586 spin_lock_irqsave(&up->port.lock, flags);
587
588 if (SPORT_GET_TCR1(up) & TSPEN)
589 uart_console_write(&up->port, s, count, sport_uart_console_putchar);
590 else {
591 /* dummy data to start sport */
592 while (SPORT_GET_STAT(up) & TXF)
593 barrier();
594 SPORT_PUT_TX(up, 0xffff);
595 /* Enable transmit; an interrupt will then be generated */
596 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
597 SSYNC();
598
599 uart_console_write(&up->port, s, count, sport_uart_console_putchar);
600
601 /* Although the hold register is empty, the last byte is still in the
602 * shift register and has not been sent out yet. So put a dummy word
603 * into the TX FIFO; SPORT TX then stops once the last byte has been
604 * shifted out and the dummy word has moved into the shift register.
605 */
606 while (SPORT_GET_STAT(up) & TXF)
607 barrier();
608 SPORT_PUT_TX(up, 0xffff);
609 while (!(SPORT_GET_STAT(up) & TXHRE))
610 barrier();
611
612 /* Stop sport tx transfer */
613 SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
614 SSYNC();
516 } 615 }
616
617 spin_unlock_irqrestore(&up->port.lock, flags);
618}
619
620static struct uart_driver sport_uart_reg;
621
622static struct console sport_uart_console = {
623 .name = DEVICE_NAME,
624 .write = sport_uart_console_write,
625 .device = uart_console_device,
626 .setup = sport_uart_console_setup,
627 .flags = CON_PRINTBUFFER,
628 .index = -1,
629 .data = &sport_uart_reg,
517}; 630};
518 631
632#define SPORT_UART_CONSOLE (&sport_uart_console)
633#else
634#define SPORT_UART_CONSOLE NULL
635#endif /* CONFIG_SERIAL_BFIN_SPORT_CONSOLE */
636
637
519static struct uart_driver sport_uart_reg = { 638static struct uart_driver sport_uart_reg = {
520 .owner = THIS_MODULE, 639 .owner = THIS_MODULE,
521 .driver_name = "SPORT-UART", 640 .driver_name = DRV_NAME,
522 .dev_name = "ttySS", 641 .dev_name = DEVICE_NAME,
523 .major = 204, 642 .major = 204,
524 .minor = 84, 643 .minor = 84,
525 .nr = ARRAY_SIZE(sport_uart_ports), 644 .nr = BFIN_SPORT_UART_MAX_PORTS,
526 .cons = NULL, 645 .cons = SPORT_UART_CONSOLE,
527}; 646};
528 647
529static int sport_uart_suspend(struct platform_device *dev, pm_message_t state) 648#ifdef CONFIG_PM
649static int sport_uart_suspend(struct device *dev)
530{ 650{
531 struct sport_uart_port *sport = platform_get_drvdata(dev); 651 struct sport_uart_port *sport = dev_get_drvdata(dev);
532 652
533 pr_debug("%s enter\n", __func__); 653 dev_dbg(dev, "%s enter\n", __func__);
534 if (sport) 654 if (sport)
535 uart_suspend_port(&sport_uart_reg, &sport->port); 655 uart_suspend_port(&sport_uart_reg, &sport->port);
536 656
537 return 0; 657 return 0;
538} 658}
539 659
540static int sport_uart_resume(struct platform_device *dev) 660static int sport_uart_resume(struct device *dev)
541{ 661{
542 struct sport_uart_port *sport = platform_get_drvdata(dev); 662 struct sport_uart_port *sport = dev_get_drvdata(dev);
543 663
544 pr_debug("%s enter\n", __func__); 664 dev_dbg(dev, "%s enter\n", __func__);
545 if (sport) 665 if (sport)
546 uart_resume_port(&sport_uart_reg, &sport->port); 666 uart_resume_port(&sport_uart_reg, &sport->port);
547 667
548 return 0; 668 return 0;
549} 669}
550 670
551static int sport_uart_probe(struct platform_device *dev) 671static struct dev_pm_ops bfin_sport_uart_dev_pm_ops = {
672 .suspend = sport_uart_suspend,
673 .resume = sport_uart_resume,
674};
675#endif
676
677static int __devinit sport_uart_probe(struct platform_device *pdev)
552{ 678{
553 pr_debug("%s enter\n", __func__); 679 struct resource *res;
554 sport_uart_ports[dev->id].port.dev = &dev->dev; 680 struct sport_uart_port *sport;
555 uart_add_one_port(&sport_uart_reg, &sport_uart_ports[dev->id].port); 681 int ret = 0;
556 platform_set_drvdata(dev, &sport_uart_ports[dev->id]);
557 682
558 return 0; 683 dev_dbg(&pdev->dev, "%s enter\n", __func__);
684
685 if (pdev->id < 0 || pdev->id >= BFIN_SPORT_UART_MAX_PORTS) {
686 dev_err(&pdev->dev, "Wrong sport uart platform device id.\n");
687 return -ENOENT;
688 }
689
690 if (bfin_sport_uart_ports[pdev->id] == NULL) {
691 bfin_sport_uart_ports[pdev->id] =
692 kzalloc(sizeof(struct sport_uart_port), GFP_KERNEL);
693 sport = bfin_sport_uart_ports[pdev->id];
694 if (!sport) {
695 dev_err(&pdev->dev,
696 "Fail to kmalloc sport_uart_port\n");
697 return -ENOMEM;
698 }
699
700 ret = peripheral_request_list(
701 (unsigned short *)pdev->dev.platform_data, DRV_NAME);
702 if (ret) {
703 dev_err(&pdev->dev,
704 "Fail to request SPORT peripherals\n");
705 goto out_error_free_mem;
706 }
707
708 spin_lock_init(&sport->port.lock);
709 sport->port.fifosize = SPORT_TX_FIFO_SIZE;
710 sport->port.ops = &sport_uart_ops;
711 sport->port.line = pdev->id;
712 sport->port.iotype = UPIO_MEM;
713 sport->port.flags = UPF_BOOT_AUTOCONF;
714
715 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
716 if (res == NULL) {
717 dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
718 ret = -ENOENT;
719 goto out_error_free_peripherals;
720 }
721
722 sport->port.membase = ioremap(res->start,
723 resource_size(res));
724 if (!sport->port.membase) {
725 dev_err(&pdev->dev, "Cannot map sport IO\n");
726 ret = -ENXIO;
727 goto out_error_free_peripherals;
728 }
729
730 sport->port.irq = platform_get_irq(pdev, 0);
731 if (sport->port.irq < 0) {
732 dev_err(&pdev->dev, "No sport RX/TX IRQ specified\n");
733 ret = -ENOENT;
734 goto out_error_unmap;
735 }
736
737 sport->err_irq = platform_get_irq(pdev, 1);
738 if (sport->err_irq < 0) {
739 dev_err(&pdev->dev, "No sport status IRQ specified\n");
740 ret = -ENOENT;
741 goto out_error_unmap;
742 }
743 }
744
745#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
746 if (!is_early_platform_device(pdev)) {
747#endif
748 sport = bfin_sport_uart_ports[pdev->id];
749 sport->port.dev = &pdev->dev;
750 dev_set_drvdata(&pdev->dev, sport);
751 ret = uart_add_one_port(&sport_uart_reg, &sport->port);
752#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
753 }
754#endif
755 if (!ret)
756 return 0;
757
758 if (sport) {
759out_error_unmap:
760 iounmap(sport->port.membase);
761out_error_free_peripherals:
762 peripheral_free_list(
763 (unsigned short *)pdev->dev.platform_data);
764out_error_free_mem:
765 kfree(sport);
766 bfin_sport_uart_ports[pdev->id] = NULL;
767 }
768
769 return ret;
559} 770}
560 771
561static int sport_uart_remove(struct platform_device *dev) 772static int __devexit sport_uart_remove(struct platform_device *pdev)
562{ 773{
563 struct sport_uart_port *sport = platform_get_drvdata(dev); 774 struct sport_uart_port *sport = platform_get_drvdata(pdev);
564 775
565 pr_debug("%s enter\n", __func__); 776 dev_dbg(&pdev->dev, "%s enter\n", __func__);
566 platform_set_drvdata(dev, NULL); 777 dev_set_drvdata(&pdev->dev, NULL);
567 778
568 if (sport) 779 if (sport) {
569 uart_remove_one_port(&sport_uart_reg, &sport->port); 780 uart_remove_one_port(&sport_uart_reg, &sport->port);
781 iounmap(sport->port.membase);
782 peripheral_free_list(
783 (unsigned short *)pdev->dev.platform_data);
784 kfree(sport);
785 bfin_sport_uart_ports[pdev->id] = NULL;
786 }
570 787
571 return 0; 788 return 0;
572} 789}
573 790
574static struct platform_driver sport_uart_driver = { 791static struct platform_driver sport_uart_driver = {
575 .probe = sport_uart_probe, 792 .probe = sport_uart_probe,
576 .remove = sport_uart_remove, 793 .remove = __devexit_p(sport_uart_remove),
577 .suspend = sport_uart_suspend,
578 .resume = sport_uart_resume,
579 .driver = { 794 .driver = {
580 .name = DRV_NAME, 795 .name = DRV_NAME,
796#ifdef CONFIG_PM
797 .pm = &bfin_sport_uart_dev_pm_ops,
798#endif
581 }, 799 },
582}; 800};
583 801
802#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
803static __initdata struct early_platform_driver early_sport_uart_driver = {
804 .class_str = DRV_NAME,
805 .pdrv = &sport_uart_driver,
806 .requested_id = EARLY_PLATFORM_ID_UNSET,
807};
808
809static int __init sport_uart_rs_console_init(void)
810{
811 early_platform_driver_register(&early_sport_uart_driver, DRV_NAME);
812
813 early_platform_driver_probe(DRV_NAME, BFIN_SPORT_UART_MAX_PORTS, 0);
814
815 register_console(&sport_uart_console);
816
817 return 0;
818}
819console_initcall(sport_uart_rs_console_init);
820#endif
821
584static int __init sport_uart_init(void) 822static int __init sport_uart_init(void)
585{ 823{
586 int ret; 824 int ret;
587 825
588 pr_debug("%s enter\n", __func__); 826 pr_info("Serial: Blackfin uart over sport driver\n");
827
589 ret = uart_register_driver(&sport_uart_reg); 828 ret = uart_register_driver(&sport_uart_reg);
590 if (ret != 0) { 829 if (ret) {
591 printk(KERN_ERR "Failed to register %s:%d\n", 830 pr_err("failed to register %s:%d\n",
592 sport_uart_reg.driver_name, ret); 831 sport_uart_reg.driver_name, ret);
593 return ret; 832 return ret;
594 } 833 }
595 834
596 ret = platform_driver_register(&sport_uart_driver); 835 ret = platform_driver_register(&sport_uart_driver);
597 if (ret != 0) { 836 if (ret) {
598 printk(KERN_ERR "Failed to register sport uart driver:%d\n", ret); 837 pr_err("failed to register sport uart driver:%d\n", ret);
599 uart_unregister_driver(&sport_uart_reg); 838 uart_unregister_driver(&sport_uart_reg);
600 } 839 }
601 840
602
603 pr_debug("%s exit\n", __func__);
604 return ret; 841 return ret;
605} 842}
843module_init(sport_uart_init);
606 844
607static void __exit sport_uart_exit(void) 845static void __exit sport_uart_exit(void)
608{ 846{
609 pr_debug("%s enter\n", __func__);
610 platform_driver_unregister(&sport_uart_driver); 847 platform_driver_unregister(&sport_uart_driver);
611 uart_unregister_driver(&sport_uart_reg); 848 uart_unregister_driver(&sport_uart_reg);
612} 849}
613
614module_init(sport_uart_init);
615module_exit(sport_uart_exit); 850module_exit(sport_uart_exit);
616 851
852MODULE_AUTHOR("Sonic Zhang, Roy Huang");
853MODULE_DESCRIPTION("Blackfin serial over SPORT driver");
617MODULE_LICENSE("GPL"); 854MODULE_LICENSE("GPL");
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/serial/bfin_sport_uart.h
index 671d41cc1a3f..abe03614e4df 100644
--- a/drivers/serial/bfin_sport_uart.h
+++ b/drivers/serial/bfin_sport_uart.h
@@ -1,29 +1,23 @@
1/* 1/*
2 * File: linux/drivers/serial/bfin_sport_uart.h 2 * Blackfin On-Chip Sport Emulated UART Driver
3 * 3 *
4 * Based on: include/asm-blackfin/mach-533/bfin_serial_5xx.h 4 * Copyright 2006-2008 Analog Devices Inc.
5 * Author: Roy Huang <roy.huang>analog.com>
6 * 5 *
7 * Created: Nov 22, 2006 6 * Enter bugs at http://blackfin.uclinux.org/
8 * Copyright: (C) Analog Device Inc.
9 * Description: this driver enable SPORTs on Blackfin emulate UART.
10 * 7 *
11 * This program is free software; you can redistribute it and/or modify 8 * Licensed under the GPL-2 or later.
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, see the file COPYING, or write
23 * to the Free Software Foundation, Inc.,
24 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 */ 9 */
26 10
11/*
12 * This driver and the supported hardware are described in ADI application note EE-191:
13 * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf
14 * The application note describes how to implement a UART on a SHARC DSP,
15 * but this driver implements it on a Blackfin processor.
16 * Transmit Frame Sync is not used by this driver to transfer data out.
17 */
18
19#ifndef _BFIN_SPORT_UART_H
20#define _BFIN_SPORT_UART_H
27 21
28#define OFFSET_TCR1 0x00 /* Transmit Configuration 1 Register */ 22#define OFFSET_TCR1 0x00 /* Transmit Configuration 1 Register */
29#define OFFSET_TCR2 0x04 /* Transmit Configuration 2 Register */ 23#define OFFSET_TCR2 0x04 /* Transmit Configuration 2 Register */
@@ -61,3 +55,7 @@
61#define SPORT_PUT_RCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCLKDIV), v) 55#define SPORT_PUT_RCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCLKDIV), v)
62#define SPORT_PUT_RFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RFSDIV), v) 56#define SPORT_PUT_RFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RFSDIV), v)
63#define SPORT_PUT_STAT(sport, v) bfin_write16(((sport)->port.membase + OFFSET_STAT), v) 57#define SPORT_PUT_STAT(sport, v) bfin_write16(((sport)->port.membase + OFFSET_STAT), v)
58
59#define SPORT_TX_FIFO_SIZE 8
60
61#endif /* _BFIN_SPORT_UART_H */
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 0028b6f89ce6..53a468227056 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -751,7 +751,6 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
751 trace(icom_port, "FID_STATUS", status); 751 trace(icom_port, "FID_STATUS", status);
752 count = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].leLength); 752 count = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].leLength);
753 753
754 count = tty_buffer_request_room(tty, count);
755 trace(icom_port, "RCV_COUNT", count); 754 trace(icom_port, "RCV_COUNT", count);
756 755
757 trace(icom_port, "REAL_COUNT", count); 756 trace(icom_port, "REAL_COUNT", count);
@@ -1654,4 +1653,6 @@ MODULE_DESCRIPTION("IBM iSeries Serial IOA driver");
1654MODULE_SUPPORTED_DEVICE 1653MODULE_SUPPORTED_DEVICE
1655 ("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters"); 1654 ("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters");
1656MODULE_LICENSE("GPL"); 1655MODULE_LICENSE("GPL");
1657 1656MODULE_FIRMWARE("icom_call_setup.bin");
1657MODULE_FIRMWARE("icom_res_dce.bin");
1658MODULE_FIRMWARE("icom_asc.bin");
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 60d665a17a88..d00fcf8e6c70 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -1279,7 +1279,7 @@ static int serial_imx_probe(struct platform_device *pdev)
1279 sport->use_irda = 1; 1279 sport->use_irda = 1;
1280#endif 1280#endif
1281 1281
1282 if (pdata->init) { 1282 if (pdata && pdata->init) {
1283 ret = pdata->init(pdev); 1283 ret = pdata->init(pdev);
1284 if (ret) 1284 if (ret)
1285 goto clkput; 1285 goto clkput;
@@ -1292,7 +1292,7 @@ static int serial_imx_probe(struct platform_device *pdev)
1292 1292
1293 return 0; 1293 return 0;
1294deinit: 1294deinit:
1295 if (pdata->exit) 1295 if (pdata && pdata->exit)
1296 pdata->exit(pdev); 1296 pdata->exit(pdev);
1297clkput: 1297clkput:
1298 clk_put(sport->clk); 1298 clk_put(sport->clk);
@@ -1321,7 +1321,7 @@ static int serial_imx_remove(struct platform_device *pdev)
1321 1321
1322 clk_disable(sport->clk); 1322 clk_disable(sport->clk);
1323 1323
1324 if (pdata->exit) 1324 if (pdata && pdata->exit)
1325 pdata->exit(pdev); 1325 pdata->exit(pdev);
1326 1326
1327 iounmap(sport->port.membase); 1327 iounmap(sport->port.membase);
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 85dc0410ac1a..23ba6b40b3ac 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -1411,8 +1411,7 @@ static int receive_chars(struct uart_port *the_port)
1411 read_count = do_read(the_port, ch, MAX_CHARS); 1411 read_count = do_read(the_port, ch, MAX_CHARS);
1412 if (read_count > 0) { 1412 if (read_count > 0) {
1413 flip = 1; 1413 flip = 1;
1414 read_room = tty_buffer_request_room(tty, read_count); 1414 read_room = tty_insert_flip_string(tty, ch, read_count);
1415 tty_insert_flip_string(tty, ch, read_room);
1416 the_port->icount.rx += read_count; 1415 the_port->icount.rx += read_count;
1417 } 1416 }
1418 spin_unlock_irqrestore(&the_port->lock, pflags); 1417 spin_unlock_irqrestore(&the_port->lock, pflags);
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index 108c3e0471fd..12cb5e446a4f 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -179,6 +179,7 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
179 179
180 return 0; 180 return 0;
181 out_free_irq: 181 out_free_irq:
182 jsm_remove_uart_port(brd);
182 free_irq(brd->irq, brd); 183 free_irq(brd->irq, brd);
183 out_iounmap: 184 out_iounmap:
184 iounmap(brd->re_map_membase); 185 iounmap(brd->re_map_membase);
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index cd95e215550d..5673ca9dfdc8 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -432,7 +432,7 @@ int __devinit jsm_tty_init(struct jsm_board *brd)
432 432
433int jsm_uart_port_init(struct jsm_board *brd) 433int jsm_uart_port_init(struct jsm_board *brd)
434{ 434{
435 int i; 435 int i, rc;
436 unsigned int line; 436 unsigned int line;
437 struct jsm_channel *ch; 437 struct jsm_channel *ch;
438 438
@@ -467,8 +467,11 @@ int jsm_uart_port_init(struct jsm_board *brd)
467 } else 467 } else
468 set_bit(line, linemap); 468 set_bit(line, linemap);
469 brd->channels[i]->uart_port.line = line; 469 brd->channels[i]->uart_port.line = line;
470 if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port)) 470 rc = uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port);
471 printk(KERN_INFO "jsm: add device failed\n"); 471 if (rc){
472 printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i);
473 return rc;
474 }
472 else 475 else
473 printk(KERN_INFO "jsm: Port %d added\n", i); 476 printk(KERN_INFO "jsm: Port %d added\n", i);
474 } 477 }
diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c
index b05c5aa02cb4..ecdc0facf7ee 100644
--- a/drivers/serial/msm_serial.c
+++ b/drivers/serial/msm_serial.c
@@ -691,6 +691,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
691 struct msm_port *msm_port; 691 struct msm_port *msm_port;
692 struct resource *resource; 692 struct resource *resource;
693 struct uart_port *port; 693 struct uart_port *port;
694 int irq;
694 695
695 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR)) 696 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
696 return -ENXIO; 697 return -ENXIO;
@@ -711,9 +712,10 @@ static int __init msm_serial_probe(struct platform_device *pdev)
711 return -ENXIO; 712 return -ENXIO;
712 port->mapbase = resource->start; 713 port->mapbase = resource->start;
713 714
714 port->irq = platform_get_irq(pdev, 0); 715 irq = platform_get_irq(pdev, 0);
715 if (unlikely(port->irq < 0)) 716 if (unlikely(irq < 0))
716 return -ENXIO; 717 return -ENXIO;
718 port->irq = irq;
717 719
718 platform_set_drvdata(pdev, port); 720 platform_set_drvdata(pdev, port);
719 721
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index 34b31da01d09..7bf10264a6ac 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -421,7 +421,7 @@ static struct uart_driver timbuart_driver = {
421 421
422static int timbuart_probe(struct platform_device *dev) 422static int timbuart_probe(struct platform_device *dev)
423{ 423{
424 int err; 424 int err, irq;
425 struct timbuart_port *uart; 425 struct timbuart_port *uart;
426 struct resource *iomem; 426 struct resource *iomem;
427 427
@@ -453,11 +453,12 @@ static int timbuart_probe(struct platform_device *dev)
453 uart->port.mapbase = iomem->start; 453 uart->port.mapbase = iomem->start;
454 uart->port.membase = NULL; 454 uart->port.membase = NULL;
455 455
456 uart->port.irq = platform_get_irq(dev, 0); 456 irq = platform_get_irq(dev, 0);
457 if (uart->port.irq < 0) { 457 if (irq < 0) {
458 err = -EINVAL; 458 err = -EINVAL;
459 goto err_register; 459 goto err_register;
460 } 460 }
461 uart->port.irq = irq;
461 462
462 tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart); 463 tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
463 464
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index d8992d10d555..f6e34e03c8e4 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -144,7 +144,7 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed)
144 case USB_SPEED_LOW: 144 case USB_SPEED_LOW:
145 case USB_SPEED_FULL: 145 case USB_SPEED_FULL:
146 case USB_SPEED_HIGH: 146 case USB_SPEED_HIGH:
147 case USB_SPEED_VARIABLE: 147 case USB_SPEED_WIRELESS:
148 break; 148 break;
149 default: 149 default:
150 usbip_uerr("speed %d\n", speed); 150 usbip_uerr("speed %d\n", speed);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 4f5bb5698f5d..6a58cb1330c1 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -21,6 +21,7 @@ config USB_ARCH_HAS_HCD
21 default y if USB_ARCH_HAS_EHCI 21 default y if USB_ARCH_HAS_EHCI
22 default y if PCMCIA && !M32R # sl811_cs 22 default y if PCMCIA && !M32R # sl811_cs
23 default y if ARM # SL-811 23 default y if ARM # SL-811
24 default y if BLACKFIN # SL-811
24 default y if SUPERH # r8a66597-hcd 25 default y if SUPERH # r8a66597-hcd
25 default PCI 26 default PCI
26 27
@@ -39,6 +40,7 @@ config USB_ARCH_HAS_OHCI
39 default y if ARCH_PNX4008 && I2C 40 default y if ARCH_PNX4008 && I2C
40 default y if MFD_TC6393XB 41 default y if MFD_TC6393XB
41 default y if ARCH_W90X900 42 default y if ARCH_W90X900
43 default y if ARCH_DAVINCI_DA8XX
42 # PPC: 44 # PPC:
43 default y if STB03xxx 45 default y if STB03xxx
44 default y if PPC_MPC52xx 46 default y if PPC_MPC52xx
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index be3c9b80bc9f..80b4008c89ba 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_USB_U132_HCD) += host/
21obj-$(CONFIG_USB_R8A66597_HCD) += host/ 21obj-$(CONFIG_USB_R8A66597_HCD) += host/
22obj-$(CONFIG_USB_HWA_HCD) += host/ 22obj-$(CONFIG_USB_HWA_HCD) += host/
23obj-$(CONFIG_USB_ISP1760_HCD) += host/ 23obj-$(CONFIG_USB_ISP1760_HCD) += host/
24obj-$(CONFIG_USB_IMX21_HCD) += host/
24 25
25obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ 26obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
26 27
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 56802d2e994b..c89990f5e018 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -5,6 +5,7 @@
5 * Copyright (C) 2004 David Woodhouse, Duncan Sands, Roman Kagan 5 * Copyright (C) 2004 David Woodhouse, Duncan Sands, Roman Kagan
6 * Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru) 6 * Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
7 * Copyright (C) 2007 Simon Arlott 7 * Copyright (C) 2007 Simon Arlott
8 * Copyright (C) 2009 Simon Arlott
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free 11 * under the terms of the GNU General Public License as published by the Free
@@ -43,7 +44,7 @@
43#include "usbatm.h" 44#include "usbatm.h"
44 45
45#define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands, Simon Arlott" 46#define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands, Simon Arlott"
46#define DRIVER_VERSION "0.3" 47#define DRIVER_VERSION "0.4"
47#define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver" 48#define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver"
48 49
49static const char cxacru_driver_name[] = "cxacru"; 50static const char cxacru_driver_name[] = "cxacru";
@@ -52,6 +53,7 @@ static const char cxacru_driver_name[] = "cxacru";
52#define CXACRU_EP_DATA 0x02 /* Bulk in/out */ 53#define CXACRU_EP_DATA 0x02 /* Bulk in/out */
53 54
54#define CMD_PACKET_SIZE 64 /* Should be maxpacket(ep)? */ 55#define CMD_PACKET_SIZE 64 /* Should be maxpacket(ep)? */
56#define CMD_MAX_CONFIG ((CMD_PACKET_SIZE / 4 - 1) / 2)
55 57
56/* Addresses */ 58/* Addresses */
57#define PLLFCLK_ADDR 0x00350068 59#define PLLFCLK_ADDR 0x00350068
@@ -105,6 +107,26 @@ enum cxacru_cm_request {
105 CM_REQUEST_MAX, 107 CM_REQUEST_MAX,
106}; 108};
107 109
110/* commands for interaction with the flash memory
111 *
112 * read: response is the contents of the first 60 bytes of flash memory
113 * write: request contains the 60 bytes of data to write to flash memory
114 * response is the contents of the first 60 bytes of flash memory
115 *
116 * layout: PP PP VV VV MM MM MM MM MM MM ?? ?? SS SS SS SS SS SS SS SS
117 * SS SS SS SS SS SS SS SS 00 00 00 00 00 00 00 00 00 00 00 00
118 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
119 *
120 * P: le16 USB Product ID
121 * V: le16 USB Vendor ID
122 * M: be48 MAC Address
123 * S: le16 ASCII Serial Number
124 */
125enum cxacru_cm_flash {
126 CM_FLASH_READ = 0xa1,
127 CM_FLASH_WRITE = 0xa2
128};
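
A byte-for-byte view of the 60-byte block described in the comment above, written as a hypothetical C struct purely for illustration — the driver never declares such a type and the field names here are invented:

#include <stdint.h>

struct cxacru_flash_layout {			/* hypothetical, for illustration only */
	uint16_t usb_product_id;		/* P: le16 USB Product ID */
	uint16_t usb_vendor_id;			/* V: le16 USB Vendor ID */
	uint8_t  mac[6];			/* M: be48 MAC Address */
	uint8_t  unknown[2];			/* ?? */
	uint16_t serial[8];			/* S: serial number, one ASCII char per le16 */
	uint8_t  reserved[32];			/* trailing zero bytes */
} __attribute__((packed));

The field sizes add up to the 60 bytes carried by CM_FLASH_READ and CM_FLASH_WRITE.
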
129
108/* reply codes to the commands above */ 130/* reply codes to the commands above */
109enum cxacru_cm_status { 131enum cxacru_cm_status {
110 CM_STATUS_UNDEFINED, 132 CM_STATUS_UNDEFINED,
@@ -196,23 +218,32 @@ static DEVICE_ATTR(_name, S_IRUGO, cxacru_sysfs_show_##_name, NULL)
196static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, \ 218static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, \
197 cxacru_sysfs_show_##_name, cxacru_sysfs_store_##_name) 219 cxacru_sysfs_show_##_name, cxacru_sysfs_store_##_name)
198 220
221#define CXACRU_SET_INIT(_name) \
222static DEVICE_ATTR(_name, S_IWUSR, \
223 NULL, cxacru_sysfs_store_##_name)
224
199#define CXACRU_ATTR_INIT(_value, _type, _name) \ 225#define CXACRU_ATTR_INIT(_value, _type, _name) \
200static ssize_t cxacru_sysfs_show_##_name(struct device *dev, \ 226static ssize_t cxacru_sysfs_show_##_name(struct device *dev, \
201 struct device_attribute *attr, char *buf) \ 227 struct device_attribute *attr, char *buf) \
202{ \ 228{ \
203 struct usb_interface *intf = to_usb_interface(dev); \ 229 struct cxacru_data *instance = to_usbatm_driver_data(\
204 struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); \ 230 to_usb_interface(dev)); \
205 struct cxacru_data *instance = usbatm_instance->driver_data; \ 231\
232 if (instance == NULL) \
233 return -ENODEV; \
234\
206 return cxacru_sysfs_showattr_##_type(instance->card_info[_value], buf); \ 235 return cxacru_sysfs_showattr_##_type(instance->card_info[_value], buf); \
207} \ 236} \
208CXACRU__ATTR_INIT(_name) 237CXACRU__ATTR_INIT(_name)
209 238
210#define CXACRU_ATTR_CREATE(_v, _t, _name) CXACRU_DEVICE_CREATE_FILE(_name) 239#define CXACRU_ATTR_CREATE(_v, _t, _name) CXACRU_DEVICE_CREATE_FILE(_name)
211#define CXACRU_CMD_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name) 240#define CXACRU_CMD_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
241#define CXACRU_SET_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
212#define CXACRU__ATTR_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name) 242#define CXACRU__ATTR_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
213 243
214#define CXACRU_ATTR_REMOVE(_v, _t, _name) CXACRU_DEVICE_REMOVE_FILE(_name) 244#define CXACRU_ATTR_REMOVE(_v, _t, _name) CXACRU_DEVICE_REMOVE_FILE(_name)
215#define CXACRU_CMD_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name) 245#define CXACRU_CMD_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
246#define CXACRU_SET_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
216#define CXACRU__ATTR_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name) 247#define CXACRU__ATTR_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
217 248
218static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf) 249static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf)
@@ -267,12 +298,12 @@ static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf)
267static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf) 298static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
268{ 299{
269 static char *str[] = { 300 static char *str[] = {
270 NULL, 301 "",
271 "ANSI T1.413", 302 "ANSI T1.413",
272 "ITU-T G.992.1 (G.DMT)", 303 "ITU-T G.992.1 (G.DMT)",
273 "ITU-T G.992.2 (G.LITE)" 304 "ITU-T G.992.2 (G.LITE)"
274 }; 305 };
275 if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL)) 306 if (unlikely(value >= ARRAY_SIZE(str)))
276 return snprintf(buf, PAGE_SIZE, "%u\n", value); 307 return snprintf(buf, PAGE_SIZE, "%u\n", value);
277 return snprintf(buf, PAGE_SIZE, "%s\n", str[value]); 308 return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
278} 309}
@@ -288,22 +319,28 @@ static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
288static ssize_t cxacru_sysfs_show_mac_address(struct device *dev, 319static ssize_t cxacru_sysfs_show_mac_address(struct device *dev,
289 struct device_attribute *attr, char *buf) 320 struct device_attribute *attr, char *buf)
290{ 321{
291 struct usb_interface *intf = to_usb_interface(dev); 322 struct cxacru_data *instance = to_usbatm_driver_data(
292 struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); 323 to_usb_interface(dev));
293 struct atm_dev *atm_dev = usbatm_instance->atm_dev;
294 324
295 return snprintf(buf, PAGE_SIZE, "%pM\n", atm_dev->esi); 325 if (instance == NULL || instance->usbatm->atm_dev == NULL)
326 return -ENODEV;
327
328 return snprintf(buf, PAGE_SIZE, "%pM\n",
329 instance->usbatm->atm_dev->esi);
296} 330}
297 331
298static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev, 332static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
299 struct device_attribute *attr, char *buf) 333 struct device_attribute *attr, char *buf)
300{ 334{
301 struct usb_interface *intf = to_usb_interface(dev);
302 struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
303 struct cxacru_data *instance = usbatm_instance->driver_data;
304 u32 value = instance->card_info[CXINF_LINE_STARTABLE];
305
306 static char *str[] = { "running", "stopped" }; 335 static char *str[] = { "running", "stopped" };
336 struct cxacru_data *instance = to_usbatm_driver_data(
337 to_usb_interface(dev));
338 u32 value;
339
340 if (instance == NULL)
341 return -ENODEV;
342
343 value = instance->card_info[CXINF_LINE_STARTABLE];
307 if (unlikely(value >= ARRAY_SIZE(str))) 344 if (unlikely(value >= ARRAY_SIZE(str)))
308 return snprintf(buf, PAGE_SIZE, "%u\n", value); 345 return snprintf(buf, PAGE_SIZE, "%u\n", value);
309 return snprintf(buf, PAGE_SIZE, "%s\n", str[value]); 346 return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
@@ -312,9 +349,8 @@ static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
312static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev, 349static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
313 struct device_attribute *attr, const char *buf, size_t count) 350 struct device_attribute *attr, const char *buf, size_t count)
314{ 351{
315 struct usb_interface *intf = to_usb_interface(dev); 352 struct cxacru_data *instance = to_usbatm_driver_data(
316 struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); 353 to_usb_interface(dev));
317 struct cxacru_data *instance = usbatm_instance->driver_data;
318 int ret; 354 int ret;
319 int poll = -1; 355 int poll = -1;
320 char str_cmd[8]; 356 char str_cmd[8];
@@ -328,13 +364,16 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
328 return -EINVAL; 364 return -EINVAL;
329 ret = 0; 365 ret = 0;
330 366
367 if (instance == NULL)
368 return -ENODEV;
369
331 if (mutex_lock_interruptible(&instance->adsl_state_serialize)) 370 if (mutex_lock_interruptible(&instance->adsl_state_serialize))
332 return -ERESTARTSYS; 371 return -ERESTARTSYS;
333 372
334 if (!strcmp(str_cmd, "stop") || !strcmp(str_cmd, "restart")) { 373 if (!strcmp(str_cmd, "stop") || !strcmp(str_cmd, "restart")) {
335 ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_STOP, NULL, 0, NULL, 0); 374 ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_STOP, NULL, 0, NULL, 0);
336 if (ret < 0) { 375 if (ret < 0) {
337 atm_err(usbatm_instance, "change adsl state:" 376 atm_err(instance->usbatm, "change adsl state:"
338 " CHIP_ADSL_LINE_STOP returned %d\n", ret); 377 " CHIP_ADSL_LINE_STOP returned %d\n", ret);
339 378
340 ret = -EIO; 379 ret = -EIO;
@@ -354,7 +393,7 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
354 if (!strcmp(str_cmd, "start") || !strcmp(str_cmd, "restart")) { 393 if (!strcmp(str_cmd, "start") || !strcmp(str_cmd, "restart")) {
355 ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0); 394 ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0);
356 if (ret < 0) { 395 if (ret < 0) {
357 atm_err(usbatm_instance, "change adsl state:" 396 atm_err(instance->usbatm, "change adsl state:"
358 " CHIP_ADSL_LINE_START returned %d\n", ret); 397 " CHIP_ADSL_LINE_START returned %d\n", ret);
359 398
360 ret = -EIO; 399 ret = -EIO;
@@ -407,6 +446,72 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
407 return ret; 446 return ret;
408} 447}
409 448
449/* CM_REQUEST_CARD_DATA_GET times out, so no show attribute */
450
451static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
452 struct device_attribute *attr, const char *buf, size_t count)
453{
454 struct cxacru_data *instance = to_usbatm_driver_data(
455 to_usb_interface(dev));
456 int len = strlen(buf);
457 int ret, pos, num;
458 __le32 data[CMD_PACKET_SIZE / 4];
459
460 if (!capable(CAP_NET_ADMIN))
461 return -EACCES;
462
463 if (instance == NULL)
464 return -ENODEV;
465
466 pos = 0;
467 num = 0;
468 while (pos < len) {
469 int tmp;
470 u32 index;
471 u32 value;
472
473 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
474 if (ret < 2)
475 return -EINVAL;
476 if (index > 0x7f)
477 return -EINVAL;
478 pos += tmp;
479
480 /* skip trailing newline */
481 if (buf[pos] == '\n' && pos == len-1)
482 pos++;
483
484 data[num * 2 + 1] = cpu_to_le32(index);
485 data[num * 2 + 2] = cpu_to_le32(value);
486 num++;
487
488 /* send the config values when the data buffer is full
489 * or there is no more data
490 */
491 if (pos >= len || num >= CMD_MAX_CONFIG) {
492 char log[CMD_MAX_CONFIG * 12 + 1]; /* %02x=%08x */
493
494 data[0] = cpu_to_le32(num);
495 ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET,
496 (u8 *) data, 4 + num * 8, NULL, 0);
497 if (ret < 0) {
498 atm_err(instance->usbatm,
499 "set card data returned %d\n", ret);
500 return -EIO;
501 }
502
503 for (tmp = 0; tmp < num; tmp++)
504 snprintf(log + tmp*12, 13, " %02x=%08x",
505 le32_to_cpu(data[tmp * 2 + 1]),
506 le32_to_cpu(data[tmp * 2 + 2]));
507 atm_info(instance->usbatm, "config%s\n", log);
508 num = 0;
509 }
510 }
511
512 return len;
513}
514
410/* 515/*
411 * All device attributes are included in CXACRU_ALL_FILES 516 * All device attributes are included in CXACRU_ALL_FILES
412 * so that the same list can be used multiple times: 517 * so that the same list can be used multiple times:
@@ -442,7 +547,8 @@ CXACRU_ATTR_##_action(CXINF_MODULATION, MODU, modulation); \
442CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND, u32, adsl_headend); \ 547CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND, u32, adsl_headend); \
443CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND_ENVIRONMENT, u32, adsl_headend_environment); \ 548CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND_ENVIRONMENT, u32, adsl_headend_environment); \
444CXACRU_ATTR_##_action(CXINF_CONTROLLER_VERSION, u32, adsl_controller_version); \ 549CXACRU_ATTR_##_action(CXINF_CONTROLLER_VERSION, u32, adsl_controller_version); \
445CXACRU_CMD_##_action( adsl_state); 550CXACRU_CMD_##_action( adsl_state); \
551CXACRU_SET_##_action( adsl_config);
446 552
447CXACRU_ALL_FILES(INIT); 553CXACRU_ALL_FILES(INIT);
448 554
@@ -596,7 +702,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
596 len = ret / 4; 702 len = ret / 4;
597 for (offb = 0; offb < len; ) { 703 for (offb = 0; offb < len; ) {
598 int l = le32_to_cpu(buf[offb++]); 704 int l = le32_to_cpu(buf[offb++]);
599 if (l > stride || l > (len - offb) / 2) { 705 if (l < 0 || l > stride || l > (len - offb) / 2) {
600 if (printk_ratelimit()) 706 if (printk_ratelimit())
601 usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n", 707 usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n",
602 cm, l); 708 cm, l);
@@ -649,9 +755,6 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
649{ 755{
650 struct cxacru_data *instance = usbatm_instance->driver_data; 756 struct cxacru_data *instance = usbatm_instance->driver_data;
651 struct usb_interface *intf = usbatm_instance->usb_intf; 757 struct usb_interface *intf = usbatm_instance->usb_intf;
652 /*
653 struct atm_dev *atm_dev = usbatm_instance->atm_dev;
654 */
655 int ret; 758 int ret;
656 int start_polling = 1; 759 int start_polling = 1;
657 760
@@ -697,6 +800,9 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
697 mutex_unlock(&instance->poll_state_serialize); 800 mutex_unlock(&instance->poll_state_serialize);
698 mutex_unlock(&instance->adsl_state_serialize); 801 mutex_unlock(&instance->adsl_state_serialize);
699 802
803 printk(KERN_INFO "%s%d: %s %pM\n", atm_dev->type, atm_dev->number,
804 usbatm_instance->description, atm_dev->esi);
805
700 if (start_polling) 806 if (start_polling)
701 cxacru_poll_status(&instance->poll_work.work); 807 cxacru_poll_status(&instance->poll_work.work);
702 return 0; 808 return 0;
@@ -873,11 +979,9 @@ cleanup:
873 979
874static void cxacru_upload_firmware(struct cxacru_data *instance, 980static void cxacru_upload_firmware(struct cxacru_data *instance,
875 const struct firmware *fw, 981 const struct firmware *fw,
876 const struct firmware *bp, 982 const struct firmware *bp)
877 const struct firmware *cf)
878{ 983{
879 int ret; 984 int ret;
880 int off;
881 struct usbatm_data *usbatm = instance->usbatm; 985 struct usbatm_data *usbatm = instance->usbatm;
882 struct usb_device *usb_dev = usbatm->usb_dev; 986 struct usb_device *usb_dev = usbatm->usb_dev;
883 __le16 signature[] = { usb_dev->descriptor.idVendor, 987 __le16 signature[] = { usb_dev->descriptor.idVendor,
@@ -911,6 +1015,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
911 } 1015 }
912 1016
913 /* Firmware */ 1017 /* Firmware */
1018 usb_info(usbatm, "loading firmware\n");
914 ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size); 1019 ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size);
915 if (ret) { 1020 if (ret) {
916 usb_err(usbatm, "Firmware upload failed: %d\n", ret); 1021 usb_err(usbatm, "Firmware upload failed: %d\n", ret);
@@ -919,6 +1024,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
919 1024
920 /* Boot ROM patch */ 1025 /* Boot ROM patch */
921 if (instance->modem_type->boot_rom_patch) { 1026 if (instance->modem_type->boot_rom_patch) {
1027 usb_info(usbatm, "loading boot ROM patch\n");
922 ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size); 1028 ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size);
923 if (ret) { 1029 if (ret) {
924 usb_err(usbatm, "Boot ROM patching failed: %d\n", ret); 1030 usb_err(usbatm, "Boot ROM patching failed: %d\n", ret);
@@ -933,6 +1039,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
933 return; 1039 return;
934 } 1040 }
935 1041
1042 usb_info(usbatm, "starting device\n");
936 if (instance->modem_type->boot_rom_patch) { 1043 if (instance->modem_type->boot_rom_patch) {
937 val = cpu_to_le32(BR_ADDR); 1044 val = cpu_to_le32(BR_ADDR);
938 ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_STACK_ADDR, (u8 *) &val, 4); 1045 ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_STACK_ADDR, (u8 *) &val, 4);
@@ -958,26 +1065,6 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
958 usb_err(usbatm, "modem failed to initialize: %d\n", ret); 1065 usb_err(usbatm, "modem failed to initialize: %d\n", ret);
959 return; 1066 return;
960 } 1067 }
961
962 /* Load config data (le32), doing one packet at a time */
963 if (cf)
964 for (off = 0; off < cf->size / 4; ) {
965 __le32 buf[CMD_PACKET_SIZE / 4 - 1];
966 int i, len = min_t(int, cf->size / 4 - off, CMD_PACKET_SIZE / 4 / 2 - 1);
967 buf[0] = cpu_to_le32(len);
968 for (i = 0; i < len; i++, off++) {
969 buf[i * 2 + 1] = cpu_to_le32(off);
970 memcpy(buf + i * 2 + 2, cf->data + off * 4, 4);
971 }
972 ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET,
973 (u8 *) buf, len, NULL, 0);
974 if (ret < 0) {
975 usb_err(usbatm, "load config data failed: %d\n", ret);
976 return;
977 }
978 }
979
980 msleep_interruptible(4000);
981} 1068}
982 1069
983static int cxacru_find_firmware(struct cxacru_data *instance, 1070static int cxacru_find_firmware(struct cxacru_data *instance,
@@ -1003,7 +1090,7 @@ static int cxacru_find_firmware(struct cxacru_data *instance,
1003static int cxacru_heavy_init(struct usbatm_data *usbatm_instance, 1090static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
1004 struct usb_interface *usb_intf) 1091 struct usb_interface *usb_intf)
1005{ 1092{
1006 const struct firmware *fw, *bp, *cf; 1093 const struct firmware *fw, *bp;
1007 struct cxacru_data *instance = usbatm_instance->driver_data; 1094 struct cxacru_data *instance = usbatm_instance->driver_data;
1008 1095
1009 int ret = cxacru_find_firmware(instance, "fw", &fw); 1096 int ret = cxacru_find_firmware(instance, "fw", &fw);
@@ -1021,13 +1108,8 @@ static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
1021 } 1108 }
1022 } 1109 }
1023 1110
1024 if (cxacru_find_firmware(instance, "cf", &cf)) /* optional */ 1111 cxacru_upload_firmware(instance, fw, bp);
1025 cf = NULL;
1026
1027 cxacru_upload_firmware(instance, fw, bp, cf);
1028 1112
1029 if (cf)
1030 release_firmware(cf);
1031 if (instance->modem_type->boot_rom_patch) 1113 if (instance->modem_type->boot_rom_patch)
1032 release_firmware(bp); 1114 release_firmware(bp);
1033 release_firmware(fw); 1115 release_firmware(fw);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index fbea8563df1e..9b53e8df4648 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1333,6 +1333,7 @@ void usbatm_usb_disconnect(struct usb_interface *intf)
1333 if (instance->atm_dev) { 1333 if (instance->atm_dev) {
1334 sysfs_remove_link(&instance->atm_dev->class_dev.kobj, "device"); 1334 sysfs_remove_link(&instance->atm_dev->class_dev.kobj, "device");
1335 atm_dev_deregister(instance->atm_dev); 1335 atm_dev_deregister(instance->atm_dev);
1336 instance->atm_dev = NULL;
1336 } 1337 }
1337 1338
1338 usbatm_put_instance(instance); /* taken in usbatm_usb_probe */ 1339 usbatm_put_instance(instance); /* taken in usbatm_usb_probe */
@@ -1348,7 +1349,7 @@ static int __init usbatm_usb_init(void)
1348{ 1349{
1349 dbg("%s: driver version %s", __func__, DRIVER_VERSION); 1350 dbg("%s: driver version %s", __func__, DRIVER_VERSION);
1350 1351
1351 if (sizeof(struct usbatm_control) > sizeof(((struct sk_buff *) 0)->cb)) { 1352 if (sizeof(struct usbatm_control) > FIELD_SIZEOF(struct sk_buff, cb)) {
1352 printk(KERN_ERR "%s unusable with this kernel!\n", usbatm_driver_name); 1353 printk(KERN_ERR "%s unusable with this kernel!\n", usbatm_driver_name);
1353 return -EIO; 1354 return -EIO;
1354 } 1355 }
diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
index f6f4508a9d42..0863f85fcc26 100644
--- a/drivers/usb/atm/usbatm.h
+++ b/drivers/usb/atm/usbatm.h
@@ -204,4 +204,19 @@ struct usbatm_data {
204 struct urb *urbs[0]; 204 struct urb *urbs[0];
205}; 205};
206 206
207static inline void *to_usbatm_driver_data(struct usb_interface *intf)
208{
209 struct usbatm_data *usbatm_instance;
210
211 if (intf == NULL)
212 return NULL;
213
214 usbatm_instance = usb_get_intfdata(intf);
215
216 if (usbatm_instance == NULL) /* set NULL before unbind() */
217 return NULL;
218
219 return usbatm_instance->driver_data; /* set NULL after unbind() */
220}
221
207#endif /* _USBATM_H_ */ 222#endif /* _USBATM_H_ */
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 5633bc5c8bf2..029ee4a8a1f3 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -137,13 +137,13 @@ static int __devinit c67x00_drv_probe(struct platform_device *pdev)
137 if (!c67x00) 137 if (!c67x00)
138 return -ENOMEM; 138 return -ENOMEM;
139 139
140 if (!request_mem_region(res->start, res->end - res->start + 1, 140 if (!request_mem_region(res->start, resource_size(res),
141 pdev->name)) { 141 pdev->name)) {
142 dev_err(&pdev->dev, "Memory region busy\n"); 142 dev_err(&pdev->dev, "Memory region busy\n");
143 ret = -EBUSY; 143 ret = -EBUSY;
144 goto request_mem_failed; 144 goto request_mem_failed;
145 } 145 }
146 c67x00->hpi.base = ioremap(res->start, res->end - res->start + 1); 146 c67x00->hpi.base = ioremap(res->start, resource_size(res));
147 if (!c67x00->hpi.base) { 147 if (!c67x00->hpi.base) {
148 dev_err(&pdev->dev, "Unable to map HPI registers\n"); 148 dev_err(&pdev->dev, "Unable to map HPI registers\n");
149 ret = -EIO; 149 ret = -EIO;
@@ -182,7 +182,7 @@ static int __devinit c67x00_drv_probe(struct platform_device *pdev)
182 request_irq_failed: 182 request_irq_failed:
183 iounmap(c67x00->hpi.base); 183 iounmap(c67x00->hpi.base);
184 map_failed: 184 map_failed:
185 release_mem_region(res->start, res->end - res->start + 1); 185 release_mem_region(res->start, resource_size(res));
186 request_mem_failed: 186 request_mem_failed:
187 kfree(c67x00); 187 kfree(c67x00);
188 188
@@ -208,7 +208,7 @@ static int __devexit c67x00_drv_remove(struct platform_device *pdev)
208 208
209 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 209 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
210 if (res) 210 if (res)
211 release_mem_region(res->start, res->end - res->start + 1); 211 release_mem_region(res->start, resource_size(res));
212 212
213 kfree(c67x00); 213 kfree(c67x00);
214 214
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 34d4eb98829e..975d556b4787 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -170,6 +170,7 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
170{ 170{
171 wb->use = 0; 171 wb->use = 0;
172 acm->transmitting--; 172 acm->transmitting--;
173 usb_autopm_put_interface_async(acm->control);
173} 174}
174 175
175/* 176/*
@@ -211,9 +212,12 @@ static int acm_write_start(struct acm *acm, int wbn)
211 } 212 }
212 213
213 dbg("%s susp_count: %d", __func__, acm->susp_count); 214 dbg("%s susp_count: %d", __func__, acm->susp_count);
215 usb_autopm_get_interface_async(acm->control);
214 if (acm->susp_count) { 216 if (acm->susp_count) {
215 acm->delayed_wb = wb; 217 if (!acm->delayed_wb)
216 schedule_work(&acm->waker); 218 acm->delayed_wb = wb;
219 else
220 usb_autopm_put_interface_async(acm->control);
217 spin_unlock_irqrestore(&acm->write_lock, flags); 221 spin_unlock_irqrestore(&acm->write_lock, flags);
218 return 0; /* A white lie */ 222 return 0; /* A white lie */
219 } 223 }
@@ -424,7 +428,6 @@ next_buffer:
424 throttled = acm->throttle; 428 throttled = acm->throttle;
425 spin_unlock_irqrestore(&acm->throttle_lock, flags); 429 spin_unlock_irqrestore(&acm->throttle_lock, flags);
426 if (!throttled) { 430 if (!throttled) {
427 tty_buffer_request_room(tty, buf->size);
428 tty_insert_flip_string(tty, buf->base, buf->size); 431 tty_insert_flip_string(tty, buf->base, buf->size);
429 tty_flip_buffer_push(tty); 432 tty_flip_buffer_push(tty);
430 } else { 433 } else {
@@ -534,23 +537,6 @@ static void acm_softint(struct work_struct *work)
534 tty_kref_put(tty); 537 tty_kref_put(tty);
535} 538}
536 539
537static void acm_waker(struct work_struct *waker)
538{
539 struct acm *acm = container_of(waker, struct acm, waker);
540 int rv;
541
542 rv = usb_autopm_get_interface(acm->control);
543 if (rv < 0) {
544 dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
545 return;
546 }
547 if (acm->delayed_wb) {
548 acm_start_wb(acm, acm->delayed_wb);
549 acm->delayed_wb = NULL;
550 }
551 usb_autopm_put_interface(acm->control);
552}
553
554/* 540/*
555 * TTY handlers 541 * TTY handlers
556 */ 542 */
@@ -566,7 +552,7 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
566 552
567 acm = acm_table[tty->index]; 553 acm = acm_table[tty->index];
568 if (!acm || !acm->dev) 554 if (!acm || !acm->dev)
569 goto err_out; 555 goto out;
570 else 556 else
571 rv = 0; 557 rv = 0;
572 558
@@ -582,8 +568,9 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
582 568
583 mutex_lock(&acm->mutex); 569 mutex_lock(&acm->mutex);
584 if (acm->port.count++) { 570 if (acm->port.count++) {
571 mutex_unlock(&acm->mutex);
585 usb_autopm_put_interface(acm->control); 572 usb_autopm_put_interface(acm->control);
586 goto done; 573 goto out;
587 } 574 }
588 575
589 acm->ctrlurb->dev = acm->dev; 576 acm->ctrlurb->dev = acm->dev;
@@ -612,18 +599,18 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
612 set_bit(ASYNCB_INITIALIZED, &acm->port.flags); 599 set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
613 rv = tty_port_block_til_ready(&acm->port, tty, filp); 600 rv = tty_port_block_til_ready(&acm->port, tty, filp);
614 tasklet_schedule(&acm->urb_task); 601 tasklet_schedule(&acm->urb_task);
615done: 602
616 mutex_unlock(&acm->mutex); 603 mutex_unlock(&acm->mutex);
617err_out: 604out:
618 mutex_unlock(&open_mutex); 605 mutex_unlock(&open_mutex);
619 return rv; 606 return rv;
620 607
621full_bailout: 608full_bailout:
622 usb_kill_urb(acm->ctrlurb); 609 usb_kill_urb(acm->ctrlurb);
623bail_out: 610bail_out:
624 usb_autopm_put_interface(acm->control);
625 acm->port.count--; 611 acm->port.count--;
626 mutex_unlock(&acm->mutex); 612 mutex_unlock(&acm->mutex);
613 usb_autopm_put_interface(acm->control);
627early_bail: 614early_bail:
628 mutex_unlock(&open_mutex); 615 mutex_unlock(&open_mutex);
629 tty_port_tty_set(&acm->port, NULL); 616 tty_port_tty_set(&acm->port, NULL);
@@ -1023,7 +1010,7 @@ static int acm_probe(struct usb_interface *intf,
1023 case USB_CDC_CALL_MANAGEMENT_TYPE: 1010 case USB_CDC_CALL_MANAGEMENT_TYPE:
1024 call_management_function = buffer[3]; 1011 call_management_function = buffer[3];
1025 call_interface_num = buffer[4]; 1012 call_interface_num = buffer[4];
1026 if ((call_management_function & 3) != 3) 1013 if ((quirks & NOT_A_MODEM) == 0 && (call_management_function & 3) != 3)
1027 dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n"); 1014 dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
1028 break; 1015 break;
1029 default: 1016 default:
@@ -1178,7 +1165,6 @@ made_compressed_probe:
1178 acm->urb_task.func = acm_rx_tasklet; 1165 acm->urb_task.func = acm_rx_tasklet;
1179 acm->urb_task.data = (unsigned long) acm; 1166 acm->urb_task.data = (unsigned long) acm;
1180 INIT_WORK(&acm->work, acm_softint); 1167 INIT_WORK(&acm->work, acm_softint);
1181 INIT_WORK(&acm->waker, acm_waker);
1182 init_waitqueue_head(&acm->drain_wait); 1168 init_waitqueue_head(&acm->drain_wait);
1183 spin_lock_init(&acm->throttle_lock); 1169 spin_lock_init(&acm->throttle_lock);
1184 spin_lock_init(&acm->write_lock); 1170 spin_lock_init(&acm->write_lock);
@@ -1343,7 +1329,6 @@ static void stop_data_traffic(struct acm *acm)
1343 tasklet_enable(&acm->urb_task); 1329 tasklet_enable(&acm->urb_task);
1344 1330
1345 cancel_work_sync(&acm->work); 1331 cancel_work_sync(&acm->work);
1346 cancel_work_sync(&acm->waker);
1347} 1332}
1348 1333
1349static void acm_disconnect(struct usb_interface *intf) 1334static void acm_disconnect(struct usb_interface *intf)
@@ -1435,6 +1420,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
1435static int acm_resume(struct usb_interface *intf) 1420static int acm_resume(struct usb_interface *intf)
1436{ 1421{
1437 struct acm *acm = usb_get_intfdata(intf); 1422 struct acm *acm = usb_get_intfdata(intf);
1423 struct acm_wb *wb;
1438 int rv = 0; 1424 int rv = 0;
1439 int cnt; 1425 int cnt;
1440 1426
@@ -1449,6 +1435,21 @@ static int acm_resume(struct usb_interface *intf)
1449 mutex_lock(&acm->mutex); 1435 mutex_lock(&acm->mutex);
1450 if (acm->port.count) { 1436 if (acm->port.count) {
1451 rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO); 1437 rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
1438
1439 spin_lock_irq(&acm->write_lock);
1440 if (acm->delayed_wb) {
1441 wb = acm->delayed_wb;
1442 acm->delayed_wb = NULL;
1443 spin_unlock_irq(&acm->write_lock);
1444 acm_start_wb(acm, wb);
1445 } else {
1446 spin_unlock_irq(&acm->write_lock);
1447 }
1448
1449 /*
1450 * error checking is delayed because we must
1451 * run the write path at all costs
1452 */
1452 if (rv < 0) 1453 if (rv < 0)
1453 goto err_out; 1454 goto err_out;
1454 1455
@@ -1460,6 +1461,23 @@ err_out:
1460 return rv; 1461 return rv;
1461} 1462}
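
The suspend/resume interplay added here boils down to parking at most one write buffer while the interface is suspended and submitting it once the device is back. A condensed sketch of that hand-off, using the driver's own struct acm / struct acm_wb / acm_start_wb() names but with the autopm calls and error handling stripped out (so it is not the driver code itself), might look like this inside cdc-acm.c:

#include <linux/spinlock.h>
#include "cdc-acm.h"

/* Condensed sketch of the delayed-write hand-off: the write path parks a
 * single wb in acm->delayed_wb while suspended, and resume picks it up.
 */
static void resume_flush_delayed_wb(struct acm *acm)
{
	struct acm_wb *wb = NULL;

	spin_lock_irq(&acm->write_lock);
	if (acm->delayed_wb) {
		wb = acm->delayed_wb;
		acm->delayed_wb = NULL;		/* clear before dropping the lock */
	}
	spin_unlock_irq(&acm->write_lock);

	if (wb)
		acm_start_wb(acm, wb);		/* submit outside the spinlock */
}

Note that acm_start_wb() must be handed the saved wb pointer, not acm->delayed_wb, since the latter has already been cleared under the lock.
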
1462 1463
1464static int acm_reset_resume(struct usb_interface *intf)
1465{
1466 struct acm *acm = usb_get_intfdata(intf);
1467 struct tty_struct *tty;
1468
1469 mutex_lock(&acm->mutex);
1470 if (acm->port.count) {
1471 tty = tty_port_tty_get(&acm->port);
1472 if (tty) {
1473 tty_hangup(tty);
1474 tty_kref_put(tty);
1475 }
1476 }
1477 mutex_unlock(&acm->mutex);
1478 return acm_resume(intf);
1479}
1480
1463#endif /* CONFIG_PM */ 1481#endif /* CONFIG_PM */
1464 1482
1465#define NOKIA_PCSUITE_ACM_INFO(x) \ 1483#define NOKIA_PCSUITE_ACM_INFO(x) \
@@ -1471,7 +1489,7 @@ err_out:
1471 * USB driver structure. 1489 * USB driver structure.
1472 */ 1490 */
1473 1491
1474static struct usb_device_id acm_ids[] = { 1492static const struct usb_device_id acm_ids[] = {
1475 /* quirky and broken devices */ 1493 /* quirky and broken devices */
1476 { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ 1494 { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
1477 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1495 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
@@ -1576,6 +1594,11 @@ static struct usb_device_id acm_ids[] = {
1576 1594
1577 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ 1595 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
1578 1596
1597 /* Support Lego NXT using pbLua firmware */
1598 { USB_DEVICE(0x0694, 0xff00),
1599 .driver_info = NOT_A_MODEM,
1600 },
1601
1579 /* control interfaces with various AT-command sets */ 1602 /* control interfaces with various AT-command sets */
1580 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1603 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1581 USB_CDC_ACM_PROTO_AT_V25TER) }, 1604 USB_CDC_ACM_PROTO_AT_V25TER) },
@@ -1602,6 +1625,7 @@ static struct usb_driver acm_driver = {
1602#ifdef CONFIG_PM 1625#ifdef CONFIG_PM
1603 .suspend = acm_suspend, 1626 .suspend = acm_suspend,
1604 .resume = acm_resume, 1627 .resume = acm_resume,
1628 .reset_resume = acm_reset_resume,
1605#endif 1629#endif
1606 .id_table = acm_ids, 1630 .id_table = acm_ids,
1607#ifdef CONFIG_PM 1631#ifdef CONFIG_PM
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index c4a0ee8ffccf..4a8e87ec6ce9 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -112,7 +112,6 @@ struct acm {
112 struct mutex mutex; 112 struct mutex mutex;
113 struct usb_cdc_line_coding line; /* bits, stop, parity */ 113 struct usb_cdc_line_coding line; /* bits, stop, parity */
114 struct work_struct work; /* work queue entry for line discipline waking up */ 114 struct work_struct work; /* work queue entry for line discipline waking up */
115 struct work_struct waker;
116 wait_queue_head_t drain_wait; /* close processing */ 115 wait_queue_head_t drain_wait; /* close processing */
117 struct tasklet_struct urb_task; /* rx processing */ 116 struct tasklet_struct urb_task; /* rx processing */
118	spinlock_t throttle_lock;		/* synchronize throttling and read callback */ 117	spinlock_t throttle_lock;		/* synchronize throttling and read callback */
@@ -137,3 +136,4 @@ struct acm {
137#define NO_UNION_NORMAL 1 136#define NO_UNION_NORMAL 1
138#define SINGLE_RX_URB 2 137#define SINGLE_RX_URB 2
139#define NO_CAP_LINE 4 138#define NO_CAP_LINE 4
139#define NOT_A_MODEM 8
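NOT_A_MODEM joins the quirk bits carried in usb_device_id.driver_info. A hedged sketch of how a probe routine typically consumes such a bit — example_probe is invented and is not the literal acm_probe():

/* Sketch: reject interfaces flagged NOT_A_MODEM at probe time so the
 * driver never binds to them (e.g. the Lego NXT pbLua console).
 * NOT_A_MODEM (8) is the quirk bit defined in cdc-acm.h above. */
#include <linux/usb.h>
#include "cdc-acm.h"

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	unsigned long quirks = id->driver_info;

	if (quirks & NOT_A_MODEM)
		return -ENODEV;	/* enumerates as CDC ACM but speaks no AT commands */

	/* normal descriptor parsing and setup would follow here */
	return 0;
}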
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 3e564bfe17d1..18aafcb08fc8 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -31,7 +31,7 @@
31#define DRIVER_AUTHOR "Oliver Neukum" 31#define DRIVER_AUTHOR "Oliver Neukum"
32#define DRIVER_DESC "USB Abstract Control Model driver for USB WCM Device Management" 32#define DRIVER_DESC "USB Abstract Control Model driver for USB WCM Device Management"
33 33
34static struct usb_device_id wdm_ids[] = { 34static const struct usb_device_id wdm_ids[] = {
35 { 35 {
36 .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS | 36 .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS |
37 USB_DEVICE_ID_MATCH_INT_SUBCLASS, 37 USB_DEVICE_ID_MATCH_INT_SUBCLASS,
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 9bc112ee7803..93b5f85d7ceb 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -163,7 +163,6 @@ struct usblp {
163 unsigned char used; /* True if open */ 163 unsigned char used; /* True if open */
164 unsigned char present; /* True if not disconnected */ 164 unsigned char present; /* True if not disconnected */
165 unsigned char bidir; /* interface is bidirectional */ 165 unsigned char bidir; /* interface is bidirectional */
166 unsigned char sleeping; /* interface is suspended */
167 unsigned char no_paper; /* Paper Out happened */ 166 unsigned char no_paper; /* Paper Out happened */
168 unsigned char *device_id_string; /* IEEE 1284 DEVICE ID string (ptr) */ 167 unsigned char *device_id_string; /* IEEE 1284 DEVICE ID string (ptr) */
169 /* first 2 bytes are (big-endian) length */ 168 /* first 2 bytes are (big-endian) length */
@@ -191,7 +190,6 @@ static void usblp_dump(struct usblp *usblp) {
191 dbg("quirks=%d", usblp->quirks); 190 dbg("quirks=%d", usblp->quirks);
192 dbg("used=%d", usblp->used); 191 dbg("used=%d", usblp->used);
193 dbg("bidir=%d", usblp->bidir); 192 dbg("bidir=%d", usblp->bidir);
194 dbg("sleeping=%d", usblp->sleeping);
195 dbg("device_id_string=\"%s\"", 193 dbg("device_id_string=\"%s\"",
196 usblp->device_id_string ? 194 usblp->device_id_string ?
197 usblp->device_id_string + 2 : 195 usblp->device_id_string + 2 :
@@ -376,7 +374,7 @@ static int usblp_check_status(struct usblp *usblp, int err)
376 374
377static int handle_bidir (struct usblp *usblp) 375static int handle_bidir (struct usblp *usblp)
378{ 376{
379 if (usblp->bidir && usblp->used && !usblp->sleeping) { 377 if (usblp->bidir && usblp->used) {
380 if (usblp_submit_read(usblp) < 0) 378 if (usblp_submit_read(usblp) < 0)
381 return -EIO; 379 return -EIO;
382 } 380 }
@@ -503,11 +501,6 @@ static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
503 goto done; 501 goto done;
504 } 502 }
505 503
506 if (usblp->sleeping) {
507 retval = -ENODEV;
508 goto done;
509 }
510
511 dbg("usblp_ioctl: cmd=0x%x (%c nr=%d len=%d dir=%d)", cmd, _IOC_TYPE(cmd), 504 dbg("usblp_ioctl: cmd=0x%x (%c nr=%d len=%d dir=%d)", cmd, _IOC_TYPE(cmd),
512 _IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd) ); 505 _IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd) );
513 506
@@ -914,8 +907,6 @@ static int usblp_wtest(struct usblp *usblp, int nonblock)
914 return 0; 907 return 0;
915 } 908 }
916 spin_unlock_irqrestore(&usblp->lock, flags); 909 spin_unlock_irqrestore(&usblp->lock, flags);
917 if (usblp->sleeping)
918 return -ENODEV;
919 if (nonblock) 910 if (nonblock)
920 return -EAGAIN; 911 return -EAGAIN;
921 return 1; 912 return 1;
@@ -968,8 +959,6 @@ static int usblp_rtest(struct usblp *usblp, int nonblock)
968 return 0; 959 return 0;
969 } 960 }
970 spin_unlock_irqrestore(&usblp->lock, flags); 961 spin_unlock_irqrestore(&usblp->lock, flags);
971 if (usblp->sleeping)
972 return -ENODEV;
973 if (nonblock) 962 if (nonblock)
974 return -EAGAIN; 963 return -EAGAIN;
975 return 1; 964 return 1;
@@ -1377,12 +1366,10 @@ static void usblp_disconnect(struct usb_interface *intf)
1377 mutex_unlock (&usblp_mutex); 1366 mutex_unlock (&usblp_mutex);
1378} 1367}
1379 1368
1380static int usblp_suspend (struct usb_interface *intf, pm_message_t message) 1369static int usblp_suspend(struct usb_interface *intf, pm_message_t message)
1381{ 1370{
1382 struct usblp *usblp = usb_get_intfdata (intf); 1371 struct usblp *usblp = usb_get_intfdata (intf);
1383 1372
1384 /* we take no more IO */
1385 usblp->sleeping = 1;
1386 usblp_unlink_urbs(usblp); 1373 usblp_unlink_urbs(usblp);
1387#if 0 /* XXX Do we want this? What if someone is reading, should we fail? */ 1374#if 0 /* XXX Do we want this? What if someone is reading, should we fail? */
1388 /* not strictly necessary, but just in case */ 1375 /* not strictly necessary, but just in case */
@@ -1393,18 +1380,17 @@ static int usblp_suspend (struct usb_interface *intf, pm_message_t message)
1393 return 0; 1380 return 0;
1394} 1381}
1395 1382
1396static int usblp_resume (struct usb_interface *intf) 1383static int usblp_resume(struct usb_interface *intf)
1397{ 1384{
1398 struct usblp *usblp = usb_get_intfdata (intf); 1385 struct usblp *usblp = usb_get_intfdata (intf);
1399 int r; 1386 int r;
1400 1387
1401 usblp->sleeping = 0;
1402 r = handle_bidir (usblp); 1388 r = handle_bidir (usblp);
1403 1389
1404 return r; 1390 return r;
1405} 1391}
1406 1392
1407static struct usb_device_id usblp_ids [] = { 1393static const struct usb_device_id usblp_ids[] = {
1408 { USB_DEVICE_INFO(7, 1, 1) }, 1394 { USB_DEVICE_INFO(7, 1, 1) },
1409 { USB_DEVICE_INFO(7, 1, 2) }, 1395 { USB_DEVICE_INFO(7, 1, 2) },
1410 { USB_DEVICE_INFO(7, 1, 3) }, 1396 { USB_DEVICE_INFO(7, 1, 3) },
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 7c5f4e32c920..8588c0937a89 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -48,7 +48,7 @@
48 */ 48 */
49#define USBTMC_MAX_READS_TO_CLEAR_BULK_IN 100 49#define USBTMC_MAX_READS_TO_CLEAR_BULK_IN 100
50 50
51static struct usb_device_id usbtmc_devices[] = { 51static const struct usb_device_id usbtmc_devices[] = {
52 { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), }, 52 { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), },
53 { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 1), }, 53 { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 1), },
54 { 0, } /* terminating entry */ 54 { 0, } /* terminating entry */
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index ad925946f869..97a819c23ef3 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -91,8 +91,8 @@ config USB_DYNAMIC_MINORS
91 If you are unsure about this, say N here. 91 If you are unsure about this, say N here.
92 92
93config USB_SUSPEND 93config USB_SUSPEND
94 bool "USB selective suspend/resume and wakeup" 94 bool "USB runtime power management (suspend/resume and wakeup)"
95 depends on USB && PM 95 depends on USB && PM_RUNTIME
96 help 96 help
97 If you say Y here, you can use driver calls or the sysfs 97 If you say Y here, you can use driver calls or the sysfs
98 "power/level" file to suspend or resume individual USB 98 "power/level" file to suspend or resume individual USB
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 355dffcc23b0..c83c975152a6 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -118,6 +118,7 @@ static const char *format_endpt =
118 */ 118 */
119 119
120static DECLARE_WAIT_QUEUE_HEAD(deviceconndiscwq); 120static DECLARE_WAIT_QUEUE_HEAD(deviceconndiscwq);
121/* guarded by usbfs_mutex */
121static unsigned int conndiscevcnt; 122static unsigned int conndiscevcnt;
122 123
123/* this struct stores the poll state for <mountpoint>/devices pollers */ 124/* this struct stores the poll state for <mountpoint>/devices pollers */
@@ -156,7 +157,9 @@ static const struct class_info clas_info[] =
156 157
157void usbfs_conn_disc_event(void) 158void usbfs_conn_disc_event(void)
158{ 159{
160 mutex_lock(&usbfs_mutex);
159 conndiscevcnt++; 161 conndiscevcnt++;
162 mutex_unlock(&usbfs_mutex);
160 wake_up(&deviceconndiscwq); 163 wake_up(&deviceconndiscwq);
161} 164}
162 165
@@ -629,42 +632,29 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
629static unsigned int usb_device_poll(struct file *file, 632static unsigned int usb_device_poll(struct file *file,
630 struct poll_table_struct *wait) 633 struct poll_table_struct *wait)
631{ 634{
632 struct usb_device_status *st = file->private_data; 635 struct usb_device_status *st;
633 unsigned int mask = 0; 636 unsigned int mask = 0;
634 637
635 lock_kernel(); 638 mutex_lock(&usbfs_mutex);
639 st = file->private_data;
636 if (!st) { 640 if (!st) {
637 st = kmalloc(sizeof(struct usb_device_status), GFP_KERNEL); 641 st = kmalloc(sizeof(struct usb_device_status), GFP_KERNEL);
638
639 /* we may have dropped BKL -
640 * need to check for having lost the race */
641 if (file->private_data) {
642 kfree(st);
643 st = file->private_data;
644 goto lost_race;
645 }
646 /* we haven't lost - check for allocation failure now */
647 if (!st) { 642 if (!st) {
648 unlock_kernel(); 643 mutex_unlock(&usbfs_mutex);
649 return POLLIN; 644 return POLLIN;
650 } 645 }
651 646
652 /*
653 * need to prevent the module from being unloaded, since
654 * proc_unregister does not call the release method and
655 * we would have a memory leak
656 */
657 st->lastev = conndiscevcnt; 647 st->lastev = conndiscevcnt;
658 file->private_data = st; 648 file->private_data = st;
659 mask = POLLIN; 649 mask = POLLIN;
660 } 650 }
661lost_race: 651
662 if (file->f_mode & FMODE_READ) 652 if (file->f_mode & FMODE_READ)
663 poll_wait(file, &deviceconndiscwq, wait); 653 poll_wait(file, &deviceconndiscwq, wait);
664 if (st->lastev != conndiscevcnt) 654 if (st->lastev != conndiscevcnt)
665 mask |= POLLIN; 655 mask |= POLLIN;
666 st->lastev = conndiscevcnt; 656 st->lastev = conndiscevcnt;
667 unlock_kernel(); 657 mutex_unlock(&usbfs_mutex);
668 return mask; 658 return mask;
669} 659}
670 660
@@ -685,7 +675,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
685{ 675{
686 loff_t ret; 676 loff_t ret;
687 677
688 lock_kernel(); 678 mutex_lock(&file->f_dentry->d_inode->i_mutex);
689 679
690 switch (orig) { 680 switch (orig) {
691 case 0: 681 case 0:
@@ -701,7 +691,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
701 ret = -EINVAL; 691 ret = -EINVAL;
702 } 692 }
703 693
704 unlock_kernel(); 694 mutex_unlock(&file->f_dentry->d_inode->i_mutex);
705 return ret; 695 return ret;
706} 696}
707 697
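With the BKL replaced by usbfs_mutex and the inode mutex, the poll semantics of the usbfs "devices" file stay the same: a poller sees POLLIN whenever a device was connected or disconnected since its last poll. A small userspace sketch; the /proc/bus/usb mount point is an assumption and may differ on your system:

/* Userspace sketch: wait for USB connect/disconnect events by polling
 * the usbfs "devices" file.  The path assumes usbfs is mounted at
 * /proc/bus/usb. */
#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;
	int i;

	pfd.fd = open("/proc/bus/usb/devices", O_RDONLY);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLIN;

	/* the very first poll reports POLLIN; after that, POLLIN means a
	 * device came or went since the previous poll */
	for (i = 0; i < 5; i++) {
		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
			printf("connect/disconnect event %d\n", i);
	}
	close(pfd.fd);
	return 0;
}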
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index a678186f218f..e909ff7b9094 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -122,7 +122,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
122{ 122{
123 loff_t ret; 123 loff_t ret;
124 124
125 lock_kernel(); 125 mutex_lock(&file->f_dentry->d_inode->i_mutex);
126 126
127 switch (orig) { 127 switch (orig) {
128 case 0: 128 case 0:
@@ -138,7 +138,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
138 ret = -EINVAL; 138 ret = -EINVAL;
139 } 139 }
140 140
141 unlock_kernel(); 141 mutex_unlock(&file->f_dentry->d_inode->i_mutex);
142 return ret; 142 return ret;
143} 143}
144 144
@@ -310,7 +310,8 @@ static struct async *async_getpending(struct dev_state *ps,
310 310
311static void snoop_urb(struct usb_device *udev, 311static void snoop_urb(struct usb_device *udev,
312 void __user *userurb, int pipe, unsigned length, 312 void __user *userurb, int pipe, unsigned length,
313 int timeout_or_status, enum snoop_when when) 313 int timeout_or_status, enum snoop_when when,
314 unsigned char *data, unsigned data_len)
314{ 315{
315 static const char *types[] = {"isoc", "int", "ctrl", "bulk"}; 316 static const char *types[] = {"isoc", "int", "ctrl", "bulk"};
316 static const char *dirs[] = {"out", "in"}; 317 static const char *dirs[] = {"out", "in"};
@@ -344,6 +345,11 @@ static void snoop_urb(struct usb_device *udev,
344 "status %d\n", 345 "status %d\n",
345 ep, t, d, length, timeout_or_status); 346 ep, t, d, length, timeout_or_status);
346 } 347 }
348
349 if (data && data_len > 0) {
350 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1,
351 data, data_len, 1);
352 }
347} 353}
348 354
349#define AS_CONTINUATION 1 355#define AS_CONTINUATION 1
@@ -410,7 +416,9 @@ static void async_completed(struct urb *urb)
410 } 416 }
411 snoop(&urb->dev->dev, "urb complete\n"); 417 snoop(&urb->dev->dev, "urb complete\n");
412 snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length, 418 snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
413 as->status, COMPLETE); 419 as->status, COMPLETE,
420 ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_OUT) ?
421 NULL : urb->transfer_buffer, urb->actual_length);
414 if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET && 422 if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
415 as->status != -ENOENT) 423 as->status != -ENOENT)
416 cancel_bulk_urbs(ps, as->bulk_addr); 424 cancel_bulk_urbs(ps, as->bulk_addr);
@@ -653,20 +661,20 @@ static int usbdev_open(struct inode *inode, struct file *file)
653 const struct cred *cred = current_cred(); 661 const struct cred *cred = current_cred();
654 int ret; 662 int ret;
655 663
656 lock_kernel();
657 /* Protect against simultaneous removal or release */
658 mutex_lock(&usbfs_mutex);
659
660 ret = -ENOMEM; 664 ret = -ENOMEM;
661 ps = kmalloc(sizeof(struct dev_state), GFP_KERNEL); 665 ps = kmalloc(sizeof(struct dev_state), GFP_KERNEL);
662 if (!ps) 666 if (!ps)
663 goto out; 667 goto out_free_ps;
664 668
665 ret = -ENODEV; 669 ret = -ENODEV;
666 670
671 /* Protect against simultaneous removal or release */
672 mutex_lock(&usbfs_mutex);
673
667 /* usbdev device-node */ 674 /* usbdev device-node */
668 if (imajor(inode) == USB_DEVICE_MAJOR) 675 if (imajor(inode) == USB_DEVICE_MAJOR)
669 dev = usbdev_lookup_by_devt(inode->i_rdev); 676 dev = usbdev_lookup_by_devt(inode->i_rdev);
677
670#ifdef CONFIG_USB_DEVICEFS 678#ifdef CONFIG_USB_DEVICEFS
671 /* procfs file */ 679 /* procfs file */
672 if (!dev) { 680 if (!dev) {
@@ -678,13 +686,19 @@ static int usbdev_open(struct inode *inode, struct file *file)
678 dev = NULL; 686 dev = NULL;
679 } 687 }
680#endif 688#endif
681 if (!dev || dev->state == USB_STATE_NOTATTACHED) 689 mutex_unlock(&usbfs_mutex);
682 goto out; 690
691 if (!dev)
692 goto out_free_ps;
693
694 usb_lock_device(dev);
695 if (dev->state == USB_STATE_NOTATTACHED)
696 goto out_unlock_device;
697
683 ret = usb_autoresume_device(dev); 698 ret = usb_autoresume_device(dev);
684 if (ret) 699 if (ret)
685 goto out; 700 goto out_unlock_device;
686 701
687 ret = 0;
688 ps->dev = dev; 702 ps->dev = dev;
689 ps->file = file; 703 ps->file = file;
690 spin_lock_init(&ps->lock); 704 spin_lock_init(&ps->lock);
@@ -702,15 +716,16 @@ static int usbdev_open(struct inode *inode, struct file *file)
702 smp_wmb(); 716 smp_wmb();
703 list_add_tail(&ps->list, &dev->filelist); 717 list_add_tail(&ps->list, &dev->filelist);
704 file->private_data = ps; 718 file->private_data = ps;
719 usb_unlock_device(dev);
705 snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current), 720 snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current),
706 current->comm); 721 current->comm);
707 out: 722 return ret;
708 if (ret) { 723
709 kfree(ps); 724 out_unlock_device:
710 usb_put_dev(dev); 725 usb_unlock_device(dev);
711 } 726 usb_put_dev(dev);
712 mutex_unlock(&usbfs_mutex); 727 out_free_ps:
713 unlock_kernel(); 728 kfree(ps);
714 return ret; 729 return ret;
715} 730}
716 731
@@ -724,10 +739,7 @@ static int usbdev_release(struct inode *inode, struct file *file)
724 usb_lock_device(dev); 739 usb_lock_device(dev);
725 usb_hub_release_all_ports(dev, ps); 740 usb_hub_release_all_ports(dev, ps);
726 741
727 /* Protect against simultaneous open */
728 mutex_lock(&usbfs_mutex);
729 list_del_init(&ps->list); 742 list_del_init(&ps->list);
730 mutex_unlock(&usbfs_mutex);
731 743
732 for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed); 744 for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed);
733 ifnum++) { 745 ifnum++) {
@@ -770,6 +782,13 @@ static int proc_control(struct dev_state *ps, void __user *arg)
770 if (!tbuf) 782 if (!tbuf)
771 return -ENOMEM; 783 return -ENOMEM;
772 tmo = ctrl.timeout; 784 tmo = ctrl.timeout;
785 snoop(&dev->dev, "control urb: bRequestType=%02x "
786 "bRequest=%02x wValue=%04x "
787 "wIndex=%04x wLength=%04x\n",
788 ctrl.bRequestType, ctrl.bRequest,
789 __le16_to_cpup(&ctrl.wValue),
790 __le16_to_cpup(&ctrl.wIndex),
791 __le16_to_cpup(&ctrl.wLength));
773 if (ctrl.bRequestType & 0x80) { 792 if (ctrl.bRequestType & 0x80) {
774 if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data, 793 if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data,
775 ctrl.wLength)) { 794 ctrl.wLength)) {
@@ -777,15 +796,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
777 return -EINVAL; 796 return -EINVAL;
778 } 797 }
779 pipe = usb_rcvctrlpipe(dev, 0); 798 pipe = usb_rcvctrlpipe(dev, 0);
780 snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT); 799 snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0);
781 800
782 usb_unlock_device(dev); 801 usb_unlock_device(dev);
783 i = usb_control_msg(dev, pipe, ctrl.bRequest, 802 i = usb_control_msg(dev, pipe, ctrl.bRequest,
784 ctrl.bRequestType, ctrl.wValue, ctrl.wIndex, 803 ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
785 tbuf, ctrl.wLength, tmo); 804 tbuf, ctrl.wLength, tmo);
786 usb_lock_device(dev); 805 usb_lock_device(dev);
787 snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE); 806 snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE,
788 807 tbuf, i);
789 if ((i > 0) && ctrl.wLength) { 808 if ((i > 0) && ctrl.wLength) {
790 if (copy_to_user(ctrl.data, tbuf, i)) { 809 if (copy_to_user(ctrl.data, tbuf, i)) {
791 free_page((unsigned long)tbuf); 810 free_page((unsigned long)tbuf);
@@ -800,14 +819,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
800 } 819 }
801 } 820 }
802 pipe = usb_sndctrlpipe(dev, 0); 821 pipe = usb_sndctrlpipe(dev, 0);
803 snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT); 822 snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT,
823 tbuf, ctrl.wLength);
804 824
805 usb_unlock_device(dev); 825 usb_unlock_device(dev);
806 i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest, 826 i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest,
807 ctrl.bRequestType, ctrl.wValue, ctrl.wIndex, 827 ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
808 tbuf, ctrl.wLength, tmo); 828 tbuf, ctrl.wLength, tmo);
809 usb_lock_device(dev); 829 usb_lock_device(dev);
810 snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE); 830 snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0);
811 } 831 }
812 free_page((unsigned long)tbuf); 832 free_page((unsigned long)tbuf);
813 if (i < 0 && i != -EPIPE) { 833 if (i < 0 && i != -EPIPE) {
@@ -853,12 +873,12 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
853 kfree(tbuf); 873 kfree(tbuf);
854 return -EINVAL; 874 return -EINVAL;
855 } 875 }
856 snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT); 876 snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0);
857 877
858 usb_unlock_device(dev); 878 usb_unlock_device(dev);
859 i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo); 879 i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
860 usb_lock_device(dev); 880 usb_lock_device(dev);
861 snoop_urb(dev, NULL, pipe, len2, i, COMPLETE); 881 snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, tbuf, len2);
862 882
863 if (!i && len2) { 883 if (!i && len2) {
864 if (copy_to_user(bulk.data, tbuf, len2)) { 884 if (copy_to_user(bulk.data, tbuf, len2)) {
@@ -873,12 +893,12 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
873 return -EFAULT; 893 return -EFAULT;
874 } 894 }
875 } 895 }
876 snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT); 896 snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1);
877 897
878 usb_unlock_device(dev); 898 usb_unlock_device(dev);
879 i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo); 899 i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
880 usb_lock_device(dev); 900 usb_lock_device(dev);
881 snoop_urb(dev, NULL, pipe, len2, i, COMPLETE); 901 snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0);
882 } 902 }
883 kfree(tbuf); 903 kfree(tbuf);
884 if (i < 0) 904 if (i < 0)
@@ -1097,6 +1117,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1097 is_in = 0; 1117 is_in = 0;
1098 uurb->endpoint &= ~USB_DIR_IN; 1118 uurb->endpoint &= ~USB_DIR_IN;
1099 } 1119 }
1120 snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
1121 "bRequest=%02x wValue=%04x "
1122 "wIndex=%04x wLength=%04x\n",
1123 dr->bRequestType, dr->bRequest,
1124 __le16_to_cpup(&dr->wValue),
1125 __le16_to_cpup(&dr->wIndex),
1126 __le16_to_cpup(&dr->wLength));
1100 break; 1127 break;
1101 1128
1102 case USBDEVFS_URB_TYPE_BULK: 1129 case USBDEVFS_URB_TYPE_BULK:
@@ -1104,13 +1131,25 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1104 case USB_ENDPOINT_XFER_CONTROL: 1131 case USB_ENDPOINT_XFER_CONTROL:
1105 case USB_ENDPOINT_XFER_ISOC: 1132 case USB_ENDPOINT_XFER_ISOC:
1106 return -EINVAL; 1133 return -EINVAL;
1107 /* allow single-shot interrupt transfers, at bogus rates */ 1134 case USB_ENDPOINT_XFER_INT:
1135 /* allow single-shot interrupt transfers */
1136 uurb->type = USBDEVFS_URB_TYPE_INTERRUPT;
1137 goto interrupt_urb;
1108 } 1138 }
1109 uurb->number_of_packets = 0; 1139 uurb->number_of_packets = 0;
1110 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE) 1140 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
1111 return -EINVAL; 1141 return -EINVAL;
1112 break; 1142 break;
1113 1143
1144 case USBDEVFS_URB_TYPE_INTERRUPT:
1145 if (!usb_endpoint_xfer_int(&ep->desc))
1146 return -EINVAL;
1147 interrupt_urb:
1148 uurb->number_of_packets = 0;
1149 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
1150 return -EINVAL;
1151 break;
1152
1114 case USBDEVFS_URB_TYPE_ISO: 1153 case USBDEVFS_URB_TYPE_ISO:
1115 /* arbitrary limit */ 1154 /* arbitrary limit */
1116 if (uurb->number_of_packets < 1 || 1155 if (uurb->number_of_packets < 1 ||
@@ -1143,14 +1182,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1143 uurb->buffer_length = totlen; 1182 uurb->buffer_length = totlen;
1144 break; 1183 break;
1145 1184
1146 case USBDEVFS_URB_TYPE_INTERRUPT:
1147 uurb->number_of_packets = 0;
1148 if (!usb_endpoint_xfer_int(&ep->desc))
1149 return -EINVAL;
1150 if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
1151 return -EINVAL;
1152 break;
1153
1154 default: 1185 default:
1155 return -EINVAL; 1186 return -EINVAL;
1156 } 1187 }
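After the reshuffle above, usbfs accepts USBDEVFS_URB_TYPE_INTERRUPT directly and still downgrades bulk submissions aimed at an interrupt endpoint to single-shot interrupt URBs. A userspace sketch of one interrupt-IN transfer; the device node, interface number and endpoint address are placeholders for a real device:

/* Userspace sketch: submit and reap one interrupt-IN URB through usbfs. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

int main(void)
{
	unsigned char buf[8];
	unsigned int ifnum = 0;			/* placeholder interface */
	struct usbdevfs_urb urb, *reaped;
	int fd = open("/dev/bus/usb/001/002", O_RDWR);	/* placeholder node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, USBDEVFS_CLAIMINTERFACE, &ifnum) < 0) {
		perror("CLAIMINTERFACE");
		return 1;
	}

	memset(&urb, 0, sizeof(urb));
	urb.type = USBDEVFS_URB_TYPE_INTERRUPT;	/* accepted as-is now */
	urb.endpoint = 0x81;			/* interrupt IN endpoint */
	urb.buffer = buf;
	urb.buffer_length = sizeof(buf);

	if (ioctl(fd, USBDEVFS_SUBMITURB, &urb) < 0 ||
	    ioctl(fd, USBDEVFS_REAPURB, &reaped) < 0) {
		perror("submit/reap");
		return 1;
	}
	printf("got %d bytes, status %d\n", reaped->actual_length,
	       reaped->status);
	close(fd);
	return 0;
}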
@@ -1236,7 +1267,9 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1236 } 1267 }
1237 } 1268 }
1238 snoop_urb(ps->dev, as->userurb, as->urb->pipe, 1269 snoop_urb(ps->dev, as->userurb, as->urb->pipe,
1239 as->urb->transfer_buffer_length, 0, SUBMIT); 1270 as->urb->transfer_buffer_length, 0, SUBMIT,
1271 is_in ? NULL : as->urb->transfer_buffer,
1272 uurb->buffer_length);
1240 async_newpending(as); 1273 async_newpending(as);
1241 1274
1242 if (usb_endpoint_xfer_bulk(&ep->desc)) { 1275 if (usb_endpoint_xfer_bulk(&ep->desc)) {
@@ -1274,7 +1307,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1274 dev_printk(KERN_DEBUG, &ps->dev->dev, 1307 dev_printk(KERN_DEBUG, &ps->dev->dev,
1275 "usbfs: usb_submit_urb returned %d\n", ret); 1308 "usbfs: usb_submit_urb returned %d\n", ret);
1276 snoop_urb(ps->dev, as->userurb, as->urb->pipe, 1309 snoop_urb(ps->dev, as->userurb, as->urb->pipe,
1277 0, ret, COMPLETE); 1310 0, ret, COMPLETE, NULL, 0);
1278 async_removepending(as); 1311 async_removepending(as);
1279 free_async(as); 1312 free_async(as);
1280 return ret; 1313 return ret;
@@ -1628,7 +1661,10 @@ static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
1628 if (driver == NULL || driver->ioctl == NULL) { 1661 if (driver == NULL || driver->ioctl == NULL) {
1629 retval = -ENOTTY; 1662 retval = -ENOTTY;
1630 } else { 1663 } else {
1664 /* keep API that guarantees BKL */
1665 lock_kernel();
1631 retval = driver->ioctl(intf, ctl->ioctl_code, buf); 1666 retval = driver->ioctl(intf, ctl->ioctl_code, buf);
1667 unlock_kernel();
1632 if (retval == -ENOIOCTLCMD) 1668 if (retval == -ENOIOCTLCMD)
1633 retval = -ENOTTY; 1669 retval = -ENOTTY;
1634 } 1670 }
@@ -1711,6 +1747,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
1711 1747
1712 if (!(file->f_mode & FMODE_WRITE)) 1748 if (!(file->f_mode & FMODE_WRITE))
1713 return -EPERM; 1749 return -EPERM;
1750
1714 usb_lock_device(dev); 1751 usb_lock_device(dev);
1715 if (!connected(ps)) { 1752 if (!connected(ps)) {
1716 usb_unlock_device(dev); 1753 usb_unlock_device(dev);
@@ -1877,9 +1914,7 @@ static long usbdev_ioctl(struct file *file, unsigned int cmd,
1877{ 1914{
1878 int ret; 1915 int ret;
1879 1916
1880 lock_kernel();
1881 ret = usbdev_do_ioctl(file, cmd, (void __user *)arg); 1917 ret = usbdev_do_ioctl(file, cmd, (void __user *)arg);
1882 unlock_kernel();
1883 1918
1884 return ret; 1919 return ret;
1885} 1920}
@@ -1890,9 +1925,7 @@ static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
1890{ 1925{
1891 int ret; 1926 int ret;
1892 1927
1893 lock_kernel();
1894 ret = usbdev_do_ioctl(file, cmd, compat_ptr(arg)); 1928 ret = usbdev_do_ioctl(file, cmd, compat_ptr(arg));
1895 unlock_kernel();
1896 1929
1897 return ret; 1930 return ret;
1898} 1931}
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index f2f055eb6831..a7037bf81688 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -25,7 +25,7 @@
25#include <linux/device.h> 25#include <linux/device.h>
26#include <linux/usb.h> 26#include <linux/usb.h>
27#include <linux/usb/quirks.h> 27#include <linux/usb/quirks.h>
28#include <linux/workqueue.h> 28#include <linux/pm_runtime.h>
29#include "hcd.h" 29#include "hcd.h"
30#include "usb.h" 30#include "usb.h"
31 31
@@ -221,7 +221,7 @@ static int usb_probe_device(struct device *dev)
221{ 221{
222 struct usb_device_driver *udriver = to_usb_device_driver(dev->driver); 222 struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
223 struct usb_device *udev = to_usb_device(dev); 223 struct usb_device *udev = to_usb_device(dev);
224 int error = -ENODEV; 224 int error = 0;
225 225
226 dev_dbg(dev, "%s\n", __func__); 226 dev_dbg(dev, "%s\n", __func__);
227 227
@@ -230,18 +230,23 @@ static int usb_probe_device(struct device *dev)
230 /* The device should always appear to be in use 230 /* The device should always appear to be in use
231	 * unless the driver supports autosuspend. 231	 * unless the driver supports autosuspend.
232 */ 232 */
233 udev->pm_usage_cnt = !(udriver->supports_autosuspend); 233 if (!udriver->supports_autosuspend)
234 error = usb_autoresume_device(udev);
234 235
235 error = udriver->probe(udev); 236 if (!error)
237 error = udriver->probe(udev);
236 return error; 238 return error;
237} 239}
238 240
239/* called from driver core with dev locked */ 241/* called from driver core with dev locked */
240static int usb_unbind_device(struct device *dev) 242static int usb_unbind_device(struct device *dev)
241{ 243{
244 struct usb_device *udev = to_usb_device(dev);
242 struct usb_device_driver *udriver = to_usb_device_driver(dev->driver); 245 struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
243 246
244 udriver->disconnect(to_usb_device(dev)); 247 udriver->disconnect(udev);
248 if (!udriver->supports_autosuspend)
249 usb_autosuspend_device(udev);
245 return 0; 250 return 0;
246} 251}
247 252
@@ -274,60 +279,62 @@ static int usb_probe_interface(struct device *dev)
274 intf->needs_binding = 0; 279 intf->needs_binding = 0;
275 280
276 if (usb_device_is_owned(udev)) 281 if (usb_device_is_owned(udev))
277 return -ENODEV; 282 return error;
278 283
279 if (udev->authorized == 0) { 284 if (udev->authorized == 0) {
280 dev_err(&intf->dev, "Device is not authorized for usage\n"); 285 dev_err(&intf->dev, "Device is not authorized for usage\n");
281 return -ENODEV; 286 return error;
282 } 287 }
283 288
284 id = usb_match_id(intf, driver->id_table); 289 id = usb_match_id(intf, driver->id_table);
285 if (!id) 290 if (!id)
286 id = usb_match_dynamic_id(intf, driver); 291 id = usb_match_dynamic_id(intf, driver);
287 if (id) { 292 if (!id)
288 dev_dbg(dev, "%s - got id\n", __func__); 293 return error;
289
290 error = usb_autoresume_device(udev);
291 if (error)
292 return error;
293 294
294 /* Interface "power state" doesn't correspond to any hardware 295 dev_dbg(dev, "%s - got id\n", __func__);
295 * state whatsoever. We use it to record when it's bound to
296 * a driver that may start I/0: it's not frozen/quiesced.
297 */
298 mark_active(intf);
299 intf->condition = USB_INTERFACE_BINDING;
300 296
301 /* The interface should always appear to be in use 297 error = usb_autoresume_device(udev);
302 * unless the driver suports autosuspend. 298 if (error)
303 */ 299 return error;
304 atomic_set(&intf->pm_usage_cnt, !driver->supports_autosuspend);
305
306 /* Carry out a deferred switch to altsetting 0 */
307 if (intf->needs_altsetting0) {
308 error = usb_set_interface(udev, intf->altsetting[0].
309 desc.bInterfaceNumber, 0);
310 if (error < 0)
311 goto err;
312 300
313 intf->needs_altsetting0 = 0; 301 intf->condition = USB_INTERFACE_BINDING;
314 }
315 302
316 error = driver->probe(intf, id); 303 /* Bound interfaces are initially active. They are
317 if (error) 304 * runtime-PM-enabled only if the driver has autosuspend support.
305 * They are sensitive to their children's power states.
306 */
307 pm_runtime_set_active(dev);
308 pm_suspend_ignore_children(dev, false);
309 if (driver->supports_autosuspend)
310 pm_runtime_enable(dev);
311
312 /* Carry out a deferred switch to altsetting 0 */
313 if (intf->needs_altsetting0) {
314 error = usb_set_interface(udev, intf->altsetting[0].
315 desc.bInterfaceNumber, 0);
316 if (error < 0)
318 goto err; 317 goto err;
319 318 intf->needs_altsetting0 = 0;
320 intf->condition = USB_INTERFACE_BOUND;
321 usb_autosuspend_device(udev);
322 } 319 }
323 320
321 error = driver->probe(intf, id);
322 if (error)
323 goto err;
324
325 intf->condition = USB_INTERFACE_BOUND;
326 usb_autosuspend_device(udev);
324 return error; 327 return error;
325 328
326err: 329 err:
327 mark_quiesced(intf);
328 intf->needs_remote_wakeup = 0; 330 intf->needs_remote_wakeup = 0;
329 intf->condition = USB_INTERFACE_UNBOUND; 331 intf->condition = USB_INTERFACE_UNBOUND;
330 usb_cancel_queued_reset(intf); 332 usb_cancel_queued_reset(intf);
333
334 /* Unbound interfaces are always runtime-PM-disabled and -suspended */
335 pm_runtime_disable(dev);
336 pm_runtime_set_suspended(dev);
337
331 usb_autosuspend_device(udev); 338 usb_autosuspend_device(udev);
332 return error; 339 return error;
333} 340}
@@ -377,9 +384,17 @@ static int usb_unbind_interface(struct device *dev)
377 usb_set_intfdata(intf, NULL); 384 usb_set_intfdata(intf, NULL);
378 385
379 intf->condition = USB_INTERFACE_UNBOUND; 386 intf->condition = USB_INTERFACE_UNBOUND;
380 mark_quiesced(intf);
381 intf->needs_remote_wakeup = 0; 387 intf->needs_remote_wakeup = 0;
382 388
389 /* Unbound interfaces are always runtime-PM-disabled and -suspended */
390 pm_runtime_disable(dev);
391 pm_runtime_set_suspended(dev);
392
393 /* Undo any residual pm_autopm_get_interface_* calls */
394 for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
395 usb_autopm_put_interface_no_suspend(intf);
396 atomic_set(&intf->pm_usage_cnt, 0);
397
383 if (!error) 398 if (!error)
384 usb_autosuspend_device(udev); 399 usb_autosuspend_device(udev);
385 400
@@ -410,7 +425,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
410 struct usb_interface *iface, void *priv) 425 struct usb_interface *iface, void *priv)
411{ 426{
412 struct device *dev = &iface->dev; 427 struct device *dev = &iface->dev;
413 struct usb_device *udev = interface_to_usbdev(iface);
414 int retval = 0; 428 int retval = 0;
415 429
416 if (dev->driver) 430 if (dev->driver)
@@ -420,11 +434,16 @@ int usb_driver_claim_interface(struct usb_driver *driver,
420 usb_set_intfdata(iface, priv); 434 usb_set_intfdata(iface, priv);
421 iface->needs_binding = 0; 435 iface->needs_binding = 0;
422 436
423 usb_pm_lock(udev);
424 iface->condition = USB_INTERFACE_BOUND; 437 iface->condition = USB_INTERFACE_BOUND;
425 mark_active(iface); 438
426 atomic_set(&iface->pm_usage_cnt, !driver->supports_autosuspend); 439 /* Bound interfaces are initially active. They are
427 usb_pm_unlock(udev); 440 * runtime-PM-enabled only if the driver has autosuspend support.
441 * They are sensitive to their children's power states.
442 */
443 pm_runtime_set_active(dev);
444 pm_suspend_ignore_children(dev, false);
445 if (driver->supports_autosuspend)
446 pm_runtime_enable(dev);
428 447
429 /* if interface was already added, bind now; else let 448 /* if interface was already added, bind now; else let
430 * the future device_add() bind it, bypassing probe() 449 * the future device_add() bind it, bypassing probe()
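Both bind paths (usb_probe_interface() and usb_driver_claim_interface()) now mark a freshly bound interface runtime-PM-active and enable runtime PM only when the driver sets supports_autosuspend. The driver-side usage-count pattern around I/O is unchanged; a hedged sketch with an invented helper:

/* Sketch: the usual bracket around I/O for a driver with
 * .supports_autosuspend = 1.  example_do_io() is hypothetical. */
#include <linux/usb.h>

static int example_do_io(struct usb_interface *intf)
{
	int ret;

	ret = usb_autopm_get_interface(intf);	/* resume if needed, hold awake */
	if (ret)
		return ret;

	/* submit URBs / talk to the device here */

	usb_autopm_put_interface(intf);		/* drop the count, allow autosuspend */
	return 0;
}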
@@ -691,9 +710,6 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
691{ 710{
692 struct usb_device *usb_dev; 711 struct usb_device *usb_dev;
693 712
694 /* driver is often null here; dev_dbg() would oops */
695 pr_debug("usb %s: uevent\n", dev_name(dev));
696
697 if (is_usb_device(dev)) { 713 if (is_usb_device(dev)) {
698 usb_dev = to_usb_device(dev); 714 usb_dev = to_usb_device(dev);
699 } else if (is_usb_interface(dev)) { 715 } else if (is_usb_interface(dev)) {
@@ -705,6 +721,7 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
705 } 721 }
706 722
707 if (usb_dev->devnum < 0) { 723 if (usb_dev->devnum < 0) {
724 /* driver is often null here; dev_dbg() would oops */
708 pr_debug("usb %s: already deleted?\n", dev_name(dev)); 725 pr_debug("usb %s: already deleted?\n", dev_name(dev));
709 return -ENODEV; 726 return -ENODEV;
710 } 727 }
@@ -983,7 +1000,6 @@ static void do_unbind_rebind(struct usb_device *udev, int action)
983 } 1000 }
984} 1001}
985 1002
986/* Caller has locked udev's pm_mutex */
987static int usb_suspend_device(struct usb_device *udev, pm_message_t msg) 1003static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
988{ 1004{
989 struct usb_device_driver *udriver; 1005 struct usb_device_driver *udriver;
@@ -1007,7 +1023,6 @@ static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
1007 return status; 1023 return status;
1008} 1024}
1009 1025
1010/* Caller has locked udev's pm_mutex */
1011static int usb_resume_device(struct usb_device *udev, pm_message_t msg) 1026static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
1012{ 1027{
1013 struct usb_device_driver *udriver; 1028 struct usb_device_driver *udriver;
@@ -1041,27 +1056,20 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
1041 return status; 1056 return status;
1042} 1057}
1043 1058
1044/* Caller has locked intf's usb_device's pm mutex */
1045static int usb_suspend_interface(struct usb_device *udev, 1059static int usb_suspend_interface(struct usb_device *udev,
1046 struct usb_interface *intf, pm_message_t msg) 1060 struct usb_interface *intf, pm_message_t msg)
1047{ 1061{
1048 struct usb_driver *driver; 1062 struct usb_driver *driver;
1049 int status = 0; 1063 int status = 0;
1050 1064
1051 /* with no hardware, USB interfaces only use FREEZE and ON states */ 1065 if (udev->state == USB_STATE_NOTATTACHED ||
1052 if (udev->state == USB_STATE_NOTATTACHED || !is_active(intf)) 1066 intf->condition == USB_INTERFACE_UNBOUND)
1053 goto done;
1054
1055 /* This can happen; see usb_driver_release_interface() */
1056 if (intf->condition == USB_INTERFACE_UNBOUND)
1057 goto done; 1067 goto done;
1058 driver = to_usb_driver(intf->dev.driver); 1068 driver = to_usb_driver(intf->dev.driver);
1059 1069
1060 if (driver->suspend) { 1070 if (driver->suspend) {
1061 status = driver->suspend(intf, msg); 1071 status = driver->suspend(intf, msg);
1062 if (status == 0) 1072 if (status && !(msg.event & PM_EVENT_AUTO))
1063 mark_quiesced(intf);
1064 else if (!(msg.event & PM_EVENT_AUTO))
1065 dev_err(&intf->dev, "%s error %d\n", 1073 dev_err(&intf->dev, "%s error %d\n",
1066 "suspend", status); 1074 "suspend", status);
1067 } else { 1075 } else {
@@ -1069,7 +1077,6 @@ static int usb_suspend_interface(struct usb_device *udev,
1069 intf->needs_binding = 1; 1077 intf->needs_binding = 1;
1070 dev_warn(&intf->dev, "no %s for driver %s?\n", 1078 dev_warn(&intf->dev, "no %s for driver %s?\n",
1071 "suspend", driver->name); 1079 "suspend", driver->name);
1072 mark_quiesced(intf);
1073 } 1080 }
1074 1081
1075 done: 1082 done:
@@ -1077,14 +1084,13 @@ static int usb_suspend_interface(struct usb_device *udev,
1077 return status; 1084 return status;
1078} 1085}
1079 1086
1080/* Caller has locked intf's usb_device's pm_mutex */
1081static int usb_resume_interface(struct usb_device *udev, 1087static int usb_resume_interface(struct usb_device *udev,
1082 struct usb_interface *intf, pm_message_t msg, int reset_resume) 1088 struct usb_interface *intf, pm_message_t msg, int reset_resume)
1083{ 1089{
1084 struct usb_driver *driver; 1090 struct usb_driver *driver;
1085 int status = 0; 1091 int status = 0;
1086 1092
1087 if (udev->state == USB_STATE_NOTATTACHED || is_active(intf)) 1093 if (udev->state == USB_STATE_NOTATTACHED)
1088 goto done; 1094 goto done;
1089 1095
1090 /* Don't let autoresume interfere with unbinding */ 1096 /* Don't let autoresume interfere with unbinding */
@@ -1135,90 +1141,11 @@ static int usb_resume_interface(struct usb_device *udev,
1135 1141
1136done: 1142done:
1137 dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status); 1143 dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status);
1138 if (status == 0 && intf->condition == USB_INTERFACE_BOUND)
1139 mark_active(intf);
1140 1144
1141 /* Later we will unbind the driver and/or reprobe, if necessary */ 1145 /* Later we will unbind the driver and/or reprobe, if necessary */
1142 return status; 1146 return status;
1143} 1147}
1144 1148
1145#ifdef CONFIG_USB_SUSPEND
1146
1147/* Internal routine to check whether we may autosuspend a device. */
1148static int autosuspend_check(struct usb_device *udev, int reschedule)
1149{
1150 int i;
1151 struct usb_interface *intf;
1152 unsigned long suspend_time, j;
1153
1154 /* For autosuspend, fail fast if anything is in use or autosuspend
1155 * is disabled. Also fail if any interfaces require remote wakeup
1156 * but it isn't available.
1157 */
1158 if (udev->pm_usage_cnt > 0)
1159 return -EBUSY;
1160 if (udev->autosuspend_delay < 0 || udev->autosuspend_disabled)
1161 return -EPERM;
1162
1163 suspend_time = udev->last_busy + udev->autosuspend_delay;
1164 if (udev->actconfig) {
1165 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
1166 intf = udev->actconfig->interface[i];
1167 if (!is_active(intf))
1168 continue;
1169 if (atomic_read(&intf->pm_usage_cnt) > 0)
1170 return -EBUSY;
1171 if (intf->needs_remote_wakeup &&
1172 !udev->do_remote_wakeup) {
1173 dev_dbg(&udev->dev, "remote wakeup needed "
1174 "for autosuspend\n");
1175 return -EOPNOTSUPP;
1176 }
1177
1178 /* Don't allow autosuspend if the device will need
1179 * a reset-resume and any of its interface drivers
1180 * doesn't include support.
1181 */
1182 if (udev->quirks & USB_QUIRK_RESET_RESUME) {
1183 struct usb_driver *driver;
1184
1185 driver = to_usb_driver(intf->dev.driver);
1186 if (!driver->reset_resume ||
1187 intf->needs_remote_wakeup)
1188 return -EOPNOTSUPP;
1189 }
1190 }
1191 }
1192
1193 /* If everything is okay but the device hasn't been idle for long
1194 * enough, queue a delayed autosuspend request. If the device
1195 * _has_ been idle for long enough and the reschedule flag is set,
1196 * likewise queue a delayed (1 second) autosuspend request.
1197 */
1198 j = jiffies;
1199 if (time_before(j, suspend_time))
1200 reschedule = 1;
1201 else
1202 suspend_time = j + HZ;
1203 if (reschedule) {
1204 if (!timer_pending(&udev->autosuspend.timer)) {
1205 queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
1206 round_jiffies_up_relative(suspend_time - j));
1207 }
1208 return -EAGAIN;
1209 }
1210 return 0;
1211}
1212
1213#else
1214
1215static inline int autosuspend_check(struct usb_device *udev, int reschedule)
1216{
1217 return 0;
1218}
1219
1220#endif /* CONFIG_USB_SUSPEND */
1221
1222/** 1149/**
1223 * usb_suspend_both - suspend a USB device and its interfaces 1150 * usb_suspend_both - suspend a USB device and its interfaces
1224 * @udev: the usb_device to suspend 1151 * @udev: the usb_device to suspend
@@ -1230,27 +1157,12 @@ static inline int autosuspend_check(struct usb_device *udev, int reschedule)
1230 * all the interfaces which were suspended are resumed so that they remain 1157 * all the interfaces which were suspended are resumed so that they remain
1231 * in the same state as the device. 1158 * in the same state as the device.
1232 * 1159 *
1233 * If an autosuspend is in progress the routine checks first to make sure 1160 * Autosuspend requests originating from a child device or an interface
1234 * that neither the device itself or any of its active interfaces is in use 1161 * driver may be made without the protection of @udev's device lock, but
1235 * (pm_usage_cnt is greater than 0). If they are, the autosuspend fails. 1162 * all other suspend calls will hold the lock. Usbcore will insure that
1236 * 1163 * method calls do not arrive during bind, unbind, or reset operations.
1237 * If the suspend succeeds, the routine recursively queues an autosuspend 1164 * However drivers must be prepared to handle suspend calls arriving at
1238 * request for @udev's parent device, thereby propagating the change up 1165 * unpredictable times.
1239 * the device tree. If all of the parent's children are now suspended,
1240 * the parent will autosuspend in turn.
1241 *
1242 * The suspend method calls are subject to mutual exclusion under control
1243 * of @udev's pm_mutex. Many of these calls are also under the protection
1244 * of @udev's device lock (including all requests originating outside the
1245 * USB subsystem), but autosuspend requests generated by a child device or
1246 * interface driver may not be. Usbcore will insure that the method calls
1247 * do not arrive during bind, unbind, or reset operations. However, drivers
1248 * must be prepared to handle suspend calls arriving at unpredictable times.
1249 * The only way to block such calls is to do an autoresume (preventing
1250 * autosuspends) while holding @udev's device lock (preventing outside
1251 * suspends).
1252 *
1253 * The caller must hold @udev->pm_mutex.
1254 * 1166 *
1255 * This routine can run only in process context. 1167 * This routine can run only in process context.
1256 */ 1168 */
@@ -1259,20 +1171,11 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1259 int status = 0; 1171 int status = 0;
1260 int i = 0; 1172 int i = 0;
1261 struct usb_interface *intf; 1173 struct usb_interface *intf;
1262 struct usb_device *parent = udev->parent;
1263 1174
1264 if (udev->state == USB_STATE_NOTATTACHED || 1175 if (udev->state == USB_STATE_NOTATTACHED ||
1265 udev->state == USB_STATE_SUSPENDED) 1176 udev->state == USB_STATE_SUSPENDED)
1266 goto done; 1177 goto done;
1267 1178
1268 udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
1269
1270 if (msg.event & PM_EVENT_AUTO) {
1271 status = autosuspend_check(udev, 0);
1272 if (status < 0)
1273 goto done;
1274 }
1275
1276 /* Suspend all the interfaces and then udev itself */ 1179 /* Suspend all the interfaces and then udev itself */
1277 if (udev->actconfig) { 1180 if (udev->actconfig) {
1278 for (; i < udev->actconfig->desc.bNumInterfaces; i++) { 1181 for (; i < udev->actconfig->desc.bNumInterfaces; i++) {
@@ -1287,35 +1190,21 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1287 1190
1288 /* If the suspend failed, resume interfaces that did get suspended */ 1191 /* If the suspend failed, resume interfaces that did get suspended */
1289 if (status != 0) { 1192 if (status != 0) {
1290 pm_message_t msg2; 1193 msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
1291
1292 msg2.event = msg.event ^ (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
1293 while (--i >= 0) { 1194 while (--i >= 0) {
1294 intf = udev->actconfig->interface[i]; 1195 intf = udev->actconfig->interface[i];
1295 usb_resume_interface(udev, intf, msg2, 0); 1196 usb_resume_interface(udev, intf, msg, 0);
1296 } 1197 }
1297 1198
1298 /* Try another autosuspend when the interfaces aren't busy */ 1199 /* If the suspend succeeded then prevent any more URB submissions
1299 if (msg.event & PM_EVENT_AUTO) 1200 * and flush any outstanding URBs.
1300 autosuspend_check(udev, status == -EBUSY);
1301
1302 /* If the suspend succeeded then prevent any more URB submissions,
1303 * flush any outstanding URBs, and propagate the suspend up the tree.
1304 */ 1201 */
1305 } else { 1202 } else {
1306 cancel_delayed_work(&udev->autosuspend);
1307 udev->can_submit = 0; 1203 udev->can_submit = 0;
1308 for (i = 0; i < 16; ++i) { 1204 for (i = 0; i < 16; ++i) {
1309 usb_hcd_flush_endpoint(udev, udev->ep_out[i]); 1205 usb_hcd_flush_endpoint(udev, udev->ep_out[i]);
1310 usb_hcd_flush_endpoint(udev, udev->ep_in[i]); 1206 usb_hcd_flush_endpoint(udev, udev->ep_in[i]);
1311 } 1207 }
1312
1313 /* If this is just a FREEZE or a PRETHAW, udev might
1314 * not really be suspended. Only true suspends get
1315 * propagated up the device tree.
1316 */
1317 if (parent && udev->state == USB_STATE_SUSPENDED)
1318 usb_autosuspend_device(parent);
1319 } 1208 }
1320 1209
1321 done: 1210 done:
@@ -1332,23 +1221,12 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1332 * the resume method for @udev and then calls the resume methods for all 1221 * the resume method for @udev and then calls the resume methods for all
1333 * the interface drivers in @udev. 1222 * the interface drivers in @udev.
1334 * 1223 *
1335 * Before starting the resume, the routine calls itself recursively for 1224 * Autoresume requests originating from a child device or an interface
1336 * the parent device of @udev, thereby propagating the change up the device 1225 * driver may be made without the protection of @udev's device lock, but
1337 * tree and assuring that @udev will be able to resume. If the parent is 1226 * all other resume calls will hold the lock. Usbcore will insure that
1338 * unable to resume successfully, the routine fails. 1227 * method calls do not arrive during bind, unbind, or reset operations.
1339 * 1228 * However drivers must be prepared to handle resume calls arriving at
1340 * The resume method calls are subject to mutual exclusion under control 1229 * unpredictable times.
1341 * of @udev's pm_mutex. Many of these calls are also under the protection
1342 * of @udev's device lock (including all requests originating outside the
1343 * USB subsystem), but autoresume requests generated by a child device or
1344 * interface driver may not be. Usbcore will insure that the method calls
1345 * do not arrive during bind, unbind, or reset operations. However, drivers
1346 * must be prepared to handle resume calls arriving at unpredictable times.
1347 * The only way to block such calls is to do an autoresume (preventing
1348 * other autoresumes) while holding @udev's device lock (preventing outside
1349 * resumes).
1350 *
1351 * The caller must hold @udev->pm_mutex.
1352 * 1230 *
1353 * This routine can run only in process context. 1231 * This routine can run only in process context.
1354 */ 1232 */
@@ -1357,48 +1235,18 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
1357 int status = 0; 1235 int status = 0;
1358 int i; 1236 int i;
1359 struct usb_interface *intf; 1237 struct usb_interface *intf;
1360 struct usb_device *parent = udev->parent;
1361 1238
1362 cancel_delayed_work(&udev->autosuspend);
1363 if (udev->state == USB_STATE_NOTATTACHED) { 1239 if (udev->state == USB_STATE_NOTATTACHED) {
1364 status = -ENODEV; 1240 status = -ENODEV;
1365 goto done; 1241 goto done;
1366 } 1242 }
1367 udev->can_submit = 1; 1243 udev->can_submit = 1;
1368 1244
1369 /* Propagate the resume up the tree, if necessary */ 1245 /* Resume the device */
1370 if (udev->state == USB_STATE_SUSPENDED) { 1246 if (udev->state == USB_STATE_SUSPENDED || udev->reset_resume)
1371 if (parent) {
1372 status = usb_autoresume_device(parent);
1373 if (status == 0) {
1374 status = usb_resume_device(udev, msg);
1375 if (status || udev->state ==
1376 USB_STATE_NOTATTACHED) {
1377 usb_autosuspend_device(parent);
1378
1379 /* It's possible usb_resume_device()
1380 * failed after the port was
1381 * unsuspended, causing udev to be
1382 * logically disconnected. We don't
1383 * want usb_disconnect() to autosuspend
1384 * the parent again, so tell it that
1385 * udev disconnected while still
1386 * suspended. */
1387 if (udev->state ==
1388 USB_STATE_NOTATTACHED)
1389 udev->discon_suspended = 1;
1390 }
1391 }
1392 } else {
1393
1394 /* We can't progagate beyond the USB subsystem,
1395 * so if a root hub's controller is suspended
1396 * then we're stuck. */
1397 status = usb_resume_device(udev, msg);
1398 }
1399 } else if (udev->reset_resume)
1400 status = usb_resume_device(udev, msg); 1247 status = usb_resume_device(udev, msg);
1401 1248
1249 /* Resume the interfaces */
1402 if (status == 0 && udev->actconfig) { 1250 if (status == 0 && udev->actconfig) {
1403 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { 1251 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
1404 intf = udev->actconfig->interface[i]; 1252 intf = udev->actconfig->interface[i];
@@ -1414,55 +1262,94 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
1414 return status; 1262 return status;
1415} 1263}
1416 1264
1417#ifdef CONFIG_USB_SUSPEND 1265/* The device lock is held by the PM core */
1266int usb_suspend(struct device *dev, pm_message_t msg)
1267{
1268 struct usb_device *udev = to_usb_device(dev);
1418 1269
1419/* Internal routine to adjust a device's usage counter and change 1270 do_unbind_rebind(udev, DO_UNBIND);
1420 * its autosuspend state. 1271 udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
1421 */ 1272 return usb_suspend_both(udev, msg);
1422static int usb_autopm_do_device(struct usb_device *udev, int inc_usage_cnt) 1273}
1274
1275/* The device lock is held by the PM core */
1276int usb_resume(struct device *dev, pm_message_t msg)
1423{ 1277{
1424 int status = 0; 1278 struct usb_device *udev = to_usb_device(dev);
1279 int status;
1425 1280
1426 usb_pm_lock(udev); 1281 /* For PM complete calls, all we do is rebind interfaces */
1427 udev->pm_usage_cnt += inc_usage_cnt; 1282 if (msg.event == PM_EVENT_ON) {
1428 WARN_ON(udev->pm_usage_cnt < 0); 1283 if (udev->state != USB_STATE_NOTATTACHED)
1429 if (inc_usage_cnt) 1284 do_unbind_rebind(udev, DO_REBIND);
1430 udev->last_busy = jiffies; 1285 status = 0;
1431 if (inc_usage_cnt >= 0 && udev->pm_usage_cnt > 0) { 1286
1432 if (udev->state == USB_STATE_SUSPENDED) 1287 /* For all other calls, take the device back to full power and
1433 status = usb_resume_both(udev, PMSG_AUTO_RESUME); 1288 * tell the PM core in case it was autosuspended previously.
1434 if (status != 0) 1289 */
1435 udev->pm_usage_cnt -= inc_usage_cnt; 1290 } else {
1436 else if (inc_usage_cnt) 1291 status = usb_resume_both(udev, msg);
1292 if (status == 0) {
1293 pm_runtime_disable(dev);
1294 pm_runtime_set_active(dev);
1295 pm_runtime_enable(dev);
1437 udev->last_busy = jiffies; 1296 udev->last_busy = jiffies;
1438 } else if (inc_usage_cnt <= 0 && udev->pm_usage_cnt <= 0) { 1297 }
1439 status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
1440 } 1298 }
1441 usb_pm_unlock(udev); 1299
1300 /* Avoid PM error messages for devices disconnected while suspended
1301 * as we'll display regular disconnect messages just a bit later.
1302 */
1303 if (status == -ENODEV)
1304 status = 0;
1442 return status; 1305 return status;
1443} 1306}
1444 1307
1445/* usb_autosuspend_work - callback routine to autosuspend a USB device */ 1308#endif /* CONFIG_PM */
1446void usb_autosuspend_work(struct work_struct *work) 1309
1447{ 1310#ifdef CONFIG_USB_SUSPEND
1448 struct usb_device *udev =
1449 container_of(work, struct usb_device, autosuspend.work);
1450 1311
1451 usb_autopm_do_device(udev, 0); 1312/**
1313 * usb_enable_autosuspend - allow a USB device to be autosuspended
1314 * @udev: the USB device which may be autosuspended
1315 *
1316 * This routine allows @udev to be autosuspended. An autosuspend won't
1317 * take place until the autosuspend_delay has elapsed and all the other
1318 * necessary conditions are satisfied.
1319 *
1320 * The caller must hold @udev's device lock.
1321 */
1322int usb_enable_autosuspend(struct usb_device *udev)
1323{
1324 if (udev->autosuspend_disabled) {
1325 udev->autosuspend_disabled = 0;
1326 usb_autosuspend_device(udev);
1327 }
1328 return 0;
1452} 1329}
1330EXPORT_SYMBOL_GPL(usb_enable_autosuspend);
1453 1331
1454/* usb_autoresume_work - callback routine to autoresume a USB device */ 1332/**
1455void usb_autoresume_work(struct work_struct *work) 1333 * usb_disable_autosuspend - prevent a USB device from being autosuspended
1334 * @udev: the USB device which may not be autosuspended
1335 *
1336 * This routine prevents @udev from being autosuspended and wakes it up
1337 * if it is already autosuspended.
1338 *
1339 * The caller must hold @udev's device lock.
1340 */
1341int usb_disable_autosuspend(struct usb_device *udev)
1456{ 1342{
1457 struct usb_device *udev = 1343 int rc = 0;
1458 container_of(work, struct usb_device, autoresume);
1459 1344
1460 /* Wake it up, let the drivers do their thing, and then put it 1345 if (!udev->autosuspend_disabled) {
1461 * back to sleep. 1346 rc = usb_autoresume_device(udev);
1462 */ 1347 if (rc == 0)
1463 if (usb_autopm_do_device(udev, 1) == 0) 1348 udev->autosuspend_disabled = 1;
1464 usb_autopm_do_device(udev, -1); 1349 }
1350 return rc;
1465} 1351}
1352EXPORT_SYMBOL_GPL(usb_disable_autosuspend);
1466 1353
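usb_enable_autosuspend() and usb_disable_autosuspend() give other code a documented way to flip the per-device autosuspend policy. A short usage sketch; the example_* wrappers are invented, and the caller is assumed to hold udev's device lock as the kerneldoc requires:

/* Sketch: toggling autosuspend policy for one device. */
#include <linux/usb.h>

static int example_pin_device_awake(struct usb_device *udev)
{
	/* resumes the device if needed and blocks future autosuspends */
	return usb_disable_autosuspend(udev);
}

static void example_release_device(struct usb_device *udev)
{
	/* re-allow autosuspend; it happens later, after autosuspend_delay */
	usb_enable_autosuspend(udev);
}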
1467/** 1354/**
1468 * usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces 1355 * usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces
@@ -1472,15 +1359,11 @@ void usb_autoresume_work(struct work_struct *work)
1472 * @udev and wants to allow it to autosuspend. Examples would be when 1359 * @udev and wants to allow it to autosuspend. Examples would be when
1473 * @udev's device file in usbfs is closed or after a configuration change. 1360 * @udev's device file in usbfs is closed or after a configuration change.
1474 * 1361 *
1475 * @udev's usage counter is decremented. If it or any of the usage counters 1362 * @udev's usage counter is decremented; if it drops to 0 and all the
1476 * for an active interface is greater than 0, no autosuspend request will be 1363 * interfaces are inactive then a delayed autosuspend will be attempted.
1477 * queued. (If an interface driver does not support autosuspend then its 1364 * The attempt may fail (see autosuspend_check()).
1478 * usage counter is permanently positive.) Furthermore, if an interface
1479 * driver requires remote-wakeup capability during autosuspend but remote
1480 * wakeup is disabled, the autosuspend will fail.
1481 * 1365 *
1482 * Often the caller will hold @udev's device lock, but this is not 1366 * The caller must hold @udev's device lock.
1483 * necessary.
1484 * 1367 *
1485 * This routine can run only in process context. 1368 * This routine can run only in process context.
1486 */ 1369 */
@@ -1488,9 +1371,11 @@ void usb_autosuspend_device(struct usb_device *udev)
1488{ 1371{
1489 int status; 1372 int status;
1490 1373
1491 status = usb_autopm_do_device(udev, -1); 1374 udev->last_busy = jiffies;
1492 dev_vdbg(&udev->dev, "%s: cnt %d\n", 1375 status = pm_runtime_put_sync(&udev->dev);
1493 __func__, udev->pm_usage_cnt); 1376 dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
1377 __func__, atomic_read(&udev->dev.power.usage_count),
1378 status);
1494} 1379}
1495 1380
1496/** 1381/**
@@ -1500,17 +1385,22 @@ void usb_autosuspend_device(struct usb_device *udev)
1500 * This routine should be called when a core subsystem thinks @udev may 1385 * This routine should be called when a core subsystem thinks @udev may
1501 * be ready to autosuspend. 1386 * be ready to autosuspend.
1502 * 1387 *
1503 * @udev's usage counter left unchanged. If it or any of the usage counters 1388 * @udev's usage counter left unchanged. If it is 0 and all the interfaces
1504 * for an active interface is greater than 0, or autosuspend is not allowed 1389 * are inactive then an autosuspend will be attempted. The attempt may
1505 * for any other reason, no autosuspend request will be queued. 1390 * fail or be delayed.
1391 *
1392 * The caller must hold @udev's device lock.
1506 * 1393 *
1507 * This routine can run only in process context. 1394 * This routine can run only in process context.
1508 */ 1395 */
1509void usb_try_autosuspend_device(struct usb_device *udev) 1396void usb_try_autosuspend_device(struct usb_device *udev)
1510{ 1397{
1511 usb_autopm_do_device(udev, 0); 1398 int status;
1512 dev_vdbg(&udev->dev, "%s: cnt %d\n", 1399
1513 __func__, udev->pm_usage_cnt); 1400 status = pm_runtime_idle(&udev->dev);
1401 dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
1402 __func__, atomic_read(&udev->dev.power.usage_count),
1403 status);
1514} 1404}
1515 1405
1516/** 1406/**
@@ -1519,16 +1409,15 @@ void usb_try_autosuspend_device(struct usb_device *udev)
1519 * 1409 *
1520 * This routine should be called when a core subsystem wants to use @udev 1410 * This routine should be called when a core subsystem wants to use @udev
1521 * and needs to guarantee that it is not suspended. No autosuspend will 1411 * and needs to guarantee that it is not suspended. No autosuspend will
1522 * occur until usb_autosuspend_device is called. (Note that this will not 1412 * occur until usb_autosuspend_device() is called. (Note that this will
1523 * prevent suspend events originating in the PM core.) Examples would be 1413 * not prevent suspend events originating in the PM core.) Examples would
1524 * when @udev's device file in usbfs is opened or when a remote-wakeup 1414 * be when @udev's device file in usbfs is opened or when a remote-wakeup
1525 * request is received. 1415 * request is received.
1526 * 1416 *
1527 * @udev's usage counter is incremented to prevent subsequent autosuspends. 1417 * @udev's usage counter is incremented to prevent subsequent autosuspends.
1528 * However if the autoresume fails then the usage counter is re-decremented. 1418 * However if the autoresume fails then the usage counter is re-decremented.
1529 * 1419 *
1530 * Often the caller will hold @udev's device lock, but this is not 1420 * The caller must hold @udev's device lock.
1531 * necessary (and attempting it might cause deadlock).
1532 * 1421 *
1533 * This routine can run only in process context. 1422 * This routine can run only in process context.
1534 */ 1423 */
@@ -1536,42 +1425,14 @@ int usb_autoresume_device(struct usb_device *udev)
1536{ 1425{
1537 int status; 1426 int status;
1538 1427
1539 status = usb_autopm_do_device(udev, 1); 1428 status = pm_runtime_get_sync(&udev->dev);
1540 dev_vdbg(&udev->dev, "%s: status %d cnt %d\n", 1429 if (status < 0)
1541 __func__, status, udev->pm_usage_cnt); 1430 pm_runtime_put_sync(&udev->dev);
1542 return status; 1431 dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
1543} 1432 __func__, atomic_read(&udev->dev.power.usage_count),
1544 1433 status);
1545/* Internal routine to adjust an interface's usage counter and change 1434 if (status > 0)
1546 * its device's autosuspend state. 1435 status = 0;
1547 */
1548static int usb_autopm_do_interface(struct usb_interface *intf,
1549 int inc_usage_cnt)
1550{
1551 struct usb_device *udev = interface_to_usbdev(intf);
1552 int status = 0;
1553
1554 usb_pm_lock(udev);
1555 if (intf->condition == USB_INTERFACE_UNBOUND)
1556 status = -ENODEV;
1557 else {
1558 atomic_add(inc_usage_cnt, &intf->pm_usage_cnt);
1559 udev->last_busy = jiffies;
1560 if (inc_usage_cnt >= 0 &&
1561 atomic_read(&intf->pm_usage_cnt) > 0) {
1562 if (udev->state == USB_STATE_SUSPENDED)
1563 status = usb_resume_both(udev,
1564 PMSG_AUTO_RESUME);
1565 if (status != 0)
1566 atomic_sub(inc_usage_cnt, &intf->pm_usage_cnt);
1567 else
1568 udev->last_busy = jiffies;
1569 } else if (inc_usage_cnt <= 0 &&
1570 atomic_read(&intf->pm_usage_cnt) <= 0) {
1571 status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
1572 }
1573 }
1574 usb_pm_unlock(udev);
1575 return status; 1436 return status;
1576} 1437}
1577 1438
@@ -1585,34 +1446,25 @@ static int usb_autopm_do_interface(struct usb_interface *intf,
1585 * closed. 1446 * closed.
1586 * 1447 *
1587 * The routine decrements @intf's usage counter. When the counter reaches 1448 * The routine decrements @intf's usage counter. When the counter reaches
1588 * 0, a delayed autosuspend request for @intf's device is queued. When 1449 * 0, a delayed autosuspend request for @intf's device is attempted. The
1589 * the delay expires, if @intf->pm_usage_cnt is still <= 0 along with all 1450 * attempt may fail (see autosuspend_check()).
1590 * the other usage counters for the sibling interfaces and @intf's
1591 * usb_device, the device and all its interfaces will be autosuspended.
1592 *
1593 * Note that @intf->pm_usage_cnt is owned by the interface driver. The
1594 * core will not change its value other than the increment and decrement
1595 * in usb_autopm_get_interface and usb_autopm_put_interface. The driver
1596 * may use this simple counter-oriented discipline or may set the value
1597 * any way it likes.
1598 * 1451 *
1599 * If the driver has set @intf->needs_remote_wakeup then autosuspend will 1452 * If the driver has set @intf->needs_remote_wakeup then autosuspend will
1600 * take place only if the device's remote-wakeup facility is enabled. 1453 * take place only if the device's remote-wakeup facility is enabled.
1601 * 1454 *
1602 * Suspend method calls queued by this routine can arrive at any time
1603 * while @intf is resumed and its usage counter is equal to 0. They are
1604 * not protected by the usb_device's lock but only by its pm_mutex.
1605 * Drivers must provide their own synchronization.
1606 *
1607 * This routine can run only in process context. 1455 * This routine can run only in process context.
1608 */ 1456 */
1609void usb_autopm_put_interface(struct usb_interface *intf) 1457void usb_autopm_put_interface(struct usb_interface *intf)
1610{ 1458{
1611 int status; 1459 struct usb_device *udev = interface_to_usbdev(intf);
1460 int status;
1612 1461
1613 status = usb_autopm_do_interface(intf, -1); 1462 udev->last_busy = jiffies;
1614 dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", 1463 atomic_dec(&intf->pm_usage_cnt);
1615 __func__, status, atomic_read(&intf->pm_usage_cnt)); 1464 status = pm_runtime_put_sync(&intf->dev);
1465 dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
1466 __func__, atomic_read(&intf->dev.power.usage_count),
1467 status);
1616} 1468}
1617EXPORT_SYMBOL_GPL(usb_autopm_put_interface); 1469EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
1618 1470
@@ -1620,11 +1472,11 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
1620 * usb_autopm_put_interface_async - decrement a USB interface's PM-usage counter 1472 * usb_autopm_put_interface_async - decrement a USB interface's PM-usage counter
1621 * @intf: the usb_interface whose counter should be decremented 1473 * @intf: the usb_interface whose counter should be decremented
1622 * 1474 *
1623 * This routine does essentially the same thing as 1475 * This routine does much the same thing as usb_autopm_put_interface():
1624 * usb_autopm_put_interface(): it decrements @intf's usage counter and 1476 * It decrements @intf's usage counter and schedules a delayed
1625 * queues a delayed autosuspend request if the counter is <= 0. The 1477 * autosuspend request if the counter is <= 0. The difference is that it
1626 * difference is that it does not acquire the device's pm_mutex; 1478 * does not perform any synchronization; callers should hold a private
1627 * callers must handle all synchronization issues themselves. 1479 * lock and handle all synchronization issues themselves.
1628 * 1480 *
1629 * Typically a driver would call this routine during an URB's completion 1481 * Typically a driver would call this routine during an URB's completion
1630 * handler, if no more URBs were pending. 1482 * handler, if no more URBs were pending.
@@ -1634,28 +1486,58 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
 void usb_autopm_put_interface_async(struct usb_interface *intf)
 {
 	struct usb_device *udev = interface_to_usbdev(intf);
+	unsigned long last_busy;
 	int status = 0;
 
-	if (intf->condition == USB_INTERFACE_UNBOUND) {
-		status = -ENODEV;
-	} else {
-		udev->last_busy = jiffies;
-		atomic_dec(&intf->pm_usage_cnt);
-		if (udev->autosuspend_disabled || udev->autosuspend_delay < 0)
-			status = -EPERM;
-		else if (atomic_read(&intf->pm_usage_cnt) <= 0 &&
-				!timer_pending(&udev->autosuspend.timer)) {
-			queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
-					round_jiffies_up_relative(
-						udev->autosuspend_delay));
+	last_busy = udev->last_busy;
+	udev->last_busy = jiffies;
+	atomic_dec(&intf->pm_usage_cnt);
+	pm_runtime_put_noidle(&intf->dev);
+
+	if (!udev->autosuspend_disabled) {
+		/* Optimization: Don't schedule a delayed autosuspend if
+		 * the timer is already running and the expiration time
+		 * wouldn't change.
+		 *
+		 * We have to use the interface's timer. Attempts to
+		 * schedule a suspend for the device would fail because
+		 * the interface is still active.
+		 */
+		if (intf->dev.power.timer_expires == 0 ||
+				round_jiffies_up(last_busy) !=
+				round_jiffies_up(jiffies)) {
+			status = pm_schedule_suspend(&intf->dev,
+					jiffies_to_msecs(
+					round_jiffies_up_relative(
+						udev->autosuspend_delay)));
 		}
 	}
-	dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
-			__func__, status, atomic_read(&intf->pm_usage_cnt));
+	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
+			__func__, atomic_read(&intf->dev.power.usage_count),
+			status);
 }
 EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
1657 1520
1658/** 1521/**
1522 * usb_autopm_put_interface_no_suspend - decrement a USB interface's PM-usage counter
1523 * @intf: the usb_interface whose counter should be decremented
1524 *
1525 * This routine decrements @intf's usage counter but does not carry out an
1526 * autosuspend.
1527 *
1528 * This routine can run in atomic context.
1529 */
1530void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
1531{
1532 struct usb_device *udev = interface_to_usbdev(intf);
1533
1534 udev->last_busy = jiffies;
1535 atomic_dec(&intf->pm_usage_cnt);
1536 pm_runtime_put_noidle(&intf->dev);
1537}
1538EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
1539
1540/**
1659 * usb_autopm_get_interface - increment a USB interface's PM-usage counter 1541 * usb_autopm_get_interface - increment a USB interface's PM-usage counter
1660 * @intf: the usb_interface whose counter should be incremented 1542 * @intf: the usb_interface whose counter should be incremented
1661 * 1543 *
@@ -1667,25 +1549,8 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
1667 * or @intf is unbound. A typical example would be a character-device 1549 * or @intf is unbound. A typical example would be a character-device
1668 * driver when its device file is opened. 1550 * driver when its device file is opened.
1669 * 1551 *
1670 * 1552 * @intf's usage counter is incremented to prevent subsequent autosuspends.
1671 * The routine increments @intf's usage counter. (However if the 1553 * However if the autoresume fails then the counter is re-decremented.
1672 * autoresume fails then the counter is re-decremented.) So long as the
1673 * counter is greater than 0, autosuspend will not be allowed for @intf
1674 * or its usb_device. When the driver is finished using @intf it should
1675 * call usb_autopm_put_interface() to decrement the usage counter and
1676 * queue a delayed autosuspend request (if the counter is <= 0).
1677 *
1678 *
1679 * Note that @intf->pm_usage_cnt is owned by the interface driver. The
1680 * core will not change its value other than the increment and decrement
1681 * in usb_autopm_get_interface and usb_autopm_put_interface. The driver
1682 * may use this simple counter-oriented discipline or may set the value
1683 * any way it likes.
1684 *
1685 * Resume method calls generated by this routine can arrive at any time
1686 * while @intf is suspended. They are not protected by the usb_device's
1687 * lock but only by its pm_mutex. Drivers must provide their own
1688 * synchronization.
1689 * 1554 *
1690 * This routine can run only in process context. 1555 * This routine can run only in process context.
1691 */ 1556 */
@@ -1693,9 +1558,16 @@ int usb_autopm_get_interface(struct usb_interface *intf)
1693{ 1558{
1694 int status; 1559 int status;
1695 1560
1696 status = usb_autopm_do_interface(intf, 1); 1561 status = pm_runtime_get_sync(&intf->dev);
1697 dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", 1562 if (status < 0)
1698 __func__, status, atomic_read(&intf->pm_usage_cnt)); 1563 pm_runtime_put_sync(&intf->dev);
1564 else
1565 atomic_inc(&intf->pm_usage_cnt);
1566 dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
1567 __func__, atomic_read(&intf->dev.power.usage_count),
1568 status);
1569 if (status > 0)
1570 status = 0;
1699 return status; 1571 return status;
1700} 1572}
1701EXPORT_SYMBOL_GPL(usb_autopm_get_interface); 1573EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
@@ -1705,149 +1577,207 @@ EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
1705 * @intf: the usb_interface whose counter should be incremented 1577 * @intf: the usb_interface whose counter should be incremented
1706 * 1578 *
1707 * This routine does much the same thing as 1579 * This routine does much the same thing as
1708 * usb_autopm_get_interface(): it increments @intf's usage counter and 1580 * usb_autopm_get_interface(): It increments @intf's usage counter and
1709 * queues an autoresume request if the result is > 0. The differences 1581 * queues an autoresume request if the device is suspended. The
1710 * are that it does not acquire the device's pm_mutex (callers must 1582 * differences are that it does not perform any synchronization (callers
1711 * handle all synchronization issues themselves), and it does not 1583 * should hold a private lock and handle all synchronization issues
1712 * autoresume the device directly (it only queues a request). After a 1584 * themselves), and it does not autoresume the device directly (it only
1713 * successful call, the device will generally not yet be resumed. 1585 * queues a request). After a successful call, the device may not yet be
1586 * resumed.
1714 * 1587 *
1715 * This routine can run in atomic context. 1588 * This routine can run in atomic context.
1716 */ 1589 */
1717int usb_autopm_get_interface_async(struct usb_interface *intf) 1590int usb_autopm_get_interface_async(struct usb_interface *intf)
1718{ 1591{
1719 struct usb_device *udev = interface_to_usbdev(intf); 1592 int status = 0;
1720 int status = 0; 1593 enum rpm_status s;
1721 1594
1722 if (intf->condition == USB_INTERFACE_UNBOUND) 1595 /* Don't request a resume unless the interface is already suspending
1723 status = -ENODEV; 1596 * or suspended. Doing so would force a running suspend timer to be
1724 else { 1597 * cancelled.
1598 */
1599 pm_runtime_get_noresume(&intf->dev);
1600 s = ACCESS_ONCE(intf->dev.power.runtime_status);
1601 if (s == RPM_SUSPENDING || s == RPM_SUSPENDED)
1602 status = pm_request_resume(&intf->dev);
1603
1604 if (status < 0 && status != -EINPROGRESS)
1605 pm_runtime_put_noidle(&intf->dev);
1606 else
1725 atomic_inc(&intf->pm_usage_cnt); 1607 atomic_inc(&intf->pm_usage_cnt);
1726 if (atomic_read(&intf->pm_usage_cnt) > 0 && 1608 dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
1727 udev->state == USB_STATE_SUSPENDED) 1609 __func__, atomic_read(&intf->dev.power.usage_count),
1728 queue_work(ksuspend_usb_wq, &udev->autoresume); 1610 status);
1729 } 1611 if (status > 0)
1730 dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", 1612 status = 0;
1731 __func__, status, atomic_read(&intf->pm_usage_cnt));
1732 return status; 1613 return status;
1733} 1614}
1734EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async); 1615EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async);
1735 1616
1736#else
1737
1738void usb_autosuspend_work(struct work_struct *work)
1739{}
1740
1741void usb_autoresume_work(struct work_struct *work)
1742{}
1743
1744#endif /* CONFIG_USB_SUSPEND */
1745
1746/** 1617/**
1747 * usb_external_suspend_device - external suspend of a USB device and its interfaces 1618 * usb_autopm_get_interface_no_resume - increment a USB interface's PM-usage counter
1748 * @udev: the usb_device to suspend 1619 * @intf: the usb_interface whose counter should be incremented
1749 * @msg: Power Management message describing this state transition
1750 * 1620 *
1751 * This routine handles external suspend requests: ones not generated 1621 * This routine increments @intf's usage counter but does not carry out an
1752 * internally by a USB driver (autosuspend) but rather coming from the user 1622 * autoresume.
1753 * (via sysfs) or the PM core (system sleep). The suspend will be carried
1754 * out regardless of @udev's usage counter or those of its interfaces,
1755 * and regardless of whether or not remote wakeup is enabled. Of course,
1756 * interface drivers still have the option of failing the suspend (if
1757 * there are unsuspended children, for example).
1758 * 1623 *
1759 * The caller must hold @udev's device lock. 1624 * This routine can run in atomic context.
1760 */ 1625 */
1761int usb_external_suspend_device(struct usb_device *udev, pm_message_t msg) 1626void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
1762{ 1627{
1763 int status; 1628 struct usb_device *udev = interface_to_usbdev(intf);
1764 1629
1765 do_unbind_rebind(udev, DO_UNBIND); 1630 udev->last_busy = jiffies;
1766 usb_pm_lock(udev); 1631 atomic_inc(&intf->pm_usage_cnt);
1767 status = usb_suspend_both(udev, msg); 1632 pm_runtime_get_noresume(&intf->dev);
1768 usb_pm_unlock(udev);
1769 return status;
1770} 1633}
1634EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
1771 1635
1772/** 1636/* Internal routine to check whether we may autosuspend a device. */
1773 * usb_external_resume_device - external resume of a USB device and its interfaces 1637static int autosuspend_check(struct usb_device *udev)
1774 * @udev: the usb_device to resume
1775 * @msg: Power Management message describing this state transition
1776 *
1777 * This routine handles external resume requests: ones not generated
1778 * internally by a USB driver (autoresume) but rather coming from the user
1779 * (via sysfs), the PM core (system resume), or the device itself (remote
1780 * wakeup). @udev's usage counter is unaffected.
1781 *
1782 * The caller must hold @udev's device lock.
1783 */
1784int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
1785{ 1638{
1786 int status; 1639 int i;
1640 struct usb_interface *intf;
1641 unsigned long suspend_time, j;
1787 1642
1788 usb_pm_lock(udev); 1643 /* Fail if autosuspend is disabled, or any interfaces are in use, or
1789 status = usb_resume_both(udev, msg); 1644 * any interface drivers require remote wakeup but it isn't available.
1790 udev->last_busy = jiffies; 1645 */
1791 usb_pm_unlock(udev); 1646 udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
1792 if (status == 0) 1647 if (udev->actconfig) {
1793 do_unbind_rebind(udev, DO_REBIND); 1648 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
1649 intf = udev->actconfig->interface[i];
1794 1650
1795 /* Now that the device is awake, we can start trying to autosuspend 1651 /* We don't need to check interfaces that are
1796 * it again. */ 1652 * disabled for runtime PM. Either they are unbound
1797 if (status == 0) 1653 * or else their drivers don't support autosuspend
1798 usb_try_autosuspend_device(udev); 1654 * and so they are permanently active.
1799 return status; 1655 */
1656 if (intf->dev.power.disable_depth)
1657 continue;
1658 if (atomic_read(&intf->dev.power.usage_count) > 0)
1659 return -EBUSY;
1660 if (intf->needs_remote_wakeup &&
1661 !udev->do_remote_wakeup) {
1662 dev_dbg(&udev->dev, "remote wakeup needed "
1663 "for autosuspend\n");
1664 return -EOPNOTSUPP;
1665 }
1666
1667 /* Don't allow autosuspend if the device will need
1668 * a reset-resume and any of its interface drivers
1669 * doesn't include support or needs remote wakeup.
1670 */
1671 if (udev->quirks & USB_QUIRK_RESET_RESUME) {
1672 struct usb_driver *driver;
1673
1674 driver = to_usb_driver(intf->dev.driver);
1675 if (!driver->reset_resume ||
1676 intf->needs_remote_wakeup)
1677 return -EOPNOTSUPP;
1678 }
1679 }
1680 }
1681
1682 /* If everything is okay but the device hasn't been idle for long
1683 * enough, queue a delayed autosuspend request.
1684 */
1685 j = ACCESS_ONCE(jiffies);
1686 suspend_time = udev->last_busy + udev->autosuspend_delay;
1687 if (time_before(j, suspend_time)) {
1688 pm_schedule_suspend(&udev->dev, jiffies_to_msecs(
1689 round_jiffies_up_relative(suspend_time - j)));
1690 return -EAGAIN;
1691 }
1692 return 0;
1800} 1693}
1801 1694
1802int usb_suspend(struct device *dev, pm_message_t msg) 1695static int usb_runtime_suspend(struct device *dev)
1803{ 1696{
1804 struct usb_device *udev; 1697 int status = 0;
1805
1806 udev = to_usb_device(dev);
1807 1698
1808 /* If udev is already suspended, we can skip this suspend and 1699 /* A USB device can be suspended if it passes the various autosuspend
1809 * we should also skip the upcoming system resume. High-speed 1700 * checks. Runtime suspend for a USB device means suspending all the
1810 * root hubs are an exception; they need to resume whenever the 1701 * interfaces and then the device itself.
1811 * system wakes up in order for USB-PERSIST port handover to work
1812 * properly.
1813 */ 1702 */
1814 if (udev->state == USB_STATE_SUSPENDED) { 1703 if (is_usb_device(dev)) {
1815 if (udev->parent || udev->speed != USB_SPEED_HIGH) 1704 struct usb_device *udev = to_usb_device(dev);
1816 udev->skip_sys_resume = 1; 1705
1817 return 0; 1706 if (autosuspend_check(udev) != 0)
1707 return -EAGAIN;
1708
1709 status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
1710
1711 /* If an interface fails the suspend, adjust the last_busy
1712 * time so that we don't get another suspend attempt right
1713 * away.
1714 */
1715 if (status) {
1716 udev->last_busy = jiffies +
1717 (udev->autosuspend_delay == 0 ?
1718 HZ/2 : 0);
1719 }
1720
1721 /* Prevent the parent from suspending immediately after */
1722 else if (udev->parent) {
1723 udev->parent->last_busy = jiffies;
1724 }
1818 } 1725 }
1819 1726
1820 udev->skip_sys_resume = 0; 1727 /* Runtime suspend for a USB interface doesn't mean anything. */
1821 return usb_external_suspend_device(udev, msg); 1728 return status;
1822} 1729}
1823 1730
1824int usb_resume(struct device *dev, pm_message_t msg) 1731static int usb_runtime_resume(struct device *dev)
1825{ 1732{
1826 struct usb_device *udev; 1733 /* Runtime resume for a USB device means resuming both the device
1827 int status; 1734 * and all its interfaces.
1735 */
1736 if (is_usb_device(dev)) {
1737 struct usb_device *udev = to_usb_device(dev);
1738 int status;
1828 1739
1829 udev = to_usb_device(dev); 1740 status = usb_resume_both(udev, PMSG_AUTO_RESUME);
1741 udev->last_busy = jiffies;
1742 return status;
1743 }
1830 1744
1831 /* If udev->skip_sys_resume is set then udev was already suspended 1745 /* Runtime resume for a USB interface doesn't mean anything. */
1832 * when the system sleep started, so we don't want to resume it 1746 return 0;
1833 * during this system wakeup. 1747}
1834 */
1835 if (udev->skip_sys_resume)
1836 return 0;
1837 status = usb_external_resume_device(udev, msg);
1838 1748
1839 /* Avoid PM error messages for devices disconnected while suspended 1749static int usb_runtime_idle(struct device *dev)
1840 * as we'll display regular disconnect messages just a bit later. 1750{
1751 /* An idle USB device can be suspended if it passes the various
1752 * autosuspend checks. An idle interface can be suspended at
1753 * any time.
1841 */ 1754 */
1842 if (status == -ENODEV) 1755 if (is_usb_device(dev)) {
1843 return 0; 1756 struct usb_device *udev = to_usb_device(dev);
1844 return status; 1757
1758 if (autosuspend_check(udev) != 0)
1759 return 0;
1760 }
1761
1762 pm_runtime_suspend(dev);
1763 return 0;
1845} 1764}
1846 1765
1847#endif /* CONFIG_PM */ 1766static struct dev_pm_ops usb_bus_pm_ops = {
1767 .runtime_suspend = usb_runtime_suspend,
1768 .runtime_resume = usb_runtime_resume,
1769 .runtime_idle = usb_runtime_idle,
1770};
1771
1772#else
1773
1774#define usb_bus_pm_ops (*(struct dev_pm_ops *) NULL)
1775
1776#endif /* CONFIG_USB_SUSPEND */
1848 1777
1849struct bus_type usb_bus_type = { 1778struct bus_type usb_bus_type = {
1850 .name = "usb", 1779 .name = "usb",
1851 .match = usb_device_match, 1780 .match = usb_device_match,
1852 .uevent = usb_uevent, 1781 .uevent = usb_uevent,
1782 .pm = &usb_bus_pm_ops,
1853}; 1783};
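The driver.c hunks above replace the private pm_usage_cnt bookkeeping with the generic runtime-PM framework: usb_autopm_get_interface() and usb_autopm_put_interface() now wrap pm_runtime_get_sync() and pm_runtime_put_sync() on the interface's struct device, and autosuspend_check() decides whether a runtime suspend may actually go ahead. Below is a minimal sketch of the usage-count discipline the new kernel-doc describes; the "skel" name and the I/O placeholder are hypothetical, not code from this patch.

#include <linux/usb.h>

/* Hypothetical interface driver bracketing I/O with the autopm calls. */
static int skel_do_io(struct usb_interface *intf)
{
	int rc;

	/* Usage counter++; resumes the device and blocks until it is active */
	rc = usb_autopm_get_interface(intf);
	if (rc < 0)
		return rc;

	/* ... submit URBs and wait for their completions here ... */

	/* Usage counter--; a delayed autosuspend may now be scheduled */
	usb_autopm_put_interface(intf);
	return 0;
}

With the conversion, the counter reported by the dev_vdbg lines is intf->dev.power.usage_count rather than the old pm_usage_cnt value.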
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index bfc6c2eea647..c3536f151f02 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -34,7 +34,6 @@ static int usb_open(struct inode * inode, struct file * file)
34 int err = -ENODEV; 34 int err = -ENODEV;
35 const struct file_operations *old_fops, *new_fops = NULL; 35 const struct file_operations *old_fops, *new_fops = NULL;
36 36
37 lock_kernel();
38 down_read(&minor_rwsem); 37 down_read(&minor_rwsem);
39 c = usb_minors[minor]; 38 c = usb_minors[minor];
40 39
@@ -53,7 +52,6 @@ static int usb_open(struct inode * inode, struct file * file)
53 fops_put(old_fops); 52 fops_put(old_fops);
54 done: 53 done:
55 up_read(&minor_rwsem); 54 up_read(&minor_rwsem);
56 unlock_kernel();
57 return err; 55 return err;
58} 56}
59 57
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 80995ef0868c..2f8cedda8007 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -39,6 +39,7 @@
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40#include <linux/workqueue.h> 40#include <linux/workqueue.h>
41#include <linux/mutex.h> 41#include <linux/mutex.h>
42#include <linux/pm_runtime.h>
42 43
43#include <linux/usb.h> 44#include <linux/usb.h>
44 45
@@ -141,7 +142,7 @@ static const u8 usb3_rh_dev_descriptor[18] = {
141 0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */ 142 0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */
142 143
143 0x6b, 0x1d, /* __le16 idVendor; Linux Foundation */ 144 0x6b, 0x1d, /* __le16 idVendor; Linux Foundation */
144 0x02, 0x00, /* __le16 idProduct; device 0x0002 */ 145 0x03, 0x00, /* __le16 idProduct; device 0x0003 */
145 KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */ 146 KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
146 147
147 0x03, /* __u8 iManufacturer; */ 148 0x03, /* __u8 iManufacturer; */
@@ -1670,11 +1671,16 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev,
1670 } 1671 }
1671 } 1672 }
1672 for (i = 0; i < num_intfs; ++i) { 1673 for (i = 0; i < num_intfs; ++i) {
1674 struct usb_host_interface *first_alt;
1675 int iface_num;
1676
1677 first_alt = &new_config->intf_cache[i]->altsetting[0];
1678 iface_num = first_alt->desc.bInterfaceNumber;
1673 /* Set up endpoints for alternate interface setting 0 */ 1679 /* Set up endpoints for alternate interface setting 0 */
1674 alt = usb_find_alt_setting(new_config, i, 0); 1680 alt = usb_find_alt_setting(new_config, iface_num, 0);
1675 if (!alt) 1681 if (!alt)
1676 /* No alt setting 0? Pick the first setting. */ 1682 /* No alt setting 0? Pick the first setting. */
1677 alt = &new_config->intf_cache[i]->altsetting[0]; 1683 alt = first_alt;
1678 1684
1679 for (j = 0; j < alt->desc.bNumEndpoints; j++) { 1685 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
1680 ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]); 1686 ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
@@ -1853,6 +1859,10 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
1853 return status; 1859 return status;
1854} 1860}
1855 1861
1862#endif /* CONFIG_PM */
1863
1864#ifdef CONFIG_USB_SUSPEND
1865
1856/* Workqueue routine for root-hub remote wakeup */ 1866/* Workqueue routine for root-hub remote wakeup */
1857static void hcd_resume_work(struct work_struct *work) 1867static void hcd_resume_work(struct work_struct *work)
1858{ 1868{
@@ -1860,8 +1870,7 @@ static void hcd_resume_work(struct work_struct *work)
1860 struct usb_device *udev = hcd->self.root_hub; 1870 struct usb_device *udev = hcd->self.root_hub;
1861 1871
1862 usb_lock_device(udev); 1872 usb_lock_device(udev);
1863 usb_mark_last_busy(udev); 1873 usb_remote_wakeup(udev);
1864 usb_external_resume_device(udev, PMSG_REMOTE_RESUME);
1865 usb_unlock_device(udev); 1874 usb_unlock_device(udev);
1866} 1875}
1867 1876
@@ -1880,12 +1889,12 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
1880 1889
1881 spin_lock_irqsave (&hcd_root_hub_lock, flags); 1890 spin_lock_irqsave (&hcd_root_hub_lock, flags);
1882 if (hcd->rh_registered) 1891 if (hcd->rh_registered)
1883 queue_work(ksuspend_usb_wq, &hcd->wakeup_work); 1892 queue_work(pm_wq, &hcd->wakeup_work);
1884 spin_unlock_irqrestore (&hcd_root_hub_lock, flags); 1893 spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
1885} 1894}
1886EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub); 1895EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub);
1887 1896
1888#endif 1897#endif /* CONFIG_USB_SUSPEND */
1889 1898
1890/*-------------------------------------------------------------------------*/ 1899/*-------------------------------------------------------------------------*/
1891 1900
@@ -2030,7 +2039,7 @@ struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
2030 init_timer(&hcd->rh_timer); 2039 init_timer(&hcd->rh_timer);
2031 hcd->rh_timer.function = rh_timer_func; 2040 hcd->rh_timer.function = rh_timer_func;
2032 hcd->rh_timer.data = (unsigned long) hcd; 2041 hcd->rh_timer.data = (unsigned long) hcd;
2033#ifdef CONFIG_PM 2042#ifdef CONFIG_USB_SUSPEND
2034 INIT_WORK(&hcd->wakeup_work, hcd_resume_work); 2043 INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
2035#endif 2044#endif
2036 mutex_init(&hcd->bandwidth_mutex); 2045 mutex_init(&hcd->bandwidth_mutex);
@@ -2230,7 +2239,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
2230 hcd->rh_registered = 0; 2239 hcd->rh_registered = 0;
2231 spin_unlock_irq (&hcd_root_hub_lock); 2240 spin_unlock_irq (&hcd_root_hub_lock);
2232 2241
2233#ifdef CONFIG_PM 2242#ifdef CONFIG_USB_SUSPEND
2234 cancel_work_sync(&hcd->wakeup_work); 2243 cancel_work_sync(&hcd->wakeup_work);
2235#endif 2244#endif
2236 2245
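The usb_hcd_alloc_bandwidth() hunk above stops assuming that an interface's position in intf_cache[] equals its bInterfaceNumber and instead looks up altsetting 0 by the number taken from the descriptor. The sketch below restates that lookup as a helper; the function name is hypothetical and only illustrates the pattern used in the patch.

#include <linux/usb.h>

/* Hypothetical helper: resolve altsetting 0 of the i-th cached interface. */
static struct usb_host_interface *
pick_altsetting0(struct usb_host_config *config, int i)
{
	struct usb_host_interface *first_alt =
			&config->intf_cache[i]->altsetting[0];
	int iface_num = first_alt->desc.bInterfaceNumber;

	/* Look up by the descriptor's interface number, which need not be i;
	 * fall back to the first cached setting if there is no altsetting 0.
	 */
	return usb_find_alt_setting(config, iface_num, 0) ?: first_alt;
}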
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index bbe2b924aae8..a3cdb09734ab 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -80,7 +80,7 @@ struct usb_hcd {
80 80
81 struct timer_list rh_timer; /* drives root-hub polling */ 81 struct timer_list rh_timer; /* drives root-hub polling */
82 struct urb *status_urb; /* the current status urb */ 82 struct urb *status_urb; /* the current status urb */
83#ifdef CONFIG_PM 83#ifdef CONFIG_USB_SUSPEND
84 struct work_struct wakeup_work; /* for remote wakeup */ 84 struct work_struct wakeup_work; /* for remote wakeup */
85#endif 85#endif
86 86
@@ -248,7 +248,7 @@ struct hc_driver {
248 /* xHCI specific functions */ 248 /* xHCI specific functions */
249 /* Called by usb_alloc_dev to alloc HC device structures */ 249 /* Called by usb_alloc_dev to alloc HC device structures */
250 int (*alloc_dev)(struct usb_hcd *, struct usb_device *); 250 int (*alloc_dev)(struct usb_hcd *, struct usb_device *);
251 /* Called by usb_release_dev to free HC device structures */ 251 /* Called by usb_disconnect to free HC device structures */
252 void (*free_dev)(struct usb_hcd *, struct usb_device *); 252 void (*free_dev)(struct usb_hcd *, struct usb_device *);
253 253
254 /* Bandwidth computation functions */ 254 /* Bandwidth computation functions */
@@ -286,6 +286,7 @@ struct hc_driver {
286 */ 286 */
287 int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev, 287 int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev,
288 struct usb_tt *tt, gfp_t mem_flags); 288 struct usb_tt *tt, gfp_t mem_flags);
289 int (*reset_device)(struct usb_hcd *, struct usb_device *);
289}; 290};
290 291
291extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); 292extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
@@ -463,16 +464,20 @@ extern int usb_find_interface_driver(struct usb_device *dev,
463#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN)) 464#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN))
464 465
465#ifdef CONFIG_PM 466#ifdef CONFIG_PM
466extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
467extern void usb_root_hub_lost_power(struct usb_device *rhdev); 467extern void usb_root_hub_lost_power(struct usb_device *rhdev);
468extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); 468extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
469extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); 469extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
470#endif /* CONFIG_PM */
471
472#ifdef CONFIG_USB_SUSPEND
473extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
470#else 474#else
471static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) 475static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
472{ 476{
473 return; 477 return;
474} 478}
475#endif /* CONFIG_PM */ 479#endif /* CONFIG_USB_SUSPEND */
480
476 481
477/* 482/*
478 * USB device fs stuff 483 * USB device fs stuff
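The hcd.h hunk adds a reset_device hook to struct hc_driver so the hub code (next section) can ask the host-controller driver to discard per-device state after a port reset. A hedged sketch of how an HCD might wire it up follows; the "foo" names are placeholders, not code from this patch.

#include "hcd.h"	/* struct hc_driver, drivers/usb/core/hcd.h at this point */

/* Hypothetical HCD callback: drop per-device controller state on reset. */
static int foo_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	/* free or reinitialize whatever the controller keeps for udev */
	return 0;
}

static const struct hc_driver foo_hc_driver = {
	/* ... the usual start/stop/urb_enqueue callbacks ... */
	.reset_device =		foo_reset_device,
};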
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 20ecb4cec8de..0940ccd6f4f4 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -22,6 +22,7 @@
22#include <linux/kthread.h> 22#include <linux/kthread.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/freezer.h> 24#include <linux/freezer.h>
25#include <linux/pm_runtime.h>
25 26
26#include <asm/uaccess.h> 27#include <asm/uaccess.h>
27#include <asm/byteorder.h> 28#include <asm/byteorder.h>
@@ -71,7 +72,6 @@ struct usb_hub {
71 72
72 unsigned mA_per_port; /* current for each child */ 73 unsigned mA_per_port; /* current for each child */
73 74
74 unsigned init_done:1;
75 unsigned limited_power:1; 75 unsigned limited_power:1;
76 unsigned quiescing:1; 76 unsigned quiescing:1;
77 unsigned disconnected:1; 77 unsigned disconnected:1;
@@ -820,7 +820,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
820 } 820 }
821 init3: 821 init3:
822 hub->quiescing = 0; 822 hub->quiescing = 0;
823 hub->init_done = 1;
824 823
825 status = usb_submit_urb(hub->urb, GFP_NOIO); 824 status = usb_submit_urb(hub->urb, GFP_NOIO);
826 if (status < 0) 825 if (status < 0)
@@ -861,11 +860,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
861 int i; 860 int i;
862 861
863 cancel_delayed_work_sync(&hub->init_work); 862 cancel_delayed_work_sync(&hub->init_work);
864 if (!hub->init_done) {
865 hub->init_done = 1;
866 usb_autopm_put_interface_no_suspend(
867 to_usb_interface(hub->intfdev));
868 }
869 863
870 /* khubd and related activity won't re-trigger */ 864 /* khubd and related activity won't re-trigger */
871 hub->quiescing = 1; 865 hub->quiescing = 1;
@@ -1224,6 +1218,9 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
1224 desc = intf->cur_altsetting; 1218 desc = intf->cur_altsetting;
1225 hdev = interface_to_usbdev(intf); 1219 hdev = interface_to_usbdev(intf);
1226 1220
1221 /* Hubs have proper suspend/resume support */
1222 usb_enable_autosuspend(hdev);
1223
1227 if (hdev->level == MAX_TOPO_LEVEL) { 1224 if (hdev->level == MAX_TOPO_LEVEL) {
1228 dev_err(&intf->dev, 1225 dev_err(&intf->dev,
1229 "Unsupported bus topology: hub nested too deep\n"); 1226 "Unsupported bus topology: hub nested too deep\n");
@@ -1402,10 +1399,8 @@ static void recursively_mark_NOTATTACHED(struct usb_device *udev)
1402 if (udev->children[i]) 1399 if (udev->children[i])
1403 recursively_mark_NOTATTACHED(udev->children[i]); 1400 recursively_mark_NOTATTACHED(udev->children[i]);
1404 } 1401 }
1405 if (udev->state == USB_STATE_SUSPENDED) { 1402 if (udev->state == USB_STATE_SUSPENDED)
1406 udev->discon_suspended = 1;
1407 udev->active_duration -= jiffies; 1403 udev->active_duration -= jiffies;
1408 }
1409 udev->state = USB_STATE_NOTATTACHED; 1404 udev->state = USB_STATE_NOTATTACHED;
1410} 1405}
1411 1406
@@ -1448,11 +1443,11 @@ void usb_set_device_state(struct usb_device *udev,
1448 || new_state == USB_STATE_SUSPENDED) 1443 || new_state == USB_STATE_SUSPENDED)
1449 ; /* No change to wakeup settings */ 1444 ; /* No change to wakeup settings */
1450 else if (new_state == USB_STATE_CONFIGURED) 1445 else if (new_state == USB_STATE_CONFIGURED)
1451 device_init_wakeup(&udev->dev, 1446 device_set_wakeup_capable(&udev->dev,
1452 (udev->actconfig->desc.bmAttributes 1447 (udev->actconfig->desc.bmAttributes
1453 & USB_CONFIG_ATT_WAKEUP)); 1448 & USB_CONFIG_ATT_WAKEUP));
1454 else 1449 else
1455 device_init_wakeup(&udev->dev, 0); 1450 device_set_wakeup_capable(&udev->dev, 0);
1456 } 1451 }
1457 if (udev->state == USB_STATE_SUSPENDED && 1452 if (udev->state == USB_STATE_SUSPENDED &&
1458 new_state != USB_STATE_SUSPENDED) 1453 new_state != USB_STATE_SUSPENDED)
@@ -1529,31 +1524,15 @@ static void update_address(struct usb_device *udev, int devnum)
1529 udev->devnum = devnum; 1524 udev->devnum = devnum;
1530} 1525}
1531 1526
1532#ifdef CONFIG_USB_SUSPEND 1527static void hub_free_dev(struct usb_device *udev)
1533
1534static void usb_stop_pm(struct usb_device *udev)
1535{ 1528{
1536 /* Synchronize with the ksuspend thread to prevent any more 1529 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
1537 * autosuspend requests from being submitted, and decrement
1538 * the parent's count of unsuspended children.
1539 */
1540 usb_pm_lock(udev);
1541 if (udev->parent && !udev->discon_suspended)
1542 usb_autosuspend_device(udev->parent);
1543 usb_pm_unlock(udev);
1544 1530
1545 /* Stop any autosuspend or autoresume requests already submitted */ 1531 /* Root hubs aren't real devices, so don't free HCD resources */
1546 cancel_delayed_work_sync(&udev->autosuspend); 1532 if (hcd->driver->free_dev && udev->parent)
1547 cancel_work_sync(&udev->autoresume); 1533 hcd->driver->free_dev(hcd, udev);
1548} 1534}
1549 1535
1550#else
1551
1552static inline void usb_stop_pm(struct usb_device *udev)
1553{ }
1554
1555#endif
1556
1557/** 1536/**
1558 * usb_disconnect - disconnect a device (usbcore-internal) 1537 * usb_disconnect - disconnect a device (usbcore-internal)
1559 * @pdev: pointer to device being disconnected 1538 * @pdev: pointer to device being disconnected
@@ -1622,7 +1601,7 @@ void usb_disconnect(struct usb_device **pdev)
1622 *pdev = NULL; 1601 *pdev = NULL;
1623 spin_unlock_irq(&device_state_lock); 1602 spin_unlock_irq(&device_state_lock);
1624 1603
1625 usb_stop_pm(udev); 1604 hub_free_dev(udev);
1626 1605
1627 put_device(&udev->dev); 1606 put_device(&udev->dev);
1628} 1607}
@@ -1799,9 +1778,18 @@ int usb_new_device(struct usb_device *udev)
1799{ 1778{
1800 int err; 1779 int err;
1801 1780
1802 /* Increment the parent's count of unsuspended children */ 1781 if (udev->parent) {
1803 if (udev->parent) 1782 /* Initialize non-root-hub device wakeup to disabled;
1804 usb_autoresume_device(udev->parent); 1783 * device (un)configuration controls wakeup capable
1784 * sysfs power/wakeup controls wakeup enabled/disabled
1785 */
1786 device_init_wakeup(&udev->dev, 0);
1787 device_set_wakeup_enable(&udev->dev, 1);
1788 }
1789
1790 /* Tell the runtime-PM framework the device is active */
1791 pm_runtime_set_active(&udev->dev);
1792 pm_runtime_enable(&udev->dev);
1805 1793
1806 usb_detect_quirks(udev); 1794 usb_detect_quirks(udev);
1807 err = usb_enumerate_device(udev); /* Read descriptors */ 1795 err = usb_enumerate_device(udev); /* Read descriptors */
@@ -1833,7 +1821,8 @@ int usb_new_device(struct usb_device *udev)
1833 1821
1834fail: 1822fail:
1835 usb_set_device_state(udev, USB_STATE_NOTATTACHED); 1823 usb_set_device_state(udev, USB_STATE_NOTATTACHED);
1836 usb_stop_pm(udev); 1824 pm_runtime_disable(&udev->dev);
1825 pm_runtime_set_suspended(&udev->dev);
1837 return err; 1826 return err;
1838} 1827}
1839 1828
@@ -1982,7 +1971,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
1982 if (!(portstatus & USB_PORT_STAT_RESET) && 1971 if (!(portstatus & USB_PORT_STAT_RESET) &&
1983 (portstatus & USB_PORT_STAT_ENABLE)) { 1972 (portstatus & USB_PORT_STAT_ENABLE)) {
1984 if (hub_is_wusb(hub)) 1973 if (hub_is_wusb(hub))
1985 udev->speed = USB_SPEED_VARIABLE; 1974 udev->speed = USB_SPEED_WIRELESS;
1986 else if (portstatus & USB_PORT_STAT_HIGH_SPEED) 1975 else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
1987 udev->speed = USB_SPEED_HIGH; 1976 udev->speed = USB_SPEED_HIGH;
1988 else if (portstatus & USB_PORT_STAT_LOW_SPEED) 1977 else if (portstatus & USB_PORT_STAT_LOW_SPEED)
@@ -2008,7 +1997,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2008 struct usb_device *udev, unsigned int delay) 1997 struct usb_device *udev, unsigned int delay)
2009{ 1998{
2010 int i, status; 1999 int i, status;
2000 struct usb_hcd *hcd;
2011 2001
2002 hcd = bus_to_hcd(udev->bus);
2012 /* Block EHCI CF initialization during the port reset. 2003 /* Block EHCI CF initialization during the port reset.
2013 * Some companion controllers don't like it when they mix. 2004 * Some companion controllers don't like it when they mix.
2014 */ 2005 */
@@ -2036,6 +2027,14 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
2036 /* TRSTRCY = 10 ms; plus some extra */ 2027 /* TRSTRCY = 10 ms; plus some extra */
2037 msleep(10 + 40); 2028 msleep(10 + 40);
2038 update_address(udev, 0); 2029 update_address(udev, 0);
2030 if (hcd->driver->reset_device) {
2031 status = hcd->driver->reset_device(hcd, udev);
2032 if (status < 0) {
2033 dev_err(&udev->dev, "Cannot reset "
2034 "HCD device state\n");
2035 break;
2036 }
2037 }
2039 /* FALL THROUGH */ 2038 /* FALL THROUGH */
2040 case -ENOTCONN: 2039 case -ENOTCONN:
2041 case -ENODEV: 2040 case -ENODEV:
@@ -2381,14 +2380,17 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
2381} 2380}
2382 2381
2383/* caller has locked udev */ 2382/* caller has locked udev */
2384static int remote_wakeup(struct usb_device *udev) 2383int usb_remote_wakeup(struct usb_device *udev)
2385{ 2384{
2386 int status = 0; 2385 int status = 0;
2387 2386
2388 if (udev->state == USB_STATE_SUSPENDED) { 2387 if (udev->state == USB_STATE_SUSPENDED) {
2389 dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-"); 2388 dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
2390 usb_mark_last_busy(udev); 2389 status = usb_autoresume_device(udev);
2391 status = usb_external_resume_device(udev, PMSG_REMOTE_RESUME); 2390 if (status == 0) {
2391 /* Let the drivers do their thing, then... */
2392 usb_autosuspend_device(udev);
2393 }
2392 } 2394 }
2393 return status; 2395 return status;
2394} 2396}
@@ -2425,11 +2427,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
2425 return status; 2427 return status;
2426} 2428}
2427 2429
2428static inline int remote_wakeup(struct usb_device *udev)
2429{
2430 return 0;
2431}
2432
2433#endif 2430#endif
2434 2431
2435static int hub_suspend(struct usb_interface *intf, pm_message_t msg) 2432static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
@@ -2496,11 +2493,6 @@ EXPORT_SYMBOL_GPL(usb_root_hub_lost_power);
2496 2493
2497#else /* CONFIG_PM */ 2494#else /* CONFIG_PM */
2498 2495
2499static inline int remote_wakeup(struct usb_device *udev)
2500{
2501 return 0;
2502}
2503
2504#define hub_suspend NULL 2496#define hub_suspend NULL
2505#define hub_resume NULL 2497#define hub_resume NULL
2506#define hub_reset_resume NULL 2498#define hub_reset_resume NULL
@@ -2645,14 +2637,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2645 2637
2646 mutex_lock(&usb_address0_mutex); 2638 mutex_lock(&usb_address0_mutex);
2647 2639
2648 if ((hcd->driver->flags & HCD_USB3) && udev->config) { 2640 if (!udev->config && oldspeed == USB_SPEED_SUPER) {
2649 /* FIXME this will need special handling by the xHCI driver. */
2650 dev_dbg(&udev->dev,
2651 "xHCI reset of configured device "
2652 "not supported yet.\n");
2653 retval = -EINVAL;
2654 goto fail;
2655 } else if (!udev->config && oldspeed == USB_SPEED_SUPER) {
2656 /* Don't reset USB 3.0 devices during an initial setup */ 2641 /* Don't reset USB 3.0 devices during an initial setup */
2657 usb_set_device_state(udev, USB_STATE_DEFAULT); 2642 usb_set_device_state(udev, USB_STATE_DEFAULT);
2658 } else { 2643 } else {
@@ -2678,7 +2663,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2678 */ 2663 */
2679 switch (udev->speed) { 2664 switch (udev->speed) {
2680 case USB_SPEED_SUPER: 2665 case USB_SPEED_SUPER:
2681 case USB_SPEED_VARIABLE: /* fixed at 512 */ 2666 case USB_SPEED_WIRELESS: /* fixed at 512 */
2682 udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512); 2667 udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
2683 break; 2668 break;
2684 case USB_SPEED_HIGH: /* fixed at 64 */ 2669 case USB_SPEED_HIGH: /* fixed at 64 */
@@ -2706,7 +2691,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2706 case USB_SPEED_SUPER: 2691 case USB_SPEED_SUPER:
2707 speed = "super"; 2692 speed = "super";
2708 break; 2693 break;
2709 case USB_SPEED_VARIABLE: 2694 case USB_SPEED_WIRELESS:
2710 speed = "variable"; 2695 speed = "variable";
2711 type = "Wireless "; 2696 type = "Wireless ";
2712 break; 2697 break;
@@ -3006,7 +2991,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
3006 /* For a suspended device, treat this as a 2991 /* For a suspended device, treat this as a
3007 * remote wakeup event. 2992 * remote wakeup event.
3008 */ 2993 */
3009 status = remote_wakeup(udev); 2994 status = usb_remote_wakeup(udev);
3010#endif 2995#endif
3011 2996
3012 } else { 2997 } else {
@@ -3192,6 +3177,7 @@ loop_disable:
3192loop: 3177loop:
3193 usb_ep0_reinit(udev); 3178 usb_ep0_reinit(udev);
3194 release_address(udev); 3179 release_address(udev);
3180 hub_free_dev(udev);
3195 usb_put_dev(udev); 3181 usb_put_dev(udev);
3196 if ((status == -ENOTCONN) || (status == -ENOTSUPP)) 3182 if ((status == -ENOTCONN) || (status == -ENOTSUPP))
3197 break; 3183 break;
@@ -3259,7 +3245,7 @@ static void hub_events(void)
3259 * disconnected while waiting for the lock to succeed. */ 3245 * disconnected while waiting for the lock to succeed. */
3260 usb_lock_device(hdev); 3246 usb_lock_device(hdev);
3261 if (unlikely(hub->disconnected)) 3247 if (unlikely(hub->disconnected))
3262 goto loop2; 3248 goto loop_disconnected;
3263 3249
3264 /* If the hub has died, clean up after it */ 3250 /* If the hub has died, clean up after it */
3265 if (hdev->state == USB_STATE_NOTATTACHED) { 3251 if (hdev->state == USB_STATE_NOTATTACHED) {
@@ -3352,7 +3338,7 @@ static void hub_events(void)
3352 msleep(10); 3338 msleep(10);
3353 3339
3354 usb_lock_device(udev); 3340 usb_lock_device(udev);
3355 ret = remote_wakeup(hdev-> 3341 ret = usb_remote_wakeup(hdev->
3356 children[i-1]); 3342 children[i-1]);
3357 usb_unlock_device(udev); 3343 usb_unlock_device(udev);
3358 if (ret < 0) 3344 if (ret < 0)
@@ -3419,7 +3405,7 @@ static void hub_events(void)
3419 * kick_khubd() and allow autosuspend. 3405 * kick_khubd() and allow autosuspend.
3420 */ 3406 */
3421 usb_autopm_put_interface(intf); 3407 usb_autopm_put_interface(intf);
3422 loop2: 3408 loop_disconnected:
3423 usb_unlock_device(hdev); 3409 usb_unlock_device(hdev);
3424 kref_put(&hub->kref, hub_release); 3410 kref_put(&hub->kref, hub_release);
3425 3411
@@ -3446,7 +3432,7 @@ static int hub_thread(void *__unused)
3446 return 0; 3432 return 0;
3447} 3433}
3448 3434
3449static struct usb_device_id hub_id_table [] = { 3435static const struct usb_device_id hub_id_table[] = {
3450 { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS, 3436 { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
3451 .bDeviceClass = USB_CLASS_HUB}, 3437 .bDeviceClass = USB_CLASS_HUB},
3452 { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, 3438 { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
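Remote wakeup now funnels through usb_remote_wakeup(), which autoresumes the child device and immediately drops the reference again, while autosuspend_check() in driver.c refuses to suspend a device whose interface drivers need wakeup when it is not available. A short sketch of the driver side of that contract; the "demo" probe is hypothetical.

#include <linux/usb.h>

/* Hypothetical probe: advertise that this interface must be able to wake
 * the host (e.g. to deliver unsolicited input) while autosuspended.
 */
static int demo_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	intf->needs_remote_wakeup = 1;
	return 0;
}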
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index df73574a9cc9..cd220277c6c3 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1316,7 +1316,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1316 1316
1317 alt = usb_altnum_to_altsetting(iface, alternate); 1317 alt = usb_altnum_to_altsetting(iface, alternate);
1318 if (!alt) { 1318 if (!alt) {
1319 dev_warn(&dev->dev, "selecting invalid altsetting %d", 1319 dev_warn(&dev->dev, "selecting invalid altsetting %d\n",
1320 alternate); 1320 alternate);
1321 return -EINVAL; 1321 return -EINVAL;
1322 } 1322 }
@@ -1471,7 +1471,7 @@ int usb_reset_configuration(struct usb_device *dev)
1471 /* If not, reinstate the old alternate settings */ 1471 /* If not, reinstate the old alternate settings */
1472 if (retval < 0) { 1472 if (retval < 0) {
1473reset_old_alts: 1473reset_old_alts:
1474 for (; i >= 0; i--) { 1474 for (i--; i >= 0; i--) {
1475 struct usb_interface *intf = config->interface[i]; 1475 struct usb_interface *intf = config->interface[i];
1476 struct usb_host_interface *alt; 1476 struct usb_host_interface *alt;
1477 1477
@@ -1843,7 +1843,6 @@ free_interfaces:
1843 intf->dev.dma_mask = dev->dev.dma_mask; 1843 intf->dev.dma_mask = dev->dev.dma_mask;
1844 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); 1844 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
1845 device_initialize(&intf->dev); 1845 device_initialize(&intf->dev);
1846 mark_quiesced(intf);
1847 dev_set_name(&intf->dev, "%d-%s:%d.%d", 1846 dev_set_name(&intf->dev, "%d-%s:%d.%d",
1848 dev->bus->busnum, dev->devpath, 1847 dev->bus->busnum, dev->devpath,
1849 configuration, alt->desc.bInterfaceNumber); 1848 configuration, alt->desc.bInterfaceNumber);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index ab93918d9207..f073c5cb4e7b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -103,10 +103,19 @@ void usb_detect_quirks(struct usb_device *udev)
103 dev_dbg(&udev->dev, "USB quirks for this device: %x\n", 103 dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
104 udev->quirks); 104 udev->quirks);
105 105
106 /* By default, disable autosuspend for all non-hubs */
107#ifdef CONFIG_USB_SUSPEND 106#ifdef CONFIG_USB_SUSPEND
108 if (udev->descriptor.bDeviceClass != USB_CLASS_HUB) 107
109 udev->autosuspend_disabled = 1; 108 /* By default, disable autosuspend for all devices. The hub driver
109 * will enable it for hubs.
110 */
111 usb_disable_autosuspend(udev);
112
113 /* Autosuspend can also be disabled if the initial autosuspend_delay
114 * is negative.
115 */
116 if (udev->autosuspend_delay < 0)
117 usb_autoresume_device(udev);
118
110#endif 119#endif
111 120
112 /* For the present, all devices default to USB-PERSIST enabled */ 121 /* For the present, all devices default to USB-PERSIST enabled */
@@ -120,6 +129,7 @@ void usb_detect_quirks(struct usb_device *udev)
120 * for all devices. It will affect things like hub resets 129 * for all devices. It will affect things like hub resets
121 * and EMF-related port disables. 130 * and EMF-related port disables.
122 */ 131 */
123 udev->persist_enabled = 1; 132 if (!(udev->quirks & USB_QUIRK_RESET_MORPHS))
133 udev->persist_enabled = 1;
124#endif /* CONFIG_PM */ 134#endif /* CONFIG_PM */
125} 135}
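usb_detect_quirks() now starts every device with autosuspend disabled (the hub driver re-enables it for hubs in hub_probe(), as seen above) and skips USB-PERSIST for devices carrying USB_QUIRK_RESET_MORPHS. For context, such a quirk is normally attached through the in-tree usb_quirk_list table; the sketch below mirrors that table, and the vendor/product IDs are placeholders, not an entry from this patch.

#include <linux/usb.h>
#include <linux/usb/quirks.h>

/* Placeholder entry: a device whose descriptors change across a reset. */
static const struct usb_device_id example_quirk_list[] = {
	{ USB_DEVICE(0xabcd, 0x1234), .driver_info = USB_QUIRK_RESET_MORPHS },
	{ }	/* terminating entry */
};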
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 5f3908f6e2dc..43c002e3a9aa 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -115,7 +115,7 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf)
115 case USB_SPEED_HIGH: 115 case USB_SPEED_HIGH:
116 speed = "480"; 116 speed = "480";
117 break; 117 break;
118 case USB_SPEED_VARIABLE: 118 case USB_SPEED_WIRELESS:
119 speed = "480"; 119 speed = "480";
120 break; 120 break;
121 case USB_SPEED_SUPER: 121 case USB_SPEED_SUPER:
@@ -191,6 +191,36 @@ show_quirks(struct device *dev, struct device_attribute *attr, char *buf)
191static DEVICE_ATTR(quirks, S_IRUGO, show_quirks, NULL); 191static DEVICE_ATTR(quirks, S_IRUGO, show_quirks, NULL);
192 192
193static ssize_t 193static ssize_t
194show_avoid_reset_quirk(struct device *dev, struct device_attribute *attr, char *buf)
195{
196 struct usb_device *udev;
197
198 udev = to_usb_device(dev);
199 return sprintf(buf, "%d\n", !!(udev->quirks & USB_QUIRK_RESET_MORPHS));
200}
201
202static ssize_t
203set_avoid_reset_quirk(struct device *dev, struct device_attribute *attr,
204 const char *buf, size_t count)
205{
206 struct usb_device *udev = to_usb_device(dev);
207 int config;
208
209 if (sscanf(buf, "%d", &config) != 1 || config < 0 || config > 1)
210 return -EINVAL;
211 usb_lock_device(udev);
212 if (config)
213 udev->quirks |= USB_QUIRK_RESET_MORPHS;
214 else
215 udev->quirks &= ~USB_QUIRK_RESET_MORPHS;
216 usb_unlock_device(udev);
217 return count;
218}
219
220static DEVICE_ATTR(avoid_reset_quirk, S_IRUGO | S_IWUSR,
221 show_avoid_reset_quirk, set_avoid_reset_quirk);
222
223static ssize_t
194show_urbnum(struct device *dev, struct device_attribute *attr, char *buf) 224show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
195{ 225{
196 struct usb_device *udev; 226 struct usb_device *udev;
@@ -226,9 +256,10 @@ set_persist(struct device *dev, struct device_attribute *attr,
226 256
227 if (sscanf(buf, "%d", &value) != 1) 257 if (sscanf(buf, "%d", &value) != 1)
228 return -EINVAL; 258 return -EINVAL;
229 usb_pm_lock(udev); 259
260 usb_lock_device(udev);
230 udev->persist_enabled = !!value; 261 udev->persist_enabled = !!value;
231 usb_pm_unlock(udev); 262 usb_unlock_device(udev);
232 return count; 263 return count;
233} 264}
234 265
@@ -315,20 +346,34 @@ set_autosuspend(struct device *dev, struct device_attribute *attr,
315 const char *buf, size_t count) 346 const char *buf, size_t count)
316{ 347{
317 struct usb_device *udev = to_usb_device(dev); 348 struct usb_device *udev = to_usb_device(dev);
318 int value; 349 int value, old_delay;
350 int rc;
319 351
320 if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/HZ || 352 if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/HZ ||
321 value <= - INT_MAX/HZ) 353 value <= - INT_MAX/HZ)
322 return -EINVAL; 354 return -EINVAL;
323 value *= HZ; 355 value *= HZ;
324 356
357 usb_lock_device(udev);
358 old_delay = udev->autosuspend_delay;
325 udev->autosuspend_delay = value; 359 udev->autosuspend_delay = value;
326 if (value >= 0) 360
327 usb_try_autosuspend_device(udev); 361 if (old_delay < 0) { /* Autosuspend wasn't allowed */
328 else { 362 if (value >= 0)
329 if (usb_autoresume_device(udev) == 0)
330 usb_autosuspend_device(udev); 363 usb_autosuspend_device(udev);
364 } else { /* Autosuspend was allowed */
365 if (value < 0) {
366 rc = usb_autoresume_device(udev);
367 if (rc < 0) {
368 count = rc;
369 udev->autosuspend_delay = old_delay;
370 }
371 } else {
372 usb_try_autosuspend_device(udev);
373 }
331 } 374 }
375
376 usb_unlock_device(udev);
332 return count; 377 return count;
333} 378}
334 379
@@ -356,34 +401,25 @@ set_level(struct device *dev, struct device_attribute *attr,
356 struct usb_device *udev = to_usb_device(dev); 401 struct usb_device *udev = to_usb_device(dev);
357 int len = count; 402 int len = count;
358 char *cp; 403 char *cp;
359 int rc = 0; 404 int rc;
360 int old_autosuspend_disabled;
361 405
362 cp = memchr(buf, '\n', count); 406 cp = memchr(buf, '\n', count);
363 if (cp) 407 if (cp)
364 len = cp - buf; 408 len = cp - buf;
365 409
366 usb_lock_device(udev); 410 usb_lock_device(udev);
367 old_autosuspend_disabled = udev->autosuspend_disabled;
368 411
369 /* Setting the flags without calling usb_pm_lock is a subject to
370 * races, but who cares...
371 */
372 if (len == sizeof on_string - 1 && 412 if (len == sizeof on_string - 1 &&
373 strncmp(buf, on_string, len) == 0) { 413 strncmp(buf, on_string, len) == 0)
374 udev->autosuspend_disabled = 1; 414 rc = usb_disable_autosuspend(udev);
375 rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
376 415
377 } else if (len == sizeof auto_string - 1 && 416 else if (len == sizeof auto_string - 1 &&
378 strncmp(buf, auto_string, len) == 0) { 417 strncmp(buf, auto_string, len) == 0)
379 udev->autosuspend_disabled = 0; 418 rc = usb_enable_autosuspend(udev);
380 rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
381 419
382 } else 420 else
383 rc = -EINVAL; 421 rc = -EINVAL;
384 422
385 if (rc)
386 udev->autosuspend_disabled = old_autosuspend_disabled;
387 usb_unlock_device(udev); 423 usb_unlock_device(udev);
388 return (rc < 0 ? rc : count); 424 return (rc < 0 ? rc : count);
389} 425}
@@ -558,6 +594,7 @@ static struct attribute *dev_attrs[] = {
558 &dev_attr_version.attr, 594 &dev_attr_version.attr,
559 &dev_attr_maxchild.attr, 595 &dev_attr_maxchild.attr,
560 &dev_attr_quirks.attr, 596 &dev_attr_quirks.attr,
597 &dev_attr_avoid_reset_quirk.attr,
561 &dev_attr_authorized.attr, 598 &dev_attr_authorized.attr,
562 &dev_attr_remove.attr, 599 &dev_attr_remove.attr,
563 NULL, 600 NULL,
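
A quick note on the sysfs additions above: show_avoid_reset_quirk()/set_avoid_reset_quirk() follow the usual boolean-attribute pattern (parse one integer with sscanf, reject anything but 0 or 1, then set or clear a single quirk bit under the device lock), and the new attribute is wired into dev_attrs[] so it appears under the device's sysfs directory. A minimal userspace sketch of that store logic; the QUIRK_RESET_MORPHS value and the bare quirks variable are stand-ins, not the kernel definitions:

	#include <stdio.h>

	#define QUIRK_RESET_MORPHS	0x02	/* stand-in bit, not the kernel constant */

	static unsigned int quirks;		/* plays the role of udev->quirks */

	/* Mirrors the store handler: accept only "0" or "1", flip one flag bit. */
	static int set_avoid_reset(const char *buf)
	{
		int config;

		if (sscanf(buf, "%d", &config) != 1 || config < 0 || config > 1)
			return -1;		/* -EINVAL in the kernel version */
		if (config)
			quirks |= QUIRK_RESET_MORPHS;
		else
			quirks &= ~QUIRK_RESET_MORPHS;
		return 0;
	}

	int main(void)
	{
		set_avoid_reset("1\n");
		printf("after \"1\": %d\n", !!(quirks & QUIRK_RESET_MORPHS));	/* prints 1 */
		set_avoid_reset("0\n");
		printf("after \"0\": %d\n", !!(quirks & QUIRK_RESET_MORPHS));	/* prints 0 */
		return 0;
	}
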
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index e7cae1334693..27080561a1c2 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -387,6 +387,13 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
387 { 387 {
388 unsigned int orig_flags = urb->transfer_flags; 388 unsigned int orig_flags = urb->transfer_flags;
389 unsigned int allowed; 389 unsigned int allowed;
390 static int pipetypes[4] = {
391 PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
392 };
393
394 /* Check that the pipe's type matches the endpoint's type */
395 if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
396 return -EPIPE; /* The most suitable error code :-) */
390 397
391 /* enforce simple/standard policy */ 398 /* enforce simple/standard policy */
392 allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP | 399 allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
@@ -430,7 +437,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
430 case USB_ENDPOINT_XFER_INT: 437 case USB_ENDPOINT_XFER_INT:
431 /* too small? */ 438 /* too small? */
432 switch (dev->speed) { 439 switch (dev->speed) {
433 case USB_SPEED_VARIABLE: 440 case USB_SPEED_WIRELESS:
434 if (urb->interval < 6) 441 if (urb->interval < 6)
435 return -EINVAL; 442 return -EINVAL;
436 break; 443 break;
@@ -446,7 +453,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
446 if (urb->interval > (1 << 15)) 453 if (urb->interval > (1 << 15))
447 return -EINVAL; 454 return -EINVAL;
448 max = 1 << 15; 455 max = 1 << 15;
449 case USB_SPEED_VARIABLE: 456 case USB_SPEED_WIRELESS:
450 if (urb->interval > 16) 457 if (urb->interval > 16)
451 return -EINVAL; 458 return -EINVAL;
452 break; 459 break;
@@ -473,7 +480,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
473 default: 480 default:
474 return -EINVAL; 481 return -EINVAL;
475 } 482 }
476 if (dev->speed != USB_SPEED_VARIABLE) { 483 if (dev->speed != USB_SPEED_WIRELESS) {
477 /* Round down to a power of 2, no more than max */ 484 /* Round down to a power of 2, no more than max */
478 urb->interval = min(max, 1 << ilog2(urb->interval)); 485 urb->interval = min(max, 1 << ilog2(urb->interval));
479 } 486 }
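
The new check in usb_submit_urb() boils down to a four-entry lookup: each endpoint transfer type has exactly one matching pipe type, and a mismatch is rejected with -EPIPE before the URB ever reaches the HCD. A compile-and-run sketch of that mapping; the enum values are illustrative stand-ins for the kernel's PIPE_* and USB_ENDPOINT_XFER_* constants:

	#include <stdio.h>

	/* Illustrative values only -- stand-ins for the kernel's definitions. */
	enum { XFER_CONTROL, XFER_ISOC, XFER_BULK, XFER_INT };
	enum { PIPE_ISOCHRONOUS, PIPE_INTERRUPT, PIPE_CONTROL, PIPE_BULK };

	/* Same idea as the patch: index by endpoint transfer type,
	 * get the only pipe type allowed to go with it. */
	static const int pipetypes[4] = {
		[XFER_CONTROL] = PIPE_CONTROL,
		[XFER_ISOC]    = PIPE_ISOCHRONOUS,
		[XFER_BULK]    = PIPE_BULK,
		[XFER_INT]     = PIPE_INTERRUPT,
	};

	static int check_pipe(int pipe_type, int xfertype)
	{
		return pipe_type == pipetypes[xfertype] ? 0 : -1;	/* -EPIPE in the kernel */
	}

	int main(void)
	{
		printf("bulk pipe on bulk ep:    %d\n", check_pipe(PIPE_BULK, XFER_BULK));
		printf("control pipe on bulk ep: %d\n", check_pipe(PIPE_CONTROL, XFER_BULK));
		return 0;
	}
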
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 0daff0d968ba..1297e9b16a51 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -49,9 +49,6 @@ const char *usbcore_name = "usbcore";
49 49
50static int nousb; /* Disable USB when built into kernel image */ 50static int nousb; /* Disable USB when built into kernel image */
51 51
52/* Workqueue for autosuspend and for remote wakeup of root hubs */
53struct workqueue_struct *ksuspend_usb_wq;
54
55#ifdef CONFIG_USB_SUSPEND 52#ifdef CONFIG_USB_SUSPEND
56static int usb_autosuspend_delay = 2; /* Default delay value, 53static int usb_autosuspend_delay = 2; /* Default delay value,
57 * in seconds */ 54 * in seconds */
@@ -228,9 +225,6 @@ static void usb_release_dev(struct device *dev)
228 hcd = bus_to_hcd(udev->bus); 225 hcd = bus_to_hcd(udev->bus);
229 226
230 usb_destroy_configuration(udev); 227 usb_destroy_configuration(udev);
231 /* Root hubs aren't real devices, so don't free HCD resources */
232 if (hcd->driver->free_dev && udev->parent)
233 hcd->driver->free_dev(hcd, udev);
234 usb_put_hcd(hcd); 228 usb_put_hcd(hcd);
235 kfree(udev->product); 229 kfree(udev->product);
236 kfree(udev->manufacturer); 230 kfree(udev->manufacturer);
@@ -264,23 +258,6 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
264 258
265#ifdef CONFIG_PM 259#ifdef CONFIG_PM
266 260
267static int ksuspend_usb_init(void)
268{
269 /* This workqueue is supposed to be both freezable and
270 * singlethreaded. Its job doesn't justify running on more
271 * than one CPU.
272 */
273 ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd");
274 if (!ksuspend_usb_wq)
275 return -ENOMEM;
276 return 0;
277}
278
279static void ksuspend_usb_cleanup(void)
280{
281 destroy_workqueue(ksuspend_usb_wq);
282}
283
284/* USB device Power-Management thunks. 261/* USB device Power-Management thunks.
285 * There's no need to distinguish here between quiescing a USB device 262 * There's no need to distinguish here between quiescing a USB device
286 * and powering it down; the generic_suspend() routine takes care of 263 * and powering it down; the generic_suspend() routine takes care of
@@ -296,7 +273,7 @@ static int usb_dev_prepare(struct device *dev)
296static void usb_dev_complete(struct device *dev) 273static void usb_dev_complete(struct device *dev)
297{ 274{
298 /* Currently used only for rebinding interfaces */ 275 /* Currently used only for rebinding interfaces */
299 usb_resume(dev, PMSG_RESUME); /* Message event is meaningless */ 276 usb_resume(dev, PMSG_ON); /* FIXME: change to PMSG_COMPLETE */
300} 277}
301 278
302static int usb_dev_suspend(struct device *dev) 279static int usb_dev_suspend(struct device *dev)
@@ -342,9 +319,7 @@ static const struct dev_pm_ops usb_device_pm_ops = {
342 319
343#else 320#else
344 321
345#define ksuspend_usb_init() 0 322#define usb_device_pm_ops (*(struct dev_pm_ops *) NULL)
346#define ksuspend_usb_cleanup() do {} while (0)
347#define usb_device_pm_ops (*(struct dev_pm_ops *)0)
348 323
349#endif /* CONFIG_PM */ 324#endif /* CONFIG_PM */
350 325
@@ -472,9 +447,6 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
472 INIT_LIST_HEAD(&dev->filelist); 447 INIT_LIST_HEAD(&dev->filelist);
473 448
474#ifdef CONFIG_PM 449#ifdef CONFIG_PM
475 mutex_init(&dev->pm_mutex);
476 INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
477 INIT_WORK(&dev->autoresume, usb_autoresume_work);
478 dev->autosuspend_delay = usb_autosuspend_delay * HZ; 450 dev->autosuspend_delay = usb_autosuspend_delay * HZ;
479 dev->connect_time = jiffies; 451 dev->connect_time = jiffies;
480 dev->active_duration = -jiffies; 452 dev->active_duration = -jiffies;
@@ -1117,9 +1089,6 @@ static int __init usb_init(void)
1117 if (retval) 1089 if (retval)
1118 goto out; 1090 goto out;
1119 1091
1120 retval = ksuspend_usb_init();
1121 if (retval)
1122 goto out;
1123 retval = bus_register(&usb_bus_type); 1092 retval = bus_register(&usb_bus_type);
1124 if (retval) 1093 if (retval)
1125 goto bus_register_failed; 1094 goto bus_register_failed;
@@ -1159,7 +1128,7 @@ major_init_failed:
1159bus_notifier_failed: 1128bus_notifier_failed:
1160 bus_unregister(&usb_bus_type); 1129 bus_unregister(&usb_bus_type);
1161bus_register_failed: 1130bus_register_failed:
1162 ksuspend_usb_cleanup(); 1131 usb_debugfs_cleanup();
1163out: 1132out:
1164 return retval; 1133 return retval;
1165} 1134}
@@ -1181,7 +1150,6 @@ static void __exit usb_exit(void)
1181 usb_hub_cleanup(); 1150 usb_hub_cleanup();
1182 bus_unregister_notifier(&usb_bus_type, &usb_bus_nb); 1151 bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
1183 bus_unregister(&usb_bus_type); 1152 bus_unregister(&usb_bus_type);
1184 ksuspend_usb_cleanup();
1185 usb_debugfs_cleanup(); 1153 usb_debugfs_cleanup();
1186} 1154}
1187 1155
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 4c36c7f512a0..cd882203ad34 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -55,24 +55,8 @@ extern void usb_major_cleanup(void);
55extern int usb_suspend(struct device *dev, pm_message_t msg); 55extern int usb_suspend(struct device *dev, pm_message_t msg);
56extern int usb_resume(struct device *dev, pm_message_t msg); 56extern int usb_resume(struct device *dev, pm_message_t msg);
57 57
58extern void usb_autosuspend_work(struct work_struct *work);
59extern void usb_autoresume_work(struct work_struct *work);
60extern int usb_port_suspend(struct usb_device *dev, pm_message_t msg); 58extern int usb_port_suspend(struct usb_device *dev, pm_message_t msg);
61extern int usb_port_resume(struct usb_device *dev, pm_message_t msg); 59extern int usb_port_resume(struct usb_device *dev, pm_message_t msg);
62extern int usb_external_suspend_device(struct usb_device *udev,
63 pm_message_t msg);
64extern int usb_external_resume_device(struct usb_device *udev,
65 pm_message_t msg);
66
67static inline void usb_pm_lock(struct usb_device *udev)
68{
69 mutex_lock_nested(&udev->pm_mutex, udev->level);
70}
71
72static inline void usb_pm_unlock(struct usb_device *udev)
73{
74 mutex_unlock(&udev->pm_mutex);
75}
76 60
77#else 61#else
78 62
@@ -86,9 +70,6 @@ static inline int usb_port_resume(struct usb_device *udev, pm_message_t msg)
86 return 0; 70 return 0;
87} 71}
88 72
89static inline void usb_pm_lock(struct usb_device *udev) {}
90static inline void usb_pm_unlock(struct usb_device *udev) {}
91
92#endif 73#endif
93 74
94#ifdef CONFIG_USB_SUSPEND 75#ifdef CONFIG_USB_SUSPEND
@@ -96,6 +77,7 @@ static inline void usb_pm_unlock(struct usb_device *udev) {}
96extern void usb_autosuspend_device(struct usb_device *udev); 77extern void usb_autosuspend_device(struct usb_device *udev);
97extern void usb_try_autosuspend_device(struct usb_device *udev); 78extern void usb_try_autosuspend_device(struct usb_device *udev);
98extern int usb_autoresume_device(struct usb_device *udev); 79extern int usb_autoresume_device(struct usb_device *udev);
80extern int usb_remote_wakeup(struct usb_device *dev);
99 81
100#else 82#else
101 83
@@ -106,9 +88,13 @@ static inline int usb_autoresume_device(struct usb_device *udev)
106 return 0; 88 return 0;
107} 89}
108 90
91static inline int usb_remote_wakeup(struct usb_device *udev)
92{
93 return 0;
94}
95
109#endif 96#endif
110 97
111extern struct workqueue_struct *ksuspend_usb_wq;
112extern struct bus_type usb_bus_type; 98extern struct bus_type usb_bus_type;
113extern struct device_type usb_device_type; 99extern struct device_type usb_device_type;
114extern struct device_type usb_if_device_type; 100extern struct device_type usb_if_device_type;
@@ -138,23 +124,6 @@ static inline int is_usb_device_driver(struct device_driver *drv)
138 for_devices; 124 for_devices;
139} 125}
140 126
141/* Interfaces and their "power state" are owned by usbcore */
142
143static inline void mark_active(struct usb_interface *f)
144{
145 f->is_active = 1;
146}
147
148static inline void mark_quiesced(struct usb_interface *f)
149{
150 f->is_active = 0;
151}
152
153static inline int is_active(const struct usb_interface *f)
154{
155 return f->is_active;
156}
157
158 127
159/* for labeling diagnostics */ 128/* for labeling diagnostics */
160extern const char *usbcore_name; 129extern const char *usbcore_name;
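
The usb.h change follows the usual kernel pattern for optional features: with CONFIG_USB_SUSPEND enabled the real usb_remote_wakeup() is declared, otherwise a static inline stub returning 0 keeps callers compiling without sprinkling #ifdefs at every call site. A tiny stand-alone sketch of the pattern; FEATURE_X and do_feature() are made-up names:

	#include <stdio.h>

	#define FEATURE_X		/* comment this out to fall back to the stub */

	#ifdef FEATURE_X
	static int do_feature(int arg)
	{
		return arg * 2;		/* the "real" implementation */
	}
	#else
	static inline int do_feature(int arg)
	{
		return 0;		/* no-op stub: callers need no #ifdef */
	}
	#endif

	int main(void)
	{
		printf("do_feature(21) = %d\n", do_feature(21));
		return 0;
	}
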
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 2958a1271b20..6e98a3697844 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -66,8 +66,6 @@ static struct ehci_dev ehci_dev;
66 66
67#define USB_DEBUG_DEVNUM 127 67#define USB_DEBUG_DEVNUM 127
68 68
69#define DBGP_DATA_TOGGLE 0x8800
70
71#ifdef DBGP_DEBUG 69#ifdef DBGP_DEBUG
72#define dbgp_printk printk 70#define dbgp_printk printk
73static void dbgp_ehci_status(char *str) 71static void dbgp_ehci_status(char *str)
@@ -88,11 +86,6 @@ static inline void dbgp_ehci_status(char *str) { }
88static inline void dbgp_printk(const char *fmt, ...) { } 86static inline void dbgp_printk(const char *fmt, ...) { }
89#endif 87#endif
90 88
91static inline u32 dbgp_pid_update(u32 x, u32 tok)
92{
93 return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff);
94}
95
96static inline u32 dbgp_len_update(u32 x, u32 len) 89static inline u32 dbgp_len_update(u32 x, u32 len)
97{ 90{
98 return (x & ~0x0f) | (len & 0x0f); 91 return (x & ~0x0f) | (len & 0x0f);
@@ -136,6 +129,19 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
136 129
137#define DBGP_MAX_PACKET 8 130#define DBGP_MAX_PACKET 8
138#define DBGP_TIMEOUT (250 * 1000) 131#define DBGP_TIMEOUT (250 * 1000)
132#define DBGP_LOOPS 1000
133
134static inline u32 dbgp_pid_write_update(u32 x, u32 tok)
135{
136 static int data0 = USB_PID_DATA1;
137 data0 ^= USB_PID_DATA_TOGGLE;
138 return (x & 0xffff0000) | (data0 << 8) | (tok & 0xff);
139}
140
141static inline u32 dbgp_pid_read_update(u32 x, u32 tok)
142{
143 return (x & 0xffff0000) | (USB_PID_DATA0 << 8) | (tok & 0xff);
144}
139 145
140static int dbgp_wait_until_complete(void) 146static int dbgp_wait_until_complete(void)
141{ 147{
@@ -180,7 +186,7 @@ static int dbgp_wait_until_done(unsigned ctrl)
180{ 186{
181 u32 pids, lpid; 187 u32 pids, lpid;
182 int ret; 188 int ret;
183 int loop = 3; 189 int loop = DBGP_LOOPS;
184 190
185retry: 191retry:
186 writel(ctrl | DBGP_GO, &ehci_debug->control); 192 writel(ctrl | DBGP_GO, &ehci_debug->control);
@@ -197,6 +203,8 @@ retry:
197 */ 203 */
198 if (ret == -DBGP_TIMEOUT && !dbgp_not_safe) 204 if (ret == -DBGP_TIMEOUT && !dbgp_not_safe)
199 dbgp_not_safe = 1; 205 dbgp_not_safe = 1;
206 if (ret == -DBGP_ERR_BAD && --loop > 0)
207 goto retry;
200 return ret; 208 return ret;
201 } 209 }
202 210
@@ -245,12 +253,20 @@ static inline void dbgp_get_data(void *buf, int size)
245 bytes[i] = (hi >> (8*(i - 4))) & 0xff; 253 bytes[i] = (hi >> (8*(i - 4))) & 0xff;
246} 254}
247 255
248static int dbgp_out(u32 addr, const char *bytes, int size) 256static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
257 const char *bytes, int size)
249{ 258{
259 int ret;
260 u32 addr;
250 u32 pids, ctrl; 261 u32 pids, ctrl;
251 262
263 if (size > DBGP_MAX_PACKET)
264 return -1;
265
266 addr = DBGP_EPADDR(devnum, endpoint);
267
252 pids = readl(&ehci_debug->pids); 268 pids = readl(&ehci_debug->pids);
253 pids = dbgp_pid_update(pids, USB_PID_OUT); 269 pids = dbgp_pid_write_update(pids, USB_PID_OUT);
254 270
255 ctrl = readl(&ehci_debug->control); 271 ctrl = readl(&ehci_debug->control);
256 ctrl = dbgp_len_update(ctrl, size); 272 ctrl = dbgp_len_update(ctrl, size);
@@ -260,34 +276,7 @@ static int dbgp_out(u32 addr, const char *bytes, int size)
260 dbgp_set_data(bytes, size); 276 dbgp_set_data(bytes, size);
261 writel(addr, &ehci_debug->address); 277 writel(addr, &ehci_debug->address);
262 writel(pids, &ehci_debug->pids); 278 writel(pids, &ehci_debug->pids);
263 return dbgp_wait_until_done(ctrl); 279 ret = dbgp_wait_until_done(ctrl);
264}
265
266static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
267 const char *bytes, int size)
268{
269 int ret;
270 int loops = 5;
271 u32 addr;
272 if (size > DBGP_MAX_PACKET)
273 return -1;
274
275 addr = DBGP_EPADDR(devnum, endpoint);
276try_again:
277 if (loops--) {
278 ret = dbgp_out(addr, bytes, size);
279 if (ret == -DBGP_ERR_BAD) {
280 int try_loops = 3;
281 do {
282 /* Emit a dummy packet to re-sync communication
283 * with the debug device */
284 if (dbgp_out(addr, "12345678", 8) >= 0) {
285 udelay(2);
286 goto try_again;
287 }
288 } while (try_loops--);
289 }
290 }
291 280
292 return ret; 281 return ret;
293} 282}
@@ -304,7 +293,7 @@ static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
304 addr = DBGP_EPADDR(devnum, endpoint); 293 addr = DBGP_EPADDR(devnum, endpoint);
305 294
306 pids = readl(&ehci_debug->pids); 295 pids = readl(&ehci_debug->pids);
307 pids = dbgp_pid_update(pids, USB_PID_IN); 296 pids = dbgp_pid_read_update(pids, USB_PID_IN);
308 297
309 ctrl = readl(&ehci_debug->control); 298 ctrl = readl(&ehci_debug->control);
310 ctrl = dbgp_len_update(ctrl, size); 299 ctrl = dbgp_len_update(ctrl, size);
@@ -362,7 +351,6 @@ static int dbgp_control_msg(unsigned devnum, int requesttype,
362 return dbgp_bulk_read(devnum, 0, data, size); 351 return dbgp_bulk_read(devnum, 0, data, size);
363} 352}
364 353
365
366/* Find a PCI capability */ 354/* Find a PCI capability */
367static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap) 355static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap)
368{ 356{
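
The ehci-dbgp rework splits the old dbgp_pid_update() in two: writes keep their DATA0/DATA1 toggle in a function-local static and flip it by XOR on every call, while reads always restart at DATA0. A small demonstration of the XOR trick, assuming the usual USB PID byte encodings (DATA0 = 0xc3, DATA1 = 0x4b, so the two differ by the 0x88 mask the toggle helper flips with):

	#include <stdio.h>

	#define PID_DATA0	0xc3
	#define PID_DATA1	0x4b
	#define PID_DATA_TOGGLE	0x88	/* PID_DATA0 ^ PID_DATA1 */

	/* Mirrors dbgp_pid_write_update(): flip the toggle, splice it into
	 * bits 8..15 of the pids word, keep the token in bits 0..7. */
	static unsigned int pid_write_update(unsigned int pids, unsigned int tok)
	{
		static unsigned int data = PID_DATA1;	/* first flip yields DATA0 */

		data ^= PID_DATA_TOGGLE;
		return (pids & 0xffff0000) | (data << 8) | (tok & 0xff);
	}

	int main(void)
	{
		unsigned int pids = 0;
		int i;

		for (i = 0; i < 4; i++) {
			pids = pid_write_update(pids, 0xe1 /* arbitrary OUT token */);
			printf("write %d: data PID = 0x%02x\n", i, (pids >> 8) & 0xff);
		}
		return 0;	/* prints c3, 4b, c3, 4b */
	}
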
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index ee411206c699..7460cd797f45 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -812,6 +812,16 @@ config USB_CDC_COMPOSITE
812 Say "y" to link the driver statically, or "m" to build a 812 Say "y" to link the driver statically, or "m" to build a
813 dynamically linked module. 813 dynamically linked module.
814 814
815config USB_G_NOKIA
816 tristate "Nokia composite gadget"
817 depends on PHONET
818 help
819 The Nokia composite gadget provides support for acm, obex
820 and phonet in only one composite gadget driver.
821
822 It's only really useful for N900 hardware. If you're building
823 a kernel for N900, say Y or M here. If unsure, say N.
824
815config USB_G_MULTI 825config USB_G_MULTI
816 tristate "Multifunction Composite Gadget (EXPERIMENTAL)" 826 tristate "Multifunction Composite Gadget (EXPERIMENTAL)"
817 depends on BLOCK && NET 827 depends on BLOCK && NET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 2e2c047262b7..43b51da8d727 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -43,6 +43,7 @@ g_mass_storage-objs := mass_storage.o
43g_printer-objs := printer.o 43g_printer-objs := printer.o
44g_cdc-objs := cdc2.o 44g_cdc-objs := cdc2.o
45g_multi-objs := multi.o 45g_multi-objs := multi.o
46g_nokia-objs := nokia.o
46 47
47obj-$(CONFIG_USB_ZERO) += g_zero.o 48obj-$(CONFIG_USB_ZERO) += g_zero.o
48obj-$(CONFIG_USB_AUDIO) += g_audio.o 49obj-$(CONFIG_USB_AUDIO) += g_audio.o
@@ -55,4 +56,5 @@ obj-$(CONFIG_USB_G_PRINTER) += g_printer.o
55obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o 56obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o
56obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o 57obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
57obj-$(CONFIG_USB_G_MULTI) += g_multi.o 58obj-$(CONFIG_USB_G_MULTI) += g_multi.o
59obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
58 60
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 043e04db2a05..12ac9cd32a07 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1656,9 +1656,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
1656 if (!res) 1656 if (!res)
1657 return -ENXIO; 1657 return -ENXIO;
1658 1658
1659 if (!request_mem_region(res->start, 1659 if (!request_mem_region(res->start, resource_size(res), driver_name)) {
1660 res->end - res->start + 1,
1661 driver_name)) {
1662 DBG("someone's using UDC memory\n"); 1660 DBG("someone's using UDC memory\n");
1663 return -EBUSY; 1661 return -EBUSY;
1664 } 1662 }
@@ -1699,7 +1697,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
1699 udc->ep[3].maxpacket = 64; 1697 udc->ep[3].maxpacket = 64;
1700 } 1698 }
1701 1699
1702 udc->udp_baseaddr = ioremap(res->start, res->end - res->start + 1); 1700 udc->udp_baseaddr = ioremap(res->start, resource_size(res));
1703 if (!udc->udp_baseaddr) { 1701 if (!udc->udp_baseaddr) {
1704 retval = -ENOMEM; 1702 retval = -ENOMEM;
1705 goto fail0a; 1703 goto fail0a;
@@ -1781,7 +1779,7 @@ fail0a:
1781 if (cpu_is_at91rm9200()) 1779 if (cpu_is_at91rm9200())
1782 gpio_free(udc->board.pullup_pin); 1780 gpio_free(udc->board.pullup_pin);
1783fail0: 1781fail0:
1784 release_mem_region(res->start, res->end - res->start + 1); 1782 release_mem_region(res->start, resource_size(res));
1785 DBG("%s probe failed, %d\n", driver_name, retval); 1783 DBG("%s probe failed, %d\n", driver_name, retval);
1786 return retval; 1784 return retval;
1787} 1785}
@@ -1813,7 +1811,7 @@ static int __exit at91udc_remove(struct platform_device *pdev)
1813 gpio_free(udc->board.pullup_pin); 1811 gpio_free(udc->board.pullup_pin);
1814 1812
1815 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1813 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1816 release_mem_region(res->start, res->end - res->start + 1); 1814 release_mem_region(res->start, resource_size(res));
1817 1815
1818 clk_put(udc->iclk); 1816 clk_put(udc->iclk);
1819 clk_put(udc->fclk); 1817 clk_put(udc->fclk);
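
The at91_udc hunks (and the ioremap calls in atmel_usba_udc below) are purely cosmetic: for a struct resource with inclusive start/end addresses, resource_size(res) is defined as res->end - res->start + 1, so the open-coded arithmetic can be replaced without changing behaviour. A minimal check of the equivalence with a local stand-in type and arbitrary example addresses:

	#include <stdio.h>

	/* Stand-in for the kernel's struct resource: [start, end] is inclusive. */
	struct resource {
		unsigned long start;
		unsigned long end;
	};

	/* Same definition as the kernel helper. */
	static unsigned long resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}

	int main(void)
	{
		struct resource res = { .start = 0xfffa4000, .end = 0xfffa7fff };

		printf("open-coded: %#lx\n", res.end - res.start + 1);	/* 0x4000 */
		printf("helper:     %#lx\n", resource_size(&res));	/* 0x4000 */
		return 0;
	}
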
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 4e970cf0e29a..f79bdfe4bed9 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -320,7 +320,7 @@ static inline void usba_cleanup_debugfs(struct usba_udc *udc)
320static int vbus_is_present(struct usba_udc *udc) 320static int vbus_is_present(struct usba_udc *udc)
321{ 321{
322 if (gpio_is_valid(udc->vbus_pin)) 322 if (gpio_is_valid(udc->vbus_pin))
323 return gpio_get_value(udc->vbus_pin); 323 return gpio_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted;
324 324
325 /* No Vbus detection: Assume always present */ 325 /* No Vbus detection: Assume always present */
326 return 1; 326 return 1;
@@ -1763,7 +1763,7 @@ static irqreturn_t usba_vbus_irq(int irq, void *devid)
1763 if (!udc->driver) 1763 if (!udc->driver)
1764 goto out; 1764 goto out;
1765 1765
1766 vbus = gpio_get_value(udc->vbus_pin); 1766 vbus = vbus_is_present(udc);
1767 if (vbus != udc->vbus_prev) { 1767 if (vbus != udc->vbus_prev) {
1768 if (vbus) { 1768 if (vbus) {
1769 toggle_bias(1); 1769 toggle_bias(1);
@@ -1914,14 +1914,14 @@ static int __init usba_udc_probe(struct platform_device *pdev)
1914 udc->vbus_pin = -ENODEV; 1914 udc->vbus_pin = -ENODEV;
1915 1915
1916 ret = -ENOMEM; 1916 ret = -ENOMEM;
1917 udc->regs = ioremap(regs->start, regs->end - regs->start + 1); 1917 udc->regs = ioremap(regs->start, resource_size(regs));
1918 if (!udc->regs) { 1918 if (!udc->regs) {
1919 dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n"); 1919 dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
1920 goto err_map_regs; 1920 goto err_map_regs;
1921 } 1921 }
1922 dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n", 1922 dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
1923 (unsigned long)regs->start, udc->regs); 1923 (unsigned long)regs->start, udc->regs);
1924 udc->fifo = ioremap(fifo->start, fifo->end - fifo->start + 1); 1924 udc->fifo = ioremap(fifo->start, resource_size(fifo));
1925 if (!udc->fifo) { 1925 if (!udc->fifo) {
1926 dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n"); 1926 dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
1927 goto err_map_fifo; 1927 goto err_map_fifo;
@@ -2000,6 +2000,7 @@ static int __init usba_udc_probe(struct platform_device *pdev)
2000 if (gpio_is_valid(pdata->vbus_pin)) { 2000 if (gpio_is_valid(pdata->vbus_pin)) {
2001 if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) { 2001 if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) {
2002 udc->vbus_pin = pdata->vbus_pin; 2002 udc->vbus_pin = pdata->vbus_pin;
2003 udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
2003 2004
2004 ret = request_irq(gpio_to_irq(udc->vbus_pin), 2005 ret = request_irq(gpio_to_irq(udc->vbus_pin),
2005 usba_vbus_irq, 0, 2006 usba_vbus_irq, 0,
diff --git a/drivers/usb/gadget/atmel_usba_udc.h b/drivers/usb/gadget/atmel_usba_udc.h
index f7baea307f0d..88a2e07a11a8 100644
--- a/drivers/usb/gadget/atmel_usba_udc.h
+++ b/drivers/usb/gadget/atmel_usba_udc.h
@@ -323,6 +323,7 @@ struct usba_udc {
323 struct platform_device *pdev; 323 struct platform_device *pdev;
324 int irq; 324 int irq;
325 int vbus_pin; 325 int vbus_pin;
326 int vbus_pin_inverted;
326 struct clk *pclk; 327 struct clk *pclk;
327 struct clk *hclk; 328 struct clk *hclk;
328 329
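
With the new vbus_pin_inverted field, the presence test becomes the raw GPIO level XOR-ed with the inversion flag, and the IRQ handler now goes through vbus_is_present() so both paths honour the board's polarity. A two-line truth-table demo of the XOR; the level and flag values are illustrative:

	#include <stdio.h>

	/* Mirrors: return gpio_get_value(pin) ^ udc->vbus_pin_inverted; */
	static int vbus_present(int gpio_level, int inverted)
	{
		return gpio_level ^ inverted;
	}

	int main(void)
	{
		int level, inv;

		for (inv = 0; inv <= 1; inv++)
			for (level = 0; level <= 1; level++)
				printf("level=%d inverted=%d -> present=%d\n",
				       level, inv, vbus_present(level, inv));
		return 0;
	}
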
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index cd0914ec898e..65a5f94cbc04 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -265,16 +265,24 @@ struct usb_ep * __init usb_ep_autoconfig (
265 return ep; 265 return ep;
266 } 266 }
267 267
268 } else if (gadget_is_sh (gadget) && USB_ENDPOINT_XFER_INT == type) { 268#ifdef CONFIG_BLACKFIN
269 /* single buffering is enough; maybe 8 byte fifo is too */ 269 } else if (gadget_is_musbhsfc(gadget) || gadget_is_musbhdrc(gadget)) {
270 ep = find_ep (gadget, "ep3in-bulk"); 270 if ((USB_ENDPOINT_XFER_BULK == type) ||
271 if (ep && ep_matches (gadget, ep, desc)) 271 (USB_ENDPOINT_XFER_ISOC == type)) {
272 return ep; 272 if (USB_DIR_IN & desc->bEndpointAddress)
273 273 ep = find_ep (gadget, "ep5in");
274 } else if (gadget_is_mq11xx (gadget) && USB_ENDPOINT_XFER_INT == type) { 274 else
275 ep = find_ep (gadget, "ep1-bulk"); 275 ep = find_ep (gadget, "ep6out");
276 } else if (USB_ENDPOINT_XFER_INT == type) {
277 if (USB_DIR_IN & desc->bEndpointAddress)
278 ep = find_ep(gadget, "ep1in");
279 else
280 ep = find_ep(gadget, "ep2out");
281 } else
282 ep = NULL;
276 if (ep && ep_matches (gadget, ep, desc)) 283 if (ep && ep_matches (gadget, ep, desc))
277 return ep; 284 return ep;
285#endif
278 } 286 }
279 287
280 /* Second, look at endpoints until an unclaimed one looks usable */ 288 /* Second, look at endpoints until an unclaimed one looks usable */
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 141372b6e7a1..400f80372d93 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -259,7 +259,7 @@ static struct usb_configuration rndis_config_driver = {
259 259
260/*-------------------------------------------------------------------------*/ 260/*-------------------------------------------------------------------------*/
261 261
262#ifdef USB_ETH_EEM 262#ifdef CONFIG_USB_ETH_EEM
263static int use_eem = 1; 263static int use_eem = 1;
264#else 264#else
265static int use_eem; 265static int use_eem;
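
The ether.c one-liner is worth calling out: Kconfig symbols are only visible to C code with their CONFIG_ prefix, so the old #ifdef USB_ETH_EEM never saw the option and use_eem silently defaulted to 0. A minimal illustration of the pattern; CONFIG_MY_FEATURE stands in for a Kconfig-provided macro:

	#include <stdio.h>

	/* A Kconfig option FOO reaches the compiler as CONFIG_FOO; testing the
	 * unprefixed name silently selects the #else branch.  Build with
	 * -DCONFIG_MY_FEATURE to see the other side. */
	#ifdef CONFIG_MY_FEATURE
	static int use_feature = 1;
	#else
	static int use_feature;
	#endif

	int main(void)
	{
		printf("use_feature = %d\n", use_feature);
		return 0;
	}
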
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index d10353d46b86..e49c7325dce2 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -702,14 +702,6 @@ acm_unbind(struct usb_configuration *c, struct usb_function *f)
702/* Some controllers can't support CDC ACM ... */ 702/* Some controllers can't support CDC ACM ... */
703static inline bool can_support_cdc(struct usb_configuration *c) 703static inline bool can_support_cdc(struct usb_configuration *c)
704{ 704{
705 /* SH3 doesn't support multiple interfaces */
706 if (gadget_is_sh(c->cdev->gadget))
707 return false;
708
709 /* sa1100 doesn't have a third interrupt endpoint */
710 if (gadget_is_sa1100(c->cdev->gadget))
711 return false;
712
713 /* everything else is *probably* fine ... */ 705 /* everything else is *probably* fine ... */
714 return true; 706 return true;
715} 707}
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index ecf5bdd0ae06..2fff530efc19 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -497,12 +497,9 @@ static int ecm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
497 struct net_device *net; 497 struct net_device *net;
498 498
499 /* Enable zlps by default for ECM conformance; 499 /* Enable zlps by default for ECM conformance;
500 * override for musb_hdrc (avoids txdma ovhead) 500 * override for musb_hdrc (avoids txdma ovhead).
501 * and sa1100 (can't).
502 */ 501 */
503 ecm->port.is_zlp_ok = !( 502 ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget)
504 gadget_is_sa1100(cdev->gadget)
505 || gadget_is_musbhdrc(cdev->gadget)
506 ); 503 );
507 ecm->port.cdc_filter = DEFAULT_FILTER; 504 ecm->port.cdc_filter = DEFAULT_FILTER;
508 DBG(cdev, "activate ecm\n"); 505 DBG(cdev, "activate ecm\n");
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a37640eba434..b1935fe156a0 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -368,7 +368,7 @@ struct fsg_common {
368 struct task_struct *thread_task; 368 struct task_struct *thread_task;
369 369
370 /* Callback function to call when thread exits. */ 370 /* Callback function to call when thread exits. */
371 void (*thread_exits)(struct fsg_common *common); 371 int (*thread_exits)(struct fsg_common *common);
372 /* Gadget's private data. */ 372 /* Gadget's private data. */
373 void *private_data; 373 void *private_data;
374 374
@@ -392,8 +392,12 @@ struct fsg_config {
392 const char *lun_name_format; 392 const char *lun_name_format;
393 const char *thread_name; 393 const char *thread_name;
394 394
395 /* Callback function to call when thread exits. */ 395 /* Callback function to call when thread exits. If no
396 void (*thread_exits)(struct fsg_common *common); 396 * callback is set or it returns a value lower than zero, MSF
397 * will force-eject all LUNs it operates on (including those
398 * marked as non-removable or with prevent_medium_removal flag
399 * set). */
400 int (*thread_exits)(struct fsg_common *common);
397 /* Gadget's private data. */ 401 /* Gadget's private data. */
398 void *private_data; 402 void *private_data;
399 403
@@ -614,7 +618,12 @@ static int fsg_setup(struct usb_function *f,
614 return -EDOM; 618 return -EDOM;
615 VDBG(fsg, "get max LUN\n"); 619 VDBG(fsg, "get max LUN\n");
616 *(u8 *) req->buf = fsg->common->nluns - 1; 620 *(u8 *) req->buf = fsg->common->nluns - 1;
617 return 1; 621
622 /* Respond with data/status */
623 req->length = min((u16)1, w_length);
624 fsg->common->ep0req_name =
625 ctrl->bRequestType & USB_DIR_IN ? "ep0-in" : "ep0-out";
626 return ep0_queue(fsg->common);
618 } 627 }
619 628
620 VDBG(fsg, 629 VDBG(fsg,
@@ -2524,14 +2533,6 @@ static void handle_exception(struct fsg_common *common)
2524 2533
2525 case FSG_STATE_CONFIG_CHANGE: 2534 case FSG_STATE_CONFIG_CHANGE:
2526 rc = do_set_config(common, new_config); 2535 rc = do_set_config(common, new_config);
2527 if (common->ep0_req_tag != exception_req_tag)
2528 break;
2529 if (rc != 0) { /* STALL on errors */
2530 DBG(common, "ep0 set halt\n");
2531 usb_ep_set_halt(common->ep0);
2532 } else { /* Complete the status stage */
2533 ep0_queue(common);
2534 }
2535 break; 2536 break;
2536 2537
2537 case FSG_STATE_EXIT: 2538 case FSG_STATE_EXIT:
@@ -2615,8 +2616,20 @@ static int fsg_main_thread(void *common_)
2615 common->thread_task = NULL; 2616 common->thread_task = NULL;
2616 spin_unlock_irq(&common->lock); 2617 spin_unlock_irq(&common->lock);
2617 2618
2618 if (common->thread_exits) 2619 if (!common->thread_exits || common->thread_exits(common) < 0) {
2619 common->thread_exits(common); 2620 struct fsg_lun *curlun = common->luns;
2621 unsigned i = common->nluns;
2622
2623 down_write(&common->filesem);
2624 for (; i--; ++curlun) {
2625 if (!fsg_lun_is_open(curlun))
2626 continue;
2627
2628 fsg_lun_close(curlun);
2629 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
2630 }
2631 up_write(&common->filesem);
2632 }
2620 2633
2621 /* Let the unbind and cleanup routines know the thread has exited */ 2634 /* Let the unbind and cleanup routines know the thread has exited */
2622 complete_and_exit(&common->thread_notifier, 0); 2635 complete_and_exit(&common->thread_notifier, 0);
@@ -2763,10 +2776,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
2763 if (cfg->release != 0xffff) { 2776 if (cfg->release != 0xffff) {
2764 i = cfg->release; 2777 i = cfg->release;
2765 } else { 2778 } else {
2766 /* The sa1100 controller is not supported */ 2779 i = usb_gadget_controller_number(gadget);
2767 i = gadget_is_sa1100(gadget)
2768 ? -1
2769 : usb_gadget_controller_number(gadget);
2770 if (i >= 0) { 2780 if (i >= 0) {
2771 i = 0x0300 + i; 2781 i = 0x0300 + i;
2772 } else { 2782 } else {
@@ -2791,8 +2801,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
2791 * disable stalls. 2801 * disable stalls.
2792 */ 2802 */
2793 common->can_stall = cfg->can_stall && 2803 common->can_stall = cfg->can_stall &&
2794 !(gadget_is_sh(common->gadget) || 2804 !(gadget_is_at91(common->gadget));
2795 gadget_is_at91(common->gadget));
2796 2805
2797 2806
2798 spin_lock_init(&common->lock); 2807 spin_lock_init(&common->lock);
@@ -2852,7 +2861,6 @@ error_release:
2852 /* Call fsg_common_release() directly, ref might be not 2861 /* Call fsg_common_release() directly, ref might be not
2853 * initialised */ 2862 * initialised */
2854 fsg_common_release(&common->ref); 2863 fsg_common_release(&common->ref);
2855 complete(&common->thread_notifier);
2856 return ERR_PTR(rc); 2864 return ERR_PTR(rc);
2857} 2865}
2858 2866
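
The thread_exits hook now returns an int, and fsg_main_thread() treats "no callback" and "callback returned a negative value" the same way: it falls back to force-ejecting every open LUN itself. A small sketch of that callback-with-fallback shape; the struct and function names are invented for the example:

	#include <stdio.h>

	struct common {
		int (*thread_exits)(struct common *c);	/* may be NULL */
	};

	static void force_eject_all(struct common *c)
	{
		printf("fallback: force-ejecting all LUNs\n");
	}

	/* Mirrors the end of the main thread: run the hook if present,
	 * fall back when it is missing or reports failure. */
	static void on_thread_exit(struct common *c)
	{
		if (!c->thread_exits || c->thread_exits(c) < 0)
			force_eject_all(c);
	}

	static int exits_ok(struct common *c)   { return 0; }
	static int exits_fail(struct common *c) { return -1; }

	int main(void)
	{
		struct common a = { .thread_exits = exits_ok };
		struct common b = { .thread_exits = exits_fail };
		struct common d = { .thread_exits = NULL };

		on_thread_exit(&a);	/* no fallback */
		on_thread_exit(&b);	/* fallback runs */
		on_thread_exit(&d);	/* fallback runs */
		return 0;
	}
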
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 95dae4c1ea40..a30e60c7f129 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -769,10 +769,6 @@ rndis_unbind(struct usb_configuration *c, struct usb_function *f)
769/* Some controllers can't support RNDIS ... */ 769/* Some controllers can't support RNDIS ... */
770static inline bool can_support_rndis(struct usb_configuration *c) 770static inline bool can_support_rndis(struct usb_configuration *c)
771{ 771{
772 /* only two endpoints on sa1100 */
773 if (gadget_is_sa1100(c->cdev->gadget))
774 return false;
775
776 /* everything else is *presumably* fine */ 772 /* everything else is *presumably* fine */
777 return true; 773 return true;
778} 774}
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 29dfb0277ffb..a90dd2db0488 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -3208,15 +3208,11 @@ static int __init check_parameters(struct fsg_dev *fsg)
3208 * halt bulk endpoints correctly. If one of them is present, 3208 * halt bulk endpoints correctly. If one of them is present,
3209 * disable stalls. 3209 * disable stalls.
3210 */ 3210 */
3211 if (gadget_is_sh(fsg->gadget) || gadget_is_at91(fsg->gadget)) 3211 if (gadget_is_at91(fsg->gadget))
3212 mod_data.can_stall = 0; 3212 mod_data.can_stall = 0;
3213 3213
3214 if (mod_data.release == 0xffff) { // Parameter wasn't set 3214 if (mod_data.release == 0xffff) { // Parameter wasn't set
3215 /* The sa1100 controller is not supported */ 3215 gcnum = usb_gadget_controller_number(fsg->gadget);
3216 if (gadget_is_sa1100(fsg->gadget))
3217 gcnum = -1;
3218 else
3219 gcnum = usb_gadget_controller_number(fsg->gadget);
3220 if (gcnum >= 0) 3216 if (gcnum >= 0)
3221 mod_data.release = 0x0300 + gcnum; 3217 mod_data.release = 0x0300 + gcnum;
3222 else { 3218 else {
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 7881f12413c4..3537d51073b2 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2749,7 +2749,7 @@ static int __devexit qe_udc_remove(struct of_device *ofdev)
2749} 2749}
2750 2750
2751/*-------------------------------------------------------------------------*/ 2751/*-------------------------------------------------------------------------*/
2752static struct of_device_id __devinitdata qe_udc_match[] = { 2752static const struct of_device_id qe_udc_match[] __devinitconst = {
2753 { 2753 {
2754 .compatible = "fsl,mpc8323-qe-usb", 2754 .compatible = "fsl,mpc8323-qe-usb",
2755 .data = (void *)PORT_QE, 2755 .data = (void *)PORT_QE,
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index f2d270b202f2..1edbc12fff18 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -45,46 +45,18 @@
45#define gadget_is_goku(g) 0 45#define gadget_is_goku(g) 0
46#endif 46#endif
47 47
48/* SH3 UDC -- not yet ported 2.4 --> 2.6 */
49#ifdef CONFIG_USB_GADGET_SUPERH
50#define gadget_is_sh(g) !strcmp("sh_udc", (g)->name)
51#else
52#define gadget_is_sh(g) 0
53#endif
54
55/* not yet stable on 2.6 (would help "original Zaurus") */
56#ifdef CONFIG_USB_GADGET_SA1100
57#define gadget_is_sa1100(g) !strcmp("sa1100_udc", (g)->name)
58#else
59#define gadget_is_sa1100(g) 0
60#endif
61
62#ifdef CONFIG_USB_GADGET_LH7A40X 48#ifdef CONFIG_USB_GADGET_LH7A40X
63#define gadget_is_lh7a40x(g) !strcmp("lh7a40x_udc", (g)->name) 49#define gadget_is_lh7a40x(g) !strcmp("lh7a40x_udc", (g)->name)
64#else 50#else
65#define gadget_is_lh7a40x(g) 0 51#define gadget_is_lh7a40x(g) 0
66#endif 52#endif
67 53
68/* handhelds.org tree (?) */
69#ifdef CONFIG_USB_GADGET_MQ11XX
70#define gadget_is_mq11xx(g) !strcmp("mq11xx_udc", (g)->name)
71#else
72#define gadget_is_mq11xx(g) 0
73#endif
74
75#ifdef CONFIG_USB_GADGET_OMAP 54#ifdef CONFIG_USB_GADGET_OMAP
76#define gadget_is_omap(g) !strcmp("omap_udc", (g)->name) 55#define gadget_is_omap(g) !strcmp("omap_udc", (g)->name)
77#else 56#else
78#define gadget_is_omap(g) 0 57#define gadget_is_omap(g) 0
79#endif 58#endif
80 59
81/* not yet ported 2.4 --> 2.6 */
82#ifdef CONFIG_USB_GADGET_N9604
83#define gadget_is_n9604(g) !strcmp("n9604_udc", (g)->name)
84#else
85#define gadget_is_n9604(g) 0
86#endif
87
88/* various unstable versions available */ 60/* various unstable versions available */
89#ifdef CONFIG_USB_GADGET_PXA27X 61#ifdef CONFIG_USB_GADGET_PXA27X
90#define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name) 62#define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name)
@@ -122,14 +94,6 @@
122#define gadget_is_fsl_usb2(g) 0 94#define gadget_is_fsl_usb2(g) 0
123#endif 95#endif
124 96
125/* Mentor high speed function controller */
126/* from Montavista kernel (?) */
127#ifdef CONFIG_USB_GADGET_MUSBHSFC
128#define gadget_is_musbhsfc(g) !strcmp("musbhsfc_udc", (g)->name)
129#else
130#define gadget_is_musbhsfc(g) 0
131#endif
132
133/* Mentor high speed "dual role" controller, in peripheral role */ 97/* Mentor high speed "dual role" controller, in peripheral role */
134#ifdef CONFIG_USB_GADGET_MUSB_HDRC 98#ifdef CONFIG_USB_GADGET_MUSB_HDRC
135#define gadget_is_musbhdrc(g) !strcmp("musb_hdrc", (g)->name) 99#define gadget_is_musbhdrc(g) !strcmp("musb_hdrc", (g)->name)
@@ -143,13 +107,6 @@
143#define gadget_is_langwell(g) 0 107#define gadget_is_langwell(g) 0
144#endif 108#endif
145 109
146/* from Montavista kernel (?) */
147#ifdef CONFIG_USB_GADGET_MPC8272
148#define gadget_is_mpc8272(g) !strcmp("mpc8272_udc", (g)->name)
149#else
150#define gadget_is_mpc8272(g) 0
151#endif
152
153#ifdef CONFIG_USB_GADGET_M66592 110#ifdef CONFIG_USB_GADGET_M66592
154#define gadget_is_m66592(g) !strcmp("m66592_udc", (g)->name) 111#define gadget_is_m66592(g) !strcmp("m66592_udc", (g)->name)
155#else 112#else
@@ -203,20 +160,12 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
203 return 0x02; 160 return 0x02;
204 else if (gadget_is_pxa(gadget)) 161 else if (gadget_is_pxa(gadget))
205 return 0x03; 162 return 0x03;
206 else if (gadget_is_sh(gadget))
207 return 0x04;
208 else if (gadget_is_sa1100(gadget))
209 return 0x05;
210 else if (gadget_is_goku(gadget)) 163 else if (gadget_is_goku(gadget))
211 return 0x06; 164 return 0x06;
212 else if (gadget_is_mq11xx(gadget))
213 return 0x07;
214 else if (gadget_is_omap(gadget)) 165 else if (gadget_is_omap(gadget))
215 return 0x08; 166 return 0x08;
216 else if (gadget_is_lh7a40x(gadget)) 167 else if (gadget_is_lh7a40x(gadget))
217 return 0x09; 168 return 0x09;
218 else if (gadget_is_n9604(gadget))
219 return 0x10;
220 else if (gadget_is_pxa27x(gadget)) 169 else if (gadget_is_pxa27x(gadget))
221 return 0x11; 170 return 0x11;
222 else if (gadget_is_s3c2410(gadget)) 171 else if (gadget_is_s3c2410(gadget))
@@ -225,12 +174,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
225 return 0x13; 174 return 0x13;
226 else if (gadget_is_imx(gadget)) 175 else if (gadget_is_imx(gadget))
227 return 0x14; 176 return 0x14;
228 else if (gadget_is_musbhsfc(gadget))
229 return 0x15;
230 else if (gadget_is_musbhdrc(gadget)) 177 else if (gadget_is_musbhdrc(gadget))
231 return 0x16; 178 return 0x16;
232 else if (gadget_is_mpc8272(gadget))
233 return 0x17;
234 else if (gadget_is_atmel_usba(gadget)) 179 else if (gadget_is_atmel_usba(gadget))
235 return 0x18; 180 return 0x18;
236 else if (gadget_is_fsl_usb2(gadget)) 181 else if (gadget_is_fsl_usb2(gadget))
@@ -265,10 +210,6 @@ static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
265 if (gadget_is_pxa27x(gadget)) 210 if (gadget_is_pxa27x(gadget))
266 return false; 211 return false;
267 212
268 /* SH3 hardware just doesn't do altsettings */
269 if (gadget_is_sh(gadget))
270 return false;
271
272 /* Everything else is *presumably* fine ... */ 213 /* Everything else is *presumably* fine ... */
273 return true; 214 return true;
274} 215}
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 5f6a2e0a9357..04f6224b7e06 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -618,11 +618,6 @@ gmidi_set_config(struct gmidi_device *dev, unsigned number, gfp_t gfp_flags)
618 } 618 }
619#endif 619#endif
620 620
621 if (gadget_is_sa1100(gadget) && dev->config) {
622 /* tx fifo is full, but we can't clear it...*/
623 ERROR(dev, "can't change configurations\n");
624 return -ESPIPE;
625 }
626 gmidi_reset_config(dev); 621 gmidi_reset_config(dev);
627 622
628 switch (number) { 623 switch (number) {
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 112bb40a427c..e8edc640381e 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1859,7 +1859,7 @@ done:
1859 1859
1860/*-------------------------------------------------------------------------*/ 1860/*-------------------------------------------------------------------------*/
1861 1861
1862static struct pci_device_id pci_ids [] = { { 1862static const struct pci_device_id pci_ids[] = { {
1863 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 1863 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
1864 .class_mask = ~0, 1864 .class_mask = ~0,
1865 .vendor = 0x102f, /* Toshiba */ 1865 .vendor = 0x102f, /* Toshiba */
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index bf0f6520c6df..de8a83803505 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -194,7 +194,7 @@ enum ep_state {
194}; 194};
195 195
196struct ep_data { 196struct ep_data {
197 struct semaphore lock; 197 struct mutex lock;
198 enum ep_state state; 198 enum ep_state state;
199 atomic_t count; 199 atomic_t count;
200 struct dev_data *dev; 200 struct dev_data *dev;
@@ -298,10 +298,10 @@ get_ready_ep (unsigned f_flags, struct ep_data *epdata)
298 int val; 298 int val;
299 299
300 if (f_flags & O_NONBLOCK) { 300 if (f_flags & O_NONBLOCK) {
301 if (down_trylock (&epdata->lock) != 0) 301 if (!mutex_trylock(&epdata->lock))
302 goto nonblock; 302 goto nonblock;
303 if (epdata->state != STATE_EP_ENABLED) { 303 if (epdata->state != STATE_EP_ENABLED) {
304 up (&epdata->lock); 304 mutex_unlock(&epdata->lock);
305nonblock: 305nonblock:
306 val = -EAGAIN; 306 val = -EAGAIN;
307 } else 307 } else
@@ -309,7 +309,8 @@ nonblock:
309 return val; 309 return val;
310 } 310 }
311 311
312 if ((val = down_interruptible (&epdata->lock)) < 0) 312 val = mutex_lock_interruptible(&epdata->lock);
313 if (val < 0)
313 return val; 314 return val;
314 315
315 switch (epdata->state) { 316 switch (epdata->state) {
@@ -323,7 +324,7 @@ nonblock:
323 // FALLTHROUGH 324 // FALLTHROUGH
324 case STATE_EP_UNBOUND: /* clean disconnect */ 325 case STATE_EP_UNBOUND: /* clean disconnect */
325 val = -ENODEV; 326 val = -ENODEV;
326 up (&epdata->lock); 327 mutex_unlock(&epdata->lock);
327 } 328 }
328 return val; 329 return val;
329} 330}
@@ -393,7 +394,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
393 if (likely (data->ep != NULL)) 394 if (likely (data->ep != NULL))
394 usb_ep_set_halt (data->ep); 395 usb_ep_set_halt (data->ep);
395 spin_unlock_irq (&data->dev->lock); 396 spin_unlock_irq (&data->dev->lock);
396 up (&data->lock); 397 mutex_unlock(&data->lock);
397 return -EBADMSG; 398 return -EBADMSG;
398 } 399 }
399 400
@@ -411,7 +412,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
411 value = -EFAULT; 412 value = -EFAULT;
412 413
413free1: 414free1:
414 up (&data->lock); 415 mutex_unlock(&data->lock);
415 kfree (kbuf); 416 kfree (kbuf);
416 return value; 417 return value;
417} 418}
@@ -436,7 +437,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
436 if (likely (data->ep != NULL)) 437 if (likely (data->ep != NULL))
437 usb_ep_set_halt (data->ep); 438 usb_ep_set_halt (data->ep);
438 spin_unlock_irq (&data->dev->lock); 439 spin_unlock_irq (&data->dev->lock);
439 up (&data->lock); 440 mutex_unlock(&data->lock);
440 return -EBADMSG; 441 return -EBADMSG;
441 } 442 }
442 443
@@ -455,7 +456,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
455 VDEBUG (data->dev, "%s write %zu IN, status %d\n", 456 VDEBUG (data->dev, "%s write %zu IN, status %d\n",
456 data->name, len, (int) value); 457 data->name, len, (int) value);
457free1: 458free1:
458 up (&data->lock); 459 mutex_unlock(&data->lock);
459 kfree (kbuf); 460 kfree (kbuf);
460 return value; 461 return value;
461} 462}
@@ -466,7 +467,8 @@ ep_release (struct inode *inode, struct file *fd)
466 struct ep_data *data = fd->private_data; 467 struct ep_data *data = fd->private_data;
467 int value; 468 int value;
468 469
469 if ((value = down_interruptible(&data->lock)) < 0) 470 value = mutex_lock_interruptible(&data->lock);
471 if (value < 0)
470 return value; 472 return value;
471 473
472 /* clean up if this can be reopened */ 474 /* clean up if this can be reopened */
@@ -476,7 +478,7 @@ ep_release (struct inode *inode, struct file *fd)
476 data->hs_desc.bDescriptorType = 0; 478 data->hs_desc.bDescriptorType = 0;
477 usb_ep_disable(data->ep); 479 usb_ep_disable(data->ep);
478 } 480 }
479 up (&data->lock); 481 mutex_unlock(&data->lock);
480 put_ep (data); 482 put_ep (data);
481 return 0; 483 return 0;
482} 484}
@@ -507,7 +509,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
507 } else 509 } else
508 status = -ENODEV; 510 status = -ENODEV;
509 spin_unlock_irq (&data->dev->lock); 511 spin_unlock_irq (&data->dev->lock);
510 up (&data->lock); 512 mutex_unlock(&data->lock);
511 return status; 513 return status;
512} 514}
513 515
@@ -673,7 +675,7 @@ fail:
673 value = -ENODEV; 675 value = -ENODEV;
674 spin_unlock_irq(&epdata->dev->lock); 676 spin_unlock_irq(&epdata->dev->lock);
675 677
676 up(&epdata->lock); 678 mutex_unlock(&epdata->lock);
677 679
678 if (unlikely(value)) { 680 if (unlikely(value)) {
679 kfree(priv); 681 kfree(priv);
@@ -765,7 +767,8 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
765 u32 tag; 767 u32 tag;
766 int value, length = len; 768 int value, length = len;
767 769
768 if ((value = down_interruptible (&data->lock)) < 0) 770 value = mutex_lock_interruptible(&data->lock);
771 if (value < 0)
769 return value; 772 return value;
770 773
771 if (data->state != STATE_EP_READY) { 774 if (data->state != STATE_EP_READY) {
@@ -854,7 +857,7 @@ fail:
854 data->desc.bDescriptorType = 0; 857 data->desc.bDescriptorType = 0;
855 data->hs_desc.bDescriptorType = 0; 858 data->hs_desc.bDescriptorType = 0;
856 } 859 }
857 up (&data->lock); 860 mutex_unlock(&data->lock);
858 return value; 861 return value;
859fail0: 862fail0:
860 value = -EINVAL; 863 value = -EINVAL;
@@ -870,7 +873,7 @@ ep_open (struct inode *inode, struct file *fd)
870 struct ep_data *data = inode->i_private; 873 struct ep_data *data = inode->i_private;
871 int value = -EBUSY; 874 int value = -EBUSY;
872 875
873 if (down_interruptible (&data->lock) != 0) 876 if (mutex_lock_interruptible(&data->lock) != 0)
874 return -EINTR; 877 return -EINTR;
875 spin_lock_irq (&data->dev->lock); 878 spin_lock_irq (&data->dev->lock);
876 if (data->dev->state == STATE_DEV_UNBOUND) 879 if (data->dev->state == STATE_DEV_UNBOUND)
@@ -885,7 +888,7 @@ ep_open (struct inode *inode, struct file *fd)
885 DBG (data->dev, "%s state %d\n", 888 DBG (data->dev, "%s state %d\n",
886 data->name, data->state); 889 data->name, data->state);
887 spin_unlock_irq (&data->dev->lock); 890 spin_unlock_irq (&data->dev->lock);
888 up (&data->lock); 891 mutex_unlock(&data->lock);
889 return value; 892 return value;
890} 893}
891 894
@@ -1631,7 +1634,7 @@ static int activate_ep_files (struct dev_data *dev)
1631 if (!data) 1634 if (!data)
1632 goto enomem0; 1635 goto enomem0;
1633 data->state = STATE_EP_DISABLED; 1636 data->state = STATE_EP_DISABLED;
1634 init_MUTEX (&data->lock); 1637 mutex_init(&data->lock);
1635 init_waitqueue_head (&data->wait); 1638 init_waitqueue_head (&data->wait);
1636 1639
1637 strncpy (data->name, ep->name, sizeof (data->name) - 1); 1640 strncpy (data->name, ep->name, sizeof (data->name) - 1);
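
The inode.c conversion replaces the ep_data semaphore with a struct mutex: init_MUTEX() becomes mutex_init(), up() becomes mutex_unlock(), down_interruptible() becomes mutex_lock_interruptible(), and the trylock test inverts its sense because down_trylock() returns nonzero on failure while mutex_trylock() returns nonzero on success. A userspace pthread sketch of the same nonblocking-vs-blocking shape used by get_ready_ep(); the error values and names are illustrative:

	/* build: cc -pthread demo.c */
	#include <pthread.h>
	#include <stdio.h>
	#include <errno.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Nonblocking callers try the lock and bail out with -EAGAIN;
	 * blocking callers simply take it. */
	static int get_ready(int nonblock)
	{
		if (nonblock) {
			if (pthread_mutex_trylock(&lock) != 0)
				return -EAGAIN;
			return 0;
		}
		if (pthread_mutex_lock(&lock) != 0)
			return -EINTR;	/* stands in for mutex_lock_interruptible() failing */
		return 0;
	}

	int main(void)
	{
		int ret = get_ready(1);			/* lock is free: succeeds */
		printf("first nonblocking attempt:  %d\n", ret);
		ret = get_ready(1);			/* already held: -EAGAIN */
		printf("second nonblocking attempt: %d\n", ret);
		pthread_mutex_unlock(&lock);
		return 0;
	}
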
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
index 19619fbf20ac..705cc1f76327 100644
--- a/drivers/usb/gadget/mass_storage.c
+++ b/drivers/usb/gadget/mass_storage.c
@@ -135,6 +135,12 @@ FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
135static unsigned long msg_registered = 0; 135static unsigned long msg_registered = 0;
136static void msg_cleanup(void); 136static void msg_cleanup(void);
137 137
138static int msg_thread_exits(struct fsg_common *common)
139{
140 msg_cleanup();
141 return 0;
142}
143
138static int __init msg_do_config(struct usb_configuration *c) 144static int __init msg_do_config(struct usb_configuration *c)
139{ 145{
140 struct fsg_common *common; 146 struct fsg_common *common;
@@ -147,7 +153,7 @@ static int __init msg_do_config(struct usb_configuration *c)
147 } 153 }
148 154
149 fsg_config_from_params(&config, &mod_data); 155 fsg_config_from_params(&config, &mod_data);
150 config.thread_exits = (void(*)(struct fsg_common*))&msg_cleanup; 156 config.thread_exits = msg_thread_exits;
151 common = fsg_common_init(0, c->cdev, &config); 157 common = fsg_common_init(0, c->cdev, &config);
152 if (IS_ERR(common)) 158 if (IS_ERR(common))
153 return PTR_ERR(common); 159 return PTR_ERR(common);
diff --git a/drivers/usb/gadget/nokia.c b/drivers/usb/gadget/nokia.c
new file mode 100644
index 000000000000..7d6b66a85724
--- /dev/null
+++ b/drivers/usb/gadget/nokia.c
@@ -0,0 +1,259 @@
1/*
2 * nokia.c -- Nokia Composite Gadget Driver
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Contact: Felipe Balbi <felipe.balbi@nokia.com>
6 *
7 * This gadget driver borrows from serial.c which is:
8 *
9 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
10 * Copyright (C) 2008 by David Brownell
11 * Copyright (C) 2008 by Nokia Corporation
12 *
13 * This software is distributed under the terms of the GNU General
14 * Public License ("GPL") as published by the Free Software Foundation,
15 * version 2 of that License.
16 */
17
18#include <linux/kernel.h>
19#include <linux/utsname.h>
20#include <linux/device.h>
21
22#include "u_serial.h"
23#include "u_ether.h"
24#include "u_phonet.h"
25#include "gadget_chips.h"
26
27/* Defines */
28
29#define NOKIA_VERSION_NUM 0x0211
30#define NOKIA_LONG_NAME "N900 (PC-Suite Mode)"
31
32/*-------------------------------------------------------------------------*/
33
34/*
35 * Kbuild is not very cooperative with respect to linking separately
36 * compiled library objects into one module. So for now we won't use
37 * separate compilation ... ensuring init/exit sections work to shrink
38 * the runtime footprint, and giving us at least some parts of what
39 * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
40 */
41#include "composite.c"
42#include "usbstring.c"
43#include "config.c"
44#include "epautoconf.c"
45
46#include "u_serial.c"
47#include "f_acm.c"
48#include "f_ecm.c"
49#include "f_obex.c"
50#include "f_serial.c"
51#include "f_phonet.c"
52#include "u_ether.c"
53
54/*-------------------------------------------------------------------------*/
55
56#define NOKIA_VENDOR_ID 0x0421 /* Nokia */
57#define NOKIA_PRODUCT_ID 0x01c8 /* Nokia Gadget */
58
59/* string IDs are assigned dynamically */
60
61#define STRING_MANUFACTURER_IDX 0
62#define STRING_PRODUCT_IDX 1
63#define STRING_DESCRIPTION_IDX 2
64
65static char manufacturer_nokia[] = "Nokia";
66static const char product_nokia[] = NOKIA_LONG_NAME;
67static const char description_nokia[] = "PC-Suite Configuration";
68
69static struct usb_string strings_dev[] = {
70 [STRING_MANUFACTURER_IDX].s = manufacturer_nokia,
71 [STRING_PRODUCT_IDX].s = NOKIA_LONG_NAME,
72 [STRING_DESCRIPTION_IDX].s = description_nokia,
73 { } /* end of list */
74};
75
76static struct usb_gadget_strings stringtab_dev = {
77 .language = 0x0409, /* en-us */
78 .strings = strings_dev,
79};
80
81static struct usb_gadget_strings *dev_strings[] = {
82 &stringtab_dev,
83 NULL,
84};
85
86static struct usb_device_descriptor device_desc = {
87 .bLength = USB_DT_DEVICE_SIZE,
88 .bDescriptorType = USB_DT_DEVICE,
89 .bcdUSB = __constant_cpu_to_le16(0x0200),
90 .bDeviceClass = USB_CLASS_COMM,
91 .idVendor = __constant_cpu_to_le16(NOKIA_VENDOR_ID),
92 .idProduct = __constant_cpu_to_le16(NOKIA_PRODUCT_ID),
93 /* .iManufacturer = DYNAMIC */
94 /* .iProduct = DYNAMIC */
95 .bNumConfigurations = 1,
96};
97
98/*-------------------------------------------------------------------------*/
99
100/* Module */
101MODULE_DESCRIPTION("Nokia composite gadget driver for N900");
102MODULE_AUTHOR("Felipe Balbi");
103MODULE_LICENSE("GPL");
104
105/*-------------------------------------------------------------------------*/
106
107static u8 hostaddr[ETH_ALEN];
108
109static int __init nokia_bind_config(struct usb_configuration *c)
110{
111 int status = 0;
112
113 status = phonet_bind_config(c);
114 if (status)
115 printk(KERN_DEBUG "could not bind phonet config\n");
116
117 status = obex_bind_config(c, 0);
118 if (status)
119 printk(KERN_DEBUG "could not bind obex config %d\n", 0);
120
121 status = obex_bind_config(c, 1);
122 if (status)
123 printk(KERN_DEBUG "could not bind obex config %d\n", 1);
124
125 status = acm_bind_config(c, 2);
126 if (status)
127 printk(KERN_DEBUG "could not bind acm config\n");
128
129 status = ecm_bind_config(c, hostaddr);
130 if (status)
131 printk(KERN_DEBUG "could not bind ecm config\n");
132
133 return status;
134}
135
136static struct usb_configuration nokia_config_500ma_driver = {
137 .label = "Bus Powered",
138 .bind = nokia_bind_config,
139 .bConfigurationValue = 1,
140 /* .iConfiguration = DYNAMIC */
141 .bmAttributes = USB_CONFIG_ATT_ONE,
142 .bMaxPower = 250, /* 500mA */
143};
144
145static struct usb_configuration nokia_config_100ma_driver = {
146 .label = "Self Powered",
147 .bind = nokia_bind_config,
148 .bConfigurationValue = 2,
149 /* .iConfiguration = DYNAMIC */
150 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
151 .bMaxPower = 50, /* 100 mA */
152};
153
154static int __init nokia_bind(struct usb_composite_dev *cdev)
155{
156 int gcnum;
157 struct usb_gadget *gadget = cdev->gadget;
158 int status;
159
160 status = gphonet_setup(cdev->gadget);
161 if (status < 0)
162 goto err_phonet;
163
164 status = gserial_setup(cdev->gadget, 3);
165 if (status < 0)
166 goto err_serial;
167
168 status = gether_setup(cdev->gadget, hostaddr);
169 if (status < 0)
170 goto err_ether;
171
172 status = usb_string_id(cdev);
173 if (status < 0)
174 goto err_usb;
175 strings_dev[STRING_MANUFACTURER_IDX].id = status;
176
177 device_desc.iManufacturer = status;
178
179 status = usb_string_id(cdev);
180 if (status < 0)
181 goto err_usb;
182 strings_dev[STRING_PRODUCT_IDX].id = status;
183
184 device_desc.iProduct = status;
185
186 /* config description */
187 status = usb_string_id(cdev);
188 if (status < 0)
189 goto err_usb;
190 strings_dev[STRING_DESCRIPTION_IDX].id = status;
191
192 nokia_config_500ma_driver.iConfiguration = status;
193 nokia_config_100ma_driver.iConfiguration = status;
194
195 /* set up other descriptors */
196 gcnum = usb_gadget_controller_number(gadget);
197 if (gcnum >= 0)
198 device_desc.bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM);
199 else {
200 /* this should only work with hw that supports altsettings
201 * and several endpoints, anything else, panic.
202 */
203 pr_err("nokia_bind: controller '%s' not recognized\n",
204 gadget->name);
205 goto err_usb;
206 }
207
208 /* finally register the configuration */
209 status = usb_add_config(cdev, &nokia_config_500ma_driver);
210 if (status < 0)
211 goto err_usb;
212
213 status = usb_add_config(cdev, &nokia_config_100ma_driver);
214 if (status < 0)
215 goto err_usb;
216
217 dev_info(&gadget->dev, "%s\n", NOKIA_LONG_NAME);
218
219 return 0;
220
221err_usb:
222 gether_cleanup();
223err_ether:
224 gserial_cleanup();
225err_serial:
226 gphonet_cleanup();
227err_phonet:
228 return status;
229}
230
231static int __exit nokia_unbind(struct usb_composite_dev *cdev)
232{
233 gphonet_cleanup();
234 gserial_cleanup();
235 gether_cleanup();
236
237 return 0;
238}
239
240static struct usb_composite_driver nokia_driver = {
241 .name = "g_nokia",
242 .dev = &device_desc,
243 .strings = dev_strings,
244 .bind = nokia_bind,
245 .unbind = __exit_p(nokia_unbind),
246};
247
248static int __init nokia_init(void)
249{
250 return usb_composite_register(&nokia_driver);
251}
252module_init(nokia_init);
253
254static void __exit nokia_cleanup(void)
255{
256 usb_composite_unregister(&nokia_driver);
257}
258module_exit(nokia_cleanup);
259
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 2d867fd22413..6b8bf8c781c4 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -949,12 +949,6 @@ printer_set_config(struct printer_dev *dev, unsigned number)
949 int result = 0; 949 int result = 0;
950 struct usb_gadget *gadget = dev->gadget; 950 struct usb_gadget *gadget = dev->gadget;
951 951
952 if (gadget_is_sa1100(gadget) && dev->config) {
953 /* tx fifo is full, but we can't clear it...*/
954 INFO(dev, "can't change configurations\n");
955 return -ESPIPE;
956 }
957
958 switch (number) { 952 switch (number) {
959 case DEV_CONFIG_VALUE: 953 case DEV_CONFIG_VALUE:
960 result = 0; 954 result = 0;
@@ -1033,12 +1027,6 @@ set_interface(struct printer_dev *dev, unsigned number)
1033{ 1027{
1034 int result = 0; 1028 int result = 0;
1035 1029
1036 if (gadget_is_sa1100(dev->gadget) && dev->interface < 0) {
1037 /* tx fifo is full, but we can't clear it...*/
1038 INFO(dev, "can't change interfaces\n");
1039 return -ESPIPE;
1040 }
1041
1042 /* Free the current interface */ 1030 /* Free the current interface */
1043 switch (dev->interface) { 1031 switch (dev->interface) {
1044 case PRINTER_INTERFACE: 1032 case PRINTER_INTERFACE:
@@ -1392,12 +1380,6 @@ printer_bind(struct usb_gadget *gadget)
1392 goto fail; 1380 goto fail;
1393 } 1381 }
1394 1382
1395 if (gadget_is_sa1100(gadget)) {
1396 /* hardware can't write zero length packets. */
1397 ERROR(dev, "SA1100 controller is unsupport by this driver\n");
1398 goto fail;
1399 }
1400
1401 gcnum = usb_gadget_controller_number(gadget); 1383 gcnum = usb_gadget_controller_number(gadget);
1402 if (gcnum >= 0) { 1384 if (gcnum >= 0) {
1403 device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum); 1385 device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index adda1208a1ec..05b892c3d686 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -742,13 +742,17 @@ static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
742 * @ep: pxa physical endpoint 742 * @ep: pxa physical endpoint
743 * @req: pxa request 743 * @req: pxa request
744 * @status: usb request status sent to gadget API 744 * @status: usb request status sent to gadget API
745 * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
745 * 746 *
746 * Context: ep->lock held 747 * Context: ep->lock held if flags not NULL, else ep->lock released
747 * 748 *
748 * Retire a pxa27x usb request. Endpoint must be locked. 749 * Retire a pxa27x usb request. Endpoint must be locked.
749 */ 750 */
750static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status) 751static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
752 unsigned long *pflags)
751{ 753{
754 unsigned long flags;
755
752 ep_del_request(ep, req); 756 ep_del_request(ep, req);
753 if (likely(req->req.status == -EINPROGRESS)) 757 if (likely(req->req.status == -EINPROGRESS))
754 req->req.status = status; 758 req->req.status = status;
@@ -760,38 +764,48 @@ static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status)
760 &req->req, status, 764 &req->req, status,
761 req->req.actual, req->req.length); 765 req->req.actual, req->req.length);
762 766
767 if (pflags)
768 spin_unlock_irqrestore(&ep->lock, *pflags);
769 local_irq_save(flags);
763 req->req.complete(&req->udc_usb_ep->usb_ep, &req->req); 770 req->req.complete(&req->udc_usb_ep->usb_ep, &req->req);
771 local_irq_restore(flags);
772 if (pflags)
773 spin_lock_irqsave(&ep->lock, *pflags);
764} 774}
765 775
766/** 776/**
767 * ep_end_out_req - Ends endpoint OUT request 777 * ep_end_out_req - Ends endpoint OUT request
768 * @ep: physical endpoint 778 * @ep: physical endpoint
769 * @req: pxa request 779 * @req: pxa request
780 * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
770 * 781 *
771 * Context: ep->lock held 782 * Context: ep->lock held or released (see req_done())
772 * 783 *
773 * Ends endpoint OUT request (completes usb request). 784 * Ends endpoint OUT request (completes usb request).
774 */ 785 */
775static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req) 786static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
787 unsigned long *pflags)
776{ 788{
777 inc_ep_stats_reqs(ep, !USB_DIR_IN); 789 inc_ep_stats_reqs(ep, !USB_DIR_IN);
778 req_done(ep, req, 0); 790 req_done(ep, req, 0, pflags);
779} 791}
780 792
781/** 793/**
782 * ep0_end_out_req - Ends control endpoint OUT request (ends data stage) 794 * ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
783 * @ep: physical endpoint 795 * @ep: physical endpoint
784 * @req: pxa request 796 * @req: pxa request
797 * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
785 * 798 *
786 * Context: ep->lock held 799 * Context: ep->lock held or released (see req_done())
787 * 800 *
788 * Ends control endpoint OUT request (completes usb request), and puts 801 * Ends control endpoint OUT request (completes usb request), and puts
789 * control endpoint into idle state 802 * control endpoint into idle state
790 */ 803 */
791static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req) 804static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
805 unsigned long *pflags)
792{ 806{
793 set_ep0state(ep->dev, OUT_STATUS_STAGE); 807 set_ep0state(ep->dev, OUT_STATUS_STAGE);
794 ep_end_out_req(ep, req); 808 ep_end_out_req(ep, req, pflags);
795 ep0_idle(ep->dev); 809 ep0_idle(ep->dev);
796} 810}
797 811
@@ -799,31 +813,35 @@ static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
799 * ep_end_in_req - Ends endpoint IN request 813 * ep_end_in_req - Ends endpoint IN request
800 * @ep: physical endpoint 814 * @ep: physical endpoint
801 * @req: pxa request 815 * @req: pxa request
816 * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
802 * 817 *
803 * Context: ep->lock held 818 * Context: ep->lock held or released (see req_done())
804 * 819 *
805 * Ends endpoint IN request (completes usb request). 820 * Ends endpoint IN request (completes usb request).
806 */ 821 */
807static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req) 822static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
823 unsigned long *pflags)
808{ 824{
809 inc_ep_stats_reqs(ep, USB_DIR_IN); 825 inc_ep_stats_reqs(ep, USB_DIR_IN);
810 req_done(ep, req, 0); 826 req_done(ep, req, 0, pflags);
811} 827}
812 828
813/** 829/**
814 * ep0_end_in_req - Ends control endpoint IN request (ends data stage) 830 * ep0_end_in_req - Ends control endpoint IN request (ends data stage)
815 * @ep: physical endpoint 831 * @ep: physical endpoint
816 * @req: pxa request 832 * @req: pxa request
833 * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
817 * 834 *
818 * Context: ep->lock held 835 * Context: ep->lock held or released (see req_done())
819 * 836 *
820 * Ends control endpoint IN request (completes usb request), and puts 837 * Ends control endpoint IN request (completes usb request), and puts
821 * control endpoint into status state 838 * control endpoint into status state
822 */ 839 */
823static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req) 840static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
841 unsigned long *pflags)
824{ 842{
825 set_ep0state(ep->dev, IN_STATUS_STAGE); 843 set_ep0state(ep->dev, IN_STATUS_STAGE);
826 ep_end_in_req(ep, req); 844 ep_end_in_req(ep, req, pflags);
827} 845}
828 846
829/** 847/**
@@ -831,19 +849,22 @@ static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
831 * @ep: pxa endpoint 849 * @ep: pxa endpoint
832 * @status: usb request status 850 * @status: usb request status
833 * 851 *
834 * Context: ep->lock held 852 * Context: ep->lock released
835 * 853 *
836 * Dequeues all requests on an endpoint. As a side effect, interrupts will be 854 * Dequeues all requests on an endpoint. As a side effect, interrupts will be
837 * disabled on that endpoint (because no more requests). 855 * disabled on that endpoint (because no more requests).
838 */ 856 */
839static void nuke(struct pxa_ep *ep, int status) 857static void nuke(struct pxa_ep *ep, int status)
840{ 858{
841 struct pxa27x_request *req; 859 struct pxa27x_request *req;
860 unsigned long flags;
842 861
862 spin_lock_irqsave(&ep->lock, flags);
843 while (!list_empty(&ep->queue)) { 863 while (!list_empty(&ep->queue)) {
844 req = list_entry(ep->queue.next, struct pxa27x_request, queue); 864 req = list_entry(ep->queue.next, struct pxa27x_request, queue);
845 req_done(ep, req, status); 865 req_done(ep, req, status, &flags);
846 } 866 }
867 spin_unlock_irqrestore(&ep->lock, flags);
847} 868}
848 869
849/** 870/**
@@ -1123,6 +1144,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1123 int rc = 0; 1144 int rc = 0;
1124 int is_first_req; 1145 int is_first_req;
1125 unsigned length; 1146 unsigned length;
1147 int recursion_detected;
1126 1148
1127 req = container_of(_req, struct pxa27x_request, req); 1149 req = container_of(_req, struct pxa27x_request, req);
1128 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); 1150 udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
@@ -1152,6 +1174,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1152 return -EMSGSIZE; 1174 return -EMSGSIZE;
1153 1175
1154 spin_lock_irqsave(&ep->lock, flags); 1176 spin_lock_irqsave(&ep->lock, flags);
1177 recursion_detected = ep->in_handle_ep;
1155 1178
1156 is_first_req = list_empty(&ep->queue); 1179 is_first_req = list_empty(&ep->queue);
1157 ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n", 1180 ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
@@ -1161,12 +1184,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1161 if (!ep->enabled) { 1184 if (!ep->enabled) {
1162 _req->status = -ESHUTDOWN; 1185 _req->status = -ESHUTDOWN;
1163 rc = -ESHUTDOWN; 1186 rc = -ESHUTDOWN;
1164 goto out; 1187 goto out_locked;
1165 } 1188 }
1166 1189
1167 if (req->in_use) { 1190 if (req->in_use) {
1168 ep_err(ep, "refusing to queue req %p (already queued)\n", req); 1191 ep_err(ep, "refusing to queue req %p (already queued)\n", req);
1169 goto out; 1192 goto out_locked;
1170 } 1193 }
1171 1194
1172 length = _req->length; 1195 length = _req->length;
@@ -1174,12 +1197,13 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1174 _req->actual = 0; 1197 _req->actual = 0;
1175 1198
1176 ep_add_request(ep, req); 1199 ep_add_request(ep, req);
1200 spin_unlock_irqrestore(&ep->lock, flags);
1177 1201
1178 if (is_ep0(ep)) { 1202 if (is_ep0(ep)) {
1179 switch (dev->ep0state) { 1203 switch (dev->ep0state) {
1180 case WAIT_ACK_SET_CONF_INTERF: 1204 case WAIT_ACK_SET_CONF_INTERF:
1181 if (length == 0) { 1205 if (length == 0) {
1182 ep_end_in_req(ep, req); 1206 ep_end_in_req(ep, req, NULL);
1183 } else { 1207 } else {
1184 ep_err(ep, "got a request of %d bytes while" 1208 ep_err(ep, "got a request of %d bytes while"
1185 "in state WAIT_ACK_SET_CONF_INTERF\n", 1209 "in state WAIT_ACK_SET_CONF_INTERF\n",
@@ -1192,12 +1216,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1192 case IN_DATA_STAGE: 1216 case IN_DATA_STAGE:
1193 if (!ep_is_full(ep)) 1217 if (!ep_is_full(ep))
1194 if (write_ep0_fifo(ep, req)) 1218 if (write_ep0_fifo(ep, req))
1195 ep0_end_in_req(ep, req); 1219 ep0_end_in_req(ep, req, NULL);
1196 break; 1220 break;
1197 case OUT_DATA_STAGE: 1221 case OUT_DATA_STAGE:
1198 if ((length == 0) || !epout_has_pkt(ep)) 1222 if ((length == 0) || !epout_has_pkt(ep))
1199 if (read_ep0_fifo(ep, req)) 1223 if (read_ep0_fifo(ep, req))
1200 ep0_end_out_req(ep, req); 1224 ep0_end_out_req(ep, req, NULL);
1201 break; 1225 break;
1202 default: 1226 default:
1203 ep_err(ep, "odd state %s to send me a request\n", 1227 ep_err(ep, "odd state %s to send me a request\n",
@@ -1207,12 +1231,15 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1207 break; 1231 break;
1208 } 1232 }
1209 } else { 1233 } else {
1210 handle_ep(ep); 1234 if (!recursion_detected)
1235 handle_ep(ep);
1211 } 1236 }
1212 1237
1213out: 1238out:
1214 spin_unlock_irqrestore(&ep->lock, flags);
1215 return rc; 1239 return rc;
1240out_locked:
1241 spin_unlock_irqrestore(&ep->lock, flags);
1242 goto out;
1216} 1243}
1217 1244
1218/** 1245/**
@@ -1242,13 +1269,14 @@ static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1242 /* make sure it's actually queued on this endpoint */ 1269 /* make sure it's actually queued on this endpoint */
1243 list_for_each_entry(req, &ep->queue, queue) { 1270 list_for_each_entry(req, &ep->queue, queue) {
1244 if (&req->req == _req) { 1271 if (&req->req == _req) {
1245 req_done(ep, req, -ECONNRESET);
1246 rc = 0; 1272 rc = 0;
1247 break; 1273 break;
1248 } 1274 }
1249 } 1275 }
1250 1276
1251 spin_unlock_irqrestore(&ep->lock, flags); 1277 spin_unlock_irqrestore(&ep->lock, flags);
1278 if (!rc)
1279 req_done(ep, req, -ECONNRESET, NULL);
1252 return rc; 1280 return rc;
1253} 1281}
1254 1282
@@ -1445,7 +1473,6 @@ static int pxa_ep_disable(struct usb_ep *_ep)
1445{ 1473{
1446 struct pxa_ep *ep; 1474 struct pxa_ep *ep;
1447 struct udc_usb_ep *udc_usb_ep; 1475 struct udc_usb_ep *udc_usb_ep;
1448 unsigned long flags;
1449 1476
1450 if (!_ep) 1477 if (!_ep)
1451 return -EINVAL; 1478 return -EINVAL;
@@ -1455,10 +1482,8 @@ static int pxa_ep_disable(struct usb_ep *_ep)
1455 if (!ep || is_ep0(ep) || !list_empty(&ep->queue)) 1482 if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
1456 return -EINVAL; 1483 return -EINVAL;
1457 1484
1458 spin_lock_irqsave(&ep->lock, flags);
1459 ep->enabled = 0; 1485 ep->enabled = 0;
1460 nuke(ep, -ESHUTDOWN); 1486 nuke(ep, -ESHUTDOWN);
1461 spin_unlock_irqrestore(&ep->lock, flags);
1462 1487
1463 pxa_ep_fifo_flush(_ep); 1488 pxa_ep_fifo_flush(_ep);
1464 udc_usb_ep->pxa_ep = NULL; 1489 udc_usb_ep->pxa_ep = NULL;
@@ -1907,8 +1932,10 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
1907 } u; 1932 } u;
1908 int i; 1933 int i;
1909 int have_extrabytes = 0; 1934 int have_extrabytes = 0;
1935 unsigned long flags;
1910 1936
1911 nuke(ep, -EPROTO); 1937 nuke(ep, -EPROTO);
1938 spin_lock_irqsave(&ep->lock, flags);
1912 1939
1913 /* 1940 /*
1914 * In the PXA320 manual, in the section about Back-to-Back setup 1941 * In the PXA320 manual, in the section about Back-to-Back setup
@@ -1947,10 +1974,13 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
1947 /* Tell UDC to enter Data Stage */ 1974 /* Tell UDC to enter Data Stage */
1948 ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC); 1975 ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
1949 1976
1977 spin_unlock_irqrestore(&ep->lock, flags);
1950 i = udc->driver->setup(&udc->gadget, &u.r); 1978 i = udc->driver->setup(&udc->gadget, &u.r);
1979 spin_lock_irqsave(&ep->lock, flags);
1951 if (i < 0) 1980 if (i < 0)
1952 goto stall; 1981 goto stall;
1953out: 1982out:
1983 spin_unlock_irqrestore(&ep->lock, flags);
1954 return; 1984 return;
1955stall: 1985stall:
1956 ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n", 1986 ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
@@ -2055,13 +2085,13 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
2055 if (req && !ep_is_full(ep)) 2085 if (req && !ep_is_full(ep))
2056 completed = write_ep0_fifo(ep, req); 2086 completed = write_ep0_fifo(ep, req);
2057 if (completed) 2087 if (completed)
2058 ep0_end_in_req(ep, req); 2088 ep0_end_in_req(ep, req, NULL);
2059 break; 2089 break;
2060 case OUT_DATA_STAGE: /* SET_DESCRIPTOR */ 2090 case OUT_DATA_STAGE: /* SET_DESCRIPTOR */
2061 if (epout_has_pkt(ep) && req) 2091 if (epout_has_pkt(ep) && req)
2062 completed = read_ep0_fifo(ep, req); 2092 completed = read_ep0_fifo(ep, req);
2063 if (completed) 2093 if (completed)
2064 ep0_end_out_req(ep, req); 2094 ep0_end_out_req(ep, req, NULL);
2065 break; 2095 break;
2066 case STALL: 2096 case STALL:
2067 ep_write_UDCCSR(ep, UDCCSR0_FST); 2097 ep_write_UDCCSR(ep, UDCCSR0_FST);
@@ -2091,7 +2121,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
2091 * Tries to transfer all pending request data into the endpoint and/or 2121 * Tries to transfer all pending request data into the endpoint and/or
2092 * transfer all pending data in the endpoint into usb requests. 2122 * transfer all pending data in the endpoint into usb requests.
2093 * 2123 *
2094 * Is always called when in_interrupt() or with ep->lock held. 2124 * Is always called when in_interrupt() and with ep->lock released.
2095 */ 2125 */
2096static void handle_ep(struct pxa_ep *ep) 2126static void handle_ep(struct pxa_ep *ep)
2097{ 2127{
@@ -2100,10 +2130,17 @@ static void handle_ep(struct pxa_ep *ep)
2100 u32 udccsr; 2130 u32 udccsr;
2101 int is_in = ep->dir_in; 2131 int is_in = ep->dir_in;
2102 int loop = 0; 2132 int loop = 0;
2133 unsigned long flags;
2134
2135 spin_lock_irqsave(&ep->lock, flags);
2136 if (ep->in_handle_ep)
2137 goto recursion_detected;
2138 ep->in_handle_ep = 1;
2103 2139
2104 do { 2140 do {
2105 completed = 0; 2141 completed = 0;
2106 udccsr = udc_ep_readl(ep, UDCCSR); 2142 udccsr = udc_ep_readl(ep, UDCCSR);
2143
2107 if (likely(!list_empty(&ep->queue))) 2144 if (likely(!list_empty(&ep->queue)))
2108 req = list_entry(ep->queue.next, 2145 req = list_entry(ep->queue.next,
2109 struct pxa27x_request, queue); 2146 struct pxa27x_request, queue);
@@ -2122,15 +2159,22 @@ static void handle_ep(struct pxa_ep *ep)
2122 if (unlikely(is_in)) { 2159 if (unlikely(is_in)) {
2123 if (likely(!ep_is_full(ep))) 2160 if (likely(!ep_is_full(ep)))
2124 completed = write_fifo(ep, req); 2161 completed = write_fifo(ep, req);
2125 if (completed)
2126 ep_end_in_req(ep, req);
2127 } else { 2162 } else {
2128 if (likely(epout_has_pkt(ep))) 2163 if (likely(epout_has_pkt(ep)))
2129 completed = read_fifo(ep, req); 2164 completed = read_fifo(ep, req);
2130 if (completed) 2165 }
2131 ep_end_out_req(ep, req); 2166
2167 if (completed) {
2168 if (is_in)
2169 ep_end_in_req(ep, req, &flags);
2170 else
2171 ep_end_out_req(ep, req, &flags);
2132 } 2172 }
2133 } while (completed); 2173 } while (completed);
2174
2175 ep->in_handle_ep = 0;
2176recursion_detected:
2177 spin_unlock_irqrestore(&ep->lock, flags);
2134} 2178}
2135 2179
2136/** 2180/**
@@ -2218,9 +2262,13 @@ static void irq_handle_data(int irq, struct pxa_udc *udc)
2218 continue; 2262 continue;
2219 2263
2220 udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK)); 2264 udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));
2221 ep = &udc->pxa_ep[i]; 2265
2222 ep->stats.irqs++; 2266 WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
2223 handle_ep(ep); 2267 if (i < ARRAY_SIZE(udc->pxa_ep)) {
2268 ep = &udc->pxa_ep[i];
2269 ep->stats.irqs++;
2270 handle_ep(ep);
2271 }
2224 } 2272 }
2225 2273
2226 for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) { 2274 for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
@@ -2228,9 +2276,12 @@ static void irq_handle_data(int irq, struct pxa_udc *udc)
2228 if (!(udcisr1 & UDCISR_INT_MASK)) 2276 if (!(udcisr1 & UDCISR_INT_MASK))
2229 continue; 2277 continue;
2230 2278
2231 ep = &udc->pxa_ep[i]; 2279 WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
2232 ep->stats.irqs++; 2280 if (i < ARRAY_SIZE(udc->pxa_ep)) {
2233 handle_ep(ep); 2281 ep = &udc->pxa_ep[i];
2282 ep->stats.irqs++;
2283 handle_ep(ep);
2284 }
2234 } 2285 }
2235 2286
2236} 2287}
@@ -2439,7 +2490,7 @@ static int __init pxa_udc_probe(struct platform_device *pdev)
2439 } 2490 }
2440 2491
2441 retval = -ENOMEM; 2492 retval = -ENOMEM;
2442 udc->regs = ioremap(regs->start, regs->end - regs->start + 1); 2493 udc->regs = ioremap(regs->start, resource_size(regs));
2443 if (!udc->regs) { 2494 if (!udc->regs) {
2444 dev_err(&pdev->dev, "Unable to map UDC I/O memory\n"); 2495 dev_err(&pdev->dev, "Unable to map UDC I/O memory\n");
2445 goto err_map; 2496 goto err_map;
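The req_done() change above boils down to one locking rule: never invoke the gadget's completion callback with ep->lock held, because the callback may re-enter the UDC (for instance by queueing another request). A minimal sketch of that rule under kernel-context assumptions, with hypothetical names (toy_complete_locked, complete, ctx) and a plain function pointer standing in for req->req.complete():

#include <linux/spinlock.h>

/*
 * Illustration only: drop the caller's spinlock around the completion
 * callback, then retake it, mirroring the pflags convention added to
 * req_done() above (pflags == NULL means no lock was held on entry).
 */
static void toy_complete_locked(spinlock_t *lock, unsigned long *pflags,
				void (*complete)(void *ctx), void *ctx)
{
	unsigned long flags;

	if (pflags)				/* caller held the lock */
		spin_unlock_irqrestore(lock, *pflags);

	local_irq_save(flags);			/* callback runs with IRQs off */
	complete(ctx);				/* may re-enter the driver */
	local_irq_restore(flags);

	if (pflags)				/* hand the lock back to the caller */
		spin_lock_irqsave(lock, *pflags);
}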
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index e25225e26586..ff61e4866e8a 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -318,6 +318,11 @@ struct udc_usb_ep {
318 * @queue: requests queue 318 * @queue: requests queue
319 * @lock: lock to pxa_ep data (queues and stats) 319 * @lock: lock to pxa_ep data (queues and stats)
320 * @enabled: true when endpoint enabled (not stopped by gadget layer) 320 * @enabled: true when endpoint enabled (not stopped by gadget layer)
321 * @in_handle_ep: flag set while handle_ep() is running on this endpoint.
322 * Prevents deadlocks and infinite recursion of the forms:
323 * irq->handle_ep()->req_done()->req.complete()->pxa_ep_queue()->handle_ep()
324 * or
325 * pxa_ep_queue()->handle_ep()->req_done()->req.complete()->pxa_ep_queue()
321 * @idx: endpoint index (1 => epA, 2 => epB, ..., 24 => epX) 326 * @idx: endpoint index (1 => epA, 2 => epB, ..., 24 => epX)
322 * @name: endpoint name (for trace/debug purpose) 327 * @name: endpoint name (for trace/debug purpose)
323 * @dir_in: 1 if IN endpoint, 0 if OUT endpoint 328 * @dir_in: 1 if IN endpoint, 0 if OUT endpoint
@@ -346,6 +351,7 @@ struct pxa_ep {
346 spinlock_t lock; /* Protects this structure */ 351 spinlock_t lock; /* Protects this structure */
347 /* (queues, stats) */ 352 /* (queues, stats) */
348 unsigned enabled:1; 353 unsigned enabled:1;
354 unsigned in_handle_ep:1;
349 355
350 unsigned idx:5; 356 unsigned idx:5;
351 char *name; 357 char *name;
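The in_handle_ep bit added above is a plain re-entrancy guard: handle_ep() marks the endpoint busy while it drains FIFOs, so a completion callback that lands back in pxa_ep_queue() does not recurse into handle_ep() for the same endpoint. A minimal sketch of the guard with hypothetical names (toy_ep, toy_handle_ep):

#include <linux/spinlock.h>

/* Stripped-down endpoint: just the lock and the guard bit. */
struct toy_ep {
	spinlock_t	lock;
	unsigned	in_handle_ep:1;
};

static void toy_handle_ep(struct toy_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);
	if (ep->in_handle_ep)
		goto recursion_detected;	/* already running on this ep */
	ep->in_handle_ep = 1;

	/* ... drain FIFOs and complete requests; completions may drop and
	 * retake ep->lock and may queue new requests on this endpoint ... */

	ep->in_handle_ep = 0;
recursion_detected:
	spin_unlock_irqrestore(&ep->lock, flags);
}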
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 5fc80a104150..7e5bf593d386 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -317,7 +317,8 @@ static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
317 * 317 *
318 * Allocate a new USB request structure appropriate for the specified endpoint 318 * Allocate a new USB request structure appropriate for the specified endpoint
319 */ 319 */
320struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep, gfp_t flags) 320static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,
321 gfp_t flags)
321{ 322{
322 struct s3c_hsotg_req *req; 323 struct s3c_hsotg_req *req;
323 324
@@ -373,7 +374,7 @@ static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
373 req->dma = DMA_ADDR_INVALID; 374 req->dma = DMA_ADDR_INVALID;
374 hs_req->mapped = 0; 375 hs_req->mapped = 0;
375 } else { 376 } else {
376 dma_sync_single(hsotg->dev, req->dma, req->length, dir); 377 dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
377 } 378 }
378} 379}
379 380
@@ -755,7 +756,7 @@ static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
755 hs_req->mapped = 1; 756 hs_req->mapped = 1;
756 req->dma = dma; 757 req->dma = dma;
757 } else { 758 } else {
758 dma_sync_single(hsotg->dev, req->dma, req->length, dir); 759 dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
759 hs_req->mapped = 0; 760 hs_req->mapped = 0;
760 } 761 }
761 762
@@ -1460,7 +1461,7 @@ static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
1460 * as the actual data should be sent to the memory directly and we turn 1461 * as the actual data should be sent to the memory directly and we turn
1461 * on the completion interrupts to get notifications of transfer completion. 1462 * on the completion interrupts to get notifications of transfer completion.
1462 */ 1463 */
1463void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg) 1464static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
1464{ 1465{
1465 u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP); 1466 u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP);
1466 u32 epnum, status, size; 1467 u32 epnum, status, size;
@@ -3094,7 +3095,7 @@ static void s3c_hsotg_gate(struct platform_device *pdev, bool on)
3094 local_irq_restore(flags); 3095 local_irq_restore(flags);
3095} 3096}
3096 3097
3097struct s3c_hsotg_plat s3c_hsotg_default_pdata; 3098static struct s3c_hsotg_plat s3c_hsotg_default_pdata;
3098 3099
3099static int __devinit s3c_hsotg_probe(struct platform_device *pdev) 3100static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
3100{ 3101{
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 2fc02bd95848..84ca195c2d10 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -746,6 +746,10 @@ static const struct net_device_ops eth_netdev_ops = {
746 .ndo_validate_addr = eth_validate_addr, 746 .ndo_validate_addr = eth_validate_addr,
747}; 747};
748 748
749static struct device_type gadget_type = {
750 .name = "gadget",
751};
752
749/** 753/**
750 * gether_setup - initialize one ethernet-over-usb link 754 * gether_setup - initialize one ethernet-over-usb link
751 * @g: gadget to associated with these links 755 * @g: gadget to associated with these links
@@ -808,6 +812,7 @@ int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
808 812
809 dev->gadget = g; 813 dev->gadget = g;
810 SET_NETDEV_DEV(net, &g->dev); 814 SET_NETDEV_DEV(net, &g->dev);
815 SET_NETDEV_DEVTYPE(net, &gadget_type);
811 816
812 status = register_netdev(net); 817 status = register_netdev(net);
813 if (status < 0) { 818 if (status < 0) {
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index fd55f450bc0e..3c8c0c9f9d72 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -93,13 +93,6 @@ static inline bool can_support_ecm(struct usb_gadget *gadget)
93 if (!gadget_supports_altsettings(gadget)) 93 if (!gadget_supports_altsettings(gadget))
94 return false; 94 return false;
95 95
96 /* SA1100 can do ECM, *without* status endpoint ... but we'll
97 * only use it in non-ECM mode for backwards compatibility
98 * (and since we currently require a status endpoint)
99 */
100 if (gadget_is_sa1100(gadget))
101 return false;
102
103 /* Everything else is *presumably* fine ... but this is a bit 96 /* Everything else is *presumably* fine ... but this is a bit
104 * chancy, so be **CERTAIN** there are no hardware issues with 97 * chancy, so be **CERTAIN** there are no hardware issues with
105 * your controller. Add it above if it can't handle CDC. 98 * your controller. Add it above if it can't handle CDC.
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 2d772401b7ad..fac81ee193dd 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -297,12 +297,10 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
297 */ 297 */
298 if (loopdefault) { 298 if (loopdefault) {
299 loopback_add(cdev, autoresume != 0); 299 loopback_add(cdev, autoresume != 0);
300 if (!gadget_is_sh(gadget)) 300 sourcesink_add(cdev, autoresume != 0);
301 sourcesink_add(cdev, autoresume != 0);
302 } else { 301 } else {
303 sourcesink_add(cdev, autoresume != 0); 302 sourcesink_add(cdev, autoresume != 0);
304 if (!gadget_is_sh(gadget)) 303 loopback_add(cdev, autoresume != 0);
305 loopback_add(cdev, autoresume != 0);
306 } 304 }
307 305
308 gcnum = usb_gadget_controller_number(gadget); 306 gcnum = usb_gadget_controller_number(gadget);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2678a1624fcc..8d3df0397de3 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -399,3 +399,14 @@ config USB_HWA_HCD
399 399
400 To compile this driver as a module, choose M here: the module 400 To compile this driver as a module, choose M here: the module
401 will be called "hwa-hc". 401 will be called "hwa-hc".
402
403config USB_IMX21_HCD
404 tristate "iMX21 HCD support"
405 depends on USB && ARM && MACH_MX21
406 help
407 This driver enables support for the on-chip USB host in the
408 iMX21 processor.
409
410 To compile this driver as a module, choose M here: the
411 module will be called "imx21-hcd".
412
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f58b2494c44a..4e0c67f1f51b 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -32,3 +32,5 @@ obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
32obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o 32obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
33obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o 33obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
34obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o 34obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
35obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
36
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 87c1b7c34c0e..51bd0edf544f 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -149,7 +149,7 @@ static int __init ehci_atmel_drv_probe(struct platform_device *pdev)
149 goto fail_request_resource; 149 goto fail_request_resource;
150 } 150 }
151 hcd->rsrc_start = res->start; 151 hcd->rsrc_start = res->start;
152 hcd->rsrc_len = res->end - res->start + 1; 152 hcd->rsrc_len = resource_size(res);
153 153
154 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, 154 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
155 driver->description)) { 155 driver->description)) {
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index dbfb482a94e3..e3a74e75e822 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -121,6 +121,7 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
121{ 121{
122 struct usb_hcd *hcd; 122 struct usb_hcd *hcd;
123 struct ehci_hcd *ehci; 123 struct ehci_hcd *ehci;
124 struct resource *res;
124 int ret; 125 int ret;
125 126
126 if (usb_disabled()) 127 if (usb_disabled())
@@ -144,8 +145,9 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
144 if (!hcd) 145 if (!hcd)
145 return -ENOMEM; 146 return -ENOMEM;
146 147
147 hcd->rsrc_start = pdev->resource[0].start; 148 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
148 hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1; 149 hcd->rsrc_start = res->start;
150 hcd->rsrc_len = resource_size(res);
149 151
150 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 152 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
151 pr_debug("request_mem_region failed"); 153 pr_debug("request_mem_region failed");
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 991174937db3..0e26aa13f158 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2005 MontaVista Software 2 * Copyright 2005-2009 MontaVista Software, Inc.
3 * Copyright 2008 Freescale Semiconductor, Inc.
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the 6 * under the terms of the GNU General Public License as published by the
@@ -17,17 +18,20 @@
17 * 18 *
18 * Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided 19 * Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided
19 * by Hunter Wu. 20 * by Hunter Wu.
21 * Power Management support by Dave Liu <daveliu@freescale.com>,
22 * Jerry Huang <Chang-Ming.Huang@freescale.com> and
23 * Anton Vorontsov <avorontsov@ru.mvista.com>.
20 */ 24 */
21 25
26#include <linux/kernel.h>
27#include <linux/types.h>
28#include <linux/delay.h>
29#include <linux/pm.h>
22#include <linux/platform_device.h> 30#include <linux/platform_device.h>
23#include <linux/fsl_devices.h> 31#include <linux/fsl_devices.h>
24 32
25#include "ehci-fsl.h" 33#include "ehci-fsl.h"
26 34
27/* FIXME: Power Management is un-ported so temporarily disable it */
28#undef CONFIG_PM
29
30
31/* configure so an HC device and id are always provided */ 35/* configure so an HC device and id are always provided */
32/* always called with process context; sleeping is OK */ 36/* always called with process context; sleeping is OK */
33 37
@@ -40,8 +44,8 @@
40 * Allocates basic resources for this USB host controller. 44 * Allocates basic resources for this USB host controller.
41 * 45 *
42 */ 46 */
43int usb_hcd_fsl_probe(const struct hc_driver *driver, 47static int usb_hcd_fsl_probe(const struct hc_driver *driver,
44 struct platform_device *pdev) 48 struct platform_device *pdev)
45{ 49{
46 struct fsl_usb2_platform_data *pdata; 50 struct fsl_usb2_platform_data *pdata;
47 struct usb_hcd *hcd; 51 struct usb_hcd *hcd;
@@ -147,7 +151,8 @@ int usb_hcd_fsl_probe(const struct hc_driver *driver,
147 * Reverses the effect of usb_hcd_fsl_probe(). 151 * Reverses the effect of usb_hcd_fsl_probe().
148 * 152 *
149 */ 153 */
150void usb_hcd_fsl_remove(struct usb_hcd *hcd, struct platform_device *pdev) 154static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
155 struct platform_device *pdev)
151{ 156{
152 usb_remove_hcd(hcd); 157 usb_remove_hcd(hcd);
153 iounmap(hcd->regs); 158 iounmap(hcd->regs);
@@ -284,10 +289,81 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
284 return retval; 289 return retval;
285} 290}
286 291
292struct ehci_fsl {
293 struct ehci_hcd ehci;
294
295#ifdef CONFIG_PM
296 /* Saved USB PHY settings, need to restore after deep sleep. */
297 u32 usb_ctrl;
298#endif
299};
300
301#ifdef CONFIG_PM
302
303static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
304{
305 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
306
307 return container_of(ehci, struct ehci_fsl, ehci);
308}
309
310static int ehci_fsl_drv_suspend(struct device *dev)
311{
312 struct usb_hcd *hcd = dev_get_drvdata(dev);
313 struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
314 void __iomem *non_ehci = hcd->regs;
315
316 if (!fsl_deep_sleep())
317 return 0;
318
319 ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL);
320 return 0;
321}
322
323static int ehci_fsl_drv_resume(struct device *dev)
324{
325 struct usb_hcd *hcd = dev_get_drvdata(dev);
326 struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
327 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
328 void __iomem *non_ehci = hcd->regs;
329
330 if (!fsl_deep_sleep())
331 return 0;
332
333 usb_root_hub_lost_power(hcd->self.root_hub);
334
335 /* Restore USB PHY settings and enable the controller. */
336 out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl);
337
338 ehci_reset(ehci);
339 ehci_fsl_reinit(ehci);
340
341 return 0;
342}
343
344static int ehci_fsl_drv_restore(struct device *dev)
345{
346 struct usb_hcd *hcd = dev_get_drvdata(dev);
347
348 usb_root_hub_lost_power(hcd->self.root_hub);
349 return 0;
350}
351
352static struct dev_pm_ops ehci_fsl_pm_ops = {
353 .suspend = ehci_fsl_drv_suspend,
354 .resume = ehci_fsl_drv_resume,
355 .restore = ehci_fsl_drv_restore,
356};
357
358#define EHCI_FSL_PM_OPS (&ehci_fsl_pm_ops)
359#else
360#define EHCI_FSL_PM_OPS NULL
361#endif /* CONFIG_PM */
362
287static const struct hc_driver ehci_fsl_hc_driver = { 363static const struct hc_driver ehci_fsl_hc_driver = {
288 .description = hcd_name, 364 .description = hcd_name,
289 .product_desc = "Freescale On-Chip EHCI Host Controller", 365 .product_desc = "Freescale On-Chip EHCI Host Controller",
290 .hcd_priv_size = sizeof(struct ehci_hcd), 366 .hcd_priv_size = sizeof(struct ehci_fsl),
291 367
292 /* 368 /*
293 * generic hardware linkage 369 * generic hardware linkage
@@ -354,6 +430,7 @@ static struct platform_driver ehci_fsl_driver = {
354 .remove = ehci_fsl_drv_remove, 430 .remove = ehci_fsl_drv_remove,
355 .shutdown = usb_hcd_platform_shutdown, 431 .shutdown = usb_hcd_platform_shutdown,
356 .driver = { 432 .driver = {
357 .name = "fsl-ehci", 433 .name = "fsl-ehci",
434 .pm = EHCI_FSL_PM_OPS,
358 }, 435 },
359}; 436};
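The ehci-fsl change keeps its PM state by growing the HCD private area: hcd_priv_size becomes sizeof(struct ehci_fsl), the generic ehci_hcd stays as the first member, and container_of() walks back from it to the wrapper. A toy, userspace-compilable sketch of that embedding; all names here (toy_ehci, toy_ehci_fsl, to_ehci_fsl) are hypothetical stand-ins, not the driver's own types:

#include <stddef.h>
#include <stdio.h>

struct toy_ehci { unsigned int command; };	/* stands in for struct ehci_hcd */

struct toy_ehci_fsl {
	struct toy_ehci	ehci;		/* must stay first: the generic part */
	unsigned int	usb_ctrl;	/* extra state saved across deep sleep */
};

/* Simplified container_of(): recover the wrapper from the embedded member. */
#define to_ehci_fsl(p) \
	((struct toy_ehci_fsl *)((char *)(p) - offsetof(struct toy_ehci_fsl, ehci)))

int main(void)
{
	struct toy_ehci_fsl fsl = { .ehci = { .command = 1 }, .usb_ctrl = 0xabcd };
	struct toy_ehci *ehci = &fsl.ehci;	/* what the core hands back */

	printf("usb_ctrl = %#x\n", to_ehci_fsl(ehci)->usb_ctrl);
	return 0;
}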
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index 35c56f40bdbb..23cd917088b4 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -162,6 +162,17 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
162 goto err_ioremap; 162 goto err_ioremap;
163 } 163 }
164 164
165 /* call platform specific init function */
166 if (pdata->init) {
167 ret = pdata->init(pdev);
168 if (ret) {
169 dev_err(dev, "platform init failed\n");
170 goto err_init;
171 }
172 /* platforms need some time to settle changed IO settings */
173 mdelay(10);
174 }
175
165 /* enable clocks */ 176 /* enable clocks */
166 priv->usbclk = clk_get(dev, "usb"); 177 priv->usbclk = clk_get(dev, "usb");
167 if (IS_ERR(priv->usbclk)) { 178 if (IS_ERR(priv->usbclk)) {
@@ -192,18 +203,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
192 if (ret < 0) 203 if (ret < 0)
193 goto err_init; 204 goto err_init;
194 205
195 /* call platform specific init function */
196 if (pdata->init) {
197 ret = pdata->init(pdev);
198 if (ret) {
199 dev_err(dev, "platform init failed\n");
200 goto err_init;
201 }
202 }
203
204 /* most platforms need some time to settle changed IO settings */
205 mdelay(10);
206
207 /* Initialize the transceiver */ 206 /* Initialize the transceiver */
208 if (pdata->otg) { 207 if (pdata->otg) {
209 pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET; 208 pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 74d07f4e8b7d..f0282d6bb7aa 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -26,10 +26,9 @@
26 * along with this program; if not, write to the Free Software 26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 * 28 *
29 * TODO (last updated Feb 23rd, 2009): 29 * TODO (last updated Feb 12, 2010):
30 * - add kernel-doc 30 * - add kernel-doc
31 * - enable AUTOIDLE 31 * - enable AUTOIDLE
32 * - move DPLL5 programming to clock fw
33 * - add suspend/resume 32 * - add suspend/resume
34 * - move workarounds to board-files 33 * - move workarounds to board-files
35 */ 34 */
@@ -37,6 +36,7 @@
37#include <linux/platform_device.h> 36#include <linux/platform_device.h>
38#include <linux/clk.h> 37#include <linux/clk.h>
39#include <linux/gpio.h> 38#include <linux/gpio.h>
39#include <linux/regulator/consumer.h>
40#include <plat/usb.h> 40#include <plat/usb.h>
41 41
42/* 42/*
@@ -178,6 +178,11 @@ struct ehci_hcd_omap {
178 void __iomem *uhh_base; 178 void __iomem *uhh_base;
179 void __iomem *tll_base; 179 void __iomem *tll_base;
180 void __iomem *ehci_base; 180 void __iomem *ehci_base;
181
182 /* Regulators for USB PHYs.
183 * Each PHY can have a separate regulator.
184 */
185 struct regulator *regulator[OMAP3_HS_USB_PORTS];
181}; 186};
182 187
183/*-------------------------------------------------------------------------*/ 188/*-------------------------------------------------------------------------*/
@@ -546,6 +551,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
546 551
547 int irq = platform_get_irq(pdev, 0); 552 int irq = platform_get_irq(pdev, 0);
548 int ret = -ENODEV; 553 int ret = -ENODEV;
554 int i;
555 char supply[7];
549 556
550 if (!pdata) { 557 if (!pdata) {
551 dev_dbg(&pdev->dev, "missing platform_data\n"); 558 dev_dbg(&pdev->dev, "missing platform_data\n");
@@ -613,6 +620,21 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
613 goto err_tll_ioremap; 620 goto err_tll_ioremap;
614 } 621 }
615 622
623 /* get ehci regulator and enable */
624 for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
625 if (omap->port_mode[i] != EHCI_HCD_OMAP_MODE_PHY) {
626 omap->regulator[i] = NULL;
627 continue;
628 }
629 snprintf(supply, sizeof(supply), "hsusb%d", i);
630 omap->regulator[i] = regulator_get(omap->dev, supply);
631 if (IS_ERR(omap->regulator[i]))
632 dev_dbg(&pdev->dev,
633 "failed to get ehci port%d regulator\n", i);
634 else
635 regulator_enable(omap->regulator[i]);
636 }
637
616 ret = omap_start_ehc(omap, hcd); 638 ret = omap_start_ehc(omap, hcd);
617 if (ret) { 639 if (ret) {
618 dev_dbg(&pdev->dev, "failed to start ehci\n"); 640 dev_dbg(&pdev->dev, "failed to start ehci\n");
@@ -622,13 +644,12 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
622 omap->ehci->regs = hcd->regs 644 omap->ehci->regs = hcd->regs
623 + HC_LENGTH(readl(&omap->ehci->caps->hc_capbase)); 645 + HC_LENGTH(readl(&omap->ehci->caps->hc_capbase));
624 646
647 dbg_hcs_params(omap->ehci, "reset");
648 dbg_hcc_params(omap->ehci, "reset");
649
625 /* cache this readonly data; minimize chip reads */ 650 /* cache this readonly data; minimize chip reads */
626 omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params); 651 omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params);
627 652
628 /* SET 1 micro-frame Interrupt interval */
629 writel(readl(&omap->ehci->regs->command) | (1 << 16),
630 &omap->ehci->regs->command);
631
632 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); 653 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
633 if (ret) { 654 if (ret) {
634 dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret); 655 dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
@@ -641,6 +662,12 @@ err_add_hcd:
641 omap_stop_ehc(omap, hcd); 662 omap_stop_ehc(omap, hcd);
642 663
643err_start: 664err_start:
665 for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
666 if (omap->regulator[i]) {
667 regulator_disable(omap->regulator[i]);
668 regulator_put(omap->regulator[i]);
669 }
670 }
644 iounmap(omap->tll_base); 671 iounmap(omap->tll_base);
645 672
646err_tll_ioremap: 673err_tll_ioremap:
@@ -674,13 +701,21 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
674{ 701{
675 struct ehci_hcd_omap *omap = platform_get_drvdata(pdev); 702 struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
676 struct usb_hcd *hcd = ehci_to_hcd(omap->ehci); 703 struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
704 int i;
677 705
678 usb_remove_hcd(hcd); 706 usb_remove_hcd(hcd);
679 omap_stop_ehc(omap, hcd); 707 omap_stop_ehc(omap, hcd);
680 iounmap(hcd->regs); 708 iounmap(hcd->regs);
709 for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
710 if (omap->regulator[i]) {
711 regulator_disable(omap->regulator[i]);
712 regulator_put(omap->regulator[i]);
713 }
714 }
681 iounmap(omap->tll_base); 715 iounmap(omap->tll_base);
682 iounmap(omap->uhh_base); 716 iounmap(omap->uhh_base);
683 usb_put_hcd(hcd); 717 usb_put_hcd(hcd);
718 kfree(omap);
684 719
685 return 0; 720 return 0;
686} 721}
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 1d283e1b2b8d..0f87dc72820a 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -222,14 +222,14 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
222 goto err1; 222 goto err1;
223 } 223 }
224 224
225 if (!request_mem_region(res->start, res->end - res->start + 1, 225 if (!request_mem_region(res->start, resource_size(res),
226 ehci_orion_hc_driver.description)) { 226 ehci_orion_hc_driver.description)) {
227 dev_dbg(&pdev->dev, "controller already in use\n"); 227 dev_dbg(&pdev->dev, "controller already in use\n");
228 err = -EBUSY; 228 err = -EBUSY;
229 goto err1; 229 goto err1;
230 } 230 }
231 231
232 regs = ioremap(res->start, res->end - res->start + 1); 232 regs = ioremap(res->start, resource_size(res));
233 if (regs == NULL) { 233 if (regs == NULL) {
234 dev_dbg(&pdev->dev, "error mapping memory\n"); 234 dev_dbg(&pdev->dev, "error mapping memory\n");
235 err = -EFAULT; 235 err = -EFAULT;
@@ -244,7 +244,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
244 } 244 }
245 245
246 hcd->rsrc_start = res->start; 246 hcd->rsrc_start = res->start;
247 hcd->rsrc_len = res->end - res->start + 1; 247 hcd->rsrc_len = resource_size(res);
248 hcd->regs = regs; 248 hcd->regs = regs;
249 249
250 ehci = hcd_to_ehci(hcd); 250 ehci = hcd_to_ehci(hcd);
@@ -287,7 +287,7 @@ err4:
287err3: 287err3:
288 iounmap(regs); 288 iounmap(regs);
289err2: 289err2:
290 release_mem_region(res->start, res->end - res->start + 1); 290 release_mem_region(res->start, resource_size(res));
291err1: 291err1:
292 dev_err(&pdev->dev, "init %s fail, %d\n", 292 dev_err(&pdev->dev, "init %s fail, %d\n",
293 dev_name(&pdev->dev), err); 293 dev_name(&pdev->dev), err);
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 36f96da129f5..8df33b8a634c 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -134,21 +134,21 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
134 hcd->rsrc_len = res.end - res.start + 1; 134 hcd->rsrc_len = res.end - res.start + 1;
135 135
136 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 136 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
137 printk(KERN_ERR __FILE__ ": request_mem_region failed\n"); 137 printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
138 rv = -EBUSY; 138 rv = -EBUSY;
139 goto err_rmr; 139 goto err_rmr;
140 } 140 }
141 141
142 irq = irq_of_parse_and_map(dn, 0); 142 irq = irq_of_parse_and_map(dn, 0);
143 if (irq == NO_IRQ) { 143 if (irq == NO_IRQ) {
144 printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n"); 144 printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
145 rv = -EBUSY; 145 rv = -EBUSY;
146 goto err_irq; 146 goto err_irq;
147 } 147 }
148 148
149 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); 149 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
150 if (!hcd->regs) { 150 if (!hcd->regs) {
151 printk(KERN_ERR __FILE__ ": ioremap failed\n"); 151 printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
152 rv = -ENOMEM; 152 rv = -ENOMEM;
153 goto err_ioremap; 153 goto err_ioremap;
154 } 154 }
@@ -161,9 +161,9 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
161 ehci->ohci_hcctrl_reg = ioremap(res.start + 161 ehci->ohci_hcctrl_reg = ioremap(res.start +
162 OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN); 162 OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN);
163 else 163 else
164 pr_debug(__FILE__ ": no ohci offset in fdt\n"); 164 pr_debug("%s: no ohci offset in fdt\n", __FILE__);
165 if (!ehci->ohci_hcctrl_reg) { 165 if (!ehci->ohci_hcctrl_reg) {
166 pr_debug(__FILE__ ": ioremap for ohci hcctrl failed\n"); 166 pr_debug("%s: ioremap for ohci hcctrl failed\n", __FILE__);
167 } else { 167 } else {
168 ehci->has_amcc_usb23 = 1; 168 ehci->has_amcc_usb23 = 1;
169 } 169 }
@@ -241,7 +241,7 @@ static int ehci_hcd_ppc_of_remove(struct of_device *op)
241 else 241 else
242 release_mem_region(res.start, 0x4); 242 release_mem_region(res.start, 0x4);
243 else 243 else
244 pr_debug(__FILE__ ": no ohci offset in fdt\n"); 244 pr_debug("%s: no ohci offset in fdt\n", __FILE__);
245 of_node_put(np); 245 of_node_put(np);
246 } 246 }
247 247
@@ -264,7 +264,7 @@ static int ehci_hcd_ppc_of_shutdown(struct of_device *op)
264} 264}
265 265
266 266
267static struct of_device_id ehci_hcd_ppc_of_match[] = { 267static const struct of_device_id ehci_hcd_ppc_of_match[] = {
268 { 268 {
269 .compatible = "usb-ehci", 269 .compatible = "usb-ehci",
270 }, 270 },
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 1e391e624c8a..39340ae00ac4 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -510,6 +510,8 @@ static int disable_periodic (struct ehci_hcd *ehci)
510 ehci_writel(ehci, cmd, &ehci->regs->command); 510 ehci_writel(ehci, cmd, &ehci->regs->command);
511 /* posted write ... */ 511 /* posted write ... */
512 512
513 free_cached_itd_list(ehci);
514
513 ehci->next_uframe = -1; 515 ehci->next_uframe = -1;
514 return 0; 516 return 0;
515} 517}
@@ -2322,9 +2324,13 @@ restart:
2322 * No need to check for activity unless the 2324 * No need to check for activity unless the
2323 * frame is current. 2325 * frame is current.
2324 */ 2326 */
2325 if (frame == clock_frame && live && 2327 if (((frame == clock_frame) ||
2326 (q.sitd->hw_results & 2328 (((frame + 1) % ehci->periodic_size)
2327 SITD_ACTIVE(ehci))) { 2329 == clock_frame))
2330 && live
2331 && (q.sitd->hw_results &
2332 SITD_ACTIVE(ehci))) {
2333
2328 incomplete = true; 2334 incomplete = true;
2329 q_p = &q.sitd->sitd_next; 2335 q_p = &q.sitd->sitd_next;
2330 hw_p = &q.sitd->hw_next; 2336 hw_p = &q.sitd->hw_next;
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index a5861531ad3e..f603bb2c0a8e 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -177,21 +177,21 @@ ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match)
177 hcd->rsrc_len = res.end - res.start + 1; 177 hcd->rsrc_len = res.end - res.start + 1;
178 178
179 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 179 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
180 printk(KERN_ERR __FILE__ ": request_mem_region failed\n"); 180 printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
181 rv = -EBUSY; 181 rv = -EBUSY;
182 goto err_rmr; 182 goto err_rmr;
183 } 183 }
184 184
185 irq = irq_of_parse_and_map(dn, 0); 185 irq = irq_of_parse_and_map(dn, 0);
186 if (irq == NO_IRQ) { 186 if (irq == NO_IRQ) {
187 printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n"); 187 printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
188 rv = -EBUSY; 188 rv = -EBUSY;
189 goto err_irq; 189 goto err_irq;
190 } 190 }
191 191
192 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); 192 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
193 if (!hcd->regs) { 193 if (!hcd->regs) {
194 printk(KERN_ERR __FILE__ ": ioremap failed\n"); 194 printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
195 rv = -ENOMEM; 195 rv = -ENOMEM;
196 goto err_ioremap; 196 goto err_ioremap;
197 } 197 }
@@ -281,7 +281,7 @@ static int ehci_hcd_xilinx_of_shutdown(struct of_device *op)
281} 281}
282 282
283 283
284static struct of_device_id ehci_hcd_xilinx_of_match[] = { 284static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
285 {.compatible = "xlnx,xps-usb-host-1.00.a",}, 285 {.compatible = "xlnx,xps-usb-host-1.00.a",},
286 {}, 286 {},
287}; 287};
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 78e7c3cfcb72..5dcfb3de9945 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -433,7 +433,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
433 return -ENOMEM; 433 return -ENOMEM;
434 434
435 /* allocate the private part of the URB */ 435 /* allocate the private part of the URB */
436 urb_priv->tds = kzalloc(size * sizeof(struct td), mem_flags); 436 urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags);
437 if (!urb_priv->tds) { 437 if (!urb_priv->tds) {
438 kfree(urb_priv); 438 kfree(urb_priv);
439 return -ENOMEM; 439 return -ENOMEM;
@@ -805,7 +805,7 @@ static int __devexit of_fhci_remove(struct of_device *ofdev)
805 return fhci_remove(&ofdev->dev); 805 return fhci_remove(&ofdev->dev);
806} 806}
807 807
808static struct of_device_id of_fhci_match[] = { 808static const struct of_device_id of_fhci_match[] = {
809 { .compatible = "fsl,mpc8323-qe-usb", }, 809 { .compatible = "fsl,mpc8323-qe-usb", },
810 {}, 810 {},
811}; 811};
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
new file mode 100644
index 000000000000..512f647448ca
--- /dev/null
+++ b/drivers/usb/host/imx21-dbg.c
@@ -0,0 +1,527 @@
1/*
2 * Copyright (c) 2009 by Martin Fuzzey
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* this file is part of imx21-hcd.c */
20
21#ifndef DEBUG
22
23static inline void create_debug_files(struct imx21 *imx21) { }
24static inline void remove_debug_files(struct imx21 *imx21) { }
25static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {}
26static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb,
27 int status) {}
28static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {}
29static inline void debug_urb_queued_for_etd(struct imx21 *imx21,
30 struct urb *urb) {}
31static inline void debug_urb_queued_for_dmem(struct imx21 *imx21,
32 struct urb *urb) {}
33static inline void debug_etd_allocated(struct imx21 *imx21) {}
34static inline void debug_etd_freed(struct imx21 *imx21) {}
35static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {}
36static inline void debug_dmem_freed(struct imx21 *imx21, int size) {}
37static inline void debug_isoc_submitted(struct imx21 *imx21,
38 int frame, struct td *td) {}
39static inline void debug_isoc_completed(struct imx21 *imx21,
40 int frame, struct td *td, int cc, int len) {}
41
42#else
43
44#include <linux/debugfs.h>
45#include <linux/seq_file.h>
46
47static const char *dir_labels[] = {
48 "TD 0",
49 "OUT",
50 "IN",
51 "TD 1"
52};
53
54static const char *speed_labels[] = {
55 "Full",
56 "Low"
57};
58
59static const char *format_labels[] = {
60 "Control",
61 "ISO",
62 "Bulk",
63 "Interrupt"
64};
65
66static inline struct debug_stats *stats_for_urb(struct imx21 *imx21,
67 struct urb *urb)
68{
69 return usb_pipeisoc(urb->pipe) ?
70 &imx21->isoc_stats : &imx21->nonisoc_stats;
71}
72
73static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb)
74{
75 stats_for_urb(imx21, urb)->submitted++;
76}
77
78static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st)
79{
80 if (st)
81 stats_for_urb(imx21, urb)->completed_failed++;
82 else
83 stats_for_urb(imx21, urb)->completed_ok++;
84}
85
86static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb)
87{
88 stats_for_urb(imx21, urb)->unlinked++;
89}
90
91static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb)
92{
93 stats_for_urb(imx21, urb)->queue_etd++;
94}
95
96static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb)
97{
98 stats_for_urb(imx21, urb)->queue_dmem++;
99}
100
101static inline void debug_etd_allocated(struct imx21 *imx21)
102{
103 imx21->etd_usage.maximum = max(
104 ++(imx21->etd_usage.value),
105 imx21->etd_usage.maximum);
106}
107
108static inline void debug_etd_freed(struct imx21 *imx21)
109{
110 imx21->etd_usage.value--;
111}
112
113static inline void debug_dmem_allocated(struct imx21 *imx21, int size)
114{
115 imx21->dmem_usage.value += size;
116 imx21->dmem_usage.maximum = max(
117 imx21->dmem_usage.value,
118 imx21->dmem_usage.maximum);
119}
120
121static inline void debug_dmem_freed(struct imx21 *imx21, int size)
122{
123 imx21->dmem_usage.value -= size;
124}
125
126
127static void debug_isoc_submitted(struct imx21 *imx21,
128 int frame, struct td *td)
129{
130 struct debug_isoc_trace *trace = &imx21->isoc_trace[
131 imx21->isoc_trace_index++];
132
133 imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace);
134 trace->schedule_frame = td->frame;
135 trace->submit_frame = frame;
136 trace->request_len = td->len;
137 trace->td = td;
138}
139
140static inline void debug_isoc_completed(struct imx21 *imx21,
141 int frame, struct td *td, int cc, int len)
142{
143 struct debug_isoc_trace *trace, *trace_failed;
144 int i;
145 int found = 0;
146
147 trace = imx21->isoc_trace;
148 for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) {
149 if (trace->td == td) {
150 trace->done_frame = frame;
151 trace->done_len = len;
152 trace->cc = cc;
153 trace->td = NULL;
154 found = 1;
155 break;
156 }
157 }
158
159 if (found && cc) {
160 trace_failed = &imx21->isoc_trace_failed[
161 imx21->isoc_trace_index_failed++];
162
163 imx21->isoc_trace_index_failed %= ARRAY_SIZE(
164 imx21->isoc_trace_failed);
165 *trace_failed = *trace;
166 }
167}
168
169
170static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize)
171{
172 if (ep)
173 snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)",
174 ep->desc.bEndpointAddress,
175 usb_endpoint_type(&ep->desc),
176 ep);
177 else
178 snprintf(buf, bufsize, "none");
179 return buf;
180}
181
182static char *format_etd_dword0(u32 value, char *buf, int bufsize)
183{
184 snprintf(buf, bufsize,
185 "addr=%d ep=%d dir=%s speed=%s format=%s halted=%d",
186 value & 0x7F,
187 (value >> DW0_ENDPNT) & 0x0F,
188 dir_labels[(value >> DW0_DIRECT) & 0x03],
189 speed_labels[(value >> DW0_SPEED) & 0x01],
190 format_labels[(value >> DW0_FORMAT) & 0x03],
191 (value >> DW0_HALTED) & 0x01);
192 return buf;
193}
194
195static int debug_status_show(struct seq_file *s, void *v)
196{
197 struct imx21 *imx21 = s->private;
198 int etds_allocated = 0;
199 int etds_sw_busy = 0;
200 int etds_hw_busy = 0;
201 int dmem_blocks = 0;
202 int queued_for_etd = 0;
203 int queued_for_dmem = 0;
204 unsigned int dmem_bytes = 0;
205 int i;
206 struct etd_priv *etd;
207 u32 etd_enable_mask;
208 unsigned long flags;
209 struct imx21_dmem_area *dmem;
210 struct ep_priv *ep_priv;
211
212 spin_lock_irqsave(&imx21->lock, flags);
213
214 etd_enable_mask = readl(imx21->regs + USBH_ETDENSET);
215 for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
216 if (etd->alloc)
217 etds_allocated++;
218 if (etd->urb)
219 etds_sw_busy++;
220 if (etd_enable_mask & (1<<i))
221 etds_hw_busy++;
222 }
223
224 list_for_each_entry(dmem, &imx21->dmem_list, list) {
225 dmem_bytes += dmem->size;
226 dmem_blocks++;
227 }
228
229 list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue)
230 queued_for_etd++;
231
232 list_for_each_entry(etd, &imx21->queue_for_dmem, queue)
233 queued_for_dmem++;
234
235 spin_unlock_irqrestore(&imx21->lock, flags);
236
237 seq_printf(s,
238 "Frame: %d\n"
239 "ETDs allocated: %d/%d (max=%d)\n"
240 "ETDs in use sw: %d\n"
241 "ETDs in use hw: %d\n"
242 "DMEM alocated: %d/%d (max=%d)\n"
243 "DMEM blocks: %d\n"
244 "Queued waiting for ETD: %d\n"
245 "Queued waiting for DMEM: %d\n",
246 readl(imx21->regs + USBH_FRMNUB) & 0xFFFF,
247 etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum,
248 etds_sw_busy,
249 etds_hw_busy,
250 dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum,
251 dmem_blocks,
252 queued_for_etd,
253 queued_for_dmem);
254
255 return 0;
256}
257
258static int debug_dmem_show(struct seq_file *s, void *v)
259{
260 struct imx21 *imx21 = s->private;
261 struct imx21_dmem_area *dmem;
262 unsigned long flags;
263 char ep_text[40];
264
265 spin_lock_irqsave(&imx21->lock, flags);
266
267 list_for_each_entry(dmem, &imx21->dmem_list, list)
268 seq_printf(s,
269 "%04X: size=0x%X "
270 "ep=%s\n",
271 dmem->offset, dmem->size,
272 format_ep(dmem->ep, ep_text, sizeof(ep_text)));
273
274 spin_unlock_irqrestore(&imx21->lock, flags);
275
276 return 0;
277}
278
279static int debug_etd_show(struct seq_file *s, void *v)
280{
281 struct imx21 *imx21 = s->private;
282 struct etd_priv *etd;
283 char buf[60];
284 u32 dword;
285 int i, j;
286 unsigned long flags;
287
288 spin_lock_irqsave(&imx21->lock, flags);
289
290 for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
291 int state = -1;
292 struct urb_priv *urb_priv;
293 if (etd->urb) {
294 urb_priv = etd->urb->hcpriv;
295 if (urb_priv)
296 state = urb_priv->state;
297 }
298
299 seq_printf(s,
300 "etd_num: %d\n"
301 "ep: %s\n"
302 "alloc: %d\n"
303 "len: %d\n"
304 "busy sw: %d\n"
305 "busy hw: %d\n"
306 "urb state: %d\n"
307 "current urb: %p\n",
308
309 i,
310 format_ep(etd->ep, buf, sizeof(buf)),
311 etd->alloc,
312 etd->len,
313 etd->urb != NULL,
314 (readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0,
315 state,
316 etd->urb);
317
318 for (j = 0; j < 4; j++) {
319 dword = etd_readl(imx21, i, j);
320 switch (j) {
321 case 0:
322 format_etd_dword0(dword, buf, sizeof(buf));
323 break;
324 case 2:
325 snprintf(buf, sizeof(buf),
326 "cc=0X%02X", dword >> DW2_COMPCODE);
327 break;
328 default:
329 *buf = 0;
330 break;
331 }
332 seq_printf(s,
333 "dword %d: submitted=%08X cur=%08X [%s]\n",
334 j,
335 etd->submitted_dwords[j],
336 dword,
337 buf);
338 }
339 seq_printf(s, "\n");
340 }
341
342 spin_unlock_irqrestore(&imx21->lock, flags);
343
344 return 0;
345}
346
347static void debug_statistics_show_one(struct seq_file *s,
348 const char *name, struct debug_stats *stats)
349{
350 seq_printf(s, "%s:\n"
351 "submitted URBs: %lu\n"
352 "completed OK: %lu\n"
353 "completed failed: %lu\n"
354 "unlinked: %lu\n"
355 "queued for ETD: %lu\n"
356 "queued for DMEM: %lu\n\n",
357 name,
358 stats->submitted,
359 stats->completed_ok,
360 stats->completed_failed,
361 stats->unlinked,
362 stats->queue_etd,
363 stats->queue_dmem);
364}
365
366static int debug_statistics_show(struct seq_file *s, void *v)
367{
368 struct imx21 *imx21 = s->private;
369 unsigned long flags;
370
371 spin_lock_irqsave(&imx21->lock, flags);
372
373 debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats);
374 debug_statistics_show_one(s, "isoc", &imx21->isoc_stats);
375 seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks);
376 spin_unlock_irqrestore(&imx21->lock, flags);
377
378 return 0;
379}
380
381static void debug_isoc_show_one(struct seq_file *s,
382 const char *name, int index, struct debug_isoc_trace *trace)
383{
384 seq_printf(s, "%s %d:\n"
385 "cc=0X%02X\n"
386 "scheduled frame %d (%d)\n"
387 "submittted frame %d (%d)\n"
388 "completed frame %d (%d)\n"
389 "requested length=%d\n"
390 "completed length=%d\n\n",
391 name, index,
392 trace->cc,
393 trace->schedule_frame, trace->schedule_frame & 0xFFFF,
394 trace->submit_frame, trace->submit_frame & 0xFFFF,
395 trace->done_frame, trace->done_frame & 0xFFFF,
396 trace->request_len,
397 trace->done_len);
398}
399
400static int debug_isoc_show(struct seq_file *s, void *v)
401{
402 struct imx21 *imx21 = s->private;
403 struct debug_isoc_trace *trace;
404 unsigned long flags;
405 int i;
406
407 spin_lock_irqsave(&imx21->lock, flags);
408
409 trace = imx21->isoc_trace_failed;
410 for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++)
411 debug_isoc_show_one(s, "isoc failed", i, trace);
412
413 trace = imx21->isoc_trace;
414 for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++)
415 debug_isoc_show_one(s, "isoc", i, trace);
416
417 spin_unlock_irqrestore(&imx21->lock, flags);
418
419 return 0;
420}
421
422static int debug_status_open(struct inode *inode, struct file *file)
423{
424 return single_open(file, debug_status_show, inode->i_private);
425}
426
427static int debug_dmem_open(struct inode *inode, struct file *file)
428{
429 return single_open(file, debug_dmem_show, inode->i_private);
430}
431
432static int debug_etd_open(struct inode *inode, struct file *file)
433{
434 return single_open(file, debug_etd_show, inode->i_private);
435}
436
437static int debug_statistics_open(struct inode *inode, struct file *file)
438{
439 return single_open(file, debug_statistics_show, inode->i_private);
440}
441
442static int debug_isoc_open(struct inode *inode, struct file *file)
443{
444 return single_open(file, debug_isoc_show, inode->i_private);
445}
446
447static const struct file_operations debug_status_fops = {
448 .open = debug_status_open,
449 .read = seq_read,
450 .llseek = seq_lseek,
451 .release = single_release,
452};
453
454static const struct file_operations debug_dmem_fops = {
455 .open = debug_dmem_open,
456 .read = seq_read,
457 .llseek = seq_lseek,
458 .release = single_release,
459};
460
461static const struct file_operations debug_etd_fops = {
462 .open = debug_etd_open,
463 .read = seq_read,
464 .llseek = seq_lseek,
465 .release = single_release,
466};
467
468static const struct file_operations debug_statistics_fops = {
469 .open = debug_statistics_open,
470 .read = seq_read,
471 .llseek = seq_lseek,
472 .release = single_release,
473};
474
475static const struct file_operations debug_isoc_fops = {
476 .open = debug_isoc_open,
477 .read = seq_read,
478 .llseek = seq_lseek,
479 .release = single_release,
480};
481
482static void create_debug_files(struct imx21 *imx21)
483{
484 imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL);
485 if (!imx21->debug_root)
486 goto failed_create_rootdir;
487
488 if (!debugfs_create_file("status", S_IRUGO,
489 imx21->debug_root, imx21, &debug_status_fops))
490 goto failed_create;
491
492 if (!debugfs_create_file("dmem", S_IRUGO,
493 imx21->debug_root, imx21, &debug_dmem_fops))
494 goto failed_create;
495
496 if (!debugfs_create_file("etd", S_IRUGO,
497 imx21->debug_root, imx21, &debug_etd_fops))
498 goto failed_create;
499
500 if (!debugfs_create_file("statistics", S_IRUGO,
501 imx21->debug_root, imx21, &debug_statistics_fops))
502 goto failed_create;
503
504 if (!debugfs_create_file("isoc", S_IRUGO,
505 imx21->debug_root, imx21, &debug_isoc_fops))
506 goto failed_create;
507
508 return;
509
510failed_create:
511 debugfs_remove_recursive(imx21->debug_root);
512
513failed_create_rootdir:
514 imx21->debug_root = NULL;
515}
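/*
 * Added note: with debugfs mounted in the usual place this creates a set of
 * read-only files under a per-controller directory (named after
 * dev_name(imx21->dev)), e.g.:
 *
 *   /sys/kernel/debug/<dev_name>/status      - ETD/DMEM usage summary
 *   /sys/kernel/debug/<dev_name>/dmem        - current data memory allocations
 *   /sys/kernel/debug/<dev_name>/etd         - per-ETD state and dword contents
 *   /sys/kernel/debug/<dev_name>/statistics  - URB counters (isoc and non-isoc)
 *   /sys/kernel/debug/<dev_name>/isoc        - recent isochronous TD traces
 *
 * The exact mount point depends on the system configuration.
 */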
516
517
518static void remove_debug_files(struct imx21 *imx21)
519{
520 if (imx21->debug_root) {
521 debugfs_remove_recursive(imx21->debug_root);
522 imx21->debug_root = NULL;
523 }
524}
525
526#endif
527
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
new file mode 100644
index 000000000000..213e270e1c29
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.c
@@ -0,0 +1,1789 @@
1/*
2 * USB Host Controller Driver for IMX21
3 *
4 * Copyright (C) 2006 Loping Dog Embedded Systems
5 * Copyright (C) 2009 Martin Fuzzey
6 * Originally written by Jay Monkman <jtm@lopingdog.com>
7 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24
25 /*
26 * The i.MX21 USB hardware contains
27 * * 32 transfer descriptors (called ETDs)
 28 *    * 4 KiB of data memory
29 *
 30 * The data memory is shared between the host and function controllers
 31 * (but this driver only supports the host controller)
32 *
33 * So setting up a transfer involves:
 34 *    * Allocating an ETD
 35 *    * Filling in the ETD with the appropriate information
 36 *    * Allocating data memory (and putting the offset in the ETD)
 37 *    * Activating the ETD
 38 *    * Getting an interrupt when done.
39 *
40 * An ETD is assigned to each active endpoint.
41 *
42 * Low resource (ETD and Data memory) situations are handled differently for
 43 * isochronous and non-isochronous transactions:
44 *
 45 * Non-ISOC transfers are queued if either ETDs or data memory is unavailable
46 *
47 * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
48 * They allocate both ETDs and Data memory during URB submission
49 * (and fail if unavailable).
50 */
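/*
 * Added sizing illustration (not in the original comment): with the 4 KiB of
 * DMEM above, a non-ISO endpoint with a 64 byte maxpacket and a multi-packet
 * transfer needs 2 * 64 = 128 bytes of DMEM (X and Y buffers), so roughly
 * 4096 / 128 = 32 such endpoints can have transfers in flight before further
 * URBs are queued waiting for DMEM.
 */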
51
52#include <linux/clk.h>
53#include <linux/io.h>
54#include <linux/kernel.h>
55#include <linux/list.h>
56#include <linux/platform_device.h>
57#include <linux/usb.h>
58
59#include "../core/hcd.h"
60#include "imx21-hcd.h"
61
62#ifdef DEBUG
63#define DEBUG_LOG_FRAME(imx21, etd, event) \
64 (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
65#else
66#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
67#endif
68
69static const char hcd_name[] = "imx21-hcd";
70
71static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
72{
73 return (struct imx21 *)hcd->hcd_priv;
74}
75
76
77/* =========================================== */
78/* Hardware access helpers */
79/* =========================================== */
80
81static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
82{
83 void __iomem *reg = imx21->regs + offset;
84 writel(readl(reg) | mask, reg);
85}
86
87static inline void clear_register_bits(struct imx21 *imx21,
88 u32 offset, u32 mask)
89{
90 void __iomem *reg = imx21->regs + offset;
91 writel(readl(reg) & ~mask, reg);
92}
93
94static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
95{
96 void __iomem *reg = imx21->regs + offset;
97
98 if (readl(reg) & mask)
99 writel(mask, reg);
100}
101
102static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
103{
104 void __iomem *reg = imx21->regs + offset;
105
106 if (!(readl(reg) & mask))
107 writel(mask, reg);
108}
109
110static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
111{
112 writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
113}
114
115static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
116{
117 return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
118}
119
120static inline int wrap_frame(int counter)
121{
122 return counter & 0xFFFF;
123}
124
125static inline int frame_after(int frame, int after)
126{
 127 /* handle wrapping like jiffies time_after() */
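 /*
  * Added worked example: if the 16-bit counter has just wrapped, e.g.
  * frame = 0x0001 and after = 0xFFFE, then (s16)0xFFFE - (s16)0x0001 =
  * -2 - 1 = -3 < 0, so the new frame is still correctly reported as
  * coming after the old one.
  */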
128 return (s16)((s16)after - (s16)frame) < 0;
129}
130
131static int imx21_hc_get_frame(struct usb_hcd *hcd)
132{
133 struct imx21 *imx21 = hcd_to_imx21(hcd);
134
135 return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
136}
137
138
139#include "imx21-dbg.c"
140
141/* =========================================== */
142/* ETD management */
143/* =========================================== */
144
145static int alloc_etd(struct imx21 *imx21)
146{
147 int i;
148 struct etd_priv *etd = imx21->etd;
149
150 for (i = 0; i < USB_NUM_ETD; i++, etd++) {
151 if (etd->alloc == 0) {
152 memset(etd, 0, sizeof(imx21->etd[0]));
153 etd->alloc = 1;
154 debug_etd_allocated(imx21);
155 return i;
156 }
157 }
158 return -1;
159}
160
161static void disactivate_etd(struct imx21 *imx21, int num)
162{
163 int etd_mask = (1 << num);
164 struct etd_priv *etd = &imx21->etd[num];
165
166 writel(etd_mask, imx21->regs + USBH_ETDENCLR);
167 clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
168 writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
169 clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
170
171 etd->active_count = 0;
172
173 DEBUG_LOG_FRAME(imx21, etd, disactivated);
174}
175
176static void reset_etd(struct imx21 *imx21, int num)
177{
178 struct etd_priv *etd = imx21->etd + num;
179 int i;
180
181 disactivate_etd(imx21, num);
182
183 for (i = 0; i < 4; i++)
184 etd_writel(imx21, num, i, 0);
185 etd->urb = NULL;
186 etd->ep = NULL;
 187 etd->td = NULL;
188}
189
190static void free_etd(struct imx21 *imx21, int num)
191{
192 if (num < 0)
193 return;
194
195 if (num >= USB_NUM_ETD) {
196 dev_err(imx21->dev, "BAD etd=%d!\n", num);
197 return;
198 }
199 if (imx21->etd[num].alloc == 0) {
200 dev_err(imx21->dev, "ETD %d already free!\n", num);
201 return;
202 }
203
204 debug_etd_freed(imx21);
205 reset_etd(imx21, num);
206 memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
207}
208
209
210static void setup_etd_dword0(struct imx21 *imx21,
211 int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
212{
213 etd_writel(imx21, etd_num, 0,
214 ((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
215 ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
216 ((u32) dir << DW0_DIRECT) |
217 ((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
218 1 : 0) << DW0_SPEED) |
219 ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
220 ((u32) maxpacket << DW0_MAXPKTSIZ));
221}
222
223static void activate_etd(struct imx21 *imx21,
224 int etd_num, dma_addr_t dma, u8 dir)
225{
226 u32 etd_mask = 1 << etd_num;
227 struct etd_priv *etd = &imx21->etd[etd_num];
228
229 clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
230 set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
231 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
232 clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
233
234 if (dma) {
235 set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
236 clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
237 clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
238 writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
239 set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
240 } else {
241 if (dir != TD_DIR_IN) {
242 /* need to set for ZLP */
243 set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
244 set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
245 }
246 }
247
248 DEBUG_LOG_FRAME(imx21, etd, activated);
249
250#ifdef DEBUG
251 if (!etd->active_count) {
252 int i;
253 etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
254 etd->disactivated_frame = -1;
255 etd->last_int_frame = -1;
256 etd->last_req_frame = -1;
257
258 for (i = 0; i < 4; i++)
259 etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
260 }
261#endif
262
263 etd->active_count = 1;
264 writel(etd_mask, imx21->regs + USBH_ETDENSET);
265}
266
267/* =========================================== */
268/* Data memory management */
269/* =========================================== */
270
271static int alloc_dmem(struct imx21 *imx21, unsigned int size,
272 struct usb_host_endpoint *ep)
273{
274 unsigned int offset = 0;
275 struct imx21_dmem_area *area;
276 struct imx21_dmem_area *tmp;
277
278 size += (~size + 1) & 0x3; /* Round to 4 byte multiple */
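 /*
  * Added note: (~size + 1) is -size in two's complement, so adding
  * ((~size + 1) & 0x3) rounds size up to the next multiple of 4;
  * e.g. 13 -> 16, while 16 stays 16.
  */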
279
280 if (size > DMEM_SIZE) {
281 dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
282 size, DMEM_SIZE);
283 return -EINVAL;
284 }
285
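 /*
  * Added note: dmem_list is kept sorted by offset, so this is a first-fit
  * search - walk the existing areas and use the first gap big enough for
  * the request, failing once DMEM_SIZE would be exceeded.  The
  * list_add_tail() below then inserts the new area just before the first
  * area that starts beyond it (or at the tail), preserving the ordering.
  */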
286 list_for_each_entry(tmp, &imx21->dmem_list, list) {
287 if ((size + offset) < offset)
288 goto fail;
289 if ((size + offset) <= tmp->offset)
290 break;
291 offset = tmp->size + tmp->offset;
292 if ((offset + size) > DMEM_SIZE)
293 goto fail;
294 }
295
296 area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
297 if (area == NULL)
298 return -ENOMEM;
299
300 area->ep = ep;
301 area->offset = offset;
302 area->size = size;
303 list_add_tail(&area->list, &tmp->list);
304 debug_dmem_allocated(imx21, size);
305 return offset;
306
307fail:
308 return -ENOMEM;
309}
310
311/* Memory now available for a queued ETD - activate it */
312static void activate_queued_etd(struct imx21 *imx21,
313 struct etd_priv *etd, u32 dmem_offset)
314{
315 struct urb_priv *urb_priv = etd->urb->hcpriv;
316 int etd_num = etd - &imx21->etd[0];
317 u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
318 u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
319
320 dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
321 etd_num);
322 etd_writel(imx21, etd_num, 1,
323 ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
324
325 urb_priv->active = 1;
326 activate_etd(imx21, etd_num, etd->dma_handle, dir);
327}
328
329static void free_dmem(struct imx21 *imx21, int offset)
330{
331 struct imx21_dmem_area *area;
332 struct etd_priv *etd, *tmp;
333 int found = 0;
334
335 list_for_each_entry(area, &imx21->dmem_list, list) {
336 if (area->offset == offset) {
337 debug_dmem_freed(imx21, area->size);
338 list_del(&area->list);
339 kfree(area);
340 found = 1;
341 break;
342 }
343 }
344
345 if (!found) {
346 dev_err(imx21->dev,
347 "Trying to free unallocated DMEM %d\n", offset);
348 return;
349 }
350
351 /* Try again to allocate memory for anything we've queued */
352 list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
353 offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
354 if (offset >= 0) {
355 list_del(&etd->queue);
356 activate_queued_etd(imx21, etd, (u32)offset);
357 }
358 }
359}
360
361static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
362{
363 struct imx21_dmem_area *area, *tmp;
364
365 list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
366 if (area->ep == ep) {
367 dev_err(imx21->dev,
368 "Active DMEM %d for disabled ep=%p\n",
369 area->offset, ep);
370 list_del(&area->list);
371 kfree(area);
372 }
373 }
374}
375
376
377/* =========================================== */
 378/* Endpoint idle and URB completion handling */
379/* =========================================== */
380static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
381
 382/* Endpoint now idle - release its ETD(s) or assign them to a queued request */
383static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
384{
385 int etd_num;
386 int i;
387
388 for (i = 0; i < NUM_ISO_ETDS; i++) {
389 etd_num = ep_priv->etd[i];
390 if (etd_num < 0)
391 continue;
392
393 ep_priv->etd[i] = -1;
394 if (list_empty(&imx21->queue_for_etd)) {
395 free_etd(imx21, etd_num);
396 continue;
397 }
398
399 dev_dbg(imx21->dev,
400 "assigning idle etd %d for queued request\n", etd_num);
401 ep_priv = list_first_entry(&imx21->queue_for_etd,
402 struct ep_priv, queue);
403 list_del(&ep_priv->queue);
404 reset_etd(imx21, etd_num);
405 ep_priv->waiting_etd = 0;
406 ep_priv->etd[i] = etd_num;
407
408 if (list_empty(&ep_priv->ep->urb_list)) {
409 dev_err(imx21->dev, "No urb for queued ep!\n");
410 continue;
411 }
412 schedule_nonisoc_etd(imx21, list_first_entry(
413 &ep_priv->ep->urb_list, struct urb, urb_list));
414 }
415}
416
417static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
418__releases(imx21->lock)
419__acquires(imx21->lock)
420{
421 struct imx21 *imx21 = hcd_to_imx21(hcd);
422 struct ep_priv *ep_priv = urb->ep->hcpriv;
423 struct urb_priv *urb_priv = urb->hcpriv;
424
425 debug_urb_completed(imx21, urb, status);
426 dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
427
428 kfree(urb_priv->isoc_td);
429 kfree(urb->hcpriv);
430 urb->hcpriv = NULL;
431 usb_hcd_unlink_urb_from_ep(hcd, urb);
432 spin_unlock(&imx21->lock);
433 usb_hcd_giveback_urb(hcd, urb, status);
434 spin_lock(&imx21->lock);
435 if (list_empty(&ep_priv->ep->urb_list))
436 ep_idle(imx21, ep_priv);
437}
438
439/* =========================================== */
440/* ISOC Handling ... */
441/* =========================================== */
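/*
 * Added summary: each isochronous endpoint owns NUM_ISO_ETDS ETDs so one
 * transfer descriptor can be on the bus while the next TD is loaded into
 * the other.  schedule_isoc_etds() keeps every idle ETD of the endpoint
 * loaded with the next pending TD, and TDs whose target frame has already
 * passed are completed with -EXDEV instead of being submitted.
 */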
442
443static void schedule_isoc_etds(struct usb_hcd *hcd,
444 struct usb_host_endpoint *ep)
445{
446 struct imx21 *imx21 = hcd_to_imx21(hcd);
447 struct ep_priv *ep_priv = ep->hcpriv;
448 struct etd_priv *etd;
449 struct urb_priv *urb_priv;
450 struct td *td;
451 int etd_num;
452 int i;
453 int cur_frame;
454 u8 dir;
455
456 for (i = 0; i < NUM_ISO_ETDS; i++) {
457too_late:
458 if (list_empty(&ep_priv->td_list))
459 break;
460
461 etd_num = ep_priv->etd[i];
462 if (etd_num < 0)
463 break;
464
465 etd = &imx21->etd[etd_num];
466 if (etd->urb)
467 continue;
468
469 td = list_entry(ep_priv->td_list.next, struct td, list);
470 list_del(&td->list);
471 urb_priv = td->urb->hcpriv;
472
473 cur_frame = imx21_hc_get_frame(hcd);
474 if (frame_after(cur_frame, td->frame)) {
475 dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
476 cur_frame, td->frame);
477 urb_priv->isoc_status = -EXDEV;
478 td->urb->iso_frame_desc[
479 td->isoc_index].actual_length = 0;
480 td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
481 if (--urb_priv->isoc_remaining == 0)
482 urb_done(hcd, td->urb, urb_priv->isoc_status);
483 goto too_late;
484 }
485
486 urb_priv->active = 1;
487 etd->td = td;
488 etd->ep = td->ep;
489 etd->urb = td->urb;
490 etd->len = td->len;
491
492 debug_isoc_submitted(imx21, cur_frame, td);
493
494 dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
495 setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
496 etd_writel(imx21, etd_num, 1, etd->dmem_offset);
497 etd_writel(imx21, etd_num, 2,
498 (TD_NOTACCESSED << DW2_COMPCODE) |
499 ((td->frame & 0xFFFF) << DW2_STARTFRM));
500 etd_writel(imx21, etd_num, 3,
501 (TD_NOTACCESSED << DW3_COMPCODE0) |
502 (td->len << DW3_PKTLEN0));
503
504 activate_etd(imx21, etd_num, td->data, dir);
505 }
506}
507
508static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
509{
510 struct imx21 *imx21 = hcd_to_imx21(hcd);
511 int etd_mask = 1 << etd_num;
512 struct urb_priv *urb_priv = urb->hcpriv;
513 struct etd_priv *etd = imx21->etd + etd_num;
514 struct td *td = etd->td;
515 struct usb_host_endpoint *ep = etd->ep;
516 int isoc_index = td->isoc_index;
517 unsigned int pipe = urb->pipe;
518 int dir_in = usb_pipein(pipe);
519 int cc;
520 int bytes_xfrd;
521
522 disactivate_etd(imx21, etd_num);
523
524 cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
525 bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
526
527 /* Input doesn't always fill the buffer, don't generate an error
528 * when this happens.
529 */
530 if (dir_in && (cc == TD_DATAUNDERRUN))
531 cc = TD_CC_NOERROR;
532
533 if (cc == TD_NOTACCESSED)
534 bytes_xfrd = 0;
535
536 debug_isoc_completed(imx21,
537 imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
538 if (cc) {
539 urb_priv->isoc_status = -EXDEV;
540 dev_dbg(imx21->dev,
541 "bad iso cc=0x%X frame=%d sched frame=%d "
542 "cnt=%d len=%d urb=%p etd=%d index=%d\n",
543 cc, imx21_hc_get_frame(hcd), td->frame,
544 bytes_xfrd, td->len, urb, etd_num, isoc_index);
545 }
546
547 if (dir_in)
548 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
549
550 urb->actual_length += bytes_xfrd;
551 urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
552 urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
553
554 etd->td = NULL;
555 etd->urb = NULL;
556 etd->ep = NULL;
557
558 if (--urb_priv->isoc_remaining == 0)
559 urb_done(hcd, urb, urb_priv->isoc_status);
560
561 schedule_isoc_etds(hcd, ep);
562}
563
564static struct ep_priv *alloc_isoc_ep(
565 struct imx21 *imx21, struct usb_host_endpoint *ep)
566{
567 struct ep_priv *ep_priv;
568 int i;
569
570 ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
571 if (ep_priv == NULL)
572 return NULL;
573
574 /* Allocate the ETDs */
575 for (i = 0; i < NUM_ISO_ETDS; i++) {
576 ep_priv->etd[i] = alloc_etd(imx21);
577 if (ep_priv->etd[i] < 0) {
578 int j;
579 dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
580 for (j = 0; j < i; j++)
581 free_etd(imx21, ep_priv->etd[j]);
582 goto alloc_etd_failed;
583 }
584 imx21->etd[ep_priv->etd[i]].ep = ep;
585 }
586
587 INIT_LIST_HEAD(&ep_priv->td_list);
588 ep_priv->ep = ep;
589 ep->hcpriv = ep_priv;
590 return ep_priv;
591
592alloc_etd_failed:
593 kfree(ep_priv);
594 return NULL;
595}
596
597static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
598 struct usb_host_endpoint *ep,
599 struct urb *urb, gfp_t mem_flags)
600{
601 struct imx21 *imx21 = hcd_to_imx21(hcd);
602 struct urb_priv *urb_priv;
603 unsigned long flags;
604 struct ep_priv *ep_priv;
605 struct td *td = NULL;
606 int i;
607 int ret;
608 int cur_frame;
609 u16 maxpacket;
610
611 urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
612 if (urb_priv == NULL)
613 return -ENOMEM;
614
615 urb_priv->isoc_td = kzalloc(
616 sizeof(struct td) * urb->number_of_packets, mem_flags);
617 if (urb_priv->isoc_td == NULL) {
618 ret = -ENOMEM;
619 goto alloc_td_failed;
620 }
621
622 spin_lock_irqsave(&imx21->lock, flags);
623
624 if (ep->hcpriv == NULL) {
625 ep_priv = alloc_isoc_ep(imx21, ep);
626 if (ep_priv == NULL) {
627 ret = -ENOMEM;
628 goto alloc_ep_failed;
629 }
630 } else {
631 ep_priv = ep->hcpriv;
632 }
633
634 ret = usb_hcd_link_urb_to_ep(hcd, urb);
635 if (ret)
636 goto link_failed;
637
638 urb->status = -EINPROGRESS;
639 urb->actual_length = 0;
640 urb->error_count = 0;
641 urb->hcpriv = urb_priv;
642 urb_priv->ep = ep;
643
644 /* allocate data memory for largest packets if not already done */
645 maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
646 for (i = 0; i < NUM_ISO_ETDS; i++) {
647 struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
648
649 if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
650 /* not sure if this can really occur.... */
651 dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
652 etd->dmem_size, maxpacket);
653 ret = -EMSGSIZE;
654 goto alloc_dmem_failed;
655 }
656
657 if (etd->dmem_size == 0) {
658 etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
659 if (etd->dmem_offset < 0) {
660 dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
661 ret = -EAGAIN;
662 goto alloc_dmem_failed;
663 }
664 etd->dmem_size = maxpacket;
665 }
666 }
667
668 /* calculate frame */
669 cur_frame = imx21_hc_get_frame(hcd);
670 if (urb->transfer_flags & URB_ISO_ASAP) {
671 if (list_empty(&ep_priv->td_list))
672 urb->start_frame = cur_frame + 5;
673 else
674 urb->start_frame = list_entry(
675 ep_priv->td_list.prev,
676 struct td, list)->frame + urb->interval;
677 }
678 urb->start_frame = wrap_frame(urb->start_frame);
679 if (frame_after(cur_frame, urb->start_frame)) {
680 dev_dbg(imx21->dev,
681 "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
682 urb->start_frame, cur_frame,
683 (urb->transfer_flags & URB_ISO_ASAP) != 0);
684 urb->start_frame = wrap_frame(cur_frame + 1);
685 }
686
687 /* set up transfers */
688 td = urb_priv->isoc_td;
689 for (i = 0; i < urb->number_of_packets; i++, td++) {
690 td->ep = ep;
691 td->urb = urb;
692 td->len = urb->iso_frame_desc[i].length;
693 td->isoc_index = i;
694 td->frame = wrap_frame(urb->start_frame + urb->interval * i);
695 td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
696 list_add_tail(&td->list, &ep_priv->td_list);
697 }
698
699 urb_priv->isoc_remaining = urb->number_of_packets;
700 dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
701 urb->number_of_packets, urb->start_frame, td->frame);
702
703 debug_urb_submitted(imx21, urb);
704 schedule_isoc_etds(hcd, ep);
705
706 spin_unlock_irqrestore(&imx21->lock, flags);
707 return 0;
708
709alloc_dmem_failed:
710 usb_hcd_unlink_urb_from_ep(hcd, urb);
711
712link_failed:
713alloc_ep_failed:
714 spin_unlock_irqrestore(&imx21->lock, flags);
715 kfree(urb_priv->isoc_td);
716
717alloc_td_failed:
718 kfree(urb_priv);
719 return ret;
720}
721
722static void dequeue_isoc_urb(struct imx21 *imx21,
723 struct urb *urb, struct ep_priv *ep_priv)
724{
725 struct urb_priv *urb_priv = urb->hcpriv;
726 struct td *td, *tmp;
727 int i;
728
729 if (urb_priv->active) {
730 for (i = 0; i < NUM_ISO_ETDS; i++) {
731 int etd_num = ep_priv->etd[i];
732 if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
733 struct etd_priv *etd = imx21->etd + etd_num;
734
735 reset_etd(imx21, etd_num);
736 if (etd->dmem_size)
737 free_dmem(imx21, etd->dmem_offset);
738 etd->dmem_size = 0;
739 }
740 }
741 }
742
743 list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
744 if (td->urb == urb) {
745 dev_vdbg(imx21->dev, "removing td %p\n", td);
746 list_del(&td->list);
747 }
748 }
749}
750
751/* =========================================== */
752/* NON ISOC Handling ... */
753/* =========================================== */
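/*
 * Added summary: non-isochronous URBs use a single ETD per endpoint and are
 * driven by a small state machine kept in urb_priv->state:
 *
 *   control: US_CTRL_SETUP -> US_CTRL_DATA (only if there is a data stage)
 *            -> US_CTRL_ACK -> completion
 *   bulk:    US_BULK -> US_BULK0 (extra zero length packet when
 *            URB_ZERO_PACKET requires one) -> completion
 *
 * schedule_nonisoc_etd() programs the ETD for the current state and
 * nonisoc_etd_done() advances the state when the ETD completes.
 */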
754
755static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
756{
757 unsigned int pipe = urb->pipe;
758 struct urb_priv *urb_priv = urb->hcpriv;
759 struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
760 int state = urb_priv->state;
761 int etd_num = ep_priv->etd[0];
762 struct etd_priv *etd;
763 int dmem_offset;
764 u32 count;
765 u16 etd_buf_size;
766 u16 maxpacket;
767 u8 dir;
768 u8 bufround;
769 u8 datatoggle;
770 u8 interval = 0;
771 u8 relpolpos = 0;
772
773 if (etd_num < 0) {
774 dev_err(imx21->dev, "No valid ETD\n");
775 return;
776 }
777 if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
778 dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
779
780 etd = &imx21->etd[etd_num];
781 maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
782 if (!maxpacket)
783 maxpacket = 8;
784
785 if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
786 if (state == US_CTRL_SETUP) {
787 dir = TD_DIR_SETUP;
788 etd->dma_handle = urb->setup_dma;
789 bufround = 0;
790 count = 8;
791 datatoggle = TD_TOGGLE_DATA0;
792 } else { /* US_CTRL_ACK */
793 dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
794 etd->dma_handle = urb->transfer_dma;
795 bufround = 0;
796 count = 0;
797 datatoggle = TD_TOGGLE_DATA1;
798 }
799 } else {
800 dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
801 bufround = (dir == TD_DIR_IN) ? 1 : 0;
802 etd->dma_handle = urb->transfer_dma;
803 if (usb_pipebulk(pipe) && (state == US_BULK0))
804 count = 0;
805 else
806 count = urb->transfer_buffer_length;
807
808 if (usb_pipecontrol(pipe)) {
809 datatoggle = TD_TOGGLE_DATA1;
810 } else {
811 if (usb_gettoggle(
812 urb->dev,
813 usb_pipeendpoint(urb->pipe),
814 usb_pipeout(urb->pipe)))
815 datatoggle = TD_TOGGLE_DATA1;
816 else
817 datatoggle = TD_TOGGLE_DATA0;
818 }
819 }
820
821 etd->urb = urb;
822 etd->ep = urb_priv->ep;
823 etd->len = count;
824
825 if (usb_pipeint(pipe)) {
826 interval = urb->interval;
827 relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
828 }
829
830 /* Write ETD to device memory */
831 setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
832
833 etd_writel(imx21, etd_num, 2,
834 (u32) interval << DW2_POLINTERV |
835 ((u32) relpolpos << DW2_RELPOLPOS) |
836 ((u32) dir << DW2_DIRPID) |
837 ((u32) bufround << DW2_BUFROUND) |
838 ((u32) datatoggle << DW2_DATATOG) |
839 ((u32) TD_NOTACCESSED << DW2_COMPCODE));
840
841 /* DMA will always transfer buffer size even if TOBYCNT in DWORD3
842 is smaller. Make sure we don't overrun the buffer!
843 */
844 if (count && count < maxpacket)
845 etd_buf_size = count;
846 else
847 etd_buf_size = maxpacket;
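 /*
  * Added example: for a 10 byte data stage with a 64 byte maxpacket,
  * count = 10 < maxpacket, so the X/Y buffer size is limited to 10 bytes
  * rather than 64 and the DMA engine cannot run past the 10 byte
  * transfer buffer.
  */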
848
849 etd_writel(imx21, etd_num, 3,
850 ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
851
852 if (!count)
853 etd->dma_handle = 0;
854
855 /* allocate x and y buffer space at once */
856 etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
857 dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
858 if (dmem_offset < 0) {
859 /* Setup everything we can in HW and update when we get DMEM */
860 etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
861
862 dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
863 debug_urb_queued_for_dmem(imx21, urb);
864 list_add_tail(&etd->queue, &imx21->queue_for_dmem);
865 return;
866 }
867
868 etd_writel(imx21, etd_num, 1,
869 (((u32) dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
870 (u32) dmem_offset);
871
872 urb_priv->active = 1;
873
874 /* enable the ETD to kick off transfer */
875 dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
876 etd_num, count, dir != TD_DIR_IN ? "out" : "in");
877 activate_etd(imx21, etd_num, etd->dma_handle, dir);
878
879}
880
881static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
882{
883 struct imx21 *imx21 = hcd_to_imx21(hcd);
884 struct etd_priv *etd = &imx21->etd[etd_num];
885 u32 etd_mask = 1 << etd_num;
886 struct urb_priv *urb_priv = urb->hcpriv;
887 int dir;
888 u16 xbufaddr;
889 int cc;
890 u32 bytes_xfrd;
891 int etd_done;
892
893 disactivate_etd(imx21, etd_num);
894
895 dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
896 xbufaddr = etd_readl(imx21, etd_num, 1) & 0xffff;
897 cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
898 bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
899
900 /* save toggle carry */
901 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
902 usb_pipeout(urb->pipe),
903 (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
904
905 if (dir == TD_DIR_IN) {
906 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
907 clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
908 }
909 free_dmem(imx21, xbufaddr);
910
911 urb->error_count = 0;
912 if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
913 && (cc == TD_DATAUNDERRUN))
914 cc = TD_CC_NOERROR;
915
916 if (cc != 0)
917 dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
918
919 etd_done = (cc_to_error[cc] != 0); /* stop if error */
920
921 switch (usb_pipetype(urb->pipe)) {
922 case PIPE_CONTROL:
923 switch (urb_priv->state) {
924 case US_CTRL_SETUP:
925 if (urb->transfer_buffer_length > 0)
926 urb_priv->state = US_CTRL_DATA;
927 else
928 urb_priv->state = US_CTRL_ACK;
929 break;
930 case US_CTRL_DATA:
931 urb->actual_length += bytes_xfrd;
932 urb_priv->state = US_CTRL_ACK;
933 break;
934 case US_CTRL_ACK:
935 etd_done = 1;
936 break;
937 default:
938 dev_err(imx21->dev,
939 "Invalid pipe state %d\n", urb_priv->state);
940 etd_done = 1;
941 break;
942 }
943 break;
944
945 case PIPE_BULK:
946 urb->actual_length += bytes_xfrd;
947 if ((urb_priv->state == US_BULK)
948 && (urb->transfer_flags & URB_ZERO_PACKET)
949 && urb->transfer_buffer_length > 0
950 && ((urb->transfer_buffer_length %
951 usb_maxpacket(urb->dev, urb->pipe,
952 usb_pipeout(urb->pipe))) == 0)) {
953 /* need a 0-packet */
954 urb_priv->state = US_BULK0;
955 } else {
956 etd_done = 1;
957 }
958 break;
959
960 case PIPE_INTERRUPT:
961 urb->actual_length += bytes_xfrd;
962 etd_done = 1;
963 break;
964 }
965
966 if (!etd_done) {
967 dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
968 schedule_nonisoc_etd(imx21, urb);
969 } else {
970 struct usb_host_endpoint *ep = urb->ep;
971
972 urb_done(hcd, urb, cc_to_error[cc]);
973 etd->urb = NULL;
974
975 if (!list_empty(&ep->urb_list)) {
976 urb = list_first_entry(&ep->urb_list,
977 struct urb, urb_list);
978 dev_vdbg(imx21->dev, "next URB %p\n", urb);
979 schedule_nonisoc_etd(imx21, urb);
980 }
981 }
982}
983
984static struct ep_priv *alloc_ep(void)
985{
986 int i;
987 struct ep_priv *ep_priv;
988
989 ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
990 if (!ep_priv)
991 return NULL;
992
993 for (i = 0; i < NUM_ISO_ETDS; ++i)
994 ep_priv->etd[i] = -1;
995
996 return ep_priv;
997}
998
999static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
1000 struct urb *urb, gfp_t mem_flags)
1001{
1002 struct imx21 *imx21 = hcd_to_imx21(hcd);
1003 struct usb_host_endpoint *ep = urb->ep;
1004 struct urb_priv *urb_priv;
1005 struct ep_priv *ep_priv;
1006 struct etd_priv *etd;
1007 int ret;
1008 unsigned long flags;
1009 int new_ep = 0;
1010
1011 dev_vdbg(imx21->dev,
1012 "enqueue urb=%p ep=%p len=%d "
1013 "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
1014 urb, ep,
1015 urb->transfer_buffer_length,
1016 urb->transfer_buffer, urb->transfer_dma,
1017 urb->setup_packet, urb->setup_dma);
1018
1019 if (usb_pipeisoc(urb->pipe))
1020 return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);
1021
1022 urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
1023 if (!urb_priv)
1024 return -ENOMEM;
1025
1026 spin_lock_irqsave(&imx21->lock, flags);
1027
1028 ep_priv = ep->hcpriv;
1029 if (ep_priv == NULL) {
1030 ep_priv = alloc_ep();
1031 if (!ep_priv) {
1032 ret = -ENOMEM;
1033 goto failed_alloc_ep;
1034 }
1035 ep->hcpriv = ep_priv;
1036 ep_priv->ep = ep;
1037 new_ep = 1;
1038 }
1039
1040 ret = usb_hcd_link_urb_to_ep(hcd, urb);
1041 if (ret)
1042 goto failed_link;
1043
1044 urb->status = -EINPROGRESS;
1045 urb->actual_length = 0;
1046 urb->error_count = 0;
1047 urb->hcpriv = urb_priv;
1048 urb_priv->ep = ep;
1049
1050 switch (usb_pipetype(urb->pipe)) {
1051 case PIPE_CONTROL:
1052 urb_priv->state = US_CTRL_SETUP;
1053 break;
1054 case PIPE_BULK:
1055 urb_priv->state = US_BULK;
1056 break;
1057 }
1058
1059 debug_urb_submitted(imx21, urb);
1060 if (ep_priv->etd[0] < 0) {
1061 if (ep_priv->waiting_etd) {
1062 dev_dbg(imx21->dev,
1063 "no ETD available already queued %p\n",
1064 ep_priv);
1065 debug_urb_queued_for_etd(imx21, urb);
1066 goto out;
1067 }
1068 ep_priv->etd[0] = alloc_etd(imx21);
1069 if (ep_priv->etd[0] < 0) {
1070 dev_dbg(imx21->dev,
1071 "no ETD available queueing %p\n", ep_priv);
1072 debug_urb_queued_for_etd(imx21, urb);
1073 list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
1074 ep_priv->waiting_etd = 1;
1075 goto out;
1076 }
1077 }
1078
1079 /* Schedule if no URB already active for this endpoint */
1080 etd = &imx21->etd[ep_priv->etd[0]];
1081 if (etd->urb == NULL) {
1082 DEBUG_LOG_FRAME(imx21, etd, last_req);
1083 schedule_nonisoc_etd(imx21, urb);
1084 }
1085
1086out:
1087 spin_unlock_irqrestore(&imx21->lock, flags);
1088 return 0;
1089
1090failed_link:
1091failed_alloc_ep:
1092 spin_unlock_irqrestore(&imx21->lock, flags);
1093 kfree(urb_priv);
1094 return ret;
1095}
1096
1097static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1098 int status)
1099{
1100 struct imx21 *imx21 = hcd_to_imx21(hcd);
1101 unsigned long flags;
1102 struct usb_host_endpoint *ep;
1103 struct ep_priv *ep_priv;
1104 struct urb_priv *urb_priv = urb->hcpriv;
1105 int ret = -EINVAL;
1106
1107 dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
1108 urb, usb_pipeisoc(urb->pipe), status);
1109
1110 spin_lock_irqsave(&imx21->lock, flags);
1111
1112 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1113 if (ret)
1114 goto fail;
1115 ep = urb_priv->ep;
1116 ep_priv = ep->hcpriv;
1117
1118 debug_urb_unlinked(imx21, urb);
1119
1120 if (usb_pipeisoc(urb->pipe)) {
1121 dequeue_isoc_urb(imx21, urb, ep_priv);
1122 schedule_isoc_etds(hcd, ep);
1123 } else if (urb_priv->active) {
1124 int etd_num = ep_priv->etd[0];
1125 if (etd_num != -1) {
1126 disactivate_etd(imx21, etd_num);
1127 free_dmem(imx21, etd_readl(imx21, etd_num, 1) & 0xffff);
1128 imx21->etd[etd_num].urb = NULL;
1129 }
1130 }
1131
1132 urb_done(hcd, urb, status);
1133
1134 spin_unlock_irqrestore(&imx21->lock, flags);
1135 return 0;
1136
1137fail:
1138 spin_unlock_irqrestore(&imx21->lock, flags);
1139 return ret;
1140}
1141
1142/* =========================================== */
1143/* Interrupt dispatch */
1144/* =========================================== */
1145
1146static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
1147{
1148 int etd_num;
1149 int enable_sof_int = 0;
1150 unsigned long flags;
1151
1152 spin_lock_irqsave(&imx21->lock, flags);
1153
1154 for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
1155 u32 etd_mask = 1 << etd_num;
1156 u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
1157 u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
1158 struct etd_priv *etd = &imx21->etd[etd_num];
1159
1160
1161 if (done) {
1162 DEBUG_LOG_FRAME(imx21, etd, last_int);
1163 } else {
1164/*
1165 * Kludge warning!
1166 *
1167 * When multiple transfers are using the bus we sometimes get into a state
1168 * where the transfer has completed (the CC field of the ETD is != 0x0F),
1169 * the ETD has self disabled but the ETDDONESTAT flag is not set
1170 * (and hence no interrupt occurs).
1171 * This causes the transfer in question to hang.
1172 * The kludge below checks for this condition at each SOF and processes any
 1173 * blocked ETDs (after an arbitrary 10 frame wait)
1174 *
1175 * With a single active transfer the usbtest test suite will run for days
1176 * without the kludge.
 1177 * With other bus activity (e.g. mass storage) even just test1 will hang without
1178 * the kludge.
1179 */
1180 u32 dword0;
1181 int cc;
1182
1183 if (etd->active_count && !enabled) /* suspicious... */
1184 enable_sof_int = 1;
1185
1186 if (!sof || enabled || !etd->active_count)
1187 continue;
1188
1189 cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
1190 if (cc == TD_NOTACCESSED)
1191 continue;
1192
1193 if (++etd->active_count < 10)
1194 continue;
1195
1196 dword0 = etd_readl(imx21, etd_num, 0);
1197 dev_dbg(imx21->dev,
1198 "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
1199 etd_num, dword0 & 0x7F,
1200 (dword0 >> DW0_ENDPNT) & 0x0F,
1201 cc);
1202
1203#ifdef DEBUG
1204 dev_dbg(imx21->dev,
1205 "frame: act=%d disact=%d"
1206 " int=%d req=%d cur=%d\n",
1207 etd->activated_frame,
1208 etd->disactivated_frame,
1209 etd->last_int_frame,
1210 etd->last_req_frame,
1211 readl(imx21->regs + USBH_FRMNUB));
1212 imx21->debug_unblocks++;
1213#endif
1214 etd->active_count = 0;
1215/* End of kludge */
1216 }
1217
1218 if (etd->ep == NULL || etd->urb == NULL) {
1219 dev_dbg(imx21->dev,
1220 "Interrupt for unexpected etd %d"
1221 " ep=%p urb=%p\n",
1222 etd_num, etd->ep, etd->urb);
1223 disactivate_etd(imx21, etd_num);
1224 continue;
1225 }
1226
1227 if (usb_pipeisoc(etd->urb->pipe))
1228 isoc_etd_done(hcd, etd->urb, etd_num);
1229 else
1230 nonisoc_etd_done(hcd, etd->urb, etd_num);
1231 }
1232
1233 /* only enable SOF interrupt if it may be needed for the kludge */
1234 if (enable_sof_int)
1235 set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1236 else
1237 clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1238
1239
1240 spin_unlock_irqrestore(&imx21->lock, flags);
1241}
1242
1243static irqreturn_t imx21_irq(struct usb_hcd *hcd)
1244{
1245 struct imx21 *imx21 = hcd_to_imx21(hcd);
1246 u32 ints = readl(imx21->regs + USBH_SYSISR);
1247
1248 if (ints & USBH_SYSIEN_HERRINT)
1249 dev_dbg(imx21->dev, "Scheduling error\n");
1250
1251 if (ints & USBH_SYSIEN_SORINT)
1252 dev_dbg(imx21->dev, "Scheduling overrun\n");
1253
1254 if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
1255 process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
1256
1257 writel(ints, imx21->regs + USBH_SYSISR);
1258 return IRQ_HANDLED;
1259}
1260
1261static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
1262 struct usb_host_endpoint *ep)
1263{
1264 struct imx21 *imx21 = hcd_to_imx21(hcd);
1265 unsigned long flags;
1266 struct ep_priv *ep_priv;
1267 int i;
1268
1269 if (ep == NULL)
1270 return;
1271
1272 spin_lock_irqsave(&imx21->lock, flags);
1273 ep_priv = ep->hcpriv;
1274 dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
1275
1276 if (!list_empty(&ep->urb_list))
1277 dev_dbg(imx21->dev, "ep's URB list is not empty\n");
1278
1279 if (ep_priv != NULL) {
1280 for (i = 0; i < NUM_ISO_ETDS; i++) {
1281 if (ep_priv->etd[i] > -1)
1282 dev_dbg(imx21->dev, "free etd %d for disable\n",
1283 ep_priv->etd[i]);
1284
1285 free_etd(imx21, ep_priv->etd[i]);
1286 }
1287 kfree(ep_priv);
1288 ep->hcpriv = NULL;
1289 }
1290
1291 for (i = 0; i < USB_NUM_ETD; i++) {
1292 if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
1293 dev_err(imx21->dev,
1294 "Active etd %d for disabled ep=%p!\n", i, ep);
1295 free_etd(imx21, i);
1296 }
1297 }
1298 free_epdmem(imx21, ep);
1299 spin_unlock_irqrestore(&imx21->lock, flags);
1300}
1301
1302/* =========================================== */
1303/* Hub handling */
1304/* =========================================== */
1305
1306static int get_hub_descriptor(struct usb_hcd *hcd,
1307 struct usb_hub_descriptor *desc)
1308{
1309 struct imx21 *imx21 = hcd_to_imx21(hcd);
1310 desc->bDescriptorType = 0x29; /* HUB descriptor */
1311 desc->bHubContrCurrent = 0;
1312
1313 desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
1314 & USBH_ROOTHUBA_NDNSTMPRT_MASK;
1315 desc->bDescLength = 9;
1316 desc->bPwrOn2PwrGood = 0;
1317 desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
1318 0x0002 | /* No power switching */
1319 0x0010 | /* No over current protection */
1320 0);
1321
1322 desc->bitmap[0] = 1 << 1;
1323 desc->bitmap[1] = ~0;
1324 return 0;
1325}
1326
1327static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
1328{
1329 struct imx21 *imx21 = hcd_to_imx21(hcd);
1330 int ports;
1331 int changed = 0;
1332 int i;
1333 unsigned long flags;
1334
1335 spin_lock_irqsave(&imx21->lock, flags);
1336 ports = readl(imx21->regs + USBH_ROOTHUBA)
1337 & USBH_ROOTHUBA_NDNSTMPRT_MASK;
1338 if (ports > 7) {
1339 ports = 7;
1340 dev_err(imx21->dev, "ports %d > 7\n", ports);
1341 }
1342 for (i = 0; i < ports; i++) {
1343 if (readl(imx21->regs + USBH_PORTSTAT(i)) &
1344 (USBH_PORTSTAT_CONNECTSC |
1345 USBH_PORTSTAT_PRTENBLSC |
1346 USBH_PORTSTAT_PRTSTATSC |
1347 USBH_PORTSTAT_OVRCURIC |
1348 USBH_PORTSTAT_PRTRSTSC)) {
1349
1350 changed = 1;
1351 buf[0] |= 1 << (i + 1);
1352 }
1353 }
1354 spin_unlock_irqrestore(&imx21->lock, flags);
1355
1356 if (changed)
1357 dev_info(imx21->dev, "Hub status changed\n");
1358 return changed;
1359}
1360
1361static int imx21_hc_hub_control(struct usb_hcd *hcd,
1362 u16 typeReq,
1363 u16 wValue, u16 wIndex, char *buf, u16 wLength)
1364{
1365 struct imx21 *imx21 = hcd_to_imx21(hcd);
1366 int rc = 0;
1367 u32 status_write = 0;
1368
1369 switch (typeReq) {
1370 case ClearHubFeature:
1371 dev_dbg(imx21->dev, "ClearHubFeature\n");
1372 switch (wValue) {
1373 case C_HUB_OVER_CURRENT:
1374 dev_dbg(imx21->dev, " OVER_CURRENT\n");
1375 break;
1376 case C_HUB_LOCAL_POWER:
1377 dev_dbg(imx21->dev, " LOCAL_POWER\n");
1378 break;
1379 default:
1380 dev_dbg(imx21->dev, " unknown\n");
1381 rc = -EINVAL;
1382 break;
1383 }
1384 break;
1385
1386 case ClearPortFeature:
1387 dev_dbg(imx21->dev, "ClearPortFeature\n");
1388 switch (wValue) {
1389 case USB_PORT_FEAT_ENABLE:
1390 dev_dbg(imx21->dev, " ENABLE\n");
1391 status_write = USBH_PORTSTAT_CURCONST;
1392 break;
1393 case USB_PORT_FEAT_SUSPEND:
1394 dev_dbg(imx21->dev, " SUSPEND\n");
1395 status_write = USBH_PORTSTAT_PRTOVRCURI;
1396 break;
1397 case USB_PORT_FEAT_POWER:
1398 dev_dbg(imx21->dev, " POWER\n");
1399 status_write = USBH_PORTSTAT_LSDEVCON;
1400 break;
1401 case USB_PORT_FEAT_C_ENABLE:
1402 dev_dbg(imx21->dev, " C_ENABLE\n");
1403 status_write = USBH_PORTSTAT_PRTENBLSC;
1404 break;
1405 case USB_PORT_FEAT_C_SUSPEND:
1406 dev_dbg(imx21->dev, " C_SUSPEND\n");
1407 status_write = USBH_PORTSTAT_PRTSTATSC;
1408 break;
1409 case USB_PORT_FEAT_C_CONNECTION:
1410 dev_dbg(imx21->dev, " C_CONNECTION\n");
1411 status_write = USBH_PORTSTAT_CONNECTSC;
1412 break;
1413 case USB_PORT_FEAT_C_OVER_CURRENT:
1414 dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
1415 status_write = USBH_PORTSTAT_OVRCURIC;
1416 break;
1417 case USB_PORT_FEAT_C_RESET:
1418 dev_dbg(imx21->dev, " C_RESET\n");
1419 status_write = USBH_PORTSTAT_PRTRSTSC;
1420 break;
1421 default:
1422 dev_dbg(imx21->dev, " unknown\n");
1423 rc = -EINVAL;
1424 break;
1425 }
1426
1427 break;
1428
1429 case GetHubDescriptor:
1430 dev_dbg(imx21->dev, "GetHubDescriptor\n");
1431 rc = get_hub_descriptor(hcd, (void *)buf);
1432 break;
1433
1434 case GetHubStatus:
1435 dev_dbg(imx21->dev, " GetHubStatus\n");
1436 *(__le32 *) buf = 0;
1437 break;
1438
1439 case GetPortStatus:
1440 dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
1441 wIndex, USBH_PORTSTAT(wIndex - 1));
1442 *(__le32 *) buf = readl(imx21->regs +
1443 USBH_PORTSTAT(wIndex - 1));
1444 break;
1445
1446 case SetHubFeature:
1447 dev_dbg(imx21->dev, "SetHubFeature\n");
1448 switch (wValue) {
1449 case C_HUB_OVER_CURRENT:
1450 dev_dbg(imx21->dev, " OVER_CURRENT\n");
1451 break;
1452
1453 case C_HUB_LOCAL_POWER:
1454 dev_dbg(imx21->dev, " LOCAL_POWER\n");
1455 break;
1456 default:
1457 dev_dbg(imx21->dev, " unknown\n");
1458 rc = -EINVAL;
1459 break;
1460 }
1461
1462 break;
1463
1464 case SetPortFeature:
1465 dev_dbg(imx21->dev, "SetPortFeature\n");
1466 switch (wValue) {
1467 case USB_PORT_FEAT_SUSPEND:
1468 dev_dbg(imx21->dev, " SUSPEND\n");
1469 status_write = USBH_PORTSTAT_PRTSUSPST;
1470 break;
1471 case USB_PORT_FEAT_POWER:
1472 dev_dbg(imx21->dev, " POWER\n");
1473 status_write = USBH_PORTSTAT_PRTPWRST;
1474 break;
1475 case USB_PORT_FEAT_RESET:
1476 dev_dbg(imx21->dev, " RESET\n");
1477 status_write = USBH_PORTSTAT_PRTRSTST;
1478 break;
1479 default:
1480 dev_dbg(imx21->dev, " unknown\n");
1481 rc = -EINVAL;
1482 break;
1483 }
1484 break;
1485
1486 default:
1487 dev_dbg(imx21->dev, " unknown\n");
1488 rc = -EINVAL;
1489 break;
1490 }
1491
1492 if (status_write)
1493 writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
1494 return rc;
1495}
1496
1497/* =========================================== */
1498/* Host controller management */
1499/* =========================================== */
1500
1501static int imx21_hc_reset(struct usb_hcd *hcd)
1502{
1503 struct imx21 *imx21 = hcd_to_imx21(hcd);
1504 unsigned long timeout;
1505 unsigned long flags;
1506
1507 spin_lock_irqsave(&imx21->lock, flags);
1508
1509 /* Reset the Host controler modules */
1510 writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
1511 USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
1512 imx21->regs + USBOTG_RST_CTRL);
1513
1514 /* Wait for reset to finish */
1515 timeout = jiffies + HZ;
1516 while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
1517 if (time_after(jiffies, timeout)) {
1518 spin_unlock_irqrestore(&imx21->lock, flags);
1519 dev_err(imx21->dev, "timeout waiting for reset\n");
1520 return -ETIMEDOUT;
1521 }
1522 spin_unlock_irq(&imx21->lock);
1523 schedule_timeout(1);
1524 spin_lock_irq(&imx21->lock);
1525 }
1526 spin_unlock_irqrestore(&imx21->lock, flags);
1527 return 0;
1528}
1529
1530static int __devinit imx21_hc_start(struct usb_hcd *hcd)
1531{
1532 struct imx21 *imx21 = hcd_to_imx21(hcd);
1533 unsigned long flags;
1534 int i, j;
1535 u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
1536 u32 usb_control = 0;
1537
1538 hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
1539 USBOTG_HWMODE_HOSTXCVR_MASK);
1540 hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
1541 USBOTG_HWMODE_OTGXCVR_MASK);
1542
1543 if (imx21->pdata->host1_txenoe)
1544 usb_control |= USBCTRL_HOST1_TXEN_OE;
1545
1546 if (!imx21->pdata->host1_xcverless)
1547 usb_control |= USBCTRL_HOST1_BYP_TLL;
1548
1549 if (imx21->pdata->otg_ext_xcvr)
1550 usb_control |= USBCTRL_OTC_RCV_RXDP;
1551
1552
1553 spin_lock_irqsave(&imx21->lock, flags);
1554
1555 writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
1556 imx21->regs + USBOTG_CLK_CTRL);
1557 writel(hw_mode, imx21->regs + USBOTG_HWMODE);
1558 writel(usb_control, imx21->regs + USBCTRL);
1559 writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
1560 imx21->regs + USB_MISCCONTROL);
1561
1562 /* Clear the ETDs */
1563 for (i = 0; i < USB_NUM_ETD; i++)
1564 for (j = 0; j < 4; j++)
1565 etd_writel(imx21, i, j, 0);
1566
1567 /* Take the HC out of reset */
1568 writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
1569 imx21->regs + USBH_HOST_CTRL);
1570
1571 /* Enable ports */
1572 if (imx21->pdata->enable_otg_host)
1573 writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1574 imx21->regs + USBH_PORTSTAT(0));
1575
1576 if (imx21->pdata->enable_host1)
1577 writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1578 imx21->regs + USBH_PORTSTAT(1));
1579
1580 if (imx21->pdata->enable_host2)
1581 writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1582 imx21->regs + USBH_PORTSTAT(2));
1583
1584
1585 hcd->state = HC_STATE_RUNNING;
1586
1587 /* Enable host controller interrupts */
1588 set_register_bits(imx21, USBH_SYSIEN,
1589 USBH_SYSIEN_HERRINT |
1590 USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
1591 set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1592
1593 spin_unlock_irqrestore(&imx21->lock, flags);
1594
1595 return 0;
1596}
1597
1598static void imx21_hc_stop(struct usb_hcd *hcd)
1599{
1600 struct imx21 *imx21 = hcd_to_imx21(hcd);
1601 unsigned long flags;
1602
1603 spin_lock_irqsave(&imx21->lock, flags);
1604
1605 writel(0, imx21->regs + USBH_SYSIEN);
1606 clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
 1607 clear_register_bits(imx21, USBOTG_CLK_CTRL,
 1608 USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
1609 spin_unlock_irqrestore(&imx21->lock, flags);
1610}
1611
1612/* =========================================== */
1613/* Driver glue */
1614/* =========================================== */
1615
1616static struct hc_driver imx21_hc_driver = {
1617 .description = hcd_name,
1618 .product_desc = "IMX21 USB Host Controller",
1619 .hcd_priv_size = sizeof(struct imx21),
1620
1621 .flags = HCD_USB11,
1622 .irq = imx21_irq,
1623
1624 .reset = imx21_hc_reset,
1625 .start = imx21_hc_start,
1626 .stop = imx21_hc_stop,
1627
1628 /* I/O requests */
1629 .urb_enqueue = imx21_hc_urb_enqueue,
1630 .urb_dequeue = imx21_hc_urb_dequeue,
1631 .endpoint_disable = imx21_hc_endpoint_disable,
1632
1633 /* scheduling support */
1634 .get_frame_number = imx21_hc_get_frame,
1635
1636 /* Root hub support */
1637 .hub_status_data = imx21_hc_hub_status_data,
1638 .hub_control = imx21_hc_hub_control,
1639
1640};
1641
1642static struct mx21_usbh_platform_data default_pdata = {
1643 .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
1644 .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
1645 .enable_host1 = 1,
1646 .enable_host2 = 1,
1647 .enable_otg_host = 1,
1648
1649};
1650
1651static int imx21_remove(struct platform_device *pdev)
1652{
1653 struct usb_hcd *hcd = platform_get_drvdata(pdev);
1654 struct imx21 *imx21 = hcd_to_imx21(hcd);
1655 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1656
1657 remove_debug_files(imx21);
1658 usb_remove_hcd(hcd);
1659
1660 if (res != NULL) {
1661 clk_disable(imx21->clk);
1662 clk_put(imx21->clk);
1663 iounmap(imx21->regs);
1664 release_mem_region(res->start, resource_size(res));
1665 }
1666
1667 kfree(hcd);
1668 return 0;
1669}
1670
1671
1672static int imx21_probe(struct platform_device *pdev)
1673{
1674 struct usb_hcd *hcd;
1675 struct imx21 *imx21;
1676 struct resource *res;
1677 int ret;
1678 int irq;
1679
1680 printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);
1681
1682 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1683 if (!res)
1684 return -ENODEV;
1685 irq = platform_get_irq(pdev, 0);
1686 if (irq < 0)
1687 return -ENXIO;
1688
1689 hcd = usb_create_hcd(&imx21_hc_driver,
1690 &pdev->dev, dev_name(&pdev->dev));
1691 if (hcd == NULL) {
1692 dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
1693 dev_name(&pdev->dev));
1694 return -ENOMEM;
1695 }
1696
1697 imx21 = hcd_to_imx21(hcd);
1698 imx21->dev = &pdev->dev;
1699 imx21->pdata = pdev->dev.platform_data;
1700 if (!imx21->pdata)
1701 imx21->pdata = &default_pdata;
1702
1703 spin_lock_init(&imx21->lock);
1704 INIT_LIST_HEAD(&imx21->dmem_list);
1705 INIT_LIST_HEAD(&imx21->queue_for_etd);
1706 INIT_LIST_HEAD(&imx21->queue_for_dmem);
1707 create_debug_files(imx21);
1708
1709 res = request_mem_region(res->start, resource_size(res), hcd_name);
1710 if (!res) {
1711 ret = -EBUSY;
1712 goto failed_request_mem;
1713 }
1714
1715 imx21->regs = ioremap(res->start, resource_size(res));
1716 if (imx21->regs == NULL) {
1717 dev_err(imx21->dev, "Cannot map registers\n");
1718 ret = -ENOMEM;
1719 goto failed_ioremap;
1720 }
1721
1722	/* Enable clock source */
1723 imx21->clk = clk_get(imx21->dev, NULL);
1724 if (IS_ERR(imx21->clk)) {
1725 dev_err(imx21->dev, "no clock found\n");
1726 ret = PTR_ERR(imx21->clk);
1727 goto failed_clock_get;
1728 }
1729
1730 ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
1731 if (ret)
1732 goto failed_clock_set;
1733 ret = clk_enable(imx21->clk);
1734 if (ret)
1735 goto failed_clock_enable;
1736
1737 dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
1738 (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
1739
1740 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
1741 if (ret != 0) {
1742 dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
1743 goto failed_add_hcd;
1744 }
1745
1746 return 0;
1747
1748failed_add_hcd:
1749 clk_disable(imx21->clk);
1750failed_clock_enable:
1751failed_clock_set:
1752 clk_put(imx21->clk);
1753failed_clock_get:
1754 iounmap(imx21->regs);
1755failed_ioremap:
1756	release_mem_region(res->start, resource_size(res));
1757failed_request_mem:
1758 remove_debug_files(imx21);
1759 usb_put_hcd(hcd);
1760 return ret;
1761}
1762
1763static struct platform_driver imx21_hcd_driver = {
1764 .driver = {
1765 .name = (char *)hcd_name,
1766 },
1767 .probe = imx21_probe,
1768 .remove = imx21_remove,
1769 .suspend = NULL,
1770 .resume = NULL,
1771};
1772
1773static int __init imx21_hcd_init(void)
1774{
1775 return platform_driver_register(&imx21_hcd_driver);
1776}
1777
1778static void __exit imx21_hcd_cleanup(void)
1779{
1780 platform_driver_unregister(&imx21_hcd_driver);
1781}
1782
1783module_init(imx21_hcd_init);
1784module_exit(imx21_hcd_cleanup);
1785
1786MODULE_DESCRIPTION("i.MX21 USB Host controller");
1787MODULE_AUTHOR("Martin Fuzzey");
1788MODULE_LICENSE("GPL");
1789MODULE_ALIAS("platform:imx21-hcd");
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
new file mode 100644
index 000000000000..1b0d913780a5
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.h
@@ -0,0 +1,436 @@
1/*
2 * Macros and prototypes for i.MX21
3 *
4 * Copyright (C) 2006 Loping Dog Embedded Systems
5 * Copyright (C) 2009 Martin Fuzzey
6 * Originally written by Jay Monkman <jtm@lopingdog.com>
7 * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 * for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#ifndef __LINUX_IMX21_HCD_H__
25#define __LINUX_IMX21_HCD_H__
26
27#include <mach/mx21-usbhost.h>
28
29#define NUM_ISO_ETDS 2
30#define USB_NUM_ETD 32
31#define DMEM_SIZE 4096
32
33/* Register definitions */
34#define USBOTG_HWMODE 0x00
35#define USBOTG_HWMODE_ANASDBEN (1 << 14)
36#define USBOTG_HWMODE_OTGXCVR_SHIFT 6
37#define USBOTG_HWMODE_OTGXCVR_MASK (3 << 6)
38#define USBOTG_HWMODE_OTGXCVR_TD_RD (0 << 6)
39#define USBOTG_HWMODE_OTGXCVR_TS_RD (2 << 6)
40#define USBOTG_HWMODE_OTGXCVR_TD_RS (1 << 6)
41#define USBOTG_HWMODE_OTGXCVR_TS_RS (3 << 6)
42#define USBOTG_HWMODE_HOSTXCVR_SHIFT 4
43#define USBOTG_HWMODE_HOSTXCVR_MASK (3 << 4)
44#define USBOTG_HWMODE_HOSTXCVR_TD_RD (0 << 4)
45#define USBOTG_HWMODE_HOSTXCVR_TS_RD (2 << 4)
46#define USBOTG_HWMODE_HOSTXCVR_TD_RS (1 << 4)
47#define USBOTG_HWMODE_HOSTXCVR_TS_RS (3 << 4)
48#define USBOTG_HWMODE_CRECFG_MASK (3 << 0)
49#define USBOTG_HWMODE_CRECFG_HOST (1 << 0)
50#define USBOTG_HWMODE_CRECFG_FUNC (2 << 0)
51#define USBOTG_HWMODE_CRECFG_HNP (3 << 0)
52
53#define USBOTG_CINT_STAT 0x04
54#define USBOTG_CINT_STEN 0x08
55#define USBOTG_ASHNPINT (1 << 5)
56#define USBOTG_ASFCINT (1 << 4)
57#define USBOTG_ASHCINT (1 << 3)
58#define USBOTG_SHNPINT (1 << 2)
59#define USBOTG_FCINT (1 << 1)
60#define USBOTG_HCINT (1 << 0)
61
62#define USBOTG_CLK_CTRL 0x0c
63#define USBOTG_CLK_CTRL_FUNC (1 << 2)
64#define USBOTG_CLK_CTRL_HST (1 << 1)
65#define USBOTG_CLK_CTRL_MAIN (1 << 0)
66
67#define USBOTG_RST_CTRL 0x10
68#define USBOTG_RST_RSTI2C (1 << 15)
69#define USBOTG_RST_RSTCTRL (1 << 5)
70#define USBOTG_RST_RSTFC (1 << 4)
71#define USBOTG_RST_RSTFSKE (1 << 3)
72#define USBOTG_RST_RSTRH (1 << 2)
73#define USBOTG_RST_RSTHSIE (1 << 1)
74#define USBOTG_RST_RSTHC (1 << 0)
75
76#define USBOTG_FRM_INTVL 0x14
77#define USBOTG_FRM_REMAIN 0x18
78#define USBOTG_HNP_CSR 0x1c
79#define USBOTG_HNP_ISR 0x2c
80#define USBOTG_HNP_IEN 0x30
81
82#define USBOTG_I2C_TXCVR_REG(x) (0x100 + (x))
83#define USBOTG_I2C_XCVR_DEVAD 0x118
84#define USBOTG_I2C_SEQ_OP_REG 0x119
85#define USBOTG_I2C_SEQ_RD_STARTAD 0x11a
86#define USBOTG_I2C_OP_CTRL_REG 0x11b
87#define USBOTG_I2C_SCLK_TO_SCK_HPER 0x11e
88#define USBOTG_I2C_MASTER_INT_REG 0x11f
89
90#define USBH_HOST_CTRL 0x80
91#define USBH_HOST_CTRL_HCRESET (1 << 31)
92#define USBH_HOST_CTRL_SCHDOVR(x) ((x) << 16)
93#define USBH_HOST_CTRL_RMTWUEN (1 << 4)
94#define USBH_HOST_CTRL_HCUSBSTE_RESET (0 << 2)
95#define USBH_HOST_CTRL_HCUSBSTE_RESUME (1 << 2)
96#define USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL (2 << 2)
97#define USBH_HOST_CTRL_HCUSBSTE_SUSPEND (3 << 2)
98#define USBH_HOST_CTRL_CTLBLKSR_1 (0 << 0)
99#define USBH_HOST_CTRL_CTLBLKSR_2 (1 << 0)
100#define USBH_HOST_CTRL_CTLBLKSR_3 (2 << 0)
101#define USBH_HOST_CTRL_CTLBLKSR_4 (3 << 0)
102
103#define USBH_SYSISR 0x88
104#define USBH_SYSISR_PSCINT (1 << 6)
105#define USBH_SYSISR_FMOFINT (1 << 5)
106#define USBH_SYSISR_HERRINT (1 << 4)
107#define USBH_SYSISR_RESDETINT (1 << 3)
108#define USBH_SYSISR_SOFINT (1 << 2)
109#define USBH_SYSISR_DONEINT (1 << 1)
110#define USBH_SYSISR_SORINT (1 << 0)
111
112#define USBH_SYSIEN 0x8c
113#define USBH_SYSIEN_PSCINT (1 << 6)
114#define USBH_SYSIEN_FMOFINT (1 << 5)
115#define USBH_SYSIEN_HERRINT (1 << 4)
116#define USBH_SYSIEN_RESDETINT (1 << 3)
117#define USBH_SYSIEN_SOFINT (1 << 2)
118#define USBH_SYSIEN_DONEINT (1 << 1)
119#define USBH_SYSIEN_SORINT (1 << 0)
120
121#define USBH_XBUFSTAT 0x98
122#define USBH_YBUFSTAT 0x9c
123#define USBH_XYINTEN 0xa0
124#define USBH_XFILLSTAT 0xa8
125#define USBH_YFILLSTAT 0xac
126#define USBH_ETDENSET 0xc0
127#define USBH_ETDENCLR 0xc4
128#define USBH_IMMEDINT 0xcc
129#define USBH_ETDDONESTAT 0xd0
130#define USBH_ETDDONEEN 0xd4
131#define USBH_FRMNUB 0xe0
132#define USBH_LSTHRESH 0xe4
133
134#define USBH_ROOTHUBA 0xe8
135#define USBH_ROOTHUBA_PWRTOGOOD_MASK (0xff)
136#define USBH_ROOTHUBA_PWRTOGOOD_SHIFT (24)
137#define USBH_ROOTHUBA_NOOVRCURP (1 << 12)
138#define USBH_ROOTHUBA_OVRCURPM (1 << 11)
139#define USBH_ROOTHUBA_DEVTYPE (1 << 10)
140#define USBH_ROOTHUBA_PWRSWTMD (1 << 9)
141#define USBH_ROOTHUBA_NOPWRSWT (1 << 8)
142#define USBH_ROOTHUBA_NDNSTMPRT_MASK (0xff)
143
144#define USBH_ROOTHUBB 0xec
145#define USBH_ROOTHUBB_PRTPWRCM(x) (1 << ((x) + 16))
146#define USBH_ROOTHUBB_DEVREMOVE(x) (1 << (x))
147
148#define USBH_ROOTSTAT 0xf0
149#define USBH_ROOTSTAT_CLRRMTWUE (1 << 31)
150#define USBH_ROOTSTAT_OVRCURCHG (1 << 17)
151#define USBH_ROOTSTAT_DEVCONWUE (1 << 15)
152#define USBH_ROOTSTAT_OVRCURI (1 << 1)
153#define USBH_ROOTSTAT_LOCPWRS (1 << 0)
154
155#define USBH_PORTSTAT(x) (0xf4 + ((x) * 4))
156#define USBH_PORTSTAT_PRTRSTSC (1 << 20)
157#define USBH_PORTSTAT_OVRCURIC (1 << 19)
158#define USBH_PORTSTAT_PRTSTATSC (1 << 18)
159#define USBH_PORTSTAT_PRTENBLSC (1 << 17)
160#define USBH_PORTSTAT_CONNECTSC (1 << 16)
161#define USBH_PORTSTAT_LSDEVCON (1 << 9)
162#define USBH_PORTSTAT_PRTPWRST (1 << 8)
163#define USBH_PORTSTAT_PRTRSTST (1 << 4)
164#define USBH_PORTSTAT_PRTOVRCURI (1 << 3)
165#define USBH_PORTSTAT_PRTSUSPST (1 << 2)
166#define USBH_PORTSTAT_PRTENABST (1 << 1)
167#define USBH_PORTSTAT_CURCONST (1 << 0)
168
169#define USB_DMAREV 0x800
170#define USB_DMAINTSTAT 0x804
171#define USB_DMAINTSTAT_EPERR (1 << 1)
172#define USB_DMAINTSTAT_ETDERR (1 << 0)
173
174#define USB_DMAINTEN 0x808
175#define USB_DMAINTEN_EPERRINTEN (1 << 1)
176#define USB_DMAINTEN_ETDERRINTEN (1 << 0)
177
178#define USB_ETDDMAERSTAT 0x80c
179#define USB_EPDMAERSTAT 0x810
180#define USB_ETDDMAEN 0x820
181#define USB_EPDMAEN 0x824
182#define USB_ETDDMAXTEN 0x828
183#define USB_EPDMAXTEN 0x82c
184#define USB_ETDDMAENXYT 0x830
185#define USB_EPDMAENXYT 0x834
186#define USB_ETDDMABST4EN 0x838
187#define USB_EPDMABST4EN 0x83c
188
189#define USB_MISCCONTROL 0x840
190#define USB_MISCCONTROL_ISOPREVFRM (1 << 3)
191#define USB_MISCCONTROL_SKPRTRY (1 << 2)
192#define USB_MISCCONTROL_ARBMODE (1 << 1)
193#define USB_MISCCONTROL_FILTCC (1 << 0)
194
195#define USB_ETDDMACHANLCLR 0x848
196#define USB_EPDMACHANLCLR 0x84c
197#define USB_ETDSMSA(x) (0x900 + ((x) * 4))
198#define USB_EPSMSA(x) (0x980 + ((x) * 4))
199#define USB_ETDDMABUFPTR(x) (0xa00 + ((x) * 4))
200#define USB_EPDMABUFPTR(x) (0xa80 + ((x) * 4))
201
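/* Bit positions within the four 32-bit ETD dwords addressed by USB_ETD_DWORD() */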
202#define USB_ETD_DWORD(x, w) (0x200 + ((x) * 16) + ((w) * 4))
203#define DW0_ADDRESS 0
204#define DW0_ENDPNT 7
205#define DW0_DIRECT 11
206#define DW0_SPEED 13
207#define DW0_FORMAT 14
208#define DW0_MAXPKTSIZ 16
209#define DW0_HALTED 27
210#define DW0_TOGCRY 28
211#define DW0_SNDNAK 30
212
213#define DW1_XBUFSRTAD 0
214#define DW1_YBUFSRTAD 16
215
216#define DW2_RTRYDELAY 0
217#define DW2_POLINTERV 0
218#define DW2_STARTFRM 0
219#define DW2_RELPOLPOS 8
220#define DW2_DIRPID 16
221#define DW2_BUFROUND 18
222#define DW2_DELAYINT 19
223#define DW2_DATATOG 22
224#define DW2_ERRORCNT 24
225#define DW2_COMPCODE 28
226
227#define DW3_TOTBYECNT 0
228#define DW3_PKTLEN0 0
229#define DW3_COMPCODE0 12
230#define DW3_PKTLEN1 16
231#define DW3_BUFSIZE 21
232#define DW3_COMPCODE1 28
233
234#define USBCTRL 0x600
235#define USBCTRL_I2C_WU_INT_STAT (1 << 27)
236#define USBCTRL_OTG_WU_INT_STAT (1 << 26)
237#define USBCTRL_HOST_WU_INT_STAT (1 << 25)
238#define USBCTRL_FNT_WU_INT_STAT (1 << 24)
239#define USBCTRL_I2C_WU_INT_EN (1 << 19)
240#define USBCTRL_OTG_WU_INT_EN (1 << 18)
241#define USBCTRL_HOST_WU_INT_EN (1 << 17)
242#define USBCTRL_FNT_WU_INT_EN (1 << 16)
243#define USBCTRL_OTC_RCV_RXDP (1 << 13)
244#define USBCTRL_HOST1_BYP_TLL (1 << 12)
245#define USBCTRL_OTG_BYP_VAL(x) ((x) << 10)
246#define USBCTRL_HOST1_BYP_VAL(x) ((x) << 8)
247#define USBCTRL_OTG_PWR_MASK (1 << 6)
248#define USBCTRL_HOST1_PWR_MASK (1 << 5)
249#define USBCTRL_HOST2_PWR_MASK (1 << 4)
250#define USBCTRL_USB_BYP (1 << 2)
251#define USBCTRL_HOST1_TXEN_OE (1 << 1)
252
253
254/* Values in TD blocks */
255#define TD_DIR_SETUP 0
256#define TD_DIR_OUT 1
257#define TD_DIR_IN 2
258#define TD_FORMAT_CONTROL 0
259#define TD_FORMAT_ISO 1
260#define TD_FORMAT_BULK 2
261#define TD_FORMAT_INT 3
262#define TD_TOGGLE_CARRY 0
263#define TD_TOGGLE_DATA0 2
264#define TD_TOGGLE_DATA1 3
265
266/* control transfer states */
267#define US_CTRL_SETUP 2
268#define US_CTRL_DATA 1
269#define US_CTRL_ACK 0
270
271/* bulk transfer main state and 0-length packet */
272#define US_BULK 1
273#define US_BULK0 0
274
275/* ETD format description */
276#define IMX_FMT_CTRL 0x0
277#define IMX_FMT_ISO 0x1
278#define IMX_FMT_BULK 0x2
279#define IMX_FMT_INT 0x3
280
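/* Indexed by usb_pipetype(): maps ISO/INT/CTRL/BULK pipes to i.MX21 ETD format codes */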
281static char fmt_urb_to_etd[4] = {
282/*PIPE_ISOCHRONOUS*/ IMX_FMT_ISO,
283/*PIPE_INTERRUPT*/ IMX_FMT_INT,
284/*PIPE_CONTROL*/ IMX_FMT_CTRL,
285/*PIPE_BULK*/ IMX_FMT_BULK
286};
287
288/* condition (error) CC codes and mapping (OHCI like) */
289
290#define TD_CC_NOERROR 0x00
291#define TD_CC_CRC 0x01
292#define TD_CC_BITSTUFFING 0x02
293#define TD_CC_DATATOGGLEM 0x03
294#define TD_CC_STALL 0x04
295#define TD_DEVNOTRESP 0x05
296#define TD_PIDCHECKFAIL 0x06
297/*#define TD_UNEXPECTEDPID 0x07 - reserved, not active on MX2*/
298#define TD_DATAOVERRUN 0x08
299#define TD_DATAUNDERRUN 0x09
300#define TD_BUFFEROVERRUN 0x0C
301#define TD_BUFFERUNDERRUN 0x0D
302#define TD_SCHEDULEOVERRUN 0x0E
303#define TD_NOTACCESSED 0x0F
304
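/* Maps the 4-bit hardware completion code (TD_CC_*) to a URB status errno */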
305static const int cc_to_error[16] = {
306 /* No Error */ 0,
307 /* CRC Error */ -EILSEQ,
308 /* Bit Stuff */ -EPROTO,
309 /* Data Togg */ -EILSEQ,
310 /* Stall */ -EPIPE,
311 /* DevNotResp */ -ETIMEDOUT,
312 /* PIDCheck */ -EPROTO,
313 /* UnExpPID */ -EPROTO,
314 /* DataOver */ -EOVERFLOW,
315 /* DataUnder */ -EREMOTEIO,
316 /* (for hw) */ -EIO,
317 /* (for hw) */ -EIO,
318 /* BufferOver */ -ECOMM,
319 /* BuffUnder */ -ENOSR,
320 /* (for HCD) */ -ENOSPC,
321 /* (for HCD) */ -EALREADY
322};
323
324/* HCD data associated with a usb core URB */
325struct urb_priv {
326 struct urb *urb;
327 struct usb_host_endpoint *ep;
328 int active;
329 int state;
330 struct td *isoc_td;
331 int isoc_remaining;
332 int isoc_status;
333};
334
335/* HCD data associated with a usb core endpoint */
336struct ep_priv {
337 struct usb_host_endpoint *ep;
338 struct list_head td_list;
339 struct list_head queue;
340 int etd[NUM_ISO_ETDS];
341 int waiting_etd;
342};
343
344/* isoc packet */
345struct td {
346 struct list_head list;
347 struct urb *urb;
348 struct usb_host_endpoint *ep;
349 dma_addr_t data;
350 unsigned long buf_addr;
351 int len;
352 int frame;
353 int isoc_index;
354};
355
356/* HCD data associated with a hardware ETD */
357struct etd_priv {
358 struct usb_host_endpoint *ep;
359 struct urb *urb;
360 struct td *td;
361 struct list_head queue;
362 dma_addr_t dma_handle;
363 int alloc;
364 int len;
365 int dmem_size;
366 int dmem_offset;
367 int active_count;
368#ifdef DEBUG
369 int activated_frame;
370 int disactivated_frame;
371 int last_int_frame;
372 int last_req_frame;
373 u32 submitted_dwords[4];
374#endif
375};
376
377/* Hardware data memory info */
378struct imx21_dmem_area {
379 struct usb_host_endpoint *ep;
380 unsigned int offset;
381 unsigned int size;
382 struct list_head list;
383};
384
385#ifdef DEBUG
386struct debug_usage_stats {
387 unsigned int value;
388 unsigned int maximum;
389};
390
391struct debug_stats {
392 unsigned long submitted;
393 unsigned long completed_ok;
394 unsigned long completed_failed;
395 unsigned long unlinked;
396 unsigned long queue_etd;
397 unsigned long queue_dmem;
398};
399
400struct debug_isoc_trace {
401 int schedule_frame;
402 int submit_frame;
403 int request_len;
404 int done_frame;
405 int done_len;
406 int cc;
407 struct td *td;
408};
409#endif
410
411/* HCD data structure */
412struct imx21 {
413 spinlock_t lock;
414 struct device *dev;
415 struct mx21_usbh_platform_data *pdata;
416 struct list_head dmem_list;
417 struct list_head queue_for_etd; /* eps queued due to etd shortage */
418 struct list_head queue_for_dmem; /* etds queued due to dmem shortage */
419 struct etd_priv etd[USB_NUM_ETD];
420 struct clk *clk;
421 void __iomem *regs;
422#ifdef DEBUG
423 struct dentry *debug_root;
424 struct debug_stats nonisoc_stats;
425 struct debug_stats isoc_stats;
426 struct debug_usage_stats etd_usage;
427 struct debug_usage_stats dmem_usage;
428 struct debug_isoc_trace isoc_trace[20];
429 struct debug_isoc_trace isoc_trace_failed[20];
430 unsigned long debug_unblocks;
431 int isoc_trace_index;
432 int isoc_trace_index_failed;
433#endif
434};
435
436#endif
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 42971657fde2..217fb5170200 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -1257,7 +1257,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1257 1257
1258 /* avoid all allocations within spinlocks: request or endpoint */ 1258 /* avoid all allocations within spinlocks: request or endpoint */
1259 if (!hep->hcpriv) { 1259 if (!hep->hcpriv) {
1260 ep = kcalloc(1, sizeof *ep, mem_flags); 1260 ep = kzalloc(sizeof *ep, mem_flags);
1261 if (!ep) 1261 if (!ep)
1262 return -ENOMEM; 1262 return -ENOMEM;
1263 } 1263 }
@@ -2719,24 +2719,11 @@ static int __init isp1362_probe(struct platform_device *pdev)
2719 } 2719 }
2720 irq = irq_res->start; 2720 irq = irq_res->start;
2721 2721
2722#ifdef CONFIG_USB_HCD_DMA
2723 if (pdev->dev.dma_mask) {
2724 struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
2725
2726 if (!dma_res) {
2727 retval = -ENODEV;
2728 goto err1;
2729 }
2730 isp1362_hcd->data_dma = dma_res->start;
2731 isp1362_hcd->max_dma_size = resource_len(dma_res);
2732 }
2733#else
2734 if (pdev->dev.dma_mask) { 2722 if (pdev->dev.dma_mask) {
2735 DBG(1, "won't do DMA"); 2723 DBG(1, "won't do DMA");
2736 retval = -ENODEV; 2724 retval = -ENODEV;
2737 goto err1; 2725 goto err1;
2738 } 2726 }
2739#endif
2740 2727
2741 if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) { 2728 if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) {
2742 retval = -EBUSY; 2729 retval = -EBUSY;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 27b8f7cb4471..9f01293600b0 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -17,7 +17,9 @@
17#include <linux/debugfs.h> 17#include <linux/debugfs.h>
18#include <linux/uaccess.h> 18#include <linux/uaccess.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/mm.h>
20#include <asm/unaligned.h> 21#include <asm/unaligned.h>
22#include <asm/cacheflush.h>
21 23
22#include "../core/hcd.h" 24#include "../core/hcd.h"
23#include "isp1760-hcd.h" 25#include "isp1760-hcd.h"
@@ -904,6 +906,14 @@ __acquires(priv->lock)
904 status = 0; 906 status = 0;
905 } 907 }
906 908
909 if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
910 void *ptr;
911 for (ptr = urb->transfer_buffer;
912 ptr < urb->transfer_buffer + urb->transfer_buffer_length;
913 ptr += PAGE_SIZE)
914 flush_dcache_page(virt_to_page(ptr));
915 }
916
907 /* complete() can reenter this HCD */ 917 /* complete() can reenter this HCD */
908 usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb); 918 usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
909 spin_unlock(&priv->lock); 919 spin_unlock(&priv->lock);
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 1c9f977a5c9c..4293cfd28d61 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -109,7 +109,7 @@ static int of_isp1760_remove(struct of_device *dev)
109 return 0; 109 return 0;
110} 110}
111 111
112static struct of_device_id of_isp1760_match[] = { 112static const struct of_device_id of_isp1760_match[] = {
113 { 113 {
114 .compatible = "nxp,usb-isp1760", 114 .compatible = "nxp,usb-isp1760",
115 }, 115 },
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
new file mode 100644
index 000000000000..4aa08d36d077
--- /dev/null
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -0,0 +1,456 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * TI DA8xx (OMAP-L1x) Bus Glue
5 *
6 * Derived from: ohci-omap.c and ohci-s3c2410.c
7 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
8 *
9 * This file is licensed under the terms of the GNU General Public License
10 * version 2. This program is licensed "as is" without any warranty of any
11 * kind, whether express or implied.
12 */
13
14#include <linux/interrupt.h>
15#include <linux/jiffies.h>
16#include <linux/platform_device.h>
17#include <linux/clk.h>
18
19#include <mach/da8xx.h>
20#include <mach/usb.h>
21
22#ifndef CONFIG_ARCH_DAVINCI_DA8XX
23#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
24#endif
25
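/* DA8xx SYSCFG register controlling USB PHY power, PLL and clock muxing (used below) */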
26#define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG)
27
28static struct clk *usb11_clk;
29static struct clk *usb20_clk;
30
31/* Over-current indicator change bitmask */
32static volatile u16 ocic_mask;
33
34static void ohci_da8xx_clock(int on)
35{
36 u32 cfgchip2;
37
38 cfgchip2 = __raw_readl(CFGCHIP2);
39 if (on) {
40 clk_enable(usb11_clk);
41
42 /*
43 * If USB 1.1 reference clock is sourced from USB 2.0 PHY, we
44 * need to enable the USB 2.0 module clocking, start its PHY,
45 * and not allow it to stop the clock during USB 2.0 suspend.
46 */
47 if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) {
48 clk_enable(usb20_clk);
49
50 cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN);
51 cfgchip2 |= CFGCHIP2_PHY_PLLON;
52 __raw_writel(cfgchip2, CFGCHIP2);
53
54 pr_info("Waiting for USB PHY clock good...\n");
55 while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
56 cpu_relax();
57 }
58
59 /* Enable USB 1.1 PHY */
60 cfgchip2 |= CFGCHIP2_USB1SUSPENDM;
61 } else {
62 clk_disable(usb11_clk);
63 if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX))
64 clk_disable(usb20_clk);
65
66 /* Disable USB 1.1 PHY */
67 cfgchip2 &= ~CFGCHIP2_USB1SUSPENDM;
68 }
69 __raw_writel(cfgchip2, CFGCHIP2);
70}
71
72/*
73 * Handle the port over-current indicator change.
74 */
75static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
76 unsigned port)
77{
78 ocic_mask |= 1 << port;
79
80 /* Once over-current is detected, the port needs to be powered down */
81 if (hub->get_oci(port) > 0)
82 hub->set_power(port, 0);
83}
84
85static int ohci_da8xx_init(struct usb_hcd *hcd)
86{
87 struct device *dev = hcd->self.controller;
88 struct da8xx_ohci_root_hub *hub = dev->platform_data;
89 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
90 int result;
91 u32 rh_a;
92
93 dev_dbg(dev, "starting USB controller\n");
94
95 ohci_da8xx_clock(1);
96
97 /*
98	 * The DA8xx has only one port connected to the pins, but the HC root hub
99	 * register A reports two ports, so we have to override it...
100 */
101 ohci->num_ports = 1;
102
103 result = ohci_init(ohci);
104 if (result < 0)
105 return result;
106
107 /*
108 * Since we're providing a board-specific root hub port power control
109 * and over-current reporting, we have to override the HC root hub A
110	 * register's default value, so that ohci_hub_control() can return
111 * the correct hub descriptor...
112 */
113 rh_a = ohci_readl(ohci, &ohci->regs->roothub.a);
114 if (hub->set_power) {
115 rh_a &= ~RH_A_NPS;
116 rh_a |= RH_A_PSM;
117 }
118 if (hub->get_oci) {
119 rh_a &= ~RH_A_NOCP;
120 rh_a |= RH_A_OCPM;
121 }
122 rh_a &= ~RH_A_POTPGT;
123 rh_a |= hub->potpgt << 24;
124 ohci_writel(ohci, rh_a, &ohci->regs->roothub.a);
125
126 return result;
127}
128
129static void ohci_da8xx_stop(struct usb_hcd *hcd)
130{
131 ohci_stop(hcd);
132 ohci_da8xx_clock(0);
133}
134
135static int ohci_da8xx_start(struct usb_hcd *hcd)
136{
137 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
138 int result;
139
140 result = ohci_run(ohci);
141 if (result < 0)
142 ohci_da8xx_stop(hcd);
143
144 return result;
145}
146
147/*
148 * Update the status data from the hub with the over-current indicator change.
149 */
150static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf)
151{
152 int length = ohci_hub_status_data(hcd, buf);
153
154 /* See if we have OCIC bit set on port 1 */
155 if (ocic_mask & (1 << 1)) {
156 dev_dbg(hcd->self.controller, "over-current indicator change "
157 "on port 1\n");
158
159 if (!length)
160 length = 1;
161
162 buf[0] |= 1 << 1;
163 }
164 return length;
165}
166
167/*
168 * Look at the control requests to the root hub and see if we need to override.
169 */
170static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
171 u16 wIndex, char *buf, u16 wLength)
172{
173 struct device *dev = hcd->self.controller;
174 struct da8xx_ohci_root_hub *hub = dev->platform_data;
175 int temp;
176
177 switch (typeReq) {
178 case GetPortStatus:
179 /* Check the port number */
180 if (wIndex != 1)
181 break;
182
183 dev_dbg(dev, "GetPortStatus(%u)\n", wIndex);
184
185 temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1);
186
187 /* The port power status (PPS) bit defaults to 1 */
188 if (hub->get_power && hub->get_power(wIndex) == 0)
189 temp &= ~RH_PS_PPS;
190
191 /* The port over-current indicator (POCI) bit is always 0 */
192 if (hub->get_oci && hub->get_oci(wIndex) > 0)
193 temp |= RH_PS_POCI;
194
195 /* The over-current indicator change (OCIC) bit is 0 too */
196 if (ocic_mask & (1 << wIndex))
197 temp |= RH_PS_OCIC;
198
199 put_unaligned(cpu_to_le32(temp), (__le32 *)buf);
200 return 0;
201 case SetPortFeature:
202 temp = 1;
203 goto check_port;
204 case ClearPortFeature:
205 temp = 0;
206
207check_port:
208 /* Check the port number */
209 if (wIndex != 1)
210 break;
211
212 switch (wValue) {
213 case USB_PORT_FEAT_POWER:
214 dev_dbg(dev, "%sPortFeature(%u): %s\n",
215 temp ? "Set" : "Clear", wIndex, "POWER");
216
217 if (!hub->set_power)
218 return -EPIPE;
219
220 return hub->set_power(wIndex, temp) ? -EPIPE : 0;
221 case USB_PORT_FEAT_C_OVER_CURRENT:
222 dev_dbg(dev, "%sPortFeature(%u): %s\n",
223 temp ? "Set" : "Clear", wIndex,
224 "C_OVER_CURRENT");
225
226 if (temp)
227 ocic_mask |= 1 << wIndex;
228 else
229 ocic_mask &= ~(1 << wIndex);
230 return 0;
231 }
232 }
233
234 return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
235}
236
237static const struct hc_driver ohci_da8xx_hc_driver = {
238 .description = hcd_name,
239 .product_desc = "DA8xx OHCI",
240 .hcd_priv_size = sizeof(struct ohci_hcd),
241
242 /*
243 * generic hardware linkage
244 */
245 .irq = ohci_irq,
246 .flags = HCD_USB11 | HCD_MEMORY,
247
248 /*
249 * basic lifecycle operations
250 */
251 .reset = ohci_da8xx_init,
252 .start = ohci_da8xx_start,
253 .stop = ohci_da8xx_stop,
254 .shutdown = ohci_shutdown,
255
256 /*
257 * managing i/o requests and associated device resources
258 */
259 .urb_enqueue = ohci_urb_enqueue,
260 .urb_dequeue = ohci_urb_dequeue,
261 .endpoint_disable = ohci_endpoint_disable,
262
263 /*
264 * scheduling support
265 */
266 .get_frame_number = ohci_get_frame,
267
268 /*
269 * root hub support
270 */
271 .hub_status_data = ohci_da8xx_hub_status_data,
272 .hub_control = ohci_da8xx_hub_control,
273
274#ifdef CONFIG_PM
275 .bus_suspend = ohci_bus_suspend,
276 .bus_resume = ohci_bus_resume,
277#endif
278 .start_port_reset = ohci_start_port_reset,
279};
280
281/*-------------------------------------------------------------------------*/
282
283
284/**
285 * usb_hcd_da8xx_probe - initialize DA8xx-based HCDs
286 * Context: !in_interrupt()
287 *
288 * Allocates basic resources for this USB host controller, and
289 * then invokes the start() method for the HCD associated with it
290 * through the hotplug entry's driver_data.
291 */
292static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
293 struct platform_device *pdev)
294{
295 struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
296 struct usb_hcd *hcd;
297 struct resource *mem;
298 int error, irq;
299
300 if (hub == NULL)
301 return -ENODEV;
302
303 usb11_clk = clk_get(&pdev->dev, "usb11");
304 if (IS_ERR(usb11_clk))
305 return PTR_ERR(usb11_clk);
306
307 usb20_clk = clk_get(&pdev->dev, "usb20");
308 if (IS_ERR(usb20_clk)) {
309 error = PTR_ERR(usb20_clk);
310 goto err0;
311 }
312
313 hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
314 if (!hcd) {
315 error = -ENOMEM;
316 goto err1;
317 }
318
319 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
320 if (!mem) {
321 error = -ENODEV;
322 goto err2;
323 }
324 hcd->rsrc_start = mem->start;
325 hcd->rsrc_len = mem->end - mem->start + 1;
326
327 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
328 dev_dbg(&pdev->dev, "request_mem_region failed\n");
329 error = -EBUSY;
330 goto err2;
331 }
332
333 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
334 if (!hcd->regs) {
335 dev_err(&pdev->dev, "ioremap failed\n");
336 error = -ENOMEM;
337 goto err3;
338 }
339
340 ohci_hcd_init(hcd_to_ohci(hcd));
341
342 irq = platform_get_irq(pdev, 0);
343 if (irq < 0) {
344 error = -ENODEV;
345 goto err4;
346 }
347 error = usb_add_hcd(hcd, irq, IRQF_DISABLED);
348 if (error)
349 goto err4;
350
351 if (hub->ocic_notify) {
352 error = hub->ocic_notify(ohci_da8xx_ocic_handler);
353 if (!error)
354 return 0;
355 }
356
357 usb_remove_hcd(hcd);
358err4:
359 iounmap(hcd->regs);
360err3:
361 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
362err2:
363 usb_put_hcd(hcd);
364err1:
365 clk_put(usb20_clk);
366err0:
367 clk_put(usb11_clk);
368 return error;
369}
370
371/**
372 * usb_hcd_da8xx_remove - shutdown processing for DA8xx-based HCDs
373 * @hcd: USB Host Controller being removed
374 * Context: !in_interrupt()
375 *
376 * Reverses the effect of usb_hcd_da8xx_probe(), first invoking
377 * the HCD's stop() method. It is always called from a thread
378 * context, normally "rmmod", "apmd", or something similar.
379 */
380static inline void
381usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
382{
383 struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
384
385 hub->ocic_notify(NULL);
386 usb_remove_hcd(hcd);
387 iounmap(hcd->regs);
388 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
389 usb_put_hcd(hcd);
390 clk_put(usb20_clk);
391 clk_put(usb11_clk);
392}
393
394static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev)
395{
396 return usb_hcd_da8xx_probe(&ohci_da8xx_hc_driver, dev);
397}
398
399static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev)
400{
401 struct usb_hcd *hcd = platform_get_drvdata(dev);
402
403 usb_hcd_da8xx_remove(hcd, dev);
404 platform_set_drvdata(dev, NULL);
405
406 return 0;
407}
408
409#ifdef CONFIG_PM
410static int ohci_da8xx_suspend(struct platform_device *dev, pm_message_t message)
411{
412 struct usb_hcd *hcd = platform_get_drvdata(dev);
413 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
414
415 if (time_before(jiffies, ohci->next_statechange))
416 msleep(5);
417 ohci->next_statechange = jiffies;
418
419 ohci_da8xx_clock(0);
420 hcd->state = HC_STATE_SUSPENDED;
421 dev->dev.power.power_state = PMSG_SUSPEND;
422 return 0;
423}
424
425static int ohci_da8xx_resume(struct platform_device *dev)
426{
427 struct usb_hcd *hcd = platform_get_drvdata(dev);
428 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
429
430 if (time_before(jiffies, ohci->next_statechange))
431 msleep(5);
432 ohci->next_statechange = jiffies;
433
434 ohci_da8xx_clock(1);
435 dev->dev.power.power_state = PMSG_ON;
436 usb_hcd_resume_root_hub(hcd);
437 return 0;
438}
439#endif
440
441/*
442 * Driver definition to register with platform structure.
443 */
444static struct platform_driver ohci_hcd_da8xx_driver = {
445 .probe = ohci_hcd_da8xx_drv_probe,
446 .remove = ohci_hcd_da8xx_drv_remove,
447 .shutdown = usb_hcd_platform_shutdown,
448#ifdef CONFIG_PM
449 .suspend = ohci_da8xx_suspend,
450 .resume = ohci_da8xx_resume,
451#endif
452 .driver = {
453 .owner = THIS_MODULE,
454 .name = "ohci",
455 },
456};
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 811f5dfdc582..8ad2441b0284 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -53,13 +53,13 @@ urb_print(struct urb * urb, char * str, int small, int status)
53 int i, len; 53 int i, len;
54 54
55 if (usb_pipecontrol (pipe)) { 55 if (usb_pipecontrol (pipe)) {
56 printk (KERN_DEBUG __FILE__ ": setup(8):"); 56 printk (KERN_DEBUG "%s: setup(8):", __FILE__);
57 for (i = 0; i < 8 ; i++) 57 for (i = 0; i < 8 ; i++)
58 printk (" %02x", ((__u8 *) urb->setup_packet) [i]); 58 printk (" %02x", ((__u8 *) urb->setup_packet) [i]);
59 printk ("\n"); 59 printk ("\n");
60 } 60 }
61 if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) { 61 if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) {
62 printk (KERN_DEBUG __FILE__ ": data(%d/%d):", 62 printk (KERN_DEBUG "%s: data(%d/%d):", __FILE__,
63 urb->actual_length, 63 urb->actual_length,
64 urb->transfer_buffer_length); 64 urb->transfer_buffer_length);
65 len = usb_pipeout (pipe)? 65 len = usb_pipeout (pipe)?
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 24eb74781919..afe59be23645 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1051,6 +1051,11 @@ MODULE_LICENSE ("GPL");
1051#define PLATFORM_DRIVER usb_hcd_pnx4008_driver 1051#define PLATFORM_DRIVER usb_hcd_pnx4008_driver
1052#endif 1052#endif
1053 1053
1054#ifdef CONFIG_ARCH_DAVINCI_DA8XX
1055#include "ohci-da8xx.c"
1056#define PLATFORM_DRIVER ohci_hcd_da8xx_driver
1057#endif
1058
1054#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ 1059#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
1055 defined(CONFIG_CPU_SUBTYPE_SH7721) || \ 1060 defined(CONFIG_CPU_SUBTYPE_SH7721) || \
1056 defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 1061 defined(CONFIG_CPU_SUBTYPE_SH7763) || \
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index de42283149c7..18d39f0463ee 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -28,8 +28,8 @@ extern int usb_disabled(void);
28 28
29static void lh7a404_start_hc(struct platform_device *dev) 29static void lh7a404_start_hc(struct platform_device *dev)
30{ 30{
31 printk(KERN_DEBUG __FILE__ 31 printk(KERN_DEBUG "%s: starting LH7A404 OHCI USB Controller\n",
32 ": starting LH7A404 OHCI USB Controller\n"); 32 __FILE__);
33 33
34 /* 34 /*
35 * Now, carefully enable the USB clock, and take 35 * Now, carefully enable the USB clock, and take
@@ -39,14 +39,13 @@ static void lh7a404_start_hc(struct platform_device *dev)
39 udelay(1000); 39 udelay(1000);
40 USBH_CMDSTATUS = OHCI_HCR; 40 USBH_CMDSTATUS = OHCI_HCR;
41 41
42 printk(KERN_DEBUG __FILE__ 42 printk(KERN_DEBUG "%s: Clock to USB host has been enabled \n", __FILE__);
43 ": Clock to USB host has been enabled \n");
44} 43}
45 44
46static void lh7a404_stop_hc(struct platform_device *dev) 45static void lh7a404_stop_hc(struct platform_device *dev)
47{ 46{
48 printk(KERN_DEBUG __FILE__ 47 printk(KERN_DEBUG "%s: stopping LH7A404 OHCI USB Controller\n",
49 ": stopping LH7A404 OHCI USB Controller\n"); 48 __FILE__);
50 49
51 CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */ 50 CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */
52} 51}
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 2769326da42e..cd74bbdd007c 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -327,7 +327,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev)
327 } 327 }
328 i2c_adap = i2c_get_adapter(2); 328 i2c_adap = i2c_get_adapter(2);
329 memset(&i2c_info, 0, sizeof(struct i2c_board_info)); 329 memset(&i2c_info, 0, sizeof(struct i2c_board_info));
330 strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE); 330 strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);
331 isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info, 331 isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
332 normal_i2c); 332 normal_i2c);
333 i2c_put_adapter(i2c_adap); 333 i2c_put_adapter(i2c_adap);
@@ -411,7 +411,7 @@ out3:
411out2: 411out2:
412 clk_put(usb_clk); 412 clk_put(usb_clk);
413out1: 413out1:
414 i2c_unregister_client(isp1301_i2c_client); 414 i2c_unregister_device(isp1301_i2c_client);
415 isp1301_i2c_client = NULL; 415 isp1301_i2c_client = NULL;
416out_i2c_driver: 416out_i2c_driver:
417 i2c_del_driver(&isp1301_driver); 417 i2c_del_driver(&isp1301_driver);
@@ -430,7 +430,7 @@ static int usb_hcd_pnx4008_remove(struct platform_device *pdev)
430 pnx4008_unset_usb_bits(); 430 pnx4008_unset_usb_bits();
431 clk_disable(usb_clk); 431 clk_disable(usb_clk);
432 clk_put(usb_clk); 432 clk_put(usb_clk);
433 i2c_unregister_client(isp1301_i2c_client); 433 i2c_unregister_device(isp1301_i2c_client);
434 isp1301_i2c_client = NULL; 434 isp1301_i2c_client = NULL;
435 i2c_del_driver(&isp1301_driver); 435 i2c_del_driver(&isp1301_driver);
436 436
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 68a301710297..103263c230cf 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -114,21 +114,21 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
114 hcd->rsrc_len = res.end - res.start + 1; 114 hcd->rsrc_len = res.end - res.start + 1;
115 115
116 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 116 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
117 printk(KERN_ERR __FILE__ ": request_mem_region failed\n"); 117 printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
118 rv = -EBUSY; 118 rv = -EBUSY;
119 goto err_rmr; 119 goto err_rmr;
120 } 120 }
121 121
122 irq = irq_of_parse_and_map(dn, 0); 122 irq = irq_of_parse_and_map(dn, 0);
123 if (irq == NO_IRQ) { 123 if (irq == NO_IRQ) {
124 printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n"); 124 printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
125 rv = -EBUSY; 125 rv = -EBUSY;
126 goto err_irq; 126 goto err_irq;
127 } 127 }
128 128
129 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); 129 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
130 if (!hcd->regs) { 130 if (!hcd->regs) {
131 printk(KERN_ERR __FILE__ ": ioremap failed\n"); 131 printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
132 rv = -ENOMEM; 132 rv = -ENOMEM;
133 goto err_ioremap; 133 goto err_ioremap;
134 } 134 }
@@ -169,7 +169,7 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
169 } else 169 } else
170 release_mem_region(res.start, 0x4); 170 release_mem_region(res.start, 0x4);
171 } else 171 } else
172 pr_debug(__FILE__ ": cannot get ehci offset from fdt\n"); 172 pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
173 } 173 }
174 174
175 iounmap(hcd->regs); 175 iounmap(hcd->regs);
@@ -212,7 +212,7 @@ static int ohci_hcd_ppc_of_shutdown(struct of_device *op)
212} 212}
213 213
214 214
215static struct of_device_id ohci_hcd_ppc_of_match[] = { 215static const struct of_device_id ohci_hcd_ppc_of_match[] = {
216#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE 216#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE
217 { 217 {
218 .name = "usb", 218 .name = "usb",
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index cd3398b675b2..89e670e38c10 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -41,14 +41,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
41 41
42 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 42 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
43 if (!res) { 43 if (!res) {
44 pr_debug(__FILE__ ": no irq\n"); 44 pr_debug("%s: no irq\n", __FILE__);
45 return -ENODEV; 45 return -ENODEV;
46 } 46 }
47 irq = res->start; 47 irq = res->start;
48 48
49 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 49 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
50 if (!res) { 50 if (!res) {
51 pr_debug(__FILE__ ": no reg addr\n"); 51 pr_debug("%s: no reg addr\n", __FILE__);
52 return -ENODEV; 52 return -ENODEV;
53 } 53 }
54 54
@@ -59,14 +59,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
59 hcd->rsrc_len = res->end - res->start + 1; 59 hcd->rsrc_len = res->end - res->start + 1;
60 60
61 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { 61 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
62 pr_debug(__FILE__ ": request_mem_region failed\n"); 62 pr_debug("%s: request_mem_region failed\n", __FILE__);
63 retval = -EBUSY; 63 retval = -EBUSY;
64 goto err1; 64 goto err1;
65 } 65 }
66 66
67 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); 67 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
68 if (!hcd->regs) { 68 if (!hcd->regs) {
69 pr_debug(__FILE__ ": ioremap failed\n"); 69 pr_debug("%s: ioremap failed\n", __FILE__);
70 retval = -ENOMEM; 70 retval = -ENOMEM;
71 goto err2; 71 goto err2;
72 } 72 }
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index e4bbe8e188e4..d8eb3bdafabb 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -31,8 +31,8 @@ static void sa1111_start_hc(struct sa1111_dev *dev)
31{ 31{
32 unsigned int usb_rst = 0; 32 unsigned int usb_rst = 0;
33 33
34 printk(KERN_DEBUG __FILE__ 34 printk(KERN_DEBUG "%s: starting SA-1111 OHCI USB Controller\n",
35 ": starting SA-1111 OHCI USB Controller\n"); 35 __FILE__);
36 36
37#ifdef CONFIG_SA1100_BADGE4 37#ifdef CONFIG_SA1100_BADGE4
38 if (machine_is_badge4()) { 38 if (machine_is_badge4()) {
@@ -65,8 +65,8 @@ static void sa1111_start_hc(struct sa1111_dev *dev)
65static void sa1111_stop_hc(struct sa1111_dev *dev) 65static void sa1111_stop_hc(struct sa1111_dev *dev)
66{ 66{
67 unsigned int usb_rst; 67 unsigned int usb_rst;
68 printk(KERN_DEBUG __FILE__ 68 printk(KERN_DEBUG "%s: stopping SA-1111 OHCI USB Controller\n",
69 ": stopping SA-1111 OHCI USB Controller\n"); 69 __FILE__);
70 70
71 /* 71 /*
72 * Put the USB host controller into reset. 72 * Put the USB host controller into reset.
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 5b22a4d1c9e4..e11cc3aa4b82 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -51,6 +51,7 @@
51#include <asm/irq.h> 51#include <asm/irq.h>
52#include <asm/system.h> 52#include <asm/system.h>
53#include <asm/byteorder.h> 53#include <asm/byteorder.h>
54#include <asm/unaligned.h>
54 55
55#include "../core/hcd.h" 56#include "../core/hcd.h"
56#include "sl811.h" 57#include "sl811.h"
@@ -1272,12 +1273,12 @@ sl811h_hub_control(
1272 sl811h_hub_descriptor(sl811, (struct usb_hub_descriptor *) buf); 1273 sl811h_hub_descriptor(sl811, (struct usb_hub_descriptor *) buf);
1273 break; 1274 break;
1274 case GetHubStatus: 1275 case GetHubStatus:
1275 *(__le32 *) buf = cpu_to_le32(0); 1276 put_unaligned_le32(0, buf);
1276 break; 1277 break;
1277 case GetPortStatus: 1278 case GetPortStatus:
1278 if (wIndex != 1) 1279 if (wIndex != 1)
1279 goto error; 1280 goto error;
1280 *(__le32 *) buf = cpu_to_le32(sl811->port1); 1281 put_unaligned_le32(sl811->port1, buf);
1281 1282
1282#ifndef VERBOSE 1283#ifndef VERBOSE
1283 if (*(u16*)(buf+2)) /* only if wPortChange is interesting */ 1284 if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 99cd00fd3514..09197067fe6b 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -735,6 +735,7 @@ static void uhci_stop(struct usb_hcd *hcd)
735 uhci_hc_died(uhci); 735 uhci_hc_died(uhci);
736 uhci_scan_schedule(uhci); 736 uhci_scan_schedule(uhci);
737 spin_unlock_irq(&uhci->lock); 737 spin_unlock_irq(&uhci->lock);
738 synchronize_irq(hcd->irq);
738 739
739 del_timer_sync(&uhci->fsbr_timer); 740 del_timer_sync(&uhci->fsbr_timer);
740 release_uhci(uhci); 741 release_uhci(uhci);
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 33128d52f212..105fa8b025bb 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -406,6 +406,25 @@ static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
406 } 406 }
407} 407}
408 408
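/* Return a human-readable name for the slot state stored in the slot context */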
409char *xhci_get_slot_state(struct xhci_hcd *xhci,
410 struct xhci_container_ctx *ctx)
411{
412 struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
413
414 switch (GET_SLOT_STATE(slot_ctx->dev_state)) {
415 case 0:
416 return "enabled/disabled";
417 case 1:
418 return "default";
419 case 2:
420 return "addressed";
421 case 3:
422 return "configured";
423 default:
424 return "reserved";
425 }
426}
427
409void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) 428void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
410{ 429{
411 /* Fields are 32 bits wide, DMA addresses are in bytes */ 430 /* Fields are 32 bits wide, DMA addresses are in bytes */
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index ecc131c3fe33..78c4edac1db1 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -101,12 +101,15 @@ static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset)
101 101
102 next = readl(base + ext_offset); 102 next = readl(base + ext_offset);
103 103
104 if (ext_offset == XHCI_HCC_PARAMS_OFFSET) 104 if (ext_offset == XHCI_HCC_PARAMS_OFFSET) {
105 /* Find the first extended capability */ 105 /* Find the first extended capability */
106 next = XHCI_HCC_EXT_CAPS(next); 106 next = XHCI_HCC_EXT_CAPS(next);
107 else 107 ext_offset = 0;
108 } else {
108 /* Find the next extended capability */ 109 /* Find the next extended capability */
109 next = XHCI_EXT_CAPS_NEXT(next); 110 next = XHCI_EXT_CAPS_NEXT(next);
111 }
112
110 if (!next) 113 if (!next)
111 return 0; 114 return 0;
112 /* 115 /*
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 5e92c72df642..4cb69e0af834 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -1007,7 +1007,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1007 * for usb_set_interface() and usb_set_configuration() claim). 1007 * for usb_set_interface() and usb_set_configuration() claim).
1008 */ 1008 */
1009 if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], 1009 if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
1010 udev, ep, GFP_KERNEL) < 0) { 1010 udev, ep, GFP_NOIO) < 0) {
1011 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", 1011 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1012 __func__, ep->desc.bEndpointAddress); 1012 __func__, ep->desc.bEndpointAddress);
1013 return -ENOMEM; 1013 return -ENOMEM;
@@ -1181,6 +1181,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1181 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, 1181 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
1182 udev->slot_id); 1182 udev->slot_id);
1183 if (ret < 0) { 1183 if (ret < 0) {
1184 if (command)
1185 list_del(&command->cmd_list);
1184 spin_unlock_irqrestore(&xhci->lock, flags); 1186 spin_unlock_irqrestore(&xhci->lock, flags);
1185 xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); 1187 xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
1186 return -ENOMEM; 1188 return -ENOMEM;
@@ -1264,30 +1266,13 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1264 xhci_zero_in_ctx(xhci, virt_dev); 1266 xhci_zero_in_ctx(xhci, virt_dev);
1265 /* Install new rings and free or cache any old rings */ 1267 /* Install new rings and free or cache any old rings */
1266 for (i = 1; i < 31; ++i) { 1268 for (i = 1; i < 31; ++i) {
1267 int rings_cached;
1268
1269 if (!virt_dev->eps[i].new_ring) 1269 if (!virt_dev->eps[i].new_ring)
1270 continue; 1270 continue;
1271 /* Only cache or free the old ring if it exists. 1271 /* Only cache or free the old ring if it exists.
1272 * It may not if this is the first add of an endpoint. 1272 * It may not if this is the first add of an endpoint.
1273 */ 1273 */
1274 if (virt_dev->eps[i].ring) { 1274 if (virt_dev->eps[i].ring) {
1275 rings_cached = virt_dev->num_rings_cached; 1275 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
1276 if (rings_cached < XHCI_MAX_RINGS_CACHED) {
1277 virt_dev->num_rings_cached++;
1278 rings_cached = virt_dev->num_rings_cached;
1279 virt_dev->ring_cache[rings_cached] =
1280 virt_dev->eps[i].ring;
1281 xhci_dbg(xhci, "Cached old ring, "
1282 "%d ring%s cached\n",
1283 rings_cached,
1284 (rings_cached > 1) ? "s" : "");
1285 } else {
1286 xhci_ring_free(xhci, virt_dev->eps[i].ring);
1287 xhci_dbg(xhci, "Ring cache full (%d rings), "
1288 "freeing ring\n",
1289 virt_dev->num_rings_cached);
1290 }
1291 } 1276 }
1292 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; 1277 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
1293 virt_dev->eps[i].new_ring = NULL; 1278 virt_dev->eps[i].new_ring = NULL;
@@ -1458,6 +1443,131 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
1458} 1443}
1459 1444
1460/* 1445/*
1446 * This submits a Reset Device Command, which will set the device state to 0,
1447 * set the device address to 0, and disable all the endpoints except the default
1448 * control endpoint. The USB core should come back and call
1449 * xhci_address_device(), and then re-set up the configuration. If this is
1450 * called because of a usb_reset_and_verify_device(), then the old alternate
1451 * settings will be re-installed through the normal bandwidth allocation
1452 * functions.
1453 *
1454 * Wait for the Reset Device command to finish. Remove all structures
1455 * associated with the endpoints that were disabled. Clear the input device
1456 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
1457 */
1458int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
1459{
1460 int ret, i;
1461 unsigned long flags;
1462 struct xhci_hcd *xhci;
1463 unsigned int slot_id;
1464 struct xhci_virt_device *virt_dev;
1465 struct xhci_command *reset_device_cmd;
1466 int timeleft;
1467 int last_freed_endpoint;
1468
1469 ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
1470 if (ret <= 0)
1471 return ret;
1472 xhci = hcd_to_xhci(hcd);
1473 slot_id = udev->slot_id;
1474 virt_dev = xhci->devs[slot_id];
1475 if (!virt_dev) {
1476 xhci_dbg(xhci, "%s called with invalid slot ID %u\n",
1477 __func__, slot_id);
1478 return -EINVAL;
1479 }
1480
1481 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
1482 /* Allocate the command structure that holds the struct completion.
1483 * Assume we're in process context, since the normal device reset
1484 * process has to wait for the device anyway. Storage devices are
1485 * reset as part of error handling, so use GFP_NOIO instead of
1486 * GFP_KERNEL.
1487 */
1488 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
1489 if (!reset_device_cmd) {
1490 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
1491 return -ENOMEM;
1492 }
1493
1494 /* Attempt to submit the Reset Device command to the command ring */
1495 spin_lock_irqsave(&xhci->lock, flags);
1496 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
1497 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
1498 ret = xhci_queue_reset_device(xhci, slot_id);
1499 if (ret) {
1500 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
1501 list_del(&reset_device_cmd->cmd_list);
1502 spin_unlock_irqrestore(&xhci->lock, flags);
1503 goto command_cleanup;
1504 }
1505 xhci_ring_cmd_db(xhci);
1506 spin_unlock_irqrestore(&xhci->lock, flags);
1507
1508 /* Wait for the Reset Device command to finish */
1509 timeleft = wait_for_completion_interruptible_timeout(
1510 reset_device_cmd->completion,
1511 USB_CTRL_SET_TIMEOUT);
1512 if (timeleft <= 0) {
1513 xhci_warn(xhci, "%s while waiting for reset device command\n",
1514 timeleft == 0 ? "Timeout" : "Signal");
1515 spin_lock_irqsave(&xhci->lock, flags);
1516 /* The timeout might have raced with the event ring handler, so
1517 * only delete from the list if the item isn't poisoned.
1518 */
1519 if (reset_device_cmd->cmd_list.next != LIST_POISON1)
1520 list_del(&reset_device_cmd->cmd_list);
1521 spin_unlock_irqrestore(&xhci->lock, flags);
1522 ret = -ETIME;
1523 goto command_cleanup;
1524 }
1525
1526 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
1527 * unless we tried to reset a slot ID that wasn't enabled,
1528 * or the device wasn't in the addressed or configured state.
1529 */
1530 ret = reset_device_cmd->status;
1531 switch (ret) {
1532 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
1533 case COMP_CTX_STATE: /* 0.96 completion code for same thing */
1534 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
1535 slot_id,
1536 xhci_get_slot_state(xhci, virt_dev->out_ctx));
1537 xhci_info(xhci, "Not freeing device rings.\n");
1538 /* Don't treat this as an error. May change my mind later. */
1539 ret = 0;
1540 goto command_cleanup;
1541 case COMP_SUCCESS:
1542 xhci_dbg(xhci, "Successful reset device command.\n");
1543 break;
1544 default:
1545 if (xhci_is_vendor_info_code(xhci, ret))
1546 break;
1547 xhci_warn(xhci, "Unknown completion code %u for "
1548 "reset device command.\n", ret);
1549 ret = -EINVAL;
1550 goto command_cleanup;
1551 }
1552
1553 /* Everything but endpoint 0 is disabled, so free or cache the rings. */
1554 last_freed_endpoint = 1;
1555 for (i = 1; i < 31; ++i) {
1556 if (!virt_dev->eps[i].ring)
1557 continue;
1558 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
1559 last_freed_endpoint = i;
1560 }
1561 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
1562 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
1563 ret = 0;
1564
1565command_cleanup:
1566 xhci_free_command(xhci, reset_device_cmd);
1567 return ret;
1568}
1569
1570/*
1461 * At this point, the struct usb_device is about to go away, the device has 1571 * At this point, the struct usb_device is about to go away, the device has
1462 * disconnected, and all traffic has been stopped and the endpoints have been 1572 * disconnected, and all traffic has been stopped and the endpoints have been
1463 * disabled. Free any HC data structures associated with that device. 1573 * disabled. Free any HC data structures associated with that device.
@@ -1694,7 +1804,7 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
1694 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); 1804 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
1695 return -EINVAL; 1805 return -EINVAL;
1696 } 1806 }
1697 config_cmd = xhci_alloc_command(xhci, true, mem_flags); 1807 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
1698 if (!config_cmd) { 1808 if (!config_cmd) {
1699 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); 1809 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
1700 return -ENOMEM; 1810 return -ENOMEM;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index eac5b53aa9e7..208b805b80eb 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -129,6 +129,50 @@ static u32 xhci_port_state_to_neutral(u32 state)
129 return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS); 129 return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
130} 130}
131 131
132static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
133 u32 __iomem *addr, u32 port_status)
134{
135 /* Write 1 to disable the port */
136 xhci_writel(xhci, port_status | PORT_PE, addr);
137 port_status = xhci_readl(xhci, addr);
138 xhci_dbg(xhci, "disable port, actual port %d status = 0x%x\n",
139 wIndex, port_status);
140}
141
142static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
143 u16 wIndex, u32 __iomem *addr, u32 port_status)
144{
145 char *port_change_bit;
146 u32 status;
147
148 switch (wValue) {
149 case USB_PORT_FEAT_C_RESET:
150 status = PORT_RC;
151 port_change_bit = "reset";
152 break;
153 case USB_PORT_FEAT_C_CONNECTION:
154 status = PORT_CSC;
155 port_change_bit = "connect";
156 break;
157 case USB_PORT_FEAT_C_OVER_CURRENT:
158 status = PORT_OCC;
159 port_change_bit = "over-current";
160 break;
161 case USB_PORT_FEAT_C_ENABLE:
162 status = PORT_PEC;
163 port_change_bit = "enable/disable";
164 break;
165 default:
166 /* Should never happen */
167 return;
168 }
169 /* Change bits are all write 1 to clear */
170 xhci_writel(xhci, port_status | status, addr);
171 port_status = xhci_readl(xhci, addr);
172 xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
173 port_change_bit, wIndex, port_status);
174}
175
132int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, 176int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
133 u16 wIndex, char *buf, u16 wLength) 177 u16 wIndex, char *buf, u16 wLength)
134{ 178{
@@ -138,7 +182,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
138 u32 temp, status; 182 u32 temp, status;
139 int retval = 0; 183 int retval = 0;
140 u32 __iomem *addr; 184 u32 __iomem *addr;
141 char *port_change_bit;
142 185
143 ports = HCS_MAX_PORTS(xhci->hcs_params1); 186 ports = HCS_MAX_PORTS(xhci->hcs_params1);
144 187
@@ -229,26 +272,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		temp = xhci_port_state_to_neutral(temp);
 		switch (wValue) {
 		case USB_PORT_FEAT_C_RESET:
-			status = PORT_RC;
-			port_change_bit = "reset";
-			break;
 		case USB_PORT_FEAT_C_CONNECTION:
-			status = PORT_CSC;
-			port_change_bit = "connect";
-			break;
 		case USB_PORT_FEAT_C_OVER_CURRENT:
-			status = PORT_OCC;
-			port_change_bit = "over-current";
+		case USB_PORT_FEAT_C_ENABLE:
+			xhci_clear_port_change_bit(xhci, wValue, wIndex,
+					addr, temp);
+			break;
+		case USB_PORT_FEAT_ENABLE:
+			xhci_disable_port(xhci, wIndex, addr, temp);
 			break;
 		default:
 			goto error;
 		}
-		/* Change bits are all write 1 to clear */
-		xhci_writel(xhci, temp | status, addr);
-		temp = xhci_readl(xhci, addr);
-		xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
-				port_change_bit, wIndex, temp);
-		temp = xhci_readl(xhci, addr); /* unblock any posted writes */
 		break;
 	default:
 error:
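The two helpers added above both finish with a read-back of the port status register; the convention they rely on is the xHCI "write 1 to clear" (RW1C) handling of change bits. A minimal, generic sketch of that idiom, written outside the driver (the register layout, the CHANGE_BITS mask and the plain readl()/writel() accessors are assumptions of this sketch, not the xHCI definitions):

    #include <linux/io.h>
    #include <linux/types.h>

    #define CHANGE_BITS	0x00fe0000	/* hypothetical block of RW1C change bits */

    static void clear_one_change_bit(void __iomem *portsc, u32 bit)
    {
    	u32 status = readl(portsc);

    	/* Writing 0 to an RW1C bit leaves it alone and writing 1 clears it,
    	 * so mask out all other change bits before setting the one we want
    	 * to acknowledge. */
    	status &= ~CHANGE_BITS;
    	writel(status | bit, portsc);
    	readl(portsc);	/* read back to flush the posted write */
    }

In the real code, xhci_port_state_to_neutral() (visible at the top of this hunk) plays the masking role before the helpers OR in the single bit to clear.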
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bffcef7a5545..49f7d72f8b1b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -198,6 +198,31 @@ fail:
 	return 0;
 }
 
+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		unsigned int ep_index)
+{
+	int rings_cached;
+
+	rings_cached = virt_dev->num_rings_cached;
+	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+		virt_dev->num_rings_cached++;
+		rings_cached = virt_dev->num_rings_cached;
+		virt_dev->ring_cache[rings_cached] =
+			virt_dev->eps[ep_index].ring;
+		xhci_dbg(xhci, "Cached old ring, "
+				"%d ring%s cached\n",
+				rings_cached,
+				(rings_cached > 1) ? "s" : "");
+	} else {
+		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+		xhci_dbg(xhci, "Ring cache full (%d rings), "
+				"freeing ring\n",
+				virt_dev->num_rings_cached);
+	}
+	virt_dev->eps[ep_index].ring = NULL;
+}
+
 /* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
  * pointers to the beginning of the ring.
  */
@@ -242,6 +267,8 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
 void xhci_free_container_ctx(struct xhci_hcd *xhci,
 		struct xhci_container_ctx *ctx)
 {
+	if (!ctx)
+		return;
 	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
 	kfree(ctx);
 }
@@ -427,7 +454,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	case USB_SPEED_LOW:
 		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
 		break;
-	case USB_SPEED_VARIABLE:
+	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
 		return -EINVAL;
 		break;
@@ -471,7 +498,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	case USB_SPEED_LOW:
 		ep0_ctx->ep_info2 |= MAX_PACKET(8);
 		break;
-	case USB_SPEED_VARIABLE:
+	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
 		return -EINVAL;
 		break;
@@ -819,7 +846,8 @@ static void scratchpad_free(struct xhci_hcd *xhci)
 }
 
 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
-		bool allocate_completion, gfp_t mem_flags)
+		bool allocate_in_ctx, bool allocate_completion,
+		gfp_t mem_flags)
 {
 	struct xhci_command *command;
 
@@ -827,11 +855,14 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
 	if (!command)
 		return NULL;
 
-	command->in_ctx =
-		xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
-	if (!command->in_ctx) {
-		kfree(command);
-		return NULL;
+	if (allocate_in_ctx) {
+		command->in_ctx =
+			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
+					mem_flags);
+		if (!command->in_ctx) {
+			kfree(command);
+			return NULL;
+		}
 	}
 
 	if (allocate_completion) {
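The new allocate_in_ctx flag lets callers that never touch an input context (such as the reset-device path this series adds) skip the DMA-pool allocation entirely. A rough caller-side sketch, assuming only the signatures visible in these hunks; the surrounding error handling and the GFP flag are invented for illustration:

    	struct xhci_command *cmd;

    	/* No input context needed, but do allocate a completion to wait on. */
    	cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
    	if (!cmd)
    		return -ENOMEM;

    	/* ... queue the command, ring the doorbell, then
    	 *     wait_for_completion(cmd->completion); ... */

    	xhci_free_command(xhci, cmd);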
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e097008d6fb1..417d37aff8d7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -139,6 +139,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
 	.reset_bandwidth = xhci_reset_bandwidth,
 	.address_device = xhci_address_device,
 	.update_hub_device = xhci_update_hub_device,
+	.reset_device = xhci_reset_device,
 
 	/*
 	 * scheduling support
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index ee7bc7ecbc59..6ba841bca4a2 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -953,6 +953,17 @@ bandwidth_change:
 	case TRB_TYPE(TRB_RESET_EP):
 		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
 		break;
+	case TRB_TYPE(TRB_RESET_DEV):
+		xhci_dbg(xhci, "Completed reset device command.\n");
+		slot_id = TRB_TO_SLOT_ID(
+				xhci->cmd_ring->dequeue->generic.field[3]);
+		virt_dev = xhci->devs[slot_id];
+		if (virt_dev)
+			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
+		else
+			xhci_warn(xhci, "Reset device command completion "
+					"for disabled slot %u\n", slot_id);
+		break;
 	default:
 		/* Skip over unknown commands on the event ring */
 		xhci->error_bitmask |= 1 << 6;
@@ -1080,6 +1091,20 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
 	return 0;
 }
 
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
+{
+	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
+		/* Vendor defined "informational" completion code,
+		 * treat as not-an-error.
+		 */
+		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
+				trb_comp_code);
+		xhci_dbg(xhci, "Treating code as success.\n");
+		return 1;
+	}
+	return 0;
+}
+
 /*
  * If this function returns an error condition, it means it got a Transfer
  * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -1196,13 +1221,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		status = -ENOSR;
 		break;
 	default:
-		if (trb_comp_code >= 224 && trb_comp_code <= 255) {
-			/* Vendor defined "informational" completion code,
-			 * treat as not-an-error.
-			 */
-			xhci_dbg(xhci, "Vendor defined info completion code %u\n",
-					trb_comp_code);
-			xhci_dbg(xhci, "Treating code as success.\n");
+		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
 			status = 0;
 			break;
 		}
@@ -2181,6 +2200,14 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 			false);
 }
 
+/* Queue a reset device command TRB */
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+{
+	return queue_command(xhci, 0, 0, 0,
+			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
+			false);
+}
+
 /* Queue a configure endpoint command TRB */
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id, bool command_must_succeed)
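Putting the xhci-ring.c pieces together, the submission side that pairs with the TRB_RESET_DEV completion handling above would look roughly like this. It is a sketch built only from the functions declared in this patch, not a copy of xhci_reset_device(); the reset_device_cmd variable and the unlock label are placeholders:

    	/* Queue the Reset Device TRB for this slot and kick the command ring. */
    	ret = xhci_queue_reset_device(xhci, slot_id);
    	if (ret) {
    		xhci_dbg(xhci, "Couldn't queue reset device command\n");
    		goto unlock;
    	}
    	xhci_ring_cmd_db(xhci);

    	/* handle_cmd_in_cmd_wait_list() in the completion handler wakes us. */
    	wait_for_completion(reset_device_cmd->completion);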
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 877813505ef2..e5eb09b2f38e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1210,6 +1210,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
 void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
+char *xhci_get_slot_state(struct xhci_hcd *xhci,
+		struct xhci_container_ctx *ctx);
 
 /* xHCI memory management */
 void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1233,8 +1235,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
 		struct usb_device *udev, struct usb_host_endpoint *ep,
 		gfp_t mem_flags);
 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		unsigned int ep_index);
 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
-		bool allocate_completion, gfp_t mem_flags);
+		bool allocate_in_ctx, bool allocate_completion,
+		gfp_t mem_flags);
 void xhci_free_command(struct xhci_hcd *xhci,
 		struct xhci_command *command);
 
@@ -1264,6 +1270,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
 void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
+int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev);
 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 
@@ -1272,6 +1279,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
 struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
 		union xhci_trb *start_trb, union xhci_trb *end_trb,
 		dma_addr_t suspect_dma);
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
 void xhci_ring_cmd_db(struct xhci_hcd *xhci);
 void *xhci_setup_one_noop(struct xhci_hcd *xhci);
 void xhci_handle_event(struct xhci_hcd *xhci);
@@ -1293,6 +1301,7 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id);
 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index);
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		struct xhci_td *cur_td, struct xhci_dequeue_state *state);
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index eca355dccf65..e192e8f7c560 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -967,7 +967,7 @@ static const struct file_operations mdc800_device_ops =
967 967
968 968
969 969
970static struct usb_device_id mdc800_table [] = { 970static const struct usb_device_id mdc800_table[] = {
971 { USB_DEVICE(MDC800_VENDOR_ID, MDC800_PRODUCT_ID) }, 971 { USB_DEVICE(MDC800_VENDOR_ID, MDC800_PRODUCT_ID) },
972 { } /* Terminating entry */ 972 { } /* Terminating entry */
973}; 973};
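This constification of the device ID table recurs in most of the drivers below; the USB core and MODULE_DEVICE_TABLE() only ever read the table, so marking it const moves it into read-only data with no behavioural change. In isolation the pattern is just the following (vendor and product IDs here are placeholders, not a real device):

    #include <linux/module.h>
    #include <linux/usb.h>

    static const struct usb_device_id example_table[] = {
    	{ USB_DEVICE(0x1234, 0x5678) },	/* hypothetical device */
    	{ }				/* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, example_table);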
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 459a7287fe01..3a6bcd5fee09 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -155,7 +155,7 @@ static int mts_usb_probe(struct usb_interface *intf,
155 const struct usb_device_id *id); 155 const struct usb_device_id *id);
156static void mts_usb_disconnect(struct usb_interface *intf); 156static void mts_usb_disconnect(struct usb_interface *intf);
157 157
158static struct usb_device_id mts_usb_ids []; 158static const struct usb_device_id mts_usb_ids[];
159 159
160static struct usb_driver mts_usb_driver = { 160static struct usb_driver mts_usb_driver = {
161 .name = "microtekX6", 161 .name = "microtekX6",
@@ -656,7 +656,7 @@ static struct scsi_host_template mts_scsi_host_template = {
656/* The entries of microtek_table must correspond, line-by-line to 656/* The entries of microtek_table must correspond, line-by-line to
657 the entries of mts_supported_products[]. */ 657 the entries of mts_supported_products[]. */
658 658
659static struct usb_device_id mts_usb_ids [] = 659static const struct usb_device_id mts_usb_ids[] =
660{ 660{
661 { USB_DEVICE(0x4ce, 0x0300) }, 661 { USB_DEVICE(0x4ce, 0x0300) },
662 { USB_DEVICE(0x5da, 0x0094) }, 662 { USB_DEVICE(0x5da, 0x0094) },
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index abe3aa67ed00..55660eaf947c 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -87,17 +87,6 @@ config USB_LCD
 	  To compile this driver as a module, choose M here: the
 	  module will be called usblcd.
 
-config USB_BERRY_CHARGE
-	tristate "USB BlackBerry recharge support"
-	depends on USB
-	help
-	  Say Y here if you want to connect a BlackBerry device to your
-	  computer's USB port and have it automatically switch to "recharge"
-	  mode.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called berry_charge.
-
 config USB_LED
 	tristate "USB LED driver support"
 	depends on USB
@@ -242,17 +231,3 @@ config USB_ISIGHTFW
 	  driver beforehand. Tools for doing so are available at
 	  http://bersace03.free.fr
 
-config USB_VST
-	tristate "USB VST driver"
-	depends on USB
-	help
-	  This driver is intended for Vernier Software Technologies
-	  bulk usb devices such as their Ocean-Optics spectrometers or
-	  Labquest.
-	  It is a bulk channel driver with configurable read and write
-	  timeouts.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called vstusb.
-
-
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 0826aab8303f..717703e81425 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -5,7 +5,6 @@
 
 obj-$(CONFIG_USB_ADUTUX) += adutux.o
 obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o
-obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o
 obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o
 obj-$(CONFIG_USB_CYTHERM) += cytherm.o
 obj-$(CONFIG_USB_EMI26) += emi26.o
@@ -23,7 +22,6 @@ obj-$(CONFIG_USB_TEST) += usbtest.o
 obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o
 obj-$(CONFIG_USB_USS720) += uss720.o
 obj-$(CONFIG_USB_SEVSEG) += usbsevseg.o
-obj-$(CONFIG_USB_VST) += vstusb.o
 
 obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/
 
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 203526542013..d240de097c62 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -38,7 +38,7 @@ static int debug = 1;
 #define dbg(lvl, format, arg...) \
 do { \
 	if (debug >= lvl) \
-		printk(KERN_DEBUG __FILE__ " : " format " \n", ## arg); \
+		printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
 } while (0)
 
 
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(debug, "Debug enabled or not");
56#define ADU_PRODUCT_ID 0x0064 56#define ADU_PRODUCT_ID 0x0064
57 57
58/* table of devices that work with this driver */ 58/* table of devices that work with this driver */
59static struct usb_device_id device_table [] = { 59static const struct usb_device_id device_table[] = {
60 { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID) }, /* ADU100 */ 60 { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID) }, /* ADU100 */
61 { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+20) }, /* ADU120 */ 61 { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+20) }, /* ADU120 */
62 { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+30) }, /* ADU130 */ 62 { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+30) }, /* ADU130 */
@@ -132,8 +132,8 @@ static void adu_debug_data(int level, const char *function, int size,
 	if (debug < level)
 		return;
 
-	printk(KERN_DEBUG __FILE__": %s - length = %d, data = ",
-		function, size);
+	printk(KERN_DEBUG "%s: %s - length = %d, data = ",
+		__FILE__, function, size);
 	for (i = 0; i < size; ++i)
 		printk("%.2x ", data[i]);
 	printk("\n");
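The format-string change above (and the matching ones in legousbtower.c and ldusb.c below) stops pasting __FILE__ into the format at preprocessing time and passes it as a "%s" argument instead, so a '%' character in the file path can never be misread as a conversion specifier; it also drops the stray space before the newline. Reduced to a minimal fragment, where value is just a placeholder integer:

    	/* old: the path is concatenated into the format string */
    	printk(KERN_DEBUG __FILE__ " : value = %d \n", value);
    	/* new: the path is ordinary string data */
    	printk(KERN_DEBUG "%s: value = %d\n", __FILE__, value);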
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 1eb9e4162cc6..4d2952f1fb13 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -57,7 +57,7 @@
57 .bInterfaceProtocol = 0x00 57 .bInterfaceProtocol = 0x00
58 58
59/* table of devices that work with this driver */ 59/* table of devices that work with this driver */
60static struct usb_device_id appledisplay_table [] = { 60static const struct usb_device_id appledisplay_table[] = {
61 { APPLEDISPLAY_DEVICE(0x9218) }, 61 { APPLEDISPLAY_DEVICE(0x9218) },
62 { APPLEDISPLAY_DEVICE(0x9219) }, 62 { APPLEDISPLAY_DEVICE(0x9219) },
63 { APPLEDISPLAY_DEVICE(0x921c) }, 63 { APPLEDISPLAY_DEVICE(0x921c) },
@@ -179,7 +179,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
179 return pdata->msgdata[1]; 179 return pdata->msgdata[1];
180} 180}
181 181
182static struct backlight_ops appledisplay_bl_data = { 182static const struct backlight_ops appledisplay_bl_data = {
183 .get_brightness = appledisplay_bl_get_brightness, 183 .get_brightness = appledisplay_bl_get_brightness,
184 .update_status = appledisplay_bl_update_status, 184 .update_status = appledisplay_bl_update_status,
185}; 185};
@@ -283,6 +283,7 @@ static int appledisplay_probe(struct usb_interface *iface,
283 &appledisplay_bl_data); 283 &appledisplay_bl_data);
284 if (IS_ERR(pdata->bd)) { 284 if (IS_ERR(pdata->bd)) {
285 dev_err(&iface->dev, "Backlight registration failed\n"); 285 dev_err(&iface->dev, "Backlight registration failed\n");
286 retval = PTR_ERR(pdata->bd);
286 goto error; 287 goto error;
287 } 288 }
288 289
diff --git a/drivers/usb/misc/berry_charge.c b/drivers/usb/misc/berry_charge.c
deleted file mode 100644
index c05a85bc5925..000000000000
--- a/drivers/usb/misc/berry_charge.c
+++ /dev/null
@@ -1,183 +0,0 @@
1/*
2 * USB BlackBerry charging module
3 *
4 * Copyright (C) 2007 Greg Kroah-Hartman <gregkh@suse.de>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation, version 2.
9 *
10 * Information on how to switch configs was taken by the bcharge.cc file
11 * created by the barry.sf.net project.
12 *
13 * bcharge.cc has the following copyright:
14 * Copyright (C) 2006, Net Direct Inc. (http://www.netdirect.ca/)
15 * and is released under the GPLv2.
16 *
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/init.h>
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/usb.h>
26
27#define RIM_VENDOR 0x0fca
28#define BLACKBERRY 0x0001
29#define BLACKBERRY_PEARL_DUAL 0x0004
30#define BLACKBERRY_PEARL 0x0006
31
32static int debug;
33static int pearl_dual_mode = 1;
34
35#ifdef dbg
36#undef dbg
37#endif
38#define dbg(dev, format, arg...) \
39 if (debug) \
40 dev_printk(KERN_DEBUG , dev , format , ## arg)
41
42static struct usb_device_id id_table [] = {
43 { USB_DEVICE(RIM_VENDOR, BLACKBERRY) },
44 { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL) },
45 { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL_DUAL) },
46 { }, /* Terminating entry */
47};
48MODULE_DEVICE_TABLE(usb, id_table);
49
50static int magic_charge(struct usb_device *udev)
51{
52 char *dummy_buffer = kzalloc(2, GFP_KERNEL);
53 int retval;
54
55 if (!dummy_buffer)
56 return -ENOMEM;
57
58 /* send two magic commands and then set the configuration. The device
59 * will then reset itself with the new power usage and should start
60 * charging. */
61
62 /* Note, with testing, it only seems that the first message is really
63 * needed (at least for the 8700c), but to be safe, we emulate what
64 * other operating systems seem to be sending to their device. We
65 * really need to get some specs for this device to be sure about what
66 * is going on here.
67 */
68 dbg(&udev->dev, "Sending first magic command\n");
69 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
70 0xa5, 0xc0, 0, 1, dummy_buffer, 2, 100);
71 if (retval != 2) {
72 dev_err(&udev->dev, "First magic command failed: %d.\n",
73 retval);
74 goto exit;
75 }
76
77 dbg(&udev->dev, "Sending second magic command\n");
78 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
79 0xa2, 0x40, 0, 1, dummy_buffer, 0, 100);
80 if (retval != 0) {
81 dev_err(&udev->dev, "Second magic command failed: %d.\n",
82 retval);
83 goto exit;
84 }
85
86 dbg(&udev->dev, "Calling set_configuration\n");
87 retval = usb_driver_set_configuration(udev, 1);
88 if (retval)
89 dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval);
90
91exit:
92 kfree(dummy_buffer);
93 return retval;
94}
95
96static int magic_dual_mode(struct usb_device *udev)
97{
98 char *dummy_buffer = kzalloc(2, GFP_KERNEL);
99 int retval;
100
101 if (!dummy_buffer)
102 return -ENOMEM;
103
104 /* send magic command so that the Blackberry Pearl device exposes
105 * two interfaces: both the USB mass-storage one and one which can
106 * be used for database access. */
107 dbg(&udev->dev, "Sending magic pearl command\n");
108 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
109 0xa9, 0xc0, 1, 1, dummy_buffer, 2, 100);
110 dbg(&udev->dev, "Magic pearl command returned %d\n", retval);
111
112 dbg(&udev->dev, "Calling set_configuration\n");
113 retval = usb_driver_set_configuration(udev, 1);
114 if (retval)
115 dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval);
116
117 kfree(dummy_buffer);
118 return retval;
119}
120
121static int berry_probe(struct usb_interface *intf,
122 const struct usb_device_id *id)
123{
124 struct usb_device *udev = interface_to_usbdev(intf);
125
126 if (udev->bus_mA < 500) {
127 dbg(&udev->dev, "Not enough power to charge available\n");
128 return -ENODEV;
129 }
130
131 dbg(&udev->dev, "Power is set to %dmA\n",
132 udev->actconfig->desc.bMaxPower * 2);
133
134 /* check the power usage so we don't try to enable something that is
135 * already enabled */
136 if ((udev->actconfig->desc.bMaxPower * 2) == 500) {
137 dbg(&udev->dev, "device is already charging, power is "
138 "set to %dmA\n", udev->actconfig->desc.bMaxPower * 2);
139 return -ENODEV;
140 }
141
142 /* turn the power on */
143 magic_charge(udev);
144
145 if ((le16_to_cpu(udev->descriptor.idProduct) == BLACKBERRY_PEARL) &&
146 (pearl_dual_mode))
147 magic_dual_mode(udev);
148
149 /* we don't really want to bind to the device, userspace programs can
150 * handle the syncing just fine, so get outta here. */
151 return -ENODEV;
152}
153
154static void berry_disconnect(struct usb_interface *intf)
155{
156}
157
158static struct usb_driver berry_driver = {
159 .name = "berry_charge",
160 .probe = berry_probe,
161 .disconnect = berry_disconnect,
162 .id_table = id_table,
163};
164
165static int __init berry_init(void)
166{
167 return usb_register(&berry_driver);
168}
169
170static void __exit berry_exit(void)
171{
172 usb_deregister(&berry_driver);
173}
174
175module_init(berry_init);
176module_exit(berry_exit);
177
178MODULE_LICENSE("GPL");
179MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
180module_param(debug, bool, S_IRUGO | S_IWUSR);
181MODULE_PARM_DESC(debug, "Debug enabled or not");
182module_param(pearl_dual_mode, bool, S_IRUGO | S_IWUSR);
183MODULE_PARM_DESC(pearl_dual_mode, "Change Blackberry Pearl to run in dual mode");
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 5720bfef6a38..1547d8cac5fb 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -56,7 +56,7 @@
56 56
57 57
58/* table of devices that work with this driver */ 58/* table of devices that work with this driver */
59static struct usb_device_id cypress_table [] = { 59static const struct usb_device_id cypress_table[] = {
60 { USB_DEVICE(CYPRESS_VENDOR_ID, CYPRESS_PRODUCT_ID) }, 60 { USB_DEVICE(CYPRESS_VENDOR_ID, CYPRESS_PRODUCT_ID) },
61 { } 61 { }
62}; 62};
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 4fb3c38b924b..b9cbbbda8245 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -27,7 +27,7 @@
27#define USB_SKEL_VENDOR_ID 0x04b4 27#define USB_SKEL_VENDOR_ID 0x04b4
28#define USB_SKEL_PRODUCT_ID 0x0002 28#define USB_SKEL_PRODUCT_ID 0x0002
29 29
30static struct usb_device_id id_table [] = { 30static const struct usb_device_id id_table[] = {
31 { USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) }, 31 { USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
32 { } 32 { }
33}; 33};
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index 879a980ca8c4..a6521c95f683 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -245,7 +245,7 @@ wraperr:
245 return err; 245 return err;
246} 246}
247 247
248static struct usb_device_id id_table [] = { 248static const struct usb_device_id id_table[] = {
249 { USB_DEVICE(EMI26_VENDOR_ID, EMI26_PRODUCT_ID) }, 249 { USB_DEVICE(EMI26_VENDOR_ID, EMI26_PRODUCT_ID) },
250 { USB_DEVICE(EMI26_VENDOR_ID, EMI26B_PRODUCT_ID) }, 250 { USB_DEVICE(EMI26_VENDOR_ID, EMI26B_PRODUCT_ID) },
251 { } /* Terminating entry */ 251 { } /* Terminating entry */
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index 59860b328534..fc15ad4c3139 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -259,7 +259,7 @@ wraperr:
259 return err; 259 return err;
260} 260}
261 261
262static __devinitdata struct usb_device_id id_table [] = { 262static const struct usb_device_id id_table[] __devinitconst = {
263 { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) }, 263 { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
264 { } /* Terminating entry */ 264 { } /* Terminating entry */
265}; 265};
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 9d0675ed0d4c..1edb6d361896 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -86,7 +86,7 @@ static struct list_head ftdi_static_list;
86#define USB_FTDI_ELAN_VENDOR_ID 0x0403 86#define USB_FTDI_ELAN_VENDOR_ID 0x0403
87#define USB_FTDI_ELAN_PRODUCT_ID 0xd6ea 87#define USB_FTDI_ELAN_PRODUCT_ID 0xd6ea
88/* table of devices that work with this driver*/ 88/* table of devices that work with this driver*/
89static struct usb_device_id ftdi_elan_table[] = { 89static const struct usb_device_id ftdi_elan_table[] = {
90 {USB_DEVICE(USB_FTDI_ELAN_VENDOR_ID, USB_FTDI_ELAN_PRODUCT_ID)}, 90 {USB_DEVICE(USB_FTDI_ELAN_VENDOR_ID, USB_FTDI_ELAN_PRODUCT_ID)},
91 { /* Terminating entry */ } 91 { /* Terminating entry */ }
92}; 92};
@@ -623,9 +623,12 @@ static void ftdi_elan_status_work(struct work_struct *work)
 */
 static int ftdi_elan_open(struct inode *inode, struct file *file)
 {
-	int subminor = iminor(inode);
-	struct usb_interface *interface = usb_find_interface(&ftdi_elan_driver,
-			subminor);
+	int subminor;
+	struct usb_interface *interface;
+
+	subminor = iminor(inode);
+	interface = usb_find_interface(&ftdi_elan_driver, subminor);
+
 	if (!interface) {
 		printk(KERN_ERR "can't find device for minor %d\n", subminor);
 		return -ENODEV;
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 1337a9ce80b9..a54c3cb804ce 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -48,7 +48,7 @@
48#define ID_CHERRY 0x0010 48#define ID_CHERRY 0x0010
49 49
50/* device ID table */ 50/* device ID table */
51static struct usb_device_id idmouse_table[] = { 51static const struct usb_device_id idmouse_table[] = {
52 {USB_DEVICE(ID_SIEMENS, ID_IDMOUSE)}, /* Siemens ID Mouse (Professional) */ 52 {USB_DEVICE(ID_SIEMENS, ID_IDMOUSE)}, /* Siemens ID Mouse (Professional) */
53 {USB_DEVICE(ID_SIEMENS, ID_CHERRY )}, /* Cherry FingerTIP ID Board */ 53 {USB_DEVICE(ID_SIEMENS, ID_CHERRY )}, /* Cherry FingerTIP ID Board */
54 {} /* terminating null entry */ 54 {} /* terminating null entry */
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index e75bb87ee92b..d3c852363883 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -139,7 +139,7 @@ static int usb_set_report(struct usb_interface *intf, unsigned char type,
139/* driver registration */ 139/* driver registration */
140/*---------------------*/ 140/*---------------------*/
141/* table of devices that work with this driver */ 141/* table of devices that work with this driver */
142static struct usb_device_id iowarrior_ids[] = { 142static const struct usb_device_id iowarrior_ids[] = {
143 {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40)}, 143 {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40)},
144 {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24)}, 144 {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24)},
145 {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)}, 145 {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
@@ -602,10 +602,12 @@ static int iowarrior_open(struct inode *inode, struct file *file)
 
 	dbg("%s", __func__);
 
+	lock_kernel();
 	subminor = iminor(inode);
 
 	interface = usb_find_interface(&iowarrior_driver, subminor);
 	if (!interface) {
+		unlock_kernel();
 		err("%s - error, can't find device for minor %d", __func__,
 		    subminor);
 		return -ENODEV;
@@ -615,6 +617,7 @@ static int iowarrior_open(struct inode *inode, struct file *file)
 	dev = usb_get_intfdata(interface);
 	if (!dev) {
 		mutex_unlock(&iowarrior_open_disc_lock);
+		unlock_kernel();
 		return -ENODEV;
 	}
 
@@ -641,6 +644,7 @@ static int iowarrior_open(struct inode *inode, struct file *file)
 
 out:
 	mutex_unlock(&dev->mutex);
+	unlock_kernel();
 	return retval;
 }
 
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index b897f6554ecd..06e990adc6cd 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -26,7 +26,7 @@
26#include <linux/errno.h> 26#include <linux/errno.h>
27#include <linux/module.h> 27#include <linux/module.h>
28 28
29static struct usb_device_id id_table[] = { 29static const struct usb_device_id id_table[] = {
30 {USB_DEVICE(0x05ac, 0x8300)}, 30 {USB_DEVICE(0x05ac, 0x8300)},
31 {}, 31 {},
32}; 32};
@@ -112,6 +112,8 @@ out:
112 return ret; 112 return ret;
113} 113}
114 114
115MODULE_FIRMWARE("isight.fw");
116
115static void isight_firmware_disconnect(struct usb_interface *intf) 117static void isight_firmware_disconnect(struct usb_interface *intf)
116{ 118{
117} 119}
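The MODULE_FIRMWARE() line added here only records the firmware name in the module's metadata (for modinfo and initramfs tooling); the actual load still goes through request_firmware(). A minimal pairing of the two, with the probe plumbing elided and the function name chosen for this sketch rather than taken from the driver:

    #include <linux/device.h>
    #include <linux/firmware.h>
    #include <linux/module.h>

    MODULE_FIRMWARE("isight.fw");

    static int load_isight_firmware(struct device *dev)
    {
    	const struct firmware *fw;
    	int ret;

    	ret = request_firmware(&fw, "isight.fw", dev);
    	if (ret)
    		return ret;
    	/* ... send fw->data / fw->size to the device ... */
    	release_firmware(fw);
    	return 0;
    }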
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 90f130126c10..dd41d8710043 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -69,7 +69,7 @@
69#endif 69#endif
70 70
71/* table of devices that work with this driver */ 71/* table of devices that work with this driver */
72static struct usb_device_id ld_usb_table [] = { 72static const struct usb_device_id ld_usb_table[] = {
73 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) }, 73 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
74 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) }, 74 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
75 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) }, 75 { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
@@ -798,7 +798,7 @@ static int __init ld_usb_init(void)
798 /* register this driver with the USB subsystem */ 798 /* register this driver with the USB subsystem */
799 retval = usb_register(&ld_usb_driver); 799 retval = usb_register(&ld_usb_driver);
800 if (retval) 800 if (retval)
801 err("usb_register failed for the "__FILE__" driver. Error number %d\n", retval); 801 err("usb_register failed for the %s driver. Error number %d\n", __FILE__, retval);
802 802
803 return retval; 803 return retval;
804} 804}
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index faa6d623de78..8547bf9e3175 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -95,8 +95,11 @@
 
 /* Use our own dbg macro */
 #undef dbg
-#define dbg(lvl, format, arg...) do { if (debug >= lvl) printk(KERN_DEBUG __FILE__ ": " format "\n", ## arg); } while (0)
-
+#define dbg(lvl, format, arg...) \
+do { \
+	if (debug >= lvl) \
+		printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
+} while (0)
 
 /* Version Information */
 #define DRIVER_VERSION "v0.96"
@@ -192,7 +195,7 @@ struct tower_get_version_reply {
192 195
193 196
194/* table of devices that work with this driver */ 197/* table of devices that work with this driver */
195static struct usb_device_id tower_table [] = { 198static const struct usb_device_id tower_table[] = {
196 { USB_DEVICE(LEGO_USB_TOWER_VENDOR_ID, LEGO_USB_TOWER_PRODUCT_ID) }, 199 { USB_DEVICE(LEGO_USB_TOWER_VENDOR_ID, LEGO_USB_TOWER_PRODUCT_ID) },
197 { } /* Terminating entry */ 200 { } /* Terminating entry */
198}; 201};
@@ -302,7 +305,7 @@ static inline void lego_usb_tower_debug_data (int level, const char *function, i
302 if (debug < level) 305 if (debug < level)
303 return; 306 return;
304 307
305 printk (KERN_DEBUG __FILE__": %s - length = %d, data = ", function, size); 308 printk (KERN_DEBUG "%s: %s - length = %d, data = ", __FILE__, function, size);
306 for (i = 0; i < size; ++i) { 309 for (i = 0; i < size; ++i) {
307 printk ("%.2x ", data[i]); 310 printk ("%.2x ", data[i]);
308 } 311 }
@@ -1055,7 +1058,7 @@ static int __init lego_usb_tower_init(void)
1055 /* register this driver with the USB subsystem */ 1058 /* register this driver with the USB subsystem */
1056 result = usb_register(&tower_driver); 1059 result = usb_register(&tower_driver);
1057 if (result < 0) { 1060 if (result < 0) {
1058 err("usb_register failed for the "__FILE__" driver. Error number %d", result); 1061 err("usb_register failed for the %s driver. Error number %d", __FILE__, result);
1059 retval = -1; 1062 retval = -1;
1060 goto exit; 1063 goto exit;
1061 } 1064 }
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 32d0199d0c32..a85771b1563d 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -78,10 +78,13 @@ static int open_rio(struct inode *inode, struct file *file)
 {
 	struct rio_usb_data *rio = &rio_instance;
 
+	/* against disconnect() */
+	lock_kernel();
 	mutex_lock(&(rio->lock));
 
 	if (rio->isopen || !rio->present) {
 		mutex_unlock(&(rio->lock));
+		unlock_kernel();
 		return -EBUSY;
 	}
 	rio->isopen = 1;
@@ -91,6 +94,7 @@ static int open_rio(struct inode *inode, struct file *file)
 	mutex_unlock(&(rio->lock));
 
 	dev_info(&rio->rio_dev->dev, "Rio opened.\n");
+	unlock_kernel();
 
 	return 0;
 }
@@ -115,7 +119,6 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
 	int retries;
 	int retval=0;
 
-	lock_kernel();
 	mutex_lock(&(rio->lock));
 	/* Sanity check to make sure rio is connected, powered, etc */
 	if (rio->present == 0 || rio->rio_dev == NULL) {
@@ -254,7 +257,6 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
 
 err_out:
 	mutex_unlock(&(rio->lock));
-	unlock_kernel();
 	return retval;
 }
 
@@ -489,6 +491,7 @@ static void disconnect_rio(struct usb_interface *intf)
 	struct rio_usb_data *rio = usb_get_intfdata (intf);
 
 	usb_set_intfdata (intf, NULL);
+	lock_kernel();
 	if (rio) {
 		usb_deregister_dev(intf, &usb_rio_class);
 
@@ -498,6 +501,7 @@ static void disconnect_rio(struct usb_interface *intf)
 		/* better let it finish - the release will do whats needed */
 		rio->rio_dev = NULL;
 		mutex_unlock(&(rio->lock));
+		unlock_kernel();
 		return;
 	}
 	kfree(rio->ibuf);
@@ -508,9 +512,10 @@ static void disconnect_rio(struct usb_interface *intf)
 		rio->present = 0;
 		mutex_unlock(&(rio->lock));
 	}
+	unlock_kernel();
 }
 
-static struct usb_device_id rio_table [] = {
+static const struct usb_device_id rio_table[] = {
 	{ USB_DEVICE(0x0841, 1) }, /* Rio 500 */
 	{ } /* Terminating entry */
 };
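The lock_kernel()/unlock_kernel() calls added in this file (and in iowarrior.c, sisusb.c and usblcd.c below) follow the BKL-pushdown pattern: the char-device core stopped taking the big kernel lock around open(), so drivers that still depend on it to serialize open() against disconnect() now take it themselves for the short lookup window. Stripped to a skeleton, with example_driver and the other names as placeholders rather than any driver's real identifiers:

    #include <linux/fs.h>
    #include <linux/smp_lock.h>
    #include <linux/usb.h>

    static int example_open(struct inode *inode, struct file *file)
    {
    	struct usb_interface *intf;
    	int ret = 0;

    	lock_kernel();		/* serialize against disconnect() */
    	intf = usb_find_interface(&example_driver, iminor(inode));
    	if (!intf)
    		ret = -ENODEV;
    	else
    		file->private_data = usb_get_intfdata(intf);
    	unlock_kernel();

    	return ret;
    }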
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 8b37a4b9839e..aae95a009bd5 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -250,7 +250,7 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe,
250 sisusb->urbstatus[index] |= SU_URB_BUSY; 250 sisusb->urbstatus[index] |= SU_URB_BUSY;
251 251
252 /* Submit URB */ 252 /* Submit URB */
253 retval = usb_submit_urb(urb, GFP_ATOMIC); 253 retval = usb_submit_urb(urb, GFP_KERNEL);
254 254
255 /* If OK, and if timeout > 0, wait for completion */ 255 /* If OK, and if timeout > 0, wait for completion */
256 if ((retval == 0) && timeout) { 256 if ((retval == 0) && timeout) {
@@ -306,7 +306,7 @@ sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data,
306 urb->actual_length = 0; 306 urb->actual_length = 0;
307 307
308 sisusb->completein = 0; 308 sisusb->completein = 0;
309 retval = usb_submit_urb(urb, GFP_ATOMIC); 309 retval = usb_submit_urb(urb, GFP_KERNEL);
310 if (retval == 0) { 310 if (retval == 0) {
311 wait_event_timeout(sisusb->wait_q, sisusb->completein, timeout); 311 wait_event_timeout(sisusb->wait_q, sisusb->completein, timeout);
312 if (!sisusb->completein) { 312 if (!sisusb->completein) {
@@ -2416,21 +2416,28 @@ sisusb_open(struct inode *inode, struct file *file)
 	struct usb_interface *interface;
 	int subminor = iminor(inode);
 
-	if (!(interface = usb_find_interface(&sisusb_driver, subminor)))
+	lock_kernel();
+	if (!(interface = usb_find_interface(&sisusb_driver, subminor))) {
+		unlock_kernel();
 		return -ENODEV;
+	}
 
-	if (!(sisusb = usb_get_intfdata(interface)))
+	if (!(sisusb = usb_get_intfdata(interface))) {
+		unlock_kernel();
 		return -ENODEV;
+	}
 
 	mutex_lock(&sisusb->lock);
 
 	if (!sisusb->present || !sisusb->ready) {
 		mutex_unlock(&sisusb->lock);
+		unlock_kernel();
 		return -ENODEV;
 	}
 
 	if (sisusb->isopen) {
 		mutex_unlock(&sisusb->lock);
+		unlock_kernel();
 		return -EBUSY;
 	}
 
@@ -2439,11 +2446,13 @@ sisusb_open(struct inode *inode, struct file *file)
 		if (sisusb_init_gfxdevice(sisusb, 0)) {
 			mutex_unlock(&sisusb->lock);
 			dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
+			unlock_kernel();
 			return -EIO;
 		}
 		} else {
 			mutex_unlock(&sisusb->lock);
 			dev_err(&sisusb->sisusb_dev->dev, "Device not attached to USB 2.0 hub\n");
+			unlock_kernel();
 			return -EIO;
 		}
 	}
@@ -2456,6 +2465,7 @@ sisusb_open(struct inode *inode, struct file *file)
 	file->private_data = sisusb;
 
 	mutex_unlock(&sisusb->lock);
+	unlock_kernel();
 
 	return 0;
 }
@@ -3238,7 +3248,7 @@ static void sisusb_disconnect(struct usb_interface *intf)
3238 kref_put(&sisusb->kref, sisusb_delete); 3248 kref_put(&sisusb->kref, sisusb_delete);
3239} 3249}
3240 3250
3241static struct usb_device_id sisusb_table [] = { 3251static const struct usb_device_id sisusb_table[] = {
3242 { USB_DEVICE(0x0711, 0x0550) }, 3252 { USB_DEVICE(0x0711, 0x0550) },
3243 { USB_DEVICE(0x0711, 0x0900) }, 3253 { USB_DEVICE(0x0711, 0x0900) },
3244 { USB_DEVICE(0x0711, 0x0901) }, 3254 { USB_DEVICE(0x0711, 0x0901) },
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index 2e14102955c5..5da28eaee314 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -33,7 +33,7 @@
33#define TRANCEVIBRATOR_VENDOR_ID 0x0b49 /* ASCII Corporation */ 33#define TRANCEVIBRATOR_VENDOR_ID 0x0b49 /* ASCII Corporation */
34#define TRANCEVIBRATOR_PRODUCT_ID 0x064f /* Trance Vibrator */ 34#define TRANCEVIBRATOR_PRODUCT_ID 0x064f /* Trance Vibrator */
35 35
36static struct usb_device_id id_table [] = { 36static const struct usb_device_id id_table[] = {
37 { USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) }, 37 { USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) },
38 { }, 38 { },
39}; 39};
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 4fb120357c55..90aede90553e 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -30,7 +30,7 @@
30#define IOCTL_GET_DRV_VERSION 2 30#define IOCTL_GET_DRV_VERSION 2
31 31
32 32
33static struct usb_device_id id_table [] = { 33static const struct usb_device_id id_table[] = {
34 { .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, }, 34 { .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, },
35 { }, 35 { },
36}; 36};
@@ -74,10 +74,12 @@ static int lcd_open(struct inode *inode, struct file *file)
 	struct usb_interface *interface;
 	int subminor, r;
 
+	lock_kernel();
 	subminor = iminor(inode);
 
 	interface = usb_find_interface(&lcd_driver, subminor);
 	if (!interface) {
+		unlock_kernel();
 		err ("USBLCD: %s - error, can't find device for minor %d",
 		     __func__, subminor);
 		return -ENODEV;
@@ -87,6 +89,7 @@ static int lcd_open(struct inode *inode, struct file *file)
 	dev = usb_get_intfdata(interface);
 	if (!dev) {
 		mutex_unlock(&open_disc_mutex);
+		unlock_kernel();
 		return -ENODEV;
 	}
 
@@ -98,11 +101,13 @@ static int lcd_open(struct inode *inode, struct file *file)
 	r = usb_autopm_get_interface(interface);
 	if (r < 0) {
 		kref_put(&dev->kref, lcd_delete);
+		unlock_kernel();
 		return r;
 	}
 
 	/* save our object in the file's private structure */
 	file->private_data = dev;
+	unlock_kernel();
 
 	return 0;
 }
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 06cb71942dc7..63da2c3c838f 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -24,7 +24,7 @@
24#define PRODUCT_ID 0x1223 24#define PRODUCT_ID 0x1223
25 25
26/* table of devices that work with this driver */ 26/* table of devices that work with this driver */
27static struct usb_device_id id_table [] = { 27static const struct usb_device_id id_table[] = {
28 { USB_DEVICE(VENDOR_ID, PRODUCT_ID) }, 28 { USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
29 { }, 29 { },
30}; 30};
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 3db255537e79..a9555cb901a1 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -27,7 +27,7 @@
27#define MAXLEN 6 27#define MAXLEN 6
28 28
29/* table of devices that work with this driver */ 29/* table of devices that work with this driver */
30static struct usb_device_id id_table[] = { 30static const struct usb_device_id id_table[] = {
31 { USB_DEVICE(VENDOR_ID, PRODUCT_ID) }, 31 { USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
32 { }, 32 { },
33}; 33};
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 3dab0c0b196f..a21cce6f7403 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1580,10 +1580,6 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1580 return -ERESTARTSYS; 1580 return -ERESTARTSYS;
1581 1581
1582 /* FIXME: What if a system sleep starts while a test is running? */ 1582 /* FIXME: What if a system sleep starts while a test is running? */
1583 if (!intf->is_active) {
1584 mutex_unlock(&dev->lock);
1585 return -EHOSTUNREACH;
1586 }
1587 1583
1588 /* some devices, like ez-usb default devices, need a non-default 1584 /* some devices, like ez-usb default devices, need a non-default
1589 * altsetting to have any active endpoints. some tests change 1585 * altsetting to have any active endpoints. some tests change
@@ -2101,7 +2097,7 @@ static struct usbtest_info generic_info = {
2101#endif 2097#endif
2102 2098
2103 2099
2104static struct usb_device_id id_table [] = { 2100static const struct usb_device_id id_table[] = {
2105 2101
2106 /*-------------------------------------------------------------*/ 2102 /*-------------------------------------------------------------*/
2107 2103
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 9a6c27a01793..f56fed53f2dd 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -770,7 +770,7 @@ static void uss720_disconnect(struct usb_interface *intf)
770} 770}
771 771
772/* table of cables that work through this driver */ 772/* table of cables that work through this driver */
773static struct usb_device_id uss720_table [] = { 773static const struct usb_device_id uss720_table[] = {
774 { USB_DEVICE(0x047e, 0x1001) }, 774 { USB_DEVICE(0x047e, 0x1001) },
775 { USB_DEVICE(0x0557, 0x2001) }, 775 { USB_DEVICE(0x0557, 0x2001) },
776 { USB_DEVICE(0x0729, 0x1284) }, 776 { USB_DEVICE(0x0729, 0x1284) },
diff --git a/drivers/usb/misc/vstusb.c b/drivers/usb/misc/vstusb.c
deleted file mode 100644
index f26ea8dc1577..000000000000
--- a/drivers/usb/misc/vstusb.c
+++ /dev/null
@@ -1,783 +0,0 @@
1/*****************************************************************************
2 * File: drivers/usb/misc/vstusb.c
3 *
4 * Purpose: Support for the bulk USB Vernier Spectrophotometers
5 *
6 * Author: Johnnie Peters
7 * Axian Consulting
8 * Beaverton, OR, USA 97005
9 *
10 * Modified by: EQware Engineering, Inc.
11 * Oregon City, OR, USA 97045
12 *
13 * Copyright: 2007, 2008
14 * Vernier Software & Technology
15 * Beaverton, OR, USA 97005
16 *
17 * Web: www.vernier.com
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License version 2 as
21 * published by the Free Software Foundation.
22 *
23 *****************************************************************************/
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/init.h>
27#include <linux/slab.h>
28#include <linux/module.h>
29#include <linux/mutex.h>
30#include <linux/uaccess.h>
31#include <linux/usb.h>
32
33#include <linux/usb/vstusb.h>
34
35#define DRIVER_VERSION "VST USB Driver Version 1.5"
36#define DRIVER_DESC "Vernier Software Technology Bulk USB Driver"
37
38#ifdef CONFIG_USB_DYNAMIC_MINORS
39 #define VSTUSB_MINOR_BASE 0
40#else
41 #define VSTUSB_MINOR_BASE 199
42#endif
43
44#define USB_VENDOR_OCEANOPTICS 0x2457
45#define USB_VENDOR_VERNIER 0x08F7 /* Vernier Software & Technology */
46
47#define USB_PRODUCT_USB2000 0x1002
48#define USB_PRODUCT_ADC1000_FW 0x1003 /* firmware download (renumerates) */
49#define USB_PRODUCT_ADC1000 0x1004
50#define USB_PRODUCT_HR2000_FW 0x1009 /* firmware download (renumerates) */
51#define USB_PRODUCT_HR2000 0x100A
52#define USB_PRODUCT_HR4000_FW 0x1011 /* firmware download (renumerates) */
53#define USB_PRODUCT_HR4000 0x1012
54#define USB_PRODUCT_USB650 0x1014 /* "Red Tide" */
55#define USB_PRODUCT_QE65000 0x1018
56#define USB_PRODUCT_USB4000 0x1022
57#define USB_PRODUCT_USB325 0x1024 /* "Vernier Spectrometer" */
58
59#define USB_PRODUCT_LABPRO 0x0001
60#define USB_PRODUCT_LABQUEST 0x0005
61
62#define VST_MAXBUFFER (64*1024)
63
64static struct usb_device_id id_table[] = {
65 { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB2000)},
66 { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_HR4000)},
67 { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB650)},
68 { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB4000)},
69 { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB325)},
70 { USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABQUEST)},
71 { USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABPRO)},
72 {},
73};
74
75MODULE_DEVICE_TABLE(usb, id_table);
76
77struct vstusb_device {
78 struct kref kref;
79 struct mutex lock;
80 struct usb_device *usb_dev;
81 char present;
82 char isopen;
83 struct usb_anchor submitted;
84 int rd_pipe;
85 int rd_timeout_ms;
86 int wr_pipe;
87 int wr_timeout_ms;
88};
89#define to_vst_dev(d) container_of(d, struct vstusb_device, kref)
90
91static struct usb_driver vstusb_driver;
92
93static void vstusb_delete(struct kref *kref)
94{
95 struct vstusb_device *vstdev = to_vst_dev(kref);
96
97 usb_put_dev(vstdev->usb_dev);
98 kfree(vstdev);
99}
100
101static int vstusb_open(struct inode *inode, struct file *file)
102{
103 struct vstusb_device *vstdev;
104 struct usb_interface *interface;
105
106 interface = usb_find_interface(&vstusb_driver, iminor(inode));
107
108 if (!interface) {
109 printk(KERN_ERR KBUILD_MODNAME
110 ": %s - error, can't find device for minor %d\n",
111 __func__, iminor(inode));
112 return -ENODEV;
113 }
114
115 vstdev = usb_get_intfdata(interface);
116
117 if (!vstdev)
118 return -ENODEV;
119
120 /* lock this device */
121 mutex_lock(&vstdev->lock);
122
123 /* can only open one time */
124 if ((!vstdev->present) || (vstdev->isopen)) {
125 mutex_unlock(&vstdev->lock);
126 return -EBUSY;
127 }
128
129 /* increment our usage count */
130 kref_get(&vstdev->kref);
131
132 vstdev->isopen = 1;
133
134 /* save device in the file's private structure */
135 file->private_data = vstdev;
136
137 dev_dbg(&vstdev->usb_dev->dev, "%s: opened\n", __func__);
138
139 mutex_unlock(&vstdev->lock);
140
141 return 0;
142}
143
144static int vstusb_release(struct inode *inode, struct file *file)
145{
146 struct vstusb_device *vstdev;
147
148 vstdev = file->private_data;
149
150 if (vstdev == NULL)
151 return -ENODEV;
152
153 mutex_lock(&vstdev->lock);
154
155 vstdev->isopen = 0;
156
157 dev_dbg(&vstdev->usb_dev->dev, "%s: released\n", __func__);
158
159 mutex_unlock(&vstdev->lock);
160
161 kref_put(&vstdev->kref, vstusb_delete);
162
163 return 0;
164}
165
166static void usb_api_blocking_completion(struct urb *urb)
167{
168 struct completion *completeit = urb->context;
169
170 complete(completeit);
171}
172
173static int vstusb_fill_and_send_urb(struct urb *urb,
174 struct usb_device *usb_dev,
175 unsigned int pipe, void *data,
176 unsigned int len, struct completion *done)
177{
178 struct usb_host_endpoint *ep;
179 struct usb_host_endpoint **hostep;
180 unsigned int pipend;
181
182 int status;
183
184 hostep = usb_pipein(pipe) ? usb_dev->ep_in : usb_dev->ep_out;
185 pipend = usb_pipeendpoint(pipe);
186 ep = hostep[pipend];
187
188 if (!ep || (len == 0))
189 return -EINVAL;
190
191 if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
192 == USB_ENDPOINT_XFER_INT) {
193 pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
194 usb_fill_int_urb(urb, usb_dev, pipe, data, len,
195 (usb_complete_t)usb_api_blocking_completion,
196 NULL, ep->desc.bInterval);
197 } else
198 usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
199 (usb_complete_t)usb_api_blocking_completion,
200 NULL);
201
202 init_completion(done);
203 urb->context = done;
204 urb->actual_length = 0;
205 status = usb_submit_urb(urb, GFP_KERNEL);
206
207 return status;
208}
209
210static int vstusb_complete_urb(struct urb *urb, struct completion *done,
211 int timeout, int *actual_length)
212{
213 unsigned long expire;
214 int status;
215
216 expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
217 if (!wait_for_completion_interruptible_timeout(done, expire)) {
218 usb_kill_urb(urb);
219 status = urb->status == -ENOENT ? -ETIMEDOUT : urb->status;
220
221 dev_dbg(&urb->dev->dev,
222 "%s timed out on ep%d%s len=%d/%d, urb status = %d\n",
223 current->comm,
224 usb_pipeendpoint(urb->pipe),
225 usb_pipein(urb->pipe) ? "in" : "out",
226 urb->actual_length,
227 urb->transfer_buffer_length,
228 urb->status);
229
230 } else {
231 if (signal_pending(current)) {
232 /* if really an error */
233 if (urb->status && !((urb->status == -ENOENT) ||
234 (urb->status == -ECONNRESET) ||
235 (urb->status == -ESHUTDOWN))) {
236 status = -EINTR;
237 usb_kill_urb(urb);
238 } else {
239 status = 0;
240 }
241
242 dev_dbg(&urb->dev->dev,
243 "%s: signal pending on ep%d%s len=%d/%d, "
244 "urb status = %d\n",
245 current->comm,
246 usb_pipeendpoint(urb->pipe),
247 usb_pipein(urb->pipe) ? "in" : "out",
248 urb->actual_length,
249 urb->transfer_buffer_length,
250 urb->status);
251
252 } else {
253 status = urb->status;
254 }
255 }
256
257 if (actual_length)
258 *actual_length = urb->actual_length;
259
260 return status;
261}
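/*
 * A condensed sketch (illustrative only, error paths trimmed) of how the two
 * helpers above are combined by the read, write and ioctl paths below:
 *
 *	usb_anchor_urb(urb, &vstdev->submitted);
 *	retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done);
 *	mutex_unlock(&vstdev->lock);
 *	if (retval)
 *		usb_unanchor_urb(urb);
 *	else
 *		retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
 */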
262
263static ssize_t vstusb_read(struct file *file, char __user *buffer,
264 size_t count, loff_t *ppos)
265{
266 struct vstusb_device *vstdev;
267 int cnt = -1;
268 void *buf;
269 int retval = 0;
270
271 struct urb *urb;
272 struct usb_device *dev;
273 unsigned int pipe;
274 int timeout;
275
276 DECLARE_COMPLETION_ONSTACK(done);
277
278 vstdev = file->private_data;
279
280 if (vstdev == NULL)
281 return -ENODEV;
282
283 /* verify that we actually want to read some data */
284 if ((count == 0) || (count > VST_MAXBUFFER))
285 return -EINVAL;
286
287 /* lock this object */
288 if (mutex_lock_interruptible(&vstdev->lock))
289 return -ERESTARTSYS;
290
291 /* anyone home */
292 if (!vstdev->present) {
293 mutex_unlock(&vstdev->lock);
294 printk(KERN_ERR KBUILD_MODNAME
295 ": %s: device not present\n", __func__);
296 return -ENODEV;
297 }
298
299 /* pull out the necessary data */
300 dev = vstdev->usb_dev;
301 pipe = usb_rcvbulkpipe(dev, vstdev->rd_pipe);
302 timeout = vstdev->rd_timeout_ms;
303
304 buf = kmalloc(count, GFP_KERNEL);
305 if (buf == NULL) {
306 mutex_unlock(&vstdev->lock);
307 return -ENOMEM;
308 }
309
310 urb = usb_alloc_urb(0, GFP_KERNEL);
311 if (!urb) {
312 kfree(buf);
313 mutex_unlock(&vstdev->lock);
314 return -ENOMEM;
315 }
316
317 usb_anchor_urb(urb, &vstdev->submitted);
318 retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done);
319 mutex_unlock(&vstdev->lock);
320 if (retval) {
321 usb_unanchor_urb(urb);
322 dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n",
323 __func__, retval, pipe);
324 goto exit;
325 }
326
327 retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
328 if (retval) {
329 dev_err(&dev->dev, "%s: error %d completing urb %d\n",
330 __func__, retval, pipe);
331 goto exit;
332 }
333
334 if (copy_to_user(buffer, buf, cnt)) {
335 dev_err(&dev->dev, "%s: can't copy_to_user\n", __func__);
336 retval = -EFAULT;
337 } else {
338 retval = cnt;
339 dev_dbg(&dev->dev, "%s: read %d bytes from pipe %d\n",
340 __func__, cnt, pipe);
341 }
342
343exit:
344 usb_free_urb(urb);
345 kfree(buf);
346 return retval;
347}
348
349static ssize_t vstusb_write(struct file *file, const char __user *buffer,
350 size_t count, loff_t *ppos)
351{
352 struct vstusb_device *vstdev;
353 int cnt = -1;
354 void *buf;
355 int retval = 0;
356
357 struct urb *urb;
358 struct usb_device *dev;
359 unsigned int pipe;
360 int timeout;
361
362 DECLARE_COMPLETION_ONSTACK(done);
363
364 vstdev = file->private_data;
365
366 if (vstdev == NULL)
367 return -ENODEV;
368
369 /* verify that we actually have some data to write */
370 if ((count == 0) || (count > VST_MAXBUFFER))
371 return retval;
372
373 /* lock this object */
374 if (mutex_lock_interruptible(&vstdev->lock))
375 return -ERESTARTSYS;
376
377 /* anyone home */
378 if (!vstdev->present) {
379 mutex_unlock(&vstdev->lock);
380 printk(KERN_ERR KBUILD_MODNAME
381 ": %s: device not present\n", __func__);
382 return -ENODEV;
383 }
384
385 /* pull out the necessary data */
386 dev = vstdev->usb_dev;
387 pipe = usb_sndbulkpipe(dev, vstdev->wr_pipe);
388 timeout = vstdev->wr_timeout_ms;
389
390 buf = kmalloc(count, GFP_KERNEL);
391 if (buf == NULL) {
392 mutex_unlock(&vstdev->lock);
393 return -ENOMEM;
394 }
395
396 urb = usb_alloc_urb(0, GFP_KERNEL);
397 if (!urb) {
398 kfree(buf);
399 mutex_unlock(&vstdev->lock);
400 return -ENOMEM;
401 }
402
403 if (copy_from_user(buf, buffer, count)) {
404 mutex_unlock(&vstdev->lock);
405 dev_err(&dev->dev, "%s: can't copy_from_user\n", __func__);
406 retval = -EFAULT;
407 goto exit;
408 }
409
410 usb_anchor_urb(urb, &vstdev->submitted);
411 retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done);
412 mutex_unlock(&vstdev->lock);
413 if (retval) {
414 usb_unanchor_urb(urb);
415 dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n",
416 __func__, retval, pipe);
417 goto exit;
418 }
419
420 retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
421 if (retval) {
422 dev_err(&dev->dev, "%s: error %d completing urb %d\n",
423 __func__, retval, pipe);
424 goto exit;
425 } else {
426 retval = cnt;
427 dev_dbg(&dev->dev, "%s: sent %d bytes to pipe %d\n",
428 __func__, cnt, pipe);
429 }
430
431exit:
432 usb_free_urb(urb);
433 kfree(buf);
434 return retval;
435}
436
437static long vstusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
438{
439 int retval = 0;
440 int cnt = -1;
441 void __user *data = (void __user *)arg;
442 struct vstusb_args usb_data;
443
444 struct vstusb_device *vstdev;
445 void *buffer = NULL; /* must be initialized. buffer is
446 * referenced on exit but not all
447 * ioctls allocate it */
448
449 struct urb *urb = NULL; /* must be initialized. urb is
450 * referenced on exit but not all
451 * ioctls allocate it */
452 struct usb_device *dev;
453 unsigned int pipe;
454 int timeout;
455
456 DECLARE_COMPLETION_ONSTACK(done);
457
458 vstdev = file->private_data;
459
460 if (vstdev == NULL)
461 return -ENODEV;
462
463 if (_IOC_TYPE(cmd) != VST_IOC_MAGIC) {
464 dev_warn(&vstdev->usb_dev->dev,
465 "%s: ioctl command %x, bad ioctl magic %x, "
466 "expected %x\n", __func__, cmd,
467 _IOC_TYPE(cmd), VST_IOC_MAGIC);
468 return -EINVAL;
469 }
470
471 if (copy_from_user(&usb_data, data, sizeof(struct vstusb_args))) {
472 dev_err(&vstdev->usb_dev->dev, "%s: can't copy_from_user\n",
473 __func__);
474 return -EFAULT;
475 }
476
477 /* lock this object */
478 if (mutex_lock_interruptible(&vstdev->lock)) {
479 retval = -ERESTARTSYS;
480 goto exit;
481 }
482
483 /* anyone home */
484 if (!vstdev->present) {
485 mutex_unlock(&vstdev->lock);
486 dev_err(&vstdev->usb_dev->dev, "%s: device not present\n",
487 __func__);
488 retval = -ENODEV;
489 goto exit;
490 }
491
492 /* pull out the necessary data */
493 dev = vstdev->usb_dev;
494
495 switch (cmd) {
496
497 case IOCTL_VSTUSB_CONFIG_RW:
498
499 vstdev->rd_pipe = usb_data.rd_pipe;
500 vstdev->rd_timeout_ms = usb_data.rd_timeout_ms;
501 vstdev->wr_pipe = usb_data.wr_pipe;
502 vstdev->wr_timeout_ms = usb_data.wr_timeout_ms;
503
504 mutex_unlock(&vstdev->lock);
505
506 dev_dbg(&dev->dev, "%s: setting pipes/timeouts, "
507 "rdpipe = %d, rdtimeout = %d, "
508 "wrpipe = %d, wrtimeout = %d\n", __func__,
509 vstdev->rd_pipe, vstdev->rd_timeout_ms,
510 vstdev->wr_pipe, vstdev->wr_timeout_ms);
511 break;
512
513 case IOCTL_VSTUSB_SEND_PIPE:
514
515 if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) {
516 mutex_unlock(&vstdev->lock);
517 retval = -EINVAL;
518 goto exit;
519 }
520
521 buffer = kmalloc(usb_data.count, GFP_KERNEL);
522 if (buffer == NULL) {
523 mutex_unlock(&vstdev->lock);
524 retval = -ENOMEM;
525 goto exit;
526 }
527
528 urb = usb_alloc_urb(0, GFP_KERNEL);
529 if (!urb) {
530 mutex_unlock(&vstdev->lock);
531 retval = -ENOMEM;
532 goto exit;
533 }
534
535 timeout = usb_data.timeout_ms;
536
537 pipe = usb_sndbulkpipe(dev, usb_data.pipe);
538
539 if (copy_from_user(buffer, usb_data.buffer, usb_data.count)) {
540 dev_err(&dev->dev, "%s: can't copy_from_user\n",
541 __func__);
542 mutex_unlock(&vstdev->lock);
543 retval = -EFAULT;
544 goto exit;
545 }
546
547 usb_anchor_urb(urb, &vstdev->submitted);
548 retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer,
549 usb_data.count, &done);
550 mutex_unlock(&vstdev->lock);
551 if (retval) {
552 usb_unanchor_urb(urb);
553 dev_err(&dev->dev,
554 "%s: error %d filling and sending urb %d\n",
555 __func__, retval, pipe);
556 goto exit;
557 }
558
559 retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
560 if (retval) {
561 dev_err(&dev->dev, "%s: error %d completing urb %d\n",
562 __func__, retval, pipe);
563 }
564
565 break;
566 case IOCTL_VSTUSB_RECV_PIPE:
567
568 if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) {
569 mutex_unlock(&vstdev->lock);
570 retval = -EINVAL;
571 goto exit;
572 }
573
574 buffer = kmalloc(usb_data.count, GFP_KERNEL);
575 if (buffer == NULL) {
576 mutex_unlock(&vstdev->lock);
577 retval = -ENOMEM;
578 goto exit;
579 }
580
581 urb = usb_alloc_urb(0, GFP_KERNEL);
582 if (!urb) {
583 mutex_unlock(&vstdev->lock);
584 retval = -ENOMEM;
585 goto exit;
586 }
587
588 timeout = usb_data.timeout_ms;
589
590 pipe = usb_rcvbulkpipe(dev, usb_data.pipe);
591
592 usb_anchor_urb(urb, &vstdev->submitted);
593 retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer,
594 usb_data.count, &done);
595 mutex_unlock(&vstdev->lock);
596 if (retval) {
597 usb_unanchor_urb(urb);
598 dev_err(&dev->dev,
599 "%s: error %d filling and sending urb %d\n",
600 __func__, retval, pipe);
601 goto exit;
602 }
603
604 retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
605 if (retval) {
606 dev_err(&dev->dev, "%s: error %d completing urb %d\n",
607 __func__, retval, pipe);
608 goto exit;
609 }
610
611 if (copy_to_user(usb_data.buffer, buffer, cnt)) {
612 dev_err(&dev->dev, "%s: can't copy_to_user\n",
613 __func__);
614 retval = -EFAULT;
615 goto exit;
616 }
617
618 usb_data.count = cnt;
619 if (copy_to_user(data, &usb_data, sizeof(struct vstusb_args))) {
620 dev_err(&dev->dev, "%s: can't copy_to_user\n",
621 __func__);
622 retval = -EFAULT;
623 } else {
624 dev_dbg(&dev->dev, "%s: recv %zd bytes from pipe %d\n",
625 __func__, usb_data.count, usb_data.pipe);
626 }
627
628 break;
629
630 default:
631 mutex_unlock(&vstdev->lock);
632 dev_warn(&dev->dev, "ioctl_vstusb: invalid ioctl cmd %x\n",
633 cmd);
634 return -EINVAL;
636 }
637exit:
638 usb_free_urb(urb);
639 kfree(buffer);
640 return retval;
641}
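/*
 * A hypothetical user-space sketch of the interface implemented above.  The
 * struct vstusb_args field names and the IOCTL_VSTUSB_* commands are taken
 * from this file; the header name and the device node path are assumptions
 * (they depend on the installed headers and udev rules):
 *
 *	#include <linux/vstusb.h>                        // assumed header
 *
 *	struct vstusb_args args = {
 *		.rd_pipe = 2, .rd_timeout_ms = 1000,     // example values
 *		.wr_pipe = 1, .wr_timeout_ms = 1000,
 *	};
 *	int fd = open("/dev/usb/vstusb0", O_RDWR);       // node name assumed
 *	ioctl(fd, IOCTL_VSTUSB_CONFIG_RW, &args);        // set default pipes
 *	write(fd, cmd, sizeof(cmd));                     // uses wr_pipe/wr_timeout_ms
 *	read(fd, buf, sizeof(buf));                      // uses rd_pipe/rd_timeout_ms
 */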
642
643static const struct file_operations vstusb_fops = {
644 .owner = THIS_MODULE,
645 .read = vstusb_read,
646 .write = vstusb_write,
647 .unlocked_ioctl = vstusb_ioctl,
648 .compat_ioctl = vstusb_ioctl,
649 .open = vstusb_open,
650 .release = vstusb_release,
651};
652
653static struct usb_class_driver usb_vstusb_class = {
654 .name = "usb/vstusb%d",
655 .fops = &vstusb_fops,
656 .minor_base = VSTUSB_MINOR_BASE,
657};
658
659static int vstusb_probe(struct usb_interface *intf,
660 const struct usb_device_id *id)
661{
662 struct usb_device *dev = interface_to_usbdev(intf);
663 struct vstusb_device *vstdev;
664 int i;
665 int retval = 0;
666
667 /* allocate memory for our device state and initialize it */
668
669 vstdev = kzalloc(sizeof(*vstdev), GFP_KERNEL);
670 if (vstdev == NULL)
671 return -ENOMEM;
672
673 /* must do usb_get_dev() prior to kref_init() since the kref_put()
674 * release function will do a usb_put_dev() */
675 usb_get_dev(dev);
676 kref_init(&vstdev->kref);
677 mutex_init(&vstdev->lock);
678
679 i = dev->descriptor.bcdDevice;
680
681 dev_dbg(&intf->dev, "Version %1d%1d.%1d%1d found at address %d\n",
682 (i & 0xF000) >> 12, (i & 0xF00) >> 8,
683 (i & 0xF0) >> 4, (i & 0xF), dev->devnum);
684
685 vstdev->present = 1;
686 vstdev->isopen = 0;
687 vstdev->usb_dev = dev;
688 init_usb_anchor(&vstdev->submitted);
689
690 usb_set_intfdata(intf, vstdev);
691 retval = usb_register_dev(intf, &usb_vstusb_class);
692 if (retval) {
693 dev_err(&intf->dev,
694 "%s: Not able to get a minor for this device.\n",
695 __func__);
696 usb_set_intfdata(intf, NULL);
697 kref_put(&vstdev->kref, vstusb_delete);
698 return retval;
699 }
700
701 /* let the user know what node this device is now attached to */
702 dev_info(&intf->dev,
703 "VST USB Device #%d now attached to major %d minor %d\n",
704 (intf->minor - VSTUSB_MINOR_BASE), USB_MAJOR, intf->minor);
705
706 dev_info(&intf->dev, "%s, %s\n", DRIVER_DESC, DRIVER_VERSION);
707
708 return retval;
709}
710
711static void vstusb_disconnect(struct usb_interface *intf)
712{
713 struct vstusb_device *vstdev = usb_get_intfdata(intf);
714
715 usb_deregister_dev(intf, &usb_vstusb_class);
716 usb_set_intfdata(intf, NULL);
717
718 if (vstdev) {
719
720 mutex_lock(&vstdev->lock);
721 vstdev->present = 0;
722
723 usb_kill_anchored_urbs(&vstdev->submitted);
724
725 mutex_unlock(&vstdev->lock);
726
727 kref_put(&vstdev->kref, vstusb_delete);
728 }
729
730}
731
732static int vstusb_suspend(struct usb_interface *intf, pm_message_t message)
733{
734 struct vstusb_device *vstdev = usb_get_intfdata(intf);
735 int time;
736 if (!vstdev)
737 return 0;
738
739 mutex_lock(&vstdev->lock);
740 time = usb_wait_anchor_empty_timeout(&vstdev->submitted, 1000);
741 if (!time)
742 usb_kill_anchored_urbs(&vstdev->submitted);
743 mutex_unlock(&vstdev->lock);
744
745 return 0;
746}
747
748static int vstusb_resume(struct usb_interface *intf)
749{
750 return 0;
751}
752
753static struct usb_driver vstusb_driver = {
754 .name = "vstusb",
755 .probe = vstusb_probe,
756 .disconnect = vstusb_disconnect,
757 .suspend = vstusb_suspend,
758 .resume = vstusb_resume,
759 .id_table = id_table,
760};
761
762static int __init vstusb_init(void)
763{
764 int rc;
765
766 rc = usb_register(&vstusb_driver);
767 if (rc)
768 printk(KERN_ERR "%s: failed to register (%d)\n", __func__, rc);
769
770 return rc;
771}
772
773static void __exit vstusb_exit(void)
774{
775 usb_deregister(&vstusb_driver);
776}
777
778module_init(vstusb_init);
779module_exit(vstusb_exit);
780
781MODULE_AUTHOR("Dennis O'Brien/Stephen Ware");
782MODULE_DESCRIPTION(DRIVER_VERSION);
783MODULE_LICENSE("GPL");
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 385ec0520167..6dd44bc1f5ff 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -460,8 +460,8 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
460 char ev_type, int status) 460 char ev_type, int status)
461{ 461{
462 const struct usb_endpoint_descriptor *epd = &urb->ep->desc; 462 const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
463 unsigned long flags;
464 struct timeval ts; 463 struct timeval ts;
464 unsigned long flags;
465 unsigned int urb_length; 465 unsigned int urb_length;
466 unsigned int offset; 466 unsigned int offset;
467 unsigned int length; 467 unsigned int length;
@@ -600,10 +600,13 @@ static void mon_bin_complete(void *data, struct urb *urb, int status)
600static void mon_bin_error(void *data, struct urb *urb, int error) 600static void mon_bin_error(void *data, struct urb *urb, int error)
601{ 601{
602 struct mon_reader_bin *rp = data; 602 struct mon_reader_bin *rp = data;
603 struct timeval ts;
603 unsigned long flags; 604 unsigned long flags;
604 unsigned int offset; 605 unsigned int offset;
605 struct mon_bin_hdr *ep; 606 struct mon_bin_hdr *ep;
606 607
608 do_gettimeofday(&ts);
609
607 spin_lock_irqsave(&rp->b_lock, flags); 610 spin_lock_irqsave(&rp->b_lock, flags);
608 611
609 offset = mon_buff_area_alloc(rp, PKT_SIZE); 612 offset = mon_buff_area_alloc(rp, PKT_SIZE);
@@ -623,6 +626,8 @@ static void mon_bin_error(void *data, struct urb *urb, int error)
623 ep->devnum = urb->dev->devnum; 626 ep->devnum = urb->dev->devnum;
624 ep->busnum = urb->dev->bus->busnum; 627 ep->busnum = urb->dev->bus->busnum;
625 ep->id = (unsigned long) urb; 628 ep->id = (unsigned long) urb;
629 ep->ts_sec = ts.tv_sec;
630 ep->ts_usec = ts.tv_usec;
626 ep->status = error; 631 ep->status = error;
627 632
628 ep->flag_setup = '-'; 633 ep->flag_setup = '-';
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 047568ff223d..31c11888ec6a 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -180,7 +180,7 @@ static inline unsigned int mon_get_timestamp(void)
180 unsigned int stamp; 180 unsigned int stamp;
181 181
182 do_gettimeofday(&tval); 182 do_gettimeofday(&tval);
183 stamp = tval.tv_sec & 0xFFFF; /* 2^32 = 4294967296. Limit to 4096s. */ 183 stamp = tval.tv_sec & 0xFFF; /* 2^32 = 4294967296. Limit to 4096s. */
184 stamp = stamp * 1000000 + tval.tv_usec; 184 stamp = stamp * 1000000 + tval.tv_usec;
185 return stamp; 185 return stamp;
186} 186}
@@ -273,12 +273,12 @@ static void mon_text_error(void *data, struct urb *urb, int error)
273 273
274 ep->type = 'E'; 274 ep->type = 'E';
275 ep->id = (unsigned long) urb; 275 ep->id = (unsigned long) urb;
276 ep->busnum = 0; 276 ep->busnum = urb->dev->bus->busnum;
277 ep->devnum = urb->dev->devnum; 277 ep->devnum = urb->dev->devnum;
278 ep->epnum = usb_endpoint_num(&urb->ep->desc); 278 ep->epnum = usb_endpoint_num(&urb->ep->desc);
279 ep->xfertype = usb_endpoint_type(&urb->ep->desc); 279 ep->xfertype = usb_endpoint_type(&urb->ep->desc);
280 ep->is_in = usb_urb_dir_in(urb); 280 ep->is_in = usb_urb_dir_in(urb);
281 ep->tstamp = 0; 281 ep->tstamp = mon_get_timestamp();
282 ep->length = 0; 282 ep->length = 0;
283 ep->status = error; 283 ep->status = error;
284 284
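/*
 * A quick arithmetic check of the 0xFFF mask change in mon_get_timestamp()
 * above: the stamp is seconds * 1000000 + microseconds held in an unsigned
 * int, so it must stay below 2^32 = 4294967296.  Masking with 0xFFF keeps
 * the seconds in 0..4095, for a maximum of 4095 * 1000000 + 999999 =
 * 4095999999, which fits; the old 0xFFFF mask would have allowed up to
 * 65535 * 1000000, which overflows 32 bits.
 */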
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index ad26e6569665..bcee1339d4fd 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -30,7 +30,6 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
30 void __iomem *fifo = hw_ep->fifo; 30 void __iomem *fifo = hw_ep->fifo;
31 void __iomem *epio = hw_ep->regs; 31 void __iomem *epio = hw_ep->regs;
32 u8 epnum = hw_ep->epnum; 32 u8 epnum = hw_ep->epnum;
33 u16 dma_reg = 0;
34 33
35 prefetch((u8 *)src); 34 prefetch((u8 *)src);
36 35
@@ -42,15 +41,17 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
42 dump_fifo_data(src, len); 41 dump_fifo_data(src, len);
43 42
44 if (!ANOMALY_05000380 && epnum != 0) { 43 if (!ANOMALY_05000380 && epnum != 0) {
45 flush_dcache_range((unsigned int)src, 44 u16 dma_reg;
46 (unsigned int)(src + len)); 45
46 flush_dcache_range((unsigned long)src,
47 (unsigned long)(src + len));
47 48
48 /* Setup DMA address register */ 49 /* Setup DMA address register */
49 dma_reg = (u16) ((u32) src & 0xFFFF); 50 dma_reg = (u32)src;
50 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg); 51 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
51 SSYNC(); 52 SSYNC();
52 53
53 dma_reg = (u16) (((u32) src >> 16) & 0xFFFF); 54 dma_reg = (u32)src >> 16;
54 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg); 55 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
55 SSYNC(); 56 SSYNC();
56 57
@@ -79,12 +80,9 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
79 SSYNC(); 80 SSYNC();
80 81
81 if (unlikely((unsigned long)src & 0x01)) 82 if (unlikely((unsigned long)src & 0x01))
82 outsw_8((unsigned long)fifo, src, 83 outsw_8((unsigned long)fifo, src, (len + 1) >> 1);
83 len & 0x01 ? (len >> 1) + 1 : len >> 1);
84 else 84 else
85 outsw((unsigned long)fifo, src, 85 outsw((unsigned long)fifo, src, (len + 1) >> 1);
86 len & 0x01 ? (len >> 1) + 1 : len >> 1);
87
88 } 86 }
89} 87}
90/* 88/*
@@ -94,19 +92,19 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
94{ 92{
95 void __iomem *fifo = hw_ep->fifo; 93 void __iomem *fifo = hw_ep->fifo;
96 u8 epnum = hw_ep->epnum; 94 u8 epnum = hw_ep->epnum;
97 u16 dma_reg = 0;
98 95
99 if (ANOMALY_05000467 && epnum != 0) { 96 if (ANOMALY_05000467 && epnum != 0) {
97 u16 dma_reg;
100 98
101 invalidate_dcache_range((unsigned int)dst, 99 invalidate_dcache_range((unsigned long)dst,
102 (unsigned int)(dst + len)); 100 (unsigned long)(dst + len));
103 101
104 /* Setup DMA address register */ 102 /* Setup DMA address register */
105 dma_reg = (u16) ((u32) dst & 0xFFFF); 103 dma_reg = (u32)dst;
106 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg); 104 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
107 SSYNC(); 105 SSYNC();
108 106
109 dma_reg = (u16) (((u32) dst >> 16) & 0xFFFF); 107 dma_reg = (u32)dst >> 16;
110 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg); 108 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
111 SSYNC(); 109 SSYNC();
112 110
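/*
 * The simplified FIFO word count in the blackfin.c hunk above relies on the
 * identity that (len + 1) >> 1 rounds a byte count up to 16-bit words, which
 * matches the old expression len & 0x01 ? (len >> 1) + 1 : len >> 1.
 * For example, len = 7 gives (7 + 1) >> 1 = 4 words either way, and
 * len = 8 gives (8 + 1) >> 1 = 4 words either way.
 */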
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index a44a450c860d..3c69a76ec392 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1191,8 +1191,13 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
1191 1191
1192 bd = tx_ch->head; 1192 bd = tx_ch->head;
1193 1193
1194 /*
1195 * If head is NULL, this could mean that an abort interrupt
1196 * needs to be acknowledged.
1197 */
1194 if (NULL == bd) { 1198 if (NULL == bd) {
1195 DBG(1, "null BD\n"); 1199 DBG(1, "null BD\n");
1200 tx_ram->tx_complete = 0;
1196 continue; 1201 continue;
1197 } 1202 }
1198 1203
@@ -1412,15 +1417,6 @@ static int cppi_channel_abort(struct dma_channel *channel)
1412 1417
1413 if (cppi_ch->transmit) { 1418 if (cppi_ch->transmit) {
1414 struct cppi_tx_stateram __iomem *tx_ram; 1419 struct cppi_tx_stateram __iomem *tx_ram;
1415 int enabled;
1416
1417 /* mask interrupts raised to signal teardown complete. */
1418 enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
1419 & (1 << cppi_ch->index);
1420 if (enabled)
1421 musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
1422 (1 << cppi_ch->index));
1423
1424 /* REVISIT put timeouts on these controller handshakes */ 1420 /* REVISIT put timeouts on these controller handshakes */
1425 1421
1426 cppi_dump_tx(6, cppi_ch, " (teardown)"); 1422 cppi_dump_tx(6, cppi_ch, " (teardown)");
@@ -1435,7 +1431,6 @@ static int cppi_channel_abort(struct dma_channel *channel)
1435 do { 1431 do {
1436 value = musb_readl(&tx_ram->tx_complete, 0); 1432 value = musb_readl(&tx_ram->tx_complete, 0);
1437 } while (0xFFFFFFFC != value); 1433 } while (0xFFFFFFFC != value);
1438 musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);
1439 1434
1440 /* FIXME clean up the transfer state ... here? 1435 /* FIXME clean up the transfer state ... here?
1441 * the completion routine should get called with 1436 * the completion routine should get called with
@@ -1448,23 +1443,15 @@ static int cppi_channel_abort(struct dma_channel *channel)
1448 musb_writew(regs, MUSB_TXCSR, value); 1443 musb_writew(regs, MUSB_TXCSR, value);
1449 musb_writew(regs, MUSB_TXCSR, value); 1444 musb_writew(regs, MUSB_TXCSR, value);
1450 1445
1451 /* While we scrub the TX state RAM, ensure that we clean 1446 /*
1452 * up any interrupt that's currently asserted:
1453 * 1. Write to completion Ptr value 0x1(bit 0 set) 1447 * 1. Write to completion Ptr value 0x1(bit 0 set)
1454 * (write back mode) 1448 * (write back mode)
1455 * 2. Write to completion Ptr value 0x0(bit 0 cleared) 1449 * 2. Wait for abort interrupt and then put the channel in
1456 * (compare mode) 1450 * compare mode by writing 1 to the tx_complete register.
1457 * Value written is compared(for bits 31:2) and when
1458 * equal, interrupt is deasserted.
1459 */ 1451 */
1460 cppi_reset_tx(tx_ram, 1); 1452 cppi_reset_tx(tx_ram, 1);
1461 musb_writel(&tx_ram->tx_complete, 0, 0); 1453 cppi_ch->head = 0;
1462 1454 musb_writel(&tx_ram->tx_complete, 0, 1);
1463 /* re-enable interrupt */
1464 if (enabled)
1465 musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
1466 (1 << cppi_ch->index));
1467
1468 cppi_dump_tx(5, cppi_ch, " (done teardown)"); 1455 cppi_dump_tx(5, cppi_ch, " (done teardown)");
1469 1456
1470 /* REVISIT tx side _should_ clean up the same way 1457 /* REVISIT tx side _should_ clean up the same way
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 738efd8063b5..b4bbf8f2c238 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -557,6 +557,69 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
557 handled = IRQ_HANDLED; 557 handled = IRQ_HANDLED;
558 } 558 }
559 559
560
561 if (int_usb & MUSB_INTR_SUSPEND) {
562 DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
563 otg_state_string(musb), devctl, power);
564 handled = IRQ_HANDLED;
565
566 switch (musb->xceiv->state) {
567#ifdef CONFIG_USB_MUSB_OTG
568 case OTG_STATE_A_PERIPHERAL:
569 /* We also come here if the cable is removed, since
570 * this silicon doesn't report ID-no-longer-grounded.
571 *
572 * We depend on T(a_wait_bcon) to shut us down, and
573 * hope users don't do anything dicey during this
574 * undesired detour through A_WAIT_BCON.
575 */
576 musb_hnp_stop(musb);
577 usb_hcd_resume_root_hub(musb_to_hcd(musb));
578 musb_root_disconnect(musb);
579 musb_platform_try_idle(musb, jiffies
580 + msecs_to_jiffies(musb->a_wait_bcon
581 ? : OTG_TIME_A_WAIT_BCON));
582
583 break;
584#endif
585 case OTG_STATE_B_IDLE:
586 if (!musb->is_active)
587 break;
588 case OTG_STATE_B_PERIPHERAL:
589 musb_g_suspend(musb);
590 musb->is_active = is_otg_enabled(musb)
591 && musb->xceiv->gadget->b_hnp_enable;
592 if (musb->is_active) {
593#ifdef CONFIG_USB_MUSB_OTG
594 musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
595 DBG(1, "HNP: Setting timer for b_ase0_brst\n");
596 mod_timer(&musb->otg_timer, jiffies
597 + msecs_to_jiffies(
598 OTG_TIME_B_ASE0_BRST));
599#endif
600 }
601 break;
602 case OTG_STATE_A_WAIT_BCON:
603 if (musb->a_wait_bcon != 0)
604 musb_platform_try_idle(musb, jiffies
605 + msecs_to_jiffies(musb->a_wait_bcon));
606 break;
607 case OTG_STATE_A_HOST:
608 musb->xceiv->state = OTG_STATE_A_SUSPEND;
609 musb->is_active = is_otg_enabled(musb)
610 && musb->xceiv->host->b_hnp_enable;
611 break;
612 case OTG_STATE_B_HOST:
613 /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
614 DBG(1, "REVISIT: SUSPEND as B_HOST\n");
615 break;
616 default:
617 /* "should not happen" */
618 musb->is_active = 0;
619 break;
620 }
621 }
622
560 if (int_usb & MUSB_INTR_CONNECT) { 623 if (int_usb & MUSB_INTR_CONNECT) {
561 struct usb_hcd *hcd = musb_to_hcd(musb); 624 struct usb_hcd *hcd = musb_to_hcd(musb);
562 625
@@ -625,10 +688,61 @@ b_host:
625 } 688 }
626#endif /* CONFIG_USB_MUSB_HDRC_HCD */ 689#endif /* CONFIG_USB_MUSB_HDRC_HCD */
627 690
691 if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
692 DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
693 otg_state_string(musb),
694 MUSB_MODE(musb), devctl);
695 handled = IRQ_HANDLED;
696
697 switch (musb->xceiv->state) {
698#ifdef CONFIG_USB_MUSB_HDRC_HCD
699 case OTG_STATE_A_HOST:
700 case OTG_STATE_A_SUSPEND:
701 usb_hcd_resume_root_hub(musb_to_hcd(musb));
702 musb_root_disconnect(musb);
703 if (musb->a_wait_bcon != 0 && is_otg_enabled(musb))
704 musb_platform_try_idle(musb, jiffies
705 + msecs_to_jiffies(musb->a_wait_bcon));
706 break;
707#endif /* HOST */
708#ifdef CONFIG_USB_MUSB_OTG
709 case OTG_STATE_B_HOST:
710 /* REVISIT this behaves for "real disconnect"
711 * cases; make sure the other transitions
712 * from B_HOST act right too. The B_HOST code
713 * in hnp_stop() is currently not used...
714 */
715 musb_root_disconnect(musb);
716 musb_to_hcd(musb)->self.is_b_host = 0;
717 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
718 MUSB_DEV_MODE(musb);
719 musb_g_disconnect(musb);
720 break;
721 case OTG_STATE_A_PERIPHERAL:
722 musb_hnp_stop(musb);
723 musb_root_disconnect(musb);
724 /* FALLTHROUGH */
725 case OTG_STATE_B_WAIT_ACON:
726 /* FALLTHROUGH */
727#endif /* OTG */
728#ifdef CONFIG_USB_GADGET_MUSB_HDRC
729 case OTG_STATE_B_PERIPHERAL:
730 case OTG_STATE_B_IDLE:
731 musb_g_disconnect(musb);
732 break;
733#endif /* GADGET */
734 default:
735 WARNING("unhandled DISCONNECT transition (%s)\n",
736 otg_state_string(musb));
737 break;
738 }
739 }
740
628 /* mentor saves a bit: bus reset and babble share the same irq. 741 /* mentor saves a bit: bus reset and babble share the same irq.
629 * only host sees babble; only peripheral sees bus reset. 742 * only host sees babble; only peripheral sees bus reset.
630 */ 743 */
631 if (int_usb & MUSB_INTR_RESET) { 744 if (int_usb & MUSB_INTR_RESET) {
745 handled = IRQ_HANDLED;
632 if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) { 746 if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
633 /* 747 /*
634 * Looks like non-HS BABBLE can be ignored, but 748 * Looks like non-HS BABBLE can be ignored, but
@@ -641,7 +755,7 @@ b_host:
641 DBG(1, "BABBLE devctl: %02x\n", devctl); 755 DBG(1, "BABBLE devctl: %02x\n", devctl);
642 else { 756 else {
643 ERR("Stopping host session -- babble\n"); 757 ERR("Stopping host session -- babble\n");
644 musb_writeb(mbase, MUSB_DEVCTL, 0); 758 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
645 } 759 }
646 } else if (is_peripheral_capable()) { 760 } else if (is_peripheral_capable()) {
647 DBG(1, "BUS RESET as %s\n", otg_state_string(musb)); 761 DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
@@ -686,29 +800,7 @@ b_host:
686 otg_state_string(musb)); 800 otg_state_string(musb));
687 } 801 }
688 } 802 }
689
690 handled = IRQ_HANDLED;
691 } 803 }
692 schedule_work(&musb->irq_work);
693
694 return handled;
695}
696
697/*
698 * Interrupt Service Routine to record USB "global" interrupts.
699 * Since these do not happen often and signify things of
700 * paramount importance, it seems OK to check them individually;
701 * the order of the tests is specified in the manual
702 *
703 * @param musb instance pointer
704 * @param int_usb register contents
705 * @param devctl
706 * @param power
707 */
708static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
709 u8 devctl, u8 power)
710{
711 irqreturn_t handled = IRQ_NONE;
712 804
713#if 0 805#if 0
714/* REVISIT ... this would be for multiplexing periodic endpoints, or 806/* REVISIT ... this would be for multiplexing periodic endpoints, or
@@ -755,117 +847,7 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
755 } 847 }
756#endif 848#endif
757 849
758 if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { 850 schedule_work(&musb->irq_work);
759 DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
760 otg_state_string(musb),
761 MUSB_MODE(musb), devctl);
762 handled = IRQ_HANDLED;
763
764 switch (musb->xceiv->state) {
765#ifdef CONFIG_USB_MUSB_HDRC_HCD
766 case OTG_STATE_A_HOST:
767 case OTG_STATE_A_SUSPEND:
768 usb_hcd_resume_root_hub(musb_to_hcd(musb));
769 musb_root_disconnect(musb);
770 if (musb->a_wait_bcon != 0 && is_otg_enabled(musb))
771 musb_platform_try_idle(musb, jiffies
772 + msecs_to_jiffies(musb->a_wait_bcon));
773 break;
774#endif /* HOST */
775#ifdef CONFIG_USB_MUSB_OTG
776 case OTG_STATE_B_HOST:
777 /* REVISIT this behaves for "real disconnect"
778 * cases; make sure the other transitions from
779 * from B_HOST act right too. The B_HOST code
780 * in hnp_stop() is currently not used...
781 */
782 musb_root_disconnect(musb);
783 musb_to_hcd(musb)->self.is_b_host = 0;
784 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
785 MUSB_DEV_MODE(musb);
786 musb_g_disconnect(musb);
787 break;
788 case OTG_STATE_A_PERIPHERAL:
789 musb_hnp_stop(musb);
790 musb_root_disconnect(musb);
791 /* FALLTHROUGH */
792 case OTG_STATE_B_WAIT_ACON:
793 /* FALLTHROUGH */
794#endif /* OTG */
795#ifdef CONFIG_USB_GADGET_MUSB_HDRC
796 case OTG_STATE_B_PERIPHERAL:
797 case OTG_STATE_B_IDLE:
798 musb_g_disconnect(musb);
799 break;
800#endif /* GADGET */
801 default:
802 WARNING("unhandled DISCONNECT transition (%s)\n",
803 otg_state_string(musb));
804 break;
805 }
806
807 schedule_work(&musb->irq_work);
808 }
809
810 if (int_usb & MUSB_INTR_SUSPEND) {
811 DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
812 otg_state_string(musb), devctl, power);
813 handled = IRQ_HANDLED;
814
815 switch (musb->xceiv->state) {
816#ifdef CONFIG_USB_MUSB_OTG
817 case OTG_STATE_A_PERIPHERAL:
818 /* We also come here if the cable is removed, since
819 * this silicon doesn't report ID-no-longer-grounded.
820 *
821 * We depend on T(a_wait_bcon) to shut us down, and
822 * hope users don't do anything dicey during this
823 * undesired detour through A_WAIT_BCON.
824 */
825 musb_hnp_stop(musb);
826 usb_hcd_resume_root_hub(musb_to_hcd(musb));
827 musb_root_disconnect(musb);
828 musb_platform_try_idle(musb, jiffies
829 + msecs_to_jiffies(musb->a_wait_bcon
830 ? : OTG_TIME_A_WAIT_BCON));
831 break;
832#endif
833 case OTG_STATE_B_PERIPHERAL:
834 musb_g_suspend(musb);
835 musb->is_active = is_otg_enabled(musb)
836 && musb->xceiv->gadget->b_hnp_enable;
837 if (musb->is_active) {
838#ifdef CONFIG_USB_MUSB_OTG
839 musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
840 DBG(1, "HNP: Setting timer for b_ase0_brst\n");
841 mod_timer(&musb->otg_timer, jiffies
842 + msecs_to_jiffies(
843 OTG_TIME_B_ASE0_BRST));
844#endif
845 }
846 break;
847 case OTG_STATE_A_WAIT_BCON:
848 if (musb->a_wait_bcon != 0)
849 musb_platform_try_idle(musb, jiffies
850 + msecs_to_jiffies(musb->a_wait_bcon));
851 break;
852 case OTG_STATE_A_HOST:
853 musb->xceiv->state = OTG_STATE_A_SUSPEND;
854 musb->is_active = is_otg_enabled(musb)
855 && musb->xceiv->host->b_hnp_enable;
856 break;
857 case OTG_STATE_B_HOST:
858 /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
859 DBG(1, "REVISIT: SUSPEND as B_HOST\n");
860 break;
861 default:
862 /* "should not happen" */
863 musb->is_active = 0;
864 break;
865 }
866 schedule_work(&musb->irq_work);
867 }
868
869 851
870 return handled; 852 return handled;
871} 853}
@@ -1095,6 +1077,36 @@ static struct fifo_cfg __initdata mode_4_cfg[] = {
1095{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, 1077{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1096}; 1078};
1097 1079
1080/* mode 5 - fits in 8KB */
1081static struct fifo_cfg __initdata mode_5_cfg[] = {
1082{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1083{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1084{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1085{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1086{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
1087{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
1088{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
1089{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
1090{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
1091{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
1092{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, },
1093{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, },
1094{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, },
1095{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, },
1096{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, },
1097{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, },
1098{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, },
1099{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, },
1100{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, },
1101{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, },
1102{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, },
1103{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, },
1104{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, },
1105{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, },
1106{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
1107{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1108{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1109};
1098 1110
1099/* 1111/*
1100 * configure a fifo; for non-shared endpoints, this may be called 1112 * configure a fifo; for non-shared endpoints, this may be called
@@ -1210,6 +1222,10 @@ static int __init ep_config_from_table(struct musb *musb)
1210 cfg = mode_4_cfg; 1222 cfg = mode_4_cfg;
1211 n = ARRAY_SIZE(mode_4_cfg); 1223 n = ARRAY_SIZE(mode_4_cfg);
1212 break; 1224 break;
1225 case 5:
1226 cfg = mode_5_cfg;
1227 n = ARRAY_SIZE(mode_5_cfg);
1228 break;
1213 } 1229 }
1214 1230
1215 printk(KERN_DEBUG "%s: setup fifo_mode %d\n", 1231 printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
@@ -1314,9 +1330,6 @@ enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
1314 */ 1330 */
1315static int __init musb_core_init(u16 musb_type, struct musb *musb) 1331static int __init musb_core_init(u16 musb_type, struct musb *musb)
1316{ 1332{
1317#ifdef MUSB_AHB_ID
1318 u32 data;
1319#endif
1320 u8 reg; 1333 u8 reg;
1321 char *type; 1334 char *type;
1322 char aInfo[90], aRevision[32], aDate[12]; 1335 char aInfo[90], aRevision[32], aDate[12];
@@ -1328,23 +1341,17 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1328 reg = musb_read_configdata(mbase); 1341 reg = musb_read_configdata(mbase);
1329 1342
1330 strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); 1343 strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
1331 if (reg & MUSB_CONFIGDATA_DYNFIFO) 1344 if (reg & MUSB_CONFIGDATA_DYNFIFO) {
1332 strcat(aInfo, ", dyn FIFOs"); 1345 strcat(aInfo, ", dyn FIFOs");
1346 musb->dyn_fifo = true;
1347 }
1333 if (reg & MUSB_CONFIGDATA_MPRXE) { 1348 if (reg & MUSB_CONFIGDATA_MPRXE) {
1334 strcat(aInfo, ", bulk combine"); 1349 strcat(aInfo, ", bulk combine");
1335#ifdef C_MP_RX
1336 musb->bulk_combine = true; 1350 musb->bulk_combine = true;
1337#else
1338 strcat(aInfo, " (X)"); /* no driver support */
1339#endif
1340 } 1351 }
1341 if (reg & MUSB_CONFIGDATA_MPTXE) { 1352 if (reg & MUSB_CONFIGDATA_MPTXE) {
1342 strcat(aInfo, ", bulk split"); 1353 strcat(aInfo, ", bulk split");
1343#ifdef C_MP_TX
1344 musb->bulk_split = true; 1354 musb->bulk_split = true;
1345#else
1346 strcat(aInfo, " (X)"); /* no driver support */
1347#endif
1348 } 1355 }
1349 if (reg & MUSB_CONFIGDATA_HBRXE) { 1356 if (reg & MUSB_CONFIGDATA_HBRXE) {
1350 strcat(aInfo, ", HB-ISO Rx"); 1357 strcat(aInfo, ", HB-ISO Rx");
@@ -1360,20 +1367,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1360 printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n", 1367 printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
1361 musb_driver_name, reg, aInfo); 1368 musb_driver_name, reg, aInfo);
1362 1369
1363#ifdef MUSB_AHB_ID
1364 data = musb_readl(mbase, 0x404);
1365 sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff),
1366 (data >> 16) & 0xff, (data >> 24) & 0xff);
1367 /* FIXME ID2 and ID3 are unused */
1368 data = musb_readl(mbase, 0x408);
1369 printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data);
1370 data = musb_readl(mbase, 0x40c);
1371 printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data);
1372 reg = musb_readb(mbase, 0x400);
1373 musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
1374#else
1375 aDate[0] = 0; 1370 aDate[0] = 0;
1376#endif
1377 if (MUSB_CONTROLLER_MHDRC == musb_type) { 1371 if (MUSB_CONTROLLER_MHDRC == musb_type) {
1378 musb->is_multipoint = 1; 1372 musb->is_multipoint = 1;
1379 type = "M"; 1373 type = "M";
@@ -1404,21 +1398,10 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1404 musb->nr_endpoints = 1; 1398 musb->nr_endpoints = 1;
1405 musb->epmask = 1; 1399 musb->epmask = 1;
1406 1400
1407 if (reg & MUSB_CONFIGDATA_DYNFIFO) { 1401 if (musb->dyn_fifo)
1408 if (musb->config->dyn_fifo) 1402 status = ep_config_from_table(musb);
1409 status = ep_config_from_table(musb); 1403 else
1410 else { 1404 status = ep_config_from_hw(musb);
1411 ERR("reconfigure software for Dynamic FIFOs\n");
1412 status = -ENODEV;
1413 }
1414 } else {
1415 if (!musb->config->dyn_fifo)
1416 status = ep_config_from_hw(musb);
1417 else {
1418 ERR("reconfigure software for static FIFOs\n");
1419 return -ENODEV;
1420 }
1421 }
1422 1405
1423 if (status < 0) 1406 if (status < 0)
1424 return status; 1407 return status;
@@ -1587,11 +1570,6 @@ irqreturn_t musb_interrupt(struct musb *musb)
1587 ep_num++; 1570 ep_num++;
1588 } 1571 }
1589 1572
1590 /* finish handling "global" interrupts after handling fifos */
1591 if (musb->int_usb)
1592 retval |= musb_stage2_irq(musb,
1593 musb->int_usb, devctl, power);
1594
1595 return retval; 1573 return retval;
1596} 1574}
1597 1575
@@ -1696,7 +1674,7 @@ musb_vbus_store(struct device *dev, struct device_attribute *attr,
1696 unsigned long val; 1674 unsigned long val;
1697 1675
1698 if (sscanf(buf, "%lu", &val) < 1) { 1676 if (sscanf(buf, "%lu", &val) < 1) {
1699 printk(KERN_ERR "Invalid VBUS timeout ms value\n"); 1677 dev_err(dev, "Invalid VBUS timeout ms value\n");
1700 return -EINVAL; 1678 return -EINVAL;
1701 } 1679 }
1702 1680
@@ -1746,7 +1724,7 @@ musb_srp_store(struct device *dev, struct device_attribute *attr,
1746 1724
1747 if (sscanf(buf, "%hu", &srp) != 1 1725 if (sscanf(buf, "%hu", &srp) != 1
1748 || (srp != 1)) { 1726 || (srp != 1)) {
1749 printk(KERN_ERR "SRP: Value must be 1\n"); 1727 dev_err(dev, "SRP: Value must be 1\n");
1750 return -EINVAL; 1728 return -EINVAL;
1751 } 1729 }
1752 1730
@@ -1759,6 +1737,19 @@ static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
1759 1737
1760#endif /* CONFIG_USB_GADGET_MUSB_HDRC */ 1738#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
1761 1739
1740static struct attribute *musb_attributes[] = {
1741 &dev_attr_mode.attr,
1742 &dev_attr_vbus.attr,
1743#ifdef CONFIG_USB_GADGET_MUSB_HDRC
1744 &dev_attr_srp.attr,
1745#endif
1746 NULL
1747};
1748
1749static const struct attribute_group musb_attr_group = {
1750 .attrs = musb_attributes,
1751};
1752
1762#endif /* sysfs */ 1753#endif /* sysfs */
1763 1754
1764/* Only used to provide driver mode change events */ 1755/* Only used to provide driver mode change events */
@@ -1833,11 +1824,7 @@ static void musb_free(struct musb *musb)
1833 */ 1824 */
1834 1825
1835#ifdef CONFIG_SYSFS 1826#ifdef CONFIG_SYSFS
1836 device_remove_file(musb->controller, &dev_attr_mode); 1827 sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
1837 device_remove_file(musb->controller, &dev_attr_vbus);
1838#ifdef CONFIG_USB_GADGET_MUSB_HDRC
1839 device_remove_file(musb->controller, &dev_attr_srp);
1840#endif
1841#endif 1828#endif
1842 1829
1843#ifdef CONFIG_USB_GADGET_MUSB_HDRC 1830#ifdef CONFIG_USB_GADGET_MUSB_HDRC
@@ -2017,22 +2004,10 @@ bad_config:
2017 musb->irq_wake = 0; 2004 musb->irq_wake = 0;
2018 } 2005 }
2019 2006
2020 pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
2021 musb_driver_name,
2022 ({char *s;
2023 switch (musb->board_mode) {
2024 case MUSB_HOST: s = "Host"; break;
2025 case MUSB_PERIPHERAL: s = "Peripheral"; break;
2026 default: s = "OTG"; break;
2027 }; s; }),
2028 ctrl,
2029 (is_dma_capable() && musb->dma_controller)
2030 ? "DMA" : "PIO",
2031 musb->nIrq);
2032
2033 /* host side needs more setup */ 2007 /* host side needs more setup */
2034 if (is_host_enabled(musb)) { 2008 if (is_host_enabled(musb)) {
2035 struct usb_hcd *hcd = musb_to_hcd(musb); 2009 struct usb_hcd *hcd = musb_to_hcd(musb);
2010 u8 busctl;
2036 2011
2037 otg_set_host(musb->xceiv, &hcd->self); 2012 otg_set_host(musb->xceiv, &hcd->self);
2038 2013
@@ -2040,6 +2015,13 @@ bad_config:
2040 hcd->self.otg_port = 1; 2015 hcd->self.otg_port = 1;
2041 musb->xceiv->host = &hcd->self; 2016 musb->xceiv->host = &hcd->self;
2042 hcd->power_budget = 2 * (plat->power ? : 250); 2017 hcd->power_budget = 2 * (plat->power ? : 250);
2018
2019 /* program PHY to use external vBus if required */
2020 if (plat->extvbus) {
2021 busctl = musb_readb(musb->mregs, MUSB_ULPI_BUSCONTROL);
2022 busctl |= MUSB_ULPI_USE_EXTVBUS;
2023 musb_writeb(musb->mregs, MUSB_ULPI_BUSCONTROL, busctl);
2024 }
2043 } 2025 }
2044 2026
2045 /* For the host-only role, we can activate right away. 2027 /* For the host-only role, we can activate right away.
@@ -2079,26 +2061,26 @@ bad_config:
2079 } 2061 }
2080 2062
2081#ifdef CONFIG_SYSFS 2063#ifdef CONFIG_SYSFS
2082 status = device_create_file(dev, &dev_attr_mode); 2064 status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
2083 status = device_create_file(dev, &dev_attr_vbus);
2084#ifdef CONFIG_USB_GADGET_MUSB_HDRC
2085 status = device_create_file(dev, &dev_attr_srp);
2086#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
2087 status = 0;
2088#endif 2065#endif
2089 if (status) 2066 if (status)
2090 goto fail2; 2067 goto fail2;
2091 2068
2069 dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n",
2070 ({char *s;
2071 switch (musb->board_mode) {
2072 case MUSB_HOST: s = "Host"; break;
2073 case MUSB_PERIPHERAL: s = "Peripheral"; break;
2074 default: s = "OTG"; break;
2075 }; s; }),
2076 ctrl,
2077 (is_dma_capable() && musb->dma_controller)
2078 ? "DMA" : "PIO",
2079 musb->nIrq);
2080
2092 return 0; 2081 return 0;
2093 2082
2094fail2: 2083fail2:
2095#ifdef CONFIG_SYSFS
2096 device_remove_file(musb->controller, &dev_attr_mode);
2097 device_remove_file(musb->controller, &dev_attr_vbus);
2098#ifdef CONFIG_USB_GADGET_MUSB_HDRC
2099 device_remove_file(musb->controller, &dev_attr_srp);
2100#endif
2101#endif
2102 musb_platform_exit(musb); 2084 musb_platform_exit(musb);
2103fail: 2085fail:
2104 dev_err(musb->controller, 2086 dev_err(musb->controller,
@@ -2127,6 +2109,7 @@ static int __init musb_probe(struct platform_device *pdev)
2127{ 2109{
2128 struct device *dev = &pdev->dev; 2110 struct device *dev = &pdev->dev;
2129 int irq = platform_get_irq(pdev, 0); 2111 int irq = platform_get_irq(pdev, 0);
2112 int status;
2130 struct resource *iomem; 2113 struct resource *iomem;
2131 void __iomem *base; 2114 void __iomem *base;
2132 2115
@@ -2134,7 +2117,7 @@ static int __init musb_probe(struct platform_device *pdev)
2134 if (!iomem || irq == 0) 2117 if (!iomem || irq == 0)
2135 return -ENODEV; 2118 return -ENODEV;
2136 2119
2137 base = ioremap(iomem->start, iomem->end - iomem->start + 1); 2120 base = ioremap(iomem->start, resource_size(iomem));
2138 if (!base) { 2121 if (!base) {
2139 dev_err(dev, "ioremap failed\n"); 2122 dev_err(dev, "ioremap failed\n");
2140 return -ENOMEM; 2123 return -ENOMEM;
@@ -2144,7 +2127,12 @@ static int __init musb_probe(struct platform_device *pdev)
2144 /* clobbered by use_dma=n */ 2127 /* clobbered by use_dma=n */
2145 orig_dma_mask = dev->dma_mask; 2128 orig_dma_mask = dev->dma_mask;
2146#endif 2129#endif
2147 return musb_init_controller(dev, irq, base); 2130
2131 status = musb_init_controller(dev, irq, base);
2132 if (status < 0)
2133 iounmap(base);
2134
2135 return status;
2148} 2136}
2149 2137
2150static int __exit musb_remove(struct platform_device *pdev) 2138static int __exit musb_remove(struct platform_device *pdev)
@@ -2173,6 +2161,148 @@ static int __exit musb_remove(struct platform_device *pdev)
2173 2161
2174#ifdef CONFIG_PM 2162#ifdef CONFIG_PM
2175 2163
2164static struct musb_context_registers musb_context;
2165
2166void musb_save_context(struct musb *musb)
2167{
2168 int i;
2169 void __iomem *musb_base = musb->mregs;
2170
2171 if (is_host_enabled(musb)) {
2172 musb_context.frame = musb_readw(musb_base, MUSB_FRAME);
2173 musb_context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
2174 }
2175 musb_context.power = musb_readb(musb_base, MUSB_POWER);
2176 musb_context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE);
2177 musb_context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE);
2178 musb_context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
2179 musb_context.index = musb_readb(musb_base, MUSB_INDEX);
2180 musb_context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
2181
2182 for (i = 0; i < MUSB_C_NUM_EPS; ++i) {
2183 musb_writeb(musb_base, MUSB_INDEX, i);
2184 musb_context.index_regs[i].txmaxp =
2185 musb_readw(musb_base, 0x10 + MUSB_TXMAXP);
2186 musb_context.index_regs[i].txcsr =
2187 musb_readw(musb_base, 0x10 + MUSB_TXCSR);
2188 musb_context.index_regs[i].rxmaxp =
2189 musb_readw(musb_base, 0x10 + MUSB_RXMAXP);
2190 musb_context.index_regs[i].rxcsr =
2191 musb_readw(musb_base, 0x10 + MUSB_RXCSR);
2192
2193 if (musb->dyn_fifo) {
2194 musb_context.index_regs[i].txfifoadd =
2195 musb_read_txfifoadd(musb_base);
2196 musb_context.index_regs[i].rxfifoadd =
2197 musb_read_rxfifoadd(musb_base);
2198 musb_context.index_regs[i].txfifosz =
2199 musb_read_txfifosz(musb_base);
2200 musb_context.index_regs[i].rxfifosz =
2201 musb_read_rxfifosz(musb_base);
2202 }
2203 if (is_host_enabled(musb)) {
2204 musb_context.index_regs[i].txtype =
2205 musb_readb(musb_base, 0x10 + MUSB_TXTYPE);
2206 musb_context.index_regs[i].txinterval =
2207 musb_readb(musb_base, 0x10 + MUSB_TXINTERVAL);
2208 musb_context.index_regs[i].rxtype =
2209 musb_readb(musb_base, 0x10 + MUSB_RXTYPE);
2210 musb_context.index_regs[i].rxinterval =
2211 musb_readb(musb_base, 0x10 + MUSB_RXINTERVAL);
2212
2213 musb_context.index_regs[i].txfunaddr =
2214 musb_read_txfunaddr(musb_base, i);
2215 musb_context.index_regs[i].txhubaddr =
2216 musb_read_txhubaddr(musb_base, i);
2217 musb_context.index_regs[i].txhubport =
2218 musb_read_txhubport(musb_base, i);
2219
2220 musb_context.index_regs[i].rxfunaddr =
2221 musb_read_rxfunaddr(musb_base, i);
2222 musb_context.index_regs[i].rxhubaddr =
2223 musb_read_rxhubaddr(musb_base, i);
2224 musb_context.index_regs[i].rxhubport =
2225 musb_read_rxhubport(musb_base, i);
2226 }
2227 }
2228
2229 musb_writeb(musb_base, MUSB_INDEX, musb_context.index);
2230
2231 musb_platform_save_context(musb, &musb_context);
2232}
2233
2234void musb_restore_context(struct musb *musb)
2235{
2236 int i;
2237 void __iomem *musb_base = musb->mregs;
2238 void __iomem *ep_target_regs;
2239
2240 musb_platform_restore_context(musb, &musb_context);
2241
2242 if (is_host_enabled(musb)) {
2243 musb_writew(musb_base, MUSB_FRAME, musb_context.frame);
2244 musb_writeb(musb_base, MUSB_TESTMODE, musb_context.testmode);
2245 }
2246 musb_writeb(musb_base, MUSB_POWER, musb_context.power);
2247 musb_writew(musb_base, MUSB_INTRTXE, musb_context.intrtxe);
2248 musb_writew(musb_base, MUSB_INTRRXE, musb_context.intrrxe);
2249 musb_writeb(musb_base, MUSB_INTRUSBE, musb_context.intrusbe);
2250 musb_writeb(musb_base, MUSB_DEVCTL, musb_context.devctl);
2251
2252 for (i = 0; i < MUSB_C_NUM_EPS; ++i) {
2253 musb_writeb(musb_base, MUSB_INDEX, i);
2254 musb_writew(musb_base, 0x10 + MUSB_TXMAXP,
2255 musb_context.index_regs[i].txmaxp);
2256 musb_writew(musb_base, 0x10 + MUSB_TXCSR,
2257 musb_context.index_regs[i].txcsr);
2258 musb_writew(musb_base, 0x10 + MUSB_RXMAXP,
2259 musb_context.index_regs[i].rxmaxp);
2260 musb_writew(musb_base, 0x10 + MUSB_RXCSR,
2261 musb_context.index_regs[i].rxcsr);
2262
2263 if (musb->dyn_fifo) {
2264 musb_write_txfifosz(musb_base,
2265 musb_context.index_regs[i].txfifosz);
2266 musb_write_rxfifosz(musb_base,
2267 musb_context.index_regs[i].rxfifosz);
2268 musb_write_txfifoadd(musb_base,
2269 musb_context.index_regs[i].txfifoadd);
2270 musb_write_rxfifoadd(musb_base,
2271 musb_context.index_regs[i].rxfifoadd);
2272 }
2273
2274 if (is_host_enabled(musb)) {
2275 musb_writeb(musb_base, 0x10 + MUSB_TXTYPE,
2276 musb_context.index_regs[i].txtype);
2277 musb_writeb(musb_base, 0x10 + MUSB_TXINTERVAL,
2278 musb_context.index_regs[i].txinterval);
2279 musb_writeb(musb_base, 0x10 + MUSB_RXTYPE,
2280 musb_context.index_regs[i].rxtype);
2281 musb_writeb(musb_base, 0x10 + MUSB_RXINTERVAL,
2282
2283 musb_context.index_regs[i].rxinterval);
2284 musb_write_txfunaddr(musb_base, i,
2285 musb_context.index_regs[i].txfunaddr);
2286 musb_write_txhubaddr(musb_base, i,
2287 musb_context.index_regs[i].txhubaddr);
2288 musb_write_txhubport(musb_base, i,
2289 musb_context.index_regs[i].txhubport);
2290
2291 ep_target_regs =
2292 musb_read_target_reg_base(i, musb_base);
2293
2294 musb_write_rxfunaddr(ep_target_regs,
2295 musb_context.index_regs[i].rxfunaddr);
2296 musb_write_rxhubaddr(ep_target_regs,
2297 musb_context.index_regs[i].rxhubaddr);
2298 musb_write_rxhubport(ep_target_regs,
2299 musb_context.index_regs[i].rxhubport);
2300 }
2301 }
2302
2303 musb_writeb(musb_base, MUSB_INDEX, musb_context.index);
2304}
2305
2176static int musb_suspend(struct device *dev) 2306static int musb_suspend(struct device *dev)
2177{ 2307{
2178 struct platform_device *pdev = to_platform_device(dev); 2308 struct platform_device *pdev = to_platform_device(dev);
@@ -2194,6 +2324,8 @@ static int musb_suspend(struct device *dev)
2194 */ 2324 */
2195 } 2325 }
2196 2326
2327 musb_save_context(musb);
2328
2197 if (musb->set_clock) 2329 if (musb->set_clock)
2198 musb->set_clock(musb->clock, 0); 2330 musb->set_clock(musb->clock, 0);
2199 else 2331 else
@@ -2215,6 +2347,8 @@ static int musb_resume_noirq(struct device *dev)
2215 else 2347 else
2216 clk_enable(musb->clock); 2348 clk_enable(musb->clock);
2217 2349
2350 musb_restore_context(musb);
2351
2218 /* for static cmos like DaVinci, register values were preserved 2352 /* for static cmos like DaVinci, register values were preserved
2219 * unless for some reason the whole soc powered down or the USB 2353 * unless for some reason the whole soc powered down or the USB
2220 * module got reset through the PSC (vs just being disabled). 2354 * module got reset through the PSC (vs just being disabled).
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 5514c7ee85bd..d849fb81c131 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -52,6 +52,15 @@ struct musb;
52struct musb_hw_ep; 52struct musb_hw_ep;
53struct musb_ep; 53struct musb_ep;
54 54
55/* Helper defines for struct musb->hwvers */
56#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
57#define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
58#define MUSB_HWVERS_RC 0x8000
59#define MUSB_HWVERS_1300 0x52C
60#define MUSB_HWVERS_1400 0x590
61#define MUSB_HWVERS_1800 0x720
62#define MUSB_HWVERS_1900 0x784
63#define MUSB_HWVERS_2000 0x800
55 64
56#include "musb_debug.h" 65#include "musb_debug.h"
57#include "musb_dma.h" 66#include "musb_dma.h"
@@ -322,13 +331,6 @@ struct musb {
322 struct clk *clock; 331 struct clk *clock;
323 irqreturn_t (*isr)(int, void *); 332 irqreturn_t (*isr)(int, void *);
324 struct work_struct irq_work; 333 struct work_struct irq_work;
325#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
326#define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
327#define MUSB_HWVERS_RC 0x8000
328#define MUSB_HWVERS_1300 0x52C
329#define MUSB_HWVERS_1400 0x590
330#define MUSB_HWVERS_1800 0x720
331#define MUSB_HWVERS_2000 0x800
332 u16 hwvers; 334 u16 hwvers;
333 335
334/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */ 336/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
@@ -411,22 +413,15 @@ struct musb {
411 413
412 unsigned hb_iso_rx:1; /* high bandwidth iso rx? */ 414 unsigned hb_iso_rx:1; /* high bandwidth iso rx? */
413 unsigned hb_iso_tx:1; /* high bandwidth iso tx? */ 415 unsigned hb_iso_tx:1; /* high bandwidth iso tx? */
416 unsigned dyn_fifo:1; /* dynamic FIFO supported? */
414 417
415#ifdef C_MP_TX 418 unsigned bulk_split:1;
416 unsigned bulk_split:1;
417#define can_bulk_split(musb,type) \ 419#define can_bulk_split(musb,type) \
418 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split) 420 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
419#else
420#define can_bulk_split(musb, type) 0
421#endif
422 421
423#ifdef C_MP_RX 422 unsigned bulk_combine:1;
424 unsigned bulk_combine:1;
425#define can_bulk_combine(musb,type) \ 423#define can_bulk_combine(musb,type) \
426 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine) 424 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
427#else
428#define can_bulk_combine(musb, type) 0
429#endif
430 425
431#ifdef CONFIG_USB_GADGET_MUSB_HDRC 426#ifdef CONFIG_USB_GADGET_MUSB_HDRC
432 /* is_suspended means USB B_PERIPHERAL suspend */ 427 /* is_suspended means USB B_PERIPHERAL suspend */
@@ -461,6 +456,45 @@ struct musb {
461#endif 456#endif
462}; 457};
463 458
459#ifdef CONFIG_PM
460struct musb_csr_regs {
461 /* FIFO registers */
462 u16 txmaxp, txcsr, rxmaxp, rxcsr;
463 u16 rxfifoadd, txfifoadd;
464 u8 txtype, txinterval, rxtype, rxinterval;
465 u8 rxfifosz, txfifosz;
466 u8 txfunaddr, txhubaddr, txhubport;
467 u8 rxfunaddr, rxhubaddr, rxhubport;
468};
469
470struct musb_context_registers {
471
472#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430)
473 u32 otg_sysconfig, otg_forcestandby;
474#endif
475 u8 power;
476 u16 intrtxe, intrrxe;
477 u8 intrusbe;
478 u16 frame;
479 u8 index, testmode;
480
481 u8 devctl, misc;
482
483 struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
484};
485
486#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430)
487extern void musb_platform_save_context(struct musb *musb,
488 struct musb_context_registers *musb_context);
489extern void musb_platform_restore_context(struct musb *musb,
490 struct musb_context_registers *musb_context);
491#else
492#define musb_platform_save_context(m, x) do {} while (0)
493#define musb_platform_restore_context(m, x) do {} while (0)
494#endif
495
496#endif
497
464static inline void musb_set_vbus(struct musb *musb, int is_on) 498static inline void musb_set_vbus(struct musb *musb, int is_on)
465{ 499{
466 musb->board_set_vbus(musb, is_on); 500 musb->board_set_vbus(musb, is_on);
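Note on the relocated MUSB_HWVERS_* macros above: they pack the RTL revision with the major number in bits 14:10, the minor number in bits 9:0, and a release-candidate flag in bit 15. A minimal sketch of decoding a raw value read from the MUSB_HWVERS register, assuming the driver's private musb_core.h is in scope; the helper name is illustrative and not part of the patch:

	/* Sketch: decode a raw HWVERS value using the macros above. */
	static void example_print_hwvers(u16 hwvers)
	{
		pr_info("MUSB RTL version %d.%d%s\n",
			MUSB_HWVERS_MAJOR(hwvers),
			MUSB_HWVERS_MINOR(hwvers),
			(hwvers & MUSB_HWVERS_RC) ? "RC" : "");
	}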
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index cbcf14a236e6..a9f288cd70ed 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -895,7 +895,14 @@ static int musb_gadget_enable(struct usb_ep *ep,
895 /* REVISIT if can_bulk_split(), use by updating "tmp"; 895 /* REVISIT if can_bulk_split(), use by updating "tmp";
896 * likewise high bandwidth periodic tx 896 * likewise high bandwidth periodic tx
897 */ 897 */
898 musb_writew(regs, MUSB_TXMAXP, tmp); 898 /* Set TXMAXP with the FIFO size of the endpoint
                                                              899	 * to disable double buffering mode. Currently, it seems that double
                                                              900	 * buffering has a problem if the musb RTL revision number is < 2.0.
901 */
902 if (musb->hwvers < MUSB_HWVERS_2000)
903 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
904 else
905 musb_writew(regs, MUSB_TXMAXP, tmp);
899 906
900 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 907 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
901 if (musb_readw(regs, MUSB_TXCSR) 908 if (musb_readw(regs, MUSB_TXCSR)
@@ -925,7 +932,13 @@ static int musb_gadget_enable(struct usb_ep *ep,
925 /* REVISIT if can_bulk_combine() use by updating "tmp" 932 /* REVISIT if can_bulk_combine() use by updating "tmp"
926 * likewise high bandwidth periodic rx 933 * likewise high bandwidth periodic rx
927 */ 934 */
928 musb_writew(regs, MUSB_RXMAXP, tmp); 935 /* Set RXMAXP with the FIFO size of the endpoint
936 * to disable double buffering mode.
937 */
938 if (musb->hwvers < MUSB_HWVERS_2000)
939 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
940 else
941 musb_writew(regs, MUSB_RXMAXP, tmp);
929 942
930 /* force shared fifo to OUT-only mode */ 943 /* force shared fifo to OUT-only mode */
931 if (hw_ep->is_shared_fifo) { 944 if (hw_ep->is_shared_fifo) {
@@ -1697,8 +1710,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1697 return -EINVAL; 1710 return -EINVAL;
1698 1711
1699 /* driver must be initialized to support peripheral mode */ 1712 /* driver must be initialized to support peripheral mode */
1700 if (!musb || !(musb->board_mode == MUSB_OTG 1713 if (!musb) {
1701 || musb->board_mode != MUSB_OTG)) {
1702 DBG(1, "%s, no dev??\n", __func__); 1714 DBG(1, "%s, no dev??\n", __func__);
1703 return -ENODEV; 1715 return -ENODEV;
1704 } 1716 }
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 74c4c3698f1e..3421cf9858b5 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -605,8 +605,14 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
605 musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); 605 musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
606 musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); 606 musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
607 /* NOTE: bulk combining rewrites high bits of maxpacket */ 607 /* NOTE: bulk combining rewrites high bits of maxpacket */
608 musb_writew(ep->regs, MUSB_RXMAXP, 608 /* Set RXMAXP with the FIFO size of the endpoint
609 qh->maxpacket | ((qh->hb_mult - 1) << 11)); 609 * to disable double buffer mode.
610 */
611 if (musb->hwvers < MUSB_HWVERS_2000)
612 musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
613 else
614 musb_writew(ep->regs, MUSB_RXMAXP,
615 qh->maxpacket | ((qh->hb_mult - 1) << 11));
610 616
611 ep->rx_reinit = 0; 617 ep->rx_reinit = 0;
612} 618}
@@ -1771,6 +1777,9 @@ static int musb_schedule(
1771 int best_end, epnum; 1777 int best_end, epnum;
1772 struct musb_hw_ep *hw_ep = NULL; 1778 struct musb_hw_ep *hw_ep = NULL;
1773 struct list_head *head = NULL; 1779 struct list_head *head = NULL;
1780 u8 toggle;
1781 u8 txtype;
1782 struct urb *urb = next_urb(qh);
1774 1783
1775 /* use fixed hardware for control and bulk */ 1784 /* use fixed hardware for control and bulk */
1776 if (qh->type == USB_ENDPOINT_XFER_CONTROL) { 1785 if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
@@ -1809,6 +1818,27 @@ static int musb_schedule(
1809 diff -= (qh->maxpacket * qh->hb_mult); 1818 diff -= (qh->maxpacket * qh->hb_mult);
1810 1819
1811 if (diff >= 0 && best_diff > diff) { 1820 if (diff >= 0 && best_diff > diff) {
1821
1822 /*
1823 * Mentor controller has a bug in that if we schedule
1824 * a BULK Tx transfer on an endpoint that had earlier
1825 * handled ISOC then the BULK transfer has to start on
1826 * a zero toggle. If the BULK transfer starts on a 1
1827 * toggle then this transfer will fail as the mentor
1828 * controller starts the Bulk transfer on a 0 toggle
1829 * irrespective of the programming of the toggle bits
1830 * in the TXCSR register. Check for this condition
1831 * while allocating the EP for a Tx Bulk transfer. If
1832 * so skip this EP.
1833 */
1834 hw_ep = musb->endpoints + epnum;
1835 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
1836 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
1837 >> 4) & 0x3;
1838 if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
1839 toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
1840 continue;
1841
1812 best_diff = diff; 1842 best_diff = diff;
1813 best_end = epnum; 1843 best_end = epnum;
1814 } 1844 }
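The scheduling change above skips a candidate TX endpoint when a BULK OUT transfer would start on a DATA1 toggle and that endpoint last carried ISOC traffic, since the controller forces DATA0 regardless of what is programmed into TXCSR. The same test factored into a predicate, as a hedged sketch — the function is ours, the patch keeps it inline in musb_schedule():

	static bool example_ep_has_bulk_toggle_quirk(struct musb_hw_ep *hw_ep,
						     struct musb_qh *qh,
						     struct urb *urb, int is_in)
	{
		u8 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
		u8 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE) >> 4) & 0x3;

		return !is_in && qh->type == USB_ENDPOINT_XFER_BULK &&
		       toggle && txtype == USB_ENDPOINT_XFER_ISOC;
	}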
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 473a94ef905f..292894a2c247 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -72,6 +72,10 @@
72#define MUSB_DEVCTL_HR 0x02 72#define MUSB_DEVCTL_HR 0x02
73#define MUSB_DEVCTL_SESSION 0x01 73#define MUSB_DEVCTL_SESSION 0x01
74 74
75/* MUSB ULPI VBUSCONTROL */
76#define MUSB_ULPI_USE_EXTVBUS 0x01
77#define MUSB_ULPI_USE_EXTVBUSIND 0x02
78
75/* TESTMODE */ 79/* TESTMODE */
76#define MUSB_TEST_FORCE_HOST 0x80 80#define MUSB_TEST_FORCE_HOST 0x80
77#define MUSB_TEST_FIFO_ACCESS 0x40 81#define MUSB_TEST_FIFO_ACCESS 0x40
@@ -246,6 +250,7 @@
246 250
247/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */ 251/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
248#define MUSB_HWVERS 0x6C /* 8 bit */ 252#define MUSB_HWVERS 0x6C /* 8 bit */
253#define MUSB_ULPI_BUSCONTROL 0x70 /* 8 bit */
249 254
250#define MUSB_EPINFO 0x78 /* 8 bit */ 255#define MUSB_EPINFO 0x78 /* 8 bit */
251#define MUSB_RAMINFO 0x79 /* 8 bit */ 256#define MUSB_RAMINFO 0x79 /* 8 bit */
@@ -321,6 +326,26 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
321 musb_writew(mbase, MUSB_RXFIFOADD, c_off); 326 musb_writew(mbase, MUSB_RXFIFOADD, c_off);
322} 327}
323 328
329static inline u8 musb_read_txfifosz(void __iomem *mbase)
330{
331 return musb_readb(mbase, MUSB_TXFIFOSZ);
332}
333
334static inline u16 musb_read_txfifoadd(void __iomem *mbase)
335{
336 return musb_readw(mbase, MUSB_TXFIFOADD);
337}
338
339static inline u8 musb_read_rxfifosz(void __iomem *mbase)
340{
341 return musb_readb(mbase, MUSB_RXFIFOSZ);
342}
343
344static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
345{
346 return musb_readw(mbase, MUSB_RXFIFOADD);
347}
348
324static inline u8 musb_read_configdata(void __iomem *mbase) 349static inline u8 musb_read_configdata(void __iomem *mbase)
325{ 350{
326 musb_writeb(mbase, MUSB_INDEX, 0); 351 musb_writeb(mbase, MUSB_INDEX, 0);
@@ -376,6 +401,36 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
376 qh_h_port_reg); 401 qh_h_port_reg);
377} 402}
378 403
404static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
405{
406 return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXFUNCADDR));
407}
408
409static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
410{
411 return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBADDR));
412}
413
414static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
415{
416 return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBPORT));
417}
418
419static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
420{
421 return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR));
422}
423
424static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
425{
426 return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR));
427}
428
429static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
430{
431 return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT));
432}
433
379#else /* CONFIG_BLACKFIN */ 434#else /* CONFIG_BLACKFIN */
380 435
381#define USB_BASE USB_FADDR 436#define USB_BASE USB_FADDR
@@ -455,6 +510,22 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
455{ 510{
456} 511}
457 512
513static inline u8 musb_read_txfifosz(void __iomem *mbase)
514{
515}
516
517static inline u16 musb_read_txfifoadd(void __iomem *mbase)
518{
519}
520
521static inline u8 musb_read_rxfifosz(void __iomem *mbase)
522{
523}
524
525static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
526{
527}
528
458static inline u8 musb_read_configdata(void __iomem *mbase) 529static inline u8 musb_read_configdata(void __iomem *mbase)
459{ 530{
460 return 0; 531 return 0;
@@ -462,7 +533,11 @@ static inline u8 musb_read_configdata(void __iomem *mbase)
462 533
463static inline u16 musb_read_hwvers(void __iomem *mbase) 534static inline u16 musb_read_hwvers(void __iomem *mbase)
464{ 535{
465 return 0; 536 /*
537 * This register is invisible on Blackfin, actually the MUSB
                                                              538	 * RTL version of Blackfin is 1.9, so just hardcode its value.
539 */
540 return MUSB_HWVERS_1900;
466} 541}
467 542
468static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase) 543static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase)
@@ -500,6 +575,30 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
500{ 575{
501} 576}
502 577
578static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
579{
580}
581
582static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
583{
584}
585
586static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
587{
588}
589
590static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
591{
592}
593
594static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
595{
596}
597
                                                             598static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
599{
600}
601
503#endif /* CONFIG_BLACKFIN */ 602#endif /* CONFIG_BLACKFIN */
504 603
505#endif /* __MUSB_REGS_H__ */ 604#endif /* __MUSB_REGS_H__ */
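The read accessors added to musb_regs.h mirror the existing write helpers so the CONFIG_PM context code declared in musb_core.h can snapshot the dynamic FIFO setup of each endpoint. A hedged sketch of the save side for the currently selected (indexed) endpoint, assuming the driver's private headers; the real save/restore loop lives in musb_core.c and is not part of this hunk:

	/* Sketch: record one endpoint's FIFO sizing into struct musb_csr_regs. */
	static void example_save_ep_fifo(void __iomem *mbase,
					 struct musb_csr_regs *ep)
	{
		ep->txfifosz  = musb_read_txfifosz(mbase);
		ep->rxfifosz  = musb_read_rxfifosz(mbase);
		ep->txfifoadd = musb_read_txfifoadd(mbase);
		ep->rxfifoadd = musb_read_rxfifoadd(mbase);
	}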
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index a237550f91bf..2fa7d5c00f31 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -250,20 +250,39 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
250 u8 bchannel; 250 u8 bchannel;
251 u8 int_hsdma; 251 u8 int_hsdma;
252 252
253 u32 addr; 253 u32 addr, count;
254 u16 csr; 254 u16 csr;
255 255
256 spin_lock_irqsave(&musb->lock, flags); 256 spin_lock_irqsave(&musb->lock, flags);
257 257
258 int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR); 258 int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
259 if (!int_hsdma)
260 goto done;
261 259
262#ifdef CONFIG_BLACKFIN 260#ifdef CONFIG_BLACKFIN
263 /* Clear DMA interrupt flags */ 261 /* Clear DMA interrupt flags */
264 musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma); 262 musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
265#endif 263#endif
266 264
265 if (!int_hsdma) {
266 DBG(2, "spurious DMA irq\n");
267
268 for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
269 musb_channel = (struct musb_dma_channel *)
270 &(controller->channel[bchannel]);
271 channel = &musb_channel->channel;
272 if (channel->status == MUSB_DMA_STATUS_BUSY) {
273 count = musb_read_hsdma_count(mbase, bchannel);
274
275 if (count == 0)
276 int_hsdma |= (1 << bchannel);
277 }
278 }
279
280 DBG(2, "int_hsdma = 0x%x\n", int_hsdma);
281
282 if (!int_hsdma)
283 goto done;
284 }
285
267 for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) { 286 for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
268 if (int_hsdma & (1 << bchannel)) { 287 if (int_hsdma & (1 << bchannel)) {
269 musb_channel = (struct musb_dma_channel *) 288 musb_channel = (struct musb_dma_channel *)
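The spurious-interrupt path above rebuilds the pending-channel mask by checking whether a busy channel has run its byte count down to zero. The same test in isolation, as a hedged sketch with a helper name of our choosing:

	/* Sketch: a channel flagged busy with nothing left to transfer has
	 * in fact completed, even if MUSB_HSDMA_INTR failed to latch its bit.
	 */
	static bool example_hsdma_channel_done(void __iomem *mbase,
					       struct musb_dma_channel *musb_channel,
					       u8 bchannel)
	{
		return musb_channel->channel.status == MUSB_DMA_STATUS_BUSY &&
		       musb_read_hsdma_count(mbase, bchannel) == 0;
	}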
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index 1299d92dc83f..613f95a058f7 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -55,6 +55,10 @@
55 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \ 55 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \
56 addr) 56 addr)
57 57
58#define musb_read_hsdma_count(mbase, bchannel) \
59 musb_readl(mbase, \
60 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT))
61
58#define musb_write_hsdma_count(mbase, bchannel, len) \ 62#define musb_write_hsdma_count(mbase, bchannel, len) \
59 musb_writel(mbase, \ 63 musb_writel(mbase, \
60 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \ 64 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \
@@ -96,6 +100,19 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
96 ((u16)(((u32) dma_addr >> 16) & 0xFFFF))); 100 ((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
97} 101}
98 102
103static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
104{
105 u32 count = musb_readw(mbase,
106 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
107
108 count = count << 16;
109
110 count |= musb_readw(mbase,
111 MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
112
113 return count;
114}
115
99static inline void musb_write_hsdma_count(void __iomem *mbase, 116static inline void musb_write_hsdma_count(void __iomem *mbase,
100 u8 bchannel, u32 len) 117 u8 bchannel, u32 len)
101{ 118{
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 83beeac5e7bf..3fe16867b5a8 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -220,7 +220,7 @@ int __init musb_platform_init(struct musb *musb)
220 220
221 musb_platform_resume(musb); 221 musb_platform_resume(musb);
222 222
223 l = omap_readl(OTG_SYSCONFIG); 223 l = musb_readl(musb->mregs, OTG_SYSCONFIG);
224 l &= ~ENABLEWAKEUP; /* disable wakeup */ 224 l &= ~ENABLEWAKEUP; /* disable wakeup */
225 l &= ~NOSTDBY; /* remove possible nostdby */ 225 l &= ~NOSTDBY; /* remove possible nostdby */
226 l |= SMARTSTDBY; /* enable smart standby */ 226 l |= SMARTSTDBY; /* enable smart standby */
@@ -233,17 +233,19 @@ int __init musb_platform_init(struct musb *musb)
233 */ 233 */
234 if (!cpu_is_omap3430()) 234 if (!cpu_is_omap3430())
235 l |= AUTOIDLE; /* enable auto idle */ 235 l |= AUTOIDLE; /* enable auto idle */
236 omap_writel(l, OTG_SYSCONFIG); 236 musb_writel(musb->mregs, OTG_SYSCONFIG, l);
237 237
238 l = omap_readl(OTG_INTERFSEL); 238 l = musb_readl(musb->mregs, OTG_INTERFSEL);
239 l |= ULPI_12PIN; 239 l |= ULPI_12PIN;
240 omap_writel(l, OTG_INTERFSEL); 240 musb_writel(musb->mregs, OTG_INTERFSEL, l);
241 241
242 pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, " 242 pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
243 "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n", 243 "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
244 omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG), 244 musb_readl(musb->mregs, OTG_REVISION),
245 omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL), 245 musb_readl(musb->mregs, OTG_SYSCONFIG),
246 omap_readl(OTG_SIMENABLE)); 246 musb_readl(musb->mregs, OTG_SYSSTATUS),
247 musb_readl(musb->mregs, OTG_INTERFSEL),
248 musb_readl(musb->mregs, OTG_SIMENABLE));
247 249
248 omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1); 250 omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
249 251
@@ -255,6 +257,22 @@ int __init musb_platform_init(struct musb *musb)
255 return 0; 257 return 0;
256} 258}
257 259
260#ifdef CONFIG_PM
261void musb_platform_save_context(struct musb *musb,
262 struct musb_context_registers *musb_context)
263{
264 musb_context->otg_sysconfig = musb_readl(musb->mregs, OTG_SYSCONFIG);
265 musb_context->otg_forcestandby = musb_readl(musb->mregs, OTG_FORCESTDBY);
266}
267
268void musb_platform_restore_context(struct musb *musb,
269 struct musb_context_registers *musb_context)
270{
271 musb_writel(musb->mregs, OTG_SYSCONFIG, musb_context->otg_sysconfig);
272 musb_writel(musb->mregs, OTG_FORCESTDBY, musb_context->otg_forcestandby);
273}
274#endif
275
258int musb_platform_suspend(struct musb *musb) 276int musb_platform_suspend(struct musb *musb)
259{ 277{
260 u32 l; 278 u32 l;
@@ -263,13 +281,13 @@ int musb_platform_suspend(struct musb *musb)
263 return 0; 281 return 0;
264 282
265 /* in any role */ 283 /* in any role */
266 l = omap_readl(OTG_FORCESTDBY); 284 l = musb_readl(musb->mregs, OTG_FORCESTDBY);
267 l |= ENABLEFORCE; /* enable MSTANDBY */ 285 l |= ENABLEFORCE; /* enable MSTANDBY */
268 omap_writel(l, OTG_FORCESTDBY); 286 musb_writel(musb->mregs, OTG_FORCESTDBY, l);
269 287
270 l = omap_readl(OTG_SYSCONFIG); 288 l = musb_readl(musb->mregs, OTG_SYSCONFIG);
271 l |= ENABLEWAKEUP; /* enable wakeup */ 289 l |= ENABLEWAKEUP; /* enable wakeup */
272 omap_writel(l, OTG_SYSCONFIG); 290 musb_writel(musb->mregs, OTG_SYSCONFIG, l);
273 291
274 otg_set_suspend(musb->xceiv, 1); 292 otg_set_suspend(musb->xceiv, 1);
275 293
@@ -295,13 +313,13 @@ static int musb_platform_resume(struct musb *musb)
295 else 313 else
296 clk_enable(musb->clock); 314 clk_enable(musb->clock);
297 315
298 l = omap_readl(OTG_SYSCONFIG); 316 l = musb_readl(musb->mregs, OTG_SYSCONFIG);
299 l &= ~ENABLEWAKEUP; /* disable wakeup */ 317 l &= ~ENABLEWAKEUP; /* disable wakeup */
300 omap_writel(l, OTG_SYSCONFIG); 318 musb_writel(musb->mregs, OTG_SYSCONFIG, l);
301 319
302 l = omap_readl(OTG_FORCESTDBY); 320 l = musb_readl(musb->mregs, OTG_FORCESTDBY);
303 l &= ~ENABLEFORCE; /* disable MSTANDBY */ 321 l &= ~ENABLEFORCE; /* disable MSTANDBY */
304 omap_writel(l, OTG_FORCESTDBY); 322 musb_writel(musb->mregs, OTG_FORCESTDBY, l);
305 323
306 return 0; 324 return 0;
307} 325}
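With the OMAP OTG registers now accessed through musb->mregs, the save/restore hooks above can be driven from the generic PM path together with the musb_context_registers struct introduced in musb_core.h. A hedged sketch of such a caller; the actual wiring belongs to musb_core.c and is not shown in this hunk:

	static struct musb_context_registers example_context;

	static void example_musb_suspend(struct musb *musb)
	{
		musb_platform_save_context(musb, &example_context);
		/* common (non-platform) register state would be saved here too */
	}

	static void example_musb_resume(struct musb *musb)
	{
		musb_platform_restore_context(musb, &example_context);
	}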
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
index fbede7798aed..40b3c02ae9f0 100644
--- a/drivers/usb/musb/omap2430.h
+++ b/drivers/usb/musb/omap2430.h
@@ -10,47 +10,43 @@
10#ifndef __MUSB_OMAP243X_H__ 10#ifndef __MUSB_OMAP243X_H__
11#define __MUSB_OMAP243X_H__ 11#define __MUSB_OMAP243X_H__
12 12
13#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
14#include <mach/hardware.h>
15#include <plat/usb.h> 13#include <plat/usb.h>
16 14
17/* 15/*
18 * OMAP2430-specific definitions 16 * OMAP2430-specific definitions
19 */ 17 */
20 18
21#define MENTOR_BASE_OFFSET 0 19#define OTG_REVISION 0x400
22#if defined(CONFIG_ARCH_OMAP2430) 20
23#define OMAP_HSOTG_BASE (OMAP243X_HS_BASE) 21#define OTG_SYSCONFIG 0x404
24#elif defined(CONFIG_ARCH_OMAP3430)
25#define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE)
26#endif
27#define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset))
28#define OTG_REVISION OMAP_HSOTG(0x0)
29#define OTG_SYSCONFIG OMAP_HSOTG(0x4)
30# define MIDLEMODE 12 /* bit position */ 22# define MIDLEMODE 12 /* bit position */
31# define FORCESTDBY (0 << MIDLEMODE) 23# define FORCESTDBY (0 << MIDLEMODE)
32# define NOSTDBY (1 << MIDLEMODE) 24# define NOSTDBY (1 << MIDLEMODE)
33# define SMARTSTDBY (2 << MIDLEMODE) 25# define SMARTSTDBY (2 << MIDLEMODE)
26
34# define SIDLEMODE 3 /* bit position */ 27# define SIDLEMODE 3 /* bit position */
35# define FORCEIDLE (0 << SIDLEMODE) 28# define FORCEIDLE (0 << SIDLEMODE)
36# define NOIDLE (1 << SIDLEMODE) 29# define NOIDLE (1 << SIDLEMODE)
37# define SMARTIDLE (2 << SIDLEMODE) 30# define SMARTIDLE (2 << SIDLEMODE)
31
38# define ENABLEWAKEUP (1 << 2) 32# define ENABLEWAKEUP (1 << 2)
39# define SOFTRST (1 << 1) 33# define SOFTRST (1 << 1)
40# define AUTOIDLE (1 << 0) 34# define AUTOIDLE (1 << 0)
41#define OTG_SYSSTATUS OMAP_HSOTG(0x8) 35
36#define OTG_SYSSTATUS 0x408
42# define RESETDONE (1 << 0) 37# define RESETDONE (1 << 0)
43#define OTG_INTERFSEL OMAP_HSOTG(0xc) 38
39#define OTG_INTERFSEL 0x40c
44# define EXTCP (1 << 2) 40# define EXTCP (1 << 2)
45# define PHYSEL 0 /* bit position */ 41# define PHYSEL 0 /* bit position */
46# define UTMI_8BIT (0 << PHYSEL) 42# define UTMI_8BIT (0 << PHYSEL)
47# define ULPI_12PIN (1 << PHYSEL) 43# define ULPI_12PIN (1 << PHYSEL)
48# define ULPI_8PIN (2 << PHYSEL) 44# define ULPI_8PIN (2 << PHYSEL)
49#define OTG_SIMENABLE OMAP_HSOTG(0x10) 45
46#define OTG_SIMENABLE 0x410
50# define TM1 (1 << 0) 47# define TM1 (1 << 0)
51#define OTG_FORCESTDBY OMAP_HSOTG(0x14)
52# define ENABLEFORCE (1 << 0)
53 48
54#endif /* CONFIG_ARCH_OMAP2430 */ 49#define OTG_FORCESTDBY 0x414
50# define ENABLEFORCE (1 << 0)
55 51
56#endif /* __MUSB_OMAP243X_H__ */ 52#endif /* __MUSB_OMAP243X_H__ */
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 88b587c703e9..ab776a8d98ca 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1118,7 +1118,7 @@ int __init musb_platform_init(struct musb *musb)
1118 } 1118 }
1119 musb->sync = mem->start; 1119 musb->sync = mem->start;
1120 1120
1121 sync = ioremap(mem->start, mem->end - mem->start + 1); 1121 sync = ioremap(mem->start, resource_size(mem));
1122 if (!sync) { 1122 if (!sync) {
1123 pr_debug("ioremap for sync failed\n"); 1123 pr_debug("ioremap for sync failed\n");
1124 ret = -ENOMEM; 1124 ret = -ENOMEM;
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index e13c77052e5e..1c868096bd6f 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -648,7 +648,7 @@ void dma_controller_destroy(struct dma_controller *c)
648 } 648 }
649 } 649 }
650 650
651 if (!tusb_dma->multichannel && tusb_dma && tusb_dma->ch >= 0) 651 if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
652 omap_free_dma(tusb_dma->ch); 652 omap_free_dma(tusb_dma->ch);
653 653
654 kfree(tusb_dma); 654 kfree(tusb_dma);
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 2be9f2fa41f9..3e4e9f434d78 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -36,7 +36,7 @@
36#include <linux/i2c/twl.h> 36#include <linux/i2c/twl.h>
37#include <linux/regulator/consumer.h> 37#include <linux/regulator/consumer.h>
38#include <linux/err.h> 38#include <linux/err.h>
39 39#include <linux/notifier.h>
40 40
41/* Register defines */ 41/* Register defines */
42 42
@@ -236,15 +236,6 @@
236#define PMBR1 0x0D 236#define PMBR1 0x0D
237#define GPIO_USB_4PIN_ULPI_2430C (3 << 0) 237#define GPIO_USB_4PIN_ULPI_2430C (3 << 0)
238 238
239
240
241enum linkstat {
242 USB_LINK_UNKNOWN = 0,
243 USB_LINK_NONE,
244 USB_LINK_VBUS,
245 USB_LINK_ID,
246};
247
248struct twl4030_usb { 239struct twl4030_usb {
249 struct otg_transceiver otg; 240 struct otg_transceiver otg;
250 struct device *dev; 241 struct device *dev;
@@ -347,10 +338,10 @@ twl4030_usb_clear_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
347 338
348/*-------------------------------------------------------------------------*/ 339/*-------------------------------------------------------------------------*/
349 340
350static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl) 341static enum usb_xceiv_events twl4030_usb_linkstat(struct twl4030_usb *twl)
351{ 342{
352 int status; 343 int status;
353 int linkstat = USB_LINK_UNKNOWN; 344 int linkstat = USB_EVENT_NONE;
354 345
355 /* 346 /*
356 * For ID/VBUS sensing, see manual section 15.4.8 ... 347 * For ID/VBUS sensing, see manual section 15.4.8 ...
@@ -368,11 +359,11 @@ static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
368 dev_err(twl->dev, "USB link status err %d\n", status); 359 dev_err(twl->dev, "USB link status err %d\n", status);
369 else if (status & (BIT(7) | BIT(2))) { 360 else if (status & (BIT(7) | BIT(2))) {
370 if (status & BIT(2)) 361 if (status & BIT(2))
371 linkstat = USB_LINK_ID; 362 linkstat = USB_EVENT_ID;
372 else 363 else
373 linkstat = USB_LINK_VBUS; 364 linkstat = USB_EVENT_VBUS;
374 } else 365 } else
375 linkstat = USB_LINK_NONE; 366 linkstat = USB_EVENT_NONE;
376 367
377 dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n", 368 dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
378 status, status, linkstat); 369 status, status, linkstat);
@@ -383,7 +374,7 @@ static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
383 374
384 spin_lock_irq(&twl->lock); 375 spin_lock_irq(&twl->lock);
385 twl->linkstat = linkstat; 376 twl->linkstat = linkstat;
386 if (linkstat == USB_LINK_ID) { 377 if (linkstat == USB_EVENT_ID) {
387 twl->otg.default_a = true; 378 twl->otg.default_a = true;
388 twl->otg.state = OTG_STATE_A_IDLE; 379 twl->otg.state = OTG_STATE_A_IDLE;
389 } else { 380 } else {
@@ -564,7 +555,7 @@ static ssize_t twl4030_usb_vbus_show(struct device *dev,
564 555
565 spin_lock_irqsave(&twl->lock, flags); 556 spin_lock_irqsave(&twl->lock, flags);
566 ret = sprintf(buf, "%s\n", 557 ret = sprintf(buf, "%s\n",
567 (twl->linkstat == USB_LINK_VBUS) ? "on" : "off"); 558 (twl->linkstat == USB_EVENT_VBUS) ? "on" : "off");
568 spin_unlock_irqrestore(&twl->lock, flags); 559 spin_unlock_irqrestore(&twl->lock, flags);
569 560
570 return ret; 561 return ret;
@@ -576,17 +567,8 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
576 struct twl4030_usb *twl = _twl; 567 struct twl4030_usb *twl = _twl;
577 int status; 568 int status;
578 569
579#ifdef CONFIG_LOCKDEP
580 /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
581 * we don't want and can't tolerate. Although it might be
582 * friendlier not to borrow this thread context...
583 */
584 local_irq_enable();
585#endif
586
587 status = twl4030_usb_linkstat(twl); 570 status = twl4030_usb_linkstat(twl);
588 if (status != USB_LINK_UNKNOWN) { 571 if (status >= 0) {
589
590 /* FIXME add a set_power() method so that B-devices can 572 /* FIXME add a set_power() method so that B-devices can
591 * configure the charger appropriately. It's not always 573 * configure the charger appropriately. It's not always
592 * correct to consume VBUS power, and how much current to 574 * correct to consume VBUS power, and how much current to
@@ -598,12 +580,13 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
598 * USB_LINK_VBUS state. musb_hdrc won't care until it 580 * USB_LINK_VBUS state. musb_hdrc won't care until it
599 * starts to handle softconnect right. 581 * starts to handle softconnect right.
600 */ 582 */
601 if (status == USB_LINK_NONE) 583 if (status == USB_EVENT_NONE)
602 twl4030_phy_suspend(twl, 0); 584 twl4030_phy_suspend(twl, 0);
603 else 585 else
604 twl4030_phy_resume(twl); 586 twl4030_phy_resume(twl);
605 587
606 twl4030charger_usb_en(status == USB_LINK_VBUS); 588 blocking_notifier_call_chain(&twl->otg.notifier, status,
589 twl->otg.gadget);
607 } 590 }
608 sysfs_notify(&twl->dev->kobj, NULL, "vbus"); 591 sysfs_notify(&twl->dev->kobj, NULL, "vbus");
609 592
@@ -693,6 +676,8 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
693 if (device_create_file(&pdev->dev, &dev_attr_vbus)) 676 if (device_create_file(&pdev->dev, &dev_attr_vbus))
694 dev_warn(&pdev->dev, "could not create sysfs file\n"); 677 dev_warn(&pdev->dev, "could not create sysfs file\n");
695 678
679 BLOCKING_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
680
696 /* Our job is to use irqs and status from the power module 681 /* Our job is to use irqs and status from the power module
697 * to keep the transceiver disabled when nothing's connected. 682 * to keep the transceiver disabled when nothing's connected.
698 * 683 *
@@ -702,7 +687,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
702 * need both handles, otherwise just one suffices. 687 * need both handles, otherwise just one suffices.
703 */ 688 */
704 twl->irq_enabled = true; 689 twl->irq_enabled = true;
705 status = request_irq(twl->irq, twl4030_usb_irq, 690 status = request_threaded_irq(twl->irq, NULL, twl4030_usb_irq,
706 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, 691 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
707 "twl4030_usb", twl); 692 "twl4030_usb", twl);
708 if (status < 0) { 693 if (status < 0) {
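Rather than calling twl4030charger_usb_en() directly, the transceiver now broadcasts USB_EVENT_* codes on its OTG notifier chain, so any interested driver (a charger, for instance) can subscribe. A hedged sketch of what a subscriber might look like; the callback and its body are illustrative and not part of this patch:

	#include <linux/notifier.h>
	#include <linux/usb/otg.h>

	static int example_usb_event(struct notifier_block *nb,
				     unsigned long event, void *gadget)
	{
		switch (event) {
		case USB_EVENT_VBUS:
			/* VBUS present: start drawing/charging current */
			break;
		case USB_EVENT_ID:
			/* ID grounded: we are the A-device, supply VBUS instead */
			break;
		case USB_EVENT_NONE:
			/* cable gone: stop drawing current */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block example_usb_nb = {
		.notifier_call = example_usb_event,
	};

	/* in probe, given a struct otg_transceiver *otg from otg_get_transceiver():
	 *	blocking_notifier_chain_register(&otg->notifier, &example_usb_nb);
	 */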
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index c480ea4c19f2..c78b255e3f83 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -472,6 +472,17 @@ config USB_SERIAL_OTI6858
472 To compile this driver as a module, choose M here: the 472 To compile this driver as a module, choose M here: the
473 module will be called oti6858. 473 module will be called oti6858.
474 474
475config USB_SERIAL_QCAUX
476 tristate "USB Qualcomm Auxiliary Serial Port Driver"
477 ---help---
478 Say Y here if you want to use the auxiliary serial ports provided
479 by many modems based on Qualcomm chipsets. These ports often use
480 a proprietary protocol called DM and cannot be used for AT- or
481 PPP-based communication.
482
483 To compile this driver as a module, choose M here: the
                                                             484	  module will be called qcaux. If unsure, choose N.
485
475config USB_SERIAL_QUALCOMM 486config USB_SERIAL_QUALCOMM
476 tristate "USB Qualcomm Serial modem" 487 tristate "USB Qualcomm Serial modem"
477 help 488 help
@@ -600,6 +611,14 @@ config USB_SERIAL_OPTICON
600 To compile this driver as a module, choose M here: the 611 To compile this driver as a module, choose M here: the
601 module will be called opticon. 612 module will be called opticon.
602 613
614config USB_SERIAL_VIVOPAY_SERIAL
615 tristate "USB ViVOpay serial interface driver"
616 help
617 Say Y here if you want to use a ViVOtech ViVOpay USB device.
618
619 To compile this driver as a module, choose M here: the
620 module will be called vivopay-serial.
621
603config USB_SERIAL_DEBUG 622config USB_SERIAL_DEBUG
604 tristate "USB Debugging Device" 623 tristate "USB Debugging Device"
605 help 624 help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 66619beb6cc0..83c9e431a568 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o
45obj-$(CONFIG_USB_SERIAL_OPTION) += option.o 45obj-$(CONFIG_USB_SERIAL_OPTION) += option.o
46obj-$(CONFIG_USB_SERIAL_OTI6858) += oti6858.o 46obj-$(CONFIG_USB_SERIAL_OTI6858) += oti6858.o
47obj-$(CONFIG_USB_SERIAL_PL2303) += pl2303.o 47obj-$(CONFIG_USB_SERIAL_PL2303) += pl2303.o
48obj-$(CONFIG_USB_SERIAL_QCAUX) += qcaux.o
48obj-$(CONFIG_USB_SERIAL_QUALCOMM) += qcserial.o 49obj-$(CONFIG_USB_SERIAL_QUALCOMM) += qcserial.o
49obj-$(CONFIG_USB_SERIAL_SAFE) += safe_serial.o 50obj-$(CONFIG_USB_SERIAL_SAFE) += safe_serial.o
50obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o 51obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o
@@ -55,4 +56,5 @@ obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o
55obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o 56obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o
56obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o 57obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o
57obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o 58obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o
59obj-$(CONFIG_USB_SERIAL_VIVOPAY_SERIAL) += vivopay-serial.o
58 60
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b10ac8409411..365db1097bfd 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -78,7 +78,7 @@ static int debug;
78#define DRIVER_DESC "AIRcable USB Driver" 78#define DRIVER_DESC "AIRcable USB Driver"
79 79
80/* ID table that will be registered with USB core */ 80/* ID table that will be registered with USB core */
81static struct usb_device_id id_table [] = { 81static const struct usb_device_id id_table[] = {
82 { USB_DEVICE(AIRCABLE_VID, AIRCABLE_USB_PID) }, 82 { USB_DEVICE(AIRCABLE_VID, AIRCABLE_USB_PID) },
83 { }, 83 { },
84}; 84};
@@ -468,10 +468,6 @@ static void aircable_read_bulk_callback(struct urb *urb)
468 468
469 if (status) { 469 if (status) {
470 dbg("%s - urb status = %d", __func__, status); 470 dbg("%s - urb status = %d", __func__, status);
471 if (!port->port.count) {
472 dbg("%s - port is closed, exiting.", __func__);
473 return;
474 }
475 if (status == -EPROTO) { 471 if (status == -EPROTO) {
476 dbg("%s - caught -EPROTO, resubmitting the urb", 472 dbg("%s - caught -EPROTO, resubmitting the urb",
477 __func__); 473 __func__);
@@ -530,23 +526,19 @@ static void aircable_read_bulk_callback(struct urb *urb)
530 } 526 }
531 tty_kref_put(tty); 527 tty_kref_put(tty);
532 528
533 /* Schedule the next read _if_ we are still open */ 529 /* Schedule the next read */
534 if (port->port.count) { 530 usb_fill_bulk_urb(port->read_urb, port->serial->dev,
535 usb_fill_bulk_urb(port->read_urb, port->serial->dev, 531 usb_rcvbulkpipe(port->serial->dev,
536 usb_rcvbulkpipe(port->serial->dev, 532 port->bulk_in_endpointAddress),
537 port->bulk_in_endpointAddress), 533 port->read_urb->transfer_buffer,
538 port->read_urb->transfer_buffer, 534 port->read_urb->transfer_buffer_length,
539 port->read_urb->transfer_buffer_length, 535 aircable_read_bulk_callback, port);
540 aircable_read_bulk_callback, port); 536
541 537 result = usb_submit_urb(urb, GFP_ATOMIC);
542 result = usb_submit_urb(urb, GFP_ATOMIC); 538 if (result && result != -EPERM)
543 if (result) 539 dev_err(&urb->dev->dev,
544 dev_err(&urb->dev->dev, 540 "%s - failed resubmitting read urb, error %d\n",
545 "%s - failed resubmitting read urb, error %d\n", 541 __func__, result);
546 __func__, result);
547 }
548
549 return;
550} 542}
551 543
552/* Based on ftdi_sio.c throttle */ 544/* Based on ftdi_sio.c throttle */
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index a9c2dec8e3fb..547c9448c28c 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -50,7 +50,7 @@ static int debug;
50/* usb timeout of 1 second */ 50/* usb timeout of 1 second */
51#define ARK_TIMEOUT (1*HZ) 51#define ARK_TIMEOUT (1*HZ)
52 52
53static struct usb_device_id id_table [] = { 53static const struct usb_device_id id_table[] = {
54 { USB_DEVICE(0x6547, 0x0232) }, 54 { USB_DEVICE(0x6547, 0x0232) },
55 { USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */ 55 { USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */
56 { }, 56 { },
@@ -733,7 +733,6 @@ static void ark3116_read_bulk_callback(struct urb *urb)
733 733
734 tty = tty_port_tty_get(&port->port); 734 tty = tty_port_tty_get(&port->port);
735 if (tty) { 735 if (tty) {
736 tty_buffer_request_room(tty, urb->actual_length + 1);
737 /* overrun is special, not associated with a char */ 736 /* overrun is special, not associated with a char */
738 if (unlikely(lsr & UART_LSR_OE)) 737 if (unlikely(lsr & UART_LSR_OE))
739 tty_insert_flip_char(tty, 0, TTY_OVERRUN); 738 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index a0467bc61627..1295e44e3f1c 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -103,7 +103,7 @@ static int belkin_sa_tiocmset(struct tty_struct *tty, struct file *file,
103 unsigned int set, unsigned int clear); 103 unsigned int set, unsigned int clear);
104 104
105 105
106static struct usb_device_id id_table_combined [] = { 106static const struct usb_device_id id_table_combined[] = {
107 { USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) }, 107 { USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) },
108 { USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) }, 108 { USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) },
109 { USB_DEVICE(PERACOM_VID, PERACOM_PID) }, 109 { USB_DEVICE(PERACOM_VID, PERACOM_PID) },
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 59eff721fcc5..9f4fed1968b5 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -22,6 +22,7 @@
22#include <linux/usb.h> 22#include <linux/usb.h>
23#include <linux/usb/serial.h> 23#include <linux/usb/serial.h>
24#include <linux/serial.h> 24#include <linux/serial.h>
25#include <asm/unaligned.h>
25 26
26#define DEFAULT_BAUD_RATE 9600 27#define DEFAULT_BAUD_RATE 9600
27#define DEFAULT_TIMEOUT 1000 28#define DEFAULT_TIMEOUT 1000
@@ -70,7 +71,7 @@
70 71
71static int debug; 72static int debug;
72 73
73static struct usb_device_id id_table [] = { 74static const struct usb_device_id id_table[] = {
74 { USB_DEVICE(0x4348, 0x5523) }, 75 { USB_DEVICE(0x4348, 0x5523) },
75 { USB_DEVICE(0x1a86, 0x7523) }, 76 { USB_DEVICE(0x1a86, 0x7523) },
76 { }, 77 { },
@@ -392,16 +393,22 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
392 struct usb_serial_port *port = tty->driver_data; 393 struct usb_serial_port *port = tty->driver_data;
393 int r; 394 int r;
394 uint16_t reg_contents; 395 uint16_t reg_contents;
395 uint8_t break_reg[2]; 396 uint8_t *break_reg;
396 397
397 dbg("%s()", __func__); 398 dbg("%s()", __func__);
398 399
400 break_reg = kmalloc(2, GFP_KERNEL);
401 if (!break_reg) {
402 dev_err(&port->dev, "%s - kmalloc failed\n", __func__);
403 return;
404 }
405
399 r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG, 406 r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG,
400 ch341_break_reg, 0, break_reg, sizeof(break_reg)); 407 ch341_break_reg, 0, break_reg, 2);
401 if (r < 0) { 408 if (r < 0) {
402 printk(KERN_WARNING "%s: USB control read error whilst getting" 409 dev_err(&port->dev, "%s - USB control read error (%d)\n",
403 " break register contents.\n", __FILE__); 410 __func__, r);
404 return; 411 goto out;
405 } 412 }
406 dbg("%s - initial ch341 break register contents - reg1: %x, reg2: %x", 413 dbg("%s - initial ch341 break register contents - reg1: %x, reg2: %x",
407 __func__, break_reg[0], break_reg[1]); 414 __func__, break_reg[0], break_reg[1]);
@@ -416,12 +423,14 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
416 } 423 }
417 dbg("%s - New ch341 break register contents - reg1: %x, reg2: %x", 424 dbg("%s - New ch341 break register contents - reg1: %x, reg2: %x",
418 __func__, break_reg[0], break_reg[1]); 425 __func__, break_reg[0], break_reg[1]);
419 reg_contents = (uint16_t)break_reg[0] | ((uint16_t)break_reg[1] << 8); 426 reg_contents = get_unaligned_le16(break_reg);
420 r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG, 427 r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG,
421 ch341_break_reg, reg_contents); 428 ch341_break_reg, reg_contents);
422 if (r < 0) 429 if (r < 0)
423 printk(KERN_WARNING "%s: USB control write error whilst setting" 430 dev_err(&port->dev, "%s - USB control write error (%d)\n",
424 " break register contents.\n", __FILE__); 431 __func__, r);
432out:
433 kfree(break_reg);
425} 434}
426 435
427static int ch341_tiocmset(struct tty_struct *tty, struct file *file, 436static int ch341_tiocmset(struct tty_struct *tty, struct file *file,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index bd254ec97d14..507382b0a9ed 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -55,7 +55,7 @@ static int cp210x_carrier_raised(struct usb_serial_port *p);
55 55
56static int debug; 56static int debug;
57 57
58static struct usb_device_id id_table [] = { 58static const struct usb_device_id id_table[] = {
59 { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ 59 { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
60 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ 60 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
61 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ 61 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
@@ -91,11 +91,12 @@ static struct usb_device_id id_table [] = {
91 { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ 91 { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
92 { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */ 92 { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
93 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ 93 { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
94 { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
94 { USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */ 95 { USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */
95 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ 96 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
96 { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */ 97 { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
97 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demostration module */ 98 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demostration module */
98 { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ 99 { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesys ETRX2USB */
99 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ 100 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
100 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ 101 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
101 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ 102 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -612,7 +613,7 @@ static void cp210x_set_termios(struct tty_struct *tty,
612 baud); 613 baud);
613 if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV, 614 if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV,
614 ((BAUD_RATE_GEN_FREQ + baud/2) / baud))) { 615 ((BAUD_RATE_GEN_FREQ + baud/2) / baud))) {
615 dbg("Baud rate requested not supported by device\n"); 616 dbg("Baud rate requested not supported by device");
616 baud = tty_termios_baud_rate(old_termios); 617 baud = tty_termios_baud_rate(old_termios);
617 } 618 }
618 } 619 }
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index b0f6402a91ca..f744ab7a3b19 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -70,7 +70,7 @@ static void cyberjack_read_int_callback(struct urb *urb);
70static void cyberjack_read_bulk_callback(struct urb *urb); 70static void cyberjack_read_bulk_callback(struct urb *urb);
71static void cyberjack_write_bulk_callback(struct urb *urb); 71static void cyberjack_write_bulk_callback(struct urb *urb);
72 72
73static struct usb_device_id id_table [] = { 73static const struct usb_device_id id_table[] = {
74 { USB_DEVICE(CYBERJACK_VENDOR_ID, CYBERJACK_PRODUCT_ID) }, 74 { USB_DEVICE(CYBERJACK_VENDOR_ID, CYBERJACK_PRODUCT_ID) },
75 { } /* Terminating entry */ 75 { } /* Terminating entry */
76}; 76};
@@ -391,11 +391,10 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
391 391
392 tty = tty_port_tty_get(&port->port); 392 tty = tty_port_tty_get(&port->port);
393 if (!tty) { 393 if (!tty) {
394 dbg("%s - ignoring since device not open\n", __func__); 394 dbg("%s - ignoring since device not open", __func__);
395 return; 395 return;
396 } 396 }
397 if (urb->actual_length) { 397 if (urb->actual_length) {
398 tty_buffer_request_room(tty, urb->actual_length);
399 tty_insert_flip_string(tty, data, urb->actual_length); 398 tty_insert_flip_string(tty, data, urb->actual_length);
400 tty_flip_buffer_push(tty); 399 tty_flip_buffer_push(tty);
401 } 400 }
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index a591ebec0f89..baf74b44e6ed 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -66,17 +66,15 @@
66#include <linux/serial.h> 66#include <linux/serial.h>
67#include <linux/delay.h> 67#include <linux/delay.h>
68#include <linux/uaccess.h> 68#include <linux/uaccess.h>
69#include <asm/unaligned.h>
69 70
70#include "cypress_m8.h" 71#include "cypress_m8.h"
71 72
72 73
73#ifdef CONFIG_USB_SERIAL_DEBUG 74static int debug;
74 static int debug = 1;
75#else
76 static int debug;
77#endif
78static int stats; 75static int stats;
79static int interval; 76static int interval;
77static int unstable_bauds;
80 78
81/* 79/*
82 * Version Information 80 * Version Information
@@ -89,24 +87,24 @@ static int interval;
89#define CYPRESS_BUF_SIZE 1024 87#define CYPRESS_BUF_SIZE 1024
90#define CYPRESS_CLOSING_WAIT (30*HZ) 88#define CYPRESS_CLOSING_WAIT (30*HZ)
91 89
92static struct usb_device_id id_table_earthmate [] = { 90static const struct usb_device_id id_table_earthmate[] = {
93 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, 91 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
94 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, 92 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
95 { } /* Terminating entry */ 93 { } /* Terminating entry */
96}; 94};
97 95
98static struct usb_device_id id_table_cyphidcomrs232 [] = { 96static const struct usb_device_id id_table_cyphidcomrs232[] = {
99 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, 97 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
100 { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, 98 { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
101 { } /* Terminating entry */ 99 { } /* Terminating entry */
102}; 100};
103 101
104static struct usb_device_id id_table_nokiaca42v2 [] = { 102static const struct usb_device_id id_table_nokiaca42v2[] = {
105 { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, 103 { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
106 { } /* Terminating entry */ 104 { } /* Terminating entry */
107}; 105};
108 106
109static struct usb_device_id id_table_combined [] = { 107static const struct usb_device_id id_table_combined[] = {
110 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, 108 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
111 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, 109 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
112 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, 110 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
@@ -295,6 +293,9 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
295 struct cypress_private *priv; 293 struct cypress_private *priv;
296 priv = usb_get_serial_port_data(port); 294 priv = usb_get_serial_port_data(port);
297 295
296 if (unstable_bauds)
297 return new_rate;
298
298 /* 299 /*
299 * The general purpose firmware for the Cypress M8 allows for 300 * The general purpose firmware for the Cypress M8 allows for
300 * a maximum speed of 57600bps (I have no idea whether DeLorme 301 * a maximum speed of 57600bps (I have no idea whether DeLorme
@@ -344,7 +345,8 @@ static int cypress_serial_control(struct tty_struct *tty,
344{ 345{
345 int new_baudrate = 0, retval = 0, tries = 0; 346 int new_baudrate = 0, retval = 0, tries = 0;
346 struct cypress_private *priv; 347 struct cypress_private *priv;
347 __u8 feature_buffer[5]; 348 u8 *feature_buffer;
349 const unsigned int feature_len = 5;
348 unsigned long flags; 350 unsigned long flags;
349 351
350 dbg("%s", __func__); 352 dbg("%s", __func__);
@@ -354,17 +356,18 @@ static int cypress_serial_control(struct tty_struct *tty,
354 if (!priv->comm_is_ok) 356 if (!priv->comm_is_ok)
355 return -ENODEV; 357 return -ENODEV;
356 358
359 feature_buffer = kcalloc(feature_len, sizeof(u8), GFP_KERNEL);
360 if (!feature_buffer)
361 return -ENOMEM;
362
357 switch (cypress_request_type) { 363 switch (cypress_request_type) {
358 case CYPRESS_SET_CONFIG: 364 case CYPRESS_SET_CONFIG:
359 new_baudrate = priv->baud_rate;
360 /* 0 means 'Hang up' so doesn't change the true bit rate */ 365 /* 0 means 'Hang up' so doesn't change the true bit rate */
361 if (baud_rate == 0) 366 new_baudrate = priv->baud_rate;
362 new_baudrate = priv->baud_rate; 367 if (baud_rate && baud_rate != priv->baud_rate) {
363 /* Change of speed ? */
364 else if (baud_rate != priv->baud_rate) {
365 dbg("%s - baud rate is changing", __func__); 368 dbg("%s - baud rate is changing", __func__);
366 retval = analyze_baud_rate(port, baud_rate); 369 retval = analyze_baud_rate(port, baud_rate);
367 if (retval >= 0) { 370 if (retval >= 0) {
368 new_baudrate = retval; 371 new_baudrate = retval;
369 dbg("%s - New baud rate set to %d", 372 dbg("%s - New baud rate set to %d",
370 __func__, new_baudrate); 373 __func__, new_baudrate);
@@ -373,9 +376,8 @@ static int cypress_serial_control(struct tty_struct *tty,
373 dbg("%s - baud rate is being sent as %d", 376 dbg("%s - baud rate is being sent as %d",
374 __func__, new_baudrate); 377 __func__, new_baudrate);
375 378
376 memset(feature_buffer, 0, sizeof(feature_buffer));
377 /* fill the feature_buffer with new configuration */ 379 /* fill the feature_buffer with new configuration */
378 *((u_int32_t *)feature_buffer) = new_baudrate; 380 put_unaligned_le32(new_baudrate, feature_buffer);
379 feature_buffer[4] |= data_bits; /* assign data bits in 2 bit space ( max 3 ) */ 381 feature_buffer[4] |= data_bits; /* assign data bits in 2 bit space ( max 3 ) */
380 /* 1 bit gap */ 382 /* 1 bit gap */
381 feature_buffer[4] |= (stop_bits << 3); /* assign stop bits in 1 bit space */ 383 feature_buffer[4] |= (stop_bits << 3); /* assign stop bits in 1 bit space */
@@ -397,15 +399,15 @@ static int cypress_serial_control(struct tty_struct *tty,
397 HID_REQ_SET_REPORT, 399 HID_REQ_SET_REPORT,
398 USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS, 400 USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
399 0x0300, 0, feature_buffer, 401 0x0300, 0, feature_buffer,
400 sizeof(feature_buffer), 500); 402 feature_len, 500);
401 403
402 if (tries++ >= 3) 404 if (tries++ >= 3)
403 break; 405 break;
404 406
405 } while (retval != sizeof(feature_buffer) && 407 } while (retval != feature_len &&
406 retval != -ENODEV); 408 retval != -ENODEV);
407 409
408 if (retval != sizeof(feature_buffer)) { 410 if (retval != feature_len) {
409 dev_err(&port->dev, "%s - failed sending serial " 411 dev_err(&port->dev, "%s - failed sending serial "
410 "line settings - %d\n", __func__, retval); 412 "line settings - %d\n", __func__, retval);
411 cypress_set_dead(port); 413 cypress_set_dead(port);
@@ -425,43 +427,42 @@ static int cypress_serial_control(struct tty_struct *tty,
425 /* Not implemented for this device, 427 /* Not implemented for this device,
426 and if we try to do it we're likely 428 and if we try to do it we're likely
427 to crash the hardware. */ 429 to crash the hardware. */
428 return -ENOTTY; 430 retval = -ENOTTY;
431 goto out;
429 } 432 }
430 dbg("%s - retreiving serial line settings", __func__); 433 dbg("%s - retreiving serial line settings", __func__);
431 /* set initial values in feature buffer */
432 memset(feature_buffer, 0, sizeof(feature_buffer));
433
434 do { 434 do {
435 retval = usb_control_msg(port->serial->dev, 435 retval = usb_control_msg(port->serial->dev,
436 usb_rcvctrlpipe(port->serial->dev, 0), 436 usb_rcvctrlpipe(port->serial->dev, 0),
437 HID_REQ_GET_REPORT, 437 HID_REQ_GET_REPORT,
438 USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_CLASS, 438 USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
439 0x0300, 0, feature_buffer, 439 0x0300, 0, feature_buffer,
440 sizeof(feature_buffer), 500); 440 feature_len, 500);
441 441
442 if (tries++ >= 3) 442 if (tries++ >= 3)
443 break; 443 break;
444 } while (retval != sizeof(feature_buffer) 444 } while (retval != feature_len
445 && retval != -ENODEV); 445 && retval != -ENODEV);
446 446
447 if (retval != sizeof(feature_buffer)) { 447 if (retval != feature_len) {
448 dev_err(&port->dev, "%s - failed to retrieve serial " 448 dev_err(&port->dev, "%s - failed to retrieve serial "
449 "line settings - %d\n", __func__, retval); 449 "line settings - %d\n", __func__, retval);
450 cypress_set_dead(port); 450 cypress_set_dead(port);
451 return retval; 451 goto out;
452 } else { 452 } else {
453 spin_lock_irqsave(&priv->lock, flags); 453 spin_lock_irqsave(&priv->lock, flags);
454 /* store the config in one byte, and later 454 /* store the config in one byte, and later
455 use bit masks to check values */ 455 use bit masks to check values */
456 priv->current_config = feature_buffer[4]; 456 priv->current_config = feature_buffer[4];
457 priv->baud_rate = *((u_int32_t *)feature_buffer); 457 priv->baud_rate = get_unaligned_le32(feature_buffer);
458 spin_unlock_irqrestore(&priv->lock, flags); 458 spin_unlock_irqrestore(&priv->lock, flags);
459 } 459 }
460 } 460 }
461 spin_lock_irqsave(&priv->lock, flags); 461 spin_lock_irqsave(&priv->lock, flags);
462 ++priv->cmd_count; 462 ++priv->cmd_count;
463 spin_unlock_irqrestore(&priv->lock, flags); 463 spin_unlock_irqrestore(&priv->lock, flags);
464 464out:
465 kfree(feature_buffer);
465 return retval; 466 return retval;
466} /* cypress_serial_control */ 467} /* cypress_serial_control */
467 468
@@ -690,7 +691,6 @@ static void cypress_dtr_rts(struct usb_serial_port *port, int on)
690{ 691{
691 struct cypress_private *priv = usb_get_serial_port_data(port); 692 struct cypress_private *priv = usb_get_serial_port_data(port);
692 /* drop dtr and rts */ 693 /* drop dtr and rts */
693 priv = usb_get_serial_port_data(port);
694 spin_lock_irq(&priv->lock); 694 spin_lock_irq(&priv->lock);
695 if (on == 0) 695 if (on == 0)
696 priv->line_control = 0; 696 priv->line_control = 0;
@@ -1307,13 +1307,9 @@ static void cypress_read_int_callback(struct urb *urb)
1307 spin_unlock_irqrestore(&priv->lock, flags); 1307 spin_unlock_irqrestore(&priv->lock, flags);
1308 1308
1309 /* process read if there is data other than line status */ 1309 /* process read if there is data other than line status */
1310 if (tty && (bytes > i)) { 1310 if (tty && bytes > i) {
1311 bytes = tty_buffer_request_room(tty, bytes); 1311 tty_insert_flip_string_fixed_flag(tty, data + i,
1312 for (; i < bytes ; ++i) { 1312 bytes - i, tty_flag);
1313 dbg("pushing byte number %d - %d - %c", i, data[i],
1314 data[i]);
1315 tty_insert_flip_char(tty, data[i], tty_flag);
1316 }
1317 tty_flip_buffer_push(tty); 1313 tty_flip_buffer_push(tty);
1318 } 1314 }
1319 1315
@@ -1325,9 +1321,9 @@ static void cypress_read_int_callback(struct urb *urb)
1325continue_read: 1321continue_read:
1326 tty_kref_put(tty); 1322 tty_kref_put(tty);
1327 1323
1328 /* Continue trying to always read... unless the port has closed. */ 1324 /* Continue trying to always read */
1329 1325
1330 if (port->port.count > 0 && priv->comm_is_ok) { 1326 if (priv->comm_is_ok) {
1331 usb_fill_int_urb(port->interrupt_in_urb, port->serial->dev, 1327 usb_fill_int_urb(port->interrupt_in_urb, port->serial->dev,
1332 usb_rcvintpipe(port->serial->dev, 1328 usb_rcvintpipe(port->serial->dev,
1333 port->interrupt_in_endpointAddress), 1329 port->interrupt_in_endpointAddress),
@@ -1336,7 +1332,7 @@ continue_read:
1336 cypress_read_int_callback, port, 1332 cypress_read_int_callback, port,
1337 priv->read_urb_interval); 1333 priv->read_urb_interval);
1338 result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); 1334 result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
1339 if (result) { 1335 if (result && result != -EPERM) {
1340 dev_err(&urb->dev->dev, "%s - failed resubmitting " 1336 dev_err(&urb->dev->dev, "%s - failed resubmitting "
1341 "read urb, error %d\n", __func__, 1337 "read urb, error %d\n", __func__,
1342 result); 1338 result);
@@ -1650,3 +1646,5 @@ module_param(stats, bool, S_IRUGO | S_IWUSR);
1650MODULE_PARM_DESC(stats, "Enable statistics or not"); 1646MODULE_PARM_DESC(stats, "Enable statistics or not");
1651module_param(interval, int, S_IRUGO | S_IWUSR); 1647module_param(interval, int, S_IRUGO | S_IWUSR);
1652MODULE_PARM_DESC(interval, "Overrides interrupt interval"); 1648MODULE_PARM_DESC(interval, "Overrides interrupt interval");
1649module_param(unstable_bauds, bool, S_IRUGO | S_IWUSR);
1650MODULE_PARM_DESC(unstable_bauds, "Allow unstable baud rates");
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 68e80be6b9e1..68b0aa5e516c 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -470,18 +470,18 @@ static int digi_read_oob_callback(struct urb *urb);
470 470
471static int debug; 471static int debug;
472 472
473static struct usb_device_id id_table_combined [] = { 473static const struct usb_device_id id_table_combined[] = {
474 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) }, 474 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
475 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) }, 475 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
476 { } /* Terminating entry */ 476 { } /* Terminating entry */
477}; 477};
478 478
479static struct usb_device_id id_table_2 [] = { 479static const struct usb_device_id id_table_2[] = {
480 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) }, 480 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
481 { } /* Terminating entry */ 481 { } /* Terminating entry */
482}; 482};
483 483
484static struct usb_device_id id_table_4 [] = { 484static const struct usb_device_id id_table_4[] = {
485 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) }, 485 { USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
486 { } /* Terminating entry */ 486 { } /* Terminating entry */
487}; 487};
@@ -1262,10 +1262,10 @@ static void digi_write_bulk_callback(struct urb *urb)
1262 return; 1262 return;
1263 } 1263 }
1264 1264
1265 /* try to send any buffered data on this port, if it is open */ 1265 /* try to send any buffered data on this port */
1266 spin_lock(&priv->dp_port_lock); 1266 spin_lock(&priv->dp_port_lock);
1267 priv->dp_write_urb_in_use = 0; 1267 priv->dp_write_urb_in_use = 0;
1268 if (port->port.count && priv->dp_out_buf_len > 0) { 1268 if (priv->dp_out_buf_len > 0) {
1269 *((unsigned char *)(port->write_urb->transfer_buffer)) 1269 *((unsigned char *)(port->write_urb->transfer_buffer))
1270 = (unsigned char)DIGI_CMD_SEND_DATA; 1270 = (unsigned char)DIGI_CMD_SEND_DATA;
1271 *((unsigned char *)(port->write_urb->transfer_buffer) + 1) 1271 *((unsigned char *)(port->write_urb->transfer_buffer) + 1)
@@ -1288,7 +1288,7 @@ static void digi_write_bulk_callback(struct urb *urb)
1288 schedule_work(&priv->dp_wakeup_work); 1288 schedule_work(&priv->dp_wakeup_work);
1289 1289
1290 spin_unlock(&priv->dp_port_lock); 1290 spin_unlock(&priv->dp_port_lock);
1291 if (ret) 1291 if (ret && ret != -EPERM)
1292 dev_err(&port->dev, 1292 dev_err(&port->dev,
1293 "%s: usb_submit_urb failed, ret=%d, port=%d\n", 1293 "%s: usb_submit_urb failed, ret=%d, port=%d\n",
1294 __func__, ret, priv->dp_port_num); 1294 __func__, ret, priv->dp_port_num);
@@ -1353,8 +1353,7 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
1353 struct digi_port *priv = usb_get_serial_port_data(port); 1353 struct digi_port *priv = usb_get_serial_port_data(port);
1354 struct ktermios not_termios; 1354 struct ktermios not_termios;
1355 1355
1356 dbg("digi_open: TOP: port=%d, open_count=%d", 1356 dbg("digi_open: TOP: port=%d", priv->dp_port_num);
1357 priv->dp_port_num, port->port.count);
1358 1357
1359 /* be sure the device is started up */ 1358 /* be sure the device is started up */
1360 if (digi_startup_device(port->serial) != 0) 1359 if (digi_startup_device(port->serial) != 0)
@@ -1393,8 +1392,7 @@ static void digi_close(struct usb_serial_port *port)
1393 unsigned char buf[32]; 1392 unsigned char buf[32];
1394 struct digi_port *priv = usb_get_serial_port_data(port); 1393 struct digi_port *priv = usb_get_serial_port_data(port);
1395 1394
1396 dbg("digi_close: TOP: port=%d, open_count=%d", 1395 dbg("digi_close: TOP: port=%d", priv->dp_port_num);
1397 priv->dp_port_num, port->port.count);
1398 1396
1399 mutex_lock(&port->serial->disc_mutex); 1397 mutex_lock(&port->serial->disc_mutex);
1400 /* if disconnected, just clear flags */ 1398 /* if disconnected, just clear flags */
@@ -1629,7 +1627,7 @@ static void digi_read_bulk_callback(struct urb *urb)
1629 /* continue read */ 1627 /* continue read */
1630 urb->dev = port->serial->dev; 1628 urb->dev = port->serial->dev;
1631 ret = usb_submit_urb(urb, GFP_ATOMIC); 1629 ret = usb_submit_urb(urb, GFP_ATOMIC);
1632 if (ret != 0) { 1630 if (ret != 0 && ret != -EPERM) {
1633 dev_err(&port->dev, 1631 dev_err(&port->dev,
1634 "%s: failed resubmitting urb, ret=%d, port=%d\n", 1632 "%s: failed resubmitting urb, ret=%d, port=%d\n",
1635 __func__, ret, priv->dp_port_num); 1633 __func__, ret, priv->dp_port_num);
@@ -1658,12 +1656,11 @@ static int digi_read_inb_callback(struct urb *urb)
1658 int port_status = ((unsigned char *)urb->transfer_buffer)[2]; 1656 int port_status = ((unsigned char *)urb->transfer_buffer)[2];
1659 unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3; 1657 unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3;
1660 int flag, throttled; 1658 int flag, throttled;
1661 int i;
1662 int status = urb->status; 1659 int status = urb->status;
1663 1660
1664 /* do not process callbacks on closed ports */ 1661 /* do not process callbacks on closed ports */
1665 /* but do continue the read chain */ 1662 /* but do continue the read chain */
1666 if (port->port.count == 0) 1663 if (urb->status == -ENOENT)
1667 return 0; 1664 return 0;
1668 1665
1669 /* short/multiple packet check */ 1666 /* short/multiple packet check */
@@ -1705,17 +1702,9 @@ static int digi_read_inb_callback(struct urb *urb)
1705 1702
1706 /* data length is len-1 (one byte of len is port_status) */ 1703 /* data length is len-1 (one byte of len is port_status) */
1707 --len; 1704 --len;
1708
1709 len = tty_buffer_request_room(tty, len);
1710 if (len > 0) { 1705 if (len > 0) {
1711 /* Hot path */ 1706 tty_insert_flip_string_fixed_flag(tty, data, len,
1712 if (flag == TTY_NORMAL) 1707 flag);
1713 tty_insert_flip_string(tty, data, len);
1714 else {
1715 for (i = 0; i < len; i++)
1716 tty_insert_flip_char(tty,
1717 data[i], flag);
1718 }
1719 tty_flip_buffer_push(tty); 1708 tty_flip_buffer_push(tty);
1720 } 1709 }
1721 } 1710 }
@@ -1776,8 +1765,7 @@ static int digi_read_oob_callback(struct urb *urb)
1776 1765
1777 tty = tty_port_tty_get(&port->port); 1766 tty = tty_port_tty_get(&port->port);
1778 rts = 0; 1767 rts = 0;
1779 if (port->port.count) 1768 rts = tty->termios->c_cflag & CRTSCTS;
1780 rts = tty->termios->c_cflag & CRTSCTS;
1781 1769
1782 if (opcode == DIGI_CMD_READ_INPUT_SIGNALS) { 1770 if (opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
1783 spin_lock(&priv->dp_port_lock); 1771 spin_lock(&priv->dp_port_lock);
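Throughout digi_acceleport.c the open-count tests on port->port.count are dropped: the URB callbacks now rely on the counted tty reference returned by tty_port_tty_get() (NULL once the port is closed) and on the URB status to decide what to do. A sketch of that callback shape; the function name and the direct use of the transfer buffer are illustrative only:

/* Illustrative callback skeleton: no open-count check, just a counted
 * tty reference that is NULL when nobody holds the port open. */
static void example_read_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct tty_struct *tty = tty_port_tty_get(&port->port);

	if (tty && urb->actual_length) {
		tty_insert_flip_string(tty, urb->transfer_buffer,
				       urb->actual_length);
		tty_flip_buffer_push(tty);
	}
	tty_kref_put(tty);		/* NULL-safe */
}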
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 7dd0e3eadbe6..5f740a1eacab 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -93,7 +93,7 @@ static void empeg_init_termios(struct tty_struct *tty);
93static void empeg_write_bulk_callback(struct urb *urb); 93static void empeg_write_bulk_callback(struct urb *urb);
94static void empeg_read_bulk_callback(struct urb *urb); 94static void empeg_read_bulk_callback(struct urb *urb);
95 95
96static struct usb_device_id id_table [] = { 96static const struct usb_device_id id_table[] = {
97 { USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) }, 97 { USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) },
98 { } /* Terminating entry */ 98 { } /* Terminating entry */
99}; 99};
@@ -346,7 +346,6 @@ static void empeg_read_bulk_callback(struct urb *urb)
346 tty = tty_port_tty_get(&port->port); 346 tty = tty_port_tty_get(&port->port);
347 347
348 if (urb->actual_length) { 348 if (urb->actual_length) {
349 tty_buffer_request_room(tty, urb->actual_length);
350 tty_insert_flip_string(tty, data, urb->actual_length); 349 tty_insert_flip_string(tty, data, urb->actual_length);
351 tty_flip_buffer_push(tty); 350 tty_flip_buffer_push(tty);
352 bytes_in += urb->actual_length; 351 bytes_in += urb->actual_length;
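The empeg hunk (and a matching one in garmin_gps.c below) removes the tty_buffer_request_room() call before tty_insert_flip_string(): the insert helper allocates flip-buffer space itself and returns how many bytes it actually queued, so the extra call only duplicated work. A minimal sketch of the receive path; the helper name and the debug print are illustrative:

/* tty_insert_flip_string() sizes the flip buffer itself and returns the
 * number of bytes it actually queued. */
static void example_push_rx(struct tty_struct *tty,
			    const unsigned char *data, int len)
{
	int queued = tty_insert_flip_string(tty, data, len);

	if (queued < len)
		pr_debug("dropping %d bytes\n", len - queued);
	tty_flip_buffer_push(tty);
}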
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7638828e7317..6af0dfa5f5ac 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -33,12 +33,12 @@
33#include <linux/errno.h> 33#include <linux/errno.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/smp_lock.h>
37#include <linux/tty.h> 36#include <linux/tty.h>
38#include <linux/tty_driver.h> 37#include <linux/tty_driver.h>
39#include <linux/tty_flip.h> 38#include <linux/tty_flip.h>
40#include <linux/module.h> 39#include <linux/module.h>
41#include <linux/spinlock.h> 40#include <linux/spinlock.h>
41#include <linux/mutex.h>
42#include <linux/uaccess.h> 42#include <linux/uaccess.h>
43#include <linux/usb.h> 43#include <linux/usb.h>
44#include <linux/serial.h> 44#include <linux/serial.h>
@@ -88,10 +88,10 @@ struct ftdi_private {
88 88
89 unsigned int latency; /* latency setting in use */ 89 unsigned int latency; /* latency setting in use */
90 spinlock_t tx_lock; /* spinlock for transmit state */ 90 spinlock_t tx_lock; /* spinlock for transmit state */
91 unsigned long tx_bytes;
92 unsigned long tx_outstanding_bytes; 91 unsigned long tx_outstanding_bytes;
93 unsigned long tx_outstanding_urbs; 92 unsigned long tx_outstanding_urbs;
94 unsigned short max_packet_size; 93 unsigned short max_packet_size;
94 struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() */
95}; 95};
96 96
97/* struct ftdi_sio_quirk is used by devices requiring special attention. */ 97/* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -614,6 +614,7 @@ static struct usb_device_id id_table_combined [] = {
614 { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) }, 614 { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
615 { USB_DEVICE(TTI_VID, TTI_QL355P_PID) }, 615 { USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
616 { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, 616 { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
617 { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
617 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, 618 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
618 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, 619 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
619 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, 620 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
@@ -737,6 +738,10 @@ static struct usb_device_id id_table_combined [] = {
737 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 738 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
738 { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, 739 { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
739 { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) }, 740 { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
741 { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
742 { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
743 { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) },
744 { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) },
740 { }, /* Optional parameter entry */ 745 { }, /* Optional parameter entry */
741 { } /* Terminating entry */ 746 { } /* Terminating entry */
742}; 747};
@@ -812,7 +817,7 @@ static struct usb_serial_driver ftdi_sio_device = {
812 .name = "ftdi_sio", 817 .name = "ftdi_sio",
813 }, 818 },
814 .description = "FTDI USB Serial Device", 819 .description = "FTDI USB Serial Device",
815 .usb_driver = &ftdi_driver , 820 .usb_driver = &ftdi_driver,
816 .id_table = id_table_combined, 821 .id_table = id_table_combined,
817 .num_ports = 1, 822 .num_ports = 1,
818 .probe = ftdi_sio_probe, 823 .probe = ftdi_sio_probe,
@@ -828,8 +833,8 @@ static struct usb_serial_driver ftdi_sio_device = {
828 .chars_in_buffer = ftdi_chars_in_buffer, 833 .chars_in_buffer = ftdi_chars_in_buffer,
829 .read_bulk_callback = ftdi_read_bulk_callback, 834 .read_bulk_callback = ftdi_read_bulk_callback,
830 .write_bulk_callback = ftdi_write_bulk_callback, 835 .write_bulk_callback = ftdi_write_bulk_callback,
831 .tiocmget = ftdi_tiocmget, 836 .tiocmget = ftdi_tiocmget,
832 .tiocmset = ftdi_tiocmset, 837 .tiocmset = ftdi_tiocmset,
833 .ioctl = ftdi_ioctl, 838 .ioctl = ftdi_ioctl,
834 .set_termios = ftdi_set_termios, 839 .set_termios = ftdi_set_termios,
835 .break_ctl = ftdi_break_ctl, 840 .break_ctl = ftdi_break_ctl,
@@ -935,7 +940,6 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
935 unsigned int clear) 940 unsigned int clear)
936{ 941{
937 struct ftdi_private *priv = usb_get_serial_port_data(port); 942 struct ftdi_private *priv = usb_get_serial_port_data(port);
938 char *buf;
939 unsigned urb_value; 943 unsigned urb_value;
940 int rv; 944 int rv;
941 945
@@ -944,10 +948,6 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
944 return 0; /* no change */ 948 return 0; /* no change */
945 } 949 }
946 950
947 buf = kmalloc(1, GFP_NOIO);
948 if (!buf)
949 return -ENOMEM;
950
951 clear &= ~set; /* 'set' takes precedence over 'clear' */ 951 clear &= ~set; /* 'set' takes precedence over 'clear' */
952 urb_value = 0; 952 urb_value = 0;
953 if (clear & TIOCM_DTR) 953 if (clear & TIOCM_DTR)
@@ -963,9 +963,7 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
963 FTDI_SIO_SET_MODEM_CTRL_REQUEST, 963 FTDI_SIO_SET_MODEM_CTRL_REQUEST,
964 FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE, 964 FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE,
965 urb_value, priv->interface, 965 urb_value, priv->interface,
966 buf, 0, WDR_TIMEOUT); 966 NULL, 0, WDR_TIMEOUT);
967
968 kfree(buf);
969 if (rv < 0) { 967 if (rv < 0) {
970 dbg("%s Error from MODEM_CTRL urb: DTR %s, RTS %s", 968 dbg("%s Error from MODEM_CTRL urb: DTR %s, RTS %s",
971 __func__, 969 __func__,
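This and most of the following ftdi_sio hunks drop a one-byte kmalloc()ed dummy buffer: the requests are zero-length control transfers, so there is no data stage and usb_control_msg() can simply be handed a NULL pointer. Pulled together from the hunk above, with udev standing in for port->serial->dev:

/* Zero-length control request: wLength is 0, so the data pointer can
 * be NULL instead of a throwaway allocation. */
rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
		     FTDI_SIO_SET_MODEM_CTRL_REQUEST,
		     FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE,
		     urb_value, priv->interface,
		     NULL, 0, WDR_TIMEOUT);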
@@ -1124,16 +1122,11 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
1124static int change_speed(struct tty_struct *tty, struct usb_serial_port *port) 1122static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
1125{ 1123{
1126 struct ftdi_private *priv = usb_get_serial_port_data(port); 1124 struct ftdi_private *priv = usb_get_serial_port_data(port);
1127 char *buf;
1128 __u16 urb_value; 1125 __u16 urb_value;
1129 __u16 urb_index; 1126 __u16 urb_index;
1130 __u32 urb_index_value; 1127 __u32 urb_index_value;
1131 int rv; 1128 int rv;
1132 1129
1133 buf = kmalloc(1, GFP_NOIO);
1134 if (!buf)
1135 return -ENOMEM;
1136
1137 urb_index_value = get_ftdi_divisor(tty, port); 1130 urb_index_value = get_ftdi_divisor(tty, port);
1138 urb_value = (__u16)urb_index_value; 1131 urb_value = (__u16)urb_index_value;
1139 urb_index = (__u16)(urb_index_value >> 16); 1132 urb_index = (__u16)(urb_index_value >> 16);
@@ -1146,9 +1139,7 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
1146 FTDI_SIO_SET_BAUDRATE_REQUEST, 1139 FTDI_SIO_SET_BAUDRATE_REQUEST,
1147 FTDI_SIO_SET_BAUDRATE_REQUEST_TYPE, 1140 FTDI_SIO_SET_BAUDRATE_REQUEST_TYPE,
1148 urb_value, urb_index, 1141 urb_value, urb_index,
1149 buf, 0, WDR_SHORT_TIMEOUT); 1142 NULL, 0, WDR_SHORT_TIMEOUT);
1150
1151 kfree(buf);
1152 return rv; 1143 return rv;
1153} 1144}
1154 1145
@@ -1156,8 +1147,7 @@ static int write_latency_timer(struct usb_serial_port *port)
1156{ 1147{
1157 struct ftdi_private *priv = usb_get_serial_port_data(port); 1148 struct ftdi_private *priv = usb_get_serial_port_data(port);
1158 struct usb_device *udev = port->serial->dev; 1149 struct usb_device *udev = port->serial->dev;
1159 char buf[1]; 1150 int rv;
1160 int rv = 0;
1161 int l = priv->latency; 1151 int l = priv->latency;
1162 1152
1163 if (priv->flags & ASYNC_LOW_LATENCY) 1153 if (priv->flags & ASYNC_LOW_LATENCY)
@@ -1170,8 +1160,7 @@ static int write_latency_timer(struct usb_serial_port *port)
1170 FTDI_SIO_SET_LATENCY_TIMER_REQUEST, 1160 FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
1171 FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE, 1161 FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
1172 l, priv->interface, 1162 l, priv->interface,
1173 buf, 0, WDR_TIMEOUT); 1163 NULL, 0, WDR_TIMEOUT);
1174
1175 if (rv < 0) 1164 if (rv < 0)
1176 dev_err(&port->dev, "Unable to write latency timer: %i\n", rv); 1165 dev_err(&port->dev, "Unable to write latency timer: %i\n", rv);
1177 return rv; 1166 return rv;
@@ -1181,24 +1170,29 @@ static int read_latency_timer(struct usb_serial_port *port)
1181{ 1170{
1182 struct ftdi_private *priv = usb_get_serial_port_data(port); 1171 struct ftdi_private *priv = usb_get_serial_port_data(port);
1183 struct usb_device *udev = port->serial->dev; 1172 struct usb_device *udev = port->serial->dev;
1184 unsigned short latency = 0; 1173 unsigned char *buf;
1185 int rv = 0; 1174 int rv;
1186
1187 1175
1188 dbg("%s", __func__); 1176 dbg("%s", __func__);
1189 1177
1178 buf = kmalloc(1, GFP_KERNEL);
1179 if (!buf)
1180 return -ENOMEM;
1181
1190 rv = usb_control_msg(udev, 1182 rv = usb_control_msg(udev,
1191 usb_rcvctrlpipe(udev, 0), 1183 usb_rcvctrlpipe(udev, 0),
1192 FTDI_SIO_GET_LATENCY_TIMER_REQUEST, 1184 FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
1193 FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE, 1185 FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
1194 0, priv->interface, 1186 0, priv->interface,
1195 (char *) &latency, 1, WDR_TIMEOUT); 1187 buf, 1, WDR_TIMEOUT);
1196 1188 if (rv < 0)
1197 if (rv < 0) {
1198 dev_err(&port->dev, "Unable to read latency timer: %i\n", rv); 1189 dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
1199 return -EIO; 1190 else
1200 } 1191 priv->latency = buf[0];
1201 return latency; 1192
1193 kfree(buf);
1194
1195 return rv;
1202} 1196}
1203 1197
1204static int get_serial_info(struct usb_serial_port *port, 1198static int get_serial_info(struct usb_serial_port *port,
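read_latency_timer() goes the other way: it actually reads a byte back, and a buffer handed to usb_control_msg() may be DMA-mapped, so it must come from the heap rather than the on-stack variable used before. A sketch of that shape, assuming a caller that only wants the raw byte; the in-tree version also passes priv->interface as wIndex and stores the result in priv->latency:

/* Read one register byte through a heap buffer; buffers handed to
 * usb_control_msg() may be DMA-mapped, so they must not live on the stack. */
static int example_read_latency(struct usb_device *udev, unsigned char *value)
{
	unsigned char *buf;
	int rv;

	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rv = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			     FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
			     FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
			     0, 0, buf, 1, WDR_TIMEOUT);
	if (rv >= 0)
		*value = buf[0];

	kfree(buf);
	return rv;
}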
@@ -1229,7 +1223,7 @@ static int set_serial_info(struct tty_struct *tty,
1229 if (copy_from_user(&new_serial, newinfo, sizeof(new_serial))) 1223 if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
1230 return -EFAULT; 1224 return -EFAULT;
1231 1225
1232 lock_kernel(); 1226 mutex_lock(&priv->cfg_lock);
1233 old_priv = *priv; 1227 old_priv = *priv;
1234 1228
1235 /* Do error checking and permission checking */ 1229 /* Do error checking and permission checking */
@@ -1237,7 +1231,7 @@ static int set_serial_info(struct tty_struct *tty,
1237 if (!capable(CAP_SYS_ADMIN)) { 1231 if (!capable(CAP_SYS_ADMIN)) {
1238 if (((new_serial.flags & ~ASYNC_USR_MASK) != 1232 if (((new_serial.flags & ~ASYNC_USR_MASK) !=
1239 (priv->flags & ~ASYNC_USR_MASK))) { 1233 (priv->flags & ~ASYNC_USR_MASK))) {
1240 unlock_kernel(); 1234 mutex_unlock(&priv->cfg_lock);
1241 return -EPERM; 1235 return -EPERM;
1242 } 1236 }
1243 priv->flags = ((priv->flags & ~ASYNC_USR_MASK) | 1237 priv->flags = ((priv->flags & ~ASYNC_USR_MASK) |
@@ -1248,7 +1242,7 @@ static int set_serial_info(struct tty_struct *tty,
1248 1242
1249 if ((new_serial.baud_base != priv->baud_base) && 1243 if ((new_serial.baud_base != priv->baud_base) &&
1250 (new_serial.baud_base < 9600)) { 1244 (new_serial.baud_base < 9600)) {
1251 unlock_kernel(); 1245 mutex_unlock(&priv->cfg_lock);
1252 return -EINVAL; 1246 return -EINVAL;
1253 } 1247 }
1254 1248
@@ -1278,11 +1272,11 @@ check_and_exit:
1278 (priv->flags & ASYNC_SPD_MASK)) || 1272 (priv->flags & ASYNC_SPD_MASK)) ||
1279 (((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) && 1273 (((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) &&
1280 (old_priv.custom_divisor != priv->custom_divisor))) { 1274 (old_priv.custom_divisor != priv->custom_divisor))) {
1281 unlock_kernel(); 1275 mutex_unlock(&priv->cfg_lock);
1282 change_speed(tty, port); 1276 change_speed(tty, port);
1283 } 1277 }
1284 else 1278 else
1285 unlock_kernel(); 1279 mutex_unlock(&priv->cfg_lock);
1286 return 0; 1280 return 0;
1287 1281
1288} /* set_serial_info */ 1282} /* set_serial_info */
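set_serial_info() no longer serializes with lock_kernel(); instead each port gets a cfg_lock mutex (initialized in ftdi_sio_port_probe() further down) that protects the read-modify-write of the private flags and custom divisor. A sketch of the locking shape only; the helper name is illustrative:

/* Sketch only: the per-port mutex replaces the BKL around the
 * read-modify-write of the driver's private flags. */
static void example_update_flags(struct ftdi_private *priv,
				 unsigned int new_flags)
{
	mutex_lock(&priv->cfg_lock);
	priv->flags = (priv->flags & ~ASYNC_USR_MASK) |
		      (new_flags & ASYNC_USR_MASK);
	mutex_unlock(&priv->cfg_lock);
}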
@@ -1338,20 +1332,20 @@ static void ftdi_determine_type(struct usb_serial_port *port)
1338 __func__); 1332 __func__);
1339 } 1333 }
1340 } else if (version < 0x200) { 1334 } else if (version < 0x200) {
1341 /* Old device. Assume its the original SIO. */ 1335 /* Old device. Assume it's the original SIO. */
1342 priv->chip_type = SIO; 1336 priv->chip_type = SIO;
1343 priv->baud_base = 12000000 / 16; 1337 priv->baud_base = 12000000 / 16;
1344 priv->write_offset = 1; 1338 priv->write_offset = 1;
1345 } else if (version < 0x400) { 1339 } else if (version < 0x400) {
1346 /* Assume its an FT8U232AM (or FT8U245AM) */ 1340 /* Assume it's an FT8U232AM (or FT8U245AM) */
1347 /* (It might be a BM because of the iSerialNumber bug, 1341 /* (It might be a BM because of the iSerialNumber bug,
1348 * but it will still work as an AM device.) */ 1342 * but it will still work as an AM device.) */
1349 priv->chip_type = FT8U232AM; 1343 priv->chip_type = FT8U232AM;
1350 } else if (version < 0x600) { 1344 } else if (version < 0x600) {
1351 /* Assume its an FT232BM (or FT245BM) */ 1345 /* Assume it's an FT232BM (or FT245BM) */
1352 priv->chip_type = FT232BM; 1346 priv->chip_type = FT232BM;
1353 } else { 1347 } else {
1354 /* Assume its an FT232R */ 1348 /* Assume it's an FT232R */
1355 priv->chip_type = FT232RL; 1349 priv->chip_type = FT232RL;
1356 } 1350 }
1357 dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]); 1351 dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
@@ -1371,7 +1365,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
1371 struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc; 1365 struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
1372 1366
1373 unsigned num_endpoints; 1367 unsigned num_endpoints;
1374 int i = 0; 1368 int i;
1375 1369
1376 num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; 1370 num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
1377 dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints); 1371 dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
@@ -1423,7 +1417,7 @@ static ssize_t store_latency_timer(struct device *dev,
1423 struct usb_serial_port *port = to_usb_serial_port(dev); 1417 struct usb_serial_port *port = to_usb_serial_port(dev);
1424 struct ftdi_private *priv = usb_get_serial_port_data(port); 1418 struct ftdi_private *priv = usb_get_serial_port_data(port);
1425 int v = simple_strtoul(valbuf, NULL, 10); 1419 int v = simple_strtoul(valbuf, NULL, 10);
1426 int rv = 0; 1420 int rv;
1427 1421
1428 priv->latency = v; 1422 priv->latency = v;
1429 rv = write_latency_timer(port); 1423 rv = write_latency_timer(port);
@@ -1440,9 +1434,8 @@ static ssize_t store_event_char(struct device *dev,
1440 struct usb_serial_port *port = to_usb_serial_port(dev); 1434 struct usb_serial_port *port = to_usb_serial_port(dev);
1441 struct ftdi_private *priv = usb_get_serial_port_data(port); 1435 struct ftdi_private *priv = usb_get_serial_port_data(port);
1442 struct usb_device *udev = port->serial->dev; 1436 struct usb_device *udev = port->serial->dev;
1443 char buf[1];
1444 int v = simple_strtoul(valbuf, NULL, 10); 1437 int v = simple_strtoul(valbuf, NULL, 10);
1445 int rv = 0; 1438 int rv;
1446 1439
1447 dbg("%s: setting event char = %i", __func__, v); 1440 dbg("%s: setting event char = %i", __func__, v);
1448 1441
@@ -1451,8 +1444,7 @@ static ssize_t store_event_char(struct device *dev,
1451 FTDI_SIO_SET_EVENT_CHAR_REQUEST, 1444 FTDI_SIO_SET_EVENT_CHAR_REQUEST,
1452 FTDI_SIO_SET_EVENT_CHAR_REQUEST_TYPE, 1445 FTDI_SIO_SET_EVENT_CHAR_REQUEST_TYPE,
1453 v, priv->interface, 1446 v, priv->interface,
1454 buf, 0, WDR_TIMEOUT); 1447 NULL, 0, WDR_TIMEOUT);
1455
1456 if (rv < 0) { 1448 if (rv < 0) {
1457 dbg("Unable to write event character: %i", rv); 1449 dbg("Unable to write event character: %i", rv);
1458 return -EIO; 1450 return -EIO;
@@ -1551,9 +1543,9 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
1551 1543
1552 kref_init(&priv->kref); 1544 kref_init(&priv->kref);
1553 spin_lock_init(&priv->tx_lock); 1545 spin_lock_init(&priv->tx_lock);
1546 mutex_init(&priv->cfg_lock);
1554 init_waitqueue_head(&priv->delta_msr_wait); 1547 init_waitqueue_head(&priv->delta_msr_wait);
1555 /* This will push the characters through immediately rather 1548
1556 than queue a task to deliver them */
1557 priv->flags = ASYNC_LOW_LATENCY; 1549 priv->flags = ASYNC_LOW_LATENCY;
1558 1550
1559 if (quirk && quirk->port_probe) 1551 if (quirk && quirk->port_probe)
@@ -1585,7 +1577,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
1585 1577
1586 ftdi_determine_type(port); 1578 ftdi_determine_type(port);
1587 ftdi_set_max_packet_size(port); 1579 ftdi_set_max_packet_size(port);
1588 read_latency_timer(port); 1580 if (read_latency_timer(port) < 0)
1581 priv->latency = 16;
1589 create_sysfs_attrs(port); 1582 create_sysfs_attrs(port);
1590 return 0; 1583 return 0;
1591} 1584}
@@ -1630,8 +1623,6 @@ static int ftdi_NDI_device_setup(struct usb_serial *serial)
1630{ 1623{
1631 struct usb_device *udev = serial->dev; 1624 struct usb_device *udev = serial->dev;
1632 int latency = ndi_latency_timer; 1625 int latency = ndi_latency_timer;
1633 int rv = 0;
1634 char buf[1];
1635 1626
1636 if (latency == 0) 1627 if (latency == 0)
1637 latency = 1; 1628 latency = 1;
@@ -1641,10 +1632,11 @@ static int ftdi_NDI_device_setup(struct usb_serial *serial)
1641 dbg("%s setting NDI device latency to %d", __func__, latency); 1632 dbg("%s setting NDI device latency to %d", __func__, latency);
1642 dev_info(&udev->dev, "NDI device with a latency value of %d", latency); 1633 dev_info(&udev->dev, "NDI device with a latency value of %d", latency);
1643 1634
1644 rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 1635 /* FIXME: errors are not returned */
1636 usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1645 FTDI_SIO_SET_LATENCY_TIMER_REQUEST, 1637 FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
1646 FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE, 1638 FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
1647 latency, 0, buf, 0, WDR_TIMEOUT); 1639 latency, 0, NULL, 0, WDR_TIMEOUT);
1648 return 0; 1640 return 0;
1649} 1641}
1650 1642
@@ -1720,7 +1712,7 @@ static int ftdi_submit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
1720 urb->transfer_buffer_length, 1712 urb->transfer_buffer_length,
1721 ftdi_read_bulk_callback, port); 1713 ftdi_read_bulk_callback, port);
1722 result = usb_submit_urb(urb, mem_flags); 1714 result = usb_submit_urb(urb, mem_flags);
1723 if (result) 1715 if (result && result != -EPERM)
1724 dev_err(&port->dev, 1716 dev_err(&port->dev,
1725 "%s - failed submitting read urb, error %d\n", 1717 "%s - failed submitting read urb, error %d\n",
1726 __func__, result); 1718 __func__, result);
@@ -1732,16 +1724,10 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
1732 struct usb_device *dev = port->serial->dev; 1724 struct usb_device *dev = port->serial->dev;
1733 struct ftdi_private *priv = usb_get_serial_port_data(port); 1725 struct ftdi_private *priv = usb_get_serial_port_data(port);
1734 unsigned long flags; 1726 unsigned long flags;
1735 1727 int result;
1736 int result = 0;
1737 char buf[1]; /* Needed for the usb_control_msg I think */
1738 1728
1739 dbg("%s", __func__); 1729 dbg("%s", __func__);
1740 1730
1741 spin_lock_irqsave(&priv->tx_lock, flags);
1742 priv->tx_bytes = 0;
1743 spin_unlock_irqrestore(&priv->tx_lock, flags);
1744
1745 write_latency_timer(port); 1731 write_latency_timer(port);
1746 1732
1747 /* No error checking for this (will get errors later anyway) */ 1733 /* No error checking for this (will get errors later anyway) */
@@ -1749,7 +1735,7 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
1749 usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 1735 usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1750 FTDI_SIO_RESET_REQUEST, FTDI_SIO_RESET_REQUEST_TYPE, 1736 FTDI_SIO_RESET_REQUEST, FTDI_SIO_RESET_REQUEST_TYPE,
1751 FTDI_SIO_RESET_SIO, 1737 FTDI_SIO_RESET_SIO,
1752 priv->interface, buf, 0, WDR_TIMEOUT); 1738 priv->interface, NULL, 0, WDR_TIMEOUT);
1753 1739
1754 /* Termios defaults are set by usb_serial_init. We don't change 1740 /* Termios defaults are set by usb_serial_init. We don't change
1755 port->tty->termios - this would lose speed settings, etc. 1741 port->tty->termios - this would lose speed settings, etc.
@@ -1777,7 +1763,6 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
1777static void ftdi_dtr_rts(struct usb_serial_port *port, int on) 1763static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
1778{ 1764{
1779 struct ftdi_private *priv = usb_get_serial_port_data(port); 1765 struct ftdi_private *priv = usb_get_serial_port_data(port);
1780 char buf[1];
1781 1766
1782 mutex_lock(&port->serial->disc_mutex); 1767 mutex_lock(&port->serial->disc_mutex);
1783 if (!port->serial->disconnected) { 1768 if (!port->serial->disconnected) {
@@ -1786,7 +1771,7 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
1786 usb_sndctrlpipe(port->serial->dev, 0), 1771 usb_sndctrlpipe(port->serial->dev, 0),
1787 FTDI_SIO_SET_FLOW_CTRL_REQUEST, 1772 FTDI_SIO_SET_FLOW_CTRL_REQUEST,
1788 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 1773 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
1789 0, priv->interface, buf, 0, 1774 0, priv->interface, NULL, 0,
1790 WDR_TIMEOUT) < 0) { 1775 WDR_TIMEOUT) < 0) {
1791 dev_err(&port->dev, "error from flowcontrol urb\n"); 1776 dev_err(&port->dev, "error from flowcontrol urb\n");
1792 } 1777 }
@@ -1847,7 +1832,7 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
1847 spin_lock_irqsave(&priv->tx_lock, flags); 1832 spin_lock_irqsave(&priv->tx_lock, flags);
1848 if (priv->tx_outstanding_urbs > URB_UPPER_LIMIT) { 1833 if (priv->tx_outstanding_urbs > URB_UPPER_LIMIT) {
1849 spin_unlock_irqrestore(&priv->tx_lock, flags); 1834 spin_unlock_irqrestore(&priv->tx_lock, flags);
1850 dbg("%s - write limit hit\n", __func__); 1835 dbg("%s - write limit hit", __func__);
1851 return 0; 1836 return 0;
1852 } 1837 }
1853 priv->tx_outstanding_urbs++; 1838 priv->tx_outstanding_urbs++;
@@ -1927,7 +1912,6 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
1927 } else { 1912 } else {
1928 spin_lock_irqsave(&priv->tx_lock, flags); 1913 spin_lock_irqsave(&priv->tx_lock, flags);
1929 priv->tx_outstanding_bytes += count; 1914 priv->tx_outstanding_bytes += count;
1930 priv->tx_bytes += count;
1931 spin_unlock_irqrestore(&priv->tx_lock, flags); 1915 spin_unlock_irqrestore(&priv->tx_lock, flags);
1932 } 1916 }
1933 1917
@@ -2154,8 +2138,7 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
2154{ 2138{
2155 struct usb_serial_port *port = tty->driver_data; 2139 struct usb_serial_port *port = tty->driver_data;
2156 struct ftdi_private *priv = usb_get_serial_port_data(port); 2140 struct ftdi_private *priv = usb_get_serial_port_data(port);
2157 __u16 urb_value = 0; 2141 __u16 urb_value;
2158 char buf[1];
2159 2142
2160 /* break_state = -1 to turn on break, and 0 to turn off break */ 2143 /* break_state = -1 to turn on break, and 0 to turn off break */
2161 /* see drivers/char/tty_io.c to see it used */ 2144 /* see drivers/char/tty_io.c to see it used */
@@ -2171,7 +2154,7 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
2171 FTDI_SIO_SET_DATA_REQUEST, 2154 FTDI_SIO_SET_DATA_REQUEST,
2172 FTDI_SIO_SET_DATA_REQUEST_TYPE, 2155 FTDI_SIO_SET_DATA_REQUEST_TYPE,
2173 urb_value , priv->interface, 2156 urb_value , priv->interface,
2174 buf, 0, WDR_TIMEOUT) < 0) { 2157 NULL, 0, WDR_TIMEOUT) < 0) {
2175 dev_err(&port->dev, "%s FAILED to enable/disable break state " 2158 dev_err(&port->dev, "%s FAILED to enable/disable break state "
2176 "(state was %d)\n", __func__, break_state); 2159 "(state was %d)\n", __func__, break_state);
2177 } 2160 }
@@ -2195,7 +2178,6 @@ static void ftdi_set_termios(struct tty_struct *tty,
2195 struct ktermios *termios = tty->termios; 2178 struct ktermios *termios = tty->termios;
2196 unsigned int cflag = termios->c_cflag; 2179 unsigned int cflag = termios->c_cflag;
2197 __u16 urb_value; /* will hold the new flags */ 2180 __u16 urb_value; /* will hold the new flags */
2198 char buf[1]; /* Perhaps I should dynamically alloc this? */
2199 2181
2200 /* Added for xon/xoff support */ 2182 /* Added for xon/xoff support */
2201 unsigned int iflag = termios->c_iflag; 2183 unsigned int iflag = termios->c_iflag;
@@ -2246,12 +2228,10 @@ static void ftdi_set_termios(struct tty_struct *tty,
2246 } 2228 }
2247 if (cflag & CSIZE) { 2229 if (cflag & CSIZE) {
2248 switch (cflag & CSIZE) { 2230 switch (cflag & CSIZE) {
2249 case CS5: urb_value |= 5; dbg("Setting CS5"); break;
2250 case CS6: urb_value |= 6; dbg("Setting CS6"); break;
2251 case CS7: urb_value |= 7; dbg("Setting CS7"); break; 2231 case CS7: urb_value |= 7; dbg("Setting CS7"); break;
2252 case CS8: urb_value |= 8; dbg("Setting CS8"); break; 2232 case CS8: urb_value |= 8; dbg("Setting CS8"); break;
2253 default: 2233 default:
2254 dev_err(&port->dev, "CSIZE was set but not CS5-CS8\n"); 2234 dev_err(&port->dev, "CSIZE was set but not CS7-CS8\n");
2255 } 2235 }
2256 } 2236 }
2257 2237
@@ -2263,7 +2243,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2263 FTDI_SIO_SET_DATA_REQUEST, 2243 FTDI_SIO_SET_DATA_REQUEST,
2264 FTDI_SIO_SET_DATA_REQUEST_TYPE, 2244 FTDI_SIO_SET_DATA_REQUEST_TYPE,
2265 urb_value , priv->interface, 2245 urb_value , priv->interface,
2266 buf, 0, WDR_SHORT_TIMEOUT) < 0) { 2246 NULL, 0, WDR_SHORT_TIMEOUT) < 0) {
2267 dev_err(&port->dev, "%s FAILED to set " 2247 dev_err(&port->dev, "%s FAILED to set "
2268 "databits/stopbits/parity\n", __func__); 2248 "databits/stopbits/parity\n", __func__);
2269 } 2249 }
@@ -2275,7 +2255,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2275 FTDI_SIO_SET_FLOW_CTRL_REQUEST, 2255 FTDI_SIO_SET_FLOW_CTRL_REQUEST,
2276 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 2256 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
2277 0, priv->interface, 2257 0, priv->interface,
2278 buf, 0, WDR_TIMEOUT) < 0) { 2258 NULL, 0, WDR_TIMEOUT) < 0) {
2279 dev_err(&port->dev, 2259 dev_err(&port->dev,
2280 "%s error from disable flowcontrol urb\n", 2260 "%s error from disable flowcontrol urb\n",
2281 __func__); 2261 __func__);
@@ -2301,7 +2281,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2301 FTDI_SIO_SET_FLOW_CTRL_REQUEST, 2281 FTDI_SIO_SET_FLOW_CTRL_REQUEST,
2302 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 2282 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
2303 0 , (FTDI_SIO_RTS_CTS_HS | priv->interface), 2283 0 , (FTDI_SIO_RTS_CTS_HS | priv->interface),
2304 buf, 0, WDR_TIMEOUT) < 0) { 2284 NULL, 0, WDR_TIMEOUT) < 0) {
2305 dev_err(&port->dev, 2285 dev_err(&port->dev,
2306 "urb failed to set to rts/cts flow control\n"); 2286 "urb failed to set to rts/cts flow control\n");
2307 } 2287 }
@@ -2333,7 +2313,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2333 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 2313 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
2334 urb_value , (FTDI_SIO_XON_XOFF_HS 2314 urb_value , (FTDI_SIO_XON_XOFF_HS
2335 | priv->interface), 2315 | priv->interface),
2336 buf, 0, WDR_TIMEOUT) < 0) { 2316 NULL, 0, WDR_TIMEOUT) < 0) {
2337 dev_err(&port->dev, "urb failed to set to " 2317 dev_err(&port->dev, "urb failed to set to "
2338 "xon/xoff flow control\n"); 2318 "xon/xoff flow control\n");
2339 } 2319 }
@@ -2347,7 +2327,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
2347 FTDI_SIO_SET_FLOW_CTRL_REQUEST, 2327 FTDI_SIO_SET_FLOW_CTRL_REQUEST,
2348 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 2328 FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
2349 0, priv->interface, 2329 0, priv->interface,
2350 buf, 0, WDR_TIMEOUT) < 0) { 2330 NULL, 0, WDR_TIMEOUT) < 0) {
2351 dev_err(&port->dev, 2331 dev_err(&port->dev,
2352 "urb failed to clear flow control\n"); 2332 "urb failed to clear flow control\n");
2353 } 2333 }
@@ -2361,21 +2341,22 @@ static int ftdi_tiocmget(struct tty_struct *tty, struct file *file)
2361{ 2341{
2362 struct usb_serial_port *port = tty->driver_data; 2342 struct usb_serial_port *port = tty->driver_data;
2363 struct ftdi_private *priv = usb_get_serial_port_data(port); 2343 struct ftdi_private *priv = usb_get_serial_port_data(port);
2364 unsigned char buf[2]; 2344 unsigned char *buf;
2345 int len;
2365 int ret; 2346 int ret;
2366 2347
2367 dbg("%s TIOCMGET", __func__); 2348 dbg("%s TIOCMGET", __func__);
2349
2350 buf = kmalloc(2, GFP_KERNEL);
2351 if (!buf)
2352 return -ENOMEM;
2353 /*
2354 * The 8U232AM returns a two byte value (the SIO a 1 byte value) in
2355 * the same format as the data returned from the in point.
2356 */
2368 switch (priv->chip_type) { 2357 switch (priv->chip_type) {
2369 case SIO: 2358 case SIO:
2370 /* Request the status from the device */ 2359 len = 1;
2371 ret = usb_control_msg(port->serial->dev,
2372 usb_rcvctrlpipe(port->serial->dev, 0),
2373 FTDI_SIO_GET_MODEM_STATUS_REQUEST,
2374 FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
2375 0, 0,
2376 buf, 1, WDR_TIMEOUT);
2377 if (ret < 0)
2378 return ret;
2379 break; 2360 break;
2380 case FT8U232AM: 2361 case FT8U232AM:
2381 case FT232BM: 2362 case FT232BM:
@@ -2383,27 +2364,30 @@ static int ftdi_tiocmget(struct tty_struct *tty, struct file *file)
2383 case FT232RL: 2364 case FT232RL:
2384 case FT2232H: 2365 case FT2232H:
2385 case FT4232H: 2366 case FT4232H:
2386 /* the 8U232AM returns a two byte value (the sio is a 1 byte 2367 len = 2;
2387 value) - in the same format as the data returned from the in
2388 point */
2389 ret = usb_control_msg(port->serial->dev,
2390 usb_rcvctrlpipe(port->serial->dev, 0),
2391 FTDI_SIO_GET_MODEM_STATUS_REQUEST,
2392 FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
2393 0, priv->interface,
2394 buf, 2, WDR_TIMEOUT);
2395 if (ret < 0)
2396 return ret;
2397 break; 2368 break;
2398 default: 2369 default:
2399 return -EFAULT; 2370 ret = -EFAULT;
2371 goto out;
2400 } 2372 }
2401 2373
2402 return (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) | 2374 ret = usb_control_msg(port->serial->dev,
2375 usb_rcvctrlpipe(port->serial->dev, 0),
2376 FTDI_SIO_GET_MODEM_STATUS_REQUEST,
2377 FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
2378 0, priv->interface,
2379 buf, len, WDR_TIMEOUT);
2380 if (ret < 0)
2381 goto out;
2382
2383 ret = (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) |
2403 (buf[0] & FTDI_SIO_CTS_MASK ? TIOCM_CTS : 0) | 2384 (buf[0] & FTDI_SIO_CTS_MASK ? TIOCM_CTS : 0) |
2404 (buf[0] & FTDI_SIO_RI_MASK ? TIOCM_RI : 0) | 2385 (buf[0] & FTDI_SIO_RI_MASK ? TIOCM_RI : 0) |
2405 (buf[0] & FTDI_SIO_RLSD_MASK ? TIOCM_CD : 0) | 2386 (buf[0] & FTDI_SIO_RLSD_MASK ? TIOCM_CD : 0) |
2406 priv->last_dtr_rts; 2387 priv->last_dtr_rts;
2388out:
2389 kfree(buf);
2390 return ret;
2407} 2391}
2408 2392
2409static int ftdi_tiocmset(struct tty_struct *tty, struct file *file, 2393static int ftdi_tiocmset(struct tty_struct *tty, struct file *file,
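The rewritten ftdi_tiocmget() collapses the per-chip branches into a single GET_MODEM_STATUS request whose length depends on the chip generation: the original SIO answers with one byte, later parts with two, and the reply lands in the kmalloc()ed buffer so it is safe to DMA. Reduced to its core, the request looks roughly like this (buf is the two-byte buffer allocated in the hunk above):

/* One status request for all chip types. */
len = (priv->chip_type == SIO) ? 1 : 2;
ret = usb_control_msg(port->serial->dev,
		      usb_rcvctrlpipe(port->serial->dev, 0),
		      FTDI_SIO_GET_MODEM_STATUS_REQUEST,
		      FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
		      0, priv->interface,
		      buf, len, WDR_TIMEOUT);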
@@ -2508,8 +2492,7 @@ void ftdi_unthrottle(struct tty_struct *tty)
2508 port->throttled = port->throttle_req = 0; 2492 port->throttled = port->throttle_req = 0;
2509 spin_unlock_irqrestore(&port->lock, flags); 2493 spin_unlock_irqrestore(&port->lock, flags);
2510 2494
2511 /* Resubmit urb if throttled and open. */ 2495 if (was_throttled)
2512 if (was_throttled && test_bit(ASYNCB_INITIALIZED, &port->port.flags))
2513 ftdi_submit_read_urb(port, GFP_KERNEL); 2496 ftdi_submit_read_urb(port, GFP_KERNEL);
2514} 2497}
2515 2498
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index b0e0d64f822e..ff9bf80327a3 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -28,13 +28,13 @@
28#define FTDI_SIO_SET_FLOW_CTRL 2 /* Set flow control register */ 28#define FTDI_SIO_SET_FLOW_CTRL 2 /* Set flow control register */
29#define FTDI_SIO_SET_BAUD_RATE 3 /* Set baud rate */ 29#define FTDI_SIO_SET_BAUD_RATE 3 /* Set baud rate */
30#define FTDI_SIO_SET_DATA 4 /* Set the data characteristics of the port */ 30#define FTDI_SIO_SET_DATA 4 /* Set the data characteristics of the port */
31#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modern status register */ 31#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modem status register */
32#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */ 32#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */
33#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */ 33#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */
34#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */ 34#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */
35#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */ 35#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */
36 36
37/* Interface indicies for FT2232, FT2232H and FT4232H devices*/ 37/* Interface indices for FT2232, FT2232H and FT4232H devices */
38#define INTERFACE_A 1 38#define INTERFACE_A 1
39#define INTERFACE_B 2 39#define INTERFACE_B 2
40#define INTERFACE_C 3 40#define INTERFACE_C 3
@@ -270,7 +270,7 @@ typedef enum {
270 * BmRequestType: 0100 0000b 270 * BmRequestType: 0100 0000b
271 * bRequest: FTDI_SIO_SET_FLOW_CTRL 271 * bRequest: FTDI_SIO_SET_FLOW_CTRL
272 * wValue: Xoff/Xon 272 * wValue: Xoff/Xon
273 * wIndex: Protocol/Port - hIndex is protocl / lIndex is port 273 * wIndex: Protocol/Port - hIndex is protocol / lIndex is port
274 * wLength: 0 274 * wLength: 0
275 * Data: None 275 * Data: None
276 * 276 *
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index c8951aeed983..0727e198503e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -22,7 +22,7 @@
22#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */ 22#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
23#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */ 23#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
24#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */ 24#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
25#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */ 25#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
26#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */ 26#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
27 27
28 28
@@ -49,7 +49,7 @@
49#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8 49#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
50#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9 50#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
51 51
52#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */ 52#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */
53 53
54/* OpenDCC (www.opendcc.de) product id */ 54/* OpenDCC (www.opendcc.de) product id */
55#define FTDI_OPENDCC_PID 0xBFD8 55#define FTDI_OPENDCC_PID 0xBFD8
@@ -185,7 +185,7 @@
185#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */ 185#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
186#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */ 186#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */
187#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */ 187#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */
188#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ 188#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Energy monitor EM 1010 PC */
189#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */ 189#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
190#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */ 190#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
191#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */ 191#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
@@ -212,8 +212,8 @@
212 * drivers, or possibly the Comedi drivers in some cases. */ 212 * drivers, or possibly the Comedi drivers in some cases. */
213#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */ 213#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */
214#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */ 214#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */
215#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperartur-Feuchte Messgeraet (TFM 100) */ 215#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperatur-Feuchte-Messgeraet (TFM 100) */
216#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkurh (UDF 77) */ 216#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkuhr (UDF 77) */
217#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */ 217#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */
218 218
219/* 219/*
@@ -320,7 +320,7 @@
320 320
321/* 321/*
322 * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485, 322 * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485,
323 * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices 323 * USB-TTY aktiv, USB-TTY passiv. Some PIDs are used by several devices
324 * and I'm not entirely sure which are used by which. 324 * and I'm not entirely sure which are used by which.
325 */ 325 */
326#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0 326#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
@@ -330,10 +330,10 @@
330 * Linx Technologies product ids 330 * Linx Technologies product ids
331 */ 331 */
332#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ 332#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
333#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */ 333#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
334#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */ 334#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
335#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */ 335#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
336#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */ 336#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
337 337
338/* 338/*
339 * Oceanic product ids 339 * Oceanic product ids
@@ -494,6 +494,13 @@
494#define RATOC_PRODUCT_ID_USB60F 0xb020 494#define RATOC_PRODUCT_ID_USB60F 0xb020
495 495
496/* 496/*
497 * Contec products (http://www.contec.com)
498 * Submitted by Daniel Sangorrin
499 */
500#define CONTEC_VID 0x06CE /* Vendor ID */
501#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
502
503/*
497 * Definitions for B&B Electronics products. 504 * Definitions for B&B Electronics products.
498 */ 505 */
499#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */ 506#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
@@ -642,7 +649,7 @@
642#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */ 649#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */
643#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */ 650#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */
644 651
645/* Larsen and Brusgaard AltiTrack/USBtrack */ 652/* Larsen and Brusgaard AltiTrack/USBtrack */
646#define LARSENBRUSGAARD_VID 0x0FD8 653#define LARSENBRUSGAARD_VID 0x0FD8
647#define LB_ALTITRACK_PID 0x0001 654#define LB_ALTITRACK_PID 0x0001
648 655
@@ -971,7 +978,7 @@
971#define ALTI2_N3_PID 0x6001 /* Neptune 3 */ 978#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
972 979
973/* 980/*
974 * Dresden Elektronic Sensor Terminal Board 981 * Dresden Elektronik Sensor Terminal Board
975 */ 982 */
976#define DE_VID 0x1cf1 /* Vendor ID */ 983#define DE_VID 0x1cf1 /* Vendor ID */
977#define STB_PID 0x0001 /* Sensor Terminal Board */ 984#define STB_PID 0x0001 /* Sensor Terminal Board */
@@ -1002,3 +1009,11 @@
1002#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/ 1009#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
1003#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/ 1010#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/
1004#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */ 1011#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */
1012
1013/*
1014 * MJS Gadgets HD Radio / XM Radio / Sirius Radio interfaces (using VID 0x0403)
1015 */
1016#define MJSG_GENERIC_PID 0x9378
1017#define MJSG_SR_RADIO_PID 0x9379
1018#define MJSG_XM_RADIO_PID 0x937A
1019#define MJSG_HD_RADIO_PID 0x937C
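New hardware support in this patch follows the usual two-step pattern: define the vendor/product IDs here in ftdi_sio_ids.h and add matching USB_DEVICE() entries to id_table_combined[] in ftdi_sio.c, as done above for the Contec COM-1(USB)H and the MJS Gadgets radios. Collecting the two halves of the Contec addition in one place:

/* ftdi_sio_ids.h */
#define CONTEC_VID		0x06CE	/* Vendor ID */
#define CONTEC_COM1USBH_PID	0x8311	/* COM-1(USB)H */

/* ftdi_sio.c, in id_table_combined[] */
{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },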
diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c
index d30f736d2cc5..e21ce9ddfc63 100644
--- a/drivers/usb/serial/funsoft.c
+++ b/drivers/usb/serial/funsoft.c
@@ -18,7 +18,7 @@
18 18
19static int debug; 19static int debug;
20 20
21static struct usb_device_id id_table [] = { 21static const struct usb_device_id id_table[] = {
22 { USB_DEVICE(0x1404, 0xcddc) }, 22 { USB_DEVICE(0x1404, 0xcddc) },
23 { }, 23 { },
24}; 24};
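The funsoft hunk is one instance of a pattern applied to every driver in this patch: device-ID tables are never written at runtime, so they become const (and the stray space before the brackets goes away). The table is still registered with the module loader as before; a sketch assuming the usual MODULE_DEVICE_TABLE() line, which sits outside the visible hunk:

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x1404, 0xcddc) },
	{ },				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);	/* assumed; not shown in the hunk */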
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 5ac900e5a50e..a42b29a695b2 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -210,7 +210,7 @@ static unsigned char const PRIVATE_REQ[]
210 210
211 211
212 212
213static struct usb_device_id id_table [] = { 213static const struct usb_device_id id_table[] = {
214 /* the same device id seems to be used by all 214 /* the same device id seems to be used by all
215 usb enabled GPS devices */ 215 usb enabled GPS devices */
216 { USB_DEVICE(GARMIN_VENDOR_ID, 3) }, 216 { USB_DEVICE(GARMIN_VENDOR_ID, 3) },
@@ -271,7 +271,6 @@ static void send_to_tty(struct usb_serial_port *port,
271 usb_serial_debug_data(debug, &port->dev, 271 usb_serial_debug_data(debug, &port->dev,
272 __func__, actual_length, data); 272 __func__, actual_length, data);
273 273
274 tty_buffer_request_room(tty, actual_length);
275 tty_insert_flip_string(tty, data, actual_length); 274 tty_insert_flip_string(tty, data, actual_length);
276 tty_flip_buffer_push(tty); 275 tty_flip_buffer_push(tty);
277 } 276 }
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 83443d6306d6..89fac36684c5 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -20,6 +20,7 @@
20#include <linux/usb/serial.h> 20#include <linux/usb/serial.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/kfifo.h> 22#include <linux/kfifo.h>
23#include <linux/serial.h>
23 24
24static int debug; 25static int debug;
25 26
@@ -41,7 +42,7 @@ static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
41 42
42/* we want to look at all devices, as the vendor/product id can change 43/* we want to look at all devices, as the vendor/product id can change
43 * depending on the command line argument */ 44 * depending on the command line argument */
44static struct usb_device_id generic_serial_ids[] = { 45static const struct usb_device_id generic_serial_ids[] = {
45 {.driver_info = 42}, 46 {.driver_info = 42},
46 {} 47 {}
47}; 48};
@@ -194,7 +195,7 @@ static int usb_serial_multi_urb_write(struct tty_struct *tty,
194 if (port->urbs_in_flight > 195 if (port->urbs_in_flight >
195 port->serial->type->max_in_flight_urbs) { 196 port->serial->type->max_in_flight_urbs) {
196 spin_unlock_irqrestore(&port->lock, flags); 197 spin_unlock_irqrestore(&port->lock, flags);
197 dbg("%s - write limit hit\n", __func__); 198 dbg("%s - write limit hit", __func__);
198 return bwrite; 199 return bwrite;
199 } 200 }
200 port->tx_bytes_flight += towrite; 201 port->tx_bytes_flight += towrite;
@@ -585,7 +586,7 @@ int usb_serial_generic_resume(struct usb_serial *serial)
585 586
586 for (i = 0; i < serial->num_ports; i++) { 587 for (i = 0; i < serial->num_ports; i++) {
587 port = serial->port[i]; 588 port = serial->port[i];
588 if (!port->port.count) 589 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
589 continue; 590 continue;
590 591
591 if (port->read_urb) { 592 if (port->read_urb) {
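usb_serial_generic_resume() stops consulting port->port.count and instead tests ASYNCB_INITIALIZED in the tty_port flags, which is presumably what the new #include <linux/serial.h> at the top of generic.c is for: the bit is set while the port is open and initialized, so only live ports get their read URBs resubmitted. A minimal sketch, with GFP_NOIO assumed for the resume path:

/* Resubmit reads only for ports that are actually initialized;
 * ASYNCB_INITIALIZED comes from <linux/serial.h>. */
if (test_bit(ASYNCB_INITIALIZED, &port->port.flags) && port->read_urb)
	result = usb_submit_urb(port->read_urb, GFP_NOIO);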
diff --git a/drivers/usb/serial/hp4x.c b/drivers/usb/serial/hp4x.c
index 431329275133..809379159b0e 100644
--- a/drivers/usb/serial/hp4x.c
+++ b/drivers/usb/serial/hp4x.c
@@ -29,7 +29,7 @@
29#define HP_VENDOR_ID 0x03f0 29#define HP_VENDOR_ID 0x03f0
30#define HP49GP_PRODUCT_ID 0x0121 30#define HP49GP_PRODUCT_ID 0x0121
31 31
32static struct usb_device_id id_table [] = { 32static const struct usb_device_id id_table[] = {
33 { USB_DEVICE(HP_VENDOR_ID, HP49GP_PRODUCT_ID) }, 33 { USB_DEVICE(HP_VENDOR_ID, HP49GP_PRODUCT_ID) },
34 { } /* Terminating entry */ 34 { } /* Terminating entry */
35}; 35};
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index b97960ac92f2..3ef8df0ef888 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -364,42 +364,6 @@ static void update_edgeport_E2PROM(struct edgeport_serial *edge_serial)
364 release_firmware(fw); 364 release_firmware(fw);
365} 365}
366 366
367
368/************************************************************************
369 * *
370 * Get string descriptor from device *
371 * *
372 ************************************************************************/
373static int get_string(struct usb_device *dev, int Id, char *string, int buflen)
374{
375 struct usb_string_descriptor StringDesc;
376 struct usb_string_descriptor *pStringDesc;
377
378 dbg("%s - USB String ID = %d", __func__, Id);
379
380 if (!usb_get_descriptor(dev, USB_DT_STRING, Id,
381 &StringDesc, sizeof(StringDesc)))
382 return 0;
383
384 pStringDesc = kmalloc(StringDesc.bLength, GFP_KERNEL);
385 if (!pStringDesc)
386 return 0;
387
388 if (!usb_get_descriptor(dev, USB_DT_STRING, Id,
389 pStringDesc, StringDesc.bLength)) {
390 kfree(pStringDesc);
391 return 0;
392 }
393
394 unicode_to_ascii(string, buflen,
395 pStringDesc->wData, pStringDesc->bLength/2);
396
397 kfree(pStringDesc);
398 dbg("%s - USB String %s", __func__, string);
399 return strlen(string);
400}
401
402
403#if 0 367#if 0
404/************************************************************************ 368/************************************************************************
405 * 369 *
@@ -2007,7 +1971,7 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
2007 return; 1971 return;
2008 1972
2009 case IOSP_EXT_STATUS_RX_CHECK_RSP: 1973 case IOSP_EXT_STATUS_RX_CHECK_RSP:
2010 dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============\n", __func__, edge_serial->rxPort, byte3); 1974 dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============", __func__, edge_serial->rxPort, byte3);
2011 /* Port->RxCheckRsp = true; */ 1975 /* Port->RxCheckRsp = true; */
2012 return; 1976 return;
2013 } 1977 }
@@ -2075,7 +2039,7 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
2075 break; 2039 break;
2076 2040
2077 default: 2041 default:
2078 dbg("%s - Unrecognized IOSP status code %u\n", __func__, code); 2042 dbg("%s - Unrecognized IOSP status code %u", __func__, code);
2079 break; 2043 break;
2080 } 2044 }
2081 return; 2045 return;
@@ -2091,18 +2055,13 @@ static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
2091{ 2055{
2092 int cnt; 2056 int cnt;
2093 2057
2094 do { 2058 cnt = tty_insert_flip_string(tty, data, length);
2095 cnt = tty_buffer_request_room(tty, length); 2059 if (cnt < length) {
2096 if (cnt < length) { 2060 dev_err(dev, "%s - dropping data, %d bytes lost\n",
2097 dev_err(dev, "%s - dropping data, %d bytes lost\n", 2061 __func__, length - cnt);
2098 __func__, length - cnt); 2062 }
2099 if (cnt == 0) 2063 data += cnt;
2100 break; 2064 length -= cnt;
2101 }
2102 tty_insert_flip_string(tty, data, cnt);
2103 data += cnt;
2104 length -= cnt;
2105 } while (length > 0);
2106 2065
2107 tty_flip_buffer_push(tty); 2066 tty_flip_buffer_push(tty);
2108} 2067}
@@ -2530,7 +2489,7 @@ static int calc_baud_rate_divisor(int baudrate, int *divisor)
2530 2489
2531 *divisor = custom; 2490 *divisor = custom;
2532 2491
2533 dbg("%s - Baud %d = %d\n", __func__, baudrate, custom); 2492 dbg("%s - Baud %d = %d", __func__, baudrate, custom);
2534 return 0; 2493 return 0;
2535 } 2494 }
2536 2495
@@ -2915,7 +2874,7 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
2915 break; 2874 break;
2916 2875
2917 case EDGE_DOWNLOAD_FILE_NONE: 2876 case EDGE_DOWNLOAD_FILE_NONE:
2918 dbg ("No download file specified, skipping download\n"); 2877 dbg("No download file specified, skipping download");
2919 return; 2878 return;
2920 2879
2921 default: 2880 default:
@@ -2997,10 +2956,12 @@ static int edge_startup(struct usb_serial *serial)
2997 usb_set_serial_data(serial, edge_serial); 2956 usb_set_serial_data(serial, edge_serial);
2998 2957
2999 /* get the name for the device from the device */ 2958 /* get the name for the device from the device */
3000 i = get_string(dev, dev->descriptor.iManufacturer, 2959 i = usb_string(dev, dev->descriptor.iManufacturer,
3001 &edge_serial->name[0], MAX_NAME_LEN+1); 2960 &edge_serial->name[0], MAX_NAME_LEN+1);
2961 if (i < 0)
2962 i = 0;
3002 edge_serial->name[i++] = ' '; 2963 edge_serial->name[i++] = ' ';
3003 get_string(dev, dev->descriptor.iProduct, 2964 usb_string(dev, dev->descriptor.iProduct,
3004 &edge_serial->name[i], MAX_NAME_LEN+2 - i); 2965 &edge_serial->name[i], MAX_NAME_LEN+2 - i);
3005 2966
3006 dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name); 2967 dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name);
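The local get_string() helper removed above duplicated functionality the USB core already exports: usb_string() fetches a string descriptor, converts it to a usable C string, and returns the string length or a negative errno, which is why the caller now clamps a failure to zero before appending the product name. The replacement call, as it appears in the hunk:

i = usb_string(dev, dev->descriptor.iManufacturer,
	       &edge_serial->name[0], MAX_NAME_LEN + 1);
if (i < 0)		/* usb_string() returns length or -errno */
	i = 0;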
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 9241d3147513..feb56a4ca799 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -14,7 +14,7 @@
14#ifndef IO_TABLES_H 14#ifndef IO_TABLES_H
15#define IO_TABLES_H 15#define IO_TABLES_H
16 16
17static struct usb_device_id edgeport_2port_id_table [] = { 17static const struct usb_device_id edgeport_2port_id_table[] = {
18 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2) }, 18 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2) },
19 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2I) }, 19 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2I) },
20 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_421) }, 20 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_421) },
@@ -23,7 +23,7 @@ static struct usb_device_id edgeport_2port_id_table [] = {
23 { } 23 { }
24}; 24};
25 25
26static struct usb_device_id edgeport_4port_id_table [] = { 26static const struct usb_device_id edgeport_4port_id_table[] = {
27 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) }, 27 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
28 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) }, 28 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) },
29 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) }, 29 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) },
@@ -37,7 +37,7 @@ static struct usb_device_id edgeport_4port_id_table [] = {
37 { } 37 { }
38}; 38};
39 39
40static struct usb_device_id edgeport_8port_id_table [] = { 40static const struct usb_device_id edgeport_8port_id_table[] = {
41 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8) }, 41 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8) },
42 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU) }, 42 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU) },
43 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8I) }, 43 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8I) },
@@ -47,7 +47,7 @@ static struct usb_device_id edgeport_8port_id_table [] = {
47 { } 47 { }
48}; 48};
49 49
50static struct usb_device_id Epic_port_id_table [] = { 50static const struct usb_device_id Epic_port_id_table[] = {
51 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) }, 51 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) },
52 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) }, 52 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) },
53 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) }, 53 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) },
@@ -60,7 +60,7 @@ static struct usb_device_id Epic_port_id_table [] = {
60}; 60};
61 61
62/* Devices that this driver supports */ 62/* Devices that this driver supports */
63static struct usb_device_id id_table_combined [] = { 63static const struct usb_device_id id_table_combined[] = {
64 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) }, 64 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
65 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) }, 65 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) },
66 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) }, 66 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) },
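The io_tables.h changes are pure constification: USB device-ID tables are never written at run time, so declaring them const lets them be placed in read-only data, and MODULE_DEVICE_TABLE() works on const tables unchanged; the other id_table declarations below get the same treatment. A minimal sketch of the idiom (the IDs are made up):

    #include <linux/module.h>
    #include <linux/usb.h>

    #define EXAMPLE_VID 0x1234      /* hypothetical vendor ID */
    #define EXAMPLE_PID 0x5678      /* hypothetical product ID */

    static const struct usb_device_id example_id_table[] = {
            { USB_DEVICE(EXAMPLE_VID, EXAMPLE_PID) },
            { }                     /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, example_id_table);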
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index d4cc0f7af400..aa876f71f228 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -134,7 +134,7 @@ struct edgeport_serial {
134 134
135 135
136/* Devices that this driver supports */ 136/* Devices that this driver supports */
137static struct usb_device_id edgeport_1port_id_table [] = { 137static const struct usb_device_id edgeport_1port_id_table[] = {
138 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) }, 138 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
139 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) }, 139 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
140 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) }, 140 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
@@ -154,7 +154,7 @@ static struct usb_device_id edgeport_1port_id_table [] = {
154 { } 154 { }
155}; 155};
156 156
157static struct usb_device_id edgeport_2port_id_table [] = { 157static const struct usb_device_id edgeport_2port_id_table[] = {
158 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) }, 158 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
159 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) }, 159 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
160 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) }, 160 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
@@ -177,7 +177,7 @@ static struct usb_device_id edgeport_2port_id_table [] = {
177}; 177};
178 178
179/* Devices that this driver supports */ 179/* Devices that this driver supports */
180static struct usb_device_id id_table_combined [] = { 180static const struct usb_device_id id_table_combined[] = {
181 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) }, 181 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
182 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) }, 182 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
183 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) }, 183 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
@@ -413,11 +413,18 @@ static int write_boot_mem(struct edgeport_serial *serial,
413{ 413{
414 int status = 0; 414 int status = 0;
415 int i; 415 int i;
416 __u8 temp; 416 u8 *temp;
417 417
418 /* Must do a read before write */ 418 /* Must do a read before write */
419 if (!serial->TiReadI2C) { 419 if (!serial->TiReadI2C) {
420 status = read_boot_mem(serial, 0, 1, &temp); 420 temp = kmalloc(1, GFP_KERNEL);
421 if (!temp) {
422 dev_err(&serial->serial->dev->dev,
423 "%s - out of memory\n", __func__);
424 return -ENOMEM;
425 }
426 status = read_boot_mem(serial, 0, 1, temp);
427 kfree(temp);
421 if (status) 428 if (status)
422 return status; 429 return status;
423 } 430 }
@@ -935,37 +942,47 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
935static int i2c_type_bootmode(struct edgeport_serial *serial) 942static int i2c_type_bootmode(struct edgeport_serial *serial)
936{ 943{
937 int status; 944 int status;
938 __u8 data; 945 u8 *data;
946
947 data = kmalloc(1, GFP_KERNEL);
948 if (!data) {
949 dev_err(&serial->serial->dev->dev,
950 "%s - out of memory\n", __func__);
951 return -ENOMEM;
952 }
939 953
940 /* Try to read type 2 */ 954 /* Try to read type 2 */
941 status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ, 955 status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
942 DTK_ADDR_SPACE_I2C_TYPE_II, 0, &data, 0x01); 956 DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01);
943 if (status) 957 if (status)
944 dbg("%s - read 2 status error = %d", __func__, status); 958 dbg("%s - read 2 status error = %d", __func__, status);
945 else 959 else
946 dbg("%s - read 2 data = 0x%x", __func__, data); 960 dbg("%s - read 2 data = 0x%x", __func__, *data);
947 if ((!status) && (data == UMP5152 || data == UMP3410)) { 961 if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
948 dbg("%s - ROM_TYPE_II", __func__); 962 dbg("%s - ROM_TYPE_II", __func__);
949 serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II; 963 serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
950 return 0; 964 goto out;
951 } 965 }
952 966
953 /* Try to read type 3 */ 967 /* Try to read type 3 */
954 status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ, 968 status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
955 DTK_ADDR_SPACE_I2C_TYPE_III, 0, &data, 0x01); 969 DTK_ADDR_SPACE_I2C_TYPE_III, 0, data, 0x01);
956 if (status) 970 if (status)
957 dbg("%s - read 3 status error = %d", __func__, status); 971 dbg("%s - read 3 status error = %d", __func__, status);
958 else 972 else
959 dbg("%s - read 2 data = 0x%x", __func__, data); 973 dbg("%s - read 2 data = 0x%x", __func__, *data);
960 if ((!status) && (data == UMP5152 || data == UMP3410)) { 974 if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
961 dbg("%s - ROM_TYPE_III", __func__); 975 dbg("%s - ROM_TYPE_III", __func__);
962 serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III; 976 serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III;
963 return 0; 977 goto out;
964 } 978 }
965 979
966 dbg("%s - Unknown", __func__); 980 dbg("%s - Unknown", __func__);
967 serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II; 981 serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
968 return -ENODEV; 982 status = -ENODEV;
983out:
984 kfree(data);
985 return status;
969} 986}
970 987
971static int bulk_xfer(struct usb_serial *serial, void *buffer, 988static int bulk_xfer(struct usb_serial *serial, void *buffer,
@@ -1113,7 +1130,7 @@ static int download_fw(struct edgeport_serial *serial)
1113 I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc); 1130 I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc);
1114 if (start_address != 0) { 1131 if (start_address != 0) {
1115 struct ti_i2c_firmware_rec *firmware_version; 1132 struct ti_i2c_firmware_rec *firmware_version;
1116 __u8 record; 1133 u8 *record;
1117 1134
1118 dbg("%s - Found Type FIRMWARE (Type 2) record", 1135 dbg("%s - Found Type FIRMWARE (Type 2) record",
1119 __func__); 1136 __func__);
@@ -1165,6 +1182,15 @@ static int download_fw(struct edgeport_serial *serial)
1165 OperationalMajorVersion, 1182 OperationalMajorVersion,
1166 OperationalMinorVersion); 1183 OperationalMinorVersion);
1167 1184
1185 record = kmalloc(1, GFP_KERNEL);
1186 if (!record) {
1187 dev_err(dev, "%s - out of memory.\n",
1188 __func__);
1189 kfree(firmware_version);
1190 kfree(rom_desc);
1191 kfree(ti_manuf_desc);
1192 return -ENOMEM;
1193 }
1168 /* In order to update the I2C firmware we must 1194 /* In order to update the I2C firmware we must
1169 * change the type 2 record to type 0xF2. This 1195 * change the type 2 record to type 0xF2. This
1170 * will force the UMP to come up in Boot Mode. 1196 * will force the UMP to come up in Boot Mode.
@@ -1177,13 +1203,14 @@ static int download_fw(struct edgeport_serial *serial)
1177 * firmware will update the record type from 1203 * firmware will update the record type from
1178 * 0xf2 to 0x02. 1204 * 0xf2 to 0x02.
1179 */ 1205 */
1180 record = I2C_DESC_TYPE_FIRMWARE_BLANK; 1206 *record = I2C_DESC_TYPE_FIRMWARE_BLANK;
1181 1207
1182 /* Change the I2C Firmware record type to 1208 /* Change the I2C Firmware record type to
1183 0xf2 to trigger an update */ 1209 0xf2 to trigger an update */
1184 status = write_rom(serial, start_address, 1210 status = write_rom(serial, start_address,
1185 sizeof(record), &record); 1211 sizeof(*record), record);
1186 if (status) { 1212 if (status) {
1213 kfree(record);
1187 kfree(firmware_version); 1214 kfree(firmware_version);
1188 kfree(rom_desc); 1215 kfree(rom_desc);
1189 kfree(ti_manuf_desc); 1216 kfree(ti_manuf_desc);
@@ -1196,19 +1223,21 @@ static int download_fw(struct edgeport_serial *serial)
1196 */ 1223 */
1197 status = read_rom(serial, 1224 status = read_rom(serial,
1198 start_address, 1225 start_address,
1199 sizeof(record), 1226 sizeof(*record),
1200 &record); 1227 record);
1201 if (status) { 1228 if (status) {
1229 kfree(record);
1202 kfree(firmware_version); 1230 kfree(firmware_version);
1203 kfree(rom_desc); 1231 kfree(rom_desc);
1204 kfree(ti_manuf_desc); 1232 kfree(ti_manuf_desc);
1205 return status; 1233 return status;
1206 } 1234 }
1207 1235
1208 if (record != I2C_DESC_TYPE_FIRMWARE_BLANK) { 1236 if (*record != I2C_DESC_TYPE_FIRMWARE_BLANK) {
1209 dev_err(dev, 1237 dev_err(dev,
1210 "%s - error resetting device\n", 1238 "%s - error resetting device\n",
1211 __func__); 1239 __func__);
1240 kfree(record);
1212 kfree(firmware_version); 1241 kfree(firmware_version);
1213 kfree(rom_desc); 1242 kfree(rom_desc);
1214 kfree(ti_manuf_desc); 1243 kfree(ti_manuf_desc);
@@ -1226,6 +1255,7 @@ static int download_fw(struct edgeport_serial *serial)
1226 __func__, status); 1255 __func__, status);
1227 1256
1228 /* return an error on purpose. */ 1257 /* return an error on purpose. */
1258 kfree(record);
1229 kfree(firmware_version); 1259 kfree(firmware_version);
1230 kfree(rom_desc); 1260 kfree(rom_desc);
1231 kfree(ti_manuf_desc); 1261 kfree(ti_manuf_desc);
@@ -1686,7 +1716,7 @@ static void edge_interrupt_callback(struct urb *urb)
1686 case TIUMP_INTERRUPT_CODE_MSR: /* MSR */ 1716 case TIUMP_INTERRUPT_CODE_MSR: /* MSR */
1687 /* Copy MSR from UMP */ 1717 /* Copy MSR from UMP */
1688 msr = data[1]; 1718 msr = data[1];
1689 dbg("%s - ===== Port %u MSR Status = %02x ======\n", 1719 dbg("%s - ===== Port %u MSR Status = %02x ======",
1690 __func__, port_number, msr); 1720 __func__, port_number, msr);
1691 handle_new_msr(edge_port, msr); 1721 handle_new_msr(edge_port, msr);
1692 break; 1722 break;
@@ -1790,7 +1820,6 @@ static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
1790{ 1820{
1791 int queued; 1821 int queued;
1792 1822
1793 tty_buffer_request_room(tty, length);
1794 queued = tty_insert_flip_string(tty, data, length); 1823 queued = tty_insert_flip_string(tty, data, length);
1795 if (queued < length) 1824 if (queued < length)
1796 dev_err(dev, "%s - dropping data, %d bytes lost\n", 1825 dev_err(dev, "%s - dropping data, %d bytes lost\n",
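The io_ti.c changes above all follow one rule that recurs through the rest of the series: buffers handed to USB transfer functions must be DMA-able, which memory on the kernel stack is not guaranteed to be, so one-byte reads and small settings structs are moved to kmalloc'd memory and freed on every exit path. A minimal sketch of the pattern for a single-byte vendor read (the request code is hypothetical):

    #include <linux/slab.h>
    #include <linux/usb.h>

    #define EXAMPLE_VENDOR_READ 0x01        /* hypothetical request code */

    static int example_read_one_byte(struct usb_device *udev, u8 *out)
    {
            u8 *buf;
            int rc;

            /* Heap memory is DMA-able; a local variable on the stack is not
             * guaranteed to be, so allocate even for a single byte. */
            buf = kmalloc(1, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                 EXAMPLE_VENDOR_READ,
                                 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                                 0, 0, buf, 1, 2000);
            if (rc >= 0)
                    *out = *buf;

            kfree(buf);
            return rc < 0 ? rc : 0;
    }

The keyspan_pda, kl5kusb105, mct_u232 and mos7720 hunks below apply the same conversion to their control transfers.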
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index d6231c38813e..3fea9298eb15 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -747,7 +747,6 @@ static void ipaq_read_bulk_callback(struct urb *urb)
747 747
748 tty = tty_port_tty_get(&port->port); 748 tty = tty_port_tty_get(&port->port);
749 if (tty && urb->actual_length) { 749 if (tty && urb->actual_length) {
750 tty_buffer_request_room(tty, urb->actual_length);
751 tty_insert_flip_string(tty, data, urb->actual_length); 750 tty_insert_flip_string(tty, data, urb->actual_length);
752 tty_flip_buffer_push(tty); 751 tty_flip_buffer_push(tty);
753 bytes_in += urb->actual_length; 752 bytes_in += urb->actual_length;
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 727d323f092a..e1d07840cee6 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -134,7 +134,7 @@ enum {
134 134
135#define IPW_WANTS_TO_SEND 0x30 135#define IPW_WANTS_TO_SEND 0x30
136 136
137static struct usb_device_id usb_ipw_ids[] = { 137static const struct usb_device_id usb_ipw_ids[] = {
138 { USB_DEVICE(IPW_VID, IPW_PID) }, 138 { USB_DEVICE(IPW_VID, IPW_PID) },
139 { }, 139 { },
140}; 140};
@@ -172,7 +172,6 @@ static void ipw_read_bulk_callback(struct urb *urb)
172 172
173 tty = tty_port_tty_get(&port->port); 173 tty = tty_port_tty_get(&port->port);
174 if (tty && urb->actual_length) { 174 if (tty && urb->actual_length) {
175 tty_buffer_request_room(tty, urb->actual_length);
176 tty_insert_flip_string(tty, data, urb->actual_length); 175 tty_insert_flip_string(tty, data, urb->actual_length);
177 tty_flip_buffer_push(tty); 176 tty_flip_buffer_push(tty);
178 } 177 }
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 95d8d26b9a44..4a0f51974232 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -100,7 +100,7 @@ static u8 ir_baud;
100static u8 ir_xbof; 100static u8 ir_xbof;
101static u8 ir_add_bof; 101static u8 ir_add_bof;
102 102
103static struct usb_device_id ir_id_table[] = { 103static const struct usb_device_id ir_id_table[] = {
104 { USB_DEVICE(0x050f, 0x0180) }, /* KC Technology, KC-180 */ 104 { USB_DEVICE(0x050f, 0x0180) }, /* KC Technology, KC-180 */
105 { USB_DEVICE(0x08e9, 0x0100) }, /* XTNDAccess */ 105 { USB_DEVICE(0x08e9, 0x0100) }, /* XTNDAccess */
106 { USB_DEVICE(0x09c4, 0x0011) }, /* ACTiSys ACT-IR2000U */ 106 { USB_DEVICE(0x09c4, 0x0011) }, /* ACTiSys ACT-IR2000U */
@@ -445,11 +445,6 @@ static void ir_read_bulk_callback(struct urb *urb)
445 445
446 dbg("%s - port %d", __func__, port->number); 446 dbg("%s - port %d", __func__, port->number);
447 447
448 if (!port->port.count) {
449 dbg("%s - port closed.", __func__);
450 return;
451 }
452
453 switch (status) { 448 switch (status) {
454 case 0: /* Successful */ 449 case 0: /* Successful */
455 /* 450 /*
@@ -462,10 +457,8 @@ static void ir_read_bulk_callback(struct urb *urb)
462 usb_serial_debug_data(debug, &port->dev, __func__, 457 usb_serial_debug_data(debug, &port->dev, __func__,
463 urb->actual_length, data); 458 urb->actual_length, data);
464 tty = tty_port_tty_get(&port->port); 459 tty = tty_port_tty_get(&port->port);
465 if (tty_buffer_request_room(tty, urb->actual_length - 1)) { 460 tty_insert_flip_string(tty, data+1, urb->actual_length - 1);
466 tty_insert_flip_string(tty, data+1, urb->actual_length - 1); 461 tty_flip_buffer_push(tty);
467 tty_flip_buffer_push(tty);
468 }
469 tty_kref_put(tty); 462 tty_kref_put(tty);
470 463
471 /* 464 /*
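The deleted port->port.count tests in ir_read_bulk_callback() reflect another recurring cleanup: instead of peeking at the tty open count (which is racy from a completion handler), the callbacks rely on the reference-counted tty returned by tty_port_tty_get() and on the close path tearing the URBs down. A sketch of the reference discipline, assuming the usual usb-serial port layout:

    #include <linux/tty.h>
    #include <linux/tty_flip.h>
    #include <linux/usb/serial.h>

    static void example_push_to_tty(struct usb_serial_port *port,
                                    const unsigned char *data, int len)
    {
            struct tty_struct *tty;

            tty = tty_port_tty_get(&port->port);    /* takes a reference, may be NULL */
            if (tty && len) {
                    tty_insert_flip_string(tty, data, len);
                    tty_flip_buffer_push(tty);
            }
            tty_kref_put(tty);                      /* NULL-safe */
    }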
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index e6e02b178d2b..43f13cf2f016 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -43,7 +43,7 @@ static int debug;
43#define DRIVER_VERSION "v0.11" 43#define DRIVER_VERSION "v0.11"
44#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver" 44#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver"
45 45
46static struct usb_device_id id_table[] = { 46static const struct usb_device_id id_table[] = {
47 {USB_DEVICE(IUU_USB_VENDOR_ID, IUU_USB_PRODUCT_ID)}, 47 {USB_DEVICE(IUU_USB_VENDOR_ID, IUU_USB_PRODUCT_ID)},
48 {} /* Terminating entry */ 48 {} /* Terminating entry */
49}; 49};
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index f8c4b07033ff..297163c3c610 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -464,13 +464,9 @@ static void usa26_indat_callback(struct urb *urb)
464 464
465 /* Resubmit urb so we continue receiving */ 465 /* Resubmit urb so we continue receiving */
466 urb->dev = port->serial->dev; 466 urb->dev = port->serial->dev;
467 if (port->port.count) { 467 err = usb_submit_urb(urb, GFP_ATOMIC);
468 err = usb_submit_urb(urb, GFP_ATOMIC); 468 if (err != 0)
469 if (err != 0) 469 dbg("%s - resubmit read urb failed. (%d)", __func__, err);
470 dbg("%s - resubmit read urb failed. (%d)",
471 __func__, err);
472 }
473 return;
474} 470}
475 471
476/* Outdat handling is common for all devices */ 472/* Outdat handling is common for all devices */
@@ -483,8 +479,7 @@ static void usa2x_outdat_callback(struct urb *urb)
483 p_priv = usb_get_serial_port_data(port); 479 p_priv = usb_get_serial_port_data(port);
484 dbg("%s - urb %d", __func__, urb == p_priv->out_urbs[1]); 480 dbg("%s - urb %d", __func__, urb == p_priv->out_urbs[1]);
485 481
486 if (port->port.count) 482 usb_serial_port_softint(port);
487 usb_serial_port_softint(port);
488} 483}
489 484
490static void usa26_inack_callback(struct urb *urb) 485static void usa26_inack_callback(struct urb *urb)
@@ -615,12 +610,10 @@ static void usa28_indat_callback(struct urb *urb)
615 610
616 /* Resubmit urb so we continue receiving */ 611 /* Resubmit urb so we continue receiving */
617 urb->dev = port->serial->dev; 612 urb->dev = port->serial->dev;
618 if (port->port.count) { 613 err = usb_submit_urb(urb, GFP_ATOMIC);
619 err = usb_submit_urb(urb, GFP_ATOMIC); 614 if (err != 0)
620 if (err != 0) 615 dbg("%s - resubmit read urb failed. (%d)",
621 dbg("%s - resubmit read urb failed. (%d)", 616 __func__, err);
622 __func__, err);
623 }
624 p_priv->in_flip ^= 1; 617 p_priv->in_flip ^= 1;
625 618
626 urb = p_priv->in_urbs[p_priv->in_flip]; 619 urb = p_priv->in_urbs[p_priv->in_flip];
@@ -856,12 +849,9 @@ static void usa49_indat_callback(struct urb *urb)
856 849
857 /* Resubmit urb so we continue receiving */ 850 /* Resubmit urb so we continue receiving */
858 urb->dev = port->serial->dev; 851 urb->dev = port->serial->dev;
859 if (port->port.count) { 852 err = usb_submit_urb(urb, GFP_ATOMIC);
860 err = usb_submit_urb(urb, GFP_ATOMIC); 853 if (err != 0)
861 if (err != 0) 854 dbg("%s - resubmit read urb failed. (%d)", __func__, err);
862 dbg("%s - resubmit read urb failed. (%d)",
863 __func__, err);
864 }
865} 855}
866 856
867static void usa49wg_indat_callback(struct urb *urb) 857static void usa49wg_indat_callback(struct urb *urb)
@@ -904,11 +894,7 @@ static void usa49wg_indat_callback(struct urb *urb)
904 /* no error on any byte */ 894 /* no error on any byte */
905 i++; 895 i++;
906 for (x = 1; x < len ; ++x) 896 for (x = 1; x < len ; ++x)
907 if (port->port.count) 897 tty_insert_flip_char(tty, data[i++], 0);
908 tty_insert_flip_char(tty,
909 data[i++], 0);
910 else
911 i++;
912 } else { 898 } else {
913 /* 899 /*
914 * some bytes had errors, every byte has status 900 * some bytes had errors, every byte has status
@@ -922,14 +908,12 @@ static void usa49wg_indat_callback(struct urb *urb)
922 if (stat & RXERROR_PARITY) 908 if (stat & RXERROR_PARITY)
923 flag |= TTY_PARITY; 909 flag |= TTY_PARITY;
924 /* XXX should handle break (0x10) */ 910 /* XXX should handle break (0x10) */
925 if (port->port.count) 911 tty_insert_flip_char(tty,
926 tty_insert_flip_char(tty,
927 data[i+1], flag); 912 data[i+1], flag);
928 i += 2; 913 i += 2;
929 } 914 }
930 } 915 }
931 if (port->port.count) 916 tty_flip_buffer_push(tty);
932 tty_flip_buffer_push(tty);
933 tty_kref_put(tty); 917 tty_kref_put(tty);
934 } 918 }
935 } 919 }
@@ -1013,13 +997,9 @@ static void usa90_indat_callback(struct urb *urb)
1013 997
1014 /* Resubmit urb so we continue receiving */ 998 /* Resubmit urb so we continue receiving */
1015 urb->dev = port->serial->dev; 999 urb->dev = port->serial->dev;
1016 if (port->port.count) { 1000 err = usb_submit_urb(urb, GFP_ATOMIC);
1017 err = usb_submit_urb(urb, GFP_ATOMIC); 1001 if (err != 0)
1018 if (err != 0) 1002 dbg("%s - resubmit read urb failed. (%d)", __func__, err);
1019 dbg("%s - resubmit read urb failed. (%d)",
1020 __func__, err);
1021 }
1022 return;
1023} 1003}
1024 1004
1025 1005
@@ -2418,8 +2398,7 @@ static int keyspan_usa90_send_setup(struct usb_serial *serial,
2418 msg.portEnabled = 0; 2398 msg.portEnabled = 0;
2419 /* Sending intermediate configs */ 2399 /* Sending intermediate configs */
2420 else { 2400 else {
2421 if (port->port.count) 2401 msg.portEnabled = 1;
2422 msg.portEnabled = 1;
2423 msg.txBreak = (p_priv->break_on); 2402 msg.txBreak = (p_priv->break_on);
2424 } 2403 }
2425 2404
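The keyspan completion handlers lose the same open-count guard around URB resubmission: the read URB is requeued unconditionally, on the assumption that the close path kills it rather than the callback second-guessing whether the port is still open. A minimal sketch of that handler tail (logging via dev_err() here rather than the driver's dbg()):

    #include <linux/usb.h>

    /* Tail of a read completion handler: requeue the URB so the device keeps
     * streaming; the driver's close path is expected to usb_kill_urb() it. */
    static void example_resubmit_read_urb(struct urb *urb, struct usb_device *udev)
    {
            int err;

            urb->dev = udev;
            err = usb_submit_urb(urb, GFP_ATOMIC);  /* completion context: no sleeping */
            if (err)
                    dev_err(&udev->dev, "resubmit read urb failed: %d\n", err);
    }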
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 30771e5b3973..bf3297ddd186 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -456,7 +456,7 @@ static const struct keyspan_device_details *keyspan_devices[] = {
456 NULL, 456 NULL,
457}; 457};
458 458
459static struct usb_device_id keyspan_ids_combined[] = { 459static const struct usb_device_id keyspan_ids_combined[] = {
460 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) }, 460 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
461 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) }, 461 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
462 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_pre_product_id) }, 462 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_pre_product_id) },
@@ -497,7 +497,7 @@ static struct usb_driver keyspan_driver = {
497}; 497};
498 498
499/* usb_device_id table for the pre-firmware download keyspan devices */ 499/* usb_device_id table for the pre-firmware download keyspan devices */
500static struct usb_device_id keyspan_pre_ids[] = { 500static const struct usb_device_id keyspan_pre_ids[] = {
501 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) }, 501 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
502 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) }, 502 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
503 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_pre_product_id) }, 503 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_pre_product_id) },
@@ -513,7 +513,7 @@ static struct usb_device_id keyspan_pre_ids[] = {
513 { } /* Terminating entry */ 513 { } /* Terminating entry */
514}; 514};
515 515
516static struct usb_device_id keyspan_1port_ids[] = { 516static const struct usb_device_id keyspan_1port_ids[] = {
517 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_product_id) }, 517 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_product_id) },
518 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_product_id) }, 518 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_product_id) },
519 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_product_id) }, 519 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_product_id) },
@@ -524,7 +524,7 @@ static struct usb_device_id keyspan_1port_ids[] = {
524 { } /* Terminating entry */ 524 { } /* Terminating entry */
525}; 525};
526 526
527static struct usb_device_id keyspan_2port_ids[] = { 527static const struct usb_device_id keyspan_2port_ids[] = {
528 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) }, 528 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
529 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) }, 529 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
530 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) }, 530 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) },
@@ -532,7 +532,7 @@ static struct usb_device_id keyspan_2port_ids[] = {
532 { } /* Terminating entry */ 532 { } /* Terminating entry */
533}; 533};
534 534
535static struct usb_device_id keyspan_4port_ids[] = { 535static const struct usb_device_id keyspan_4port_ids[] = {
536 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id) }, 536 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id) },
537 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)}, 537 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)},
538 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wg_product_id)}, 538 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wg_product_id)},
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 1296a097f5c3..185fe9a7d4e0 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -125,7 +125,7 @@ struct keyspan_pda_private {
125#define ENTREGRA_VENDOR_ID 0x1645 125#define ENTREGRA_VENDOR_ID 0x1645
126#define ENTREGRA_FAKE_ID 0x8093 126#define ENTREGRA_FAKE_ID 0x8093
127 127
128static struct usb_device_id id_table_combined [] = { 128static const struct usb_device_id id_table_combined[] = {
129#ifdef KEYSPAN 129#ifdef KEYSPAN
130 { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) }, 130 { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
131#endif 131#endif
@@ -147,20 +147,20 @@ static struct usb_driver keyspan_pda_driver = {
147 .no_dynamic_id = 1, 147 .no_dynamic_id = 1,
148}; 148};
149 149
150static struct usb_device_id id_table_std [] = { 150static const struct usb_device_id id_table_std[] = {
151 { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) }, 151 { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
152 { } /* Terminating entry */ 152 { } /* Terminating entry */
153}; 153};
154 154
155#ifdef KEYSPAN 155#ifdef KEYSPAN
156static struct usb_device_id id_table_fake [] = { 156static const struct usb_device_id id_table_fake[] = {
157 { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) }, 157 { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
158 { } /* Terminating entry */ 158 { } /* Terminating entry */
159}; 159};
160#endif 160#endif
161 161
162#ifdef XIRCOM 162#ifdef XIRCOM
163static struct usb_device_id id_table_fake_xircom [] = { 163static const struct usb_device_id id_table_fake_xircom[] = {
164 { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, 164 { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
165 { USB_DEVICE(ENTREGRA_VENDOR_ID, ENTREGRA_FAKE_ID) }, 165 { USB_DEVICE(ENTREGRA_VENDOR_ID, ENTREGRA_FAKE_ID) },
166 { } 166 { }
@@ -429,13 +429,20 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
429 unsigned char *value) 429 unsigned char *value)
430{ 430{
431 int rc; 431 int rc;
432 unsigned char data; 432 u8 *data;
433
434 data = kmalloc(1, GFP_KERNEL);
435 if (!data)
436 return -ENOMEM;
437
433 rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 438 rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
434 3, /* get pins */ 439 3, /* get pins */
435 USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN, 440 USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
436 0, 0, &data, 1, 2000); 441 0, 0, data, 1, 2000);
437 if (rc >= 0) 442 if (rc >= 0)
438 *value = data; 443 *value = *data;
444
445 kfree(data);
439 return rc; 446 return rc;
440} 447}
441 448
@@ -543,7 +550,14 @@ static int keyspan_pda_write(struct tty_struct *tty,
543 device how much room it really has. This is done only on 550 device how much room it really has. This is done only on
544 scheduler time, since usb_control_msg() sleeps. */ 551 scheduler time, since usb_control_msg() sleeps. */
545 if (count > priv->tx_room && !in_interrupt()) { 552 if (count > priv->tx_room && !in_interrupt()) {
546 unsigned char room; 553 u8 *room;
554
555 room = kmalloc(1, GFP_KERNEL);
556 if (!room) {
557 rc = -ENOMEM;
558 goto exit;
559 }
560
547 rc = usb_control_msg(serial->dev, 561 rc = usb_control_msg(serial->dev,
548 usb_rcvctrlpipe(serial->dev, 0), 562 usb_rcvctrlpipe(serial->dev, 0),
549 6, /* write_room */ 563 6, /* write_room */
@@ -551,9 +565,14 @@ static int keyspan_pda_write(struct tty_struct *tty,
551 | USB_DIR_IN, 565 | USB_DIR_IN,
552 0, /* value: 0 means "remaining room" */ 566 0, /* value: 0 means "remaining room" */
553 0, /* index */ 567 0, /* index */
554 &room, 568 room,
555 1, 569 1,
556 2000); 570 2000);
571 if (rc > 0) {
572 dbg(" roomquery says %d", *room);
573 priv->tx_room = *room;
574 }
575 kfree(room);
557 if (rc < 0) { 576 if (rc < 0) {
558 dbg(" roomquery failed"); 577 dbg(" roomquery failed");
559 goto exit; 578 goto exit;
@@ -563,8 +582,6 @@ static int keyspan_pda_write(struct tty_struct *tty,
563 rc = -EIO; /* device didn't return any data */ 582 rc = -EIO; /* device didn't return any data */
564 goto exit; 583 goto exit;
565 } 584 }
566 dbg(" roomquery says %d", room);
567 priv->tx_room = room;
568 } 585 }
569 if (count > priv->tx_room) { 586 if (count > priv->tx_room) {
570 /* we're about to completely fill the Tx buffer, so 587 /* we're about to completely fill the Tx buffer, so
@@ -684,18 +701,22 @@ static int keyspan_pda_open(struct tty_struct *tty,
684 struct usb_serial_port *port) 701 struct usb_serial_port *port)
685{ 702{
686 struct usb_serial *serial = port->serial; 703 struct usb_serial *serial = port->serial;
687 unsigned char room; 704 u8 *room;
688 int rc = 0; 705 int rc = 0;
689 struct keyspan_pda_private *priv; 706 struct keyspan_pda_private *priv;
690 707
691 /* find out how much room is in the Tx ring */ 708 /* find out how much room is in the Tx ring */
709 room = kmalloc(1, GFP_KERNEL);
710 if (!room)
711 return -ENOMEM;
712
692 rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 713 rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
693 6, /* write_room */ 714 6, /* write_room */
694 USB_TYPE_VENDOR | USB_RECIP_INTERFACE 715 USB_TYPE_VENDOR | USB_RECIP_INTERFACE
695 | USB_DIR_IN, 716 | USB_DIR_IN,
696 0, /* value */ 717 0, /* value */
697 0, /* index */ 718 0, /* index */
698 &room, 719 room,
699 1, 720 1,
700 2000); 721 2000);
701 if (rc < 0) { 722 if (rc < 0) {
@@ -708,8 +729,8 @@ static int keyspan_pda_open(struct tty_struct *tty,
708 goto error; 729 goto error;
709 } 730 }
710 priv = usb_get_serial_port_data(port); 731 priv = usb_get_serial_port_data(port);
711 priv->tx_room = room; 732 priv->tx_room = *room;
712 priv->tx_throttled = room ? 0 : 1; 733 priv->tx_throttled = *room ? 0 : 1;
713 734
714 /*Start reading from the device*/ 735 /*Start reading from the device*/
715 port->interrupt_in_urb->dev = serial->dev; 736 port->interrupt_in_urb->dev = serial->dev;
@@ -718,8 +739,8 @@ static int keyspan_pda_open(struct tty_struct *tty,
718 dbg("%s - usb_submit_urb(read int) failed", __func__); 739 dbg("%s - usb_submit_urb(read int) failed", __func__);
719 goto error; 740 goto error;
720 } 741 }
721
722error: 742error:
743 kfree(room);
723 return rc; 744 return rc;
724} 745}
725static void keyspan_pda_close(struct usb_serial_port *port) 746static void keyspan_pda_close(struct usb_serial_port *port)
@@ -789,6 +810,13 @@ static int keyspan_pda_fake_startup(struct usb_serial *serial)
789 return 1; 810 return 1;
790} 811}
791 812
813#ifdef KEYSPAN
814MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
815#endif
816#ifdef XIRCOM
817MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
818#endif
819
792static int keyspan_pda_startup(struct usb_serial *serial) 820static int keyspan_pda_startup(struct usb_serial *serial)
793{ 821{
794 822
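The MODULE_FIRMWARE() lines added to keyspan_pda.c only record the firmware file names in the module's metadata (so modinfo and initramfs tooling can see them); loading still happens through request_firmware() elsewhere in the driver. A small sketch of how the two halves pair up, using the file name from the patch:

    #include <linux/device.h>
    #include <linux/firmware.h>
    #include <linux/module.h>

    MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");

    static int example_load_firmware(struct device *dev)
    {
            const struct firmware *fw;
            int err;

            err = request_firmware(&fw, "keyspan_pda/keyspan_pda.fw", dev);
            if (err)
                    return err;

            /* ... download fw->data (fw->size bytes) to the device here ... */

            release_firmware(fw);
            return 0;
    }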
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 3a7873806f46..8eef91ba4b1c 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -94,7 +94,7 @@ static int klsi_105_tiocmset(struct tty_struct *tty, struct file *file,
94/* 94/*
95 * All of the device info needed for the KLSI converters. 95 * All of the device info needed for the KLSI converters.
96 */ 96 */
97static struct usb_device_id id_table [] = { 97static const struct usb_device_id id_table[] = {
98 { USB_DEVICE(PALMCONNECT_VID, PALMCONNECT_PID) }, 98 { USB_DEVICE(PALMCONNECT_VID, PALMCONNECT_PID) },
99 { USB_DEVICE(KLSI_VID, KLSI_KL5KUSB105D_PID) }, 99 { USB_DEVICE(KLSI_VID, KLSI_KL5KUSB105D_PID) },
100 { } /* Terminating entry */ 100 { } /* Terminating entry */
@@ -212,10 +212,19 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
212 unsigned long *line_state_p) 212 unsigned long *line_state_p)
213{ 213{
214 int rc; 214 int rc;
215 __u8 status_buf[KLSI_STATUSBUF_LEN] = { -1, -1}; 215 u8 *status_buf;
216 __u16 status; 216 __u16 status;
217 217
218 dev_info(&port->serial->dev->dev, "sending SIO Poll request\n"); 218 dev_info(&port->serial->dev->dev, "sending SIO Poll request\n");
219
220 status_buf = kmalloc(KLSI_STATUSBUF_LEN, GFP_KERNEL);
221 if (!status_buf) {
222 dev_err(&port->dev, "%s - out of memory for status buffer.\n",
223 __func__);
224 return -ENOMEM;
225 }
226 status_buf[0] = 0xff;
227 status_buf[1] = 0xff;
219 rc = usb_control_msg(port->serial->dev, 228 rc = usb_control_msg(port->serial->dev,
220 usb_rcvctrlpipe(port->serial->dev, 0), 229 usb_rcvctrlpipe(port->serial->dev, 0),
221 KL5KUSB105A_SIO_POLL, 230 KL5KUSB105A_SIO_POLL,
@@ -236,6 +245,8 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
236 245
237 *line_state_p = klsi_105_status2linestate(status); 246 *line_state_p = klsi_105_status2linestate(status);
238 } 247 }
248
249 kfree(status_buf);
239 return rc; 250 return rc;
240} 251}
241 252
@@ -364,7 +375,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
364 int rc; 375 int rc;
365 int i; 376 int i;
366 unsigned long line_state; 377 unsigned long line_state;
367 struct klsi_105_port_settings cfg; 378 struct klsi_105_port_settings *cfg;
368 unsigned long flags; 379 unsigned long flags;
369 380
370 dbg("%s port %d", __func__, port->number); 381 dbg("%s port %d", __func__, port->number);
@@ -376,12 +387,18 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
376 * Then read the modem line control and store values in 387 * Then read the modem line control and store values in
377 * priv->line_state. 388 * priv->line_state.
378 */ 389 */
379 cfg.pktlen = 5; 390 cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
380 cfg.baudrate = kl5kusb105a_sio_b9600; 391 if (!cfg) {
381 cfg.databits = kl5kusb105a_dtb_8; 392 dev_err(&port->dev, "%s - out of memory for config buffer.\n",
382 cfg.unknown1 = 0; 393 __func__);
383 cfg.unknown2 = 1; 394 return -ENOMEM;
384 klsi_105_chg_port_settings(port, &cfg); 395 }
396 cfg->pktlen = 5;
397 cfg->baudrate = kl5kusb105a_sio_b9600;
398 cfg->databits = kl5kusb105a_dtb_8;
399 cfg->unknown1 = 0;
400 cfg->unknown2 = 1;
401 klsi_105_chg_port_settings(port, cfg);
385 402
386 /* set up termios structure */ 403 /* set up termios structure */
387 spin_lock_irqsave(&priv->lock, flags); 404 spin_lock_irqsave(&priv->lock, flags);
@@ -391,11 +408,11 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
391 priv->termios.c_lflag = tty->termios->c_lflag; 408 priv->termios.c_lflag = tty->termios->c_lflag;
392 for (i = 0; i < NCCS; i++) 409 for (i = 0; i < NCCS; i++)
393 priv->termios.c_cc[i] = tty->termios->c_cc[i]; 410 priv->termios.c_cc[i] = tty->termios->c_cc[i];
394 priv->cfg.pktlen = cfg.pktlen; 411 priv->cfg.pktlen = cfg->pktlen;
395 priv->cfg.baudrate = cfg.baudrate; 412 priv->cfg.baudrate = cfg->baudrate;
396 priv->cfg.databits = cfg.databits; 413 priv->cfg.databits = cfg->databits;
397 priv->cfg.unknown1 = cfg.unknown1; 414 priv->cfg.unknown1 = cfg->unknown1;
398 priv->cfg.unknown2 = cfg.unknown2; 415 priv->cfg.unknown2 = cfg->unknown2;
399 spin_unlock_irqrestore(&priv->lock, flags); 416 spin_unlock_irqrestore(&priv->lock, flags);
400 417
401 /* READ_ON and urb submission */ 418 /* READ_ON and urb submission */
@@ -441,6 +458,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
441 retval = rc; 458 retval = rc;
442 459
443exit: 460exit:
461 kfree(cfg);
444 return retval; 462 return retval;
445} /* klsi_105_open */ 463} /* klsi_105_open */
446 464
@@ -681,7 +699,6 @@ static void klsi_105_read_bulk_callback(struct urb *urb)
681 bytes_sent = urb->actual_length - 2; 699 bytes_sent = urb->actual_length - 2;
682 } 700 }
683 701
684 tty_buffer_request_room(tty, bytes_sent);
685 tty_insert_flip_string(tty, data + 2, bytes_sent); 702 tty_insert_flip_string(tty, data + 2, bytes_sent);
686 tty_flip_buffer_push(tty); 703 tty_flip_buffer_push(tty);
687 tty_kref_put(tty); 704 tty_kref_put(tty);
@@ -714,10 +731,17 @@ static void klsi_105_set_termios(struct tty_struct *tty,
714 unsigned int old_iflag = old_termios->c_iflag; 731 unsigned int old_iflag = old_termios->c_iflag;
715 unsigned int cflag = tty->termios->c_cflag; 732 unsigned int cflag = tty->termios->c_cflag;
716 unsigned int old_cflag = old_termios->c_cflag; 733 unsigned int old_cflag = old_termios->c_cflag;
717 struct klsi_105_port_settings cfg; 734 struct klsi_105_port_settings *cfg;
718 unsigned long flags; 735 unsigned long flags;
719 speed_t baud; 736 speed_t baud;
720 737
738 cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
739 if (!cfg) {
740 dev_err(&port->dev, "%s - out of memory for config buffer.\n",
741 __func__);
742 return;
743 }
744
721 /* lock while we are modifying the settings */ 745 /* lock while we are modifying the settings */
722 spin_lock_irqsave(&priv->lock, flags); 746 spin_lock_irqsave(&priv->lock, flags);
723 747
@@ -793,11 +817,11 @@ static void klsi_105_set_termios(struct tty_struct *tty,
793 case CS5: 817 case CS5:
794 dbg("%s - 5 bits/byte not supported", __func__); 818 dbg("%s - 5 bits/byte not supported", __func__);
795 spin_unlock_irqrestore(&priv->lock, flags); 819 spin_unlock_irqrestore(&priv->lock, flags);
796 return ; 820 goto err;
797 case CS6: 821 case CS6:
798 dbg("%s - 6 bits/byte not supported", __func__); 822 dbg("%s - 6 bits/byte not supported", __func__);
799 spin_unlock_irqrestore(&priv->lock, flags); 823 spin_unlock_irqrestore(&priv->lock, flags);
800 return ; 824 goto err;
801 case CS7: 825 case CS7:
802 priv->cfg.databits = kl5kusb105a_dtb_7; 826 priv->cfg.databits = kl5kusb105a_dtb_7;
803 break; 827 break;
@@ -856,11 +880,13 @@ static void klsi_105_set_termios(struct tty_struct *tty,
856#endif 880#endif
857 ; 881 ;
858 } 882 }
859 memcpy(&cfg, &priv->cfg, sizeof(cfg)); 883 memcpy(cfg, &priv->cfg, sizeof(*cfg));
860 spin_unlock_irqrestore(&priv->lock, flags); 884 spin_unlock_irqrestore(&priv->lock, flags);
861 885
862 /* now commit changes to device */ 886 /* now commit changes to device */
863 klsi_105_chg_port_settings(port, &cfg); 887 klsi_105_chg_port_settings(port, cfg);
888err:
889 kfree(cfg);
864} /* klsi_105_set_termios */ 890} /* klsi_105_set_termios */
865 891
866 892
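With the klsi_105 settings struct moved to the heap, the early returns in the unsupported CS5/CS6 cases become goto err so that every path frees the buffer. A minimal sketch of that single-exit shape (the struct and the check are stand-ins):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct example_cfg {            /* stand-in for klsi_105_port_settings */
            u8 pktlen;
            u8 baudrate;
            u8 databits;
    };

    static int example_apply_cfg(int databits)
    {
            struct example_cfg *cfg;
            int ret = 0;

            cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
            if (!cfg)
                    return -ENOMEM;

            if (databits != 7 && databits != 8) {
                    ret = -EINVAL;  /* unsupported: leave through the free */
                    goto err;
            }

            cfg->databits = databits;
            /* ... commit cfg to the device here ... */
    err:
            kfree(cfg);             /* single exit: every path releases the buffer */
            return ret;
    }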
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 45ea694b3ae6..c113a2a0e10c 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -86,7 +86,7 @@ static void kobil_set_termios(struct tty_struct *tty,
86 struct usb_serial_port *port, struct ktermios *old); 86 struct usb_serial_port *port, struct ktermios *old);
87static void kobil_init_termios(struct tty_struct *tty); 87static void kobil_init_termios(struct tty_struct *tty);
88 88
89static struct usb_device_id id_table [] = { 89static const struct usb_device_id id_table[] = {
90 { USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_B_PRODUCT_ID) }, 90 { USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_B_PRODUCT_ID) },
91 { USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_K_PRODUCT_ID) }, 91 { USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_K_PRODUCT_ID) },
92 { USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_USBTWIN_PRODUCT_ID) }, 92 { USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_USBTWIN_PRODUCT_ID) },
@@ -388,7 +388,6 @@ static void kobil_read_int_callback(struct urb *urb)
388 */ 388 */
389 /* END DEBUG */ 389 /* END DEBUG */
390 390
391 tty_buffer_request_room(tty, urb->actual_length);
392 tty_insert_flip_string(tty, data, urb->actual_length); 391 tty_insert_flip_string(tty, data, urb->actual_length);
393 tty_flip_buffer_push(tty); 392 tty_flip_buffer_push(tty);
394 } 393 }
@@ -624,7 +623,6 @@ static void kobil_set_termios(struct tty_struct *tty,
624 unsigned short urb_val = 0; 623 unsigned short urb_val = 0;
625 int c_cflag = tty->termios->c_cflag; 624 int c_cflag = tty->termios->c_cflag;
626 speed_t speed; 625 speed_t speed;
627 void *settings;
628 626
629 priv = usb_get_serial_port_data(port); 627 priv = usb_get_serial_port_data(port);
630 if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID || 628 if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID ||
@@ -647,25 +645,13 @@ static void kobil_set_termios(struct tty_struct *tty,
647 } 645 }
648 urb_val |= (c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits : 646 urb_val |= (c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits :
649 SUSBCR_SPASB_1StopBit; 647 SUSBCR_SPASB_1StopBit;
650
651 settings = kzalloc(50, GFP_KERNEL);
652 if (!settings)
653 return;
654
655 sprintf(settings, "%d ", speed);
656
657 if (c_cflag & PARENB) { 648 if (c_cflag & PARENB) {
658 if (c_cflag & PARODD) { 649 if (c_cflag & PARODD)
659 urb_val |= SUSBCR_SPASB_OddParity; 650 urb_val |= SUSBCR_SPASB_OddParity;
660 strcat(settings, "Odd Parity"); 651 else
661 } else {
662 urb_val |= SUSBCR_SPASB_EvenParity; 652 urb_val |= SUSBCR_SPASB_EvenParity;
663 strcat(settings, "Even Parity"); 653 } else
664 }
665 } else {
666 urb_val |= SUSBCR_SPASB_NoParity; 654 urb_val |= SUSBCR_SPASB_NoParity;
667 strcat(settings, "No Parity");
668 }
669 tty->termios->c_cflag &= ~CMSPAR; 655 tty->termios->c_cflag &= ~CMSPAR;
670 tty_encode_baud_rate(tty, speed, speed); 656 tty_encode_baud_rate(tty, speed, speed);
671 657
@@ -675,11 +661,10 @@ static void kobil_set_termios(struct tty_struct *tty,
675 USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, 661 USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
676 urb_val, 662 urb_val,
677 0, 663 0,
678 settings, 664 NULL,
679 0, 665 0,
680 KOBIL_TIMEOUT 666 KOBIL_TIMEOUT
681 ); 667 );
682 kfree(settings);
683} 668}
684 669
685static int kobil_ioctl(struct tty_struct *tty, struct file *file, 670static int kobil_ioctl(struct tty_struct *tty, struct file *file,
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index cd009cb280a5..2849f8c32015 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -75,6 +75,7 @@
75#include <linux/module.h> 75#include <linux/module.h>
76#include <linux/spinlock.h> 76#include <linux/spinlock.h>
77#include <linux/uaccess.h> 77#include <linux/uaccess.h>
78#include <asm/unaligned.h>
78#include <linux/usb.h> 79#include <linux/usb.h>
79#include <linux/usb/serial.h> 80#include <linux/usb/serial.h>
80#include "mct_u232.h" 81#include "mct_u232.h"
@@ -110,7 +111,7 @@ static void mct_u232_unthrottle(struct tty_struct *tty);
110/* 111/*
111 * All of the device info needed for the MCT USB-RS232 converter. 112 * All of the device info needed for the MCT USB-RS232 converter.
112 */ 113 */
113static struct usb_device_id id_table_combined [] = { 114static const struct usb_device_id id_table_combined[] = {
114 { USB_DEVICE(MCT_U232_VID, MCT_U232_PID) }, 115 { USB_DEVICE(MCT_U232_VID, MCT_U232_PID) },
115 { USB_DEVICE(MCT_U232_VID, MCT_U232_SITECOM_PID) }, 116 { USB_DEVICE(MCT_U232_VID, MCT_U232_SITECOM_PID) },
116 { USB_DEVICE(MCT_U232_VID, MCT_U232_DU_H3SP_PID) }, 117 { USB_DEVICE(MCT_U232_VID, MCT_U232_DU_H3SP_PID) },
@@ -231,19 +232,22 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial,
231static int mct_u232_set_baud_rate(struct tty_struct *tty, 232static int mct_u232_set_baud_rate(struct tty_struct *tty,
232 struct usb_serial *serial, struct usb_serial_port *port, speed_t value) 233 struct usb_serial *serial, struct usb_serial_port *port, speed_t value)
233{ 234{
234 __le32 divisor; 235 unsigned int divisor;
235 int rc; 236 int rc;
236 unsigned char zero_byte = 0; 237 unsigned char *buf;
237 unsigned char cts_enable_byte = 0; 238 unsigned char cts_enable_byte = 0;
238 speed_t speed; 239 speed_t speed;
239 240
240 divisor = cpu_to_le32(mct_u232_calculate_baud_rate(serial, value, 241 buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
241 &speed)); 242 if (buf == NULL)
243 return -ENOMEM;
242 244
245 divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
246 put_unaligned_le32(cpu_to_le32(divisor), buf);
243 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 247 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
244 MCT_U232_SET_BAUD_RATE_REQUEST, 248 MCT_U232_SET_BAUD_RATE_REQUEST,
245 MCT_U232_SET_REQUEST_TYPE, 249 MCT_U232_SET_REQUEST_TYPE,
246 0, 0, &divisor, MCT_U232_SET_BAUD_RATE_SIZE, 250 0, 0, buf, MCT_U232_SET_BAUD_RATE_SIZE,
247 WDR_TIMEOUT); 251 WDR_TIMEOUT);
248 if (rc < 0) /*FIXME: What value speed results */ 252 if (rc < 0) /*FIXME: What value speed results */
249 dev_err(&port->dev, "Set BAUD RATE %d failed (error = %d)\n", 253 dev_err(&port->dev, "Set BAUD RATE %d failed (error = %d)\n",
@@ -269,10 +273,11 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty,
269 a device which is not asserting 'CTS'. 273 a device which is not asserting 'CTS'.
270 */ 274 */
271 275
276 buf[0] = 0;
272 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 277 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
273 MCT_U232_SET_UNKNOWN1_REQUEST, 278 MCT_U232_SET_UNKNOWN1_REQUEST,
274 MCT_U232_SET_REQUEST_TYPE, 279 MCT_U232_SET_REQUEST_TYPE,
275 0, 0, &zero_byte, MCT_U232_SET_UNKNOWN1_SIZE, 280 0, 0, buf, MCT_U232_SET_UNKNOWN1_SIZE,
276 WDR_TIMEOUT); 281 WDR_TIMEOUT);
277 if (rc < 0) 282 if (rc < 0)
278 dev_err(&port->dev, "Sending USB device request code %d " 283 dev_err(&port->dev, "Sending USB device request code %d "
@@ -284,30 +289,40 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty,
284 289
285 dbg("set_baud_rate: send second control message, data = %02X", 290 dbg("set_baud_rate: send second control message, data = %02X",
286 cts_enable_byte); 291 cts_enable_byte);
292 buf[0] = cts_enable_byte;
287 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 293 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
288 MCT_U232_SET_CTS_REQUEST, 294 MCT_U232_SET_CTS_REQUEST,
289 MCT_U232_SET_REQUEST_TYPE, 295 MCT_U232_SET_REQUEST_TYPE,
290 0, 0, &cts_enable_byte, MCT_U232_SET_CTS_SIZE, 296 0, 0, buf, MCT_U232_SET_CTS_SIZE,
291 WDR_TIMEOUT); 297 WDR_TIMEOUT);
292 if (rc < 0) 298 if (rc < 0)
293 dev_err(&port->dev, "Sending USB device request code %d " 299 dev_err(&port->dev, "Sending USB device request code %d "
294 "failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc); 300 "failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc);
295 301
302 kfree(buf);
296 return rc; 303 return rc;
297} /* mct_u232_set_baud_rate */ 304} /* mct_u232_set_baud_rate */
298 305
299static int mct_u232_set_line_ctrl(struct usb_serial *serial, unsigned char lcr) 306static int mct_u232_set_line_ctrl(struct usb_serial *serial, unsigned char lcr)
300{ 307{
301 int rc; 308 int rc;
309 unsigned char *buf;
310
311 buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
312 if (buf == NULL)
313 return -ENOMEM;
314
315 buf[0] = lcr;
302 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 316 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
303 MCT_U232_SET_LINE_CTRL_REQUEST, 317 MCT_U232_SET_LINE_CTRL_REQUEST,
304 MCT_U232_SET_REQUEST_TYPE, 318 MCT_U232_SET_REQUEST_TYPE,
305 0, 0, &lcr, MCT_U232_SET_LINE_CTRL_SIZE, 319 0, 0, buf, MCT_U232_SET_LINE_CTRL_SIZE,
306 WDR_TIMEOUT); 320 WDR_TIMEOUT);
307 if (rc < 0) 321 if (rc < 0)
308 dev_err(&serial->dev->dev, 322 dev_err(&serial->dev->dev,
309 "Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc); 323 "Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc);
310 dbg("set_line_ctrl: 0x%x", lcr); 324 dbg("set_line_ctrl: 0x%x", lcr);
325 kfree(buf);
311 return rc; 326 return rc;
312} /* mct_u232_set_line_ctrl */ 327} /* mct_u232_set_line_ctrl */
313 328
@@ -315,23 +330,31 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial,
315 unsigned int control_state) 330 unsigned int control_state)
316{ 331{
317 int rc; 332 int rc;
318 unsigned char mcr = MCT_U232_MCR_NONE; 333 unsigned char mcr;
334 unsigned char *buf;
335
336 buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
337 if (buf == NULL)
338 return -ENOMEM;
319 339
340 mcr = MCT_U232_MCR_NONE;
320 if (control_state & TIOCM_DTR) 341 if (control_state & TIOCM_DTR)
321 mcr |= MCT_U232_MCR_DTR; 342 mcr |= MCT_U232_MCR_DTR;
322 if (control_state & TIOCM_RTS) 343 if (control_state & TIOCM_RTS)
323 mcr |= MCT_U232_MCR_RTS; 344 mcr |= MCT_U232_MCR_RTS;
324 345
346 buf[0] = mcr;
325 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 347 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
326 MCT_U232_SET_MODEM_CTRL_REQUEST, 348 MCT_U232_SET_MODEM_CTRL_REQUEST,
327 MCT_U232_SET_REQUEST_TYPE, 349 MCT_U232_SET_REQUEST_TYPE,
328 0, 0, &mcr, MCT_U232_SET_MODEM_CTRL_SIZE, 350 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
329 WDR_TIMEOUT); 351 WDR_TIMEOUT);
330 if (rc < 0) 352 if (rc < 0)
331 dev_err(&serial->dev->dev, 353 dev_err(&serial->dev->dev,
332 "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc); 354 "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
333 dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr); 355 dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
334 356
357 kfree(buf);
335 return rc; 358 return rc;
336} /* mct_u232_set_modem_ctrl */ 359} /* mct_u232_set_modem_ctrl */
337 360
@@ -339,17 +362,27 @@ static int mct_u232_get_modem_stat(struct usb_serial *serial,
339 unsigned char *msr) 362 unsigned char *msr)
340{ 363{
341 int rc; 364 int rc;
365 unsigned char *buf;
366
367 buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
368 if (buf == NULL) {
369 *msr = 0;
370 return -ENOMEM;
371 }
342 rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 372 rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
343 MCT_U232_GET_MODEM_STAT_REQUEST, 373 MCT_U232_GET_MODEM_STAT_REQUEST,
344 MCT_U232_GET_REQUEST_TYPE, 374 MCT_U232_GET_REQUEST_TYPE,
345 0, 0, msr, MCT_U232_GET_MODEM_STAT_SIZE, 375 0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE,
346 WDR_TIMEOUT); 376 WDR_TIMEOUT);
347 if (rc < 0) { 377 if (rc < 0) {
348 dev_err(&serial->dev->dev, 378 dev_err(&serial->dev->dev,
349 "Get MODEM STATus failed (error = %d)\n", rc); 379 "Get MODEM STATus failed (error = %d)\n", rc);
350 *msr = 0; 380 *msr = 0;
381 } else {
382 *msr = buf[0];
351 } 383 }
352 dbg("get_modem_stat: 0x%x", *msr); 384 dbg("get_modem_stat: 0x%x", *msr);
385 kfree(buf);
353 return rc; 386 return rc;
354} /* mct_u232_get_modem_stat */ 387} /* mct_u232_get_modem_stat */
355 388
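The mct_u232 baud-rate path now serialises the divisor into the shared kmalloc'd buffer instead of passing a __le32 that lives on the stack; put_unaligned_le32() stores a 32-bit value in little-endian order at a pointer that need not be 4-byte aligned. A minimal sketch of the helper in isolation:

    #include <asm/unaligned.h>
    #include <linux/types.h>

    /* Pack a 32-bit divisor into a little-endian wire buffer. */
    static void example_pack_divisor(u8 *buf, u32 divisor)
    {
            /* handles both byte order and possible misalignment of buf */
            put_unaligned_le32(divisor, buf);
    }

Reading such a field back would use get_unaligned_le32() in the same way.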
diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h
index 07b6bec31dc8..7417d5ce1e23 100644
--- a/drivers/usb/serial/mct_u232.h
+++ b/drivers/usb/serial/mct_u232.h
@@ -73,6 +73,8 @@
73#define MCT_U232_SET_CTS_REQUEST 12 73#define MCT_U232_SET_CTS_REQUEST 12
74#define MCT_U232_SET_CTS_SIZE 1 74#define MCT_U232_SET_CTS_SIZE 1
75 75
76#define MCT_U232_MAX_SIZE 4 /* of MCT_XXX_SIZE */
77
76/* 78/*
77 * Baud rate (divisor) 79 * Baud rate (divisor)
78 * Actually, there are two of them, MCT website calls them "Philips solution" 80 * Actually, there are two of them, MCT website calls them "Philips solution"
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 763e32a44be0..0d47f2c4d59f 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -81,12 +81,15 @@ struct moschip_serial {
81 81
82static int debug; 82static int debug;
83 83
84static struct usb_serial_driver moschip7720_2port_driver;
85
84#define USB_VENDOR_ID_MOSCHIP 0x9710 86#define USB_VENDOR_ID_MOSCHIP 0x9710
85#define MOSCHIP_DEVICE_ID_7720 0x7720 87#define MOSCHIP_DEVICE_ID_7720 0x7720
86#define MOSCHIP_DEVICE_ID_7715 0x7715 88#define MOSCHIP_DEVICE_ID_7715 0x7715
87 89
88static struct usb_device_id moschip_port_id_table[] = { 90static const struct usb_device_id moschip_port_id_table[] = {
89 { USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7720) }, 91 { USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7720) },
92 { USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7715) },
90 { } /* terminating entry */ 93 { } /* terminating entry */
91}; 94};
92MODULE_DEVICE_TABLE(usb, moschip_port_id_table); 95MODULE_DEVICE_TABLE(usb, moschip_port_id_table);
@@ -106,7 +109,7 @@ static void mos7720_interrupt_callback(struct urb *urb)
106 __u8 sp1; 109 __u8 sp1;
107 __u8 sp2; 110 __u8 sp2;
108 111
109 dbg("%s", " : Entering\n"); 112 dbg(" : Entering");
110 113
111 switch (status) { 114 switch (status) {
112 case 0: 115 case 0:
@@ -186,6 +189,75 @@ exit:
186} 189}
187 190
188/* 191/*
192 * mos7715_interrupt_callback
193 * this is the 7715's callback function for when we have received data on
194 * the interrupt endpoint.
195 */
196static void mos7715_interrupt_callback(struct urb *urb)
197{
198 int result;
199 int length;
200 int status = urb->status;
201 __u8 *data;
202 __u8 iir;
203
204 switch (status) {
205 case 0:
206 /* success */
207 break;
208 case -ECONNRESET:
209 case -ENOENT:
210 case -ESHUTDOWN:
211 /* this urb is terminated, clean up */
212 dbg("%s - urb shutting down with status: %d", __func__,
213 status);
214 return;
215 default:
216 dbg("%s - nonzero urb status received: %d", __func__,
217 status);
218 goto exit;
219 }
220
221 length = urb->actual_length;
222 data = urb->transfer_buffer;
223
224 /* Structure of data from 7715 device:
225 * Byte 1: IIR serial Port
226 * Byte 2: unused
 227 * Byte 3: DSR parallel port
228 * Byte 4: FIFO status for both */
229
230 if (unlikely(length != 4)) {
231 dbg("Wrong data !!!");
232 return;
233 }
234
235 iir = data[0];
236 if (!(iir & 0x01)) { /* serial port interrupt pending */
237 switch (iir & 0x0f) {
238 case SERIAL_IIR_RLS:
239 dbg("Serial Port: Receiver status error or address "
240 "bit detected in 9-bit mode\n");
241 break;
242 case SERIAL_IIR_CTI:
243 dbg("Serial Port: Receiver time out");
244 break;
245 case SERIAL_IIR_MS:
246 dbg("Serial Port: Modem status change");
247 break;
248 }
249 }
250
251exit:
252 result = usb_submit_urb(urb, GFP_ATOMIC);
253 if (result)
254 dev_err(&urb->dev->dev,
255 "%s - Error %d submitting control urb\n",
256 __func__, result);
257 return;
258}
259
260/*
189 * mos7720_bulk_in_callback 261 * mos7720_bulk_in_callback
190 * this is the callback function for when we have received data on the 262 * this is the callback function for when we have received data on the
191 * bulk in endpoint. 263 * bulk in endpoint.
@@ -206,7 +278,7 @@ static void mos7720_bulk_in_callback(struct urb *urb)
206 278
207 mos7720_port = urb->context; 279 mos7720_port = urb->context;
208 if (!mos7720_port) { 280 if (!mos7720_port) {
209 dbg("%s", "NULL mos7720_port pointer \n"); 281 dbg("NULL mos7720_port pointer");
210 return ; 282 return ;
211 } 283 }
212 284
@@ -218,7 +290,6 @@ static void mos7720_bulk_in_callback(struct urb *urb)
218 290
219 tty = tty_port_tty_get(&port->port); 291 tty = tty_port_tty_get(&port->port);
220 if (tty && urb->actual_length) { 292 if (tty && urb->actual_length) {
221 tty_buffer_request_room(tty, urb->actual_length);
222 tty_insert_flip_string(tty, data, urb->actual_length); 293 tty_insert_flip_string(tty, data, urb->actual_length);
223 tty_flip_buffer_push(tty); 294 tty_flip_buffer_push(tty);
224 } 295 }
@@ -275,17 +346,15 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
275 * this function will be used for sending command to device 346 * this function will be used for sending command to device
276 */ 347 */
277static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value, 348static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value,
278 __u16 index, void *data) 349 __u16 index, u8 *data)
279{ 350{
280 int status; 351 int status;
281 unsigned int pipe; 352 u8 *buf;
282 u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); 353 u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
283 __u8 requesttype;
284 __u16 size = 0x0000;
285 354
286 if (value < MOS_MAX_PORT) { 355 if (value < MOS_MAX_PORT) {
287 if (product == MOSCHIP_DEVICE_ID_7715) 356 if (product == MOSCHIP_DEVICE_ID_7715)
288 value = value*0x100+0x100; 357 value = 0x0200; /* identifies the 7715's serial port */
289 else 358 else
290 value = value*0x100+0x200; 359 value = value*0x100+0x200;
291 } else { 360 } else {
@@ -298,27 +367,58 @@ static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value,
298 } 367 }
299 368
300 if (request == MOS_WRITE) { 369 if (request == MOS_WRITE) {
301 request = (__u8)MOS_WRITE; 370 value = value + *data;
302 requesttype = (__u8)0x40; 371 status = usb_control_msg(serial->dev,
303 value = value + (__u16)*((unsigned char *)data); 372 usb_sndctrlpipe(serial->dev, 0), MOS_WRITE,
304 data = NULL; 373 0x40, value, index, NULL, 0, MOS_WDR_TIMEOUT);
305 pipe = usb_sndctrlpipe(serial->dev, 0);
306 } else { 374 } else {
307 request = (__u8)MOS_READ; 375 buf = kmalloc(1, GFP_KERNEL);
308 requesttype = (__u8)0xC0; 376 if (!buf) {
309 size = 0x01; 377 status = -ENOMEM;
310 pipe = usb_rcvctrlpipe(serial->dev, 0); 378 goto out;
379 }
380 status = usb_control_msg(serial->dev,
381 usb_rcvctrlpipe(serial->dev, 0), MOS_READ,
382 0xc0, value, index, buf, 1, MOS_WDR_TIMEOUT);
383 *data = *buf;
384 kfree(buf);
311 } 385 }
312 386out:
313 status = usb_control_msg(serial->dev, pipe, request, requesttype,
314 value, index, data, size, MOS_WDR_TIMEOUT);
315
316 if (status < 0) 387 if (status < 0)
317 dbg("Command Write failed Value %x index %x\n", value, index); 388 dbg("Command Write failed Value %x index %x", value, index);
318 389
319 return status; 390 return status;
320} 391}
321 392
393
394/*
395 * mos77xx_probe
396 * this function installs the appropriate read interrupt endpoint callback
397 * depending on whether the device is a 7720 or 7715, thus avoiding costly
398 * run-time checks in the high-frequency callback routine itself.
399 */
400static int mos77xx_probe(struct usb_serial *serial,
401 const struct usb_device_id *id)
402{
403 if (id->idProduct == MOSCHIP_DEVICE_ID_7715)
404 moschip7720_2port_driver.read_int_callback =
405 mos7715_interrupt_callback;
406 else
407 moschip7720_2port_driver.read_int_callback =
408 mos7720_interrupt_callback;
409
410 return 0;
411}
412
413static int mos77xx_calc_num_ports(struct usb_serial *serial)
414{
415 u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
416 if (product == MOSCHIP_DEVICE_ID_7715)
417 return 1;
418
419 return 2;
420}
421
322static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port) 422static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
323{ 423{
324 struct usb_serial *serial; 424 struct usb_serial *serial;
@@ -390,7 +490,7 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
390 */ 490 */
391 port_number = port->number - port->serial->minor; 491 port_number = port->number - port->serial->minor;
392 send_mos_cmd(port->serial, MOS_READ, port_number, UART_LSR, &data); 492 send_mos_cmd(port->serial, MOS_READ, port_number, UART_LSR, &data);
393 dbg("SS::%p LSR:%x\n", mos7720_port, data); 493 dbg("SS::%p LSR:%x", mos7720_port, data);
394 494
395 dbg("Check:Sending Command .........."); 495 dbg("Check:Sending Command ..........");
396 496
@@ -729,7 +829,7 @@ static void mos7720_throttle(struct tty_struct *tty)
729 struct moschip_port *mos7720_port; 829 struct moschip_port *mos7720_port;
730 int status; 830 int status;
731 831
732 dbg("%s- port %d\n", __func__, port->number); 832 dbg("%s- port %d", __func__, port->number);
733 833
734 mos7720_port = usb_get_serial_port_data(port); 834 mos7720_port = usb_get_serial_port_data(port);
735 835
@@ -1208,7 +1308,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
1208 return; 1308 return;
1209 } 1309 }
1210 1310
1211 dbg("%s\n", "setting termios - ASPIRE"); 1311 dbg("setting termios - ASPIRE");
1212 1312
1213 cflag = tty->termios->c_cflag; 1313 cflag = tty->termios->c_cflag;
1214 1314
@@ -1226,7 +1326,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
1226 change_port_settings(tty, mos7720_port, old_termios); 1326 change_port_settings(tty, mos7720_port, old_termios);
1227 1327
1228 if (!port->read_urb) { 1328 if (!port->read_urb) {
1229 dbg("%s", "URB KILLED !!!!!\n"); 1329 dbg("URB KILLED !!!!!");
1230 return; 1330 return;
1231 } 1331 }
1232 1332
@@ -1495,6 +1595,7 @@ static int mos7720_startup(struct usb_serial *serial)
1495 struct usb_device *dev; 1595 struct usb_device *dev;
1496 int i; 1596 int i;
1497 char data; 1597 char data;
1598 u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
1498 1599
1499 dbg("%s: Entering ..........", __func__); 1600 dbg("%s: Entering ..........", __func__);
1500 1601
@@ -1514,6 +1615,29 @@ static int mos7720_startup(struct usb_serial *serial)
1514 1615
1515 usb_set_serial_data(serial, mos7720_serial); 1616 usb_set_serial_data(serial, mos7720_serial);
1516 1617
1618 /*
1619 * The 7715 uses the first bulk in/out endpoint pair for the parallel
1620 * port, and the second for the serial port. Because the usbserial core
1621 * assumes both pairs are serial ports, we must engage in a bit of
1622 * subterfuge and swap the pointers for ports 0 and 1 in order to make
1623 * port 0 point to the serial port. However, both moschip devices use a
1624 * single interrupt-in endpoint for both ports (as mentioned a little
1625 * further down), and this endpoint was assigned to port 0. So after
1626 * the swap, we must copy the interrupt endpoint elements from port 1
1627 * (as newly assigned) to port 0, and null out port 1 pointers.
1628 */
1629 if (product == MOSCHIP_DEVICE_ID_7715) {
1630 struct usb_serial_port *tmp = serial->port[0];
1631 serial->port[0] = serial->port[1];
1632 serial->port[1] = tmp;
1633 serial->port[0]->interrupt_in_urb = tmp->interrupt_in_urb;
1634 serial->port[0]->interrupt_in_buffer = tmp->interrupt_in_buffer;
1635 serial->port[0]->interrupt_in_endpointAddress =
1636 tmp->interrupt_in_endpointAddress;
1637 serial->port[1]->interrupt_in_urb = NULL;
1638 serial->port[1]->interrupt_in_buffer = NULL;
1639 }
1640
1517 /* we set up the pointers to the endpoints in the mos7720_open * 1641 /* we set up the pointers to the endpoints in the mos7720_open *
1518 * function, as the structures aren't created yet. */ 1642 * function, as the structures aren't created yet. */
1519 1643
@@ -1529,7 +1653,7 @@ static int mos7720_startup(struct usb_serial *serial)
1529 1653
1530 /* Initialize all port interrupt end point to port 0 int 1654 /* Initialize all port interrupt end point to port 0 int
1531 * endpoint. Our device has only one interrupt endpoint 1655 * endpoint. Our device has only one interrupt endpoint
1532 * comman to all ports */ 1656 * common to all ports */
1533 serial->port[i]->interrupt_in_endpointAddress = 1657 serial->port[i]->interrupt_in_endpointAddress =
1534 serial->port[0]->interrupt_in_endpointAddress; 1658 serial->port[0]->interrupt_in_endpointAddress;
1535 1659
@@ -1584,11 +1708,12 @@ static struct usb_serial_driver moschip7720_2port_driver = {
1584 .description = "Moschip 2 port adapter", 1708 .description = "Moschip 2 port adapter",
1585 .usb_driver = &usb_driver, 1709 .usb_driver = &usb_driver,
1586 .id_table = moschip_port_id_table, 1710 .id_table = moschip_port_id_table,
1587 .num_ports = 2, 1711 .calc_num_ports = mos77xx_calc_num_ports,
1588 .open = mos7720_open, 1712 .open = mos7720_open,
1589 .close = mos7720_close, 1713 .close = mos7720_close,
1590 .throttle = mos7720_throttle, 1714 .throttle = mos7720_throttle,
1591 .unthrottle = mos7720_unthrottle, 1715 .unthrottle = mos7720_unthrottle,
1716 .probe = mos77xx_probe,
1592 .attach = mos7720_startup, 1717 .attach = mos7720_startup,
1593 .release = mos7720_release, 1718 .release = mos7720_release,
1594 .ioctl = mos7720_ioctl, 1719 .ioctl = mos7720_ioctl,
@@ -1600,7 +1725,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
1600 .chars_in_buffer = mos7720_chars_in_buffer, 1725 .chars_in_buffer = mos7720_chars_in_buffer,
1601 .break_ctl = mos7720_break, 1726 .break_ctl = mos7720_break,
1602 .read_bulk_callback = mos7720_bulk_in_callback, 1727 .read_bulk_callback = mos7720_bulk_in_callback,
1603 .read_int_callback = mos7720_interrupt_callback, 1728 .read_int_callback = NULL /* dynamically assigned in probe() */
1604}; 1729};
1605 1730
1606static int __init moschip7720_init(void) 1731static int __init moschip7720_init(void)
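The send_mos_cmd() rework above drops the stack-based transfer buffer: the read path now goes through a one-byte kmalloc() allocation, since usb_control_msg() submits that buffer for DMA and must not be handed a pointer into a caller's stack frame. A minimal sketch of the same pattern follows; it is not the driver's own helper, and the request type bits and 5-second timeout are illustrative assumptions.

#include <linux/slab.h>
#include <linux/usb.h>

/*
 * Hedged sketch: read one register byte via a vendor control request.
 * The request number and value/index meaning are placeholders, not the
 * MOS_READ constants used in the driver above.
 */
static int read_reg_byte(struct usb_device *udev, u8 request,
			 u16 value, u16 index, u8 *out)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);	/* DMA-able heap buffer, never stack */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, index, buf, 1, 5000);
	if (ret == 1)
		*out = *buf;

	kfree(buf);
	return ret < 0 ? ret : 0;
}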
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 2cfe2451ed97..2fda1c0182b7 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -181,7 +181,7 @@
181#define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */ 181#define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */
182 182
183 183
184static struct usb_device_id moschip_port_id_table[] = { 184static const struct usb_device_id moschip_port_id_table[] = {
185 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 185 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
186 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, 186 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
187 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, 187 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
@@ -198,7 +198,7 @@ static struct usb_device_id moschip_port_id_table[] = {
198 {} /* terminating entry */ 198 {} /* terminating entry */
199}; 199};
200 200
201static __devinitdata struct usb_device_id moschip_id_table_combined[] = { 201static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
202 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 202 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
203 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, 203 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
204 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, 204 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
@@ -283,12 +283,19 @@ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
283{ 283{
284 struct usb_device *dev = port->serial->dev; 284 struct usb_device *dev = port->serial->dev;
285 int ret = 0; 285 int ret = 0;
286 u8 *buf;
287
288 buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
289 if (!buf)
290 return -ENOMEM;
286 291
287 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, 292 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
288 MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH, 293 MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
289 MOS_WDR_TIMEOUT); 294 MOS_WDR_TIMEOUT);
295 *val = buf[0];
290 dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val); 296 dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val);
291 *val = (*val) & 0x00ff; 297
298 kfree(buf);
292 return ret; 299 return ret;
293} 300}
294 301
@@ -341,6 +348,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
341 struct usb_device *dev = port->serial->dev; 348 struct usb_device *dev = port->serial->dev;
342 int ret = 0; 349 int ret = 0;
343 __u16 Wval; 350 __u16 Wval;
351 u8 *buf;
352
353 buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
354 if (!buf)
355 return -ENOMEM;
344 356
345 /* dbg("application number is %4x", 357 /* dbg("application number is %4x",
346 (((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */ 358 (((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */
@@ -364,9 +376,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
364 } 376 }
365 } 377 }
366 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, 378 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
367 MCS_RD_RTYPE, Wval, reg, val, VENDOR_READ_LENGTH, 379 MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
368 MOS_WDR_TIMEOUT); 380 MOS_WDR_TIMEOUT);
369 *val = (*val) & 0x00ff; 381 *val = buf[0];
382
383 kfree(buf);
370 return ret; 384 return ret;
371} 385}
372 386
@@ -750,7 +764,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
750 if (urb->actual_length) { 764 if (urb->actual_length) {
751 tty = tty_port_tty_get(&mos7840_port->port->port); 765 tty = tty_port_tty_get(&mos7840_port->port->port);
752 if (tty) { 766 if (tty) {
753 tty_buffer_request_room(tty, urb->actual_length);
754 tty_insert_flip_string(tty, data, urb->actual_length); 767 tty_insert_flip_string(tty, data, urb->actual_length);
755 dbg(" %s ", data); 768 dbg(" %s ", data);
756 tty_flip_buffer_push(tty); 769 tty_flip_buffer_push(tty);
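A cleanup repeated across this series (mos7720, mos7840, navman, opticon, option, sierra, symbolserial, ti_usb_3410_5052) removes tty_buffer_request_room() calls made right before tty_insert_flip_string(): the insert helper grows the flip buffer itself and returns how many bytes it actually copied, so the separate reservation was redundant, and in opticon/symbolserial it even truncated reads to the pre-reserved size. A minimal sketch of the resulting receive path; push_rx_data() is an illustrative helper, not taken from any of the drivers in this patch.

#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Hedged sketch of the post-cleanup receive path. */
static void push_rx_data(struct tty_struct *tty,
			 const unsigned char *data, int count)
{
	int copied;

	copied = tty_insert_flip_string(tty, data, count);
	if (copied < count)
		printk(KERN_WARNING "flip buffer full, dropped %d bytes\n",
		       count - copied);

	tty_flip_buffer_push(tty);
}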
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
index 99bd00f5188a..cf1718394e18 100644
--- a/drivers/usb/serial/moto_modem.c
+++ b/drivers/usb/serial/moto_modem.c
@@ -21,7 +21,7 @@
21#include <linux/usb.h> 21#include <linux/usb.h>
22#include <linux/usb/serial.h> 22#include <linux/usb/serial.h>
23 23
24static struct usb_device_id id_table [] = { 24static const struct usb_device_id id_table[] = {
25 { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ 25 { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */
 26	{ USB_DEVICE(0x0c44, 0x0022) },	/* unknown Motorola phone */ 26	{ USB_DEVICE(0x0c44, 0x0022) },	/* unknown Motorola phone */
27 { USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */ 27 { USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 5ceaa4c6be09..04a6cbbed2c0 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -22,7 +22,7 @@
22 22
23static int debug; 23static int debug;
24 24
25static struct usb_device_id id_table [] = { 25static const struct usb_device_id id_table[] = {
26 { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */ 26 { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
27 { }, 27 { },
28}; 28};
@@ -66,7 +66,6 @@ static void navman_read_int_callback(struct urb *urb)
66 66
67 tty = tty_port_tty_get(&port->port); 67 tty = tty_port_tty_get(&port->port);
68 if (tty && urb->actual_length) { 68 if (tty && urb->actual_length) {
69 tty_buffer_request_room(tty, urb->actual_length);
70 tty_insert_flip_string(tty, data, urb->actual_length); 69 tty_insert_flip_string(tty, data, urb->actual_length);
71 tty_flip_buffer_push(tty); 70 tty_flip_buffer_push(tty);
72 } 71 }
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 062265038bf0..89c724c0ac0a 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -75,7 +75,7 @@ static void omninet_disconnect(struct usb_serial *serial);
75static void omninet_release(struct usb_serial *serial); 75static void omninet_release(struct usb_serial *serial);
76static int omninet_attach(struct usb_serial *serial); 76static int omninet_attach(struct usb_serial *serial);
77 77
78static struct usb_device_id id_table[] = { 78static const struct usb_device_id id_table[] = {
79 { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) }, 79 { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
80 { USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) }, 80 { USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
81 { } /* Terminating entry */ 81 { } /* Terminating entry */
@@ -218,8 +218,8 @@ static void omninet_read_bulk_callback(struct urb *urb)
218 218
219 if (debug && header->oh_xxx != 0x30) { 219 if (debug && header->oh_xxx != 0x30) {
220 if (urb->actual_length) { 220 if (urb->actual_length) {
221 printk(KERN_DEBUG __FILE__ 221 printk(KERN_DEBUG "%s: omninet_read %d: ",
222 ": omninet_read %d: ", header->oh_len); 222 __FILE__, header->oh_len);
223 for (i = 0; i < (header->oh_len + 223 for (i = 0; i < (header->oh_len +
224 OMNINET_HEADERLEN); i++) 224 OMNINET_HEADERLEN); i++)
225 printk("%.2x ", data[i]); 225 printk("%.2x ", data[i]);
@@ -332,7 +332,7 @@ static void omninet_write_bulk_callback(struct urb *urb)
332 struct usb_serial_port *port = urb->context; 332 struct usb_serial_port *port = urb->context;
333 int status = urb->status; 333 int status = urb->status;
334 334
335 dbg("%s - port %0x\n", __func__, port->number); 335 dbg("%s - port %0x", __func__, port->number);
336 336
337 port->write_urb_busy = 0; 337 port->write_urb_busy = 0;
338 if (status) { 338 if (status) {
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 4cdb975caa89..f37476e22684 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -22,7 +22,7 @@
22 22
23static int debug; 23static int debug;
24 24
25static struct usb_device_id id_table[] = { 25static const struct usb_device_id id_table[] = {
26 { USB_DEVICE(0x065a, 0x0009) }, 26 { USB_DEVICE(0x065a, 0x0009) },
27 { }, 27 { },
28}; 28};
@@ -55,7 +55,6 @@ static void opticon_bulk_callback(struct urb *urb)
55 int status = urb->status; 55 int status = urb->status;
56 struct tty_struct *tty; 56 struct tty_struct *tty;
57 int result; 57 int result;
58 int available_room = 0;
59 int data_length; 58 int data_length;
60 59
61 dbg("%s - port %d", __func__, port->number); 60 dbg("%s - port %d", __func__, port->number);
@@ -96,13 +95,9 @@ static void opticon_bulk_callback(struct urb *urb)
96 /* real data, send it to the tty layer */ 95 /* real data, send it to the tty layer */
97 tty = tty_port_tty_get(&port->port); 96 tty = tty_port_tty_get(&port->port);
98 if (tty) { 97 if (tty) {
99 available_room = tty_buffer_request_room(tty, 98 tty_insert_flip_string(tty, data,
100 data_length); 99 data_length);
101 if (available_room) { 100 tty_flip_buffer_push(tty);
102 tty_insert_flip_string(tty, data,
103 available_room);
104 tty_flip_buffer_push(tty);
105 }
106 tty_kref_put(tty); 101 tty_kref_put(tty);
107 } 102 }
108 } else { 103 } else {
@@ -217,7 +212,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
217 spin_lock_irqsave(&priv->lock, flags); 212 spin_lock_irqsave(&priv->lock, flags);
218 if (priv->outstanding_urbs > URB_UPPER_LIMIT) { 213 if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
219 spin_unlock_irqrestore(&priv->lock, flags); 214 spin_unlock_irqrestore(&priv->lock, flags);
220 dbg("%s - write limit hit\n", __func__); 215 dbg("%s - write limit hit", __func__);
221 return 0; 216 return 0;
222 } 217 }
223 priv->outstanding_urbs++; 218 priv->outstanding_urbs++;
@@ -288,7 +283,7 @@ static int opticon_write_room(struct tty_struct *tty)
288 spin_lock_irqsave(&priv->lock, flags); 283 spin_lock_irqsave(&priv->lock, flags);
289 if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) { 284 if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
290 spin_unlock_irqrestore(&priv->lock, flags); 285 spin_unlock_irqrestore(&priv->lock, flags);
291 dbg("%s - write limit hit\n", __func__); 286 dbg("%s - write limit hit", __func__);
292 return 0; 287 return 0;
293 } 288 }
294 spin_unlock_irqrestore(&priv->lock, flags); 289 spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6e94a6711f08..847b805d63a3 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -336,15 +336,42 @@ static int option_resume(struct usb_serial *serial);
336#define AIRPLUS_VENDOR_ID 0x1011 336#define AIRPLUS_VENDOR_ID 0x1011
337#define AIRPLUS_PRODUCT_MCD650 0x3198 337#define AIRPLUS_PRODUCT_MCD650 0x3198
338 338
339/* Longcheer/Longsung vendor ID; makes whitelabel devices that
340 * many other vendors like 4G Systems, Alcatel, ChinaBird,
341 * Mobidata, etc sell under their own brand names.
342 */
343#define LONGCHEER_VENDOR_ID 0x1c9e
344
339/* 4G Systems products */ 345/* 4G Systems products */
340#define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e 346/* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
347 * It seems to contain a Qualcomm QSC6240/6290 chipset */
341#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 348#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
342 349
343/* Haier products */ 350/* Haier products */
344#define HAIER_VENDOR_ID 0x201e 351#define HAIER_VENDOR_ID 0x201e
345#define HAIER_PRODUCT_CE100 0x2009 352#define HAIER_PRODUCT_CE100 0x2009
346 353
 347static struct usb_device_id option_ids[] = { 354/* some device interfaces need special handling for a number of reasons */
355enum option_blacklist_reason {
356 OPTION_BLACKLIST_NONE = 0,
357 OPTION_BLACKLIST_SENDSETUP = 1,
358 OPTION_BLACKLIST_RESERVED_IF = 2
359};
360
361struct option_blacklist_info {
362 const u32 infolen; /* number of interface numbers on blacklist */
363 const u8 *ifaceinfo; /* pointer to the array holding the numbers */
364 enum option_blacklist_reason reason;
365};
366
367static const u8 four_g_w14_no_sendsetup[] = { 0, 1 };
368static const struct option_blacklist_info four_g_w14_blacklist = {
369 .infolen = ARRAY_SIZE(four_g_w14_no_sendsetup),
370 .ifaceinfo = four_g_w14_no_sendsetup,
371 .reason = OPTION_BLACKLIST_SENDSETUP
372};
373
374static const struct usb_device_id option_ids[] = {
348 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 375 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
349 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 376 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
350 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_LIGHT) }, 377 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_LIGHT) },
@@ -644,7 +671,9 @@ static struct usb_device_id option_ids[] = {
644 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, 671 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
645 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, 672 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
646 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, 673 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
647 { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) }, 674 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
675 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
676 },
648 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, 677 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
649 { } /* Terminating entry */ 678 { } /* Terminating entry */
650}; 679};
@@ -709,6 +738,7 @@ struct option_intf_private {
709 spinlock_t susp_lock; 738 spinlock_t susp_lock;
710 unsigned int suspended:1; 739 unsigned int suspended:1;
711 int in_flight; 740 int in_flight;
741 struct option_blacklist_info *blacklist_info;
712}; 742};
713 743
714struct option_port_private { 744struct option_port_private {
@@ -778,9 +808,27 @@ static int option_probe(struct usb_serial *serial,
778 if (!data) 808 if (!data)
779 return -ENOMEM; 809 return -ENOMEM;
780 spin_lock_init(&data->susp_lock); 810 spin_lock_init(&data->susp_lock);
811 data->blacklist_info = (struct option_blacklist_info*) id->driver_info;
781 return 0; 812 return 0;
782} 813}
783 814
815static enum option_blacklist_reason is_blacklisted(const u8 ifnum,
816 const struct option_blacklist_info *blacklist)
817{
818 const u8 *info;
819 int i;
820
821 if (blacklist) {
822 info = blacklist->ifaceinfo;
823
824 for (i = 0; i < blacklist->infolen; i++) {
825 if (info[i] == ifnum)
826 return blacklist->reason;
827 }
828 }
829 return OPTION_BLACKLIST_NONE;
830}
831
784static void option_set_termios(struct tty_struct *tty, 832static void option_set_termios(struct tty_struct *tty,
785 struct usb_serial_port *port, struct ktermios *old_termios) 833 struct usb_serial_port *port, struct ktermios *old_termios)
786{ 834{
@@ -921,7 +969,6 @@ static void option_indat_callback(struct urb *urb)
921 } else { 969 } else {
922 tty = tty_port_tty_get(&port->port); 970 tty = tty_port_tty_get(&port->port);
923 if (urb->actual_length) { 971 if (urb->actual_length) {
924 tty_buffer_request_room(tty, urb->actual_length);
925 tty_insert_flip_string(tty, data, urb->actual_length); 972 tty_insert_flip_string(tty, data, urb->actual_length);
926 tty_flip_buffer_push(tty); 973 tty_flip_buffer_push(tty);
927 } else 974 } else
@@ -929,9 +976,9 @@ static void option_indat_callback(struct urb *urb)
929 tty_kref_put(tty); 976 tty_kref_put(tty);
930 977
931 /* Resubmit urb so we continue receiving */ 978 /* Resubmit urb so we continue receiving */
932 if (port->port.count && status != -ESHUTDOWN) { 979 if (status != -ESHUTDOWN) {
933 err = usb_submit_urb(urb, GFP_ATOMIC); 980 err = usb_submit_urb(urb, GFP_ATOMIC);
934 if (err) 981 if (err && err != -EPERM)
935 printk(KERN_ERR "%s: resubmit read urb failed. " 982 printk(KERN_ERR "%s: resubmit read urb failed. "
936 "(%d)", __func__, err); 983 "(%d)", __func__, err);
937 else 984 else
@@ -985,7 +1032,7 @@ static void option_instat_callback(struct urb *urb)
985 (struct usb_ctrlrequest *)urb->transfer_buffer; 1032 (struct usb_ctrlrequest *)urb->transfer_buffer;
986 1033
987 if (!req_pkt) { 1034 if (!req_pkt) {
988 dbg("%s: NULL req_pkt\n", __func__); 1035 dbg("%s: NULL req_pkt", __func__);
989 return; 1036 return;
990 } 1037 }
991 if ((req_pkt->bRequestType == 0xA1) && 1038 if ((req_pkt->bRequestType == 0xA1) &&
@@ -1211,11 +1258,19 @@ static void option_setup_urbs(struct usb_serial *serial)
1211static int option_send_setup(struct usb_serial_port *port) 1258static int option_send_setup(struct usb_serial_port *port)
1212{ 1259{
1213 struct usb_serial *serial = port->serial; 1260 struct usb_serial *serial = port->serial;
1261 struct option_intf_private *intfdata =
1262 (struct option_intf_private *) serial->private;
1214 struct option_port_private *portdata; 1263 struct option_port_private *portdata;
1215 int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; 1264 int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
1216 int val = 0; 1265 int val = 0;
1217 dbg("%s", __func__); 1266 dbg("%s", __func__);
1218 1267
1268 if (is_blacklisted(ifNum, intfdata->blacklist_info) ==
1269 OPTION_BLACKLIST_SENDSETUP) {
1270 dbg("No send_setup on blacklisted interface #%d\n", ifNum);
1271 return -EIO;
1272 }
1273
1219 portdata = usb_get_serial_port_data(port); 1274 portdata = usb_get_serial_port_data(port);
1220 1275
1221 if (portdata->dtr_state) 1276 if (portdata->dtr_state)
@@ -1401,7 +1456,7 @@ static int option_resume(struct usb_serial *serial)
1401 for (i = 0; i < serial->num_ports; i++) { 1456 for (i = 0; i < serial->num_ports; i++) {
1402 port = serial->port[i]; 1457 port = serial->port[i];
1403 if (!port->interrupt_in_urb) { 1458 if (!port->interrupt_in_urb) {
1404 dbg("%s: No interrupt URB for port %d\n", __func__, i); 1459 dbg("%s: No interrupt URB for port %d", __func__, i);
1405 continue; 1460 continue;
1406 } 1461 }
1407 err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); 1462 err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
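The option.c blacklist above rides on the driver_info field of the usb_device_id table: the matching entry stores a pointer to the per-device quirk structure, option_probe() stashes it in the interface-private data, and option_send_setup() consults it before touching a blacklisted interface. A minimal, self-contained sketch of that lookup idea; struct quirk_info, the "demo" identifiers, and iface_is_quirky() are hypothetical names, not the driver's own.

#include <linux/kernel.h>
#include <linux/usb.h>

/* Hedged sketch: carry per-device quirk data through ->driver_info. */
struct quirk_info {
	const u8 *ifaces;	/* interface numbers needing special handling */
	u32 n_ifaces;
};

static const u8 demo_ifaces[] = { 0, 1 };
static const struct quirk_info demo_quirks = {
	.ifaces		= demo_ifaces,
	.n_ifaces	= ARRAY_SIZE(demo_ifaces),
};

static const struct usb_device_id demo_ids[] = {
	{ USB_DEVICE(0x1c9e, 0x9603),
	  .driver_info = (kernel_ulong_t)&demo_quirks },
	{ }
};

static bool iface_is_quirky(u8 ifnum, const struct quirk_info *q)
{
	u32 i;

	if (!q)
		return false;
	for (i = 0; i < q->n_ifaces; i++)
		if (q->ifaces[i] == ifnum)
			return true;
	return false;
}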
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index c644e26394b4..deeacdea05db 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -58,7 +58,7 @@
58#define OTI6858_AUTHOR "Tomasz Michal Lukaszewski <FIXME@FIXME>" 58#define OTI6858_AUTHOR "Tomasz Michal Lukaszewski <FIXME@FIXME>"
59#define OTI6858_VERSION "0.1" 59#define OTI6858_VERSION "0.1"
60 60
61static struct usb_device_id id_table [] = { 61static const struct usb_device_id id_table[] = {
62 { USB_DEVICE(OTI6858_VENDOR_ID, OTI6858_PRODUCT_ID) }, 62 { USB_DEVICE(OTI6858_VENDOR_ID, OTI6858_PRODUCT_ID) },
63 { } 63 { }
64}; 64};
@@ -302,7 +302,7 @@ void send_data(struct work_struct *work)
302 struct usb_serial_port *port = priv->port; 302 struct usb_serial_port *port = priv->port;
303 int count = 0, result; 303 int count = 0, result;
304 unsigned long flags; 304 unsigned long flags;
305 unsigned char allow; 305 u8 *allow;
306 306
307 dbg("%s(port = %d)", __func__, port->number); 307 dbg("%s(port = %d)", __func__, port->number);
308 308
@@ -321,13 +321,20 @@ void send_data(struct work_struct *work)
321 count = port->bulk_out_size; 321 count = port->bulk_out_size;
322 322
323 if (count != 0) { 323 if (count != 0) {
324 allow = kmalloc(1, GFP_KERNEL);
325 if (!allow) {
326 dev_err(&port->dev, "%s(): kmalloc failed\n",
327 __func__);
328 return;
329 }
324 result = usb_control_msg(port->serial->dev, 330 result = usb_control_msg(port->serial->dev,
325 usb_rcvctrlpipe(port->serial->dev, 0), 331 usb_rcvctrlpipe(port->serial->dev, 0),
326 OTI6858_REQ_T_CHECK_TXBUFF, 332 OTI6858_REQ_T_CHECK_TXBUFF,
327 OTI6858_REQ_CHECK_TXBUFF, 333 OTI6858_REQ_CHECK_TXBUFF,
328 count, 0, &allow, 1, 100); 334 count, 0, allow, 1, 100);
329 if (result != 1 || allow != 0) 335 if (result != 1 || *allow != 0)
330 count = 0; 336 count = 0;
337 kfree(allow);
331 } 338 }
332 339
333 if (count == 0) { 340 if (count == 0) {
@@ -578,9 +585,6 @@ static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port)
578 usb_clear_halt(serial->dev, port->write_urb->pipe); 585 usb_clear_halt(serial->dev, port->write_urb->pipe);
579 usb_clear_halt(serial->dev, port->read_urb->pipe); 586 usb_clear_halt(serial->dev, port->read_urb->pipe);
580 587
581 if (port->port.count != 1)
582 return 0;
583
584 buf = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL); 588 buf = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL);
585 if (buf == NULL) { 589 if (buf == NULL) {
586 dev_err(&port->dev, "%s(): out of memory!\n", __func__); 590 dev_err(&port->dev, "%s(): out of memory!\n", __func__);
@@ -927,10 +931,6 @@ static void oti6858_read_bulk_callback(struct urb *urb)
927 spin_unlock_irqrestore(&priv->lock, flags); 931 spin_unlock_irqrestore(&priv->lock, flags);
928 932
929 if (status != 0) { 933 if (status != 0) {
930 if (!port->port.count) {
931 dbg("%s(): port is closed, exiting", __func__);
932 return;
933 }
934 /* 934 /*
935 if (status == -EPROTO) { 935 if (status == -EPROTO) {
936 * PL2303 mysteriously fails with -EPROTO reschedule 936 * PL2303 mysteriously fails with -EPROTO reschedule
@@ -954,14 +954,12 @@ static void oti6858_read_bulk_callback(struct urb *urb)
954 } 954 }
955 tty_kref_put(tty); 955 tty_kref_put(tty);
956 956
957 /* schedule the interrupt urb if we are still open */ 957 /* schedule the interrupt urb */
958 if (port->port.count != 0) { 958 port->interrupt_in_urb->dev = port->serial->dev;
959 port->interrupt_in_urb->dev = port->serial->dev; 959 result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
960 result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); 960 if (result != 0 && result != -EPERM) {
961 if (result != 0) { 961 dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
962 dev_err(&port->dev, "%s(): usb_submit_urb() failed," 962 " error %d\n", __func__, result);
963 " error %d\n", __func__, result);
964 }
965 } 963 }
966} 964}
967 965
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9ec1a49e2362..73d5f346d3e0 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -50,7 +50,7 @@ struct pl2303_buf {
50 char *buf_put; 50 char *buf_put;
51}; 51};
52 52
53static struct usb_device_id id_table [] = { 53static const struct usb_device_id id_table[] = {
54 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) }, 54 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) },
55 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) }, 55 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
56 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) }, 56 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
@@ -451,7 +451,6 @@ static void pl2303_send(struct usb_serial_port *port)
451 port->write_urb->transfer_buffer); 451 port->write_urb->transfer_buffer);
452 452
453 port->write_urb->transfer_buffer_length = count; 453 port->write_urb->transfer_buffer_length = count;
454 port->write_urb->dev = port->serial->dev;
455 result = usb_submit_urb(port->write_urb, GFP_ATOMIC); 454 result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
456 if (result) { 455 if (result) {
457 dev_err(&port->dev, "%s - failed submitting write urb," 456 dev_err(&port->dev, "%s - failed submitting write urb,"
@@ -769,7 +768,6 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
769 pl2303_set_termios(tty, port, &tmp_termios); 768 pl2303_set_termios(tty, port, &tmp_termios);
770 769
771 dbg("%s - submitting read urb", __func__); 770 dbg("%s - submitting read urb", __func__);
772 port->read_urb->dev = serial->dev;
773 result = usb_submit_urb(port->read_urb, GFP_KERNEL); 771 result = usb_submit_urb(port->read_urb, GFP_KERNEL);
774 if (result) { 772 if (result) {
775 dev_err(&port->dev, "%s - failed submitting read urb," 773 dev_err(&port->dev, "%s - failed submitting read urb,"
@@ -779,7 +777,6 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
779 } 777 }
780 778
781 dbg("%s - submitting interrupt urb", __func__); 779 dbg("%s - submitting interrupt urb", __func__);
782 port->interrupt_in_urb->dev = serial->dev;
783 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); 780 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
784 if (result) { 781 if (result) {
785 dev_err(&port->dev, "%s - failed submitting interrupt urb," 782 dev_err(&port->dev, "%s - failed submitting interrupt urb,"
@@ -895,10 +892,23 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
895static int pl2303_ioctl(struct tty_struct *tty, struct file *file, 892static int pl2303_ioctl(struct tty_struct *tty, struct file *file,
896 unsigned int cmd, unsigned long arg) 893 unsigned int cmd, unsigned long arg)
897{ 894{
895 struct serial_struct ser;
898 struct usb_serial_port *port = tty->driver_data; 896 struct usb_serial_port *port = tty->driver_data;
899 dbg("%s (%d) cmd = 0x%04x", __func__, port->number, cmd); 897 dbg("%s (%d) cmd = 0x%04x", __func__, port->number, cmd);
900 898
901 switch (cmd) { 899 switch (cmd) {
900 case TIOCGSERIAL:
901 memset(&ser, 0, sizeof ser);
902 ser.type = PORT_16654;
903 ser.line = port->serial->minor;
904 ser.port = port->number;
905 ser.baud_base = 460800;
906
907 if (copy_to_user((void __user *)arg, &ser, sizeof ser))
908 return -EFAULT;
909
910 return 0;
911
902 case TIOCMIWAIT: 912 case TIOCMIWAIT:
903 dbg("%s (%d) TIOCMIWAIT", __func__, port->number); 913 dbg("%s (%d) TIOCMIWAIT", __func__, port->number);
904 return wait_modem_info(port, arg); 914 return wait_modem_info(port, arg);
@@ -1042,7 +1052,6 @@ static void pl2303_push_data(struct tty_struct *tty,
1042 tty_flag = TTY_FRAME; 1052 tty_flag = TTY_FRAME;
1043 dbg("%s - tty_flag = %d", __func__, tty_flag); 1053 dbg("%s - tty_flag = %d", __func__, tty_flag);
1044 1054
1045 tty_buffer_request_room(tty, urb->actual_length + 1);
1046 /* overrun is special, not associated with a char */ 1055 /* overrun is special, not associated with a char */
1047 if (line_status & UART_OVERRUN_ERROR) 1056 if (line_status & UART_OVERRUN_ERROR)
1048 tty_insert_flip_char(tty, 0, TTY_OVERRUN); 1057 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
@@ -1072,16 +1081,11 @@ static void pl2303_read_bulk_callback(struct urb *urb)
1072 1081
1073 if (status) { 1082 if (status) {
1074 dbg("%s - urb status = %d", __func__, status); 1083 dbg("%s - urb status = %d", __func__, status);
1075 if (!port->port.count) {
1076 dbg("%s - port is closed, exiting.", __func__);
1077 return;
1078 }
1079 if (status == -EPROTO) { 1084 if (status == -EPROTO) {
1080 /* PL2303 mysteriously fails with -EPROTO reschedule 1085 /* PL2303 mysteriously fails with -EPROTO reschedule
1081 * the read */ 1086 * the read */
1082 dbg("%s - caught -EPROTO, resubmitting the urb", 1087 dbg("%s - caught -EPROTO, resubmitting the urb",
1083 __func__); 1088 __func__);
1084 urb->dev = port->serial->dev;
1085 result = usb_submit_urb(urb, GFP_ATOMIC); 1089 result = usb_submit_urb(urb, GFP_ATOMIC);
1086 if (result) 1090 if (result)
1087 dev_err(&urb->dev->dev, "%s - failed" 1091 dev_err(&urb->dev->dev, "%s - failed"
@@ -1108,15 +1112,10 @@ static void pl2303_read_bulk_callback(struct urb *urb)
1108 } 1112 }
1109 tty_kref_put(tty); 1113 tty_kref_put(tty);
1110 /* Schedule the next read _if_ we are still open */ 1114 /* Schedule the next read _if_ we are still open */
1111 if (port->port.count) { 1115 result = usb_submit_urb(urb, GFP_ATOMIC);
1112 urb->dev = port->serial->dev; 1116 if (result && result != -EPERM)
1113 result = usb_submit_urb(urb, GFP_ATOMIC); 1117 dev_err(&urb->dev->dev, "%s - failed resubmitting"
1114 if (result) 1118 " read urb, error %d\n", __func__, result);
1115 dev_err(&urb->dev->dev, "%s - failed resubmitting"
1116 " read urb, error %d\n", __func__, result);
1117 }
1118
1119 return;
1120} 1119}
1121 1120
1122static void pl2303_write_bulk_callback(struct urb *urb) 1121static void pl2303_write_bulk_callback(struct urb *urb)
@@ -1146,7 +1145,6 @@ static void pl2303_write_bulk_callback(struct urb *urb)
1146 dbg("%s - nonzero write bulk status received: %d", __func__, 1145 dbg("%s - nonzero write bulk status received: %d", __func__,
1147 status); 1146 status);
1148 port->write_urb->transfer_buffer_length = 1; 1147 port->write_urb->transfer_buffer_length = 1;
1149 port->write_urb->dev = port->serial->dev;
1150 result = usb_submit_urb(port->write_urb, GFP_ATOMIC); 1148 result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
1151 if (result) 1149 if (result)
1152 dev_err(&urb->dev->dev, "%s - failed resubmitting write" 1150 dev_err(&urb->dev->dev, "%s - failed resubmitting write"
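pl2303.c now answers TIOCGSERIAL with a fixed serial_struct (PORT_16654 UART type, baud_base 460800, line/port taken from the usb-serial minor), as shown in the ioctl hunk above. A hedged userspace sketch of reading those values back; the /dev/ttyUSB0 path is an assumption about where the adapter appears.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_struct ser;
	int fd = open("/dev/ttyUSB0", O_RDONLY | O_NOCTTY);

	if (fd < 0 || ioctl(fd, TIOCGSERIAL, &ser) < 0) {
		perror("TIOCGSERIAL");
		return 1;
	}
	printf("type=%d line=%d port=%d baud_base=%d\n",
	       ser.type, ser.line, ser.port, ser.baud_base);
	close(fd);
	return 0;
}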
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
new file mode 100644
index 000000000000..0b9362061713
--- /dev/null
+++ b/drivers/usb/serial/qcaux.c
@@ -0,0 +1,96 @@
1/*
2 * Qualcomm USB Auxiliary Serial Port driver
3 *
4 * Copyright (C) 2008 Greg Kroah-Hartman <greg@kroah.com>
5 * Copyright (C) 2010 Dan Williams <dcbw@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Devices listed here usually provide a CDC ACM port on which normal modem
12 * AT commands and PPP can be used. But when that port is in-use by PPP it
13 * cannot be used simultaneously for status or signal strength. Instead, the
14 * ports here can be queried for that information using the Qualcomm DM
15 * protocol.
16 */
17
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/tty.h>
21#include <linux/module.h>
22#include <linux/usb.h>
23#include <linux/usb/serial.h>
24
25/* NOTE: for now, only use this driver for devices that provide a CDC-ACM port
26 * for normal AT commands, but also provide secondary USB interfaces for the
27 * QCDM-capable ports. Devices that do not provide a CDC-ACM port should
28 * probably be driven by option.ko.
29 */
30
31/* UTStarcom/Pantech/Curitel devices */
32#define UTSTARCOM_VENDOR_ID 0x106c
33#define UTSTARCOM_PRODUCT_PC5740 0x3701
34#define UTSTARCOM_PRODUCT_PC5750 0x3702 /* aka Pantech PX-500 */
35#define UTSTARCOM_PRODUCT_UM150 0x3711
36#define UTSTARCOM_PRODUCT_UM175_V1 0x3712
37#define UTSTARCOM_PRODUCT_UM175_V2 0x3714
38#define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
39
40/* CMOTECH devices */
41#define CMOTECH_VENDOR_ID 0x16d8
42#define CMOTECH_PRODUCT_CDU550 0x5553
43#define CMOTECH_PRODUCT_CDX650 0x6512
44
45static struct usb_device_id id_table[] = {
46 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
47 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
48 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM150, 0xff, 0x00, 0x00) },
49 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V1, 0xff, 0x00, 0x00) },
50 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V2, 0xff, 0x00, 0x00) },
51 { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) },
52 { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) },
53 { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
54 { },
55};
56MODULE_DEVICE_TABLE(usb, id_table);
57
58static struct usb_driver qcaux_driver = {
59 .name = "qcaux",
60 .probe = usb_serial_probe,
61 .disconnect = usb_serial_disconnect,
62 .id_table = id_table,
63 .no_dynamic_id = 1,
64};
65
66static struct usb_serial_driver qcaux_device = {
67 .driver = {
68 .owner = THIS_MODULE,
69 .name = "qcaux",
70 },
71 .id_table = id_table,
72 .num_ports = 1,
73};
74
75static int __init qcaux_init(void)
76{
77 int retval;
78
79 retval = usb_serial_register(&qcaux_device);
80 if (retval)
81 return retval;
82 retval = usb_register(&qcaux_driver);
83 if (retval)
84 usb_serial_deregister(&qcaux_device);
85 return retval;
86}
87
88static void __exit qcaux_exit(void)
89{
90 usb_deregister(&qcaux_driver);
91 usb_serial_deregister(&qcaux_device);
92}
93
94module_init(qcaux_init);
95module_exit(qcaux_exit);
96MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 7528b8d57f1c..310ff6ec6567 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -21,7 +21,7 @@
21 21
22static int debug; 22static int debug;
23 23
24static struct usb_device_id id_table[] = { 24static const struct usb_device_id id_table[] = {
25 {USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */ 25 {USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
26 {USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 26 {USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
27 {USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ 27 {USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c
index 951ea0c6ba77..cb8195cabfde 100644
--- a/drivers/usb/serial/siemens_mpi.c
+++ b/drivers/usb/serial/siemens_mpi.c
@@ -22,7 +22,7 @@
22#define DRIVER_DESC "Driver for Siemens USB/MPI adapter" 22#define DRIVER_DESC "Driver for Siemens USB/MPI adapter"
23 23
24 24
25static struct usb_device_id id_table[] = { 25static const struct usb_device_id id_table[] = {
26 /* Vendor and product id for 6ES7-972-0CB20-0XA0 */ 26 /* Vendor and product id for 6ES7-972-0CB20-0XA0 */
27 { USB_DEVICE(0x908, 0x0004) }, 27 { USB_DEVICE(0x908, 0x0004) },
28 { }, 28 { },
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 3eb6143bb646..34e6f894cba9 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -226,7 +226,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = {
226 .ifaceinfo = direct_ip_non_serial_ifaces, 226 .ifaceinfo = direct_ip_non_serial_ifaces,
227}; 227};
228 228
229static struct usb_device_id id_table [] = { 229static const struct usb_device_id id_table[] = {
230 { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ 230 { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
231 { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ 231 { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
232 { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ 232 { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
@@ -304,16 +304,6 @@ static struct usb_device_id id_table [] = {
304}; 304};
305MODULE_DEVICE_TABLE(usb, id_table); 305MODULE_DEVICE_TABLE(usb, id_table);
306 306
307static struct usb_driver sierra_driver = {
308 .name = "sierra",
309 .probe = usb_serial_probe,
310 .disconnect = usb_serial_disconnect,
311 .suspend = usb_serial_suspend,
312 .resume = usb_serial_resume,
313 .id_table = id_table,
314 .no_dynamic_id = 1,
315 .supports_autosuspend = 1,
316};
317 307
318struct sierra_port_private { 308struct sierra_port_private {
319 spinlock_t lock; /* lock the structure */ 309 spinlock_t lock; /* lock the structure */
@@ -477,7 +467,7 @@ static void sierra_outdat_callback(struct urb *urb)
477static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port, 467static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
478 const unsigned char *buf, int count) 468 const unsigned char *buf, int count)
479{ 469{
480 struct sierra_port_private *portdata = usb_get_serial_port_data(port); 470 struct sierra_port_private *portdata;
481 struct sierra_intf_private *intfdata; 471 struct sierra_intf_private *intfdata;
482 struct usb_serial *serial = port->serial; 472 struct usb_serial *serial = port->serial;
483 unsigned long flags; 473 unsigned long flags;
@@ -604,14 +594,15 @@ static void sierra_indat_callback(struct urb *urb)
604 } else { 594 } else {
605 if (urb->actual_length) { 595 if (urb->actual_length) {
606 tty = tty_port_tty_get(&port->port); 596 tty = tty_port_tty_get(&port->port);
607 597 if (tty) {
608 tty_buffer_request_room(tty, urb->actual_length); 598 tty_insert_flip_string(tty, data,
609 tty_insert_flip_string(tty, data, urb->actual_length); 599 urb->actual_length);
610 tty_flip_buffer_push(tty); 600 tty_flip_buffer_push(tty);
611 601
612 tty_kref_put(tty); 602 tty_kref_put(tty);
613 usb_serial_debug_data(debug, &port->dev, __func__, 603 usb_serial_debug_data(debug, &port->dev,
614 urb->actual_length, data); 604 __func__, urb->actual_length, data);
605 }
615 } else { 606 } else {
616 dev_dbg(&port->dev, "%s: empty read urb" 607 dev_dbg(&port->dev, "%s: empty read urb"
617 " received\n", __func__); 608 " received\n", __func__);
@@ -619,10 +610,10 @@ static void sierra_indat_callback(struct urb *urb)
619 } 610 }
620 611
621 /* Resubmit urb so we continue receiving */ 612 /* Resubmit urb so we continue receiving */
622 if (port->port.count && status != -ESHUTDOWN && status != -EPERM) { 613 if (status != -ESHUTDOWN && status != -EPERM) {
623 usb_mark_last_busy(port->serial->dev); 614 usb_mark_last_busy(port->serial->dev);
624 err = usb_submit_urb(urb, GFP_ATOMIC); 615 err = usb_submit_urb(urb, GFP_ATOMIC);
625 if (err) 616 if (err && err != -EPERM)
626 dev_err(&port->dev, "resubmit read urb failed." 617 dev_err(&port->dev, "resubmit read urb failed."
627 "(%d)\n", err); 618 "(%d)\n", err);
628 } 619 }
@@ -681,11 +672,11 @@ static void sierra_instat_callback(struct urb *urb)
681 dev_dbg(&port->dev, "%s: error %d\n", __func__, status); 672 dev_dbg(&port->dev, "%s: error %d\n", __func__, status);
682 673
683 /* Resubmit urb so we continue receiving IRQ data */ 674 /* Resubmit urb so we continue receiving IRQ data */
684 if (port->port.count && status != -ESHUTDOWN && status != -ENOENT) { 675 if (status != -ESHUTDOWN && status != -ENOENT) {
685 usb_mark_last_busy(serial->dev); 676 usb_mark_last_busy(serial->dev);
686 urb->dev = serial->dev; 677 urb->dev = serial->dev;
687 err = usb_submit_urb(urb, GFP_ATOMIC); 678 err = usb_submit_urb(urb, GFP_ATOMIC);
688 if (err) 679 if (err && err != -EPERM)
689 dev_err(&port->dev, "%s: resubmit intr urb " 680 dev_err(&port->dev, "%s: resubmit intr urb "
690 "failed. (%d)\n", __func__, err); 681 "failed. (%d)\n", __func__, err);
691 } 682 }
@@ -1061,11 +1052,31 @@ static int sierra_resume(struct usb_serial *serial)
1061 1052
1062 return ec ? -EIO : 0; 1053 return ec ? -EIO : 0;
1063} 1054}
1055
1056static int sierra_reset_resume(struct usb_interface *intf)
1057{
1058 struct usb_serial *serial = usb_get_intfdata(intf);
1059 dev_err(&serial->dev->dev, "%s\n", __func__);
1060 return usb_serial_resume(intf);
1061}
1064#else 1062#else
1065#define sierra_suspend NULL 1063#define sierra_suspend NULL
1066#define sierra_resume NULL 1064#define sierra_resume NULL
1065#define sierra_reset_resume NULL
1067#endif 1066#endif
1068 1067
1068static struct usb_driver sierra_driver = {
1069 .name = "sierra",
1070 .probe = usb_serial_probe,
1071 .disconnect = usb_serial_disconnect,
1072 .suspend = usb_serial_suspend,
1073 .resume = usb_serial_resume,
1074 .reset_resume = sierra_reset_resume,
1075 .id_table = id_table,
1076 .no_dynamic_id = 1,
1077 .supports_autosuspend = 1,
1078};
1079
1069static struct usb_serial_driver sierra_device = { 1080static struct usb_serial_driver sierra_device = {
1070 .driver = { 1081 .driver = {
1071 .owner = THIS_MODULE, 1082 .owner = THIS_MODULE,
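sierra.c gains a reset_resume handler in the hunks above: when a device has to be reset while its interface is suspended, usbcore calls reset_resume instead of resume, and a driver without that hook would otherwise be unbound and re-probed; here the hook simply reuses the generic usb-serial resume path. A condensed sketch of the resulting wiring under assumed "demo" names; the vendor/product pair is the Airprime/Sierra PC 5220 entry already present in the table above.

#include <linux/usb.h>
#include <linux/usb/serial.h>

/* Hedged sketch; "demo" names are placeholders, not the sierra driver. */
static const struct usb_device_id demo_ids[] = {
	{ USB_DEVICE(0x0F3D, 0x0112) },	/* Airprime/Sierra PC 5220 */
	{ }
};

#ifdef CONFIG_PM
static int demo_reset_resume(struct usb_interface *intf)
{
	/* device was reset during suspend; bring it back like a resume */
	return usb_serial_resume(intf);
}
#else
#define demo_reset_resume NULL
#endif

static struct usb_driver demo_driver = {
	.name			= "demo",
	.probe			= usb_serial_probe,
	.disconnect		= usb_serial_disconnect,
	.suspend		= usb_serial_suspend,
	.resume			= usb_serial_resume,
	.reset_resume		= demo_reset_resume,
	.id_table		= demo_ids,
	.no_dynamic_id		= 1,
	.supports_autosuspend	= 1,
};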
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 1e58220403d1..5d39191e7244 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -45,7 +45,7 @@ static int debug;
45#define SPCP8x5_835_VID 0x04fc 45#define SPCP8x5_835_VID 0x04fc
46#define SPCP8x5_835_PID 0x0231 46#define SPCP8x5_835_PID 0x0231
47 47
48static struct usb_device_id id_table [] = { 48static const struct usb_device_id id_table[] = {
49 { USB_DEVICE(SPCP8x5_PHILIPS_VID , SPCP8x5_PHILIPS_PID)}, 49 { USB_DEVICE(SPCP8x5_PHILIPS_VID , SPCP8x5_PHILIPS_PID)},
50 { USB_DEVICE(SPCP8x5_INTERMATIC_VID, SPCP8x5_INTERMATIC_PID)}, 50 { USB_DEVICE(SPCP8x5_INTERMATIC_VID, SPCP8x5_INTERMATIC_PID)},
51 { USB_DEVICE(SPCP8x5_835_VID, SPCP8x5_835_PID)}, 51 { USB_DEVICE(SPCP8x5_835_VID, SPCP8x5_835_PID)},
@@ -609,7 +609,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
609 if (i < 0) 609 if (i < 0)
610 dev_err(&port->dev, "Set UART format %#x failed (error = %d)\n", 610 dev_err(&port->dev, "Set UART format %#x failed (error = %d)\n",
611 uartdata, i); 611 uartdata, i);
612 dbg("0x21:0x40:0:0 %d\n", i); 612 dbg("0x21:0x40:0:0 %d", i);
613 613
614 if (cflag & CRTSCTS) { 614 if (cflag & CRTSCTS) {
615 /* enable hardware flow control */ 615 /* enable hardware flow control */
@@ -677,7 +677,6 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
677 struct tty_struct *tty; 677 struct tty_struct *tty;
678 unsigned char *data = urb->transfer_buffer; 678 unsigned char *data = urb->transfer_buffer;
679 unsigned long flags; 679 unsigned long flags;
680 int i;
681 int result = urb->status; 680 int result = urb->status;
682 u8 status; 681 u8 status;
683 char tty_flag; 682 char tty_flag;
@@ -687,8 +686,6 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
687 686
688 /* check the urb status */ 687 /* check the urb status */
689 if (result) { 688 if (result) {
690 if (!port->port.count)
691 return;
692 if (result == -EPROTO) { 689 if (result == -EPROTO) {
693 /* spcp8x5 mysteriously fails with -EPROTO */ 690 /* spcp8x5 mysteriously fails with -EPROTO */
694 /* reschedule the read */ 691 /* reschedule the read */
@@ -726,26 +723,20 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
726 723
727 tty = tty_port_tty_get(&port->port); 724 tty = tty_port_tty_get(&port->port);
728 if (tty && urb->actual_length) { 725 if (tty && urb->actual_length) {
729 tty_buffer_request_room(tty, urb->actual_length + 1);
730 /* overrun is special, not associated with a char */ 726 /* overrun is special, not associated with a char */
731 if (status & UART_OVERRUN_ERROR) 727 if (status & UART_OVERRUN_ERROR)
732 tty_insert_flip_char(tty, 0, TTY_OVERRUN); 728 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
733 for (i = 0; i < urb->actual_length; ++i) 729 tty_insert_flip_string_fixed_flag(tty, data,
734 tty_insert_flip_char(tty, data[i], tty_flag); 730 urb->actual_length, tty_flag);
735 tty_flip_buffer_push(tty); 731 tty_flip_buffer_push(tty);
736 } 732 }
737 tty_kref_put(tty); 733 tty_kref_put(tty);
738 734
739 /* Schedule the next read _if_ we are still open */ 735 /* Schedule the next read */
740 if (port->port.count) { 736 urb->dev = port->serial->dev;
741 urb->dev = port->serial->dev; 737 result = usb_submit_urb(urb , GFP_ATOMIC);
742 result = usb_submit_urb(urb , GFP_ATOMIC); 738 if (result)
743 if (result) 739 dev_dbg(&port->dev, "failed submitting read urb %d\n", result);
744 dev_dbg(&port->dev, "failed submitting read urb %d\n",
745 result);
746 }
747
748 return;
749} 740}
750 741
751/* get data from ring buffer and then write to usb bus */ 742/* get data from ring buffer and then write to usb bus */
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index b282c0f2d8e5..72398888858f 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -21,7 +21,7 @@
21 21
22static int debug; 22static int debug;
23 23
24static struct usb_device_id id_table[] = { 24static const struct usb_device_id id_table[] = {
25 { USB_DEVICE(0x05e0, 0x0600) }, 25 { USB_DEVICE(0x05e0, 0x0600) },
26 { }, 26 { },
27}; 27};
@@ -51,7 +51,6 @@ static void symbol_int_callback(struct urb *urb)
51 int status = urb->status; 51 int status = urb->status;
52 struct tty_struct *tty; 52 struct tty_struct *tty;
53 int result; 53 int result;
54 int available_room = 0;
55 int data_length; 54 int data_length;
56 55
57 dbg("%s - port %d", __func__, port->number); 56 dbg("%s - port %d", __func__, port->number);
@@ -89,13 +88,8 @@ static void symbol_int_callback(struct urb *urb)
89 */ 88 */
90 tty = tty_port_tty_get(&port->port); 89 tty = tty_port_tty_get(&port->port);
91 if (tty) { 90 if (tty) {
92 available_room = tty_buffer_request_room(tty, 91 tty_insert_flip_string(tty, &data[1], data_length);
93 data_length); 92 tty_flip_buffer_push(tty);
94 if (available_room) {
95 tty_insert_flip_string(tty, &data[1],
96 available_room);
97 tty_flip_buffer_push(tty);
98 }
99 tty_kref_put(tty); 93 tty_kref_put(tty);
100 } 94 }
101 } else { 95 } else {
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 1e9dc8821698..0afe5c71c17e 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1271,14 +1271,13 @@ static void ti_recv(struct device *dev, struct tty_struct *tty,
1271 int cnt; 1271 int cnt;
1272 1272
1273 do { 1273 do {
1274 cnt = tty_buffer_request_room(tty, length); 1274 cnt = tty_insert_flip_string(tty, data, length);
1275 if (cnt < length) { 1275 if (cnt < length) {
1276 dev_err(dev, "%s - dropping data, %d bytes lost\n", 1276 dev_err(dev, "%s - dropping data, %d bytes lost\n",
1277 __func__, length - cnt); 1277 __func__, length - cnt);
1278 if (cnt == 0) 1278 if (cnt == 0)
1279 break; 1279 break;
1280 } 1280 }
1281 tty_insert_flip_string(tty, data, cnt);
1282 tty_flip_buffer_push(tty); 1281 tty_flip_buffer_push(tty);
1283 data += cnt; 1282 data += cnt;
1284 length -= cnt; 1283 length -= cnt;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 33c85f7084f8..3873660d8217 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -358,10 +358,6 @@ static int serial_write(struct tty_struct *tty, const unsigned char *buf,
358 358
359 dbg("%s - port %d, %d byte(s)", __func__, port->number, count); 359 dbg("%s - port %d, %d byte(s)", __func__, port->number, count);
360 360
361 /* count is managed under the mutex lock for the tty so cannot
362 drop to zero until after the last close completes */
363 WARN_ON(!port->port.count);
364
365 /* pass on to the driver specific version of this function */ 361 /* pass on to the driver specific version of this function */
366 retval = port->serial->type->write(tty, port, buf, count); 362 retval = port->serial->type->write(tty, port, buf, count);
367 363
@@ -373,7 +369,6 @@ static int serial_write_room(struct tty_struct *tty)
373{ 369{
374 struct usb_serial_port *port = tty->driver_data; 370 struct usb_serial_port *port = tty->driver_data;
375 dbg("%s - port %d", __func__, port->number); 371 dbg("%s - port %d", __func__, port->number);
376 WARN_ON(!port->port.count);
377 /* pass on to the driver specific version of this function */ 372 /* pass on to the driver specific version of this function */
378 return port->serial->type->write_room(tty); 373 return port->serial->type->write_room(tty);
379} 374}
@@ -381,7 +376,7 @@ static int serial_write_room(struct tty_struct *tty)
381static int serial_chars_in_buffer(struct tty_struct *tty) 376static int serial_chars_in_buffer(struct tty_struct *tty)
382{ 377{
383 struct usb_serial_port *port = tty->driver_data; 378 struct usb_serial_port *port = tty->driver_data;
384 dbg("%s = port %d", __func__, port->number); 379 dbg("%s - port %d", __func__, port->number);
385 380
386 /* if the device was unplugged then any remaining characters 381 /* if the device was unplugged then any remaining characters
387 fell out of the connector ;) */ 382 fell out of the connector ;) */
@@ -396,7 +391,6 @@ static void serial_throttle(struct tty_struct *tty)
396 struct usb_serial_port *port = tty->driver_data; 391 struct usb_serial_port *port = tty->driver_data;
397 dbg("%s - port %d", __func__, port->number); 392 dbg("%s - port %d", __func__, port->number);
398 393
399 WARN_ON(!port->port.count);
400 /* pass on to the driver specific version of this function */ 394 /* pass on to the driver specific version of this function */
401 if (port->serial->type->throttle) 395 if (port->serial->type->throttle)
402 port->serial->type->throttle(tty); 396 port->serial->type->throttle(tty);
@@ -407,7 +401,6 @@ static void serial_unthrottle(struct tty_struct *tty)
407 struct usb_serial_port *port = tty->driver_data; 401 struct usb_serial_port *port = tty->driver_data;
408 dbg("%s - port %d", __func__, port->number); 402 dbg("%s - port %d", __func__, port->number);
409 403
410 WARN_ON(!port->port.count);
411 /* pass on to the driver specific version of this function */ 404 /* pass on to the driver specific version of this function */
412 if (port->serial->type->unthrottle) 405 if (port->serial->type->unthrottle)
413 port->serial->type->unthrottle(tty); 406 port->serial->type->unthrottle(tty);
@@ -421,8 +414,6 @@ static int serial_ioctl(struct tty_struct *tty, struct file *file,
 
 	dbg("%s - port %d, cmd 0x%.4x", __func__, port->number, cmd);
 
-	WARN_ON(!port->port.count);
-
 	/* pass on to the driver specific version of this function
 	   if it is available */
 	if (port->serial->type->ioctl) {
@@ -437,7 +428,6 @@ static void serial_set_termios(struct tty_struct *tty, struct ktermios *old)
 	struct usb_serial_port *port = tty->driver_data;
 	dbg("%s - port %d", __func__, port->number);
 
-	WARN_ON(!port->port.count);
 	/* pass on to the driver specific version of this function
 	   if it is available */
 	if (port->serial->type->set_termios)
@@ -452,7 +442,6 @@ static int serial_break(struct tty_struct *tty, int break_state)
 
 	dbg("%s - port %d", __func__, port->number);
 
-	WARN_ON(!port->port.count);
 	/* pass on to the driver specific version of this function
 	   if it is available */
 	if (port->serial->type->break_ctl)
@@ -513,7 +502,6 @@ static int serial_tiocmget(struct tty_struct *tty, struct file *file)
 
 	dbg("%s - port %d", __func__, port->number);
 
-	WARN_ON(!port->port.count);
 	if (port->serial->type->tiocmget)
 		return port->serial->type->tiocmget(tty, file);
 	return -EINVAL;
@@ -526,7 +514,6 @@ static int serial_tiocmset(struct tty_struct *tty, struct file *file,
 
 	dbg("%s - port %d", __func__, port->number);
 
-	WARN_ON(!port->port.count);
 	if (port->serial->type->tiocmset)
 		return port->serial->type->tiocmset(tty, file, set, clear);
 	return -EINVAL;
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 7b5bfc4edd3d..252cc2d993b2 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -29,7 +29,7 @@ static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
 	0xff,
 };
 
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x0525, 0x127a) },
 	{ },
 };
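
The usb_debug change above, like the matching ones in whiteheat.c, usb-skeleton.c and cbaf.c further down, only marks the device ID table const, since the USB core never writes to it. A minimal sketch of the pattern, with placeholder vendor and product IDs rather than real hardware:

#include <linux/module.h>
#include <linux/usb.h>

/* Illustrative only: placeholder IDs, not taken from any driver above. */
static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },
	{ }	/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);
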
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index ad1f9232292d..094942707c7d 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -368,7 +368,7 @@ static int visor_write(struct tty_struct *tty, struct usb_serial_port *port,
 	spin_lock_irqsave(&priv->lock, flags);
 	if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
 		spin_unlock_irqrestore(&priv->lock, flags);
-		dbg("%s - write limit hit\n", __func__);
+		dbg("%s - write limit hit", __func__);
 		return 0;
 	}
 	priv->outstanding_urbs++;
@@ -446,7 +446,7 @@ static int visor_write_room(struct tty_struct *tty)
 	spin_lock_irqsave(&priv->lock, flags);
 	if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
 		spin_unlock_irqrestore(&priv->lock, flags);
-		dbg("%s - write limit hit\n", __func__);
+		dbg("%s - write limit hit", __func__);
 		return 0;
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -503,13 +503,9 @@ static void visor_read_bulk_callback(struct urb *urb)
 	if (urb->actual_length) {
 		tty = tty_port_tty_get(&port->port);
 		if (tty) {
-			available_room = tty_buffer_request_room(tty,
-					urb->actual_length);
-			if (available_room) {
-				tty_insert_flip_string(tty, data,
-						available_room);
-				tty_flip_buffer_push(tty);
-			}
+			tty_insert_flip_string(tty, data,
+					urb->actual_length);
+			tty_flip_buffer_push(tty);
 			tty_kref_put(tty);
 		}
 		spin_lock(&priv->lock);
@@ -807,10 +803,14 @@ static int clie_3_5_startup(struct usb_serial *serial)
 {
 	struct device *dev = &serial->dev->dev;
 	int result;
-	u8 data;
+	u8 *data;
 
 	dbg("%s", __func__);
 
+	data = kmalloc(1, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
 	/*
 	 * Note that PEG-300 series devices expect the following two calls.
 	 */
@@ -818,36 +818,42 @@ static int clie_3_5_startup(struct usb_serial *serial)
 	/* get the config number */
 	result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
 				  USB_REQ_GET_CONFIGURATION, USB_DIR_IN,
-				  0, 0, &data, 1, 3000);
+				  0, 0, data, 1, 3000);
 	if (result < 0) {
 		dev_err(dev, "%s: get config number failed: %d\n",
 							__func__, result);
-		return result;
+		goto out;
 	}
 	if (result != 1) {
 		dev_err(dev, "%s: get config number bad return length: %d\n",
 							__func__, result);
-		return -EIO;
+		result = -EIO;
+		goto out;
 	}
 
 	/* get the interface number */
 	result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
 				  USB_REQ_GET_INTERFACE,
 				  USB_DIR_IN | USB_RECIP_INTERFACE,
-				  0, 0, &data, 1, 3000);
+				  0, 0, data, 1, 3000);
 	if (result < 0) {
 		dev_err(dev, "%s: get interface number failed: %d\n",
 							__func__, result);
-		return result;
+		goto out;
 	}
 	if (result != 1) {
 		dev_err(dev,
 			"%s: get interface number bad return length: %d\n",
 							__func__, result);
-		return -EIO;
+		result = -EIO;
+		goto out;
 	}
 
-	return generic_startup(serial);
+	result = generic_startup(serial);
+out:
+	kfree(data);
+
+	return result;
 }
 
 static int treo_attach(struct usb_serial *serial)
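
The clie_3_5_startup() hunks above switch the usb_control_msg() transfer buffer from the address of a stack variable to a one-byte kmalloc() allocation, since USB transfer buffers may be DMA-mapped and so should come from the heap. A rough sketch of the same pattern, with a hypothetical helper name that is not part of this patch:

#include <linux/slab.h>
#include <linux/usb.h>

/* Illustrative only: read one byte over a control transfer into a
 * heap buffer instead of the address of a local variable. */
static int example_read_config_byte(struct usb_device *udev, u8 *value)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);	/* DMA-able, unlike stack memory */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      USB_REQ_GET_CONFIGURATION, USB_DIR_IN,
			      0, 0, buf, 1, 3000);
	if (ret == 1) {
		*value = *buf;
		ret = 0;
	} else if (ret >= 0) {
		ret = -EIO;	/* short or empty read */
	}

	kfree(buf);
	return ret;
}
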
diff --git a/drivers/usb/serial/vivopay-serial.c b/drivers/usb/serial/vivopay-serial.c
new file mode 100644
index 000000000000..f719d00972fc
--- /dev/null
+++ b/drivers/usb/serial/vivopay-serial.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2001-2005 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2009 Outpost Embedded, LLC
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+
+#define DRIVER_VERSION "v1.0"
+#define DRIVER_DESC "ViVOpay USB Serial Driver"
+
+#define VIVOPAY_VENDOR_ID 0x1d5f
+
+
+static struct usb_device_id id_table [] = {
+	/* ViVOpay 8800 */
+	{ USB_DEVICE(VIVOPAY_VENDOR_ID, 0x1004) },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver vivopay_serial_driver = {
+	.name =			"vivopay-serial",
+	.probe =		usb_serial_probe,
+	.disconnect =		usb_serial_disconnect,
+	.id_table =		id_table,
+	.no_dynamic_id =	1,
+};
+
+static struct usb_serial_driver vivopay_serial_device = {
+	.driver = {
+		.owner =	THIS_MODULE,
+		.name =		"vivopay-serial",
+	},
+	.id_table =		id_table,
+	.usb_driver =		&vivopay_serial_driver,
+	.num_ports =		1,
+};
+
+static int __init vivopay_serial_init(void)
+{
+	int retval;
+	retval = usb_serial_register(&vivopay_serial_device);
+	if (retval)
+		goto failed_usb_serial_register;
+	retval = usb_register(&vivopay_serial_driver);
+	if (retval)
+		goto failed_usb_register;
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+	       DRIVER_DESC "\n");
+	return 0;
+failed_usb_register:
+	usb_serial_deregister(&vivopay_serial_device);
+failed_usb_serial_register:
+	return retval;
+}
+
+static void __exit vivopay_serial_exit(void)
+{
+	usb_deregister(&vivopay_serial_driver);
+	usb_serial_deregister(&vivopay_serial_device);
+}
+
+module_init(vivopay_serial_init);
+module_exit(vivopay_serial_exit);
+
+MODULE_AUTHOR("Forest Bond <forest.bond@outpostembedded.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 1093d2eb046a..12ed8209ca72 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -111,17 +111,17 @@ static int debug;
    separate ID tables, and then a third table that combines them
    just for the purpose of exporting the autoloading information.
 */
-static struct usb_device_id id_table_std [] = {
+static const struct usb_device_id id_table_std[] = {
 	{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
 	{ }			/* Terminating entry */
 };
 
-static struct usb_device_id id_table_prerenumeration [] = {
+static const struct usb_device_id id_table_prerenumeration[] = {
 	{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) },
 	{ }			/* Terminating entry */
 };
 
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
 	{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
 	{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) },
 	{ }			/* Terminating entry */
@@ -1492,21 +1492,9 @@ static void rx_data_softint(struct work_struct *work)
 		wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
 		urb = wrap->urb;
 
-		if (tty && urb->actual_length) {
-			int len = tty_buffer_request_room(tty,
-					urb->actual_length);
-			/* This stuff can go away now I suspect */
-			if (unlikely(len < urb->actual_length)) {
-				spin_lock_irqsave(&info->lock, flags);
-				list_add(tmp, &info->rx_urb_q);
-				spin_unlock_irqrestore(&info->lock, flags);
-				tty_flip_buffer_push(tty);
-				schedule_work(&info->rx_work);
-				goto out;
-			}
-			tty_insert_flip_string(tty, urb->transfer_buffer, len);
-			sent += len;
-		}
+		if (tty && urb->actual_length)
+			sent += tty_insert_flip_string(tty,
+				urb->transfer_buffer, urb->actual_length);
 
 		urb->dev = port->serial->dev;
 		result = usb_submit_urb(urb, GFP_ATOMIC);
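
The visor and whiteheat receive-path hunks above drop the tty_buffer_request_room() bookkeeping: tty_insert_flip_string() already returns how many bytes the tty buffer accepted, so the caller can simply insert, push, and use the return value. A rough sketch of the resulting pattern, with an illustrative helper name not taken from either driver:

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/usb.h>

/* Illustrative only: hand a completed RX urb's payload to the tty layer. */
static int example_push_rx(struct tty_struct *tty, struct urb *urb)
{
	int copied = 0;

	if (tty && urb->actual_length) {
		/* Returns the number of bytes that fit in the flip buffer. */
		copied = tty_insert_flip_string(tty, urb->transfer_buffer,
						urb->actual_length);
		tty_flip_buffer_push(tty);
	}
	return copied;
}
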
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 80e65f29921c..198bb3ed95b2 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -202,7 +202,7 @@ static int onetouch_connect_input(struct us_data *ss)
 		goto fail1;
 
 	onetouch->data = usb_buffer_alloc(udev, ONETOUCH_PKT_LEN,
-					  GFP_ATOMIC, &onetouch->data_dma);
+					  GFP_KERNEL, &onetouch->data_dma);
 	if (!onetouch->data)
 		goto fail1;
 
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index aadc16b5eed7..4cc035562cc2 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -133,7 +133,7 @@ static int slave_configure(struct scsi_device *sdev)
 
 		if (us->fflags & US_FL_MAX_SECTORS_MIN)
 			max_sectors = PAGE_CACHE_SIZE >> 9;
-		if (queue_max_sectors(sdev->request_queue) > max_sectors)
+		if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
 			blk_queue_max_hw_sectors(sdev->request_queue,
 					      max_sectors);
 	} else if (sdev->type == TYPE_TAPE) {
@@ -484,7 +484,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
 
-	return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue));
+	return sprintf(buf, "%u\n", queue_max_hw_sectors(sdev->request_queue));
 }
 
 /* Input routine for the sysfs max_sectors file */
@@ -494,9 +494,9 @@ static ssize_t store_max_sectors(struct device *dev, struct device_attribute *at
 	struct scsi_device *sdev = to_scsi_device(dev);
 	unsigned short ms;
 
-	if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) {
+	if (sscanf(buf, "%hu", &ms) > 0) {
 		blk_queue_max_hw_sectors(sdev->request_queue, ms);
-		return strlen(buf);
+		return count;
 	}
 	return -EINVAL;
 }
@@ -539,7 +539,7 @@ struct scsi_host_template usb_stor_host_template = {
 	.slave_configure = slave_configure,
 
 	/* lots of sg segments can be handled */
-	.sg_tablesize = SG_ALL,
+	.sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
 
 	/* limit the total size of a transfer to 120 KB */
 	.max_sectors = 240,
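
Besides switching to queue_max_hw_sectors(), the store_max_sectors() hunk above also makes the sysfs store handler return count, signalling that the whole write was consumed, rather than strlen(buf), which need not match what userspace wrote. A generic sketch of that convention; the attribute and helper names are made up for illustration:

#include <linux/device.h>
#include <linux/kernel.h>

/* Illustrative only: a sysfs store handler that consumes the whole write. */
static ssize_t example_store_limit(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned short val;

	if (sscanf(buf, "%hu", &val) != 1)
		return -EINVAL;

	/* ... apply 'val' to the underlying device or queue here ... */

	return count;	/* all 'count' bytes were handled */
}
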
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index b62a28814ebe..bd3f415893d8 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1628,10 +1628,10 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	if ( (result = usbat_multiple_write(us,
-		registers, data, 7)) != USB_STOR_TRANSPORT_GOOD) {
+	result = usbat_multiple_write(us, registers, data, 7);
+
+	if (result != USB_STOR_TRANSPORT_GOOD)
 		return result;
-	}
 
 	/*
 	 * Write the 12-byte command header.
@@ -1643,12 +1643,11 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
 	 * AT SPEED 4 IS UNRELIABLE!!!
 	 */
 
-	if ((result = usbat_write_block(us,
-		USBAT_ATA, srb->cmnd, 12,
-		(srb->cmnd[0]==GPCMD_BLANK ? 75 : 10), 0) !=
-		USB_STOR_TRANSPORT_GOOD)) {
+	result = usbat_write_block(us, USBAT_ATA, srb->cmnd, 12,
+			srb->cmnd[0] == GPCMD_BLANK ? 75 : 10, 0);
+
+	if (result != USB_STOR_TRANSPORT_GOOD)
 		return result;
-	}
 
 	/* If there is response data to be read in then do it here. */
 
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index cc313d16d727..468038126e5e 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -47,6 +47,8 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 
+#include <linux/usb/quirks.h>
+
 #include <scsi/scsi.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_device.h>
@@ -1297,6 +1299,10 @@ int usb_stor_port_reset(struct us_data *us)
 {
 	int result;
 
+	/*for these devices we must use the class specific method */
+	if (us->pusb_dev->quirks & USB_QUIRK_RESET_MORPHS)
+		return -EPERM;
+
 	result = usb_lock_device_for_reset(us->pusb_dev, us->pusb_intf);
 	if (result < 0)
 		US_DEBUGP("unable to lock device for reset: %d\n", result);
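
usb_stor_port_reset() above now refuses a port reset for devices flagged with USB_QUIRK_RESET_MORPHS, leaving them to the class-specific reset path. A minimal sketch of gating an operation on a device quirk bit; the wrapper function is hypothetical and elides the locking the real code performs:

#include <linux/usb.h>
#include <linux/usb/quirks.h>

/* Illustrative only: skip the bus-level reset for devices whose quirk
 * bits say it would change their identity. */
static int example_try_port_reset(struct usb_device *udev)
{
	if (udev->quirks & USB_QUIRK_RESET_MORPHS)
		return -EPERM;	/* caller must use the class-specific reset */

	/* ... lock the device and perform the port reset here ... */
	return 0;
}
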
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 49575fba3756..98b549b1cab2 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1147,8 +1147,8 @@ UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000,
 		0 ),
 
 /* Reported by Jan Dumon <j.dumon@option.com>
- * This device (wrongly) has a vendor-specific device descriptor.
- * The entry is needed so usb-storage can bind to it's mass-storage
+ * These devices (wrongly) have a vendor-specific device descriptor.
+ * These entries are needed so usb-storage can bind to their mass-storage
  * interface as an interface driver */
 UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
 		"Option",
@@ -1156,6 +1156,90 @@ UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		0 ),
 
+UNUSUAL_DEV( 0x0af0, 0x7701, 0x0000, 0x0000,
+		"Option",
+		"GI 0451 SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7706, 0x0000, 0x0000,
+		"Option",
+		"GI 0451 SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7901, 0x0000, 0x0000,
+		"Option",
+		"GI 0452 SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7A01, 0x0000, 0x0000,
+		"Option",
+		"GI 0461 SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7A05, 0x0000, 0x0000,
+		"Option",
+		"GI 0461 SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8300, 0x0000, 0x0000,
+		"Option",
+		"GI 033x SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8302, 0x0000, 0x0000,
+		"Option",
+		"GI 033x SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8304, 0x0000, 0x0000,
+		"Option",
+		"GI 033x SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xc100, 0x0000, 0x0000,
+		"Option",
+		"GI 070x SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd057, 0x0000, 0x0000,
+		"Option",
+		"GI 1505 SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd058, 0x0000, 0x0000,
+		"Option",
+		"GI 1509 SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd157, 0x0000, 0x0000,
+		"Option",
+		"GI 1515 SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd257, 0x0000, 0x0000,
+		"Option",
+		"GI 1215 SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd357, 0x0000, 0x0000,
+		"Option",
+		"GI 1505 SD-Card",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		0 ),
+
 /* Reported by Ben Efros <ben@pc-doctor.com> */
 UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
 		"Seagate",
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index b1e579c5c97c..61522787f39c 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -28,7 +28,7 @@
 #define USB_SKEL_PRODUCT_ID	0xfff0
 
 /* table of devices that work with this driver */
-static struct usb_device_id skel_table[] = {
+static const struct usb_device_id skel_table[] = {
 	{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
 	{ }			/* Terminating entry */
 };
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index 25eae405f622..51a8e0d5789d 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -641,7 +641,7 @@ static void cbaf_disconnect(struct usb_interface *iface)
 	kzfree(cbaf);
 }
 
-static struct usb_device_id cbaf_id_table[] = {
+static const struct usb_device_id cbaf_id_table[] = {
 	{ USB_INTERFACE_INFO(0xef, 0x03, 0x01), },
 	{ },
 };
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index dced419f7aba..1c918286159c 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -868,7 +868,7 @@ static struct usb_wireless_cap_descriptor wusb_cap_descr_default = {
  * reference that we'll drop.
  *
  * First we need to determine if the device is a WUSB device (else we
- * ignore it). For that we use the speed setting (USB_SPEED_VARIABLE)
+ * ignore it). For that we use the speed setting (USB_SPEED_WIRELESS)
  * [FIXME: maybe we'd need something more definitive]. If so, we track
  * it's usb_busd and from there, the WUSB HC.
  *
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
index 3b52161e6e9c..2d827397e30b 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/usb/wusbcore/mmc.c
@@ -263,7 +263,7 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
 {
 	int result = 0;
 
-	if (memcmp(chid, &wusb_ckhdid_zero, sizeof(chid)) == 0)
+	if (memcmp(chid, &wusb_ckhdid_zero, sizeof(*chid)) == 0)
 		chid = NULL;
 
 	mutex_lock(&wusbhc->mutex);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 3681c6a88212..b0a3fa00706d 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3025,6 +3025,20 @@ static int fbcon_fb_unregistered(struct fb_info *info)
 	return 0;
 }
 
+static void fbcon_remap_all(int idx)
+{
+	int i;
+	for (i = first_fb_vc; i <= last_fb_vc; i++)
+		set_con2fb_map(i, idx, 0);
+
+	if (con_is_bound(&fb_con)) {
+		printk(KERN_INFO "fbcon: Remapping primary device, "
+			"fb%i, to tty %i-%i\n", idx,
+			first_fb_vc + 1, last_fb_vc + 1);
+		info_idx = idx;
+	}
+}
+
 #ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
 static void fbcon_select_primary(struct fb_info *info)
 {
@@ -3225,6 +3239,10 @@ static int fbcon_event_notify(struct notifier_block *self,
 		caps = event->data;
 		fbcon_get_requirement(info, caps);
 		break;
+	case FB_EVENT_REMAP_ALL_CONSOLE:
+		idx = info->node;
+		fbcon_remap_all(idx);
+		break;
 	}
 done:
 	return ret;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 99bbd282ce63..a15b44e9c003 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1513,7 +1513,6 @@ register_framebuffer(struct fb_info *fb_info)
 					   fb_info->fix.id,
 					   registered_fb[i]->fix.id);
 				unregister_framebuffer(registered_fb[i]);
-				break;
 			}
 		}
 	}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index ce602dd09bc1..2f8413794d05 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -649,9 +649,13 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 			int bit_idx = __ffs(pending_bits);
 			int port = (word_idx * BITS_PER_LONG) + bit_idx;
 			int irq = evtchn_to_irq[port];
+			struct irq_desc *desc;
 
-			if (irq != -1)
-				handle_irq(irq, regs);
+			if (irq != -1) {
+				desc = irq_to_desc(irq);
+				if (desc)
+					generic_handle_irq_desc(irq, desc);
+			}
 		}
 	}
 