author     Paul Mundt <lethal@linux-sh.org>  2009-12-24 01:16:02 -0500
committer  Paul Mundt <lethal@linux-sh.org>  2009-12-24 01:16:02 -0500
commit     f34548cb735b7a80bbbb0bdd09ad4c2173ba92d5 (patch)
tree       e53c9e39b3149221779c10595bc59fa02de4f45f /drivers
parent     76382b5bdb77c29ab430e1b82ef1c604c8dd113b (diff)
parent     32b53076c31ce9159740b744d5eb5d9505312add (diff)
Merge branch 'sh/g3-prep' into sh/for-2.6.33
Diffstat (limited to 'drivers')
-rw-r--r--drivers/accessibility/braille/braille_console.c1
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/ata/libata-sff.c2
-rw-r--r--drivers/ata/pata_bf54x.c19
-rw-r--r--drivers/ata/pata_cmd64x.c118
-rw-r--r--drivers/ata/pata_hpt3x2n.c64
-rw-r--r--drivers/ata/pata_octeon_cf.c2
-rw-r--r--drivers/ata/sata_mv.c144
-rw-r--r--drivers/base/bus.c2
-rw-r--r--drivers/base/core.c16
-rw-r--r--drivers/base/devtmpfs.c19
-rw-r--r--drivers/base/driver.c4
-rw-r--r--drivers/base/memory.c19
-rw-r--r--drivers/base/platform.c1
-rw-r--r--drivers/base/power/main.c128
-rw-r--r--drivers/base/power/runtime.c45
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/char/nozomi.c48
-rw-r--r--drivers/char/sonypi.c49
-rw-r--r--drivers/gpu/drm/drm_drv.c13
-rw-r--r--drivers/gpu/drm/drm_edid.c2
-rw-r--r--drivers/gpu/drm/drm_ioc32.c89
-rw-r--r--drivers/gpu/drm/drm_mm.c4
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c5
-rw-r--r--drivers/gpu/drm/i2c/ch7006_mode.c5
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c2
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c2
-rw-r--r--drivers/gpu/drm/i830/i830_dma.c2
-rw-r--r--drivers/gpu/drm/i830/i830_drv.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c2
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c23
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c2
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c13
-rw-r--r--drivers/gpu/drm/nouveau/Makefile5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c621
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.c161
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.h133
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c147
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c197
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c212
-rw-r--r--drivers/gpu/drm/nouveau/nv40_grctx.c678
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c16
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c2
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c16
-rw-r--r--drivers/gpu/drm/radeon/atom.c12
-rw-r--r--drivers/gpu/drm/radeon/atom.h1
-rw-r--r--drivers/gpu/drm/radeon/atombios.h199
-rw-r--r--drivers/gpu/drm/radeon/r100.c4
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h4
-rw-r--r--drivers/gpu/drm/radeon/r300.c30
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c6
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c105
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c80
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/via/via_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c47
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c157
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c149
-rw-r--r--drivers/hid/hid-lg.h2
-rw-r--r--drivers/hwmon/Kconfig37
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/k10temp.c197
-rw-r--r--drivers/hwmon/lis3lv02d_i2c.c183
-rw-r--r--drivers/hwmon/sht15.c6
-rw-r--r--drivers/hwmon/smsc47m1.c153
-rw-r--r--drivers/hwmon/via-cputemp.c356
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/idle/i7300_idle.c15
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h9
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.c75
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c2
-rw-r--r--drivers/leds/Kconfig33
-rw-r--r--drivers/leds/Makefile4
-rw-r--r--drivers/leds/leds-adp5520.c230
-rw-r--r--drivers/leds/leds-alix2.c115
-rw-r--r--drivers/leds/leds-cobalt-qube.c4
-rw-r--r--drivers/leds/leds-cobalt-raq.c2
-rw-r--r--drivers/leds/leds-lt3593.c217
-rw-r--r--drivers/leds/leds-pwm.c5
-rw-r--r--drivers/leds/leds-regulator.c242
-rw-r--r--drivers/leds/leds-ss4200.c556
-rw-r--r--drivers/media/video/cx23885/cx23888-ir.c44
-rw-r--r--drivers/media/video/meye.c60
-rw-r--r--drivers/media/video/meye.h4
-rw-r--r--drivers/message/fusion/mptbase.c6
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/enclosure.c1
-rw-r--r--drivers/mmc/core/sdio.c5
-rw-r--r--drivers/mmc/core/sdio_bus.c7
-rw-r--r--drivers/mmc/host/Kconfig37
-rw-r--r--drivers/mmc/host/Makefile6
-rw-r--r--drivers/mmc/host/sdhci-of-core.c (renamed from drivers/mmc/host/sdhci-of.c)143
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c143
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c65
-rw-r--r--drivers/mmc/host/sdhci-of.h42
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c13
-rw-r--r--drivers/mtd/nand/Kconfig8
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/excite_nandflash.c248
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/bnx2.c12
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/davinci_emac.c2
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/e1000e/82571.c6
-rw-r--r--drivers/net/gianfar.c50
-rw-r--r--drivers/net/gianfar.h18
-rw-r--r--drivers/net/netxen/netxen_nic_main.c57
-rw-r--r--drivers/net/octeon/Kconfig10
-rw-r--r--drivers/net/octeon/Makefile2
-rw-r--r--drivers/net/octeon/octeon_mgmt.c1176
-rw-r--r--drivers/net/phy/Kconfig11
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/broadcom.c3
-rw-r--r--drivers/net/phy/mdio-octeon.c180
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h2
-rw-r--r--drivers/net/wireless/libertas/cmd.c4
-rw-r--r--drivers/net/wireless/libertas/dev.h4
-rw-r--r--drivers/net/wireless/libertas/main.c21
-rw-r--r--drivers/pcmcia/pxa2xx_base.c6
-rw-r--r--drivers/platform/x86/compal-laptop.c1
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c30
-rw-r--r--drivers/platform/x86/sony-laptop.c56
-rw-r--r--drivers/regulator/88pm8607.c685
-rw-r--r--drivers/regulator/Kconfig13
-rw-r--r--drivers/regulator/Makefile4
-rw-r--r--drivers/regulator/ab3100.c33
-rw-r--r--drivers/regulator/core.c248
-rw-r--r--drivers/regulator/da903x.c2
-rw-r--r--drivers/regulator/lp3971.c4
-rw-r--r--drivers/regulator/max8660.c510
-rw-r--r--drivers/regulator/mc13783-regulator.c245
-rw-r--r--drivers/regulator/mc13783.c410
-rw-r--r--drivers/regulator/twl-regulator.c147
-rw-r--r--drivers/regulator/wm831x-dcdc.c207
-rw-r--r--drivers/regulator/wm831x-ldo.c2
-rw-r--r--drivers/rtc/rtc-cmos.c3
-rw-r--r--drivers/rtc/rtc-ds1305.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c2
-rw-r--r--drivers/rtc/rtc-ds1374.c2
-rw-r--r--drivers/s390/block/dasd_alias.c2
-rw-r--r--drivers/s390/block/dasd_diag.c42
-rw-r--r--drivers/s390/char/fs3270.c2
-rw-r--r--drivers/s390/char/tape_34xx.c1
-rw-r--r--drivers/s390/char/tape_3590.c3
-rw-r--r--drivers/s390/char/tape_block.c1
-rw-r--r--drivers/s390/char/tape_char.c3
-rw-r--r--drivers/s390/char/tape_class.c4
-rw-r--r--drivers/s390/char/tape_core.c2
-rw-r--r--drivers/s390/char/tape_proc.c3
-rw-r--r--drivers/s390/char/tape_std.c3
-rw-r--r--drivers/s390/cio/ccwreq.c3
-rw-r--r--drivers/s390/cio/device.c1
-rw-r--r--drivers/s390/cio/device_pgid.c29
-rw-r--r--drivers/s390/cio/fcx.c4
-rw-r--r--drivers/s390/cio/io_sch.h1
-rw-r--r--drivers/s390/cio/qdio_main.c3
-rw-r--r--drivers/s390/cio/qdio_perf.c2
-rw-r--r--drivers/s390/cio/qdio_perf.h1
-rw-r--r--drivers/s390/cio/qdio_setup.c10
-rw-r--r--drivers/scsi/3w-9xxx.c11
-rw-r--r--drivers/scsi/Kconfig10
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c12
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c51
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c16
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c24
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c137
-rw-r--r--drivers/scsi/hpsa.c3531
-rw-r--r--drivers/scsi/hpsa.h273
-rw-r--r--drivers/scsi/hpsa_cmd.h326
-rw-r--r--drivers/scsi/ipr.c1
-rw-r--r--drivers/scsi/libfc/fc_fcp.c65
-rw-r--r--drivers/scsi/libfc/fc_lport.c7
-rw-r--r--drivers/scsi/libfc/fc_rport.c1
-rw-r--r--drivers/scsi/libiscsi.c22
-rw-r--r--drivers/scsi/libiscsi_tcp.c36
-rw-r--r--drivers/scsi/libsrp.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c16
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c14
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c5
-rw-r--r--drivers/scsi/mvsas/mv_init.c1
-rw-r--r--drivers/scsi/osd/osd_initiator.c88
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.h10
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c149
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h3
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c19
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c57
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h32
-rw-r--r--drivers/scsi/pmcraid.c34
-rw-r--r--drivers/scsi/pmcraid.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c103
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c75
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_transport_fc.c17
-rw-r--r--drivers/scsi/sd.c107
-rw-r--r--drivers/scsi/sd.h2
-rw-r--r--drivers/scsi/st.c23
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/serial/sh-sci.c20
-rw-r--r--drivers/serial/sh-sci.h118
-rw-r--r--drivers/spi/Kconfig28
-rw-r--r--drivers/spi/Makefile10
-rw-r--r--drivers/spi/atmel_spi.c6
-rw-r--r--drivers/spi/dw_spi.c944
-rw-r--r--drivers/spi/dw_spi_pci.c169
-rw-r--r--drivers/spi/spi_bfin5xx.c2
-rw-r--r--drivers/spi/spi_mpc8xxx.c2
-rw-r--r--drivers/spi/spi_s3c24xx.c244
-rw-r--r--drivers/spi/spi_s3c24xx_fiq.S116
-rw-r--r--drivers/spi/spi_s3c24xx_fiq.h26
-rw-r--r--drivers/spi/spi_s3c64xx.c1196
-rw-r--r--drivers/spi/spi_sh_sci.c2
-rw-r--r--drivers/spi/spi_txx9.c6
-rw-r--r--drivers/spi/spidev.c18
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/batman-adv/Kconfig1
-rw-r--r--drivers/staging/batman-adv/send.c4
-rw-r--r--drivers/staging/comedi/comedi.h2
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c7
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c5
-rw-r--r--drivers/staging/dst/Kconfig67
-rw-r--r--drivers/staging/dst/Makefile3
-rw-r--r--drivers/staging/dst/crypto.c733
-rw-r--r--drivers/staging/dst/dcore.c968
-rw-r--r--drivers/staging/dst/export.c660
-rw-r--r--drivers/staging/dst/state.c844
-rw-r--r--drivers/staging/dst/thread_pool.c348
-rw-r--r--drivers/staging/dst/trans.c337
-rw-r--r--drivers/staging/iio/ring_sw.h1
-rw-r--r--drivers/staging/octeon/Kconfig3
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c204
-rw-r--r--drivers/staging/octeon/ethernet-mdio.h2
-rw-r--r--drivers/staging/octeon/ethernet-proc.c112
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c52
-rw-r--r--drivers/staging/octeon/ethernet-sgmii.c2
-rw-r--r--drivers/staging/octeon/ethernet-xaui.c2
-rw-r--r--drivers/staging/octeon/ethernet.c23
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h6
-rw-r--r--drivers/staging/panel/Kconfig2
-rw-r--r--drivers/staging/panel/panel.c4
-rw-r--r--drivers/staging/pohmelfs/dir.c2
-rw-r--r--drivers/staging/ramzswap/TODO1
-rw-r--r--drivers/staging/ramzswap/ramzswap_drv.c28
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h10
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c14
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c10
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.c2
-rw-r--r--drivers/staging/rtl8192e/ieee80211.h12
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211.h12
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_module.c10
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c24
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c6
-rw-r--r--drivers/staging/rtl8192e/r8192E_core.c14
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211.h8
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c6
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c16
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8192su/r8192U_core.c10
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c6
-rw-r--r--drivers/staging/sm7xx/Kconfig15
-rw-r--r--drivers/staging/sm7xx/Makefile3
-rw-r--r--drivers/staging/sm7xx/TODO10
-rw-r--r--drivers/staging/sm7xx/smtc2d.c979
-rw-r--r--drivers/staging/sm7xx/smtc2d.h530
-rw-r--r--drivers/staging/sm7xx/smtcfb.c1253
-rw-r--r--drivers/staging/sm7xx/smtcfb.h793
-rw-r--r--drivers/staging/vt6655/Kconfig2
-rw-r--r--drivers/staging/vt6656/Kconfig2
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c2
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/core/hcd.c4
-rw-r--r--drivers/usb/core/hub.c58
-rw-r--r--drivers/usb/core/sysfs.c6
-rw-r--r--drivers/usb/core/usb.c6
-rw-r--r--drivers/usb/early/ehci-dbgp.c2
-rw-r--r--drivers/usb/gadget/audio.c1
-rw-r--r--drivers/usb/gadget/f_audio.c15
-rw-r--r--drivers/usb/gadget/u_audio.c12
-rw-r--r--drivers/usb/gadget/u_audio.h2
-rw-r--r--drivers/usb/host/ehci-omap.c2
-rw-r--r--drivers/usb/host/fhci-sched.c10
-rw-r--r--drivers/usb/host/fhci-tds.c35
-rw-r--r--drivers/usb/host/fhci.h16
-rw-r--r--drivers/usb/misc/appledisplay.c5
-rw-r--r--drivers/usb/misc/emi62.c2
-rw-r--r--drivers/usb/musb/blackfin.c134
-rw-r--r--drivers/usb/musb/blackfin.h2
-rw-r--r--drivers/usb/musb/cppi_dma.c6
-rw-r--r--drivers/usb/musb/davinci.c2
-rw-r--r--drivers/usb/musb/musb_core.c14
-rw-r--r--drivers/usb/musb/musb_gadget.c13
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c14
-rw-r--r--drivers/usb/otg/isp1301_omap.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.h959
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h986
-rw-r--r--drivers/usb/serial/generic.c12
-rw-r--r--drivers/usb/serial/mos7840.c7
-rw-r--r--drivers/usb/serial/option.c5
-rw-r--r--drivers/usb/serial/usb-serial.c7
-rw-r--r--drivers/video/atafb.c3
-rw-r--r--drivers/video/backlight/adp5520_bl.c2
-rw-r--r--drivers/video/backlight/adx_bl.c2
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c2
-rw-r--r--drivers/video/backlight/backlight.c2
-rw-r--r--drivers/video/backlight/corgi_lcd.c2
-rw-r--r--drivers/video/backlight/cr_bllcd.c4
-rw-r--r--drivers/video/backlight/da903x_bl.c2
-rw-r--r--drivers/video/backlight/generic_bl.c2
-rw-r--r--drivers/video/backlight/hp680_bl.c2
-rw-r--r--drivers/video/backlight/jornada720_bl.c2
-rw-r--r--drivers/video/backlight/kb3886_bl.c2
-rw-r--r--drivers/video/backlight/locomolcd.c2
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c20
-rw-r--r--drivers/video/backlight/omap1_bl.c2
-rw-r--r--drivers/video/backlight/progear_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c11
-rw-r--r--drivers/video/backlight/tosa_bl.c2
-rw-r--r--drivers/video/backlight/wm831x_bl.c2
-rw-r--r--drivers/video/omap/lcd_ldp.c4
-rw-r--r--drivers/video/omap/lcd_omap2evm.c10
-rw-r--r--drivers/video/omap/lcd_omap3beagle.c2
-rw-r--r--drivers/video/omap/lcd_omap3evm.c10
-rw-r--r--drivers/video/omap/lcd_overo.c2
-rw-r--r--drivers/video/via/viafbdev.c4
-rw-r--r--drivers/watchdog/Kconfig12
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/geodewdt.c40
-rw-r--r--drivers/watchdog/rm9k_wdt.c419
391 files changed, 22063 insertions, 9121 deletions
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index d672cfe7ca59..cb423f5aef24 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -21,7 +21,6 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <linux/autoconf.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1683ebda900b..f4ea5a8c325b 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3022,7 +3022,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
 	case WRITE_16:
 		return ata_scsi_rw_xlat;
 
-	case 0x93 /*WRITE_SAME_16*/:
+	case WRITE_SAME_16:
 		return ata_scsi_write_same_xlat;
 
 	case SYNCHRONIZE_CACHE:
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index efa8773bef5a..741065c9da67 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2275,7 +2275,7 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
 	ap = qc->ap;
 	/* Drain up to 64K of data before we give up this recovery method */
 	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
-						&& count < 32768; count++)
+						&& count < 65536; count += 2)
 		ioread16(ap->ioaddr.data_addr);
 
 	/* Can become DEBUG later */
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index c4b47a3e5446..02c81f12c702 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1557,6 +1557,25 @@ static unsigned short atapi_io_port[] = {
 	P_ATAPI_DMARQ,
 	P_ATAPI_INTRQ,
 	P_ATAPI_IORDY,
+	P_ATAPI_D0A,
+	P_ATAPI_D1A,
+	P_ATAPI_D2A,
+	P_ATAPI_D3A,
+	P_ATAPI_D4A,
+	P_ATAPI_D5A,
+	P_ATAPI_D6A,
+	P_ATAPI_D7A,
+	P_ATAPI_D8A,
+	P_ATAPI_D9A,
+	P_ATAPI_D10A,
+	P_ATAPI_D11A,
+	P_ATAPI_D12A,
+	P_ATAPI_D13A,
+	P_ATAPI_D14A,
+	P_ATAPI_D15A,
+	P_ATAPI_A0A,
+	P_ATAPI_A1A,
+	P_ATAPI_A2A,
 	0
 };
 
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index dadfc358ba1c..0efb1f58f255 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -31,7 +31,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_cmd64x"
-#define DRV_VERSION "0.3.1"
+#define DRV_VERSION "0.2.5"
 
 /*
  * CMD64x specific registers definition.
@@ -219,7 +219,7 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 		regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
 		/* Merge the control bits */
 		regU |= 1 << adev->devno; /* UDMA on */
-		if (adev->dma_mode > 2) /* 15nS timing */
+		if (adev->dma_mode > XFER_UDMA_2) /* 15nS timing */
 			regU |= 4 << adev->devno;
 	} else {
 		regU &= ~ (1 << adev->devno); /* UDMA off */
@@ -254,109 +254,17 @@ static void cmd648_bmdma_stop(struct ata_queued_cmd *qc)
 }
 
 /**
- * cmd64x_bmdma_stop - DMA stop callback
+ * cmd646r1_dma_stop - DMA stop callback
  * @qc: Command in progress
  *
- * Track the completion of live DMA commands and clear the
- * host->private_data DMA tracking flag as we do.
+ * Stub for now while investigating the r1 quirk in the old driver.
  */
 
-static void cmd64x_bmdma_stop(struct ata_queued_cmd *qc)
+static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc)
 {
-	struct ata_port *ap = qc->ap;
 	ata_bmdma_stop(qc);
-	WARN_ON(ap->host->private_data != ap);
-	ap->host->private_data = NULL;
-}
-
-/**
- * cmd64x_qc_defer - Defer logic for chip limits
- * @qc: queued command
- *
- * Decide whether we can issue the command. Called under the host lock.
- */
-
-static int cmd64x_qc_defer(struct ata_queued_cmd *qc)
-{
-	struct ata_host *host = qc->ap->host;
-	struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
-	int rc;
-	int dma = 0;
-
-	/* Apply the ATA rules first */
-	rc = ata_std_qc_defer(qc);
-	if (rc)
-		return rc;
-
-	if (qc->tf.protocol == ATAPI_PROT_DMA ||
-	    qc->tf.protocol == ATA_PROT_DMA)
-		dma = 1;
-
-	/* If the other port is not live then issue the command */
-	if (alt == NULL || !alt->qc_active) {
-		if (dma)
-			host->private_data = qc->ap;
-		return 0;
-	}
-	/* If there is a live DMA command then wait */
-	if (host->private_data != NULL)
-		return ATA_DEFER_PORT;
-	if (dma)
-		/* Cannot overlap our DMA command */
-		return ATA_DEFER_PORT;
-	return 0;
 }
 
-/**
- * cmd64x_interrupt - ATA host interrupt handler
- * @irq: irq line (unused)
- * @dev_instance: pointer to our ata_host information structure
- *
- * Our interrupt handler for PCI IDE devices. Calls
- * ata_sff_host_intr() for each port that is flagging an IRQ. We cannot
- * use the defaults as we need to avoid touching status/altstatus during
- * a DMA.
- *
- * LOCKING:
- * Obtains host lock during operation.
- *
- * RETURNS:
- * IRQ_NONE or IRQ_HANDLED.
- */
-irqreturn_t cmd64x_interrupt(int irq, void *dev_instance)
-{
-	struct ata_host *host = dev_instance;
-	struct pci_dev *pdev = to_pci_dev(host->dev);
-	unsigned int i;
-	unsigned int handled = 0;
-	unsigned long flags;
-	static const u8 irq_reg[2] = { CFR, ARTTIM23 };
-	static const u8 irq_mask[2] = { 1 << 2, 1 << 4 };
-
-	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
-	spin_lock_irqsave(&host->lock, flags);
-
-	for (i = 0; i < host->n_ports; i++) {
-		struct ata_port *ap;
-		u8 reg;
-
-		pci_read_config_byte(pdev, irq_reg[i], &reg);
-		ap = host->ports[i];
-		if (ap && (reg & irq_mask[i]) &&
-		    !(ap->flags & ATA_FLAG_DISABLED)) {
-			struct ata_queued_cmd *qc;
-
-			qc = ata_qc_from_tag(ap, ap->link.active_tag);
-			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
-			    (qc->flags & ATA_QCFLAG_ACTIVE))
-				handled |= ata_sff_host_intr(ap, qc);
-		}
-	}
-
-	spin_unlock_irqrestore(&host->lock, flags);
-
-	return IRQ_RETVAL(handled);
-}
 static struct scsi_host_template cmd64x_sht = {
 	ATA_BMDMA_SHT(DRV_NAME),
 };
@@ -365,8 +273,6 @@ static const struct ata_port_operations cmd64x_base_ops = {
 	.inherits = &ata_bmdma_port_ops,
 	.set_piomode = cmd64x_set_piomode,
 	.set_dmamode = cmd64x_set_dmamode,
-	.bmdma_stop = cmd64x_bmdma_stop,
-	.qc_defer = cmd64x_qc_defer,
 };
 
 static struct ata_port_operations cmd64x_port_ops = {
@@ -376,6 +282,7 @@ static struct ata_port_operations cmd64x_port_ops = {
 
 static struct ata_port_operations cmd646r1_port_ops = {
 	.inherits = &cmd64x_base_ops,
+	.bmdma_stop = cmd646r1_bmdma_stop,
 	.cable_detect = ata_cable_40wire,
 };
 
@@ -383,7 +290,6 @@
 	.inherits = &cmd64x_base_ops,
 	.bmdma_stop = cmd648_bmdma_stop,
 	.cable_detect = cmd648_cable_detect,
-	.qc_defer = ata_std_qc_defer
 };
 
 static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -432,7 +338,6 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL };
 	u8 mrdmode;
 	int rc;
-	struct ata_host *host;
 
 	rc = pcim_enable_device(pdev);
 	if (rc)
@@ -450,25 +355,20 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		ppi[0] = &cmd_info[3];
 	}
 
-
 	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
 	pci_read_config_byte(pdev, MRDMODE, &mrdmode);
 	mrdmode &= ~ 0x30; /* IRQ set up */
 	mrdmode |= 0x02;   /* Memory read line enable */
 	pci_write_config_byte(pdev, MRDMODE, mrdmode);
 
+	/* Force PIO 0 here.. */
+
 	/* PPC specific fixup copied from old driver */
 #ifdef CONFIG_PPC
 	pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
 #endif
-	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
-	if (rc)
-		return rc;
-	/* We use this pointer to track the AP which has DMA running */
-	host->private_data = NULL;
 
-	pci_set_master(pdev);
-	return ata_pci_sff_activate_host(host, cmd64x_interrupt, &cmd64x_sht);
+	return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 9a09a1b11ca5..dd26bc73bd9a 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -8,7 +8,7 @@
  * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
  * Portions Copyright (C) 2001 Sun Microsystems, Inc.
  * Portions Copyright (C) 2003 Red Hat Inc
- * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
  *
  *
  * TODO
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME "pata_hpt3x2n"
-#define DRV_VERSION "0.3.7"
+#define DRV_VERSION "0.3.8"
 
 enum {
 	HPT_PCI_FAST = (1 << 31),
@@ -264,7 +264,7 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
 
 static void hpt3x2n_set_clock(struct ata_port *ap, int source)
 {
-	void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+	void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8;
 
 	/* Tristate the bus */
 	iowrite8(0x80, bmdma+0x73);
@@ -274,9 +274,9 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
 	iowrite8(source, bmdma+0x7B);
 	iowrite8(0xC0, bmdma+0x79);
 
-	/* Reset state machines */
-	iowrite8(0x37, bmdma+0x70);
-	iowrite8(0x37, bmdma+0x74);
+	/* Reset state machines, avoid enabling the disabled channels */
+	iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70);
+	iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74);
 
 	/* Complete reset */
 	iowrite8(0x00, bmdma+0x79);
@@ -286,21 +286,10 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
 	iowrite8(0x00, bmdma+0x77);
 }
 
-/* Check if our partner interface is busy */
-
-static int hpt3x2n_pair_idle(struct ata_port *ap)
-{
-	struct ata_host *host = ap->host;
-	struct ata_port *pair = host->ports[ap->port_no ^ 1];
-
-	if (pair->hsm_task_state == HSM_ST_IDLE)
-		return 1;
-	return 0;
-}
-
 static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
 {
 	long flags = (long)ap->host->private_data;
+
 	/* See if we should use the DPLL */
 	if (writing)
 		return USE_DPLL; /* Needed for write */
@@ -309,20 +298,35 @@ static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
 	return 0;
 }
 
+static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_port *alt = ap->host->ports[ap->port_no ^ 1];
+	int rc, flags = (long)ap->host->private_data;
+	int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
+
+	/* First apply the usual rules */
+	rc = ata_std_qc_defer(qc);
+	if (rc != 0)
+		return rc;
+
+	if ((flags & USE_DPLL) != dpll && alt->qc_active)
+		return ATA_DEFER_PORT;
+	return 0;
+}
+
 static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
 {
-	struct ata_taskfile *tf = &qc->tf;
 	struct ata_port *ap = qc->ap;
 	int flags = (long)ap->host->private_data;
+	int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
 
-	if (hpt3x2n_pair_idle(ap)) {
-		int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
-		if ((flags & USE_DPLL) != dpll) {
-			if (dpll == 1)
-				hpt3x2n_set_clock(ap, 0x21);
-			else
-				hpt3x2n_set_clock(ap, 0x23);
-		}
+	if ((flags & USE_DPLL) != dpll) {
+		flags &= ~USE_DPLL;
+		flags |= dpll;
+		ap->host->private_data = (void *)(long)flags;
+
+		hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
 	}
 	return ata_sff_qc_issue(qc);
 }
@@ -339,6 +343,8 @@ static struct ata_port_operations hpt3x2n_port_ops = {
 	.inherits = &ata_bmdma_port_ops,
 
 	.bmdma_stop = hpt3x2n_bmdma_stop,
+
+	.qc_defer = hpt3x2n_qc_defer,
 	.qc_issue = hpt3x2n_qc_issue,
 
 	.cable_detect = hpt3x2n_cable_detect,
@@ -454,7 +460,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 	unsigned int f_low, f_high;
 	int adjust;
 	unsigned long iobase = pci_resource_start(dev, 4);
-	void *hpriv = NULL;
+	void *hpriv = (void *)USE_DPLL;
 	int rc;
 
 	rc = pcim_enable_device(dev);
@@ -539,7 +545,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 	/* Set our private data up. We only need a few flags so we use
 	   it directly */
 	if (pci_mhz > 60) {
-		hpriv = (void *)PCI66;
+		hpriv = (void *)(PCI66 | USE_DPLL);
 		/*
 		 * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
 		 * the MISC. register to stretch the UltraDMA Tss timing.
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index d6f69561dc86..37ef416c1242 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -853,7 +853,7 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
 		return -EINVAL;
 
 	cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
-				   res_cs0->end - res_cs1->start + 1);
+				   resource_size(res_cs1));
 
 	if (!cs1)
 		return -ENOMEM;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index a8a7be0d06ff..df8ee325d3ca 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -59,6 +59,7 @@
 #include <linux/dmapool.h>
 #include <linux/dma-mapping.h>
 #include <linux/device.h>
+#include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/ata_platform.h>
 #include <linux/mbus.h>
@@ -538,6 +539,7 @@ struct mv_port_signal {
 
 struct mv_host_priv {
 	u32 hp_flags;
+	unsigned int board_idx;
 	u32 main_irq_mask;
 	struct mv_port_signal signal[8];
 	const struct mv_hw_ops *ops;
@@ -548,6 +550,10 @@ struct mv_host_priv {
 	u32 irq_cause_offset;
 	u32 irq_mask_offset;
 	u32 unmask_all_irqs;
+
+#if defined(CONFIG_HAVE_CLK)
+	struct clk *clk;
+#endif
 	/*
 	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
@@ -2775,7 +2781,7 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
 	struct mv_port_priv *pp;
 	int edma_was_enabled;
 
-	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
+	if (ap->flags & ATA_FLAG_DISABLED) {
 		mv_unexpected_intr(ap, 0);
 		return;
 	}
@@ -3393,7 +3399,7 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
 	ZERO(0x024); /* respq outp */
 	ZERO(0x020); /* respq inp */
 	ZERO(0x02c); /* test control */
-	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
+	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
 }
 
 #undef ZERO
@@ -3854,7 +3860,6 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 /**
  * mv_init_host - Perform some early initialization of the host.
  * @host: ATA host to initialize
- * @board_idx: controller index
  *
  * If possible, do an early global reset of the host. Then do
  * our port init and clear/unmask all/relevant host interrupts.
@@ -3862,13 +3867,13 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 * LOCKING:
 * Inherited from caller.
 */
-static int mv_init_host(struct ata_host *host, unsigned int board_idx)
+static int mv_init_host(struct ata_host *host)
 {
 	int rc = 0, n_hc, port, hc;
 	struct mv_host_priv *hpriv = host->private_data;
 	void __iomem *mmio = hpriv->base;
 
-	rc = mv_chip_id(host, board_idx);
+	rc = mv_chip_id(host, hpriv->board_idx);
 	if (rc)
 		goto done;
 
@@ -3905,14 +3910,6 @@
 		void __iomem *port_mmio = mv_port_base(mmio, port);
 
 		mv_port_init(&ap->ioaddr, port_mmio);
-
-#ifdef CONFIG_PCI
-		if (!IS_SOC(hpriv)) {
-			unsigned int offset = port_mmio - mmio;
-			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
-			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
-		}
-#endif
 	}
 
 	for (hc = 0; hc < n_hc; hc++) {
@@ -4035,12 +4032,21 @@ static int mv_platform_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	host->private_data = hpriv;
 	hpriv->n_ports = n_ports;
+	hpriv->board_idx = chip_soc;
 
 	host->iomap = NULL;
 	hpriv->base = devm_ioremap(&pdev->dev, res->start,
 				   resource_size(res));
 	hpriv->base -= SATAHC0_REG_BASE;
 
+#if defined(CONFIG_HAVE_CLK)
+	hpriv->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(hpriv->clk))
+		dev_notice(&pdev->dev, "cannot get clkdev\n");
+	else
+		clk_enable(hpriv->clk);
+#endif
+
 	/*
 	 * (Re-)program MBUS remapping windows if we are asked to.
 	 */
@@ -4049,12 +4055,12 @@ static int mv_platform_probe(struct platform_device *pdev)
 
 	rc = mv_create_dma_pools(hpriv, &pdev->dev);
 	if (rc)
-		return rc;
+		goto err;
 
 	/* initialize adapter */
-	rc = mv_init_host(host, chip_soc);
+	rc = mv_init_host(host);
 	if (rc)
-		return rc;
+		goto err;
 
 	dev_printk(KERN_INFO, &pdev->dev,
 		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
@@ -4062,6 +4068,15 @@ static int mv_platform_probe(struct platform_device *pdev)
 
 	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
 				 IRQF_SHARED, &mv6_sht);
+err:
+#if defined(CONFIG_HAVE_CLK)
+	if (!IS_ERR(hpriv->clk)) {
+		clk_disable(hpriv->clk);
+		clk_put(hpriv->clk);
+	}
+#endif
+
+	return rc;
 }
 
 /*
@@ -4076,14 +4091,66 @@ static int __devexit mv_platform_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct ata_host *host = dev_get_drvdata(dev);
-
+#if defined(CONFIG_HAVE_CLK)
+	struct mv_host_priv *hpriv = host->private_data;
+#endif
 	ata_host_detach(host);
+
+#if defined(CONFIG_HAVE_CLK)
+	if (!IS_ERR(hpriv->clk)) {
+		clk_disable(hpriv->clk);
+		clk_put(hpriv->clk);
+	}
+#endif
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	if (host)
+		return ata_host_suspend(host, state);
+	else
+		return 0;
+}
+
+static int mv_platform_resume(struct platform_device *pdev)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	int ret;
+
+	if (host) {
+		struct mv_host_priv *hpriv = host->private_data;
+		const struct mv_sata_platform_data *mv_platform_data = \
+			pdev->dev.platform_data;
+		/*
+		 * (Re-)program MBUS remapping windows if we are asked to.
+		 */
+		if (mv_platform_data->dram != NULL)
+			mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
+
+		/* initialize adapter */
+		ret = mv_init_host(host);
+		if (ret) {
+			printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+			return ret;
+		}
+		ata_host_resume(host);
+	}
+
+	return 0;
+}
+#else
+#define mv_platform_suspend NULL
+#define mv_platform_resume NULL
+#endif
+
 static struct platform_driver mv_platform_driver = {
 	.probe = mv_platform_probe,
 	.remove = __devexit_p(mv_platform_remove),
+	.suspend = mv_platform_suspend,
+	.resume = mv_platform_resume,
 	.driver = {
 		.name = DRV_NAME,
 		.owner = THIS_MODULE,
@@ -4094,6 +4161,9 @@ static struct platform_driver mv_platform_driver = {
 #ifdef CONFIG_PCI
 static int mv_pci_init_one(struct pci_dev *pdev,
 			   const struct pci_device_id *ent);
+#ifdef CONFIG_PM
+static int mv_pci_device_resume(struct pci_dev *pdev);
+#endif
 
 
 static struct pci_driver mv_pci_driver = {
@@ -4101,6 +4171,11 @@ static struct pci_driver mv_pci_driver = {
 	.id_table = mv_pci_tbl,
 	.probe = mv_pci_init_one,
 	.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend = ata_pci_device_suspend,
+	.resume = mv_pci_device_resume,
+#endif
+
 };
 
 /* move to PCI layer or libata core? */
@@ -4194,7 +4269,7 @@ static int mv_pci_init_one(struct pci_dev *pdev,
 	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
 	struct ata_host *host;
 	struct mv_host_priv *hpriv;
-	int n_ports, rc;
+	int n_ports, port, rc;
 
 	if (!printed_version++)
 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
@@ -4208,6 +4283,7 @@ static int mv_pci_init_one(struct pci_dev *pdev,
 		return -ENOMEM;
 	host->private_data = hpriv;
 	hpriv->n_ports = n_ports;
+	hpriv->board_idx = board_idx;
 
 	/* acquire resources */
 	rc = pcim_enable_device(pdev);
@@ -4230,8 +4306,17 @@ static int mv_pci_init_one(struct pci_dev *pdev,
 	if (rc)
 		return rc;
 
+	for (port = 0; port < host->n_ports; port++) {
+		struct ata_port *ap = host->ports[port];
+		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
+		unsigned int offset = port_mmio - hpriv->base;
+
+		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
+		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
+	}
+
 	/* initialize adapter */
-	rc = mv_init_host(host, board_idx);
+	rc = mv_init_host(host);
 	if (rc)
 		return rc;
 
@@ -4247,6 +4332,27 @@ static int mv_pci_init_one(struct pci_dev *pdev,
 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
 }
+
+#ifdef CONFIG_PM
+static int mv_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	/* initialize adapter */
+	rc = mv_init_host(host);
+	if (rc)
+		return rc;
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
 #endif
 
 static int mv_platform_probe(struct platform_device *pdev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 63c143e54a57..c0c5a43d9fb3 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -703,9 +703,9 @@ int bus_add_driver(struct device_driver *drv)
 	return 0;
 
 out_unregister:
+	kobject_put(&priv->kobj);
 	kfree(drv->p);
 	drv->p = NULL;
-	kobject_put(&priv->kobj);
 out_put_bus:
 	bus_put(bus);
 	return error;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f1290cbd1350..282025770429 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -446,7 +446,8 @@ struct kset *devices_kset;
  * @dev: device.
  * @attr: device attribute descriptor.
  */
-int device_create_file(struct device *dev, struct device_attribute *attr)
+int device_create_file(struct device *dev,
+		       const struct device_attribute *attr)
 {
 	int error = 0;
 	if (dev)
@@ -459,7 +460,8 @@ int device_create_file(struct device *dev, struct device_attribute *attr)
  * @dev: device.
  * @attr: device attribute descriptor.
 */
-void device_remove_file(struct device *dev, struct device_attribute *attr)
+void device_remove_file(struct device *dev,
+			const struct device_attribute *attr)
 {
 	if (dev)
 		sysfs_remove_file(&dev->kobj, &attr->attr);
@@ -470,7 +472,8 @@ void device_remove_file(struct device *dev, struct device_attribute *attr)
  * @dev: device.
  * @attr: device binary attribute descriptor.
 */
-int device_create_bin_file(struct device *dev, struct bin_attribute *attr)
+int device_create_bin_file(struct device *dev,
+			   const struct bin_attribute *attr)
 {
 	int error = -EINVAL;
 	if (dev)
@@ -484,7 +487,8 @@ EXPORT_SYMBOL_GPL(device_create_bin_file);
  * @dev: device.
  * @attr: device binary attribute descriptor.
 */
-void device_remove_bin_file(struct device *dev, struct bin_attribute *attr)
+void device_remove_bin_file(struct device *dev,
+			    const struct bin_attribute *attr)
 {
 	if (dev)
 		sysfs_remove_bin_file(&dev->kobj, attr);
@@ -905,8 +909,10 @@ int device_add(struct device *dev)
 		dev->init_name = NULL;
 	}
 
-	if (!dev_name(dev))
+	if (!dev_name(dev)) {
+		error = -EINVAL;
 		goto name_error;
+	}
 
 	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
 
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 50375bb8e51d..090dd4851301 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -32,7 +32,7 @@ static int dev_mount = 1;
 static int dev_mount;
 #endif
 
-static rwlock_t dirlock;
+static DEFINE_MUTEX(dirlock);
 
 static int __init mount_param(char *str)
 {
@@ -93,7 +93,7 @@ static int create_path(const char *nodepath)
 {
 	int err;
 
-	read_lock(&dirlock);
+	mutex_lock(&dirlock);
 	err = dev_mkdir(nodepath, 0755);
 	if (err == -ENOENT) {
 		char *path;
@@ -101,8 +101,10 @@
 
 		/* parent directories do not exist, create them */
 		path = kstrdup(nodepath, GFP_KERNEL);
-		if (!path)
-			return -ENOMEM;
+		if (!path) {
+			err = -ENOMEM;
+			goto out;
+		}
 		s = path;
 		for (;;) {
 			s = strchr(s, '/');
@@ -117,7 +119,8 @@
 		}
 		kfree(path);
 	}
-	read_unlock(&dirlock);
+out:
+	mutex_unlock(&dirlock);
 	return err;
 }
 
@@ -229,7 +232,7 @@ static int delete_path(const char *nodepath)
 	if (!path)
 		return -ENOMEM;
 
-	write_lock(&dirlock);
+	mutex_lock(&dirlock);
 	for (;;) {
 		char *base;
 
@@ -241,7 +244,7 @@
 		if (err)
 			break;
 	}
-	write_unlock(&dirlock);
+	mutex_unlock(&dirlock);
 
 	kfree(path);
 	return err;
@@ -352,8 +355,6 @@ int __init devtmpfs_init(void)
 	int err;
 	struct vfsmount *mnt;
 
-	rwlock_init(&dirlock);
-
 	err = register_filesystem(&dev_fs_type);
 	if (err) {
 		printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index f367885a7646..90c9fff09ead 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(driver_find_device);
  * @attr: driver attribute descriptor.
  */
 int driver_create_file(struct device_driver *drv,
-		       struct driver_attribute *attr)
+		       const struct driver_attribute *attr)
 {
 	int error;
 	if (drv)
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(driver_create_file);
  * @attr: driver attribute descriptor.
 */
 void driver_remove_file(struct device_driver *drv,
-			struct driver_attribute *attr)
+			const struct driver_attribute *attr)
 {
 	if (drv)
 		sysfs_remove_file(&drv->p->kobj, &attr->attr);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c4c8f2e1dd15..d7d77d4a402c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -63,6 +63,20 @@ void unregister_memory_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_memory_notifier);
 
+static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);
+
+int register_memory_isolate_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&memory_isolate_chain, nb);
+}
+EXPORT_SYMBOL(register_memory_isolate_notifier);
+
+void unregister_memory_isolate_notifier(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
+}
+EXPORT_SYMBOL(unregister_memory_isolate_notifier);
+
 /*
  * register_memory - Setup a sysfs device for a memory block
  */
@@ -157,6 +171,11 @@ int memory_notify(unsigned long val, void *v)
 	return blocking_notifier_call_chain(&memory_chain, val, v);
 }
 
+int memory_isolate_notify(unsigned long val, void *v)
+{
+	return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
+}
+
 /*
  * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
  * OK to have direct references to sparsemem variables in here.
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9d2ee25deaf5..58efaf2f1259 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -441,6 +441,7 @@ error:
 	platform_device_put(pdev);
 	return ERR_PTR(retval);
 }
+EXPORT_SYMBOL_GPL(platform_device_register_data);
 
 static int platform_drv_probe(struct device *_dev)
 {
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 1a216c114a0f..48adf80926a0 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -161,6 +161,32 @@ void device_pm_move_last(struct device *dev)
 	list_move_tail(&dev->power.entry, &dpm_list);
 }
 
+static ktime_t initcall_debug_start(struct device *dev)
+{
+	ktime_t calltime = ktime_set(0, 0);
+
+	if (initcall_debug) {
+		pr_info("calling %s+ @ %i\n",
+			dev_name(dev), task_pid_nr(current));
+		calltime = ktime_get();
+	}
+
+	return calltime;
+}
+
+static void initcall_debug_report(struct device *dev, ktime_t calltime,
+				  int error)
+{
+	ktime_t delta, rettime;
+
+	if (initcall_debug) {
+		rettime = ktime_get();
+		delta = ktime_sub(rettime, calltime);
+		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
+			error, (unsigned long long)ktime_to_ns(delta) >> 10);
+	}
+}
+
 /**
  * pm_op - Execute the PM operation appropriate for given PM event.
  * @dev: Device to handle.
@@ -172,13 +198,9 @@ static int pm_op(struct device *dev,
 		 pm_message_t state)
 {
 	int error = 0;
-	ktime_t calltime, delta, rettime;
+	ktime_t calltime;
 
-	if (initcall_debug) {
-		pr_info("calling %s+ @ %i\n",
-			dev_name(dev), task_pid_nr(current));
-		calltime = ktime_get();
-	}
+	calltime = initcall_debug_start(dev);
 
 	switch (state.event) {
 #ifdef CONFIG_SUSPEND
@@ -227,12 +249,7 @@ static int pm_op(struct device *dev,
 		error = -EINVAL;
 	}
 
-	if (initcall_debug) {
-		rettime = ktime_get();
-		delta = ktime_sub(rettime, calltime);
-		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
-			error, (unsigned long long)ktime_to_ns(delta) >> 10);
-	}
+	initcall_debug_report(dev, calltime, error);
 
 	return error;
 }
@@ -309,8 +326,9 @@ static int pm_noirq_op(struct device *dev,
 	if (initcall_debug) {
 		rettime = ktime_get();
 		delta = ktime_sub(rettime, calltime);
-		printk("initcall %s_i+ returned %d after %Ld usecs\n", dev_name(dev),
-			error, (unsigned long long)ktime_to_ns(delta) >> 10);
+		printk("initcall %s_i+ returned %d after %Ld usecs\n",
+			dev_name(dev), error,
+			(unsigned long long)ktime_to_ns(delta) >> 10);
 	}
 
 	return error;
@@ -354,6 +372,23 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
 		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
 }
 
+static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
+{
+	ktime_t calltime;
+	s64 usecs64;
+	int usecs;
+
+	calltime = ktime_get();
+	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
+	do_div(usecs64, NSEC_PER_USEC);
+	usecs = usecs64;
+	if (usecs == 0)
+		usecs = 1;
+	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
+		info ?: "", info ? " " : "", pm_verb(state.event),
+		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
+}
+
 /*------------------------- Resume routines -------------------------*/
 
 /**
@@ -390,6 +425,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 void dpm_resume_noirq(pm_message_t state)
 {
 	struct device *dev;
+	ktime_t starttime = ktime_get();
 
 	mutex_lock(&dpm_list_mtx);
 	transition_started = false;
@@ -403,11 +439,32 @@ void dpm_resume_noirq(pm_message_t state)
 			pm_dev_err(dev, state, " early", error);
 	}
 	mutex_unlock(&dpm_list_mtx);
+	dpm_show_time(starttime, state, "early");
 	resume_device_irqs();
 }
 EXPORT_SYMBOL_GPL(dpm_resume_noirq);
 
 /**
+ * legacy_resume - Execute a legacy (bus or class) resume callback for device.
+ * dev: Device to resume.
+ * cb: Resume callback to execute.
+ */
+static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
+{
+	int error;
+	ktime_t calltime;
+
+	calltime = initcall_debug_start(dev);
+
+	error = cb(dev);
+	suspend_report_result(cb, error);
+
+	initcall_debug_report(dev, calltime, error);
+
+	return error;
+}
+
+/**
  * device_resume - Execute "resume" callbacks for given device.
412 * @dev: Device to handle. 469 * @dev: Device to handle.
413 * @state: PM transition of the system being carried out. 470 * @state: PM transition of the system being carried out.
@@ -427,7 +484,7 @@ static int device_resume(struct device *dev, pm_message_t state)
427 error = pm_op(dev, dev->bus->pm, state); 484 error = pm_op(dev, dev->bus->pm, state);
428 } else if (dev->bus->resume) { 485 } else if (dev->bus->resume) {
429 pm_dev_dbg(dev, state, "legacy "); 486 pm_dev_dbg(dev, state, "legacy ");
430 error = dev->bus->resume(dev); 487 error = legacy_resume(dev, dev->bus->resume);
431 } 488 }
432 if (error) 489 if (error)
433 goto End; 490 goto End;
@@ -448,7 +505,7 @@ static int device_resume(struct device *dev, pm_message_t state)
448 error = pm_op(dev, dev->class->pm, state); 505 error = pm_op(dev, dev->class->pm, state);
449 } else if (dev->class->resume) { 506 } else if (dev->class->resume) {
450 pm_dev_dbg(dev, state, "legacy class "); 507 pm_dev_dbg(dev, state, "legacy class ");
451 error = dev->class->resume(dev); 508 error = legacy_resume(dev, dev->class->resume);
452 } 509 }
453 } 510 }
454 End: 511 End:
@@ -468,6 +525,7 @@ static int device_resume(struct device *dev, pm_message_t state)
468static void dpm_resume(pm_message_t state) 525static void dpm_resume(pm_message_t state)
469{ 526{
470 struct list_head list; 527 struct list_head list;
528 ktime_t starttime = ktime_get();
471 529
472 INIT_LIST_HEAD(&list); 530 INIT_LIST_HEAD(&list);
473 mutex_lock(&dpm_list_mtx); 531 mutex_lock(&dpm_list_mtx);
@@ -496,6 +554,7 @@ static void dpm_resume(pm_message_t state)
496 } 554 }
497 list_splice(&list, &dpm_list); 555 list_splice(&list, &dpm_list);
498 mutex_unlock(&dpm_list_mtx); 556 mutex_unlock(&dpm_list_mtx);
557 dpm_show_time(starttime, state, NULL);
499} 558}
500 559
501/** 560/**
@@ -548,7 +607,7 @@ static void dpm_complete(pm_message_t state)
548 mutex_unlock(&dpm_list_mtx); 607 mutex_unlock(&dpm_list_mtx);
549 608
550 device_complete(dev, state); 609 device_complete(dev, state);
551 pm_runtime_put_noidle(dev); 610 pm_runtime_put_sync(dev);
552 611
553 mutex_lock(&dpm_list_mtx); 612 mutex_lock(&dpm_list_mtx);
554 } 613 }
@@ -628,6 +687,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
628int dpm_suspend_noirq(pm_message_t state) 687int dpm_suspend_noirq(pm_message_t state)
629{ 688{
630 struct device *dev; 689 struct device *dev;
690 ktime_t starttime = ktime_get();
631 int error = 0; 691 int error = 0;
632 692
633 suspend_device_irqs(); 693 suspend_device_irqs();
@@ -643,11 +703,34 @@ int dpm_suspend_noirq(pm_message_t state)
643 mutex_unlock(&dpm_list_mtx); 703 mutex_unlock(&dpm_list_mtx);
644 if (error) 704 if (error)
645 dpm_resume_noirq(resume_event(state)); 705 dpm_resume_noirq(resume_event(state));
706 else
707 dpm_show_time(starttime, state, "late");
646 return error; 708 return error;
647} 709}
648EXPORT_SYMBOL_GPL(dpm_suspend_noirq); 710EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
649 711
650/** 712/**
713 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
714 * dev: Device to suspend.
715 * cb: Suspend callback to execute.
716 */
717static int legacy_suspend(struct device *dev, pm_message_t state,
718 int (*cb)(struct device *dev, pm_message_t state))
719{
720 int error;
721 ktime_t calltime;
722
723 calltime = initcall_debug_start(dev);
724
725 error = cb(dev, state);
726 suspend_report_result(cb, error);
727
728 initcall_debug_report(dev, calltime, error);
729
730 return error;
731}
732
733/**
651 * device_suspend - Execute "suspend" callbacks for given device. 734 * device_suspend - Execute "suspend" callbacks for given device.
652 * @dev: Device to handle. 735 * @dev: Device to handle.
653 * @state: PM transition of the system being carried out. 736 * @state: PM transition of the system being carried out.
@@ -664,8 +747,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
664 error = pm_op(dev, dev->class->pm, state); 747 error = pm_op(dev, dev->class->pm, state);
665 } else if (dev->class->suspend) { 748 } else if (dev->class->suspend) {
666 pm_dev_dbg(dev, state, "legacy class "); 749 pm_dev_dbg(dev, state, "legacy class ");
667 error = dev->class->suspend(dev, state); 750 error = legacy_suspend(dev, state, dev->class->suspend);
668 suspend_report_result(dev->class->suspend, error);
669 } 751 }
670 if (error) 752 if (error)
671 goto End; 753 goto End;
@@ -686,8 +768,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
686 error = pm_op(dev, dev->bus->pm, state); 768 error = pm_op(dev, dev->bus->pm, state);
687 } else if (dev->bus->suspend) { 769 } else if (dev->bus->suspend) {
688 pm_dev_dbg(dev, state, "legacy "); 770 pm_dev_dbg(dev, state, "legacy ");
689 error = dev->bus->suspend(dev, state); 771 error = legacy_suspend(dev, state, dev->bus->suspend);
690 suspend_report_result(dev->bus->suspend, error);
691 } 772 }
692 } 773 }
693 End: 774 End:
@@ -703,6 +784,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
703static int dpm_suspend(pm_message_t state) 784static int dpm_suspend(pm_message_t state)
704{ 785{
705 struct list_head list; 786 struct list_head list;
787 ktime_t starttime = ktime_get();
706 int error = 0; 788 int error = 0;
707 789
708 INIT_LIST_HEAD(&list); 790 INIT_LIST_HEAD(&list);
@@ -728,6 +810,8 @@ static int dpm_suspend(pm_message_t state)
728 } 810 }
729 list_splice(&list, dpm_list.prev); 811 list_splice(&list, dpm_list.prev);
730 mutex_unlock(&dpm_list_mtx); 812 mutex_unlock(&dpm_list_mtx);
813 if (!error)
814 dpm_show_time(starttime, state, NULL);
731 return error; 815 return error;
732} 816}
733 817
@@ -796,7 +880,7 @@ static int dpm_prepare(pm_message_t state)
796 pm_runtime_get_noresume(dev); 880 pm_runtime_get_noresume(dev);
797 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { 881 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
798 /* Wake-up requested during system sleep transition. */ 882 /* Wake-up requested during system sleep transition. */
799 pm_runtime_put_noidle(dev); 883 pm_runtime_put_sync(dev);
800 error = -EBUSY; 884 error = -EBUSY;
801 } else { 885 } else {
802 error = device_prepare(dev, state); 886 error = device_prepare(dev, state);
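The main.c changes factor the initcall_debug timing into initcall_debug_start()/initcall_debug_report() and reuse the pair via legacy_resume()/legacy_suspend(). Both helpers are static to this file, so the following sketch only makes sense inside it; the wrapped callback is a stand-in, not something this patch defines:

static int timed_call(struct device *dev,
		      int (*my_pm_callback)(struct device *))
{
	ktime_t calltime = initcall_debug_start(dev);
	int error = my_pm_callback(dev);

	/* prints "call <dev>+ returned ..." when initcall_debug is set */
	initcall_debug_report(dev, calltime, error);
	return error;
}
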
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 40d7720a4b21..f8b044e8aef7 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -85,6 +85,19 @@ static int __pm_runtime_idle(struct device *dev)
85 dev->bus->pm->runtime_idle(dev); 85 dev->bus->pm->runtime_idle(dev);
86 86
87 spin_lock_irq(&dev->power.lock); 87 spin_lock_irq(&dev->power.lock);
88 } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
89 spin_unlock_irq(&dev->power.lock);
90
91 dev->type->pm->runtime_idle(dev);
92
93 spin_lock_irq(&dev->power.lock);
94 } else if (dev->class && dev->class->pm
95 && dev->class->pm->runtime_idle) {
96 spin_unlock_irq(&dev->power.lock);
97
98 dev->class->pm->runtime_idle(dev);
99
100 spin_lock_irq(&dev->power.lock);
88 } 101 }
89 102
90 dev->power.idle_notification = false; 103 dev->power.idle_notification = false;
@@ -194,6 +207,22 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
194 207
195 spin_lock_irq(&dev->power.lock); 208 spin_lock_irq(&dev->power.lock);
196 dev->power.runtime_error = retval; 209 dev->power.runtime_error = retval;
210 } else if (dev->type && dev->type->pm
211 && dev->type->pm->runtime_suspend) {
212 spin_unlock_irq(&dev->power.lock);
213
214 retval = dev->type->pm->runtime_suspend(dev);
215
216 spin_lock_irq(&dev->power.lock);
217 dev->power.runtime_error = retval;
218 } else if (dev->class && dev->class->pm
219 && dev->class->pm->runtime_suspend) {
220 spin_unlock_irq(&dev->power.lock);
221
222 retval = dev->class->pm->runtime_suspend(dev);
223
224 spin_lock_irq(&dev->power.lock);
225 dev->power.runtime_error = retval;
197 } else { 226 } else {
198 retval = -ENOSYS; 227 retval = -ENOSYS;
199 } 228 }
@@ -359,6 +388,22 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
359 388
360 spin_lock_irq(&dev->power.lock); 389 spin_lock_irq(&dev->power.lock);
361 dev->power.runtime_error = retval; 390 dev->power.runtime_error = retval;
391 } else if (dev->type && dev->type->pm
392 && dev->type->pm->runtime_resume) {
393 spin_unlock_irq(&dev->power.lock);
394
395 retval = dev->type->pm->runtime_resume(dev);
396
397 spin_lock_irq(&dev->power.lock);
398 dev->power.runtime_error = retval;
399 } else if (dev->class && dev->class->pm
400 && dev->class->pm->runtime_resume) {
401 spin_unlock_irq(&dev->power.lock);
402
403 retval = dev->class->pm->runtime_resume(dev);
404
405 spin_lock_irq(&dev->power.lock);
406 dev->power.runtime_error = retval;
362 } else { 407 } else {
363 retval = -ENOSYS; 408 retval = -ENOSYS;
364 } 409 }
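The runtime.c hunks extend the dispatch so that, when the bus supplies no runtime PM callbacks, the device type and then the class are tried. A hedged sketch of what that enables; the type and function names below are placeholders:

#include <linux/device.h>
#include <linux/pm.h>

static int my_type_runtime_suspend(struct device *dev)
{
	/* put the device into a low-power state */
	return 0;
}

static int my_type_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops my_type_pm_ops = {
	.runtime_suspend = my_type_runtime_suspend,
	.runtime_resume  = my_type_runtime_resume,
};

/* devices of this type now get runtime callbacks even if their bus has none */
static struct device_type my_device_type = {
	.name = "my-type",
	.pm   = &my_type_pm_ops,
};
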
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 4d2905996751..a699f09ddf7c 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -307,6 +307,7 @@ static void btusb_bulk_complete(struct urb *urb)
307 return; 307 return;
308 308
309 usb_anchor_urb(urb, &data->bulk_anchor); 309 usb_anchor_urb(urb, &data->bulk_anchor);
310 usb_mark_last_busy(data->udev);
310 311
311 err = usb_submit_urb(urb, GFP_ATOMIC); 312 err = usb_submit_urb(urb, GFP_ATOMIC);
312 if (err < 0) { 313 if (err < 0) {
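The btusb change touches the device's last-busy timestamp from the bulk completion handler so USB autosuspend does not fire while traffic is still flowing. A generic stand-in handler illustrating the idea (not btusb code):

#include <linux/usb.h>

static void my_bulk_complete(struct urb *urb)
{
	struct usb_device *udev = urb->dev;

	usb_mark_last_busy(udev);	/* reset the autosuspend idle timer */

	if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
		;	/* resubmission failed; handle or log as appropriate */
}
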
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index d3400b20444f..7d73cd430340 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -358,7 +358,7 @@ struct port {
358 u8 update_flow_control; 358 u8 update_flow_control;
359 struct ctrl_ul ctrl_ul; 359 struct ctrl_ul ctrl_ul;
360 struct ctrl_dl ctrl_dl; 360 struct ctrl_dl ctrl_dl;
361 struct kfifo *fifo_ul; 361 struct kfifo fifo_ul;
362 void __iomem *dl_addr[2]; 362 void __iomem *dl_addr[2];
363 u32 dl_size[2]; 363 u32 dl_size[2];
364 u8 toggle_dl; 364 u8 toggle_dl;
@@ -685,8 +685,6 @@ static int nozomi_read_config_table(struct nozomi *dc)
685 dump_table(dc); 685 dump_table(dc);
686 686
687 for (i = PORT_MDM; i < MAX_PORT; i++) { 687 for (i = PORT_MDM; i < MAX_PORT; i++) {
688 dc->port[i].fifo_ul =
689 kfifo_alloc(FIFO_BUFFER_SIZE_UL, GFP_ATOMIC, NULL);
690 memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl)); 688 memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl));
691 memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul)); 689 memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul));
692 } 690 }
@@ -798,7 +796,7 @@ static int send_data(enum port_type index, struct nozomi *dc)
798 struct tty_struct *tty = tty_port_tty_get(&port->port); 796 struct tty_struct *tty = tty_port_tty_get(&port->port);
799 797
800 /* Get data from tty and place in buf for now */ 798 /* Get data from tty and place in buf for now */
801 size = __kfifo_get(port->fifo_ul, dc->send_buf, 799 size = kfifo_out(&port->fifo_ul, dc->send_buf,
802 ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX); 800 ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX);
803 801
804 if (size == 0) { 802 if (size == 0) {
@@ -988,11 +986,11 @@ static int receive_flow_control(struct nozomi *dc)
988 986
989 } else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) { 987 } else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) {
990 988
991 if (__kfifo_len(dc->port[port].fifo_ul)) { 989 if (kfifo_len(&dc->port[port].fifo_ul)) {
992 DBG1("Enable interrupt (0x%04X) on port: %d", 990 DBG1("Enable interrupt (0x%04X) on port: %d",
993 enable_ier, port); 991 enable_ier, port);
994 DBG1("Data in buffer [%d], enable transmit! ", 992 DBG1("Data in buffer [%d], enable transmit! ",
995 __kfifo_len(dc->port[port].fifo_ul)); 993 kfifo_len(&dc->port[port].fifo_ul));
996 enable_transmit_ul(port, dc); 994 enable_transmit_ul(port, dc);
997 } else { 995 } else {
998 DBG1("No data in buffer..."); 996 DBG1("No data in buffer...");
@@ -1433,6 +1431,16 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
1433 goto err_free_sbuf; 1431 goto err_free_sbuf;
1434 } 1432 }
1435 1433
1434 for (i = PORT_MDM; i < MAX_PORT; i++) {
1435 if (kfifo_alloc(&dc->port[i].fifo_ul,
1436 FIFO_BUFFER_SIZE_UL, GFP_ATOMIC)) {
1437 dev_err(&pdev->dev,
1438 "Could not allocate kfifo buffer\n");
1439 ret = -ENOMEM;
1440 goto err_free_kfifo;
1441 }
1442 }
1443
1436 spin_lock_init(&dc->spin_mutex); 1444 spin_lock_init(&dc->spin_mutex);
1437 1445
1438 nozomi_setup_private_data(dc); 1446 nozomi_setup_private_data(dc);
@@ -1445,7 +1453,7 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
1445 NOZOMI_NAME, dc); 1453 NOZOMI_NAME, dc);
1446 if (unlikely(ret)) { 1454 if (unlikely(ret)) {
1447 dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq); 1455 dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
1448 goto err_free_sbuf; 1456 goto err_free_kfifo;
1449 } 1457 }
1450 1458
1451 DBG1("base_addr: %p", dc->base_addr); 1459 DBG1("base_addr: %p", dc->base_addr);
@@ -1464,13 +1472,28 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
1464 dc->state = NOZOMI_STATE_ENABLED; 1472 dc->state = NOZOMI_STATE_ENABLED;
1465 1473
1466 for (i = 0; i < MAX_PORT; i++) { 1474 for (i = 0; i < MAX_PORT; i++) {
1475 struct device *tty_dev;
1476
1467 mutex_init(&dc->port[i].tty_sem); 1477 mutex_init(&dc->port[i].tty_sem);
1468 tty_port_init(&dc->port[i].port); 1478 tty_port_init(&dc->port[i].port);
1469 tty_register_device(ntty_driver, dc->index_start + i, 1479 tty_dev = tty_register_device(ntty_driver, dc->index_start + i,
1470 &pdev->dev); 1480 &pdev->dev);
1481
1482 if (IS_ERR(tty_dev)) {
1483 ret = PTR_ERR(tty_dev);
1484 dev_err(&pdev->dev, "Could not allocate tty?\n");
1485 goto err_free_tty;
1486 }
1471 } 1487 }
1488
1472 return 0; 1489 return 0;
1473 1490
1491err_free_tty:
1492 for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i)
1493 tty_unregister_device(ntty_driver, i);
1494err_free_kfifo:
1495 for (i = 0; i < MAX_PORT; i++)
1496 kfifo_free(&dc->port[i].fifo_ul);
1474err_free_sbuf: 1497err_free_sbuf:
1475 kfree(dc->send_buf); 1498 kfree(dc->send_buf);
1476 iounmap(dc->base_addr); 1499 iounmap(dc->base_addr);
@@ -1536,8 +1559,7 @@ static void __devexit nozomi_card_exit(struct pci_dev *pdev)
1536 free_irq(pdev->irq, dc); 1559 free_irq(pdev->irq, dc);
1537 1560
1538 for (i = 0; i < MAX_PORT; i++) 1561 for (i = 0; i < MAX_PORT; i++)
1539 if (dc->port[i].fifo_ul) 1562 kfifo_free(&dc->port[i].fifo_ul);
1540 kfifo_free(dc->port[i].fifo_ul);
1541 1563
1542 kfree(dc->send_buf); 1564 kfree(dc->send_buf);
1543 1565
@@ -1673,7 +1695,7 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
1673 goto exit; 1695 goto exit;
1674 } 1696 }
1675 1697
1676 rval = __kfifo_put(port->fifo_ul, (unsigned char *)buffer, count); 1698 rval = kfifo_in(&port->fifo_ul, (unsigned char *)buffer, count);
1677 1699
1678 /* notify card */ 1700 /* notify card */
1679 if (unlikely(dc == NULL)) { 1701 if (unlikely(dc == NULL)) {
@@ -1721,7 +1743,7 @@ static int ntty_write_room(struct tty_struct *tty)
1721 if (!port->port.count) 1743 if (!port->port.count)
1722 goto exit; 1744 goto exit;
1723 1745
1724 room = port->fifo_ul->size - __kfifo_len(port->fifo_ul); 1746 room = port->fifo_ul.size - kfifo_len(&port->fifo_ul);
1725 1747
1726exit: 1748exit:
1727 mutex_unlock(&port->tty_sem); 1749 mutex_unlock(&port->tty_sem);
@@ -1878,7 +1900,7 @@ static s32 ntty_chars_in_buffer(struct tty_struct *tty)
1878 goto exit_in_buffer; 1900 goto exit_in_buffer;
1879 } 1901 }
1880 1902
1881 rval = __kfifo_len(port->fifo_ul); 1903 rval = kfifo_len(&port->fifo_ul);
1882 1904
1883exit_in_buffer: 1905exit_in_buffer:
1884 return rval; 1906 return rval;
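The nozomi changes convert the driver to the reworked kfifo API: the fifo is embedded by value, kfifo_alloc() returns an error code instead of a pointer, and __kfifo_put/__kfifo_get become kfifo_in/kfifo_out. A condensed sketch of that calling convention, with made-up sizes and structure names:

#include <linux/kfifo.h>
#include <linux/gfp.h>

struct my_port {
	struct kfifo fifo;		/* was: struct kfifo *fifo */
};

static int my_setup(struct my_port *p)
{
	unsigned char msg[4] = "hey";
	int err = kfifo_alloc(&p->fifo, 4096, GFP_KERNEL);

	if (err)
		return err;		/* no more IS_ERR() on a pointer */

	kfifo_in(&p->fifo, msg, sizeof(msg));	/* was __kfifo_put() */
	return 0;
}

static void my_teardown(struct my_port *p)
{
	unsigned char buf[4];

	while (kfifo_len(&p->fifo))		/* was __kfifo_len(fifo) */
		kfifo_out(&p->fifo, buf, sizeof(buf));
	kfifo_free(&p->fifo);
}
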
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 8c262aaf7c26..0798754a607c 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -487,7 +487,7 @@ static struct sonypi_device {
487 int camera_power; 487 int camera_power;
488 int bluetooth_power; 488 int bluetooth_power;
489 struct mutex lock; 489 struct mutex lock;
490 struct kfifo *fifo; 490 struct kfifo fifo;
491 spinlock_t fifo_lock; 491 spinlock_t fifo_lock;
492 wait_queue_head_t fifo_proc_list; 492 wait_queue_head_t fifo_proc_list;
493 struct fasync_struct *fifo_async; 493 struct fasync_struct *fifo_async;
@@ -496,7 +496,7 @@ static struct sonypi_device {
496 struct input_dev *input_jog_dev; 496 struct input_dev *input_jog_dev;
497 struct input_dev *input_key_dev; 497 struct input_dev *input_key_dev;
498 struct work_struct input_work; 498 struct work_struct input_work;
499 struct kfifo *input_fifo; 499 struct kfifo input_fifo;
500 spinlock_t input_fifo_lock; 500 spinlock_t input_fifo_lock;
501} sonypi_device; 501} sonypi_device;
502 502
@@ -777,8 +777,9 @@ static void input_keyrelease(struct work_struct *work)
777{ 777{
778 struct sonypi_keypress kp; 778 struct sonypi_keypress kp;
779 779
780 while (kfifo_get(sonypi_device.input_fifo, (unsigned char *)&kp, 780 while (kfifo_out_locked(&sonypi_device.input_fifo, (unsigned char *)&kp,
781 sizeof(kp)) == sizeof(kp)) { 781 sizeof(kp), &sonypi_device.input_fifo_lock)
782 == sizeof(kp)) {
782 msleep(10); 783 msleep(10);
783 input_report_key(kp.dev, kp.key, 0); 784 input_report_key(kp.dev, kp.key, 0);
784 input_sync(kp.dev); 785 input_sync(kp.dev);
@@ -827,8 +828,9 @@ static void sonypi_report_input_event(u8 event)
827 if (kp.dev) { 828 if (kp.dev) {
828 input_report_key(kp.dev, kp.key, 1); 829 input_report_key(kp.dev, kp.key, 1);
829 input_sync(kp.dev); 830 input_sync(kp.dev);
830 kfifo_put(sonypi_device.input_fifo, 831 kfifo_in_locked(&sonypi_device.input_fifo,
831 (unsigned char *)&kp, sizeof(kp)); 832 (unsigned char *)&kp, sizeof(kp),
833 &sonypi_device.input_fifo_lock);
832 schedule_work(&sonypi_device.input_work); 834 schedule_work(&sonypi_device.input_work);
833 } 835 }
834} 836}
@@ -880,7 +882,8 @@ found:
880 acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event); 882 acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event);
881#endif 883#endif
882 884
883 kfifo_put(sonypi_device.fifo, (unsigned char *)&event, sizeof(event)); 885 kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event,
886 sizeof(event), &sonypi_device.fifo_lock);
884 kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN); 887 kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN);
885 wake_up_interruptible(&sonypi_device.fifo_proc_list); 888 wake_up_interruptible(&sonypi_device.fifo_proc_list);
886 889
@@ -906,7 +909,7 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
906 mutex_lock(&sonypi_device.lock); 909 mutex_lock(&sonypi_device.lock);
907 /* Flush input queue on first open */ 910 /* Flush input queue on first open */
908 if (!sonypi_device.open_count) 911 if (!sonypi_device.open_count)
909 kfifo_reset(sonypi_device.fifo); 912 kfifo_reset(&sonypi_device.fifo);
910 sonypi_device.open_count++; 913 sonypi_device.open_count++;
911 mutex_unlock(&sonypi_device.lock); 914 mutex_unlock(&sonypi_device.lock);
912 unlock_kernel(); 915 unlock_kernel();
@@ -919,17 +922,18 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
919 ssize_t ret; 922 ssize_t ret;
920 unsigned char c; 923 unsigned char c;
921 924
922 if ((kfifo_len(sonypi_device.fifo) == 0) && 925 if ((kfifo_len(&sonypi_device.fifo) == 0) &&
923 (file->f_flags & O_NONBLOCK)) 926 (file->f_flags & O_NONBLOCK))
924 return -EAGAIN; 927 return -EAGAIN;
925 928
926 ret = wait_event_interruptible(sonypi_device.fifo_proc_list, 929 ret = wait_event_interruptible(sonypi_device.fifo_proc_list,
927 kfifo_len(sonypi_device.fifo) != 0); 930 kfifo_len(&sonypi_device.fifo) != 0);
928 if (ret) 931 if (ret)
929 return ret; 932 return ret;
930 933
931 while (ret < count && 934 while (ret < count &&
932 (kfifo_get(sonypi_device.fifo, &c, sizeof(c)) == sizeof(c))) { 935 (kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c),
936 &sonypi_device.fifo_lock) == sizeof(c))) {
933 if (put_user(c, buf++)) 937 if (put_user(c, buf++))
934 return -EFAULT; 938 return -EFAULT;
935 ret++; 939 ret++;
@@ -946,7 +950,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
946static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) 950static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
947{ 951{
948 poll_wait(file, &sonypi_device.fifo_proc_list, wait); 952 poll_wait(file, &sonypi_device.fifo_proc_list, wait);
949 if (kfifo_len(sonypi_device.fifo)) 953 if (kfifo_len(&sonypi_device.fifo))
950 return POLLIN | POLLRDNORM; 954 return POLLIN | POLLRDNORM;
951 return 0; 955 return 0;
952} 956}
@@ -1313,11 +1317,10 @@ static int __devinit sonypi_probe(struct platform_device *dev)
1313 "http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n"); 1317 "http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n");
1314 1318
1315 spin_lock_init(&sonypi_device.fifo_lock); 1319 spin_lock_init(&sonypi_device.fifo_lock);
1316 sonypi_device.fifo = kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL, 1320 error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL);
1317 &sonypi_device.fifo_lock); 1321 if (error) {
1318 if (IS_ERR(sonypi_device.fifo)) {
1319 printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); 1322 printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
1320 return PTR_ERR(sonypi_device.fifo); 1323 return error;
1321 } 1324 }
1322 1325
1323 init_waitqueue_head(&sonypi_device.fifo_proc_list); 1326 init_waitqueue_head(&sonypi_device.fifo_proc_list);
@@ -1393,12 +1396,10 @@ static int __devinit sonypi_probe(struct platform_device *dev)
1393 } 1396 }
1394 1397
1395 spin_lock_init(&sonypi_device.input_fifo_lock); 1398 spin_lock_init(&sonypi_device.input_fifo_lock);
1396 sonypi_device.input_fifo = 1399 error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE,
1397 kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL, 1400 GFP_KERNEL);
1398 &sonypi_device.input_fifo_lock); 1401 if (error) {
1399 if (IS_ERR(sonypi_device.input_fifo)) {
1400 printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); 1402 printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
1401 error = PTR_ERR(sonypi_device.input_fifo);
1402 goto err_inpdev_unregister; 1403 goto err_inpdev_unregister;
1403 } 1404 }
1404 1405
@@ -1423,7 +1424,7 @@ static int __devinit sonypi_probe(struct platform_device *dev)
1423 pci_disable_device(pcidev); 1424 pci_disable_device(pcidev);
1424 err_put_pcidev: 1425 err_put_pcidev:
1425 pci_dev_put(pcidev); 1426 pci_dev_put(pcidev);
1426 kfifo_free(sonypi_device.fifo); 1427 kfifo_free(&sonypi_device.fifo);
1427 1428
1428 return error; 1429 return error;
1429} 1430}
@@ -1438,7 +1439,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
1438 if (useinput) { 1439 if (useinput) {
1439 input_unregister_device(sonypi_device.input_key_dev); 1440 input_unregister_device(sonypi_device.input_key_dev);
1440 input_unregister_device(sonypi_device.input_jog_dev); 1441 input_unregister_device(sonypi_device.input_jog_dev);
1441 kfifo_free(sonypi_device.input_fifo); 1442 kfifo_free(&sonypi_device.input_fifo);
1442 } 1443 }
1443 1444
1444 misc_deregister(&sonypi_misc_device); 1445 misc_deregister(&sonypi_misc_device);
@@ -1451,7 +1452,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
1451 pci_dev_put(sonypi_device.dev); 1452 pci_dev_put(sonypi_device.dev);
1452 } 1453 }
1453 1454
1454 kfifo_free(sonypi_device.fifo); 1455 kfifo_free(&sonypi_device.fifo);
1455 1456
1456 return 0; 1457 return 0;
1457} 1458}
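sonypi keeps its own spinlocks and now passes them explicitly to the locked kfifo helpers instead of handing the lock to kfifo_alloc(). A small sketch of that pattern, assuming the fifo was allocated earlier with kfifo_alloc(&ev_fifo, size, GFP_KERNEL); the event type and names are placeholders:

#include <linux/kfifo.h>
#include <linux/spinlock.h>

static struct kfifo ev_fifo;		/* allocated at probe time */
static DEFINE_SPINLOCK(ev_lock);

static void push_event(unsigned char ev)
{
	kfifo_in_locked(&ev_fifo, &ev, sizeof(ev), &ev_lock);
}

static int pop_event(unsigned char *ev)
{
	/* returns 1 if a whole event was copied out */
	return kfifo_out_locked(&ev_fifo, ev, sizeof(*ev),
				&ev_lock) == sizeof(*ev);
}
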
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index ff2f1042cb44..766c46875a20 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -434,11 +434,11 @@ static int drm_version(struct drm_device *dev, void *data,
434 * Looks up the ioctl function in the ::ioctls table, checking for root 434 * Looks up the ioctl function in the ::ioctls table, checking for root
435 * previleges if so required, and dispatches to the respective function. 435 * previleges if so required, and dispatches to the respective function.
436 */ 436 */
437int drm_ioctl(struct inode *inode, struct file *filp, 437long drm_ioctl(struct file *filp,
438 unsigned int cmd, unsigned long arg) 438 unsigned int cmd, unsigned long arg)
439{ 439{
440 struct drm_file *file_priv = filp->private_data; 440 struct drm_file *file_priv = filp->private_data;
441 struct drm_device *dev = file_priv->minor->dev; 441 struct drm_device *dev;
442 struct drm_ioctl_desc *ioctl; 442 struct drm_ioctl_desc *ioctl;
443 drm_ioctl_t *func; 443 drm_ioctl_t *func;
444 unsigned int nr = DRM_IOCTL_NR(cmd); 444 unsigned int nr = DRM_IOCTL_NR(cmd);
@@ -446,6 +446,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
446 char stack_kdata[128]; 446 char stack_kdata[128];
447 char *kdata = NULL; 447 char *kdata = NULL;
448 448
449 dev = file_priv->minor->dev;
449 atomic_inc(&dev->ioctl_count); 450 atomic_inc(&dev->ioctl_count);
450 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); 451 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
451 ++file_priv->ioctl_count; 452 ++file_priv->ioctl_count;
@@ -501,7 +502,13 @@ int drm_ioctl(struct inode *inode, struct file *filp,
501 goto err_i1; 502 goto err_i1;
502 } 503 }
503 } 504 }
504 retcode = func(dev, kdata, file_priv); 505 if (ioctl->flags & DRM_UNLOCKED)
506 retcode = func(dev, kdata, file_priv);
507 else {
508 lock_kernel();
509 retcode = func(dev, kdata, file_priv);
510 unlock_kernel();
511 }
505 512
506 if (cmd & IOC_OUT) { 513 if (cmd & IOC_OUT) {
507 if (copy_to_user((void __user *)arg, kdata, 514 if (copy_to_user((void __user *)arg, kdata,
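drm_ioctl() now carries the unlocked_ioctl prototype and takes the BKL itself only for ioctls not flagged DRM_UNLOCKED, which is what lets the driver conversions later in this diff switch from .ioctl to .unlocked_ioctl. A stripped-down fops sketch mirroring those conversions:

static const struct file_operations my_drm_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,	/* was: .ioctl = drm_ioctl */
	.mmap		= drm_mmap,
	.poll		= drm_poll,
	.fasync		= drm_fasync,
};
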
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c39b26f1abed..5c9f79877cbf 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -913,7 +913,7 @@ static int drm_cvt_modes(struct drm_connector *connector,
913 const int rates[] = { 60, 85, 75, 60, 50 }; 913 const int rates[] = { 60, 85, 75, 60, 50 };
914 914
915 for (i = 0; i < 4; i++) { 915 for (i = 0; i < 4; i++) {
916 int width, height; 916 int uninitialized_var(width), height;
917 cvt = &(timing->data.other_data.data.cvt[i]); 917 cvt = &(timing->data.other_data.data.cvt[i]);
918 918
919 height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2; 919 height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 282d9fdf9f4e..d61d185cf040 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -104,7 +104,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
104 &version->desc)) 104 &version->desc))
105 return -EFAULT; 105 return -EFAULT;
106 106
107 err = drm_ioctl(file->f_path.dentry->d_inode, file, 107 err = drm_ioctl(file,
108 DRM_IOCTL_VERSION, (unsigned long)version); 108 DRM_IOCTL_VERSION, (unsigned long)version);
109 if (err) 109 if (err)
110 return err; 110 return err;
@@ -145,8 +145,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
145 &u->unique)) 145 &u->unique))
146 return -EFAULT; 146 return -EFAULT;
147 147
148 err = drm_ioctl(file->f_path.dentry->d_inode, file, 148 err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
149 DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
150 if (err) 149 if (err)
151 return err; 150 return err;
152 151
@@ -174,8 +173,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
174 &u->unique)) 173 &u->unique))
175 return -EFAULT; 174 return -EFAULT;
176 175
177 return drm_ioctl(file->f_path.dentry->d_inode, file, 176 return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
178 DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
179} 177}
180 178
181typedef struct drm_map32 { 179typedef struct drm_map32 {
@@ -205,8 +203,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
205 if (__put_user(idx, &map->offset)) 203 if (__put_user(idx, &map->offset))
206 return -EFAULT; 204 return -EFAULT;
207 205
208 err = drm_ioctl(file->f_path.dentry->d_inode, file, 206 err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map);
209 DRM_IOCTL_GET_MAP, (unsigned long)map);
210 if (err) 207 if (err)
211 return err; 208 return err;
212 209
@@ -246,8 +243,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
246 || __put_user(m32.flags, &map->flags)) 243 || __put_user(m32.flags, &map->flags))
247 return -EFAULT; 244 return -EFAULT;
248 245
249 err = drm_ioctl(file->f_path.dentry->d_inode, file, 246 err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
250 DRM_IOCTL_ADD_MAP, (unsigned long)map);
251 if (err) 247 if (err)
252 return err; 248 return err;
253 249
@@ -284,8 +280,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
284 if (__put_user((void *)(unsigned long)handle, &map->handle)) 280 if (__put_user((void *)(unsigned long)handle, &map->handle))
285 return -EFAULT; 281 return -EFAULT;
286 282
287 return drm_ioctl(file->f_path.dentry->d_inode, file, 283 return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map);
288 DRM_IOCTL_RM_MAP, (unsigned long)map);
289} 284}
290 285
291typedef struct drm_client32 { 286typedef struct drm_client32 {
@@ -314,8 +309,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
314 if (__put_user(idx, &client->idx)) 309 if (__put_user(idx, &client->idx))
315 return -EFAULT; 310 return -EFAULT;
316 311
317 err = drm_ioctl(file->f_path.dentry->d_inode, file, 312 err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client);
318 DRM_IOCTL_GET_CLIENT, (unsigned long)client);
319 if (err) 313 if (err)
320 return err; 314 return err;
321 315
@@ -351,8 +345,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
351 if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats))) 345 if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
352 return -EFAULT; 346 return -EFAULT;
353 347
354 err = drm_ioctl(file->f_path.dentry->d_inode, file, 348 err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
355 DRM_IOCTL_GET_STATS, (unsigned long)stats);
356 if (err) 349 if (err)
357 return err; 350 return err;
358 351
@@ -395,8 +388,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
395 || __put_user(agp_start, &buf->agp_start)) 388 || __put_user(agp_start, &buf->agp_start))
396 return -EFAULT; 389 return -EFAULT;
397 390
398 err = drm_ioctl(file->f_path.dentry->d_inode, file, 391 err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
399 DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
400 if (err) 392 if (err)
401 return err; 393 return err;
402 394
@@ -427,8 +419,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
427 || __put_user(b32.high_mark, &buf->high_mark)) 419 || __put_user(b32.high_mark, &buf->high_mark))
428 return -EFAULT; 420 return -EFAULT;
429 421
430 return drm_ioctl(file->f_path.dentry->d_inode, file, 422 return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
431 DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
432} 423}
433 424
434typedef struct drm_buf_info32 { 425typedef struct drm_buf_info32 {
@@ -469,8 +460,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
469 || __put_user(list, &request->list)) 460 || __put_user(list, &request->list))
470 return -EFAULT; 461 return -EFAULT;
471 462
472 err = drm_ioctl(file->f_path.dentry->d_inode, file, 463 err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request);
473 DRM_IOCTL_INFO_BUFS, (unsigned long)request);
474 if (err) 464 if (err)
475 return err; 465 return err;
476 466
@@ -531,8 +521,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
531 || __put_user(list, &request->list)) 521 || __put_user(list, &request->list))
532 return -EFAULT; 522 return -EFAULT;
533 523
534 err = drm_ioctl(file->f_path.dentry->d_inode, file, 524 err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request);
535 DRM_IOCTL_MAP_BUFS, (unsigned long)request);
536 if (err) 525 if (err)
537 return err; 526 return err;
538 527
@@ -578,8 +567,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
578 &request->list)) 567 &request->list))
579 return -EFAULT; 568 return -EFAULT;
580 569
581 return drm_ioctl(file->f_path.dentry->d_inode, file, 570 return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request);
582 DRM_IOCTL_FREE_BUFS, (unsigned long)request);
583} 571}
584 572
585typedef struct drm_ctx_priv_map32 { 573typedef struct drm_ctx_priv_map32 {
@@ -605,8 +593,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
605 &request->handle)) 593 &request->handle))
606 return -EFAULT; 594 return -EFAULT;
607 595
608 return drm_ioctl(file->f_path.dentry->d_inode, file, 596 return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
609 DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
610} 597}
611 598
612static int compat_drm_getsareactx(struct file *file, unsigned int cmd, 599static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
@@ -628,8 +615,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
628 if (__put_user(ctx_id, &request->ctx_id)) 615 if (__put_user(ctx_id, &request->ctx_id))
629 return -EFAULT; 616 return -EFAULT;
630 617
631 err = drm_ioctl(file->f_path.dentry->d_inode, file, 618 err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
632 DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
633 if (err) 619 if (err)
634 return err; 620 return err;
635 621
@@ -664,8 +650,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
664 &res->contexts)) 650 &res->contexts))
665 return -EFAULT; 651 return -EFAULT;
666 652
667 err = drm_ioctl(file->f_path.dentry->d_inode, file, 653 err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res);
668 DRM_IOCTL_RES_CTX, (unsigned long)res);
669 if (err) 654 if (err)
670 return err; 655 return err;
671 656
@@ -718,8 +703,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
718 &d->request_sizes)) 703 &d->request_sizes))
719 return -EFAULT; 704 return -EFAULT;
720 705
721 err = drm_ioctl(file->f_path.dentry->d_inode, file, 706 err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d);
722 DRM_IOCTL_DMA, (unsigned long)d);
723 if (err) 707 if (err)
724 return err; 708 return err;
725 709
@@ -751,8 +735,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
751 if (put_user(m32.mode, &mode->mode)) 735 if (put_user(m32.mode, &mode->mode))
752 return -EFAULT; 736 return -EFAULT;
753 737
754 return drm_ioctl(file->f_path.dentry->d_inode, file, 738 return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
755 DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
756} 739}
757 740
758typedef struct drm_agp_info32 { 741typedef struct drm_agp_info32 {
@@ -781,8 +764,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
781 if (!access_ok(VERIFY_WRITE, info, sizeof(*info))) 764 if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
782 return -EFAULT; 765 return -EFAULT;
783 766
784 err = drm_ioctl(file->f_path.dentry->d_inode, file, 767 err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
785 DRM_IOCTL_AGP_INFO, (unsigned long)info);
786 if (err) 768 if (err)
787 return err; 769 return err;
788 770
@@ -827,16 +809,14 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
827 || __put_user(req32.type, &request->type)) 809 || __put_user(req32.type, &request->type))
828 return -EFAULT; 810 return -EFAULT;
829 811
830 err = drm_ioctl(file->f_path.dentry->d_inode, file, 812 err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
831 DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
832 if (err) 813 if (err)
833 return err; 814 return err;
834 815
835 if (__get_user(req32.handle, &request->handle) 816 if (__get_user(req32.handle, &request->handle)
836 || __get_user(req32.physical, &request->physical) 817 || __get_user(req32.physical, &request->physical)
837 || copy_to_user(argp, &req32, sizeof(req32))) { 818 || copy_to_user(argp, &req32, sizeof(req32))) {
838 drm_ioctl(file->f_path.dentry->d_inode, file, 819 drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
839 DRM_IOCTL_AGP_FREE, (unsigned long)request);
840 return -EFAULT; 820 return -EFAULT;
841 } 821 }
842 822
@@ -856,8 +836,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
856 || __put_user(handle, &request->handle)) 836 || __put_user(handle, &request->handle))
857 return -EFAULT; 837 return -EFAULT;
858 838
859 return drm_ioctl(file->f_path.dentry->d_inode, file, 839 return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
860 DRM_IOCTL_AGP_FREE, (unsigned long)request);
861} 840}
862 841
863typedef struct drm_agp_binding32 { 842typedef struct drm_agp_binding32 {
@@ -881,8 +860,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
881 || __put_user(req32.offset, &request->offset)) 860 || __put_user(req32.offset, &request->offset))
882 return -EFAULT; 861 return -EFAULT;
883 862
884 return drm_ioctl(file->f_path.dentry->d_inode, file, 863 return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request);
885 DRM_IOCTL_AGP_BIND, (unsigned long)request);
886} 864}
887 865
888static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, 866static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
@@ -898,8 +876,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
898 || __put_user(handle, &request->handle)) 876 || __put_user(handle, &request->handle))
899 return -EFAULT; 877 return -EFAULT;
900 878
901 return drm_ioctl(file->f_path.dentry->d_inode, file, 879 return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
902 DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
903} 880}
904#endif /* __OS_HAS_AGP */ 881#endif /* __OS_HAS_AGP */
905 882
@@ -923,8 +900,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
923 || __put_user(x, &request->size)) 900 || __put_user(x, &request->size))
924 return -EFAULT; 901 return -EFAULT;
925 902
926 err = drm_ioctl(file->f_path.dentry->d_inode, file, 903 err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request);
927 DRM_IOCTL_SG_ALLOC, (unsigned long)request);
928 if (err) 904 if (err)
929 return err; 905 return err;
930 906
@@ -950,8 +926,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
950 || __put_user(x << PAGE_SHIFT, &request->handle)) 926 || __put_user(x << PAGE_SHIFT, &request->handle))
951 return -EFAULT; 927 return -EFAULT;
952 928
953 return drm_ioctl(file->f_path.dentry->d_inode, file, 929 return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request);
954 DRM_IOCTL_SG_FREE, (unsigned long)request);
955} 930}
956 931
957#if defined(CONFIG_X86) || defined(CONFIG_IA64) 932#if defined(CONFIG_X86) || defined(CONFIG_IA64)
@@ -981,8 +956,7 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd,
981 __put_user(update32.data, &request->data)) 956 __put_user(update32.data, &request->data))
982 return -EFAULT; 957 return -EFAULT;
983 958
984 err = drm_ioctl(file->f_path.dentry->d_inode, file, 959 err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
985 DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
986 return err; 960 return err;
987} 961}
988#endif 962#endif
@@ -1023,8 +997,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
1023 || __put_user(req32.request.signal, &request->request.signal)) 997 || __put_user(req32.request.signal, &request->request.signal))
1024 return -EFAULT; 998 return -EFAULT;
1025 999
1026 err = drm_ioctl(file->f_path.dentry->d_inode, file, 1000 err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
1027 DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
1028 if (err) 1001 if (err)
1029 return err; 1002 return err;
1030 1003
@@ -1094,16 +1067,14 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1094 * than always failing. 1067 * than always failing.
1095 */ 1068 */
1096 if (nr >= ARRAY_SIZE(drm_compat_ioctls)) 1069 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
1097 return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); 1070 return drm_ioctl(filp, cmd, arg);
1098 1071
1099 fn = drm_compat_ioctls[nr]; 1072 fn = drm_compat_ioctls[nr];
1100 1073
1101 lock_kernel(); /* XXX for now */
1102 if (fn != NULL) 1074 if (fn != NULL)
1103 ret = (*fn) (filp, cmd, arg); 1075 ret = (*fn) (filp, cmd, arg);
1104 else 1076 else
1105 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); 1077 ret = drm_ioctl(filp, cmd, arg);
1106 unlock_kernel();
1107 1078
1108 return ret; 1079 return ret;
1109} 1080}
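With drm_ioctl() no longer needing an inode argument, the 32-bit compat wrappers shrink to a straight call and drop the blanket lock_kernel(). A placeholder wrapper showing the resulting shape (the ioctl number and request layout are illustrative):

#include <linux/compat.h>

static int compat_drm_foo(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	void __user *request = compat_ptr(arg);

	/* locking is now handled per-ioctl inside drm_ioctl() */
	return drm_ioctl(file, DRM_IOCTL_VERSION, (unsigned long)request);
}
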
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index d7d7eac3ddd2..cdec32977129 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -358,7 +358,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
358 if (entry->size >= size + wasted) { 358 if (entry->size >= size + wasted) {
359 if (!best_match) 359 if (!best_match)
360 return entry; 360 return entry;
361 if (size < best_size) { 361 if (entry->size < best_size) {
362 best = entry; 362 best = entry;
363 best_size = entry->size; 363 best_size = entry->size;
364 } 364 }
@@ -408,7 +408,7 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
408 if (entry->size >= size + wasted) { 408 if (entry->size >= size + wasted) {
409 if (!best_match) 409 if (!best_match)
410 return entry; 410 return entry;
411 if (size < best_size) { 411 if (entry->size < best_size) {
412 best = entry; 412 best = entry;
413 best_size = entry->size; 413 best_size = entry->size;
414 } 414 }
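The drm_mm fix corrects a best-fit search: the loop is meant to keep the smallest block that still satisfies the request, so it must compare the candidate block's size against the best size seen, not the (constant) requested size. A generic, self-contained illustration of the corrected comparison; the array form is a simplification, not drm_mm's actual data structure:

static int pick_best_fit(const unsigned long *blocks, int nblocks,
			 unsigned long want)
{
	unsigned long best_size = ~0UL;
	int best = -1, i;

	for (i = 0; i < nblocks; i++) {
		if (blocks[i] >= want && blocks[i] < best_size) {
			best = i;
			best_size = blocks[i];	/* track the candidate, not 'want' */
		}
	}
	return best;
}
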
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 9422a74c8b54..81681a07a806 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -408,6 +408,11 @@ static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *i
408 408
409 ch7006_info(client, "Detected version ID: %x\n", val); 409 ch7006_info(client, "Detected version ID: %x\n", val);
410 410
411 /* I don't know what this is for, but otherwise I get no
412 * signal.
413 */
414 ch7006_write(client, 0x3d, 0x0);
415
411 return 0; 416 return 0;
412 417
413fail: 418fail:
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
index 87f5445092e8..e447dfb63890 100644
--- a/drivers/gpu/drm/i2c/ch7006_mode.c
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -427,11 +427,6 @@ void ch7006_state_load(struct i2c_client *client,
427 ch7006_load_reg(client, state, CH7006_SUBC_INC7); 427 ch7006_load_reg(client, state, CH7006_SUBC_INC7);
428 ch7006_load_reg(client, state, CH7006_PLL_CONTROL); 428 ch7006_load_reg(client, state, CH7006_PLL_CONTROL);
429 ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0); 429 ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0);
430
431 /* I don't know what this is for, but otherwise I get no
432 * signal.
433 */
434 ch7006_write(client, 0x3d, 0x0);
435} 430}
436 431
437void ch7006_state_save(struct i2c_client *client, 432void ch7006_state_save(struct i2c_client *client,
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 7d1d88cdf2dc..de32d22a8c39 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -115,7 +115,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
115static const struct file_operations i810_buffer_fops = { 115static const struct file_operations i810_buffer_fops = {
116 .open = drm_open, 116 .open = drm_open,
117 .release = drm_release, 117 .release = drm_release,
118 .ioctl = drm_ioctl, 118 .unlocked_ioctl = drm_ioctl,
119 .mmap = i810_mmap_buffers, 119 .mmap = i810_mmap_buffers,
120 .fasync = drm_fasync, 120 .fasync = drm_fasync,
121}; 121};
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index fabb9a817966..c1e02752e023 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -59,7 +59,7 @@ static struct drm_driver driver = {
59 .owner = THIS_MODULE, 59 .owner = THIS_MODULE,
60 .open = drm_open, 60 .open = drm_open,
61 .release = drm_release, 61 .release = drm_release,
62 .ioctl = drm_ioctl, 62 .unlocked_ioctl = drm_ioctl,
63 .mmap = drm_mmap, 63 .mmap = drm_mmap,
64 .poll = drm_poll, 64 .poll = drm_poll,
65 .fasync = drm_fasync, 65 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 877bf6cb14a4..06bd732e6463 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -117,7 +117,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
117static const struct file_operations i830_buffer_fops = { 117static const struct file_operations i830_buffer_fops = {
118 .open = drm_open, 118 .open = drm_open,
119 .release = drm_release, 119 .release = drm_release,
120 .ioctl = drm_ioctl, 120 .unlocked_ioctl = drm_ioctl,
121 .mmap = i830_mmap_buffers, 121 .mmap = i830_mmap_buffers,
122 .fasync = drm_fasync, 122 .fasync = drm_fasync,
123}; 123};
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
index 389597e4a623..44f990bed8f4 100644
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ b/drivers/gpu/drm/i830/i830_drv.c
@@ -70,7 +70,7 @@ static struct drm_driver driver = {
70 .owner = THIS_MODULE, 70 .owner = THIS_MODULE,
71 .open = drm_open, 71 .open = drm_open,
72 .release = drm_release, 72 .release = drm_release,
73 .ioctl = drm_ioctl, 73 .unlocked_ioctl = drm_ioctl,
74 .mmap = drm_mmap, 74 .mmap = drm_mmap,
75 .poll = drm_poll, 75 .poll = drm_poll,
76 .fasync = drm_fasync, 76 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2fa217862058..24286ca168fc 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -329,7 +329,7 @@ static struct drm_driver driver = {
329 .owner = THIS_MODULE, 329 .owner = THIS_MODULE,
330 .open = drm_open, 330 .open = drm_open,
331 .release = drm_release, 331 .release = drm_release,
332 .ioctl = drm_ioctl, 332 .unlocked_ioctl = drm_ioctl,
333 .mmap = drm_gem_mmap, 333 .mmap = drm_gem_mmap,
334 .poll = drm_poll, 334 .poll = drm_poll,
335 .fasync = drm_fasync, 335 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 1fe68a251b75..13b028994b2b 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -66,8 +66,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
66 &batchbuffer->cliprects)) 66 &batchbuffer->cliprects))
67 return -EFAULT; 67 return -EFAULT;
68 68
69 return drm_ioctl(file->f_path.dentry->d_inode, file, 69 return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
70 DRM_IOCTL_I915_BATCHBUFFER,
71 (unsigned long)batchbuffer); 70 (unsigned long)batchbuffer);
72} 71}
73 72
@@ -102,8 +101,8 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
102 &cmdbuffer->cliprects)) 101 &cmdbuffer->cliprects))
103 return -EFAULT; 102 return -EFAULT;
104 103
105 return drm_ioctl(file->f_path.dentry->d_inode, file, 104 return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
106 DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer); 105 (unsigned long)cmdbuffer);
107} 106}
108 107
109typedef struct drm_i915_irq_emit32 { 108typedef struct drm_i915_irq_emit32 {
@@ -125,8 +124,8 @@ static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
125 &request->irq_seq)) 124 &request->irq_seq))
126 return -EFAULT; 125 return -EFAULT;
127 126
128 return drm_ioctl(file->f_path.dentry->d_inode, file, 127 return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
129 DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request); 128 (unsigned long)request);
130} 129}
131typedef struct drm_i915_getparam32 { 130typedef struct drm_i915_getparam32 {
132 int param; 131 int param;
@@ -149,8 +148,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
149 &request->value)) 148 &request->value))
150 return -EFAULT; 149 return -EFAULT;
151 150
152 return drm_ioctl(file->f_path.dentry->d_inode, file, 151 return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
153 DRM_IOCTL_I915_GETPARAM, (unsigned long)request); 152 (unsigned long)request);
154} 153}
155 154
156typedef struct drm_i915_mem_alloc32 { 155typedef struct drm_i915_mem_alloc32 {
@@ -178,8 +177,8 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
178 &request->region_offset)) 177 &request->region_offset))
179 return -EFAULT; 178 return -EFAULT;
180 179
181 return drm_ioctl(file->f_path.dentry->d_inode, file, 180 return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
182 DRM_IOCTL_I915_ALLOC, (unsigned long)request); 181 (unsigned long)request);
183} 182}
184 183
185drm_ioctl_compat_t *i915_compat_ioctls[] = { 184drm_ioctl_compat_t *i915_compat_ioctls[] = {
@@ -211,12 +210,10 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
211 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) 210 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
212 fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE]; 211 fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
213 212
214 lock_kernel(); /* XXX for now */
215 if (fn != NULL) 213 if (fn != NULL)
216 ret = (*fn) (filp, cmd, arg); 214 ret = (*fn) (filp, cmd, arg);
217 else 215 else
218 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); 216 ret = drm_ioctl(filp, cmd, arg);
219 unlock_kernel();
220 217
221 return ret; 218 return ret;
222} 219}
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 97ee566ef749..ddfe16197b59 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -68,7 +68,7 @@ static struct drm_driver driver = {
68 .owner = THIS_MODULE, 68 .owner = THIS_MODULE,
69 .open = drm_open, 69 .open = drm_open,
70 .release = drm_release, 70 .release = drm_release,
71 .ioctl = drm_ioctl, 71 .unlocked_ioctl = drm_ioctl,
72 .mmap = drm_mmap, 72 .mmap = drm_mmap,
73 .poll = drm_poll, 73 .poll = drm_poll,
74 .fasync = drm_fasync, 74 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 30d00478ddee..c1f877b7bac1 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -100,8 +100,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
100 if (err) 100 if (err)
101 return -EFAULT; 101 return -EFAULT;
102 102
103 return drm_ioctl(file->f_path.dentry->d_inode, file, 103 return drm_ioctl(file, DRM_IOCTL_MGA_INIT, (unsigned long)init);
104 DRM_IOCTL_MGA_INIT, (unsigned long)init);
105} 104}
106 105
107typedef struct drm_mga_getparam32 { 106typedef struct drm_mga_getparam32 {
@@ -125,8 +124,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
125 &getparam->value)) 124 &getparam->value))
126 return -EFAULT; 125 return -EFAULT;
127 126
128 return drm_ioctl(file->f_path.dentry->d_inode, file, 127 return drm_ioctl(file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
129 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
130} 128}
131 129
132typedef struct drm_mga_drm_bootstrap32 { 130typedef struct drm_mga_drm_bootstrap32 {
@@ -166,8 +164,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
166 || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size)) 164 || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
167 return -EFAULT; 165 return -EFAULT;
168 166
169 err = drm_ioctl(file->f_path.dentry->d_inode, file, 167 err = drm_ioctl(file, DRM_IOCTL_MGA_DMA_BOOTSTRAP,
170 DRM_IOCTL_MGA_DMA_BOOTSTRAP,
171 (unsigned long)dma_bootstrap); 168 (unsigned long)dma_bootstrap);
172 if (err) 169 if (err)
173 return err; 170 return err;
@@ -220,12 +217,10 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
220 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) 217 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
221 fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE]; 218 fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
222 219
223 lock_kernel(); /* XXX for now */
224 if (fn != NULL) 220 if (fn != NULL)
225 ret = (*fn) (filp, cmd, arg); 221 ret = (*fn) (filp, cmd, arg);
226 else 222 else
227 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); 223 ret = drm_ioctl(filp, cmd, arg);
228 unlock_kernel();
229 224
230 return ret; 225 return ret;
231} 226}
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1d90d4d0144f..48c290b5da8c 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -8,14 +8,15 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
8 nouveau_sgdma.o nouveau_dma.o \ 8 nouveau_sgdma.o nouveau_dma.o \
9 nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ 9 nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
10 nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ 10 nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
11 nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ 11 nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
12 nouveau_dp.o \ 12 nouveau_dp.o nouveau_grctx.o \
13 nv04_timer.o \ 13 nv04_timer.o \
14 nv04_mc.o nv40_mc.o nv50_mc.o \ 14 nv04_mc.o nv40_mc.o nv50_mc.o \
15 nv04_fb.o nv10_fb.o nv40_fb.o \ 15 nv04_fb.o nv10_fb.o nv40_fb.o \
16 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ 16 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
17 nv04_graph.o nv10_graph.o nv20_graph.o \ 17 nv04_graph.o nv10_graph.o nv20_graph.o \
18 nv40_graph.o nv50_graph.o \ 18 nv40_graph.o nv50_graph.o \
19 nv40_grctx.o \
19 nv04_instmem.o nv50_instmem.o \ 20 nv04_instmem.o nv50_instmem.o \
20 nv50_crtc.o nv50_dac.o nv50_sor.o \ 21 nv50_crtc.o nv50_dac.o nv50_sor.o \
21 nv50_cursor.o nv50_display.o nv50_fbcon.o \ 22 nv50_cursor.o nv50_display.o nv50_fbcon.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 5eec5ed69489..ba143972769f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -181,43 +181,42 @@ struct methods {
181 const char desc[8]; 181 const char desc[8];
182 void (*loadbios)(struct drm_device *, uint8_t *); 182 void (*loadbios)(struct drm_device *, uint8_t *);
183 const bool rw; 183 const bool rw;
184 int score;
185}; 184};
186 185
187static struct methods nv04_methods[] = { 186static struct methods nv04_methods[] = {
188 { "PROM", load_vbios_prom, false }, 187 { "PROM", load_vbios_prom, false },
189 { "PRAMIN", load_vbios_pramin, true }, 188 { "PRAMIN", load_vbios_pramin, true },
190 { "PCIROM", load_vbios_pci, true }, 189 { "PCIROM", load_vbios_pci, true },
191 { }
192}; 190};
193 191
194static struct methods nv50_methods[] = { 192static struct methods nv50_methods[] = {
195 { "PRAMIN", load_vbios_pramin, true }, 193 { "PRAMIN", load_vbios_pramin, true },
196 { "PROM", load_vbios_prom, false }, 194 { "PROM", load_vbios_prom, false },
197 { "PCIROM", load_vbios_pci, true }, 195 { "PCIROM", load_vbios_pci, true },
198 { }
199}; 196};
200 197
198#define METHODCNT 3
199
201static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) 200static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
202{ 201{
203 struct drm_nouveau_private *dev_priv = dev->dev_private; 202 struct drm_nouveau_private *dev_priv = dev->dev_private;
204 struct methods *methods, *method; 203 struct methods *methods;
204 int i;
205 int testscore = 3; 205 int testscore = 3;
206 int scores[METHODCNT];
206 207
207 if (nouveau_vbios) { 208 if (nouveau_vbios) {
208 method = nv04_methods; 209 methods = nv04_methods;
209 while (method->loadbios) { 210 for (i = 0; i < METHODCNT; i++)
210 if (!strcasecmp(nouveau_vbios, method->desc)) 211 if (!strcasecmp(nouveau_vbios, methods[i].desc))
211 break; 212 break;
212 method++;
213 }
214 213
215 if (method->loadbios) { 214 if (i < METHODCNT) {
216 NV_INFO(dev, "Attempting to use BIOS image from %s\n", 215 NV_INFO(dev, "Attempting to use BIOS image from %s\n",
217 method->desc); 216 methods[i].desc);
218 217
219 method->loadbios(dev, data); 218 methods[i].loadbios(dev, data);
220 if (score_vbios(dev, data, method->rw)) 219 if (score_vbios(dev, data, methods[i].rw))
221 return true; 220 return true;
222 } 221 }
223 222
@@ -229,28 +228,24 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
229 else 228 else
230 methods = nv50_methods; 229 methods = nv50_methods;
231 230
232 method = methods; 231 for (i = 0; i < METHODCNT; i++) {
233 while (method->loadbios) {
234 NV_TRACE(dev, "Attempting to load BIOS image from %s\n", 232 NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
235 method->desc); 233 methods[i].desc);
236 data[0] = data[1] = 0; /* avoid reuse of previous image */ 234 data[0] = data[1] = 0; /* avoid reuse of previous image */
237 method->loadbios(dev, data); 235 methods[i].loadbios(dev, data);
238 method->score = score_vbios(dev, data, method->rw); 236 scores[i] = score_vbios(dev, data, methods[i].rw);
239 if (method->score == testscore) 237 if (scores[i] == testscore)
240 return true; 238 return true;
241 method++;
242 } 239 }
243 240
244 while (--testscore > 0) { 241 while (--testscore > 0) {
245 method = methods; 242 for (i = 0; i < METHODCNT; i++) {
246 while (method->loadbios) { 243 if (scores[i] == testscore) {
247 if (method->score == testscore) {
248 NV_TRACE(dev, "Using BIOS image from %s\n", 244 NV_TRACE(dev, "Using BIOS image from %s\n",
249 method->desc); 245 methods[i].desc);
250 method->loadbios(dev, data); 246 methods[i].loadbios(dev, data);
251 return true; 247 return true;
252 } 248 }
253 method++;
254 } 249 }
255 } 250 }
256 251
@@ -261,10 +256,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
261struct init_tbl_entry { 256struct init_tbl_entry {
262 char *name; 257 char *name;
263 uint8_t id; 258 uint8_t id;
264 int length; 259 int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
265 int length_offset;
266 int length_multiplier;
267 bool (*handler)(struct nvbios *, uint16_t, struct init_exec *);
268}; 260};
269 261
270struct bit_entry { 262struct bit_entry {
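
The struct init_tbl_entry change above is the core of the nouveau_bios.c rework in this merge: the per-opcode length/length_offset/length_multiplier fields go away, and each init_* handler now returns, as an int, the number of bytes its opcode occupies (0 to abort on error or, for INIT_DONE, to stop the table). The hunks that follow convert every handler from returning true/false to returning that length. A rough sketch of how a table parser can consume the return value, written against the definitions in nouveau_bios.c; parse_one_opcode, parse_init_table_sketch and find_init_entry are illustrative names, not taken from this diff:

/* Sketch only, assuming some lookup that maps an opcode id to its
 * struct init_tbl_entry. */
static int parse_one_opcode(struct nvbios *bios, uint16_t offset,
			    struct init_exec *iexec)
{
	uint8_t id = bios->data[offset];
	struct init_tbl_entry *ent = find_init_entry(id);	/* hypothetical */

	if (!ent)
		return 0;		/* unknown opcode: stop parsing */

	/* 0 aborts the table, any other value is the opcode length */
	return ent->handler(bios, offset, iexec);
}

/* The table is then walked by adding each returned length to the
 * offset until a handler returns 0. */
static void parse_init_table_sketch(struct nvbios *bios, uint16_t offset,
				    struct init_exec *iexec)
{
	int len;

	while ((len = parse_one_opcode(bios, offset, iexec)) > 0)
		offset += len;
}
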
@@ -820,7 +812,7 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
820 } 812 }
821} 813}
822 814
823static bool 815static int
824init_io_restrict_prog(struct nvbios *bios, uint16_t offset, 816init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
825 struct init_exec *iexec) 817 struct init_exec *iexec)
826{ 818{
@@ -852,9 +844,10 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
852 uint32_t reg = ROM32(bios->data[offset + 7]); 844 uint32_t reg = ROM32(bios->data[offset + 7]);
853 uint8_t config; 845 uint8_t config;
854 uint32_t configval; 846 uint32_t configval;
847 int len = 11 + count * 4;
855 848
856 if (!iexec->execute) 849 if (!iexec->execute)
857 return true; 850 return len;
858 851
859 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " 852 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
860 "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n", 853 "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
@@ -865,7 +858,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
865 NV_ERROR(bios->dev, 858 NV_ERROR(bios->dev,
866 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", 859 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
867 offset, config, count); 860 offset, config, count);
868 return false; 861 return 0;
869 } 862 }
870 863
871 configval = ROM32(bios->data[offset + 11 + config * 4]); 864 configval = ROM32(bios->data[offset + 11 + config * 4]);
@@ -874,10 +867,10 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
874 867
875 bios_wr32(bios, reg, configval); 868 bios_wr32(bios, reg, configval);
876 869
877 return true; 870 return len;
878} 871}
879 872
880static bool 873static int
881init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 874init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
882{ 875{
883 /* 876 /*
@@ -912,10 +905,10 @@ init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
912 905
913 iexec->repeat = false; 906 iexec->repeat = false;
914 907
915 return true; 908 return 2;
916} 909}
917 910
918static bool 911static int
919init_io_restrict_pll(struct nvbios *bios, uint16_t offset, 912init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
920 struct init_exec *iexec) 913 struct init_exec *iexec)
921{ 914{
@@ -951,9 +944,10 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
951 uint32_t reg = ROM32(bios->data[offset + 8]); 944 uint32_t reg = ROM32(bios->data[offset + 8]);
952 uint8_t config; 945 uint8_t config;
953 uint16_t freq; 946 uint16_t freq;
947 int len = 12 + count * 2;
954 948
955 if (!iexec->execute) 949 if (!iexec->execute)
956 return true; 950 return len;
957 951
958 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " 952 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
959 "Shift: 0x%02X, IO Flag Condition: 0x%02X, " 953 "Shift: 0x%02X, IO Flag Condition: 0x%02X, "
@@ -966,7 +960,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
966 NV_ERROR(bios->dev, 960 NV_ERROR(bios->dev,
967 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", 961 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
968 offset, config, count); 962 offset, config, count);
969 return false; 963 return 0;
970 } 964 }
971 965
972 freq = ROM16(bios->data[offset + 12 + config * 2]); 966 freq = ROM16(bios->data[offset + 12 + config * 2]);
@@ -986,10 +980,10 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
986 980
987 setPLL(bios, reg, freq * 10); 981 setPLL(bios, reg, freq * 10);
988 982
989 return true; 983 return len;
990} 984}
991 985
992static bool 986static int
993init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 987init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
994{ 988{
995 /* 989 /*
@@ -1007,12 +1001,12 @@ init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1007 * we're not in repeat mode 1001 * we're not in repeat mode
1008 */ 1002 */
1009 if (iexec->repeat) 1003 if (iexec->repeat)
1010 return false; 1004 return 0;
1011 1005
1012 return true; 1006 return 1;
1013} 1007}
1014 1008
1015static bool 1009static int
1016init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1010init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1017{ 1011{
1018 /* 1012 /*
@@ -1041,7 +1035,7 @@ init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1041 uint8_t crtcdata; 1035 uint8_t crtcdata;
1042 1036
1043 if (!iexec->execute) 1037 if (!iexec->execute)
1044 return true; 1038 return 11;
1045 1039
1046 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, " 1040 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, "
1047 "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n", 1041 "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n",
@@ -1060,10 +1054,10 @@ init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1060 crtcdata |= (uint8_t)data; 1054 crtcdata |= (uint8_t)data;
1061 bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata); 1055 bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata);
1062 1056
1063 return true; 1057 return 11;
1064} 1058}
1065 1059
1066static bool 1060static int
1067init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1061init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1068{ 1062{
1069 /* 1063 /*
@@ -1079,10 +1073,10 @@ init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1079 BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset); 1073 BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset);
1080 1074
1081 iexec->execute = !iexec->execute; 1075 iexec->execute = !iexec->execute;
1082 return true; 1076 return 1;
1083} 1077}
1084 1078
1085static bool 1079static int
1086init_io_flag_condition(struct nvbios *bios, uint16_t offset, 1080init_io_flag_condition(struct nvbios *bios, uint16_t offset,
1087 struct init_exec *iexec) 1081 struct init_exec *iexec)
1088{ 1082{
@@ -1100,7 +1094,7 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset,
1100 uint8_t cond = bios->data[offset + 1]; 1094 uint8_t cond = bios->data[offset + 1];
1101 1095
1102 if (!iexec->execute) 1096 if (!iexec->execute)
1103 return true; 1097 return 2;
1104 1098
1105 if (io_flag_condition_met(bios, offset, cond)) 1099 if (io_flag_condition_met(bios, offset, cond))
1106 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset); 1100 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
@@ -1109,10 +1103,10 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset,
1109 iexec->execute = false; 1103 iexec->execute = false;
1110 } 1104 }
1111 1105
1112 return true; 1106 return 2;
1113} 1107}
1114 1108
1115static bool 1109static int
1116init_idx_addr_latched(struct nvbios *bios, uint16_t offset, 1110init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
1117 struct init_exec *iexec) 1111 struct init_exec *iexec)
1118{ 1112{
@@ -1140,11 +1134,12 @@ init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
1140 uint32_t mask = ROM32(bios->data[offset + 9]); 1134 uint32_t mask = ROM32(bios->data[offset + 9]);
1141 uint32_t data = ROM32(bios->data[offset + 13]); 1135 uint32_t data = ROM32(bios->data[offset + 13]);
1142 uint8_t count = bios->data[offset + 17]; 1136 uint8_t count = bios->data[offset + 17];
1137 int len = 18 + count * 2;
1143 uint32_t value; 1138 uint32_t value;
1144 int i; 1139 int i;
1145 1140
1146 if (!iexec->execute) 1141 if (!iexec->execute)
1147 return true; 1142 return len;
1148 1143
1149 BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, " 1144 BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, "
1150 "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n", 1145 "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n",
@@ -1164,10 +1159,10 @@ init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
1164 bios_wr32(bios, controlreg, value); 1159 bios_wr32(bios, controlreg, value);
1165 } 1160 }
1166 1161
1167 return true; 1162 return len;
1168} 1163}
1169 1164
1170static bool 1165static int
1171init_io_restrict_pll2(struct nvbios *bios, uint16_t offset, 1166init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
1172 struct init_exec *iexec) 1167 struct init_exec *iexec)
1173{ 1168{
@@ -1196,25 +1191,26 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
1196 uint8_t shift = bios->data[offset + 5]; 1191 uint8_t shift = bios->data[offset + 5];
1197 uint8_t count = bios->data[offset + 6]; 1192 uint8_t count = bios->data[offset + 6];
1198 uint32_t reg = ROM32(bios->data[offset + 7]); 1193 uint32_t reg = ROM32(bios->data[offset + 7]);
1194 int len = 11 + count * 4;
1199 uint8_t config; 1195 uint8_t config;
1200 uint32_t freq; 1196 uint32_t freq;
1201 1197
1202 if (!iexec->execute) 1198 if (!iexec->execute)
1203 return true; 1199 return len;
1204 1200
1205 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " 1201 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
1206 "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n", 1202 "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
1207 offset, crtcport, crtcindex, mask, shift, count, reg); 1203 offset, crtcport, crtcindex, mask, shift, count, reg);
1208 1204
1209 if (!reg) 1205 if (!reg)
1210 return true; 1206 return len;
1211 1207
1212 config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift; 1208 config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
1213 if (config > count) { 1209 if (config > count) {
1214 NV_ERROR(bios->dev, 1210 NV_ERROR(bios->dev,
1215 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", 1211 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
1216 offset, config, count); 1212 offset, config, count);
1217 return false; 1213 return 0;
1218 } 1214 }
1219 1215
1220 freq = ROM32(bios->data[offset + 11 + config * 4]); 1216 freq = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1224,10 +1220,10 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
1224 1220
1225 setPLL(bios, reg, freq); 1221 setPLL(bios, reg, freq);
1226 1222
1227 return true; 1223 return len;
1228} 1224}
1229 1225
1230static bool 1226static int
1231init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1227init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1232{ 1228{
1233 /* 1229 /*
@@ -1244,16 +1240,16 @@ init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1244 uint32_t freq = ROM32(bios->data[offset + 5]); 1240 uint32_t freq = ROM32(bios->data[offset + 5]);
1245 1241
1246 if (!iexec->execute) 1242 if (!iexec->execute)
1247 return true; 1243 return 9;
1248 1244
1249 BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n", 1245 BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n",
1250 offset, reg, freq); 1246 offset, reg, freq);
1251 1247
1252 setPLL(bios, reg, freq); 1248 setPLL(bios, reg, freq);
1253 return true; 1249 return 9;
1254} 1250}
1255 1251
1256static bool 1252static int
1257init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1253init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1258{ 1254{
1259 /* 1255 /*
@@ -1277,12 +1273,13 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1277 uint8_t i2c_index = bios->data[offset + 1]; 1273 uint8_t i2c_index = bios->data[offset + 1];
1278 uint8_t i2c_address = bios->data[offset + 2]; 1274 uint8_t i2c_address = bios->data[offset + 2];
1279 uint8_t count = bios->data[offset + 3]; 1275 uint8_t count = bios->data[offset + 3];
1276 int len = 4 + count * 3;
1280 struct nouveau_i2c_chan *chan; 1277 struct nouveau_i2c_chan *chan;
1281 struct i2c_msg msg; 1278 struct i2c_msg msg;
1282 int i; 1279 int i;
1283 1280
1284 if (!iexec->execute) 1281 if (!iexec->execute)
1285 return true; 1282 return len;
1286 1283
1287 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, " 1284 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
1288 "Count: 0x%02X\n", 1285 "Count: 0x%02X\n",
@@ -1290,7 +1287,7 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1290 1287
1291 chan = init_i2c_device_find(bios->dev, i2c_index); 1288 chan = init_i2c_device_find(bios->dev, i2c_index);
1292 if (!chan) 1289 if (!chan)
1293 return false; 1290 return 0;
1294 1291
1295 for (i = 0; i < count; i++) { 1292 for (i = 0; i < count; i++) {
1296 uint8_t i2c_reg = bios->data[offset + 4 + i * 3]; 1293 uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
@@ -1303,7 +1300,7 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1303 msg.len = 1; 1300 msg.len = 1;
1304 msg.buf = &value; 1301 msg.buf = &value;
1305 if (i2c_transfer(&chan->adapter, &msg, 1) != 1) 1302 if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
1306 return false; 1303 return 0;
1307 1304
1308 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, " 1305 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
1309 "Mask: 0x%02X, Data: 0x%02X\n", 1306 "Mask: 0x%02X, Data: 0x%02X\n",
@@ -1317,14 +1314,14 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1317 msg.len = 1; 1314 msg.len = 1;
1318 msg.buf = &value; 1315 msg.buf = &value;
1319 if (i2c_transfer(&chan->adapter, &msg, 1) != 1) 1316 if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
1320 return false; 1317 return 0;
1321 } 1318 }
1322 } 1319 }
1323 1320
1324 return true; 1321 return len;
1325} 1322}
1326 1323
1327static bool 1324static int
1328init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1325init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1329{ 1326{
1330 /* 1327 /*
@@ -1346,12 +1343,13 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1346 uint8_t i2c_index = bios->data[offset + 1]; 1343 uint8_t i2c_index = bios->data[offset + 1];
1347 uint8_t i2c_address = bios->data[offset + 2]; 1344 uint8_t i2c_address = bios->data[offset + 2];
1348 uint8_t count = bios->data[offset + 3]; 1345 uint8_t count = bios->data[offset + 3];
1346 int len = 4 + count * 2;
1349 struct nouveau_i2c_chan *chan; 1347 struct nouveau_i2c_chan *chan;
1350 struct i2c_msg msg; 1348 struct i2c_msg msg;
1351 int i; 1349 int i;
1352 1350
1353 if (!iexec->execute) 1351 if (!iexec->execute)
1354 return true; 1352 return len;
1355 1353
1356 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, " 1354 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
1357 "Count: 0x%02X\n", 1355 "Count: 0x%02X\n",
@@ -1359,7 +1357,7 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1359 1357
1360 chan = init_i2c_device_find(bios->dev, i2c_index); 1358 chan = init_i2c_device_find(bios->dev, i2c_index);
1361 if (!chan) 1359 if (!chan)
1362 return false; 1360 return 0;
1363 1361
1364 for (i = 0; i < count; i++) { 1362 for (i = 0; i < count; i++) {
1365 uint8_t i2c_reg = bios->data[offset + 4 + i * 2]; 1363 uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
@@ -1374,14 +1372,14 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1374 msg.len = 1; 1372 msg.len = 1;
1375 msg.buf = &data; 1373 msg.buf = &data;
1376 if (i2c_transfer(&chan->adapter, &msg, 1) != 1) 1374 if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
1377 return false; 1375 return 0;
1378 } 1376 }
1379 } 1377 }
1380 1378
1381 return true; 1379 return len;
1382} 1380}
1383 1381
1384static bool 1382static int
1385init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1383init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1386{ 1384{
1387 /* 1385 /*
@@ -1401,13 +1399,14 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1401 uint8_t i2c_index = bios->data[offset + 1]; 1399 uint8_t i2c_index = bios->data[offset + 1];
1402 uint8_t i2c_address = bios->data[offset + 2]; 1400 uint8_t i2c_address = bios->data[offset + 2];
1403 uint8_t count = bios->data[offset + 3]; 1401 uint8_t count = bios->data[offset + 3];
1402 int len = 4 + count;
1404 struct nouveau_i2c_chan *chan; 1403 struct nouveau_i2c_chan *chan;
1405 struct i2c_msg msg; 1404 struct i2c_msg msg;
1406 uint8_t data[256]; 1405 uint8_t data[256];
1407 int i; 1406 int i;
1408 1407
1409 if (!iexec->execute) 1408 if (!iexec->execute)
1410 return true; 1409 return len;
1411 1410
1412 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, " 1411 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
1413 "Count: 0x%02X\n", 1412 "Count: 0x%02X\n",
@@ -1415,7 +1414,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1415 1414
1416 chan = init_i2c_device_find(bios->dev, i2c_index); 1415 chan = init_i2c_device_find(bios->dev, i2c_index);
1417 if (!chan) 1416 if (!chan)
1418 return false; 1417 return 0;
1419 1418
1420 for (i = 0; i < count; i++) { 1419 for (i = 0; i < count; i++) {
1421 data[i] = bios->data[offset + 4 + i]; 1420 data[i] = bios->data[offset + 4 + i];
@@ -1429,13 +1428,13 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1429 msg.len = count; 1428 msg.len = count;
1430 msg.buf = data; 1429 msg.buf = data;
1431 if (i2c_transfer(&chan->adapter, &msg, 1) != 1) 1430 if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
1432 return false; 1431 return 0;
1433 } 1432 }
1434 1433
1435 return true; 1434 return len;
1436} 1435}
1437 1436
1438static bool 1437static int
1439init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1438init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1440{ 1439{
1441 /* 1440 /*
@@ -1460,7 +1459,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1460 uint32_t reg, value; 1459 uint32_t reg, value;
1461 1460
1462 if (!iexec->execute) 1461 if (!iexec->execute)
1463 return true; 1462 return 5;
1464 1463
1465 BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, " 1464 BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, "
1466 "Mask: 0x%02X, Data: 0x%02X\n", 1465 "Mask: 0x%02X, Data: 0x%02X\n",
@@ -1468,7 +1467,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1468 1467
1469 reg = get_tmds_index_reg(bios->dev, mlv); 1468 reg = get_tmds_index_reg(bios->dev, mlv);
1470 if (!reg) 1469 if (!reg)
1471 return false; 1470 return 0;
1472 1471
1473 bios_wr32(bios, reg, 1472 bios_wr32(bios, reg,
1474 tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE); 1473 tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
@@ -1476,10 +1475,10 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1476 bios_wr32(bios, reg + 4, value); 1475 bios_wr32(bios, reg + 4, value);
1477 bios_wr32(bios, reg, tmdsaddr); 1476 bios_wr32(bios, reg, tmdsaddr);
1478 1477
1479 return true; 1478 return 5;
1480} 1479}
1481 1480
1482static bool 1481static int
1483init_zm_tmds_group(struct nvbios *bios, uint16_t offset, 1482init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
1484 struct init_exec *iexec) 1483 struct init_exec *iexec)
1485{ 1484{
@@ -1500,18 +1499,19 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
1500 1499
1501 uint8_t mlv = bios->data[offset + 1]; 1500 uint8_t mlv = bios->data[offset + 1];
1502 uint8_t count = bios->data[offset + 2]; 1501 uint8_t count = bios->data[offset + 2];
1502 int len = 3 + count * 2;
1503 uint32_t reg; 1503 uint32_t reg;
1504 int i; 1504 int i;
1505 1505
1506 if (!iexec->execute) 1506 if (!iexec->execute)
1507 return true; 1507 return len;
1508 1508
1509 BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n", 1509 BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n",
1510 offset, mlv, count); 1510 offset, mlv, count);
1511 1511
1512 reg = get_tmds_index_reg(bios->dev, mlv); 1512 reg = get_tmds_index_reg(bios->dev, mlv);
1513 if (!reg) 1513 if (!reg)
1514 return false; 1514 return 0;
1515 1515
1516 for (i = 0; i < count; i++) { 1516 for (i = 0; i < count; i++) {
1517 uint8_t tmdsaddr = bios->data[offset + 3 + i * 2]; 1517 uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
@@ -1521,10 +1521,10 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
1521 bios_wr32(bios, reg, tmdsaddr); 1521 bios_wr32(bios, reg, tmdsaddr);
1522 } 1522 }
1523 1523
1524 return true; 1524 return len;
1525} 1525}
1526 1526
1527static bool 1527static int
1528init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset, 1528init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
1529 struct init_exec *iexec) 1529 struct init_exec *iexec)
1530{ 1530{
@@ -1547,11 +1547,12 @@ init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
1547 uint8_t crtcindex2 = bios->data[offset + 2]; 1547 uint8_t crtcindex2 = bios->data[offset + 2];
1548 uint8_t baseaddr = bios->data[offset + 3]; 1548 uint8_t baseaddr = bios->data[offset + 3];
1549 uint8_t count = bios->data[offset + 4]; 1549 uint8_t count = bios->data[offset + 4];
1550 int len = 5 + count;
1550 uint8_t oldaddr, data; 1551 uint8_t oldaddr, data;
1551 int i; 1552 int i;
1552 1553
1553 if (!iexec->execute) 1554 if (!iexec->execute)
1554 return true; 1555 return len;
1555 1556
1556 BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, " 1557 BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, "
1557 "BaseAddr: 0x%02X, Count: 0x%02X\n", 1558 "BaseAddr: 0x%02X, Count: 0x%02X\n",
@@ -1568,10 +1569,10 @@ init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
1568 1569
1569 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr); 1570 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr);
1570 1571
1571 return true; 1572 return len;
1572} 1573}
1573 1574
1574static bool 1575static int
1575init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1576init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1576{ 1577{
1577 /* 1578 /*
@@ -1592,7 +1593,7 @@ init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1592 uint8_t value; 1593 uint8_t value;
1593 1594
1594 if (!iexec->execute) 1595 if (!iexec->execute)
1595 return true; 1596 return 4;
1596 1597
1597 BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n", 1598 BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n",
1598 offset, crtcindex, mask, data); 1599 offset, crtcindex, mask, data);
@@ -1601,10 +1602,10 @@ init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1601 value |= data; 1602 value |= data;
1602 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value); 1603 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value);
1603 1604
1604 return true; 1605 return 4;
1605} 1606}
1606 1607
1607static bool 1608static int
1608init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1609init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1609{ 1610{
1610 /* 1611 /*
@@ -1621,14 +1622,14 @@ init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1621 uint8_t data = bios->data[offset + 2]; 1622 uint8_t data = bios->data[offset + 2];
1622 1623
1623 if (!iexec->execute) 1624 if (!iexec->execute)
1624 return true; 1625 return 3;
1625 1626
1626 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data); 1627 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data);
1627 1628
1628 return true; 1629 return 3;
1629} 1630}
1630 1631
1631static bool 1632static int
1632init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1633init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1633{ 1634{
1634 /* 1635 /*
@@ -1645,18 +1646,19 @@ init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1645 */ 1646 */
1646 1647
1647 uint8_t count = bios->data[offset + 1]; 1648 uint8_t count = bios->data[offset + 1];
1649 int len = 2 + count * 2;
1648 int i; 1650 int i;
1649 1651
1650 if (!iexec->execute) 1652 if (!iexec->execute)
1651 return true; 1653 return len;
1652 1654
1653 for (i = 0; i < count; i++) 1655 for (i = 0; i < count; i++)
1654 init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec); 1656 init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec);
1655 1657
1656 return true; 1658 return len;
1657} 1659}
1658 1660
1659static bool 1661static int
1660init_condition_time(struct nvbios *bios, uint16_t offset, 1662init_condition_time(struct nvbios *bios, uint16_t offset,
1661 struct init_exec *iexec) 1663 struct init_exec *iexec)
1662{ 1664{
@@ -1680,7 +1682,7 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
1680 unsigned cnt; 1682 unsigned cnt;
1681 1683
1682 if (!iexec->execute) 1684 if (!iexec->execute)
1683 return true; 1685 return 3;
1684 1686
1685 if (retries > 100) 1687 if (retries > 100)
1686 retries = 100; 1688 retries = 100;
@@ -1711,10 +1713,10 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
1711 iexec->execute = false; 1713 iexec->execute = false;
1712 } 1714 }
1713 1715
1714 return true; 1716 return 3;
1715} 1717}
1716 1718
1717static bool 1719static int
1718init_zm_reg_sequence(struct nvbios *bios, uint16_t offset, 1720init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
1719 struct init_exec *iexec) 1721 struct init_exec *iexec)
1720{ 1722{
@@ -1734,10 +1736,11 @@ init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
1734 1736
1735 uint32_t basereg = ROM32(bios->data[offset + 1]); 1737 uint32_t basereg = ROM32(bios->data[offset + 1]);
1736 uint32_t count = bios->data[offset + 5]; 1738 uint32_t count = bios->data[offset + 5];
1739 int len = 6 + count * 4;
1737 int i; 1740 int i;
1738 1741
1739 if (!iexec->execute) 1742 if (!iexec->execute)
1740 return true; 1743 return len;
1741 1744
1742 BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n", 1745 BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n",
1743 offset, basereg, count); 1746 offset, basereg, count);
@@ -1749,10 +1752,10 @@ init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
1749 bios_wr32(bios, reg, data); 1752 bios_wr32(bios, reg, data);
1750 } 1753 }
1751 1754
1752 return true; 1755 return len;
1753} 1756}
1754 1757
1755static bool 1758static int
1756init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1759init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1757{ 1760{
1758 /* 1761 /*
@@ -1768,7 +1771,7 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1768 uint16_t sub_offset = ROM16(bios->data[offset + 1]); 1771 uint16_t sub_offset = ROM16(bios->data[offset + 1]);
1769 1772
1770 if (!iexec->execute) 1773 if (!iexec->execute)
1771 return true; 1774 return 3;
1772 1775
1773 BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n", 1776 BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n",
1774 offset, sub_offset); 1777 offset, sub_offset);
@@ -1777,10 +1780,10 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1777 1780
1778 BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset); 1781 BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset);
1779 1782
1780 return true; 1783 return 3;
1781} 1784}
1782 1785
1783static bool 1786static int
1784init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1787init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1785{ 1788{
1786 /* 1789 /*
@@ -1808,7 +1811,7 @@ init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1808 uint32_t srcvalue, dstvalue; 1811 uint32_t srcvalue, dstvalue;
1809 1812
1810 if (!iexec->execute) 1813 if (!iexec->execute)
1811 return true; 1814 return 22;
1812 1815
1813 BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, " 1816 BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, "
1814 "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n", 1817 "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n",
@@ -1827,10 +1830,10 @@ init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1827 1830
1828 bios_wr32(bios, dstreg, dstvalue | srcvalue); 1831 bios_wr32(bios, dstreg, dstvalue | srcvalue);
1829 1832
1830 return true; 1833 return 22;
1831} 1834}
1832 1835
1833static bool 1836static int
1834init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1837init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1835{ 1838{
1836 /* 1839 /*
@@ -1848,14 +1851,14 @@ init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1848 uint8_t data = bios->data[offset + 4]; 1851 uint8_t data = bios->data[offset + 4];
1849 1852
1850 if (!iexec->execute) 1853 if (!iexec->execute)
1851 return true; 1854 return 5;
1852 1855
1853 bios_idxprt_wr(bios, crtcport, crtcindex, data); 1856 bios_idxprt_wr(bios, crtcport, crtcindex, data);
1854 1857
1855 return true; 1858 return 5;
1856} 1859}
1857 1860
1858static bool 1861static int
1859init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1862init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1860{ 1863{
1861 /* 1864 /*
@@ -1904,7 +1907,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1904 struct drm_nouveau_private *dev_priv = bios->dev->dev_private; 1907 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
1905 1908
1906 if (dev_priv->card_type >= NV_50) 1909 if (dev_priv->card_type >= NV_50)
1907 return true; 1910 return 1;
1908 1911
1909 /* 1912 /*
1910 * On every card I've seen, this step gets done for us earlier in 1913 * On every card I've seen, this step gets done for us earlier in
@@ -1922,10 +1925,10 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1922 /* write back the saved configuration value */ 1925 /* write back the saved configuration value */
1923 bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0); 1926 bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0);
1924 1927
1925 return true; 1928 return 1;
1926} 1929}
1927 1930
1928static bool 1931static int
1929init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 1932init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1930{ 1933{
1931 /* 1934 /*
@@ -1959,10 +1962,10 @@ init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1959 pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED; /* 0xfffffffe */ 1962 pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED; /* 0xfffffffe */
1960 bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20); 1963 bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20);
1961 1964
1962 return true; 1965 return 13;
1963} 1966}
1964 1967
1965static bool 1968static int
1966init_configure_mem(struct nvbios *bios, uint16_t offset, 1969init_configure_mem(struct nvbios *bios, uint16_t offset,
1967 struct init_exec *iexec) 1970 struct init_exec *iexec)
1968{ 1971{
@@ -1983,7 +1986,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
1983 uint32_t reg, data; 1986 uint32_t reg, data;
1984 1987
1985 if (bios->major_version > 2) 1988 if (bios->major_version > 2)
1986 return false; 1989 return 0;
1987 1990
1988 bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd( 1991 bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
1989 bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20); 1992 bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
@@ -2015,10 +2018,10 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
2015 bios_wr32(bios, reg, data); 2018 bios_wr32(bios, reg, data);
2016 } 2019 }
2017 2020
2018 return true; 2021 return 1;
2019} 2022}
2020 2023
2021static bool 2024static int
2022init_configure_clk(struct nvbios *bios, uint16_t offset, 2025init_configure_clk(struct nvbios *bios, uint16_t offset,
2023 struct init_exec *iexec) 2026 struct init_exec *iexec)
2024{ 2027{
@@ -2038,7 +2041,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
2038 int clock; 2041 int clock;
2039 2042
2040 if (bios->major_version > 2) 2043 if (bios->major_version > 2)
2041 return false; 2044 return 0;
2042 2045
2043 clock = ROM16(bios->data[meminitoffs + 4]) * 10; 2046 clock = ROM16(bios->data[meminitoffs + 4]) * 10;
2044 setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock); 2047 setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
@@ -2048,10 +2051,10 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
2048 clock *= 2; 2051 clock *= 2;
2049 setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock); 2052 setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock);
2050 2053
2051 return true; 2054 return 1;
2052} 2055}
2053 2056
2054static bool 2057static int
2055init_configure_preinit(struct nvbios *bios, uint16_t offset, 2058init_configure_preinit(struct nvbios *bios, uint16_t offset,
2056 struct init_exec *iexec) 2059 struct init_exec *iexec)
2057{ 2060{
@@ -2071,15 +2074,15 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset,
2071 uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6)); 2074 uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
2072 2075
2073 if (bios->major_version > 2) 2076 if (bios->major_version > 2)
2074 return false; 2077 return 0;
2075 2078
2076 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, 2079 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
2077 NV_CIO_CRE_SCRATCH4__INDEX, cr3c); 2080 NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
2078 2081
2079 return true; 2082 return 1;
2080} 2083}
2081 2084
2082static bool 2085static int
2083init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2086init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2084{ 2087{
2085 /* 2088 /*
@@ -2099,7 +2102,7 @@ init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2099 uint8_t data = bios->data[offset + 4]; 2102 uint8_t data = bios->data[offset + 4];
2100 2103
2101 if (!iexec->execute) 2104 if (!iexec->execute)
2102 return true; 2105 return 5;
2103 2106
2104 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n", 2107 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n",
2105 offset, crtcport, mask, data); 2108 offset, crtcport, mask, data);
@@ -2158,15 +2161,15 @@ init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2158 for (i = 0; i < 2; i++) 2161 for (i = 0; i < 2; i++)
2159 bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32( 2162 bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32(
2160 bios, 0x614108 + (i*0x800)) & 0x0fffffff); 2163 bios, 0x614108 + (i*0x800)) & 0x0fffffff);
2161 return true; 2164 return 5;
2162 } 2165 }
2163 2166
2164 bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) | 2167 bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) |
2165 data); 2168 data);
2166 return true; 2169 return 5;
2167} 2170}
2168 2171
2169static bool 2172static int
2170init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2173init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2171{ 2174{
2172 /* 2175 /*
@@ -2181,7 +2184,7 @@ init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2181 uint8_t sub = bios->data[offset + 1]; 2184 uint8_t sub = bios->data[offset + 1];
2182 2185
2183 if (!iexec->execute) 2186 if (!iexec->execute)
2184 return true; 2187 return 2;
2185 2188
2186 BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub); 2189 BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub);
2187 2190
@@ -2191,10 +2194,10 @@ init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2191 2194
2192 BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub); 2195 BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub);
2193 2196
2194 return true; 2197 return 2;
2195} 2198}
2196 2199
2197static bool 2200static int
2198init_ram_condition(struct nvbios *bios, uint16_t offset, 2201init_ram_condition(struct nvbios *bios, uint16_t offset,
2199 struct init_exec *iexec) 2202 struct init_exec *iexec)
2200{ 2203{
@@ -2215,7 +2218,7 @@ init_ram_condition(struct nvbios *bios, uint16_t offset,
2215 uint8_t data; 2218 uint8_t data;
2216 2219
2217 if (!iexec->execute) 2220 if (!iexec->execute)
2218 return true; 2221 return 3;
2219 2222
2220 data = bios_rd32(bios, NV_PFB_BOOT_0) & mask; 2223 data = bios_rd32(bios, NV_PFB_BOOT_0) & mask;
2221 2224
@@ -2229,10 +2232,10 @@ init_ram_condition(struct nvbios *bios, uint16_t offset,
2229 iexec->execute = false; 2232 iexec->execute = false;
2230 } 2233 }
2231 2234
2232 return true; 2235 return 3;
2233} 2236}
2234 2237
2235static bool 2238static int
2236init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2239init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2237{ 2240{
2238 /* 2241 /*
@@ -2251,17 +2254,17 @@ init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2251 uint32_t data = ROM32(bios->data[offset + 9]); 2254 uint32_t data = ROM32(bios->data[offset + 9]);
2252 2255
2253 if (!iexec->execute) 2256 if (!iexec->execute)
2254 return true; 2257 return 13;
2255 2258
2256 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n", 2259 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n",
2257 offset, reg, mask, data); 2260 offset, reg, mask, data);
2258 2261
2259 bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data); 2262 bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data);
2260 2263
2261 return true; 2264 return 13;
2262} 2265}
2263 2266
2264static bool 2267static int
2265init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2268init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2266{ 2269{
2267 /* 2270 /*
@@ -2285,7 +2288,7 @@ init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2285 int i; 2288 int i;
2286 2289
2287 if (!iexec->execute) 2290 if (!iexec->execute)
2288 return true; 2291 return 2;
2289 2292
2290 BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, " 2293 BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, "
2291 "Count: 0x%02X\n", 2294 "Count: 0x%02X\n",
@@ -2300,10 +2303,10 @@ init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2300 bios_wr32(bios, reg, data); 2303 bios_wr32(bios, reg, data);
2301 } 2304 }
2302 2305
2303 return true; 2306 return 2;
2304} 2307}
2305 2308
2306static bool 2309static int
2307init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2310init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2308{ 2311{
2309 /* 2312 /*
@@ -2315,10 +2318,10 @@ init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2315 */ 2318 */
2316 2319
2317 /* mild retval abuse to stop parsing this table */ 2320 /* mild retval abuse to stop parsing this table */
2318 return false; 2321 return 0;
2319} 2322}
2320 2323
2321static bool 2324static int
2322init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2325init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2323{ 2326{
2324 /* 2327 /*
@@ -2330,15 +2333,15 @@ init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2330 */ 2333 */
2331 2334
2332 if (iexec->execute) 2335 if (iexec->execute)
2333 return true; 2336 return 1;
2334 2337
2335 iexec->execute = true; 2338 iexec->execute = true;
2336 BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset); 2339 BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset);
2337 2340
2338 return true; 2341 return 1;
2339} 2342}
2340 2343
2341static bool 2344static int
2342init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2345init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2343{ 2346{
2344 /* 2347 /*
@@ -2353,7 +2356,7 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2353 unsigned time = ROM16(bios->data[offset + 1]); 2356 unsigned time = ROM16(bios->data[offset + 1]);
2354 2357
2355 if (!iexec->execute) 2358 if (!iexec->execute)
2356 return true; 2359 return 3;
2357 2360
2358 BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n", 2361 BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n",
2359 offset, time); 2362 offset, time);
@@ -2363,10 +2366,10 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2363 else 2366 else
2364 msleep((time + 900) / 1000); 2367 msleep((time + 900) / 1000);
2365 2368
2366 return true; 2369 return 3;
2367} 2370}
2368 2371
2369static bool 2372static int
2370init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2373init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2371{ 2374{
2372 /* 2375 /*
@@ -2383,7 +2386,7 @@ init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2383 uint8_t cond = bios->data[offset + 1]; 2386 uint8_t cond = bios->data[offset + 1];
2384 2387
2385 if (!iexec->execute) 2388 if (!iexec->execute)
2386 return true; 2389 return 2;
2387 2390
2388 BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond); 2391 BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond);
2389 2392
@@ -2394,10 +2397,10 @@ init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2394 iexec->execute = false; 2397 iexec->execute = false;
2395 } 2398 }
2396 2399
2397 return true; 2400 return 2;
2398} 2401}
2399 2402
2400static bool 2403static int
2401init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2404init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2402{ 2405{
2403 /* 2406 /*
@@ -2414,7 +2417,7 @@ init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2414 uint8_t cond = bios->data[offset + 1]; 2417 uint8_t cond = bios->data[offset + 1];
2415 2418
2416 if (!iexec->execute) 2419 if (!iexec->execute)
2417 return true; 2420 return 2;
2418 2421
2419 BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond); 2422 BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond);
2420 2423
@@ -2425,10 +2428,10 @@ init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2425 iexec->execute = false; 2428 iexec->execute = false;
2426 } 2429 }
2427 2430
2428 return true; 2431 return 2;
2429} 2432}
2430 2433
2431static bool 2434static int
2432init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2435init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2433{ 2436{
2434 /* 2437 /*
@@ -2451,7 +2454,7 @@ init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2451 uint8_t value; 2454 uint8_t value;
2452 2455
2453 if (!iexec->execute) 2456 if (!iexec->execute)
2454 return true; 2457 return 6;
2455 2458
2456 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " 2459 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
2457 "Data: 0x%02X\n", 2460 "Data: 0x%02X\n",
@@ -2460,10 +2463,10 @@ init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2460 value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data; 2463 value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data;
2461 bios_idxprt_wr(bios, crtcport, crtcindex, value); 2464 bios_idxprt_wr(bios, crtcport, crtcindex, value);
2462 2465
2463 return true; 2466 return 6;
2464} 2467}
2465 2468
2466static bool 2469static int
2467init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2470init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2468{ 2471{
2469 /* 2472 /*
@@ -2481,16 +2484,16 @@ init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2481 uint16_t freq = ROM16(bios->data[offset + 5]); 2484 uint16_t freq = ROM16(bios->data[offset + 5]);
2482 2485
2483 if (!iexec->execute) 2486 if (!iexec->execute)
2484 return true; 2487 return 7;
2485 2488
2486 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq); 2489 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq);
2487 2490
2488 setPLL(bios, reg, freq * 10); 2491 setPLL(bios, reg, freq * 10);
2489 2492
2490 return true; 2493 return 7;
2491} 2494}
2492 2495
2493static bool 2496static int
2494init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2497init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2495{ 2498{
2496 /* 2499 /*
@@ -2507,17 +2510,17 @@ init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2507 uint32_t value = ROM32(bios->data[offset + 5]); 2510 uint32_t value = ROM32(bios->data[offset + 5]);
2508 2511
2509 if (!iexec->execute) 2512 if (!iexec->execute)
2510 return true; 2513 return 9;
2511 2514
2512 if (reg == 0x000200) 2515 if (reg == 0x000200)
2513 value |= 1; 2516 value |= 1;
2514 2517
2515 bios_wr32(bios, reg, value); 2518 bios_wr32(bios, reg, value);
2516 2519
2517 return true; 2520 return 9;
2518} 2521}
2519 2522
2520static bool 2523static int
2521init_ram_restrict_pll(struct nvbios *bios, uint16_t offset, 2524init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
2522 struct init_exec *iexec) 2525 struct init_exec *iexec)
2523{ 2526{
@@ -2543,14 +2546,15 @@ init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
2543 uint8_t type = bios->data[offset + 1]; 2546 uint8_t type = bios->data[offset + 1];
2544 uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]); 2547 uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]);
2545 uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry; 2548 uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry;
2549 int len = 2 + bios->ram_restrict_group_count * 4;
2546 int i; 2550 int i;
2547 2551
2548 if (!iexec->execute) 2552 if (!iexec->execute)
2549 return true; 2553 return len;
2550 2554
2551 if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) { 2555 if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) {
2552 NV_ERROR(dev, "PLL limits table not version 3.x\n"); 2556 NV_ERROR(dev, "PLL limits table not version 3.x\n");
2553 return true; /* deliberate, allow default clocks to remain */ 2557 return len; /* deliberate, allow default clocks to remain */
2554 } 2558 }
2555 2559
2556 entry = pll_limits + pll_limits[1]; 2560 entry = pll_limits + pll_limits[1];
@@ -2563,15 +2567,15 @@ init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
2563 offset, type, reg, freq); 2567 offset, type, reg, freq);
2564 2568
2565 setPLL(bios, reg, freq); 2569 setPLL(bios, reg, freq);
2566 return true; 2570 return len;
2567 } 2571 }
2568 } 2572 }
2569 2573
2570 NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type); 2574 NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type);
2571 return true; 2575 return len;
2572} 2576}
2573 2577
2574static bool 2578static int
2575init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2579init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2576{ 2580{
2577 /* 2581 /*
@@ -2581,10 +2585,10 @@ init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2581 * 2585 *
2582 */ 2586 */
2583 2587
2584 return true; 2588 return 1;
2585} 2589}
2586 2590
2587static bool 2591static int
2588init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2592init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2589{ 2593{
2590 /* 2594 /*
@@ -2594,10 +2598,10 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2594 * 2598 *
2595 */ 2599 */
2596 2600
2597 return true; 2601 return 1;
2598} 2602}
2599 2603
2600static bool 2604static int
2601init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2605init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2602{ 2606{
2603 /* 2607 /*
@@ -2615,14 +2619,17 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2615 const uint8_t *gpio_entry; 2619 const uint8_t *gpio_entry;
2616 int i; 2620 int i;
2617 2621
2622 if (!iexec->execute)
2623 return 1;
2624
2618 if (bios->bdcb.version != 0x40) { 2625 if (bios->bdcb.version != 0x40) {
2619 NV_ERROR(bios->dev, "DCB table not version 4.0\n"); 2626 NV_ERROR(bios->dev, "DCB table not version 4.0\n");
2620 return false; 2627 return 0;
2621 } 2628 }
2622 2629
2623 if (!bios->bdcb.gpio_table_ptr) { 2630 if (!bios->bdcb.gpio_table_ptr) {
2624 NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n"); 2631 NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
2625 return false; 2632 return 0;
2626 } 2633 }
2627 2634
2628 gpio_entry = gpio_table + gpio_table[1]; 2635 gpio_entry = gpio_table + gpio_table[1];
@@ -2660,13 +2667,10 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2660 bios_wr32(bios, r, v); 2667 bios_wr32(bios, r, v);
2661 } 2668 }
2662 2669
2663 return true; 2670 return 1;
2664} 2671}
2665 2672
2666/* hack to avoid moving the itbl_entry array before this function */ 2673static int
2667int init_ram_restrict_zm_reg_group_blocklen;
2668
2669static bool
2670init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, 2674init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
2671 struct init_exec *iexec) 2675 struct init_exec *iexec)
2672{ 2676{
@@ -2692,21 +2696,21 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
2692 uint8_t regincrement = bios->data[offset + 5]; 2696 uint8_t regincrement = bios->data[offset + 5];
2693 uint8_t count = bios->data[offset + 6]; 2697 uint8_t count = bios->data[offset + 6];
2694 uint32_t strap_ramcfg, data; 2698 uint32_t strap_ramcfg, data;
2695 uint16_t blocklen; 2699 /* previously set by 'M' BIT table */
2700 uint16_t blocklen = bios->ram_restrict_group_count * 4;
2701 int len = 7 + count * blocklen;
2696 uint8_t index; 2702 uint8_t index;
2697 int i; 2703 int i;
2698 2704
2699 /* previously set by 'M' BIT table */
2700 blocklen = init_ram_restrict_zm_reg_group_blocklen;
2701 2705
2702 if (!iexec->execute) 2706 if (!iexec->execute)
2703 return true; 2707 return len;
2704 2708
2705 if (!blocklen) { 2709 if (!blocklen) {
2706 NV_ERROR(bios->dev, 2710 NV_ERROR(bios->dev,
2707 "0x%04X: Zero block length - has the M table " 2711 "0x%04X: Zero block length - has the M table "
2708 "been parsed?\n", offset); 2712 "been parsed?\n", offset);
2709 return false; 2713 return 0;
2710 } 2714 }
2711 2715
2712 strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf; 2716 strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
@@ -2724,10 +2728,10 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
2724 reg += regincrement; 2728 reg += regincrement;
2725 } 2729 }
2726 2730
2727 return true; 2731 return len;
2728} 2732}
2729 2733
2730static bool 2734static int
2731init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2735init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2732{ 2736{
2733 /* 2737 /*
@@ -2744,14 +2748,14 @@ init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2744 uint32_t dstreg = ROM32(bios->data[offset + 5]); 2748 uint32_t dstreg = ROM32(bios->data[offset + 5]);
2745 2749
2746 if (!iexec->execute) 2750 if (!iexec->execute)
2747 return true; 2751 return 9;
2748 2752
2749 bios_wr32(bios, dstreg, bios_rd32(bios, srcreg)); 2753 bios_wr32(bios, dstreg, bios_rd32(bios, srcreg));
2750 2754
2751 return true; 2755 return 9;
2752} 2756}
2753 2757
2754static bool 2758static int
2755init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset, 2759init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
2756 struct init_exec *iexec) 2760 struct init_exec *iexec)
2757{ 2761{
@@ -2769,20 +2773,21 @@ init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
2769 2773
2770 uint32_t reg = ROM32(bios->data[offset + 1]); 2774 uint32_t reg = ROM32(bios->data[offset + 1]);
2771 uint8_t count = bios->data[offset + 5]; 2775 uint8_t count = bios->data[offset + 5];
2776 int len = 6 + count * 4;
2772 int i; 2777 int i;
2773 2778
2774 if (!iexec->execute) 2779 if (!iexec->execute)
2775 return true; 2780 return len;
2776 2781
2777 for (i = 0; i < count; i++) { 2782 for (i = 0; i < count; i++) {
2778 uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]); 2783 uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]);
2779 bios_wr32(bios, reg, data); 2784 bios_wr32(bios, reg, data);
2780 } 2785 }
2781 2786
2782 return true; 2787 return len;
2783} 2788}
2784 2789
2785static bool 2790static int
2786init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2791init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2787{ 2792{
2788 /* 2793 /*
@@ -2793,10 +2798,10 @@ init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2793 * Seemingly does nothing 2798 * Seemingly does nothing
2794 */ 2799 */
2795 2800
2796 return true; 2801 return 1;
2797} 2802}
2798 2803
2799static bool 2804static int
2800init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2805init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2801{ 2806{
2802 /* 2807 /*
@@ -2829,13 +2834,13 @@ init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2829 val <<= bios->data[offset + 16]; 2834 val <<= bios->data[offset + 16];
2830 2835
2831 if (!iexec->execute) 2836 if (!iexec->execute)
2832 return true; 2837 return 17;
2833 2838
2834 bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val); 2839 bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val);
2835 return true; 2840 return 17;
2836} 2841}
2837 2842
2838static bool 2843static int
2839init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2844init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2840{ 2845{
2841 /* 2846 /*
@@ -2859,13 +2864,13 @@ init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2859 val = (val & mask) | ((val + add) & ~mask); 2864 val = (val & mask) | ((val + add) & ~mask);
2860 2865
2861 if (!iexec->execute) 2866 if (!iexec->execute)
2862 return true; 2867 return 13;
2863 2868
2864 bios_wr32(bios, reg, val); 2869 bios_wr32(bios, reg, val);
2865 return true; 2870 return 13;
2866} 2871}
2867 2872
2868static bool 2873static int
2869init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2874init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2870{ 2875{
2871 /* 2876 /*
@@ -2883,32 +2888,33 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2883 struct drm_device *dev = bios->dev; 2888 struct drm_device *dev = bios->dev;
2884 struct nouveau_i2c_chan *auxch; 2889 struct nouveau_i2c_chan *auxch;
2885 uint32_t addr = ROM32(bios->data[offset + 1]); 2890 uint32_t addr = ROM32(bios->data[offset + 1]);
2886 uint8_t len = bios->data[offset + 5]; 2891 uint8_t count = bios->data[offset + 5];
2892 int len = 6 + count * 2;
2887 int ret, i; 2893 int ret, i;
2888 2894
2889 if (!bios->display.output) { 2895 if (!bios->display.output) {
2890 NV_ERROR(dev, "INIT_AUXCH: no active output\n"); 2896 NV_ERROR(dev, "INIT_AUXCH: no active output\n");
2891 return false; 2897 return 0;
2892 } 2898 }
2893 2899
2894 auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); 2900 auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
2895 if (!auxch) { 2901 if (!auxch) {
2896 NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n", 2902 NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
2897 bios->display.output->i2c_index); 2903 bios->display.output->i2c_index);
2898 return false; 2904 return 0;
2899 } 2905 }
2900 2906
2901 if (!iexec->execute) 2907 if (!iexec->execute)
2902 return true; 2908 return len;
2903 2909
2904 offset += 6; 2910 offset += 6;
2905 for (i = 0; i < len; i++, offset += 2) { 2911 for (i = 0; i < count; i++, offset += 2) {
2906 uint8_t data; 2912 uint8_t data;
2907 2913
2908 ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1); 2914 ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
2909 if (ret) { 2915 if (ret) {
2910 NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret); 2916 NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
2911 return false; 2917 return 0;
2912 } 2918 }
2913 2919
2914 data &= bios->data[offset + 0]; 2920 data &= bios->data[offset + 0];
@@ -2917,14 +2923,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2917 ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1); 2923 ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
2918 if (ret) { 2924 if (ret) {
2919 NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret); 2925 NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
2920 return false; 2926 return 0;
2921 } 2927 }
2922 } 2928 }
2923 2929
2924 return true; 2930 return len;
2925} 2931}
2926 2932
2927static bool 2933static int
2928init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) 2934init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2929{ 2935{
2930 /* 2936 /*
@@ -2941,106 +2947,99 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2941 struct drm_device *dev = bios->dev; 2947 struct drm_device *dev = bios->dev;
2942 struct nouveau_i2c_chan *auxch; 2948 struct nouveau_i2c_chan *auxch;
2943 uint32_t addr = ROM32(bios->data[offset + 1]); 2949 uint32_t addr = ROM32(bios->data[offset + 1]);
2944 uint8_t len = bios->data[offset + 5]; 2950 uint8_t count = bios->data[offset + 5];
2951 int len = 6 + count;
2945 int ret, i; 2952 int ret, i;
2946 2953
2947 if (!bios->display.output) { 2954 if (!bios->display.output) {
2948 NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n"); 2955 NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
2949 return false; 2956 return 0;
2950 } 2957 }
2951 2958
2952 auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); 2959 auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
2953 if (!auxch) { 2960 if (!auxch) {
2954 NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n", 2961 NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
2955 bios->display.output->i2c_index); 2962 bios->display.output->i2c_index);
2956 return false; 2963 return 0;
2957 } 2964 }
2958 2965
2959 if (!iexec->execute) 2966 if (!iexec->execute)
2960 return true; 2967 return len;
2961 2968
2962 offset += 6; 2969 offset += 6;
2963 for (i = 0; i < len; i++, offset++) { 2970 for (i = 0; i < count; i++, offset++) {
2964 ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1); 2971 ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
2965 if (ret) { 2972 if (ret) {
2966 NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret); 2973 NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
2967 return false; 2974 return 0;
2968 } 2975 }
2969 } 2976 }
2970 2977
2971 return true; 2978 return len;
2972} 2979}
2973 2980
2974static struct init_tbl_entry itbl_entry[] = { 2981static struct init_tbl_entry itbl_entry[] = {
2975 /* command name , id , length , offset , mult , command handler */ 2982 /* command name , id , length , offset , mult , command handler */
2976 /* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */ 2983 /* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
2977 { "INIT_IO_RESTRICT_PROG" , 0x32, 11 , 6 , 4 , init_io_restrict_prog }, 2984 { "INIT_IO_RESTRICT_PROG" , 0x32, init_io_restrict_prog },
2978 { "INIT_REPEAT" , 0x33, 2 , 0 , 0 , init_repeat }, 2985 { "INIT_REPEAT" , 0x33, init_repeat },
2979 { "INIT_IO_RESTRICT_PLL" , 0x34, 12 , 7 , 2 , init_io_restrict_pll }, 2986 { "INIT_IO_RESTRICT_PLL" , 0x34, init_io_restrict_pll },
2980 { "INIT_END_REPEAT" , 0x36, 1 , 0 , 0 , init_end_repeat }, 2987 { "INIT_END_REPEAT" , 0x36, init_end_repeat },
2981 { "INIT_COPY" , 0x37, 11 , 0 , 0 , init_copy }, 2988 { "INIT_COPY" , 0x37, init_copy },
2982 { "INIT_NOT" , 0x38, 1 , 0 , 0 , init_not }, 2989 { "INIT_NOT" , 0x38, init_not },
2983 { "INIT_IO_FLAG_CONDITION" , 0x39, 2 , 0 , 0 , init_io_flag_condition }, 2990 { "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition },
2984 { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, 18 , 17 , 2 , init_idx_addr_latched }, 2991 { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched },
2985 { "INIT_IO_RESTRICT_PLL2" , 0x4A, 11 , 6 , 4 , init_io_restrict_pll2 }, 2992 { "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 },
2986 { "INIT_PLL2" , 0x4B, 9 , 0 , 0 , init_pll2 }, 2993 { "INIT_PLL2" , 0x4B, init_pll2 },
2987 { "INIT_I2C_BYTE" , 0x4C, 4 , 3 , 3 , init_i2c_byte }, 2994 { "INIT_I2C_BYTE" , 0x4C, init_i2c_byte },
2988 { "INIT_ZM_I2C_BYTE" , 0x4D, 4 , 3 , 2 , init_zm_i2c_byte }, 2995 { "INIT_ZM_I2C_BYTE" , 0x4D, init_zm_i2c_byte },
2989 { "INIT_ZM_I2C" , 0x4E, 4 , 3 , 1 , init_zm_i2c }, 2996 { "INIT_ZM_I2C" , 0x4E, init_zm_i2c },
2990 { "INIT_TMDS" , 0x4F, 5 , 0 , 0 , init_tmds }, 2997 { "INIT_TMDS" , 0x4F, init_tmds },
2991 { "INIT_ZM_TMDS_GROUP" , 0x50, 3 , 2 , 2 , init_zm_tmds_group }, 2998 { "INIT_ZM_TMDS_GROUP" , 0x50, init_zm_tmds_group },
2992 { "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, 5 , 4 , 1 , init_cr_idx_adr_latch }, 2999 { "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, init_cr_idx_adr_latch },
2993 { "INIT_CR" , 0x52, 4 , 0 , 0 , init_cr }, 3000 { "INIT_CR" , 0x52, init_cr },
2994 { "INIT_ZM_CR" , 0x53, 3 , 0 , 0 , init_zm_cr }, 3001 { "INIT_ZM_CR" , 0x53, init_zm_cr },
2995 { "INIT_ZM_CR_GROUP" , 0x54, 2 , 1 , 2 , init_zm_cr_group }, 3002 { "INIT_ZM_CR_GROUP" , 0x54, init_zm_cr_group },
2996 { "INIT_CONDITION_TIME" , 0x56, 3 , 0 , 0 , init_condition_time }, 3003 { "INIT_CONDITION_TIME" , 0x56, init_condition_time },
2997 { "INIT_ZM_REG_SEQUENCE" , 0x58, 6 , 5 , 4 , init_zm_reg_sequence }, 3004 { "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence },
2998 /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */ 3005 /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
2999 { "INIT_SUB_DIRECT" , 0x5B, 3 , 0 , 0 , init_sub_direct }, 3006 { "INIT_SUB_DIRECT" , 0x5B, init_sub_direct },
3000 { "INIT_COPY_NV_REG" , 0x5F, 22 , 0 , 0 , init_copy_nv_reg }, 3007 { "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg },
3001 { "INIT_ZM_INDEX_IO" , 0x62, 5 , 0 , 0 , init_zm_index_io }, 3008 { "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io },
3002 { "INIT_COMPUTE_MEM" , 0x63, 1 , 0 , 0 , init_compute_mem }, 3009 { "INIT_COMPUTE_MEM" , 0x63, init_compute_mem },
3003 { "INIT_RESET" , 0x65, 13 , 0 , 0 , init_reset }, 3010 { "INIT_RESET" , 0x65, init_reset },
3004 { "INIT_CONFIGURE_MEM" , 0x66, 1 , 0 , 0 , init_configure_mem }, 3011 { "INIT_CONFIGURE_MEM" , 0x66, init_configure_mem },
3005 { "INIT_CONFIGURE_CLK" , 0x67, 1 , 0 , 0 , init_configure_clk }, 3012 { "INIT_CONFIGURE_CLK" , 0x67, init_configure_clk },
3006 { "INIT_CONFIGURE_PREINIT" , 0x68, 1 , 0 , 0 , init_configure_preinit }, 3013 { "INIT_CONFIGURE_PREINIT" , 0x68, init_configure_preinit },
3007 { "INIT_IO" , 0x69, 5 , 0 , 0 , init_io }, 3014 { "INIT_IO" , 0x69, init_io },
3008 { "INIT_SUB" , 0x6B, 2 , 0 , 0 , init_sub }, 3015 { "INIT_SUB" , 0x6B, init_sub },
3009 { "INIT_RAM_CONDITION" , 0x6D, 3 , 0 , 0 , init_ram_condition }, 3016 { "INIT_RAM_CONDITION" , 0x6D, init_ram_condition },
3010 { "INIT_NV_REG" , 0x6E, 13 , 0 , 0 , init_nv_reg }, 3017 { "INIT_NV_REG" , 0x6E, init_nv_reg },
3011 { "INIT_MACRO" , 0x6F, 2 , 0 , 0 , init_macro }, 3018 { "INIT_MACRO" , 0x6F, init_macro },
3012 { "INIT_DONE" , 0x71, 1 , 0 , 0 , init_done }, 3019 { "INIT_DONE" , 0x71, init_done },
3013 { "INIT_RESUME" , 0x72, 1 , 0 , 0 , init_resume }, 3020 { "INIT_RESUME" , 0x72, init_resume },
3014 /* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */ 3021 /* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */
3015 { "INIT_TIME" , 0x74, 3 , 0 , 0 , init_time }, 3022 { "INIT_TIME" , 0x74, init_time },
3016 { "INIT_CONDITION" , 0x75, 2 , 0 , 0 , init_condition }, 3023 { "INIT_CONDITION" , 0x75, init_condition },
3017 { "INIT_IO_CONDITION" , 0x76, 2 , 0 , 0 , init_io_condition }, 3024 { "INIT_IO_CONDITION" , 0x76, init_io_condition },
3018 { "INIT_INDEX_IO" , 0x78, 6 , 0 , 0 , init_index_io }, 3025 { "INIT_INDEX_IO" , 0x78, init_index_io },
3019 { "INIT_PLL" , 0x79, 7 , 0 , 0 , init_pll }, 3026 { "INIT_PLL" , 0x79, init_pll },
3020 { "INIT_ZM_REG" , 0x7A, 9 , 0 , 0 , init_zm_reg }, 3027 { "INIT_ZM_REG" , 0x7A, init_zm_reg },
3021 /* INIT_RAM_RESTRICT_PLL's length is adjusted by the BIT M table */ 3028 { "INIT_RAM_RESTRICT_PLL" , 0x87, init_ram_restrict_pll },
3022 { "INIT_RAM_RESTRICT_PLL" , 0x87, 2 , 0 , 0 , init_ram_restrict_pll }, 3029 { "INIT_8C" , 0x8C, init_8c },
3023 { "INIT_8C" , 0x8C, 1 , 0 , 0 , init_8c }, 3030 { "INIT_8D" , 0x8D, init_8d },
3024 { "INIT_8D" , 0x8D, 1 , 0 , 0 , init_8d }, 3031 { "INIT_GPIO" , 0x8E, init_gpio },
3025 { "INIT_GPIO" , 0x8E, 1 , 0 , 0 , init_gpio }, 3032 { "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, init_ram_restrict_zm_reg_group },
3026 /* INIT_RAM_RESTRICT_ZM_REG_GROUP's mult is loaded by M table in BIT */ 3033 { "INIT_COPY_ZM_REG" , 0x90, init_copy_zm_reg },
3027 { "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, 7 , 6 , 0 , init_ram_restrict_zm_reg_group }, 3034 { "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, init_zm_reg_group_addr_latched },
3028 { "INIT_COPY_ZM_REG" , 0x90, 9 , 0 , 0 , init_copy_zm_reg }, 3035 { "INIT_RESERVED" , 0x92, init_reserved },
3029 { "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, 6 , 5 , 4 , init_zm_reg_group_addr_latched }, 3036 { "INIT_96" , 0x96, init_96 },
3030 { "INIT_RESERVED" , 0x92, 1 , 0 , 0 , init_reserved }, 3037 { "INIT_97" , 0x97, init_97 },
3031 { "INIT_96" , 0x96, 17 , 0 , 0 , init_96 }, 3038 { "INIT_AUXCH" , 0x98, init_auxch },
3032 { "INIT_97" , 0x97, 13 , 0 , 0 , init_97 }, 3039 { "INIT_ZM_AUXCH" , 0x99, init_zm_auxch },
3033 { "INIT_AUXCH" , 0x98, 6 , 5 , 2 , init_auxch }, 3040 { NULL , 0 , NULL }
3034 { "INIT_ZM_AUXCH" , 0x99, 6 , 5 , 1 , init_zm_auxch },
3035 { NULL , 0 , 0 , 0 , 0 , NULL }
3036}; 3041};
3037 3042
3038static unsigned int get_init_table_entry_length(struct nvbios *bios, unsigned int offset, int i)
3039{
3040 /* Calculates the length of a given init table entry. */
3041 return itbl_entry[i].length + bios->data[offset + itbl_entry[i].length_offset]*itbl_entry[i].length_multiplier;
3042}
3043
3044#define MAX_TABLE_OPS 1000 3043#define MAX_TABLE_OPS 1000
3045 3044
3046static int 3045static int
@@ -3056,7 +3055,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
3056 * is changed back to EXECUTE. 3055 * is changed back to EXECUTE.
3057 */ 3056 */
3058 3057
3059 int count = 0, i; 3058 int count = 0, i, res;
3060 uint8_t id; 3059 uint8_t id;
3061 3060
3062 /* 3061 /*
@@ -3076,22 +3075,21 @@ parse_init_table(struct nvbios *bios, unsigned int offset,
3076 offset, itbl_entry[i].id, itbl_entry[i].name); 3075 offset, itbl_entry[i].id, itbl_entry[i].name);
3077 3076
3078 /* execute eventual command handler */ 3077 /* execute eventual command handler */
3079 if (itbl_entry[i].handler) 3078 res = (*itbl_entry[i].handler)(bios, offset, iexec);
3080 if (!(*itbl_entry[i].handler)(bios, offset, iexec)) 3079 if (!res)
3081 break; 3080 break;
3081 /*
3082 * Add the offset of the current command including all data
3083 * of that command. The offset will then be pointing on the
3084 * next op code.
3085 */
3086 offset += res;
3082 } else { 3087 } else {
3083 NV_ERROR(bios->dev, 3088 NV_ERROR(bios->dev,
3084 "0x%04X: Init table command not found: " 3089 "0x%04X: Init table command not found: "
3085 "0x%02X\n", offset, id); 3090 "0x%02X\n", offset, id);
3086 return -ENOENT; 3091 return -ENOENT;
3087 } 3092 }
3088
3089 /*
3090 * Add the offset of the current command including all data
3091 * of that command. The offset will then be pointing on the
3092 * next op code.
3093 */
3094 offset += get_init_table_entry_length(bios, offset, i);
3095 } 3093 }
3096 3094
3097 if (offset >= bios->length) 3095 if (offset >= bios->length)
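
For illustration, a minimal self-contained userspace sketch of the dispatch pattern this hunk moves to: each opcode handler returns the number of bytes it consumed (0 on error) and the parser advances by that amount, so the static length/offset/multiplier columns and the lengths patched in at BIT-'M'-parse time are no longer needed. The toy opcodes and all names below are invented for the example, not taken from the driver.

/* Toy init-script interpreter: handlers report their own length. */
#include <stdint.h>
#include <stdio.h>

struct script {
	const uint8_t *data;
	unsigned int len;
};

/* 1 byte opcode + 4 byte register + 4 byte value */
static int op_zm_reg(struct script *s, unsigned int off)
{
	uint32_t reg, val;

	if (off + 9 > s->len)
		return 0;
	reg = s->data[off + 1] | s->data[off + 2] << 8 |
	      s->data[off + 3] << 16 | (uint32_t)s->data[off + 4] << 24;
	val = s->data[off + 5] | s->data[off + 6] << 8 |
	      s->data[off + 7] << 16 | (uint32_t)s->data[off + 8] << 24;
	printf("wr32 0x%08x <- 0x%08x\n", reg, val);
	return 9;
}

/* opcode only, no payload */
static int op_reserved(struct script *s, unsigned int off)
{
	(void)s; (void)off;
	return 1;
}

static const struct {
	const char *name;
	uint8_t id;
	int (*handler)(struct script *, unsigned int);
} ops[] = {
	{ "ZM_REG"  , 0x7a, op_zm_reg },
	{ "RESERVED", 0x92, op_reserved },
	{ NULL, 0, NULL }
};

static int parse(struct script *s)
{
	unsigned int off = 0;

	while (off < s->len) {
		int i, res;

		for (i = 0; ops[i].name; i++)
			if (ops[i].id == s->data[off])
				break;
		if (!ops[i].name)
			return -1;		/* unknown opcode */

		res = ops[i].handler(s, off);
		if (!res)
			return -1;		/* handler flagged an error */
		off += res;			/* skip opcode plus its data */
	}
	return 0;
}

int main(void)
{
	static const uint8_t data[] = {
		0x92,				/* RESERVED */
		0x7a, 0x00, 0x10, 0x40, 0x00,	/* ZM_REG, reg 0x00401000 */
		0x78, 0x56, 0x34, 0x12,		/*         value 0x12345678 */
		0x92,				/* RESERVED */
	};
	struct script s = { data, sizeof(data) };

	return parse(&s);
}
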
@@ -3854,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3854 * script tables is a pointer to the script to execute. 3852 * script tables is a pointer to the script to execute.
3855 */ 3853 */
3856 3854
3857 NV_DEBUG(dev, "Searching for output entry for %d %d %d\n", 3855 NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
3858 dcbent->type, dcbent->location, dcbent->or); 3856 dcbent->type, dcbent->location, dcbent->or);
3859 otable = bios_output_config_match(dev, dcbent, table[1] + 3857 otable = bios_output_config_match(dev, dcbent, table[1] +
3860 bios->display.script_table_ptr, 3858 bios->display.script_table_ptr,
@@ -3884,7 +3882,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3884 if (pxclk == 0) { 3882 if (pxclk == 0) {
3885 script = ROM16(otable[6]); 3883 script = ROM16(otable[6]);
3886 if (!script) { 3884 if (!script) {
3887 NV_DEBUG(dev, "output script 0 not found\n"); 3885 NV_DEBUG_KMS(dev, "output script 0 not found\n");
3888 return 1; 3886 return 1;
3889 } 3887 }
3890 3888
@@ -3894,7 +3892,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3894 if (pxclk == -1) { 3892 if (pxclk == -1) {
3895 script = ROM16(otable[8]); 3893 script = ROM16(otable[8]);
3896 if (!script) { 3894 if (!script) {
3897 NV_DEBUG(dev, "output script 1 not found\n"); 3895 NV_DEBUG_KMS(dev, "output script 1 not found\n");
3898 return 1; 3896 return 1;
3899 } 3897 }
3900 3898
@@ -3907,7 +3905,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3907 else 3905 else
3908 script = 0; 3906 script = 0;
3909 if (!script) { 3907 if (!script) {
3910 NV_DEBUG(dev, "output script 2 not found\n"); 3908 NV_DEBUG_KMS(dev, "output script 2 not found\n");
3911 return 1; 3909 return 1;
3912 } 3910 }
3913 3911
@@ -3931,7 +3929,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3931 if (script) 3929 if (script)
3932 script = clkcmptable(bios, script, -pxclk); 3930 script = clkcmptable(bios, script, -pxclk);
3933 if (!script) { 3931 if (!script) {
3934 NV_DEBUG(dev, "clock script 1 not found\n"); 3932 NV_DEBUG_KMS(dev, "clock script 1 not found\n");
3935 return 1; 3933 return 1;
3936 } 3934 }
3937 3935
@@ -4606,10 +4604,6 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
4606 * stuff that we don't use - their use currently unknown 4604 * stuff that we don't use - their use currently unknown
4607 */ 4605 */
4608 4606
4609 uint16_t rr_strap_xlat;
4610 uint8_t rr_group_count;
4611 int i;
4612
4613 /* 4607 /*
4614 * Older bios versions don't have a sufficiently long table for 4608 * Older bios versions don't have a sufficiently long table for
4615 * what we want 4609 * what we want
@@ -4618,24 +4612,13 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
4618 return 0; 4612 return 0;
4619 4613
4620 if (bitentry->id[1] < 2) { 4614 if (bitentry->id[1] < 2) {
4621 rr_group_count = bios->data[bitentry->offset + 2]; 4615 bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
4622 rr_strap_xlat = ROM16(bios->data[bitentry->offset + 3]); 4616 bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
4623 } else { 4617 } else {
4624 rr_group_count = bios->data[bitentry->offset + 0]; 4618 bios->ram_restrict_group_count = bios->data[bitentry->offset + 0];
4625 rr_strap_xlat = ROM16(bios->data[bitentry->offset + 1]); 4619 bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 1]);
4626 } 4620 }
4627 4621
4628 /* adjust length of INIT_87 */
4629 for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != 0x87); i++);
4630 itbl_entry[i].length += rr_group_count * 4;
4631
4632 /* set up multiplier for INIT_RAM_RESTRICT_ZM_REG_GROUP */
4633 for (; itbl_entry[i].name && (itbl_entry[i].id != 0x8f); i++);
4634 itbl_entry[i].length_multiplier = rr_group_count * 4;
4635
4636 init_ram_restrict_zm_reg_group_blocklen = itbl_entry[i].length_multiplier;
4637 bios->ram_restrict_tbl_ptr = rr_strap_xlat;
4638
4639 return 0; 4622 return 0;
4640} 4623}
4641 4624
@@ -5234,7 +5217,7 @@ parse_dcb_connector_table(struct nvbios *bios)
5234 int i; 5217 int i;
5235 5218
5236 if (!bios->bdcb.connector_table_ptr) { 5219 if (!bios->bdcb.connector_table_ptr) {
5237 NV_DEBUG(dev, "No DCB connector table present\n"); 5220 NV_DEBUG_KMS(dev, "No DCB connector table present\n");
5238 return; 5221 return;
5239 } 5222 }
5240 5223
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 1d5f10bd78ed..058e98c76d89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -227,6 +227,7 @@ struct nvbios {
227 227
228 uint16_t pll_limit_tbl_ptr; 228 uint16_t pll_limit_tbl_ptr;
229 uint16_t ram_restrict_tbl_ptr; 229 uint16_t ram_restrict_tbl_ptr;
230 uint8_t ram_restrict_group_count;
230 231
231 uint16_t some_script_ptr; /* BIT I + 14 */ 232 uint16_t some_script_ptr; /* BIT I + 14 */
232 uint16_t init96_tbl_ptr; /* BIT I + 16 */ 233 uint16_t init96_tbl_ptr; /* BIT I + 16 */
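
A hedged sketch of why the new field is sufficient on its own: with the group count kept in the parsed-BIOS state, a handler can derive its per-entry block length at execution time instead of depending on a table entry patched while the BIT 'M' table is parsed. Structure and function names are illustrative only.

#include <stdint.h>
#include <stdio.h>

struct bios_state {
	uint8_t ram_restrict_group_count;	/* filled in from the BIT 'M' table */
};

/*
 * INIT_RAM_RESTRICT_ZM_REG_GROUP-style sizing: a 7-byte header followed by
 * <count> blocks, each <group_count> 32-bit values long.  Returns 0 when the
 * 'M' table has not been parsed yet, mirroring the zero-block-length check.
 */
static int zm_reg_group_len(const struct bios_state *b, const uint8_t *op)
{
	unsigned int blocklen = b->ram_restrict_group_count * 4;
	uint8_t count = op[6];

	if (!blocklen)
		return 0;
	return 7 + count * blocklen;
}

int main(void)
{
	struct bios_state b = { .ram_restrict_group_count = 16 };
	uint8_t op[7] = { 0x8f, 0, 0, 0, 0, 4, 2 };	/* count = 2 */

	printf("entry length: %d bytes\n", zm_reg_group_len(&b, op));
	return 0;
}
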
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index aa2dfbc3e351..0cad6d834eb2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -154,6 +154,11 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
154 nvbo->placement.busy_placement = nvbo->placements; 154 nvbo->placement.busy_placement = nvbo->placements;
155 nvbo->placement.num_placement = n; 155 nvbo->placement.num_placement = n;
156 nvbo->placement.num_busy_placement = n; 156 nvbo->placement.num_busy_placement = n;
157
158 if (nvbo->pin_refcnt) {
159 while (n--)
160 nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
161 }
157} 162}
158 163
159int 164int
@@ -400,10 +405,16 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
400 struct nouveau_bo *nvbo = nouveau_bo(bo); 405 struct nouveau_bo *nvbo = nouveau_bo(bo);
401 406
402 switch (bo->mem.mem_type) { 407 switch (bo->mem.mem_type) {
408 case TTM_PL_VRAM:
409 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
410 TTM_PL_FLAG_SYSTEM);
411 break;
403 default: 412 default:
404 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); 413 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
405 break; 414 break;
406 } 415 }
416
417 *pl = nvbo->placement;
407} 418}
408 419
409 420
@@ -455,11 +466,8 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
455 int ret; 466 int ret;
456 467
457 chan = nvbo->channel; 468 chan = nvbo->channel;
458 if (!chan || nvbo->tile_flags || nvbo->no_vm) { 469 if (!chan || nvbo->tile_flags || nvbo->no_vm)
459 chan = dev_priv->channel; 470 chan = dev_priv->channel;
460 if (!chan)
461 return -EINVAL;
462 }
463 471
464 src_offset = old_mem->mm_node->start << PAGE_SHIFT; 472 src_offset = old_mem->mm_node->start << PAGE_SHIFT;
465 dst_offset = new_mem->mm_node->start << PAGE_SHIFT; 473 dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
@@ -625,7 +633,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
625 return ret; 633 return ret;
626 } 634 }
627 635
628 if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE) 636 if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
637 !dev_priv->channel)
629 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); 638 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
630 639
631 if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { 640 if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
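
A self-contained sketch of the pinning rule added above: once the placement list is built, a buffer with a non-zero pin refcount gets a no-evict flag OR'd into every entry so the memory manager cannot migrate it. The flag values here are invented; the real TTM_PL_FLAG_* constants differ.

#include <stdint.h>
#include <stdio.h>

#define PL_FLAG_SYSTEM   0x1
#define PL_FLAG_TT       0x2
#define PL_FLAG_VRAM     0x4
#define PL_FLAG_NO_EVICT 0x8

struct bo {
	uint32_t placements[3];
	unsigned int n;
	int pin_refcnt;
};

/* Build the placement list, then mark every entry no-evict if pinned. */
static void placement_set(struct bo *bo, uint32_t memtype)
{
	unsigned int n = 0;

	if (memtype & PL_FLAG_VRAM)
		bo->placements[n++] = PL_FLAG_VRAM;
	if (memtype & PL_FLAG_TT)
		bo->placements[n++] = PL_FLAG_TT;
	if (memtype & PL_FLAG_SYSTEM)
		bo->placements[n++] = PL_FLAG_SYSTEM;
	bo->n = n;

	if (bo->pin_refcnt) {
		while (n--)
			bo->placements[n] |= PL_FLAG_NO_EVICT;
	}
}

int main(void)
{
	struct bo bo = { .pin_refcnt = 1 };
	unsigned int i;

	placement_set(&bo, PL_FLAG_VRAM | PL_FLAG_TT);
	for (i = 0; i < bo.n; i++)
		printf("placement[%u] = 0x%x\n", i, bo.placements[i]);
	return 0;
}
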
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 032cf098fa1c..5a10deb8bdbd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -86,7 +86,7 @@ nouveau_connector_destroy(struct drm_connector *drm_connector)
86 struct nouveau_connector *connector = nouveau_connector(drm_connector); 86 struct nouveau_connector *connector = nouveau_connector(drm_connector);
87 struct drm_device *dev = connector->base.dev; 87 struct drm_device *dev = connector->base.dev;
88 88
89 NV_DEBUG(dev, "\n"); 89 NV_DEBUG_KMS(dev, "\n");
90 90
91 if (!connector) 91 if (!connector)
92 return; 92 return;
@@ -420,7 +420,7 @@ nouveau_connector_native_mode(struct nouveau_connector *connector)
420 /* Use preferred mode if there is one.. */ 420 /* Use preferred mode if there is one.. */
421 list_for_each_entry(mode, &connector->base.probed_modes, head) { 421 list_for_each_entry(mode, &connector->base.probed_modes, head) {
422 if (mode->type & DRM_MODE_TYPE_PREFERRED) { 422 if (mode->type & DRM_MODE_TYPE_PREFERRED) {
423 NV_DEBUG(dev, "native mode from preferred\n"); 423 NV_DEBUG_KMS(dev, "native mode from preferred\n");
424 return drm_mode_duplicate(dev, mode); 424 return drm_mode_duplicate(dev, mode);
425 } 425 }
426 } 426 }
@@ -445,7 +445,7 @@ nouveau_connector_native_mode(struct nouveau_connector *connector)
445 largest = mode; 445 largest = mode;
446 } 446 }
447 447
448 NV_DEBUG(dev, "native mode from largest: %dx%d@%d\n", 448 NV_DEBUG_KMS(dev, "native mode from largest: %dx%d@%d\n",
449 high_w, high_h, high_v); 449 high_w, high_h, high_v);
450 return largest ? drm_mode_duplicate(dev, largest) : NULL; 450 return largest ? drm_mode_duplicate(dev, largest) : NULL;
451} 451}
@@ -725,7 +725,7 @@ nouveau_connector_create(struct drm_device *dev, int index, int type)
725 struct drm_encoder *encoder; 725 struct drm_encoder *encoder;
726 int ret; 726 int ret;
727 727
728 NV_DEBUG(dev, "\n"); 728 NV_DEBUG_KMS(dev, "\n");
729 729
730 nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); 730 nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
731 if (!nv_connector) 731 if (!nv_connector)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index de61f4640e12..9e2926c48579 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -187,7 +187,7 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
187 if (ret) 187 if (ret)
188 return false; 188 return false;
189 189
190 NV_DEBUG(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]); 190 NV_DEBUG_KMS(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
191 191
192 /* Keep all lanes at the same level.. */ 192 /* Keep all lanes at the same level.. */
193 for (i = 0; i < nv_encoder->dp.link_nr; i++) { 193 for (i = 0; i < nv_encoder->dp.link_nr; i++) {
@@ -228,7 +228,7 @@ nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config)
228 int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1); 228 int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
229 int dpe_headerlen, ret, i; 229 int dpe_headerlen, ret, i;
230 230
231 NV_DEBUG(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n", 231 NV_DEBUG_KMS(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
232 config[0], config[1], config[2], config[3]); 232 config[0], config[1], config[2], config[3]);
233 233
234 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); 234 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
@@ -276,12 +276,12 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
276 bool cr_done, cr_max_vs, eq_done; 276 bool cr_done, cr_max_vs, eq_done;
277 int ret = 0, i, tries, voltage; 277 int ret = 0, i, tries, voltage;
278 278
279 NV_DEBUG(dev, "link training!!\n"); 279 NV_DEBUG_KMS(dev, "link training!!\n");
280train: 280train:
281 cr_done = eq_done = false; 281 cr_done = eq_done = false;
282 282
283 /* set link configuration */ 283 /* set link configuration */
284 NV_DEBUG(dev, "\tbegin train: bw %d, lanes %d\n", 284 NV_DEBUG_KMS(dev, "\tbegin train: bw %d, lanes %d\n",
285 nv_encoder->dp.link_bw, nv_encoder->dp.link_nr); 285 nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);
286 286
287 ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw); 287 ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw);
@@ -297,7 +297,7 @@ train:
297 return false; 297 return false;
298 298
299 /* clock recovery */ 299 /* clock recovery */
300 NV_DEBUG(dev, "\tbegin cr\n"); 300 NV_DEBUG_KMS(dev, "\tbegin cr\n");
301 ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1); 301 ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1);
302 if (ret) 302 if (ret)
303 goto stop; 303 goto stop;
@@ -314,7 +314,7 @@ train:
314 ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2); 314 ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2);
315 if (ret) 315 if (ret)
316 break; 316 break;
317 NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n", 317 NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
318 status[0], status[1]); 318 status[0], status[1]);
319 319
320 cr_done = true; 320 cr_done = true;
@@ -346,7 +346,7 @@ train:
346 goto stop; 346 goto stop;
347 347
348 /* channel equalisation */ 348 /* channel equalisation */
349 NV_DEBUG(dev, "\tbegin eq\n"); 349 NV_DEBUG_KMS(dev, "\tbegin eq\n");
350 ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2); 350 ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2);
351 if (ret) 351 if (ret)
352 goto stop; 352 goto stop;
@@ -357,7 +357,7 @@ train:
357 ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3); 357 ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3);
358 if (ret) 358 if (ret)
359 break; 359 break;
360 NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n", 360 NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
361 status[0], status[1]); 361 status[0], status[1]);
362 362
363 eq_done = true; 363 eq_done = true;
@@ -395,9 +395,9 @@ stop:
395 395
396 /* retry at a lower setting, if possible */ 396 /* retry at a lower setting, if possible */
397 if (!ret && !(eq_done && cr_done)) { 397 if (!ret && !(eq_done && cr_done)) {
398 NV_DEBUG(dev, "\twe failed\n"); 398 NV_DEBUG_KMS(dev, "\twe failed\n");
399 if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) { 399 if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) {
400 NV_DEBUG(dev, "retry link training at low rate\n"); 400 NV_DEBUG_KMS(dev, "retry link training at low rate\n");
401 nv_encoder->dp.link_bw = DP_LINK_BW_1_62; 401 nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
402 goto train; 402 goto train;
403 } 403 }
@@ -418,7 +418,7 @@ nouveau_dp_detect(struct drm_encoder *encoder)
418 if (ret) 418 if (ret)
419 return false; 419 return false;
420 420
421 NV_DEBUG(dev, "encoder: link_bw %d, link_nr %d\n" 421 NV_DEBUG_KMS(dev, "encoder: link_bw %d, link_nr %d\n"
422 "display: link_bw %d, link_nr %d version 0x%02x\n", 422 "display: link_bw %d, link_nr %d version 0x%02x\n",
423 nv_encoder->dcb->dpconf.link_bw, 423 nv_encoder->dcb->dpconf.link_bw,
424 nv_encoder->dcb->dpconf.link_nr, 424 nv_encoder->dcb->dpconf.link_nr,
@@ -446,7 +446,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
446 uint32_t tmp, ctrl, stat = 0, data32[4] = {}; 446 uint32_t tmp, ctrl, stat = 0, data32[4] = {};
447 int ret = 0, i, index = auxch->rd; 447 int ret = 0, i, index = auxch->rd;
448 448
449 NV_DEBUG(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr); 449 NV_DEBUG_KMS(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
450 450
451 tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd)); 451 tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
452 nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000); 452 nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
@@ -472,7 +472,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
472 if (!(cmd & 1)) { 472 if (!(cmd & 1)) {
473 memcpy(data32, data, data_nr); 473 memcpy(data32, data, data_nr);
474 for (i = 0; i < 4; i++) { 474 for (i = 0; i < 4; i++) {
475 NV_DEBUG(dev, "wr %d: 0x%08x\n", i, data32[i]); 475 NV_DEBUG_KMS(dev, "wr %d: 0x%08x\n", i, data32[i]);
476 nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]); 476 nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
477 } 477 }
478 } 478 }
@@ -504,7 +504,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
504 if (cmd & 1) { 504 if (cmd & 1) {
505 for (i = 0; i < 4; i++) { 505 for (i = 0; i < 4; i++) {
506 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); 506 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
507 NV_DEBUG(dev, "rd %d: 0x%08x\n", i, data32[i]); 507 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
508 } 508 }
509 memcpy(data, data32, data_nr); 509 memcpy(data, data32, data_nr);
510 } 510 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 35249c35118f..06eb993e0883 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -35,6 +35,10 @@
35 35
36#include "drm_pciids.h" 36#include "drm_pciids.h"
37 37
38MODULE_PARM_DESC(ctxfw, "Use external firmware blob for grctx init (NV40)");
39int nouveau_ctxfw = 0;
40module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
41
38MODULE_PARM_DESC(noagp, "Disable AGP"); 42MODULE_PARM_DESC(noagp, "Disable AGP");
39int nouveau_noagp; 43int nouveau_noagp;
40module_param_named(noagp, nouveau_noagp, int, 0400); 44module_param_named(noagp, nouveau_noagp, int, 0400);
@@ -273,7 +277,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
273 277
274 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 278 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
275 chan = dev_priv->fifos[i]; 279 chan = dev_priv->fifos[i];
276 if (!chan) 280 if (!chan || !chan->pushbuf_bo)
277 continue; 281 continue;
278 282
279 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++) 283 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
@@ -341,7 +345,7 @@ static struct drm_driver driver = {
341 .owner = THIS_MODULE, 345 .owner = THIS_MODULE,
342 .open = drm_open, 346 .open = drm_open,
343 .release = drm_release, 347 .release = drm_release,
344 .ioctl = drm_ioctl, 348 .unlocked_ioctl = drm_ioctl,
345 .mmap = nouveau_ttm_mmap, 349 .mmap = nouveau_ttm_mmap,
346 .poll = drm_poll, 350 .poll = drm_poll,
347 .fasync = drm_fasync, 351 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 88b4c7b77e7f..5f8cbb79c499 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -54,6 +54,7 @@ struct nouveau_fpriv {
54#include "nouveau_drm.h" 54#include "nouveau_drm.h"
55#include "nouveau_reg.h" 55#include "nouveau_reg.h"
56#include "nouveau_bios.h" 56#include "nouveau_bios.h"
57struct nouveau_grctx;
57 58
58#define MAX_NUM_DCB_ENTRIES 16 59#define MAX_NUM_DCB_ENTRIES 16
59 60
@@ -317,6 +318,7 @@ struct nouveau_pgraph_engine {
317 bool accel_blocked; 318 bool accel_blocked;
318 void *ctxprog; 319 void *ctxprog;
319 void *ctxvals; 320 void *ctxvals;
321 int grctx_size;
320 322
321 int (*init)(struct drm_device *); 323 int (*init)(struct drm_device *);
322 void (*takedown)(struct drm_device *); 324 void (*takedown)(struct drm_device *);
@@ -647,6 +649,7 @@ extern int nouveau_fbpercrtc;
647extern char *nouveau_tv_norm; 649extern char *nouveau_tv_norm;
648extern int nouveau_reg_debug; 650extern int nouveau_reg_debug;
649extern char *nouveau_vbios; 651extern char *nouveau_vbios;
652extern int nouveau_ctxfw;
650 653
651/* nouveau_state.c */ 654/* nouveau_state.c */
652extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); 655extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
@@ -959,9 +962,7 @@ extern int nv40_graph_create_context(struct nouveau_channel *);
959extern void nv40_graph_destroy_context(struct nouveau_channel *); 962extern void nv40_graph_destroy_context(struct nouveau_channel *);
960extern int nv40_graph_load_context(struct nouveau_channel *); 963extern int nv40_graph_load_context(struct nouveau_channel *);
961extern int nv40_graph_unload_context(struct drm_device *); 964extern int nv40_graph_unload_context(struct drm_device *);
962extern int nv40_grctx_init(struct drm_device *); 965extern void nv40_grctx_init(struct nouveau_grctx *);
963extern void nv40_grctx_fini(struct drm_device *);
964extern void nv40_grctx_vals_load(struct drm_device *, struct nouveau_gpuobj *);
965 966
966/* nv50_graph.c */ 967/* nv50_graph.c */
967extern struct nouveau_pgraph_object_class nv50_graph_grclass[]; 968extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
@@ -975,6 +976,12 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
975extern int nv50_graph_unload_context(struct drm_device *); 976extern int nv50_graph_unload_context(struct drm_device *);
976extern void nv50_graph_context_switch(struct drm_device *); 977extern void nv50_graph_context_switch(struct drm_device *);
977 978
979/* nouveau_grctx.c */
980extern int nouveau_grctx_prog_load(struct drm_device *);
981extern void nouveau_grctx_vals_load(struct drm_device *,
982 struct nouveau_gpuobj *);
983extern void nouveau_grctx_fini(struct drm_device *);
984
978/* nv04_instmem.c */ 985/* nv04_instmem.c */
979extern int nv04_instmem_init(struct drm_device *); 986extern int nv04_instmem_init(struct drm_device *);
980extern void nv04_instmem_takedown(struct drm_device *); 987extern void nv04_instmem_takedown(struct drm_device *);
@@ -1207,14 +1214,24 @@ static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
1207 pci_name(d->pdev), ##arg) 1214 pci_name(d->pdev), ##arg)
1208#ifndef NV_DEBUG_NOTRACE 1215#ifndef NV_DEBUG_NOTRACE
1209#define NV_DEBUG(d, fmt, arg...) do { \ 1216#define NV_DEBUG(d, fmt, arg...) do { \
1210 if (drm_debug) { \ 1217 if (drm_debug & DRM_UT_DRIVER) { \
1218 NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
1219 __LINE__, ##arg); \
1220 } \
1221} while (0)
1222#define NV_DEBUG_KMS(d, fmt, arg...) do { \
1223 if (drm_debug & DRM_UT_KMS) { \
1211 NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \ 1224 NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
1212 __LINE__, ##arg); \ 1225 __LINE__, ##arg); \
1213 } \ 1226 } \
1214} while (0) 1227} while (0)
1215#else 1228#else
1216#define NV_DEBUG(d, fmt, arg...) do { \ 1229#define NV_DEBUG(d, fmt, arg...) do { \
1217 if (drm_debug) \ 1230 if (drm_debug & DRM_UT_DRIVER) \
1231 NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
1232} while (0)
1233#define NV_DEBUG_KMS(d, fmt, arg...) do { \
1234 if (drm_debug & DRM_UT_KMS) \
1218 NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \ 1235 NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
1219} while (0) 1236} while (0)
1220#endif 1237#endif
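
The macro split above keys each message off a bit in drm_debug rather than treating it as a plain boolean, so driver-core and modesetting chatter can be enabled independently. A minimal userspace analogue follows; the bit values and macro names are illustrative, not the DRM ones.

#include <stdio.h>

#define UT_DRIVER 0x02
#define UT_KMS    0x04

static unsigned int debug_mask = UT_KMS;	/* e.g. set from a module parameter */

#define DBG_DRIVER(fmt, ...) do { \
	if (debug_mask & UT_DRIVER) \
		printf("[drv] %s:%d " fmt, __func__, __LINE__, ##__VA_ARGS__); \
} while (0)

#define DBG_KMS(fmt, ...) do { \
	if (debug_mask & UT_KMS) \
		printf("[kms] %s:%d " fmt, __func__, __LINE__, ##__VA_ARGS__); \
} while (0)

int main(void)
{
	DBG_DRIVER("suppressed unless UT_DRIVER is set\n");
	DBG_KMS("modesetting path message\n");
	return 0;
}
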
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 36e8c5e4503a..84af25c238b6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -58,7 +58,7 @@ nouveau_fbcon_sync(struct fb_info *info)
58 struct nouveau_channel *chan = dev_priv->channel; 58 struct nouveau_channel *chan = dev_priv->channel;
59 int ret, i; 59 int ret, i;
60 60
61 if (!chan->accel_done || 61 if (!chan || !chan->accel_done ||
62 info->state != FBINFO_STATE_RUNNING || 62 info->state != FBINFO_STATE_RUNNING ||
63 info->flags & FBINFO_HWACCEL_DISABLED) 63 info->flags & FBINFO_HWACCEL_DISABLED)
64 return 0; 64 return 0;
@@ -318,14 +318,16 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
318 par->nouveau_fb = nouveau_fb; 318 par->nouveau_fb = nouveau_fb;
319 par->dev = dev; 319 par->dev = dev;
320 320
321 switch (dev_priv->card_type) { 321 if (dev_priv->channel) {
322 case NV_50: 322 switch (dev_priv->card_type) {
323 nv50_fbcon_accel_init(info); 323 case NV_50:
324 break; 324 nv50_fbcon_accel_init(info);
325 default: 325 break;
326 nv04_fbcon_accel_init(info); 326 default:
327 break; 327 nv04_fbcon_accel_init(info);
328 }; 328 break;
329 };
330 }
329 331
330 nouveau_fbcon_zfill(dev); 332 nouveau_fbcon_zfill(dev);
331 333
@@ -347,7 +349,7 @@ out:
347int 349int
348nouveau_fbcon_probe(struct drm_device *dev) 350nouveau_fbcon_probe(struct drm_device *dev)
349{ 351{
350 NV_DEBUG(dev, "\n"); 352 NV_DEBUG_KMS(dev, "\n");
351 353
352 return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create); 354 return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
353} 355}
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
new file mode 100644
index 000000000000..419f4c2b3b89
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -0,0 +1,161 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29
30struct nouveau_ctxprog {
31 uint32_t signature;
32 uint8_t version;
33 uint16_t length;
34 uint32_t data[];
35} __attribute__ ((packed));
36
37struct nouveau_ctxvals {
38 uint32_t signature;
39 uint8_t version;
40 uint32_t length;
41 struct {
42 uint32_t offset;
43 uint32_t value;
44 } data[];
45} __attribute__ ((packed));
46
47int
48nouveau_grctx_prog_load(struct drm_device *dev)
49{
50 struct drm_nouveau_private *dev_priv = dev->dev_private;
51 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
52 const int chipset = dev_priv->chipset;
53 const struct firmware *fw;
54 const struct nouveau_ctxprog *cp;
55 const struct nouveau_ctxvals *cv;
56 char name[32];
57 int ret, i;
58
59 if (pgraph->accel_blocked)
60 return -ENODEV;
61
62 if (!pgraph->ctxprog) {
63 sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
64 ret = request_firmware(&fw, name, &dev->pdev->dev);
65 if (ret) {
66 NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
67 return ret;
68 }
69
70 pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
71 if (!pgraph->ctxprog) {
72 NV_ERROR(dev, "OOM copying ctxprog\n");
73 release_firmware(fw);
74 return -ENOMEM;
75 }
76 memcpy(pgraph->ctxprog, fw->data, fw->size);
77
78 cp = pgraph->ctxprog;
79 if (le32_to_cpu(cp->signature) != 0x5043564e ||
80 cp->version != 0 ||
81 le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
82 NV_ERROR(dev, "ctxprog invalid\n");
83 release_firmware(fw);
84 nouveau_grctx_fini(dev);
85 return -EINVAL;
86 }
87 release_firmware(fw);
88 }
89
90 if (!pgraph->ctxvals) {
91 sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
92 ret = request_firmware(&fw, name, &dev->pdev->dev);
93 if (ret) {
94 NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
95 nouveau_grctx_fini(dev);
96 return ret;
97 }
98
99 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
 100 if (!pgraph->ctxvals) {
 101 NV_ERROR(dev, "OOM copying ctxvals\n");
102 release_firmware(fw);
103 nouveau_grctx_fini(dev);
104 return -ENOMEM;
105 }
106 memcpy(pgraph->ctxvals, fw->data, fw->size);
107
108 cv = (void *)pgraph->ctxvals;
109 if (le32_to_cpu(cv->signature) != 0x5643564e ||
110 cv->version != 0 ||
111 le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
112 NV_ERROR(dev, "ctxvals invalid\n");
113 release_firmware(fw);
114 nouveau_grctx_fini(dev);
115 return -EINVAL;
116 }
117 release_firmware(fw);
118 }
119
120 cp = pgraph->ctxprog;
121
122 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
123 for (i = 0; i < le16_to_cpu(cp->length); i++)
124 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
125 le32_to_cpu(cp->data[i]));
126
127 return 0;
128}
129
130void
131nouveau_grctx_fini(struct drm_device *dev)
132{
133 struct drm_nouveau_private *dev_priv = dev->dev_private;
134 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
135
136 if (pgraph->ctxprog) {
137 kfree(pgraph->ctxprog);
138 pgraph->ctxprog = NULL;
139 }
140
141 if (pgraph->ctxvals) {
 142 kfree(pgraph->ctxvals);
143 pgraph->ctxvals = NULL;
144 }
145}
146
147void
148nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
149{
150 struct drm_nouveau_private *dev_priv = dev->dev_private;
151 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
152 struct nouveau_ctxvals *cv = pgraph->ctxvals;
153 int i;
154
155 if (!cv)
156 return;
157
158 for (i = 0; i < le32_to_cpu(cv->length); i++)
159 nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
160 le32_to_cpu(cv->data[i].value));
161}
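
A userspace sketch of the header validation the loader performs on such a blob before copying it: a 4-byte signature, a 1-byte version, and a 16-bit little-endian word count that must match the payload size. The signature constant is the one checked above; everything else is illustrative.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint16_t get_le16(const uint8_t *p)
{
	return p[0] | p[1] << 8;
}

/* 7-byte header: signature, version, payload length in 32-bit words. */
static int ctxprog_blob_ok(const uint8_t *data, size_t size)
{
	if (size < 7)
		return 0;
	if (get_le32(data) != 0x5043564e)	/* "NVCP" */
		return 0;
	if (data[4] != 0)			/* version */
		return 0;
	if (get_le16(&data[5]) != (size - 7) / 4)
		return 0;
	return 1;
}

int main(void)
{
	static const uint8_t blob[7 + 8] = {
		'N', 'V', 'C', 'P',	/* signature, little-endian 0x5043564e */
		0x00,			/* version */
		0x02, 0x00,		/* two payload words follow */
	};

	printf("blob %s\n", ctxprog_blob_ok(blob, sizeof(blob)) ? "ok" : "invalid");
	return 0;
}
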
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
new file mode 100644
index 000000000000..5d39c4ce8006
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -0,0 +1,133 @@
1#ifndef __NOUVEAU_GRCTX_H__
2#define __NOUVEAU_GRCTX_H__
3
4struct nouveau_grctx {
5 struct drm_device *dev;
6
7 enum {
8 NOUVEAU_GRCTX_PROG,
9 NOUVEAU_GRCTX_VALS
10 } mode;
11 void *data;
12
13 uint32_t ctxprog_max;
14 uint32_t ctxprog_len;
15 uint32_t ctxprog_reg;
16 int ctxprog_label[32];
17 uint32_t ctxvals_pos;
18 uint32_t ctxvals_base;
19};
20
21#ifdef CP_CTX
22static inline void
23cp_out(struct nouveau_grctx *ctx, uint32_t inst)
24{
25 uint32_t *ctxprog = ctx->data;
26
27 if (ctx->mode != NOUVEAU_GRCTX_PROG)
28 return;
29
30 BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max);
31 ctxprog[ctx->ctxprog_len++] = inst;
32}
33
34static inline void
35cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
36{
37 cp_out(ctx, CP_LOAD_SR | val);
38}
39
40static inline void
41cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
42{
43 ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
44
45 ctx->ctxvals_base = ctx->ctxvals_pos;
46 ctx->ctxvals_pos = ctx->ctxvals_base + length;
47
48 if (length > (CP_CTX_COUNT >> CP_CTX_COUNT_SHIFT)) {
49 cp_lsr(ctx, length);
50 length = 0;
51 }
52
53 cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg);
54}
55
56static inline void
57cp_name(struct nouveau_grctx *ctx, int name)
58{
59 uint32_t *ctxprog = ctx->data;
60 int i;
61
62 if (ctx->mode != NOUVEAU_GRCTX_PROG)
63 return;
64
65 ctx->ctxprog_label[name] = ctx->ctxprog_len;
66 for (i = 0; i < ctx->ctxprog_len; i++) {
67 if ((ctxprog[i] & 0xfff00000) != 0xff400000)
68 continue;
69 if ((ctxprog[i] & CP_BRA_IP) != ((name) << CP_BRA_IP_SHIFT))
70 continue;
71 ctxprog[i] = (ctxprog[i] & 0x00ff00ff) |
72 (ctx->ctxprog_len << CP_BRA_IP_SHIFT);
73 }
74}
75
76static inline void
77_cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
78{
79 int ip = 0;
80
81 if (mod != 2) {
82 ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT;
83 if (ip == 0)
84 ip = 0xff000000 | (name << CP_BRA_IP_SHIFT);
85 }
86
87 cp_out(ctx, CP_BRA | (mod << 18) | ip | flag |
88 (state ? 0 : CP_BRA_IF_CLEAR));
89}
90#define cp_bra(c,f,s,n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
91#ifdef CP_BRA_MOD
92#define cp_cal(c,f,s,n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
93#define cp_ret(c,f,s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
94#endif
95
96static inline void
97_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
98{
99 cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
100}
101#define cp_wait(c,f,s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
102
103static inline void
104_cp_set(struct nouveau_grctx *ctx, int flag, int state)
105{
106 cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
107}
108#define cp_set(c,f,s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
109
110static inline void
111cp_pos(struct nouveau_grctx *ctx, int offset)
112{
113 ctx->ctxvals_pos = offset;
114 ctx->ctxvals_base = ctx->ctxvals_pos;
115
116 cp_lsr(ctx, ctx->ctxvals_pos);
117 cp_out(ctx, CP_SET_CONTEXT_POINTER);
118}
119
120static inline void
121gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
122{
123 if (ctx->mode != NOUVEAU_GRCTX_VALS)
124 return;
125
126 reg = (reg - 0x00400000) / 4;
127 reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
128
129 nv_wo32(ctx->dev, ctx->data, reg, val);
130}
131#endif
132
133#endif
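
The cp_name()/_cp_bra() pair above resolves forward branches by back-patching: a branch emitted before its label carries a 0xff sentinel in the target field, and defining the label later rewrites every such instruction. A standalone sketch of the same idea, using an instruction encoding invented for the example:

#include <stdint.h>
#include <stdio.h>

#define OP_BRA		0x40000000u
#define UNRESOLVED	0x00ff0000u	/* "still a label id, not an address" */

static uint32_t prog[32];
static unsigned int prog_len;
static int label_ip[8] = { -1, -1, -1, -1, -1, -1, -1, -1 };

static void emit_bra(unsigned int label)
{
	uint32_t target = label_ip[label] >= 0 ?
			  (uint32_t)label_ip[label] : UNRESOLVED | label;

	prog[prog_len++] = OP_BRA | target;
}

static void define_label(unsigned int label)
{
	unsigned int i;

	label_ip[label] = prog_len;
	for (i = 0; i < prog_len; i++) {
		if ((prog[i] & (OP_BRA | UNRESOLVED)) != (OP_BRA | UNRESOLVED))
			continue;
		if ((prog[i] & 0xff) != label)
			continue;
		prog[i] = OP_BRA | prog_len;	/* patch in the real address */
	}
}

static void emit_nop(void)
{
	prog[prog_len++] = 0;
}

int main(void)
{
	unsigned int i;

	emit_bra(3);		/* forward reference to label 3 */
	emit_nop();
	emit_nop();
	define_label(3);	/* back-patches the branch above */
	emit_nop();

	for (i = 0; i < prog_len; i++)
		printf("%02u: 0x%08x\n", i, prog[i]);
	return 0;
}
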
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index a2c30f4611ba..475ba810bba3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -61,12 +61,10 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
61 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) 61 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
62 fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE]; 62 fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
63#endif 63#endif
64 lock_kernel(); /* XXX for now */
65 if (fn != NULL) 64 if (fn != NULL)
66 ret = (*fn)(filp, cmd, arg); 65 ret = (*fn)(filp, cmd, arg);
67 else 66 else
68 ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); 67 ret = drm_ioctl(filp, cmd, arg);
69 unlock_kernel();
70 68
71 return ret; 69 return ret;
72} 70}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 2ed41d339f6a..e76ec2d207a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -299,12 +299,57 @@ nouveau_vga_set_decode(void *priv, bool state)
299 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 299 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
300} 300}
301 301
302static int
303nouveau_card_init_channel(struct drm_device *dev)
304{
305 struct drm_nouveau_private *dev_priv = dev->dev_private;
306 struct nouveau_gpuobj *gpuobj;
307 int ret;
308
309 ret = nouveau_channel_alloc(dev, &dev_priv->channel,
310 (struct drm_file *)-2,
311 NvDmaFB, NvDmaTT);
312 if (ret)
313 return ret;
314
315 gpuobj = NULL;
316 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
317 0, nouveau_mem_fb_amount(dev),
318 NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
319 &gpuobj);
320 if (ret)
321 goto out_err;
322
323 ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
324 gpuobj, NULL);
325 if (ret)
326 goto out_err;
327
328 gpuobj = NULL;
329 ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
330 dev_priv->gart_info.aper_size,
331 NV_DMA_ACCESS_RW, &gpuobj, NULL);
332 if (ret)
333 goto out_err;
334
335 ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
336 gpuobj, NULL);
337 if (ret)
338 goto out_err;
339
340 return 0;
341out_err:
342 nouveau_gpuobj_del(dev, &gpuobj);
343 nouveau_channel_free(dev_priv->channel);
344 dev_priv->channel = NULL;
345 return ret;
346}
347
302int 348int
303nouveau_card_init(struct drm_device *dev) 349nouveau_card_init(struct drm_device *dev)
304{ 350{
305 struct drm_nouveau_private *dev_priv = dev->dev_private; 351 struct drm_nouveau_private *dev_priv = dev->dev_private;
306 struct nouveau_engine *engine; 352 struct nouveau_engine *engine;
307 struct nouveau_gpuobj *gpuobj;
308 int ret; 353 int ret;
309 354
310 NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); 355 NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
@@ -317,7 +362,7 @@ nouveau_card_init(struct drm_device *dev)
317 /* Initialise internal driver API hooks */ 362 /* Initialise internal driver API hooks */
318 ret = nouveau_init_engine_ptrs(dev); 363 ret = nouveau_init_engine_ptrs(dev);
319 if (ret) 364 if (ret)
320 return ret; 365 goto out;
321 engine = &dev_priv->engine; 366 engine = &dev_priv->engine;
322 dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; 367 dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
323 368
@@ -325,12 +370,12 @@ nouveau_card_init(struct drm_device *dev)
325 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 370 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
326 ret = nouveau_bios_init(dev); 371 ret = nouveau_bios_init(dev);
327 if (ret) 372 if (ret)
328 return ret; 373 goto out;
329 } 374 }
330 375
331 ret = nouveau_gpuobj_early_init(dev); 376 ret = nouveau_gpuobj_early_init(dev);
332 if (ret) 377 if (ret)
333 return ret; 378 goto out_bios;
334 379
335 /* Initialise instance memory, must happen before mem_init so we 380 /* Initialise instance memory, must happen before mem_init so we
336 * know exactly how much VRAM we're able to use for "normal" 381 * know exactly how much VRAM we're able to use for "normal"
@@ -338,100 +383,68 @@ nouveau_card_init(struct drm_device *dev)
338 */ 383 */
339 ret = engine->instmem.init(dev); 384 ret = engine->instmem.init(dev);
340 if (ret) 385 if (ret)
341 return ret; 386 goto out_gpuobj_early;
342 387
343 /* Setup the memory manager */ 388 /* Setup the memory manager */
344 ret = nouveau_mem_init(dev); 389 ret = nouveau_mem_init(dev);
345 if (ret) 390 if (ret)
346 return ret; 391 goto out_instmem;
347 392
348 ret = nouveau_gpuobj_init(dev); 393 ret = nouveau_gpuobj_init(dev);
349 if (ret) 394 if (ret)
350 return ret; 395 goto out_mem;
351 396
352 /* PMC */ 397 /* PMC */
353 ret = engine->mc.init(dev); 398 ret = engine->mc.init(dev);
354 if (ret) 399 if (ret)
355 return ret; 400 goto out_gpuobj;
356 401
357 /* PTIMER */ 402 /* PTIMER */
358 ret = engine->timer.init(dev); 403 ret = engine->timer.init(dev);
359 if (ret) 404 if (ret)
360 return ret; 405 goto out_mc;
361 406
362 /* PFB */ 407 /* PFB */
363 ret = engine->fb.init(dev); 408 ret = engine->fb.init(dev);
364 if (ret) 409 if (ret)
365 return ret; 410 goto out_timer;
366 411
367 /* PGRAPH */ 412 /* PGRAPH */
368 ret = engine->graph.init(dev); 413 ret = engine->graph.init(dev);
369 if (ret) 414 if (ret)
370 return ret; 415 goto out_fb;
371 416
372 /* PFIFO */ 417 /* PFIFO */
373 ret = engine->fifo.init(dev); 418 ret = engine->fifo.init(dev);
374 if (ret) 419 if (ret)
375 return ret; 420 goto out_graph;
376 421
377 /* this call irq_preinstall, register irq handler and 422 /* this call irq_preinstall, register irq handler and
378 * call irq_postinstall 423 * call irq_postinstall
379 */ 424 */
380 ret = drm_irq_install(dev); 425 ret = drm_irq_install(dev);
381 if (ret) 426 if (ret)
382 return ret; 427 goto out_fifo;
383 428
384 ret = drm_vblank_init(dev, 0); 429 ret = drm_vblank_init(dev, 0);
385 if (ret) 430 if (ret)
386 return ret; 431 goto out_irq;
387 432
388 /* what about PVIDEO/PCRTC/PRAMDAC etc? */ 433 /* what about PVIDEO/PCRTC/PRAMDAC etc? */
389 434
390 ret = nouveau_channel_alloc(dev, &dev_priv->channel, 435 if (!engine->graph.accel_blocked) {
391 (struct drm_file *)-2, 436 ret = nouveau_card_init_channel(dev);
392 NvDmaFB, NvDmaTT); 437 if (ret)
393 if (ret) 438 goto out_irq;
394 return ret;
395
396 gpuobj = NULL;
397 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
398 0, nouveau_mem_fb_amount(dev),
399 NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
400 &gpuobj);
401 if (ret)
402 return ret;
403
404 ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
405 gpuobj, NULL);
406 if (ret) {
407 nouveau_gpuobj_del(dev, &gpuobj);
408 return ret;
409 }
410
411 gpuobj = NULL;
412 ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
413 dev_priv->gart_info.aper_size,
414 NV_DMA_ACCESS_RW, &gpuobj, NULL);
415 if (ret)
416 return ret;
417
418 ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
419 gpuobj, NULL);
420 if (ret) {
421 nouveau_gpuobj_del(dev, &gpuobj);
422 return ret;
423 } 439 }
424 440
425 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 441 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
426 if (dev_priv->card_type >= NV_50) { 442 if (dev_priv->card_type >= NV_50)
427 ret = nv50_display_create(dev); 443 ret = nv50_display_create(dev);
428 if (ret) 444 else
429 return ret;
430 } else {
431 ret = nv04_display_create(dev); 445 ret = nv04_display_create(dev);
432 if (ret) 446 if (ret)
433 return ret; 447 goto out_irq;
434 }
435 } 448 }
436 449
437 ret = nouveau_backlight_init(dev); 450 ret = nouveau_backlight_init(dev);
@@ -444,6 +457,32 @@ nouveau_card_init(struct drm_device *dev)
444 drm_helper_initial_config(dev); 457 drm_helper_initial_config(dev);
445 458
446 return 0; 459 return 0;
460
461out_irq:
462 drm_irq_uninstall(dev);
463out_fifo:
464 engine->fifo.takedown(dev);
465out_graph:
466 engine->graph.takedown(dev);
467out_fb:
468 engine->fb.takedown(dev);
469out_timer:
470 engine->timer.takedown(dev);
471out_mc:
472 engine->mc.takedown(dev);
473out_gpuobj:
474 nouveau_gpuobj_takedown(dev);
475out_mem:
476 nouveau_mem_close(dev);
477out_instmem:
478 engine->instmem.takedown(dev);
479out_gpuobj_early:
480 nouveau_gpuobj_late_takedown(dev);
481out_bios:
482 nouveau_bios_takedown(dev);
483out:
484 vga_client_register(dev->pdev, NULL, NULL, NULL);
485 return ret;
447} 486}
448 487
449static void nouveau_card_takedown(struct drm_device *dev) 488static void nouveau_card_takedown(struct drm_device *dev)
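
The init path above replaces the bare "return ret" statements with a single unwind ladder, so each failure tears down exactly what was already brought up, in reverse order. A generic sketch of that pattern, with placeholder subsystem names:

#include <stdio.h>

static int init_a(void) { return 0; }
static void fini_a(void) { }
static int init_b(void) { return 0; }
static void fini_b(void) { }
static int init_c(void) { return -1; }	/* pretend this stage fails */

static int card_init(void)
{
	int ret;

	ret = init_a();
	if (ret)
		goto out;
	ret = init_b();
	if (ret)
		goto out_a;
	ret = init_c();
	if (ret)
		goto out_b;
	return 0;

out_b:
	fini_b();
out_a:
	fini_a();
out:
	return ret;
}

int main(void)
{
	printf("card_init: %d\n", card_init());
	return 0;
}
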
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index b91363606055..d2f143ed97c1 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -143,10 +143,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
143 state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; 143 state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
144 144
145 if (pv->NM2) 145 if (pv->NM2)
146 NV_TRACE(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n", 146 NV_DEBUG_KMS(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
147 pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P); 147 pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
148 else 148 else
149 NV_TRACE(dev, "vpll: n %d m %d log2p %d\n", 149 NV_DEBUG_KMS(dev, "vpll: n %d m %d log2p %d\n",
150 pv->N1, pv->M1, pv->log2P); 150 pv->N1, pv->M1, pv->log2P);
151 151
152 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); 152 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
@@ -160,7 +160,7 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
160 unsigned char seq1 = 0, crtc17 = 0; 160 unsigned char seq1 = 0, crtc17 = 0;
161 unsigned char crtc1A; 161 unsigned char crtc1A;
162 162
163 NV_TRACE(dev, "Setting dpms mode %d on CRTC %d\n", mode, 163 NV_DEBUG_KMS(dev, "Setting dpms mode %d on CRTC %d\n", mode,
164 nv_crtc->index); 164 nv_crtc->index);
165 165
166 if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */ 166 if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
@@ -603,7 +603,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
603 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 603 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
604 struct drm_nouveau_private *dev_priv = dev->dev_private; 604 struct drm_nouveau_private *dev_priv = dev->dev_private;
605 605
606 NV_DEBUG(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index); 606 NV_DEBUG_KMS(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index);
607 drm_mode_debug_printmodeline(adjusted_mode); 607 drm_mode_debug_printmodeline(adjusted_mode);
608 608
609 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ 609 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */
@@ -703,7 +703,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
703{ 703{
704 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 704 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
705 705
706 NV_DEBUG(crtc->dev, "\n"); 706 NV_DEBUG_KMS(crtc->dev, "\n");
707 707
708 if (!nv_crtc) 708 if (!nv_crtc)
709 return; 709 return;
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index a5fa51714e87..d9f32879ba38 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -205,7 +205,7 @@ out:
205 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); 205 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
206 206
207 if (blue == 0x18) { 207 if (blue == 0x18) {
208 NV_TRACE(dev, "Load detected on head A\n"); 208 NV_INFO(dev, "Load detected on head A\n");
209 return connector_status_connected; 209 return connector_status_connected;
210 } 210 }
211 211
@@ -350,14 +350,10 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
350 struct drm_display_mode *mode, 350 struct drm_display_mode *mode,
351 struct drm_display_mode *adjusted_mode) 351 struct drm_display_mode *adjusted_mode)
352{ 352{
353 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
354 struct drm_device *dev = encoder->dev; 353 struct drm_device *dev = encoder->dev;
355 struct drm_nouveau_private *dev_priv = dev->dev_private; 354 struct drm_nouveau_private *dev_priv = dev->dev_private;
356 int head = nouveau_crtc(encoder->crtc)->index; 355 int head = nouveau_crtc(encoder->crtc)->index;
357 356
358 NV_TRACE(dev, "%s called for encoder %d\n", __func__,
359 nv_encoder->dcb->index);
360
361 if (nv_gf4_disp_arch(dev)) { 357 if (nv_gf4_disp_arch(dev)) {
362 struct drm_encoder *rebind; 358 struct drm_encoder *rebind;
363 uint32_t dac_offset = nv04_dac_output_offset(encoder); 359 uint32_t dac_offset = nv04_dac_output_offset(encoder);
@@ -466,7 +462,7 @@ static void nv04_dac_destroy(struct drm_encoder *encoder)
466{ 462{
467 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 463 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
468 464
469 NV_DEBUG(encoder->dev, "\n"); 465 NV_DEBUG_KMS(encoder->dev, "\n");
470 466
471 drm_encoder_cleanup(encoder); 467 drm_encoder_cleanup(encoder);
472 kfree(nv_encoder); 468 kfree(nv_encoder);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index e5b33339d595..483f875bdb6a 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -261,7 +261,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
261 struct drm_display_mode *output_mode = &nv_encoder->mode; 261 struct drm_display_mode *output_mode = &nv_encoder->mode;
262 uint32_t mode_ratio, panel_ratio; 262 uint32_t mode_ratio, panel_ratio;
263 263
264 NV_DEBUG(dev, "Output mode on CRTC %d:\n", nv_crtc->index); 264 NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
265 drm_mode_debug_printmodeline(output_mode); 265 drm_mode_debug_printmodeline(output_mode);
266 266
267 /* Initialize the FP registers in this CRTC. */ 267 /* Initialize the FP registers in this CRTC. */
@@ -413,7 +413,9 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
413 struct dcb_entry *dcbe = nv_encoder->dcb; 413 struct dcb_entry *dcbe = nv_encoder->dcb;
414 int head = nouveau_crtc(encoder->crtc)->index; 414 int head = nouveau_crtc(encoder->crtc)->index;
415 415
416 NV_TRACE(dev, "%s called for encoder %d\n", __func__, nv_encoder->dcb->index); 416 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
417 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
418 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
417 419
418 if (dcbe->type == OUTPUT_TMDS) 420 if (dcbe->type == OUTPUT_TMDS)
419 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); 421 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
@@ -550,7 +552,7 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
550{ 552{
551 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 553 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
552 554
553 NV_DEBUG(encoder->dev, "\n"); 555 NV_DEBUG_KMS(encoder->dev, "\n");
554 556
555 drm_encoder_cleanup(encoder); 557 drm_encoder_cleanup(encoder);
556 kfree(nv_encoder); 558 kfree(nv_encoder);
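The new NV_INFO message in nv04_dfp_commit() derives the output letter from the DCB "or" bitmask with '@' + ffs(...): ffs() returns the 1-based position of the lowest set bit and '@' is the ASCII character immediately before 'A', so or = 1 maps to 'A', or = 2 to 'B' and or = 4 to 'C'. A userspace illustration of that arithmetic (using the libc ffs() from strings.h rather than the kernel helper):

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int masks[] = { 1, 2, 4 };     /* example "or" bitmasks */

        for (int i = 0; i < 3; i++)
                printf("or=%u -> output %c\n", masks[i], '@' + ffs(masks[i]));
        return 0;       /* prints A, B and C */
}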
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index b47c757ff48b..ef77215fa5b9 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -99,10 +99,11 @@ nv04_display_create(struct drm_device *dev)
99 uint16_t connector[16] = { 0 }; 99 uint16_t connector[16] = { 0 };
100 int i, ret; 100 int i, ret;
101 101
102 NV_DEBUG(dev, "\n"); 102 NV_DEBUG_KMS(dev, "\n");
103 103
104 if (nv_two_heads(dev)) 104 if (nv_two_heads(dev))
105 nv04_display_store_initial_head_owner(dev); 105 nv04_display_store_initial_head_owner(dev);
106 nouveau_hw_save_vga_fonts(dev, 1);
106 107
107 drm_mode_config_init(dev); 108 drm_mode_config_init(dev);
108 drm_mode_create_scaling_mode_property(dev); 109 drm_mode_create_scaling_mode_property(dev);
@@ -203,8 +204,6 @@ nv04_display_create(struct drm_device *dev)
203 /* Save previous state */ 204 /* Save previous state */
204 NVLockVgaCrtcs(dev, false); 205 NVLockVgaCrtcs(dev, false);
205 206
206 nouveau_hw_save_vga_fonts(dev, 1);
207
208 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 207 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
209 crtc->funcs->save(crtc); 208 crtc->funcs->save(crtc);
210 209
@@ -223,7 +222,7 @@ nv04_display_destroy(struct drm_device *dev)
223 struct drm_encoder *encoder; 222 struct drm_encoder *encoder;
224 struct drm_crtc *crtc; 223 struct drm_crtc *crtc;
225 224
226 NV_DEBUG(dev, "\n"); 225 NV_DEBUG_KMS(dev, "\n");
227 226
228 /* Turn every CRTC off. */ 227 /* Turn every CRTC off. */
229 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 228 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -246,9 +245,9 @@ nv04_display_destroy(struct drm_device *dev)
246 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 245 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
247 crtc->funcs->restore(crtc); 246 crtc->funcs->restore(crtc);
248 247
249 nouveau_hw_save_vga_fonts(dev, 0);
250
251 drm_mode_config_cleanup(dev); 248 drm_mode_config_cleanup(dev);
249
250 nouveau_hw_save_vga_fonts(dev, 0);
252} 251}
253 252
254void 253void
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index 396ee92118f6..d561d773c0f4 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -543,7 +543,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
543 543
544 nv_wi32(dev, instance, tmp); 544 nv_wi32(dev, instance, tmp);
545 nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp); 545 nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
546 nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + subc, tmp); 546 nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
547 return 0; 547 return 0;
548} 548}
549 549
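The one-line fix above scales the subchannel index by four (subc<<2) before adding it to NV04_PGRAPH_CTX_CACHE1: the cache entries are consecutive 32-bit MMIO registers, so entry N sits 4*N bytes past the base, and adding the raw index would land inside a neighbouring register. A small sketch of the difference (the base address used here is illustrative only, not the real register offset):

#include <stdio.h>
#include <stdint.h>

#define CTX_CACHE1_BASE 0x004001a0u     /* illustrative register base */

int main(void)
{
        for (uint32_t subc = 0; subc < 8; subc++) {
                uint32_t wrong = CTX_CACHE1_BASE + subc;        /* byte offset == index */
                uint32_t right = CTX_CACHE1_BASE + (subc << 2); /* one 32-bit register per entry */
                printf("subc %u: 0x%08x vs 0x%08x\n", subc, wrong, right);
        }
        return 0;
}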
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 6bf6804bb0ef..6870e0ee2e7e 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -389,49 +389,50 @@ struct graph_state {
389 int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)]; 389 int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
390 int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)]; 390 int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
391 struct pipe_state pipe_state; 391 struct pipe_state pipe_state;
392 uint32_t lma_window[4];
392}; 393};
393 394
395#define PIPE_SAVE(dev, state, addr) \
396 do { \
397 int __i; \
398 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
399 for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
400 state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
401 } while (0)
402
403#define PIPE_RESTORE(dev, state, addr) \
404 do { \
405 int __i; \
406 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
407 for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
408 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
409 } while (0)
410
394static void nv10_graph_save_pipe(struct nouveau_channel *chan) 411static void nv10_graph_save_pipe(struct nouveau_channel *chan)
395{ 412{
396 struct drm_device *dev = chan->dev; 413 struct drm_device *dev = chan->dev;
397 struct graph_state *pgraph_ctx = chan->pgraph_ctx; 414 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
398 struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; 415 struct pipe_state *pipe = &pgraph_ctx->pipe_state;
399 int i; 416
400#define PIPE_SAVE(addr) \ 417 PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
401 do { \ 418 PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
402 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \ 419 PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
403 for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \ 420 PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
404 fifo_pipe_state->pipe_##addr[i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \ 421 PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
405 } while (0) 422 PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
406 423 PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
407 PIPE_SAVE(0x4400); 424 PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
408 PIPE_SAVE(0x0200); 425 PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
409 PIPE_SAVE(0x6400); 426 PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
410 PIPE_SAVE(0x6800);
411 PIPE_SAVE(0x6c00);
412 PIPE_SAVE(0x7000);
413 PIPE_SAVE(0x7400);
414 PIPE_SAVE(0x7800);
415 PIPE_SAVE(0x0040);
416 PIPE_SAVE(0x0000);
417
418#undef PIPE_SAVE
419} 427}
420 428
421static void nv10_graph_load_pipe(struct nouveau_channel *chan) 429static void nv10_graph_load_pipe(struct nouveau_channel *chan)
422{ 430{
423 struct drm_device *dev = chan->dev; 431 struct drm_device *dev = chan->dev;
424 struct graph_state *pgraph_ctx = chan->pgraph_ctx; 432 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
425 struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; 433 struct pipe_state *pipe = &pgraph_ctx->pipe_state;
426 int i;
427 uint32_t xfmode0, xfmode1; 434 uint32_t xfmode0, xfmode1;
428#define PIPE_RESTORE(addr) \ 435 int i;
429 do { \
430 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
431 for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
432 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
433 } while (0)
434
435 436
436 nouveau_wait_for_idle(dev); 437 nouveau_wait_for_idle(dev);
437 /* XXX check haiku comments */ 438 /* XXX check haiku comments */
@@ -457,24 +458,22 @@ static void nv10_graph_load_pipe(struct nouveau_channel *chan)
457 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008); 458 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
458 459
459 460
460 PIPE_RESTORE(0x0200); 461 PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
461 nouveau_wait_for_idle(dev); 462 nouveau_wait_for_idle(dev);
462 463
463 /* restore XFMODE */ 464 /* restore XFMODE */
464 nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0); 465 nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
465 nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1); 466 nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
466 PIPE_RESTORE(0x6400); 467 PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
467 PIPE_RESTORE(0x6800); 468 PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
468 PIPE_RESTORE(0x6c00); 469 PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
469 PIPE_RESTORE(0x7000); 470 PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
470 PIPE_RESTORE(0x7400); 471 PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
471 PIPE_RESTORE(0x7800); 472 PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
472 PIPE_RESTORE(0x4400); 473 PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
473 PIPE_RESTORE(0x0000); 474 PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
474 PIPE_RESTORE(0x0040); 475 PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
475 nouveau_wait_for_idle(dev); 476 nouveau_wait_for_idle(dev);
476
477#undef PIPE_RESTORE
478} 477}
479 478
480static void nv10_graph_create_pipe(struct nouveau_channel *chan) 479static void nv10_graph_create_pipe(struct nouveau_channel *chan)
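The rewrite above lifts the save and restore loops into file-scope PIPE_SAVE()/PIPE_RESTORE() macros that take the destination array explicitly; ARRAY_SIZE(state) then sizes the transfer from whatever buffer is passed in, whether a pipe_state member or a small on-stack array as in the LMA methods added further down. A compile-and-run sketch of the same macro shape, with the MMIO accessors mocked out and the register offsets used purely as placeholders:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define PIPE_ADDRESS 0x400f50           /* placeholder offsets */
#define PIPE_DATA    0x400f54

static uint32_t fake_reg;               /* mock MMIO so the sketch runs in userspace */
static void nv_wr32(void *dev, uint32_t reg, uint32_t val)
{ (void)dev; (void)reg; fake_reg = val; }
static uint32_t nv_rd32(void *dev, uint32_t reg)
{ (void)dev; (void)reg; return fake_reg; }

#define PIPE_SAVE(dev, state, addr)                                  \
        do {                                                         \
                int __i;                                             \
                nv_wr32(dev, PIPE_ADDRESS, addr);                    \
                for (__i = 0; __i < ARRAY_SIZE(state); __i++)        \
                        state[__i] = nv_rd32(dev, PIPE_DATA);        \
        } while (0)

#define PIPE_RESTORE(dev, state, addr)                               \
        do {                                                         \
                int __i;                                             \
                nv_wr32(dev, PIPE_ADDRESS, addr);                    \
                for (__i = 0; __i < ARRAY_SIZE(state); __i++)        \
                        nv_wr32(dev, PIPE_DATA, state[__i]);         \
        } while (0)

int main(void)
{
        uint32_t pipe_0x0040[1];        /* on-stack buffer, as in the LMA window method */

        PIPE_SAVE(NULL, pipe_0x0040, 0x0040);
        PIPE_RESTORE(NULL, pipe_0x0040, 0x0040);
        printf("moved %zu word(s)\n", ARRAY_SIZE(pipe_0x0040));
        return 0;
}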
@@ -832,6 +831,9 @@ int nv10_graph_init(struct drm_device *dev)
832 (1<<31)); 831 (1<<31));
833 if (dev_priv->chipset >= 0x17) { 832 if (dev_priv->chipset >= 0x17) {
834 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000); 833 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
834 nv_wr32(dev, 0x400a10, 0x3ff3fb6);
835 nv_wr32(dev, 0x400838, 0x2f8684);
836 nv_wr32(dev, 0x40083c, 0x115f3f);
835 nv_wr32(dev, 0x004006b0, 0x40000020); 837 nv_wr32(dev, 0x004006b0, 0x40000020);
836 } else 838 } else
837 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000); 839 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
@@ -867,6 +869,115 @@ void nv10_graph_takedown(struct drm_device *dev)
867{ 869{
868} 870}
869 871
872static int
873nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
874 int mthd, uint32_t data)
875{
876 struct drm_device *dev = chan->dev;
877 struct graph_state *ctx = chan->pgraph_ctx;
878 struct pipe_state *pipe = &ctx->pipe_state;
879 struct drm_nouveau_private *dev_priv = dev->dev_private;
880 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
881 uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
882 uint32_t xfmode0, xfmode1;
883 int i;
884
885 ctx->lma_window[(mthd - 0x1638) / 4] = data;
886
887 if (mthd != 0x1644)
888 return 0;
889
890 nouveau_wait_for_idle(dev);
891
892 PIPE_SAVE(dev, pipe_0x0040, 0x0040);
893 PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
894
895 PIPE_RESTORE(dev, ctx->lma_window, 0x6790);
896
897 nouveau_wait_for_idle(dev);
898
899 xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
900 xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
901
902 PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
903 PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
904 PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
905 PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);
906
907 nouveau_wait_for_idle(dev);
908
909 nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
910 nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
911 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
912 for (i = 0; i < 4; i++)
913 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
914 for (i = 0; i < 4; i++)
915 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
916
917 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
918 for (i = 0; i < 3; i++)
919 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
920
921 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
922 for (i = 0; i < 3; i++)
923 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
924
925 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
926 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
927
928 PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
929
930 nouveau_wait_for_idle(dev);
931
932 PIPE_RESTORE(dev, pipe_0x0040, 0x0040);
933
934 nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
935 nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
936
937 PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
938 PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
939 PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
940 PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
941
942 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
943 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
944
945 nouveau_wait_for_idle(dev);
946
947 pgraph->fifo_access(dev, true);
948
949 return 0;
950}
951
952static int
953nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
954 int mthd, uint32_t data)
955{
956 struct drm_device *dev = chan->dev;
957 struct drm_nouveau_private *dev_priv = dev->dev_private;
958 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
959
960 nouveau_wait_for_idle(dev);
961
962 nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
963 nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
964 nv_wr32(dev, 0x004006b0,
965 nv_rd32(dev, 0x004006b0) | 0x8 << 24);
966
967 pgraph->fifo_access(dev, true);
968
969 return 0;
970}
971
972static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
973 { 0x1638, nv17_graph_mthd_lma_window },
974 { 0x163c, nv17_graph_mthd_lma_window },
975 { 0x1640, nv17_graph_mthd_lma_window },
976 { 0x1644, nv17_graph_mthd_lma_window },
977 { 0x1658, nv17_graph_mthd_lma_enable },
978 {}
979};
980
870struct nouveau_pgraph_object_class nv10_graph_grclass[] = { 981struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
871 { 0x0030, false, NULL }, /* null */ 982 { 0x0030, false, NULL }, /* null */
872 { 0x0039, false, NULL }, /* m2mf */ 983 { 0x0039, false, NULL }, /* m2mf */
@@ -887,6 +998,6 @@ struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
887 { 0x0095, false, NULL }, /* multitex_tri */ 998 { 0x0095, false, NULL }, /* multitex_tri */
888 { 0x0056, false, NULL }, /* celcius (nv10) */ 999 { 0x0056, false, NULL }, /* celcius (nv10) */
889 { 0x0096, false, NULL }, /* celcius (nv11) */ 1000 { 0x0096, false, NULL }, /* celcius (nv11) */
890 { 0x0099, false, NULL }, /* celcius (nv17) */ 1001 { 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
891 {} 1002 {}
892}; 1003};
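Registering nv17_graph_celsius_mthds against class 0x0099 means the LMA methods (0x1638-0x1644 and 0x1658) are now handled in software instead of being forwarded to PGRAPH. The driver's real dispatcher lives elsewhere; the table walk below is only a plausible sketch of how a sentinel-terminated method table like the one above can be searched:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct obj_method {
        int mthd;
        int (*handler)(int mthd, uint32_t data);
};

static int lma_window(int mthd, uint32_t data)
{ printf("lma_window 0x%04x <- 0x%08x\n", mthd, data); return 0; }
static int lma_enable(int mthd, uint32_t data)
{ printf("lma_enable 0x%04x <- 0x%08x\n", mthd, data); return 0; }

static const struct obj_method celsius_mthds[] = {      /* mirrors the table's shape */
        { 0x1638, lma_window },
        { 0x163c, lma_window },
        { 0x1640, lma_window },
        { 0x1644, lma_window },
        { 0x1658, lma_enable },
        { 0, NULL },
};

static int dispatch(int mthd, uint32_t data)
{
        for (const struct obj_method *m = celsius_mthds; m->handler; m++)
                if (m->mthd == mthd)
                        return m->handler(mthd, data);
        return -1;      /* not a software method: leave it to the hardware */
}

int main(void)
{
        dispatch(0x1644, 0x12345678);   /* completes the four-word LMA window */
        dispatch(0x1658, 0);
        return 0;
}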
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 46cfd9c60478..81c01353a9f9 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -219,7 +219,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
219 return; 219 return;
220 nouveau_encoder(encoder)->last_dpms = mode; 220 nouveau_encoder(encoder)->last_dpms = mode;
221 221
222 NV_TRACE(dev, "Setting dpms mode %d on TV encoder (output %d)\n", 222 NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
223 mode, nouveau_encoder(encoder)->dcb->index); 223 mode, nouveau_encoder(encoder)->dcb->index);
224 224
225 regs->ptv_200 &= ~1; 225 regs->ptv_200 &= ~1;
@@ -619,7 +619,7 @@ static void nv17_tv_destroy(struct drm_encoder *encoder)
619{ 619{
620 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); 620 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
621 621
622 NV_DEBUG(encoder->dev, "\n"); 622 NV_DEBUG_KMS(encoder->dev, "\n");
623 623
624 drm_encoder_cleanup(encoder); 624 drm_encoder_cleanup(encoder);
625 kfree(tv_enc); 625 kfree(tv_enc);
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 7e8547cb5833..2b332bb55acf 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -24,36 +24,10 @@
24 * 24 *
25 */ 25 */
26 26
27#include <linux/firmware.h>
28
29#include "drmP.h" 27#include "drmP.h"
30#include "drm.h" 28#include "drm.h"
31#include "nouveau_drv.h" 29#include "nouveau_drv.h"
32 30#include "nouveau_grctx.h"
33MODULE_FIRMWARE("nouveau/nv40.ctxprog");
34MODULE_FIRMWARE("nouveau/nv40.ctxvals");
35MODULE_FIRMWARE("nouveau/nv41.ctxprog");
36MODULE_FIRMWARE("nouveau/nv41.ctxvals");
37MODULE_FIRMWARE("nouveau/nv42.ctxprog");
38MODULE_FIRMWARE("nouveau/nv42.ctxvals");
39MODULE_FIRMWARE("nouveau/nv43.ctxprog");
40MODULE_FIRMWARE("nouveau/nv43.ctxvals");
41MODULE_FIRMWARE("nouveau/nv44.ctxprog");
42MODULE_FIRMWARE("nouveau/nv44.ctxvals");
43MODULE_FIRMWARE("nouveau/nv46.ctxprog");
44MODULE_FIRMWARE("nouveau/nv46.ctxvals");
45MODULE_FIRMWARE("nouveau/nv47.ctxprog");
46MODULE_FIRMWARE("nouveau/nv47.ctxvals");
47MODULE_FIRMWARE("nouveau/nv49.ctxprog");
48MODULE_FIRMWARE("nouveau/nv49.ctxvals");
49MODULE_FIRMWARE("nouveau/nv4a.ctxprog");
50MODULE_FIRMWARE("nouveau/nv4a.ctxvals");
51MODULE_FIRMWARE("nouveau/nv4b.ctxprog");
52MODULE_FIRMWARE("nouveau/nv4b.ctxvals");
53MODULE_FIRMWARE("nouveau/nv4c.ctxprog");
54MODULE_FIRMWARE("nouveau/nv4c.ctxvals");
55MODULE_FIRMWARE("nouveau/nv4e.ctxprog");
56MODULE_FIRMWARE("nouveau/nv4e.ctxvals");
57 31
58struct nouveau_channel * 32struct nouveau_channel *
59nv40_graph_channel(struct drm_device *dev) 33nv40_graph_channel(struct drm_device *dev)
@@ -83,27 +57,30 @@ nv40_graph_create_context(struct nouveau_channel *chan)
83{ 57{
84 struct drm_device *dev = chan->dev; 58 struct drm_device *dev = chan->dev;
85 struct drm_nouveau_private *dev_priv = dev->dev_private; 59 struct drm_nouveau_private *dev_priv = dev->dev_private;
86 struct nouveau_gpuobj *ctx; 60 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
87 int ret; 61 int ret;
88 62
89 /* Allocate a 175KiB block of PRAMIN to store the context. This 63 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
90 * is massive overkill for a lot of chipsets, but it should be safe 64 16, NVOBJ_FLAG_ZERO_ALLOC,
91 * until we're able to implement this properly (will happen at more 65 &chan->ramin_grctx);
92 * or less the same time we're able to write our own context programs.
93 */
94 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
95 NVOBJ_FLAG_ZERO_ALLOC,
96 &chan->ramin_grctx);
97 if (ret) 66 if (ret)
98 return ret; 67 return ret;
99 ctx = chan->ramin_grctx->gpuobj;
100 68
101 /* Initialise default context values */ 69 /* Initialise default context values */
102 dev_priv->engine.instmem.prepare_access(dev, true); 70 dev_priv->engine.instmem.prepare_access(dev, true);
103 nv40_grctx_vals_load(dev, ctx); 71 if (!pgraph->ctxprog) {
104 nv_wo32(dev, ctx, 0, ctx->im_pramin->start); 72 struct nouveau_grctx ctx = {};
105 dev_priv->engine.instmem.finish_access(dev);
106 73
74 ctx.dev = chan->dev;
75 ctx.mode = NOUVEAU_GRCTX_VALS;
76 ctx.data = chan->ramin_grctx->gpuobj;
77 nv40_grctx_init(&ctx);
78 } else {
79 nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj);
80 }
81 nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
82 chan->ramin_grctx->gpuobj->im_pramin->start);
83 dev_priv->engine.instmem.finish_access(dev);
107 return 0; 84 return 0;
108} 85}
109 86
@@ -204,139 +181,6 @@ nv40_graph_unload_context(struct drm_device *dev)
204 return ret; 181 return ret;
205} 182}
206 183
207struct nouveau_ctxprog {
208 uint32_t signature;
209 uint8_t version;
210 uint16_t length;
211 uint32_t data[];
212} __attribute__ ((packed));
213
214struct nouveau_ctxvals {
215 uint32_t signature;
216 uint8_t version;
217 uint32_t length;
218 struct {
219 uint32_t offset;
220 uint32_t value;
221 } data[];
222} __attribute__ ((packed));
223
224int
225nv40_grctx_init(struct drm_device *dev)
226{
227 struct drm_nouveau_private *dev_priv = dev->dev_private;
228 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
229 const int chipset = dev_priv->chipset;
230 const struct firmware *fw;
231 const struct nouveau_ctxprog *cp;
232 const struct nouveau_ctxvals *cv;
233 char name[32];
234 int ret, i;
235
236 pgraph->accel_blocked = true;
237
238 if (!pgraph->ctxprog) {
239 sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
240 ret = request_firmware(&fw, name, &dev->pdev->dev);
241 if (ret) {
242 NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
243 return ret;
244 }
245
246 pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
247 if (!pgraph->ctxprog) {
248 NV_ERROR(dev, "OOM copying ctxprog\n");
249 release_firmware(fw);
250 return -ENOMEM;
251 }
252 memcpy(pgraph->ctxprog, fw->data, fw->size);
253
254 cp = pgraph->ctxprog;
255 if (le32_to_cpu(cp->signature) != 0x5043564e ||
256 cp->version != 0 ||
257 le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
258 NV_ERROR(dev, "ctxprog invalid\n");
259 release_firmware(fw);
260 nv40_grctx_fini(dev);
261 return -EINVAL;
262 }
263 release_firmware(fw);
264 }
265
266 if (!pgraph->ctxvals) {
267 sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
268 ret = request_firmware(&fw, name, &dev->pdev->dev);
269 if (ret) {
270 NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
271 nv40_grctx_fini(dev);
272 return ret;
273 }
274
275 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
276 if (!pgraph->ctxprog) {
277 NV_ERROR(dev, "OOM copying ctxprog\n");
278 release_firmware(fw);
279 nv40_grctx_fini(dev);
280 return -ENOMEM;
281 }
282 memcpy(pgraph->ctxvals, fw->data, fw->size);
283
284 cv = (void *)pgraph->ctxvals;
285 if (le32_to_cpu(cv->signature) != 0x5643564e ||
286 cv->version != 0 ||
287 le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
288 NV_ERROR(dev, "ctxvals invalid\n");
289 release_firmware(fw);
290 nv40_grctx_fini(dev);
291 return -EINVAL;
292 }
293 release_firmware(fw);
294 }
295
296 cp = pgraph->ctxprog;
297
298 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
299 for (i = 0; i < le16_to_cpu(cp->length); i++)
300 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
301 le32_to_cpu(cp->data[i]));
302
303 pgraph->accel_blocked = false;
304 return 0;
305}
306
307void
308nv40_grctx_fini(struct drm_device *dev)
309{
310 struct drm_nouveau_private *dev_priv = dev->dev_private;
311 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
312
313 if (pgraph->ctxprog) {
314 kfree(pgraph->ctxprog);
315 pgraph->ctxprog = NULL;
316 }
317
318 if (pgraph->ctxvals) {
319 kfree(pgraph->ctxprog);
320 pgraph->ctxvals = NULL;
321 }
322}
323
324void
325nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
326{
327 struct drm_nouveau_private *dev_priv = dev->dev_private;
328 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
329 struct nouveau_ctxvals *cv = pgraph->ctxvals;
330 int i;
331
332 if (!cv)
333 return;
334
335 for (i = 0; i < le32_to_cpu(cv->length); i++)
336 nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
337 le32_to_cpu(cv->data[i].value));
338}
339
340/* 184/*
341 * G70 0x47 185 * G70 0x47
342 * G71 0x49 186 * G71 0x49
@@ -359,7 +203,26 @@ nv40_graph_init(struct drm_device *dev)
359 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | 203 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
360 NV_PMC_ENABLE_PGRAPH); 204 NV_PMC_ENABLE_PGRAPH);
361 205
362 nv40_grctx_init(dev); 206 if (nouveau_ctxfw) {
207 nouveau_grctx_prog_load(dev);
208 dev_priv->engine.graph.grctx_size = 175 * 1024;
209 }
210
211 if (!dev_priv->engine.graph.ctxprog) {
212 struct nouveau_grctx ctx = {};
213 uint32_t cp[256];
214
215 ctx.dev = dev;
216 ctx.mode = NOUVEAU_GRCTX_PROG;
217 ctx.data = cp;
218 ctx.ctxprog_max = 256;
219 nv40_grctx_init(&ctx);
220 dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
221
222 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
223 for (i = 0; i < ctx.ctxprog_len; i++)
224 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
225 }
363 226
364 /* No context present currently */ 227 /* No context present currently */
365 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); 228 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
@@ -539,6 +402,7 @@ nv40_graph_init(struct drm_device *dev)
539 402
540void nv40_graph_takedown(struct drm_device *dev) 403void nv40_graph_takedown(struct drm_device *dev)
541{ 404{
405 nouveau_grctx_fini(dev);
542} 406}
543 407
544struct nouveau_pgraph_object_class nv40_graph_grclass[] = { 408struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
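With nv40_grctx.c added, nv40_graph_init() no longer depends on external firmware: unless nouveau_ctxfw is set, nv40_grctx_init() runs once in NOUVEAU_GRCTX_PROG mode to emit the context program into a 256-word buffer and, as a side effect, to count how many context words the chip needs (ctxvals_pos becomes grctx_size); per channel, the same generator runs again in NOUVEAU_GRCTX_VALS mode to fill the freshly allocated context image. The toy generator below shows only that two-pass structure; the opcodes and values are made up:

#include <stdio.h>
#include <stdint.h>

enum grctx_mode { GRCTX_PROG, GRCTX_VALS };

struct grctx {
        enum grctx_mode mode;
        uint32_t *ucode;        /* PROG pass: instructions land here */
        int ucode_len;
        uint32_t *vals;         /* VALS pass: default context image lands here */
        int vals_pos;
};

/* one construction routine drives both passes */
static void construct(struct grctx *ctx)
{
        for (int i = 0; i < 4; i++) {
                if (ctx->mode == GRCTX_PROG)
                        ctx->ucode[ctx->ucode_len++] = 0x00100000 | i;   /* fake opcode */
                else
                        ctx->vals[ctx->vals_pos] = 0x11111111 * (i + 1); /* fake default */
                ctx->vals_pos++;        /* both passes advance the context cursor */
        }
}

int main(void)
{
        uint32_t cp[256];
        struct grctx pass1 = { .mode = GRCTX_PROG, .ucode = cp };

        construct(&pass1);                      /* pass 1: emit ucode, size the context */
        printf("ucode %d words, grctx %d bytes\n",
               pass1.ucode_len, pass1.vals_pos * 4);

        uint32_t image[4] = { 0 };              /* pass 2: per-channel context image */
        struct grctx pass2 = { .mode = GRCTX_VALS, .vals = image };
        construct(&pass2);
        return 0;
}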
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
new file mode 100644
index 000000000000..11b11c31f543
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -0,0 +1,678 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25/* NVIDIA context programs handle a number of other conditions which are
26 * not implemented in our versions. It's not clear why NVIDIA context
27 * programs have this code, nor whether it's strictly necessary for
28 * correct operation. We'll implement additional handling if/when we
29 * discover it's necessary.
30 *
31 * - On context save, NVIDIA set 0x400314 bit 0 to 1 if the "3D state"
32 * flag is set, this gets saved into the context.
33 * - On context save, the context program for all cards load nsource
34 * into a flag register and check for ILLEGAL_MTHD. If it's set,
35 * opcode 0x60000d is called before resuming normal operation.
36 * - Some context programs check more conditions than the above. NV44
37 * checks: ((nsource & 0x0857) || (0x400718 & 0x0100) || (intr & 0x0001))
38 * and calls 0x60000d before resuming normal operation.
39 * - At the very beginning of NVIDIA's context programs, flag 9 is checked
40 * and if true 0x800001 is called with count=0, pos=0, the flag is cleared
41 * and then the ctxprog is aborted. It looks like a complicated NOP,
42 * its purpose is unknown.
43 * - In the section of code that loads the per-vs state, NVIDIA check
44 * flag 10. If it's set, they only transfer the small 0x300 byte block
45 * of state + the state for a single vs as opposed to the state for
46 * all vs units. It doesn't seem likely that it'll occur in normal
47 * operation, especially seeing as it appears NVIDIA may have screwed
48 * up the ctxprogs for some cards and have an invalid instruction
49 * rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction.
50 * - There's a number of places where context offset 0 (where we place
51 * the PRAMIN offset of the context) is loaded into either 0x408000,
52 * 0x408004 or 0x408008. Not sure what's up there either.
53 * - The ctxprogs for some cards save 0x400a00 again during the cleanup
54 * path for auto-loadctx.
55 */
56
57#define CP_FLAG_CLEAR 0
58#define CP_FLAG_SET 1
59#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
60#define CP_FLAG_SWAP_DIRECTION_LOAD 0
61#define CP_FLAG_SWAP_DIRECTION_SAVE 1
62#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
63#define CP_FLAG_USER_SAVE_NOT_PENDING 0
64#define CP_FLAG_USER_SAVE_PENDING 1
65#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
66#define CP_FLAG_USER_LOAD_NOT_PENDING 0
67#define CP_FLAG_USER_LOAD_PENDING 1
68#define CP_FLAG_STATUS ((3 * 32) + 0)
69#define CP_FLAG_STATUS_IDLE 0
70#define CP_FLAG_STATUS_BUSY 1
71#define CP_FLAG_AUTO_SAVE ((3 * 32) + 4)
72#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
73#define CP_FLAG_AUTO_SAVE_PENDING 1
74#define CP_FLAG_AUTO_LOAD ((3 * 32) + 5)
75#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
76#define CP_FLAG_AUTO_LOAD_PENDING 1
77#define CP_FLAG_UNK54 ((3 * 32) + 6)
78#define CP_FLAG_UNK54_CLEAR 0
79#define CP_FLAG_UNK54_SET 1
80#define CP_FLAG_ALWAYS ((3 * 32) + 8)
81#define CP_FLAG_ALWAYS_FALSE 0
82#define CP_FLAG_ALWAYS_TRUE 1
83#define CP_FLAG_UNK57 ((3 * 32) + 9)
84#define CP_FLAG_UNK57_CLEAR 0
85#define CP_FLAG_UNK57_SET 1
86
87#define CP_CTX 0x00100000
88#define CP_CTX_COUNT 0x000fc000
89#define CP_CTX_COUNT_SHIFT 14
90#define CP_CTX_REG 0x00003fff
91#define CP_LOAD_SR 0x00200000
92#define CP_LOAD_SR_VALUE 0x000fffff
93#define CP_BRA 0x00400000
94#define CP_BRA_IP 0x0000ff00
95#define CP_BRA_IP_SHIFT 8
96#define CP_BRA_IF_CLEAR 0x00000080
97#define CP_BRA_FLAG 0x0000007f
98#define CP_WAIT 0x00500000
99#define CP_WAIT_SET 0x00000080
100#define CP_WAIT_FLAG 0x0000007f
101#define CP_SET 0x00700000
102#define CP_SET_1 0x00000080
103#define CP_SET_FLAG 0x0000007f
104#define CP_NEXT_TO_SWAP 0x00600007
105#define CP_NEXT_TO_CURRENT 0x00600009
106#define CP_SET_CONTEXT_POINTER 0x0060000a
107#define CP_END 0x0060000e
108#define CP_LOAD_MAGIC_UNK01 0x00800001 /* unknown */
109#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
110#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
111
112#include "drmP.h"
113#include "nouveau_drv.h"
114#include "nouveau_grctx.h"
115
116/* TODO:
117 * - get vs count from 0x1540
118 * - document unimplemented bits compared to nvidia
119 * - nsource handling
120 * - R0 & 0x0200 handling
121 * - single-vs handling
122 * - 400314 bit 0
123 */
124
125static int
126nv40_graph_4097(struct drm_device *dev)
127{
128 struct drm_nouveau_private *dev_priv = dev->dev_private;
129
130 if ((dev_priv->chipset & 0xf0) == 0x60)
131 return 0;
132
133 return !!(0x0baf & (1 << dev_priv->chipset));
134}
135
136static int
137nv40_graph_vs_count(struct drm_device *dev)
138{
139 struct drm_nouveau_private *dev_priv = dev->dev_private;
140
141 switch (dev_priv->chipset) {
142 case 0x47:
143 case 0x49:
144 case 0x4b:
145 return 8;
146 case 0x40:
147 return 6;
148 case 0x41:
149 case 0x42:
150 return 5;
151 case 0x43:
152 case 0x44:
153 case 0x46:
154 case 0x4a:
155 return 3;
156 case 0x4c:
157 case 0x4e:
158 case 0x67:
159 default:
160 return 1;
161 }
162}
163
164
165enum cp_label {
166 cp_check_load = 1,
167 cp_setup_auto_load,
168 cp_setup_load,
169 cp_setup_save,
170 cp_swap_state,
171 cp_swap_state3d_3_is_save,
172 cp_prepare_exit,
173 cp_exit,
174};
175
176static void
177nv40_graph_construct_general(struct nouveau_grctx *ctx)
178{
179 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
180 int i;
181
182 cp_ctx(ctx, 0x4000a4, 1);
183 gr_def(ctx, 0x4000a4, 0x00000008);
184 cp_ctx(ctx, 0x400144, 58);
185 gr_def(ctx, 0x400144, 0x00000001);
186 cp_ctx(ctx, 0x400314, 1);
187 gr_def(ctx, 0x400314, 0x00000000);
188 cp_ctx(ctx, 0x400400, 10);
189 cp_ctx(ctx, 0x400480, 10);
190 cp_ctx(ctx, 0x400500, 19);
191 gr_def(ctx, 0x400514, 0x00040000);
192 gr_def(ctx, 0x400524, 0x55555555);
193 gr_def(ctx, 0x400528, 0x55555555);
194 gr_def(ctx, 0x40052c, 0x55555555);
195 gr_def(ctx, 0x400530, 0x55555555);
196 cp_ctx(ctx, 0x400560, 6);
197 gr_def(ctx, 0x400568, 0x0000ffff);
198 gr_def(ctx, 0x40056c, 0x0000ffff);
199 cp_ctx(ctx, 0x40057c, 5);
200 cp_ctx(ctx, 0x400710, 3);
201 gr_def(ctx, 0x400710, 0x20010001);
202 gr_def(ctx, 0x400714, 0x0f73ef00);
203 cp_ctx(ctx, 0x400724, 1);
204 gr_def(ctx, 0x400724, 0x02008821);
205 cp_ctx(ctx, 0x400770, 3);
206 if (dev_priv->chipset == 0x40) {
207 cp_ctx(ctx, 0x400814, 4);
208 cp_ctx(ctx, 0x400828, 5);
209 cp_ctx(ctx, 0x400840, 5);
210 gr_def(ctx, 0x400850, 0x00000040);
211 cp_ctx(ctx, 0x400858, 4);
212 gr_def(ctx, 0x400858, 0x00000040);
213 gr_def(ctx, 0x40085c, 0x00000040);
214 gr_def(ctx, 0x400864, 0x80000000);
215 cp_ctx(ctx, 0x40086c, 9);
216 gr_def(ctx, 0x40086c, 0x80000000);
217 gr_def(ctx, 0x400870, 0x80000000);
218 gr_def(ctx, 0x400874, 0x80000000);
219 gr_def(ctx, 0x400878, 0x80000000);
220 gr_def(ctx, 0x400888, 0x00000040);
221 gr_def(ctx, 0x40088c, 0x80000000);
222 cp_ctx(ctx, 0x4009c0, 8);
223 gr_def(ctx, 0x4009cc, 0x80000000);
224 gr_def(ctx, 0x4009dc, 0x80000000);
225 } else {
226 cp_ctx(ctx, 0x400840, 20);
227 if (!nv40_graph_4097(ctx->dev)) {
228 for (i = 0; i < 8; i++)
229 gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
230 }
231 gr_def(ctx, 0x400880, 0x00000040);
232 gr_def(ctx, 0x400884, 0x00000040);
233 gr_def(ctx, 0x400888, 0x00000040);
234 cp_ctx(ctx, 0x400894, 11);
235 gr_def(ctx, 0x400894, 0x00000040);
236 if (nv40_graph_4097(ctx->dev)) {
237 for (i = 0; i < 8; i++)
238 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
239 }
240 cp_ctx(ctx, 0x4008e0, 2);
241 cp_ctx(ctx, 0x4008f8, 2);
242 if (dev_priv->chipset == 0x4c ||
243 (dev_priv->chipset & 0xf0) == 0x60)
244 cp_ctx(ctx, 0x4009f8, 1);
245 }
246 cp_ctx(ctx, 0x400a00, 73);
247 gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
248 cp_ctx(ctx, 0x401000, 4);
249 cp_ctx(ctx, 0x405004, 1);
250 switch (dev_priv->chipset) {
251 case 0x47:
252 case 0x49:
253 case 0x4b:
254 cp_ctx(ctx, 0x403448, 1);
255 gr_def(ctx, 0x403448, 0x00001010);
256 break;
257 default:
258 cp_ctx(ctx, 0x403440, 1);
259 switch (dev_priv->chipset) {
260 case 0x40:
261 gr_def(ctx, 0x403440, 0x00000010);
262 break;
263 case 0x44:
264 case 0x46:
265 case 0x4a:
266 gr_def(ctx, 0x403440, 0x00003010);
267 break;
268 case 0x41:
269 case 0x42:
270 case 0x43:
271 case 0x4c:
272 case 0x4e:
273 case 0x67:
274 default:
275 gr_def(ctx, 0x403440, 0x00001010);
276 break;
277 }
278 break;
279 }
280}
281
282static void
283nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
284{
285 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
286 int i;
287
288 if (dev_priv->chipset == 0x40) {
289 cp_ctx(ctx, 0x401880, 51);
290 gr_def(ctx, 0x401940, 0x00000100);
291 } else
292 if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
293 dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
294 cp_ctx(ctx, 0x401880, 32);
295 for (i = 0; i < 16; i++)
296 gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
297 if (dev_priv->chipset == 0x46)
298 cp_ctx(ctx, 0x401900, 16);
299 cp_ctx(ctx, 0x401940, 3);
300 }
301 cp_ctx(ctx, 0x40194c, 18);
302 gr_def(ctx, 0x401954, 0x00000111);
303 gr_def(ctx, 0x401958, 0x00080060);
304 gr_def(ctx, 0x401974, 0x00000080);
305 gr_def(ctx, 0x401978, 0xffff0000);
306 gr_def(ctx, 0x40197c, 0x00000001);
307 gr_def(ctx, 0x401990, 0x46400000);
308 if (dev_priv->chipset == 0x40) {
309 cp_ctx(ctx, 0x4019a0, 2);
310 cp_ctx(ctx, 0x4019ac, 5);
311 } else {
312 cp_ctx(ctx, 0x4019a0, 1);
313 cp_ctx(ctx, 0x4019b4, 3);
314 }
315 gr_def(ctx, 0x4019bc, 0xffff0000);
316 switch (dev_priv->chipset) {
317 case 0x46:
318 case 0x47:
319 case 0x49:
320 case 0x4b:
321 cp_ctx(ctx, 0x4019c0, 18);
322 for (i = 0; i < 16; i++)
323 gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888);
324 break;
325 }
326 cp_ctx(ctx, 0x401a08, 8);
327 gr_def(ctx, 0x401a10, 0x0fff0000);
328 gr_def(ctx, 0x401a14, 0x0fff0000);
329 gr_def(ctx, 0x401a1c, 0x00011100);
330 cp_ctx(ctx, 0x401a2c, 4);
331 cp_ctx(ctx, 0x401a44, 26);
332 for (i = 0; i < 16; i++)
333 gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
334 gr_def(ctx, 0x401a8c, 0x4b7fffff);
335 if (dev_priv->chipset == 0x40) {
336 cp_ctx(ctx, 0x401ab8, 3);
337 } else {
338 cp_ctx(ctx, 0x401ab8, 1);
339 cp_ctx(ctx, 0x401ac0, 1);
340 }
341 cp_ctx(ctx, 0x401ad0, 8);
342 gr_def(ctx, 0x401ad0, 0x30201000);
343 gr_def(ctx, 0x401ad4, 0x70605040);
344 gr_def(ctx, 0x401ad8, 0xb8a89888);
345 gr_def(ctx, 0x401adc, 0xf8e8d8c8);
346 cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
347 gr_def(ctx, 0x401b10, 0x40100000);
348 cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
349 gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
350 0x00000004 : 0x00000000);
351 cp_ctx(ctx, 0x401b30, 25);
352 gr_def(ctx, 0x401b34, 0x0000ffff);
353 gr_def(ctx, 0x401b68, 0x435185d6);
354 gr_def(ctx, 0x401b6c, 0x2155b699);
355 gr_def(ctx, 0x401b70, 0xfedcba98);
356 gr_def(ctx, 0x401b74, 0x00000098);
357 gr_def(ctx, 0x401b84, 0xffffffff);
358 gr_def(ctx, 0x401b88, 0x00ff7000);
359 gr_def(ctx, 0x401b8c, 0x0000ffff);
360 if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
361 dev_priv->chipset != 0x4e)
362 cp_ctx(ctx, 0x401b94, 1);
363 cp_ctx(ctx, 0x401b98, 8);
364 gr_def(ctx, 0x401b9c, 0x00ff0000);
365 cp_ctx(ctx, 0x401bc0, 9);
366 gr_def(ctx, 0x401be0, 0x00ffff00);
367 cp_ctx(ctx, 0x401c00, 192);
368 for (i = 0; i < 16; i++) { /* fragment texture units */
369 gr_def(ctx, 0x401c40 + (i * 4), 0x00018488);
370 gr_def(ctx, 0x401c80 + (i * 4), 0x00028202);
371 gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4);
372 gr_def(ctx, 0x401d40 + (i * 4), 0x01012000);
373 gr_def(ctx, 0x401d80 + (i * 4), 0x00080008);
374 gr_def(ctx, 0x401e00 + (i * 4), 0x00100008);
375 }
376 for (i = 0; i < 4; i++) { /* vertex texture units */
377 gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80);
378 gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202);
379 gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008);
380 gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008);
381 }
382 cp_ctx(ctx, 0x400f5c, 3);
383 gr_def(ctx, 0x400f5c, 0x00000002);
384 cp_ctx(ctx, 0x400f84, 1);
385}
386
387static void
388nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
389{
390 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
391 int i;
392
393 cp_ctx(ctx, 0x402000, 1);
394 cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
395 switch (dev_priv->chipset) {
396 case 0x40:
397 gr_def(ctx, 0x402404, 0x00000001);
398 break;
399 case 0x4c:
400 case 0x4e:
401 case 0x67:
402 gr_def(ctx, 0x402404, 0x00000020);
403 break;
404 case 0x46:
405 case 0x49:
406 case 0x4b:
407 gr_def(ctx, 0x402404, 0x00000421);
408 break;
409 default:
410 gr_def(ctx, 0x402404, 0x00000021);
411 }
412 if (dev_priv->chipset != 0x40)
413 gr_def(ctx, 0x402408, 0x030c30c3);
414 switch (dev_priv->chipset) {
415 case 0x44:
416 case 0x46:
417 case 0x4a:
418 case 0x4c:
419 case 0x4e:
420 case 0x67:
421 cp_ctx(ctx, 0x402440, 1);
422 gr_def(ctx, 0x402440, 0x00011001);
423 break;
424 default:
425 break;
426 }
427 cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
428 gr_def(ctx, 0x402488, 0x3e020200);
429 gr_def(ctx, 0x40248c, 0x00ffffff);
430 switch (dev_priv->chipset) {
431 case 0x40:
432 gr_def(ctx, 0x402490, 0x60103f00);
433 break;
434 case 0x47:
435 gr_def(ctx, 0x402490, 0x40103f00);
436 break;
437 case 0x41:
438 case 0x42:
439 case 0x49:
440 case 0x4b:
441 gr_def(ctx, 0x402490, 0x20103f00);
442 break;
443 default:
444 gr_def(ctx, 0x402490, 0x0c103f00);
445 break;
446 }
447 gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
448 0x00020000 : 0x00040000);
449 cp_ctx(ctx, 0x402500, 31);
450 gr_def(ctx, 0x402530, 0x00008100);
451 if (dev_priv->chipset == 0x40)
452 cp_ctx(ctx, 0x40257c, 6);
453 cp_ctx(ctx, 0x402594, 16);
454 cp_ctx(ctx, 0x402800, 17);
455 gr_def(ctx, 0x402800, 0x00000001);
456 switch (dev_priv->chipset) {
457 case 0x47:
458 case 0x49:
459 case 0x4b:
460 cp_ctx(ctx, 0x402864, 1);
461 gr_def(ctx, 0x402864, 0x00001001);
462 cp_ctx(ctx, 0x402870, 3);
463 gr_def(ctx, 0x402878, 0x00000003);
464 if (dev_priv->chipset != 0x47) { /* belong at end!! */
465 cp_ctx(ctx, 0x402900, 1);
466 cp_ctx(ctx, 0x402940, 1);
467 cp_ctx(ctx, 0x402980, 1);
468 cp_ctx(ctx, 0x4029c0, 1);
469 cp_ctx(ctx, 0x402a00, 1);
470 cp_ctx(ctx, 0x402a40, 1);
471 cp_ctx(ctx, 0x402a80, 1);
472 cp_ctx(ctx, 0x402ac0, 1);
473 }
474 break;
475 case 0x40:
476 cp_ctx(ctx, 0x402844, 1);
477 gr_def(ctx, 0x402844, 0x00000001);
478 cp_ctx(ctx, 0x402850, 1);
479 break;
480 default:
481 cp_ctx(ctx, 0x402844, 1);
482 gr_def(ctx, 0x402844, 0x00001001);
483 cp_ctx(ctx, 0x402850, 2);
484 gr_def(ctx, 0x402854, 0x00000003);
485 break;
486 }
487
488 cp_ctx(ctx, 0x402c00, 4);
489 gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
490 0x80800001 : 0x00888001);
491 switch (dev_priv->chipset) {
492 case 0x47:
493 case 0x49:
494 case 0x4b:
495 cp_ctx(ctx, 0x402c20, 40);
496 for (i = 0; i < 32; i++)
497 gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff);
498 cp_ctx(ctx, 0x4030b8, 13);
499 gr_def(ctx, 0x4030dc, 0x00000005);
500 gr_def(ctx, 0x4030e8, 0x0000ffff);
501 break;
502 default:
503 cp_ctx(ctx, 0x402c10, 4);
504 if (dev_priv->chipset == 0x40)
505 cp_ctx(ctx, 0x402c20, 36);
506 else
507 if (dev_priv->chipset <= 0x42)
508 cp_ctx(ctx, 0x402c20, 24);
509 else
510 if (dev_priv->chipset <= 0x4a)
511 cp_ctx(ctx, 0x402c20, 16);
512 else
513 cp_ctx(ctx, 0x402c20, 8);
514 cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
515 gr_def(ctx, 0x402cd4, 0x00000005);
516 if (dev_priv->chipset != 0x40)
517 gr_def(ctx, 0x402ce0, 0x0000ffff);
518 break;
519 }
520
521 cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
522 cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
523 cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
524 for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
525 gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
526
527 if (dev_priv->chipset != 0x40) {
528 cp_ctx(ctx, 0x403600, 1);
529 gr_def(ctx, 0x403600, 0x00000001);
530 }
531 cp_ctx(ctx, 0x403800, 1);
532
533 cp_ctx(ctx, 0x403c18, 1);
534 gr_def(ctx, 0x403c18, 0x00000001);
535 switch (dev_priv->chipset) {
536 case 0x46:
537 case 0x47:
538 case 0x49:
539 case 0x4b:
540 cp_ctx(ctx, 0x405018, 1);
541 gr_def(ctx, 0x405018, 0x08e00001);
542 cp_ctx(ctx, 0x405c24, 1);
543 gr_def(ctx, 0x405c24, 0x000e3000);
544 break;
545 }
546 if (dev_priv->chipset != 0x4e)
547 cp_ctx(ctx, 0x405800, 11);
548 cp_ctx(ctx, 0x407000, 1);
549}
550
551static void
552nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
553{
554 int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
555
556 cp_out (ctx, 0x300000);
557 cp_lsr (ctx, len - 4);
558 cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save);
559 cp_lsr (ctx, len);
560 cp_name(ctx, cp_swap_state3d_3_is_save);
561 cp_out (ctx, 0x800001);
562
563 ctx->ctxvals_pos += len;
564}
565
566static void
567nv40_graph_construct_shader(struct nouveau_grctx *ctx)
568{
569 struct drm_device *dev = ctx->dev;
570 struct drm_nouveau_private *dev_priv = dev->dev_private;
571 struct nouveau_gpuobj *obj = ctx->data;
572 int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
573 int offset, i;
574
575 vs_nr = nv40_graph_vs_count(ctx->dev);
576 vs_nr_b0 = 363;
577 vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
578 if (dev_priv->chipset == 0x40) {
579 b0_offset = 0x2200/4; /* 33a0 */
580 b1_offset = 0x55a0/4; /* 1500 */
581 vs_len = 0x6aa0/4;
582 } else
583 if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
584 b0_offset = 0x2200/4; /* 2200 */
585 b1_offset = 0x4400/4; /* 0b00 */
586 vs_len = 0x4f00/4;
587 } else {
588 b0_offset = 0x1d40/4; /* 2200 */
589 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
590 vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
591 }
592
593 cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
594 cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);
595
596 offset = ctx->ctxvals_pos;
597 ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
598
599 if (ctx->mode != NOUVEAU_GRCTX_VALS)
600 return;
601
602 offset += 0x0280/4;
603 for (i = 0; i < 16; i++, offset += 2)
604 nv_wo32(dev, obj, offset, 0x3f800000);
605
606 for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
607 for (i = 0; i < vs_nr_b0 * 6; i += 6)
608 nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001);
609 for (i = 0; i < vs_nr_b1 * 4; i += 4)
610 nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000);
611 }
612}
613
614void
615nv40_grctx_init(struct nouveau_grctx *ctx)
616{
617 /* decide whether we're loading/unloading the context */
618 cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
619 cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
620
621 cp_name(ctx, cp_check_load);
622 cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
623 cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
624 cp_bra (ctx, ALWAYS, TRUE, cp_exit);
625
626 /* setup for context load */
627 cp_name(ctx, cp_setup_auto_load);
628 cp_wait(ctx, STATUS, IDLE);
629 cp_out (ctx, CP_NEXT_TO_SWAP);
630 cp_name(ctx, cp_setup_load);
631 cp_wait(ctx, STATUS, IDLE);
632 cp_set (ctx, SWAP_DIRECTION, LOAD);
633 cp_out (ctx, 0x00910880); /* ?? */
634 cp_out (ctx, 0x00901ffe); /* ?? */
635 cp_out (ctx, 0x01940000); /* ?? */
636 cp_lsr (ctx, 0x20);
637 cp_out (ctx, 0x0060000b); /* ?? */
638 cp_wait(ctx, UNK57, CLEAR);
639 cp_out (ctx, 0x0060000c); /* ?? */
640 cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
641
642 /* setup for context save */
643 cp_name(ctx, cp_setup_save);
644 cp_set (ctx, SWAP_DIRECTION, SAVE);
645
646 /* general PGRAPH state */
647 cp_name(ctx, cp_swap_state);
648 cp_pos (ctx, 0x00020/4);
649 nv40_graph_construct_general(ctx);
650 cp_wait(ctx, STATUS, IDLE);
651
652 /* 3D state, block 1 */
653 cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit);
654 nv40_graph_construct_state3d(ctx);
655 cp_wait(ctx, STATUS, IDLE);
656
657 /* 3D state, block 2 */
658 nv40_graph_construct_state3d_2(ctx);
659
660 /* Some other block of "random" state */
661 nv40_graph_construct_state3d_3(ctx);
662
663 /* Per-vertex shader state */
664 cp_pos (ctx, ctx->ctxvals_pos);
665 nv40_graph_construct_shader(ctx);
666
667 /* pre-exit state updates */
668 cp_name(ctx, cp_prepare_exit);
669 cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
670 cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
671 cp_out (ctx, CP_NEXT_TO_CURRENT);
672
673 cp_name(ctx, cp_exit);
674 cp_set (ctx, USER_SAVE, NOT_PENDING);
675 cp_set (ctx, USER_LOAD, NOT_PENDING);
676 cp_out (ctx, CP_END);
677}
678
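The CP_* defines at the top of this new file describe the fields of each ctxprog instruction word; the emitter helpers (cp_ctx, cp_bra, cp_name and friends) come from nouveau_grctx.h, which is not part of this hunk. As an illustration only, and assuming the field layout implied by the masks, a conditional branch could be packed like this:

#include <stdio.h>
#include <stdint.h>

#define CP_BRA           0x00400000
#define CP_BRA_IP        0x0000ff00
#define CP_BRA_IP_SHIFT  8
#define CP_BRA_IF_CLEAR  0x00000080
#define CP_BRA_FLAG      0x0000007f

/* assumed packing: branch to instruction ip when the flag is set (or clear) */
static uint32_t cp_bra_opcode(unsigned int ip, unsigned int flag, int if_clear)
{
        uint32_t op = CP_BRA;

        op |= (ip << CP_BRA_IP_SHIFT) & CP_BRA_IP;
        op |= flag & CP_BRA_FLAG;
        if (if_clear)
                op |= CP_BRA_IF_CLEAR;
        return op;
}

int main(void)
{
        /* e.g. "branch to instruction 0x20 if flag 9 is clear" */
        printf("0x%08x\n", cp_bra_opcode(0x20, 9, 1));
        return 0;
}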
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index f8e28a1e44e7..118d3285fd8c 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -45,7 +45,7 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
45 void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); 45 void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
46 int i; 46 int i;
47 47
48 NV_DEBUG(crtc->dev, "\n"); 48 NV_DEBUG_KMS(crtc->dev, "\n");
49 49
50 for (i = 0; i < 256; i++) { 50 for (i = 0; i < 256; i++) {
51 writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0); 51 writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
@@ -68,8 +68,8 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
68 struct nouveau_channel *evo = dev_priv->evo; 68 struct nouveau_channel *evo = dev_priv->evo;
69 int index = nv_crtc->index, ret; 69 int index = nv_crtc->index, ret;
70 70
71 NV_DEBUG(dev, "index %d\n", nv_crtc->index); 71 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
72 NV_DEBUG(dev, "%s\n", blanked ? "blanked" : "unblanked"); 72 NV_DEBUG_KMS(dev, "%s\n", blanked ? "blanked" : "unblanked");
73 73
74 if (blanked) { 74 if (blanked) {
75 nv_crtc->cursor.hide(nv_crtc, false); 75 nv_crtc->cursor.hide(nv_crtc, false);
@@ -139,7 +139,7 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
139 struct nouveau_channel *evo = dev_priv->evo; 139 struct nouveau_channel *evo = dev_priv->evo;
140 int ret; 140 int ret;
141 141
142 NV_DEBUG(dev, "\n"); 142 NV_DEBUG_KMS(dev, "\n");
143 143
144 ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); 144 ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
145 if (ret) { 145 if (ret) {
@@ -193,7 +193,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
193 uint32_t outX, outY, horiz, vert; 193 uint32_t outX, outY, horiz, vert;
194 int ret; 194 int ret;
195 195
196 NV_DEBUG(dev, "\n"); 196 NV_DEBUG_KMS(dev, "\n");
197 197
198 switch (scaling_mode) { 198 switch (scaling_mode) {
199 case DRM_MODE_SCALE_NONE: 199 case DRM_MODE_SCALE_NONE:
@@ -301,7 +301,7 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
301 struct drm_device *dev = crtc->dev; 301 struct drm_device *dev = crtc->dev;
302 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 302 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
303 303
304 NV_DEBUG(dev, "\n"); 304 NV_DEBUG_KMS(dev, "\n");
305 305
306 if (!crtc) 306 if (!crtc)
307 return; 307 return;
@@ -433,7 +433,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
433 struct drm_device *dev = crtc->dev; 433 struct drm_device *dev = crtc->dev;
434 struct drm_encoder *encoder; 434 struct drm_encoder *encoder;
435 435
436 NV_DEBUG(dev, "index %d\n", nv_crtc->index); 436 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
437 437
438 /* Disconnect all unused encoders. */ 438 /* Disconnect all unused encoders. */
439 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 439 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -458,7 +458,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
458 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 458 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
459 int ret; 459 int ret;
460 460
461 NV_DEBUG(dev, "index %d\n", nv_crtc->index); 461 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
462 462
463 nv50_crtc_blank(nv_crtc, false); 463 nv50_crtc_blank(nv_crtc, false);
464 464
@@ -497,7 +497,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
497 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); 497 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
498 int ret, format; 498 int ret, format;
499 499
500 NV_DEBUG(dev, "index %d\n", nv_crtc->index); 500 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
501 501
502 switch (drm_fb->depth) { 502 switch (drm_fb->depth) {
503 case 8: 503 case 8:
@@ -612,7 +612,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
612 612
613 *nv_crtc->mode = *adjusted_mode; 613 *nv_crtc->mode = *adjusted_mode;
614 614
615 NV_DEBUG(dev, "index %d\n", nv_crtc->index); 615 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
616 616
617 hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start; 617 hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
618 vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start; 618 vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
@@ -706,7 +706,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
706 struct nouveau_crtc *nv_crtc = NULL; 706 struct nouveau_crtc *nv_crtc = NULL;
707 int ret, i; 707 int ret, i;
708 708
709 NV_DEBUG(dev, "\n"); 709 NV_DEBUG_KMS(dev, "\n");
710 710
711 nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); 711 nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
712 if (!nv_crtc) 712 if (!nv_crtc)
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index e2e79a8f220d..753e723adb3a 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -41,7 +41,7 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
41 struct drm_device *dev = nv_crtc->base.dev; 41 struct drm_device *dev = nv_crtc->base.dev;
42 int ret; 42 int ret;
43 43
44 NV_DEBUG(dev, "\n"); 44 NV_DEBUG_KMS(dev, "\n");
45 45
46 if (update && nv_crtc->cursor.visible) 46 if (update && nv_crtc->cursor.visible)
47 return; 47 return;
@@ -76,7 +76,7 @@ nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
76 struct drm_device *dev = nv_crtc->base.dev; 76 struct drm_device *dev = nv_crtc->base.dev;
77 int ret; 77 int ret;
78 78
79 NV_DEBUG(dev, "\n"); 79 NV_DEBUG_KMS(dev, "\n");
80 80
81 if (update && !nv_crtc->cursor.visible) 81 if (update && !nv_crtc->cursor.visible)
82 return; 82 return;
@@ -116,7 +116,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
116static void 116static void
117nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) 117nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
118{ 118{
119 NV_DEBUG(nv_crtc->base.dev, "\n"); 119 NV_DEBUG_KMS(nv_crtc->base.dev, "\n");
120 if (offset == nv_crtc->cursor.offset) 120 if (offset == nv_crtc->cursor.offset)
121 return; 121 return;
122 122
@@ -143,7 +143,7 @@ nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
143 struct drm_device *dev = nv_crtc->base.dev; 143 struct drm_device *dev = nv_crtc->base.dev;
144 int idx = nv_crtc->index; 144 int idx = nv_crtc->index;
145 145
146 NV_DEBUG(dev, "\n"); 146 NV_DEBUG_KMS(dev, "\n");
147 147
148 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0); 148 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
149 if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 149 if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index fb5838e3be24..f08f042a8e10 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -44,7 +44,7 @@ nv50_dac_disconnect(struct nouveau_encoder *nv_encoder)
44 struct nouveau_channel *evo = dev_priv->evo; 44 struct nouveau_channel *evo = dev_priv->evo;
45 int ret; 45 int ret;
46 46
47 NV_DEBUG(dev, "Disconnecting DAC %d\n", nv_encoder->or); 47 NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);
48 48
49 ret = RING_SPACE(evo, 2); 49 ret = RING_SPACE(evo, 2);
50 if (ret) { 50 if (ret) {
@@ -81,11 +81,11 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
81 /* Use bios provided value if possible. */ 81 /* Use bios provided value if possible. */
82 if (dev_priv->vbios->dactestval) { 82 if (dev_priv->vbios->dactestval) {
83 load_pattern = dev_priv->vbios->dactestval; 83 load_pattern = dev_priv->vbios->dactestval;
84 NV_DEBUG(dev, "Using bios provided load_pattern of %d\n", 84 NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
85 load_pattern); 85 load_pattern);
86 } else { 86 } else {
87 load_pattern = 340; 87 load_pattern = 340;
88 NV_DEBUG(dev, "Using default load_pattern of %d\n", 88 NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n",
89 load_pattern); 89 load_pattern);
90 } 90 }
91 91
@@ -103,9 +103,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
103 status = connector_status_connected; 103 status = connector_status_connected;
104 104
105 if (status == connector_status_connected) 105 if (status == connector_status_connected)
106 NV_DEBUG(dev, "Load was detected on output with or %d\n", or); 106 NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or);
107 else 107 else
108 NV_DEBUG(dev, "Load was not detected on output with or %d\n", or); 108 NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or);
109 109
110 return status; 110 return status;
111} 111}
@@ -118,7 +118,7 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
118 uint32_t val; 118 uint32_t val;
119 int or = nv_encoder->or; 119 int or = nv_encoder->or;
120 120
121 NV_DEBUG(dev, "or %d mode %d\n", or, mode); 121 NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
122 122
123 /* wait for it to be done */ 123 /* wait for it to be done */
124 if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or), 124 if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
@@ -173,7 +173,7 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
173 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 173 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
174 struct nouveau_connector *connector; 174 struct nouveau_connector *connector;
175 175
176 NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or); 176 NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
177 177
178 connector = nouveau_encoder_connector_get(nv_encoder); 178 connector = nouveau_encoder_connector_get(nv_encoder);
179 if (!connector) { 179 if (!connector) {
@@ -213,7 +213,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
213 uint32_t mode_ctl = 0, mode_ctl2 = 0; 213 uint32_t mode_ctl = 0, mode_ctl2 = 0;
214 int ret; 214 int ret;
215 215
216 NV_DEBUG(dev, "or %d\n", nv_encoder->or); 216 NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
217 217
218 nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); 218 nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
219 219
@@ -264,7 +264,7 @@ nv50_dac_destroy(struct drm_encoder *encoder)
264 if (!encoder) 264 if (!encoder)
265 return; 265 return;
266 266
267 NV_DEBUG(encoder->dev, "\n"); 267 NV_DEBUG_KMS(encoder->dev, "\n");
268 268
269 drm_encoder_cleanup(encoder); 269 drm_encoder_cleanup(encoder);
270 kfree(nv_encoder); 270 kfree(nv_encoder);
@@ -280,7 +280,7 @@ nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
280 struct nouveau_encoder *nv_encoder; 280 struct nouveau_encoder *nv_encoder;
281 struct drm_encoder *encoder; 281 struct drm_encoder *encoder;
282 282
283 NV_DEBUG(dev, "\n"); 283 NV_DEBUG_KMS(dev, "\n");
284 NV_INFO(dev, "Detected a DAC output\n"); 284 NV_INFO(dev, "Detected a DAC output\n");
285 285
286 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); 286 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 12c5ee63495b..a9263d92a231 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -188,7 +188,7 @@ nv50_display_init(struct drm_device *dev)
188 uint64_t start; 188 uint64_t start;
189 int ret, i; 189 int ret, i;
190 190
191 NV_DEBUG(dev, "\n"); 191 NV_DEBUG_KMS(dev, "\n");
192 192
193 nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004)); 193 nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
194 /* 194 /*
@@ -232,7 +232,7 @@ nv50_display_init(struct drm_device *dev)
232 nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); 232 nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
233 /* RAM is clamped to 256 MiB. */ 233 /* RAM is clamped to 256 MiB. */
234 ram_amount = nouveau_mem_fb_amount(dev); 234 ram_amount = nouveau_mem_fb_amount(dev);
235 NV_DEBUG(dev, "ram_amount %d\n", ram_amount); 235 NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
236 if (ram_amount > 256*1024*1024) 236 if (ram_amount > 256*1024*1024)
237 ram_amount = 256*1024*1024; 237 ram_amount = 256*1024*1024;
238 nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1); 238 nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
@@ -398,7 +398,7 @@ static int nv50_display_disable(struct drm_device *dev)
398 struct drm_crtc *drm_crtc; 398 struct drm_crtc *drm_crtc;
399 int ret, i; 399 int ret, i;
400 400
401 NV_DEBUG(dev, "\n"); 401 NV_DEBUG_KMS(dev, "\n");
402 402
403 list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) { 403 list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
404 struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc); 404 struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
@@ -469,7 +469,7 @@ int nv50_display_create(struct drm_device *dev)
469 uint32_t connector[16] = {}; 469 uint32_t connector[16] = {};
470 int ret, i; 470 int ret, i;
471 471
472 NV_DEBUG(dev, "\n"); 472 NV_DEBUG_KMS(dev, "\n");
473 473
474 /* init basic kernel modesetting */ 474 /* init basic kernel modesetting */
475 drm_mode_config_init(dev); 475 drm_mode_config_init(dev);
@@ -573,7 +573,7 @@ int nv50_display_destroy(struct drm_device *dev)
573{ 573{
574 struct drm_nouveau_private *dev_priv = dev->dev_private; 574 struct drm_nouveau_private *dev_priv = dev->dev_private;
575 575
576 NV_DEBUG(dev, "\n"); 576 NV_DEBUG_KMS(dev, "\n");
577 577
578 drm_mode_config_cleanup(dev); 578 drm_mode_config_cleanup(dev);
579 579
@@ -617,7 +617,7 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
617 * CRTC separately, and submission will be blocked by the GPU 617 * CRTC separately, and submission will be blocked by the GPU
618 * until we handle each in turn. 618 * until we handle each in turn.
619 */ 619 */
620 NV_DEBUG(dev, "0x610030: 0x%08x\n", unk30); 620 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
621 head = ffs((unk30 >> 9) & 3) - 1; 621 head = ffs((unk30 >> 9) & 3) - 1;
622 if (head < 0) 622 if (head < 0)
623 return -EINVAL; 623 return -EINVAL;
@@ -661,7 +661,7 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
661 or = i; 661 or = i;
662 } 662 }
663 663
664 NV_DEBUG(dev, "type %d, or %d\n", type, or); 664 NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or);
665 if (type == OUTPUT_ANY) { 665 if (type == OUTPUT_ANY) {
666 NV_ERROR(dev, "unknown encoder!!\n"); 666 NV_ERROR(dev, "unknown encoder!!\n");
667 return -1; 667 return -1;
@@ -811,7 +811,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
811 pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff; 811 pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
812 script = nv50_display_script_select(dev, dcbent, pclk); 812 script = nv50_display_script_select(dev, dcbent, pclk);
813 813
814 NV_DEBUG(dev, "head %d pxclk: %dKHz\n", head, pclk); 814 NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk);
815 815
816 if (dcbent->type != OUTPUT_DP) 816 if (dcbent->type != OUTPUT_DP)
817 nouveau_bios_run_display_table(dev, dcbent, 0, -2); 817 nouveau_bios_run_display_table(dev, dcbent, 0, -2);
@@ -870,7 +870,7 @@ nv50_display_irq_handler_bh(struct work_struct *work)
870 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); 870 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
871 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); 871 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
872 872
873 NV_DEBUG(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1); 873 NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
874 874
875 if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10) 875 if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
876 nv50_display_unk10_handler(dev); 876 nv50_display_unk10_handler(dev);
@@ -974,7 +974,7 @@ nv50_display_irq_handler(struct drm_device *dev)
974 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); 974 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
975 uint32_t clock; 975 uint32_t clock;
976 976
977 NV_DEBUG(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1); 977 NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
978 978
979 if (!intr0 && !(intr1 & ~delayed)) 979 if (!intr0 && !(intr1 & ~delayed))
980 break; 980 break;
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 77ae1aaa0bce..b7282284f080 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -416,7 +416,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
416 NV_DEBUG(dev, "\n"); 416 NV_DEBUG(dev, "\n");
417 417
418 chid = pfifo->channel_id(dev); 418 chid = pfifo->channel_id(dev);
419 if (chid < 0 || chid >= dev_priv->engine.fifo.channels) 419 if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
420 return 0; 420 return 0;
421 421
422 chan = dev_priv->fifos[chid]; 422 chan = dev_priv->fifos[chid];
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 177d8229336f..ca79f32be44c 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -107,9 +107,13 @@ nv50_graph_init_regs(struct drm_device *dev)
107static int 107static int
108nv50_graph_init_ctxctl(struct drm_device *dev) 108nv50_graph_init_ctxctl(struct drm_device *dev)
109{ 109{
110 struct drm_nouveau_private *dev_priv = dev->dev_private;
111
110 NV_DEBUG(dev, "\n"); 112 NV_DEBUG(dev, "\n");
111 113
112 nv40_grctx_init(dev); 114 nouveau_grctx_prog_load(dev);
115 if (!dev_priv->engine.graph.ctxprog)
116 dev_priv->engine.graph.accel_blocked = true;
113 117
114 nv_wr32(dev, 0x400320, 4); 118 nv_wr32(dev, 0x400320, 4);
115 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0); 119 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
@@ -140,7 +144,7 @@ void
140nv50_graph_takedown(struct drm_device *dev) 144nv50_graph_takedown(struct drm_device *dev)
141{ 145{
142 NV_DEBUG(dev, "\n"); 146 NV_DEBUG(dev, "\n");
143 nv40_grctx_fini(dev); 147 nouveau_grctx_fini(dev);
144} 148}
145 149
146void 150void
@@ -207,7 +211,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
207 dev_priv->engine.instmem.finish_access(dev); 211 dev_priv->engine.instmem.finish_access(dev);
208 212
209 dev_priv->engine.instmem.prepare_access(dev, true); 213 dev_priv->engine.instmem.prepare_access(dev, true);
210 nv40_grctx_vals_load(dev, ctx); 214 nouveau_grctx_vals_load(dev, ctx);
211 nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); 215 nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
212 if ((dev_priv->chipset & 0xf0) == 0xa0) 216 if ((dev_priv->chipset & 0xf0) == 0xa0)
213 nv_wo32(dev, ctx, 0x00004/4, 0x00000000); 217 nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 8c280463a664..e395c16d30f5 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -44,7 +44,7 @@ nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
44 struct nouveau_channel *evo = dev_priv->evo; 44 struct nouveau_channel *evo = dev_priv->evo;
45 int ret; 45 int ret;
46 46
47 NV_DEBUG(dev, "Disconnecting SOR %d\n", nv_encoder->or); 47 NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or);
48 48
49 ret = RING_SPACE(evo, 2); 49 ret = RING_SPACE(evo, 2);
50 if (ret) { 50 if (ret) {
@@ -70,7 +70,7 @@ nv50_sor_dp_link_train(struct drm_encoder *encoder)
70 } 70 }
71 71
72 if (dpe->script0) { 72 if (dpe->script0) {
73 NV_DEBUG(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); 73 NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
74 nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0), 74 nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
75 nv_encoder->dcb); 75 nv_encoder->dcb);
76 } 76 }
@@ -79,7 +79,7 @@ nv50_sor_dp_link_train(struct drm_encoder *encoder)
79 NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or); 79 NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);
80 80
81 if (dpe->script1) { 81 if (dpe->script1) {
82 NV_DEBUG(dev, "SOR-%d: running DP script 1\n", nv_encoder->or); 82 NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
83 nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1), 83 nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
84 nv_encoder->dcb); 84 nv_encoder->dcb);
85 } 85 }
@@ -93,7 +93,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
93 uint32_t val; 93 uint32_t val;
94 int or = nv_encoder->or; 94 int or = nv_encoder->or;
95 95
96 NV_DEBUG(dev, "or %d mode %d\n", or, mode); 96 NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
97 97
98 /* wait for it to be done */ 98 /* wait for it to be done */
99 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), 99 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
@@ -142,7 +142,7 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
142 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 142 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
143 struct nouveau_connector *connector; 143 struct nouveau_connector *connector;
144 144
145 NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or); 145 NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
146 146
147 connector = nouveau_encoder_connector_get(nv_encoder); 147 connector = nouveau_encoder_connector_get(nv_encoder);
148 if (!connector) { 148 if (!connector) {
@@ -182,7 +182,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
182 uint32_t mode_ctl = 0; 182 uint32_t mode_ctl = 0;
183 int ret; 183 int ret;
184 184
185 NV_DEBUG(dev, "or %d\n", nv_encoder->or); 185 NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
186 186
187 nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); 187 nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
188 188
@@ -246,7 +246,7 @@ nv50_sor_destroy(struct drm_encoder *encoder)
246 if (!encoder) 246 if (!encoder)
247 return; 247 return;
248 248
249 NV_DEBUG(encoder->dev, "\n"); 249 NV_DEBUG_KMS(encoder->dev, "\n");
250 250
251 drm_encoder_cleanup(encoder); 251 drm_encoder_cleanup(encoder);
252 252
@@ -265,7 +265,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
265 bool dum; 265 bool dum;
266 int type; 266 int type;
267 267
268 NV_DEBUG(dev, "\n"); 268 NV_DEBUG_KMS(dev, "\n");
269 269
270 switch (entry->type) { 270 switch (entry->type) {
271 case OUTPUT_TMDS: 271 case OUTPUT_TMDS:
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 601f4c0e5da5..b806fdcc7170 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -64,7 +64,7 @@ static struct drm_driver driver = {
64 .owner = THIS_MODULE, 64 .owner = THIS_MODULE,
65 .open = drm_open, 65 .open = drm_open,
66 .release = drm_release, 66 .release = drm_release,
67 .ioctl = drm_ioctl, 67 .unlocked_ioctl = drm_ioctl,
68 .mmap = drm_mmap, 68 .mmap = drm_mmap,
69 .poll = drm_poll, 69 .poll = drm_poll,
70 .fasync = drm_fasync, 70 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index d3cb676eee84..51c99fc4dd38 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -95,8 +95,7 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
95 &init->agp_textures_offset)) 95 &init->agp_textures_offset))
96 return -EFAULT; 96 return -EFAULT;
97 97
98 return drm_ioctl(file->f_path.dentry->d_inode, file, 98 return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init);
99 DRM_IOCTL_R128_INIT, (unsigned long)init);
100} 99}
101 100
102typedef struct drm_r128_depth32 { 101typedef struct drm_r128_depth32 {
@@ -129,8 +128,7 @@ static int compat_r128_depth(struct file *file, unsigned int cmd,
129 &depth->mask)) 128 &depth->mask))
130 return -EFAULT; 129 return -EFAULT;
131 130
132 return drm_ioctl(file->f_path.dentry->d_inode, file, 131 return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
133 DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
134 132
135} 133}
136 134
@@ -153,8 +151,7 @@ static int compat_r128_stipple(struct file *file, unsigned int cmd,
153 &stipple->mask)) 151 &stipple->mask))
154 return -EFAULT; 152 return -EFAULT;
155 153
156 return drm_ioctl(file->f_path.dentry->d_inode, file, 154 return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
157 DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
158} 155}
159 156
160typedef struct drm_r128_getparam32 { 157typedef struct drm_r128_getparam32 {
@@ -178,8 +175,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
178 &getparam->value)) 175 &getparam->value))
179 return -EFAULT; 176 return -EFAULT;
180 177
181 return drm_ioctl(file->f_path.dentry->d_inode, file, 178 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
182 DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
183} 179}
184 180
185drm_ioctl_compat_t *r128_compat_ioctls[] = { 181drm_ioctl_compat_t *r128_compat_ioctls[] = {
@@ -210,12 +206,10 @@ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
210 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) 206 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
211 fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE]; 207 fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
212 208
213 lock_kernel(); /* XXX for now */
214 if (fn != NULL) 209 if (fn != NULL)
215 ret = (*fn) (filp, cmd, arg); 210 ret = (*fn) (filp, cmd, arg);
216 else 211 else
217 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); 212 ret = drm_ioctl(filp, cmd, arg);
218 unlock_kernel();
219 213
220 return ret; 214 return ret;
221} 215}
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 6578d19dff93..388140a7e651 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -58,6 +58,7 @@ typedef struct {
58} atom_exec_context; 58} atom_exec_context;
59 59
60int atom_debug = 0; 60int atom_debug = 0;
61static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
61void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); 62void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
62 63
63static uint32_t atom_arg_mask[8] = 64static uint32_t atom_arg_mask[8] =
@@ -573,7 +574,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
573 else 574 else
574 SDEBUG(" table: %d\n", idx); 575 SDEBUG(" table: %d\n", idx);
575 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) 576 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
576 atom_execute_table(ctx->ctx, idx, ctx->ps + ctx->ps_shift); 577 atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
577} 578}
578 579
579static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) 580static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@ -1040,7 +1041,7 @@ static struct {
1040 atom_op_shr, ATOM_ARG_MC}, { 1041 atom_op_shr, ATOM_ARG_MC}, {
1041atom_op_debug, 0},}; 1042atom_op_debug, 0},};
1042 1043
1043void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) 1044static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
1044{ 1045{
1045 int base = CU16(ctx->cmd_table + 4 + 2 * index); 1046 int base = CU16(ctx->cmd_table + 4 + 2 * index);
1046 int len, ws, ps, ptr; 1047 int len, ws, ps, ptr;
@@ -1092,6 +1093,13 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1092 kfree(ectx.ws); 1093 kfree(ectx.ws);
1093} 1094}
1094 1095
1096void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1097{
1098 mutex_lock(&ctx->mutex);
1099 atom_execute_table_locked(ctx, index, params);
1100 mutex_unlock(&ctx->mutex);
1101}
1102
1095static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; 1103static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1096 1104
1097static void atom_index_iio(struct atom_context *ctx, int base) 1105static void atom_index_iio(struct atom_context *ctx, int base)
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index 6671848e5ea1..47fd943f6d14 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -120,6 +120,7 @@ struct card_info {
120 120
121struct atom_context { 121struct atom_context {
122 struct card_info *card; 122 struct card_info *card;
123 struct mutex mutex;
123 void *bios; 124 void *bios;
124 uint32_t cmd_table, data_table; 125 uint32_t cmd_table, data_table;
125 uint16_t *iio; 126 uint16_t *iio;
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 5f48515c77a7..91ad0d1c1b17 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4690,6 +4690,205 @@ typedef struct _ATOM_POWERPLAY_INFO_V3 {
4690 ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; 4690 ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
4691} ATOM_POWERPLAY_INFO_V3; 4691} ATOM_POWERPLAY_INFO_V3;
4692 4692
4693/* New PPlib */
4694/**************************************************************************/
4695typedef struct _ATOM_PPLIB_THERMALCONTROLLER
4696
4697{
4698 UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
4699 UCHAR ucI2cLine; // as interpreted by DAL I2C
4700 UCHAR ucI2cAddress;
4701 UCHAR ucFanParameters; // Fan Control Parameters.
4702 UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
4703 UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
4704 UCHAR ucReserved; // ----
4705 UCHAR ucFlags; // to be defined
4706} ATOM_PPLIB_THERMALCONTROLLER;
4707
4708#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
4709#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
4710
4711#define ATOM_PP_THERMALCONTROLLER_NONE 0
4712#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
4713#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
4714#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
4715#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
4716#define ATOM_PP_THERMALCONTROLLER_LM64 5
4717#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
4718#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
4719#define ATOM_PP_THERMALCONTROLLER_RV770 8
4720#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
4721
4722typedef struct _ATOM_PPLIB_STATE
4723{
4724 UCHAR ucNonClockStateIndex;
4725 UCHAR ucClockStateIndices[1]; // variable-sized
4726} ATOM_PPLIB_STATE;
4727
4728//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
4729#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
4730#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
4731#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
4732#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
4733#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
4734#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
4735#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
4736#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
4737#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
4738#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
4739#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
4740#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
4741
4742typedef struct _ATOM_PPLIB_POWERPLAYTABLE
4743{
4744 ATOM_COMMON_TABLE_HEADER sHeader;
4745
4746 UCHAR ucDataRevision;
4747
4748 UCHAR ucNumStates;
4749 UCHAR ucStateEntrySize;
4750 UCHAR ucClockInfoSize;
4751 UCHAR ucNonClockSize;
4752
4753 // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
4754 USHORT usStateArrayOffset;
4755
4756 // offset from start of this table to array of ASIC-specific structures,
4757 // currently ATOM_PPLIB_CLOCK_INFO.
4758 USHORT usClockInfoArrayOffset;
4759
4760 // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
4761 USHORT usNonClockInfoArrayOffset;
4762
4763 USHORT usBackbiasTime; // in microseconds
4764 USHORT usVoltageTime; // in microseconds
4765 USHORT usTableSize; //the size of this structure, or the extended structure
4766
4767 ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
4768
4769 ATOM_PPLIB_THERMALCONTROLLER sThermalController;
4770
4771 USHORT usBootClockInfoOffset;
4772 USHORT usBootNonClockInfoOffset;
4773
4774} ATOM_PPLIB_POWERPLAYTABLE;
4775
4776//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
4777#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
4778#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
4779#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
4780#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
4781#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
4782#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
4783// 2, 4, 6, 7 are reserved
4784
4785#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
4786#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
4787#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
4788#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
4789#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
4790#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
4791#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
4792#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
4793#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
4794#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
4795// remaining 3 bits are reserved
4796
4797//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
4798#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
4799#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
4800
4801// 0 is 2.5Gb/s, 1 is 5Gb/s
4802#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
4803#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
4804
4805// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
4806#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
4807#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
4808
4809// lookup into reduced refresh-rate table
4810#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
4811#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
4812
4813#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
4814#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
4815// 2-15 TBD as needed.
4816
4817#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
4818#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
4819#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
4820
4821#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
4822
4823// Contained in an array starting at the offset
4824// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
4825// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
4826typedef struct _ATOM_PPLIB_NONCLOCK_INFO
4827{
4828 USHORT usClassification;
4829 UCHAR ucMinTemperature;
4830 UCHAR ucMaxTemperature;
4831 ULONG ulCapsAndSettings;
4832 UCHAR ucRequiredPower;
4833 UCHAR ucUnused1[3];
4834} ATOM_PPLIB_NONCLOCK_INFO;
4835
4836// Contained in an array starting at the offset
4837// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
4838// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
4839typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
4840{
4841 USHORT usEngineClockLow;
4842 UCHAR ucEngineClockHigh;
4843
4844 USHORT usMemoryClockLow;
4845 UCHAR ucMemoryClockHigh;
4846
4847 USHORT usVDDC;
4848 USHORT usUnused1;
4849 USHORT usUnused2;
4850
4851 ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
4852
4853} ATOM_PPLIB_R600_CLOCK_INFO;
4854
4855// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
4856#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
4857#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
4858#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
4859#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
4860#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
4861
4862typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
4863
4864{
4865 USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
4866 UCHAR ucLowEngineClockHigh;
4867 USHORT usHighEngineClockLow; // High Engine clock in MHz.
4868 UCHAR ucHighEngineClockHigh;
4869 USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
4870 UCHAR ucMemoryClockHigh; // Currentyl unused.
4871 UCHAR ucPadding; // For proper alignment and size.
4872 USHORT usVDDC; // For the 780, use: None, Low, High, Variable
4873 UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
4874 UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
4875 USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
4876 ULONG ulFlags;
4877} ATOM_PPLIB_RS780_CLOCK_INFO;
4878
4879#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
4880#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
4881#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
4882#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
4883
4884#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
4885#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
4886#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
4887
4888#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
4889#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
4890#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
4891
4693/**************************************************************************/ 4892/**************************************************************************/
4694 4893
4695/* Following definitions are for compatiblity issue in different SW components. */ 4894/* Following definitions are for compatiblity issue in different SW components. */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 84e5df766d3f..71727460968f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2881,6 +2881,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2881 2881
2882 for (i = 0; i < track->num_cb; i++) { 2882 for (i = 0; i < track->num_cb; i++) {
2883 if (track->cb[i].robj == NULL) { 2883 if (track->cb[i].robj == NULL) {
2884 if (!(track->fastfill || track->color_channel_mask ||
2885 track->blend_read_enable)) {
2886 continue;
2887 }
2884 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 2888 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
2885 return -EINVAL; 2889 return -EINVAL;
2886 } 2890 }
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 7188c3778ee2..b27a6999d219 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -67,13 +67,15 @@ struct r100_cs_track {
67 unsigned immd_dwords; 67 unsigned immd_dwords;
68 unsigned num_arrays; 68 unsigned num_arrays;
69 unsigned max_indx; 69 unsigned max_indx;
70 unsigned color_channel_mask;
70 struct r100_cs_track_array arrays[11]; 71 struct r100_cs_track_array arrays[11];
71 struct r100_cs_track_cb cb[R300_MAX_CB]; 72 struct r100_cs_track_cb cb[R300_MAX_CB];
72 struct r100_cs_track_cb zb; 73 struct r100_cs_track_cb zb;
73 struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; 74 struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
74 bool z_enabled; 75 bool z_enabled;
75 bool separate_cube; 76 bool separate_cube;
76 77 bool fastfill;
78 bool blend_read_enable;
77}; 79};
78 80
79int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); 81int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 83490c2b5061..3f2cc9e2e8d9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -887,6 +887,14 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
887 track->textures[i].cpp = 1; 887 track->textures[i].cpp = 1;
888 track->textures[i].compress_format = R100_TRACK_COMP_DXT1; 888 track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
889 break; 889 break;
890 case R300_TX_FORMAT_ATI2N:
891 if (p->rdev->family < CHIP_R420) {
892 DRM_ERROR("Invalid texture format %u\n",
893 (idx_value & 0x1F));
894 return -EINVAL;
895 }
896 /* The same rules apply as for DXT3/5. */
897 /* Pass through. */
890 case R300_TX_FORMAT_DXT3: 898 case R300_TX_FORMAT_DXT3:
891 case R300_TX_FORMAT_DXT5: 899 case R300_TX_FORMAT_DXT5:
892 track->textures[i].cpp = 1; 900 track->textures[i].cpp = 1;
@@ -951,6 +959,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
951 track->textures[i].width_11 = tmp; 959 track->textures[i].width_11 = tmp;
952 tmp = ((idx_value >> 16) & 1) << 11; 960 tmp = ((idx_value >> 16) & 1) << 11;
953 track->textures[i].height_11 = tmp; 961 track->textures[i].height_11 = tmp;
962
963 /* ATI1N */
964 if (idx_value & (1 << 14)) {
965 /* The same rules apply as for DXT1. */
966 track->textures[i].compress_format =
967 R100_TRACK_COMP_DXT1;
968 }
969 } else if (idx_value & (1 << 14)) {
970 DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
971 return -EINVAL;
954 } 972 }
955 break; 973 break;
956 case 0x4480: 974 case 0x4480:
@@ -992,6 +1010,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
992 } 1010 }
993 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1011 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
994 break; 1012 break;
1013 case 0x4e0c:
1014 /* RB3D_COLOR_CHANNEL_MASK */
1015 track->color_channel_mask = idx_value;
1016 break;
1017 case 0x4d1c:
1018 /* ZB_BW_CNTL */
1019 track->fastfill = !!(idx_value & (1 << 2));
1020 break;
1021 case 0x4e04:
1022 /* RB3D_BLENDCNTL */
1023 track->blend_read_enable = !!(idx_value & (1 << 2));
1024 break;
995 case 0x4be8: 1025 case 0x4be8:
996 /* valid register only on RV530 */ 1026 /* valid register only on RV530 */
997 if (p->rdev->family == CHIP_RV530) 1027 if (p->rdev->family == CHIP_RV530)
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index cb2e470f97d4..34bffa0e4b73 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -990,7 +990,7 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
990 int sz; 990 int sz;
991 int addr; 991 int addr;
992 int type; 992 int type;
993 int clamp; 993 int isclamp;
994 int stride; 994 int stride;
995 RING_LOCALS; 995 RING_LOCALS;
996 996
@@ -999,10 +999,10 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
999 addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo; 999 addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
1000 1000
1001 type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE); 1001 type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
1002 clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP); 1002 isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
1003 1003
1004 addr |= (type << 16); 1004 addr |= (type << 16);
1005 addr |= (clamp << 17); 1005 addr |= (isclamp << 17);
1006 1006
1007 stride = type ? 4 : 6; 1007 stride = type ? 4 : 6;
1008 1008
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 4b7afef35a65..1735a2b69580 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -900,6 +900,7 @@
900# define R300_TX_FORMAT_FL_I32 0x1B 900# define R300_TX_FORMAT_FL_I32 0x1B
901# define R300_TX_FORMAT_FL_I32A32 0x1C 901# define R300_TX_FORMAT_FL_I32A32 0x1C
902# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D 902# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
903# define R300_TX_FORMAT_ATI2N 0x1F
903 /* alpha modes, convenience mostly */ 904 /* alpha modes, convenience mostly */
904 /* if you have alpha, pick constant appropriate to the 905 /* if you have alpha, pick constant appropriate to the
905 number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */ 906 number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0d820764f340..44060b92d9e6 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -170,7 +170,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
170 idx, relocs_chunk->length_dw); 170 idx, relocs_chunk->length_dw);
171 return -EINVAL; 171 return -EINVAL;
172 } 172 }
173 *cs_reloc = &p->relocs[0]; 173 *cs_reloc = p->relocs;
174 (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; 174 (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
175 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; 175 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
176 return 0; 176 return 0;
@@ -717,7 +717,7 @@ static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
717 if (p->chunk_relocs_idx == -1) { 717 if (p->chunk_relocs_idx == -1) {
718 return 0; 718 return 0;
719 } 719 }
720 p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL); 720 p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
721 if (p->relocs == NULL) { 721 if (p->relocs == NULL) {
722 return -ENOMEM; 722 return -ENOMEM;
723 } 723 }
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index cd650fd3964e..53b55608102b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -162,6 +162,7 @@ struct radeon_fence_driver {
162 struct list_head created; 162 struct list_head created;
163 struct list_head emited; 163 struct list_head emited;
164 struct list_head signaled; 164 struct list_head signaled;
165 bool initialized;
165}; 166};
166 167
167struct radeon_fence { 168struct radeon_fence {
@@ -202,8 +203,9 @@ struct radeon_surface_reg {
202struct radeon_mman { 203struct radeon_mman {
203 struct ttm_bo_global_ref bo_global_ref; 204 struct ttm_bo_global_ref bo_global_ref;
204 struct ttm_global_reference mem_global_ref; 205 struct ttm_global_reference mem_global_ref;
205 bool mem_global_referenced;
206 struct ttm_bo_device bdev; 206 struct ttm_bo_device bdev;
207 bool mem_global_referenced;
208 bool initialized;
207}; 209};
208 210
209struct radeon_bo { 211struct radeon_bo {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 636116bedcb4..eb29217bbf1d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -33,6 +33,7 @@
33 */ 33 */
34uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev); 34uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
35void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock); 35void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
36uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev);
36void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); 37void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
37 38
38uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev); 39uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
@@ -106,7 +107,7 @@ static struct radeon_asic r100_asic = {
106 .copy = &r100_copy_blit, 107 .copy = &r100_copy_blit,
107 .get_engine_clock = &radeon_legacy_get_engine_clock, 108 .get_engine_clock = &radeon_legacy_get_engine_clock,
108 .set_engine_clock = &radeon_legacy_set_engine_clock, 109 .set_engine_clock = &radeon_legacy_set_engine_clock,
109 .get_memory_clock = NULL, 110 .get_memory_clock = &radeon_legacy_get_memory_clock,
110 .set_memory_clock = NULL, 111 .set_memory_clock = NULL,
111 .set_pcie_lanes = NULL, 112 .set_pcie_lanes = NULL,
112 .set_clock_gating = &radeon_legacy_set_clock_gating, 113 .set_clock_gating = &radeon_legacy_set_clock_gating,
@@ -166,7 +167,7 @@ static struct radeon_asic r300_asic = {
166 .copy = &r100_copy_blit, 167 .copy = &r100_copy_blit,
167 .get_engine_clock = &radeon_legacy_get_engine_clock, 168 .get_engine_clock = &radeon_legacy_get_engine_clock,
168 .set_engine_clock = &radeon_legacy_set_engine_clock, 169 .set_engine_clock = &radeon_legacy_set_engine_clock,
169 .get_memory_clock = NULL, 170 .get_memory_clock = &radeon_legacy_get_memory_clock,
170 .set_memory_clock = NULL, 171 .set_memory_clock = NULL,
171 .set_pcie_lanes = &rv370_set_pcie_lanes, 172 .set_pcie_lanes = &rv370_set_pcie_lanes,
172 .set_clock_gating = &radeon_legacy_set_clock_gating, 173 .set_clock_gating = &radeon_legacy_set_clock_gating,
@@ -259,7 +260,7 @@ static struct radeon_asic rs400_asic = {
259 .copy = &r100_copy_blit, 260 .copy = &r100_copy_blit,
260 .get_engine_clock = &radeon_legacy_get_engine_clock, 261 .get_engine_clock = &radeon_legacy_get_engine_clock,
261 .set_engine_clock = &radeon_legacy_set_engine_clock, 262 .set_engine_clock = &radeon_legacy_set_engine_clock,
262 .get_memory_clock = NULL, 263 .get_memory_clock = &radeon_legacy_get_memory_clock,
263 .set_memory_clock = NULL, 264 .set_memory_clock = NULL,
264 .set_pcie_lanes = NULL, 265 .set_pcie_lanes = NULL,
265 .set_clock_gating = &radeon_legacy_set_clock_gating, 266 .set_clock_gating = &radeon_legacy_set_clock_gating,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 12a0c760e7ff..321044bef71c 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -745,8 +745,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
745 else 745 else
746 radeon_add_legacy_encoder(dev, 746 radeon_add_legacy_encoder(dev,
747 radeon_get_encoder_id(dev, 747 radeon_get_encoder_id(dev,
748 (1 << 748 (1 << i),
749 i),
750 dac), 749 dac),
751 (1 << i)); 750 (1 << i));
752 } 751 }
@@ -758,32 +757,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
758 if (bios_connectors[j].valid && (i != j)) { 757 if (bios_connectors[j].valid && (i != j)) {
759 if (bios_connectors[i].line_mux == 758 if (bios_connectors[i].line_mux ==
760 bios_connectors[j].line_mux) { 759 bios_connectors[j].line_mux) {
761 if (((bios_connectors[i]. 760 /* make sure not to combine LVDS */
762 devices & 761 if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
763 (ATOM_DEVICE_DFP_SUPPORT)) 762 bios_connectors[i].line_mux = 53;
764 && (bios_connectors[j]. 763 bios_connectors[i].ddc_bus.valid = false;
765 devices & 764 continue;
766 (ATOM_DEVICE_CRT_SUPPORT))) 765 }
767 || 766 if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
768 ((bios_connectors[j]. 767 bios_connectors[j].line_mux = 53;
769 devices & 768 bios_connectors[j].ddc_bus.valid = false;
770 (ATOM_DEVICE_DFP_SUPPORT)) 769 continue;
771 && (bios_connectors[i]. 770 }
772 devices & 771 /* combine analog and digital for DVI-I */
773 (ATOM_DEVICE_CRT_SUPPORT)))) { 772 if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
774 bios_connectors[i]. 773 (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) ||
775 devices |= 774 ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
776 bios_connectors[j]. 775 (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) {
777 devices; 776 bios_connectors[i].devices |=
778 bios_connectors[i]. 777 bios_connectors[j].devices;
779 connector_type = 778 bios_connectors[i].connector_type =
780 DRM_MODE_CONNECTOR_DVII; 779 DRM_MODE_CONNECTOR_DVII;
781 if (bios_connectors[j].devices & 780 if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT))
782 (ATOM_DEVICE_DFP_SUPPORT))
783 bios_connectors[i].hpd = 781 bios_connectors[i].hpd =
784 bios_connectors[j].hpd; 782 bios_connectors[j].hpd;
785 bios_connectors[j]. 783 bios_connectors[j].valid = false;
786 valid = false;
787 } 784 }
788 } 785 }
789 } 786 }
@@ -1234,6 +1231,61 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
1234 return true; 1231 return true;
1235} 1232}
1236 1233
1234enum radeon_tv_std
1235radeon_atombios_get_tv_info(struct radeon_device *rdev)
1236{
1237 struct radeon_mode_info *mode_info = &rdev->mode_info;
1238 int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
1239 uint16_t data_offset;
1240 uint8_t frev, crev;
1241 struct _ATOM_ANALOG_TV_INFO *tv_info;
1242 enum radeon_tv_std tv_std = TV_STD_NTSC;
1243
1244 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
1245
1246 tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
1247
1248 switch (tv_info->ucTV_BootUpDefaultStandard) {
1249 case ATOM_TV_NTSC:
1250 tv_std = TV_STD_NTSC;
1251 DRM_INFO("Default TV standard: NTSC\n");
1252 break;
1253 case ATOM_TV_NTSCJ:
1254 tv_std = TV_STD_NTSC_J;
1255 DRM_INFO("Default TV standard: NTSC-J\n");
1256 break;
1257 case ATOM_TV_PAL:
1258 tv_std = TV_STD_PAL;
1259 DRM_INFO("Default TV standard: PAL\n");
1260 break;
1261 case ATOM_TV_PALM:
1262 tv_std = TV_STD_PAL_M;
1263 DRM_INFO("Default TV standard: PAL-M\n");
1264 break;
1265 case ATOM_TV_PALN:
1266 tv_std = TV_STD_PAL_N;
1267 DRM_INFO("Default TV standard: PAL-N\n");
1268 break;
1269 case ATOM_TV_PALCN:
1270 tv_std = TV_STD_PAL_CN;
1271 DRM_INFO("Default TV standard: PAL-CN\n");
1272 break;
1273 case ATOM_TV_PAL60:
1274 tv_std = TV_STD_PAL_60;
1275 DRM_INFO("Default TV standard: PAL-60\n");
1276 break;
1277 case ATOM_TV_SECAM:
1278 tv_std = TV_STD_SECAM;
1279 DRM_INFO("Default TV standard: SECAM\n");
1280 break;
1281 default:
1282 tv_std = TV_STD_NTSC;
1283 DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
1284 break;
1285 }
1286 return tv_std;
1287}
1288
1237struct radeon_encoder_tv_dac * 1289struct radeon_encoder_tv_dac *
1238radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) 1290radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
1239{ 1291{
@@ -1269,6 +1321,7 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
1269 dac = dac_info->ucDAC2_NTSC_DAC_Adjustment; 1321 dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
1270 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); 1322 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
1271 1323
1324 tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
1272 } 1325 }
1273 return tv_dac; 1326 return tv_dac;
1274} 1327}
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index b062109efbee..812f24dbc2a8 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -62,7 +62,7 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
62} 62}
63 63
64/* 10 khz */ 64/* 10 khz */
65static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev) 65uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
66{ 66{
67 struct radeon_pll *mpll = &rdev->clock.mpll; 67 struct radeon_pll *mpll = &rdev->clock.mpll;
68 uint32_t fb_div, ref_div, post_div, mclk; 68 uint32_t fb_div, ref_div, post_div, mclk;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index c5021a3445de..fd94dbca33ac 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -634,11 +634,10 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
634 return p_dac; 634 return p_dac;
635} 635}
636 636
637static enum radeon_tv_std 637enum radeon_tv_std
638radeon_combios_get_tv_info(struct radeon_encoder *encoder) 638radeon_combios_get_tv_info(struct radeon_device *rdev)
639{ 639{
640 struct drm_device *dev = encoder->base.dev; 640 struct drm_device *dev = rdev->ddev;
641 struct radeon_device *rdev = dev->dev_private;
642 uint16_t tv_info; 641 uint16_t tv_info;
643 enum radeon_tv_std tv_std = TV_STD_NTSC; 642 enum radeon_tv_std tv_std = TV_STD_NTSC;
644 643
@@ -779,7 +778,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
779 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); 778 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
780 found = 1; 779 found = 1;
781 } 780 }
782 tv_dac->tv_std = radeon_combios_get_tv_info(encoder); 781 tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
783 } 782 }
784 if (!found) { 783 if (!found) {
785 /* then check CRT table */ 784 /* then check CRT table */
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5eece186e03c..20161567dbff 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -208,6 +208,18 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
208 drm_mode_set_name(mode); 208 drm_mode_set_name(mode);
209 209
210 DRM_DEBUG("Adding native panel mode %s\n", mode->name); 210 DRM_DEBUG("Adding native panel mode %s\n", mode->name);
211 } else if (native_mode->hdisplay != 0 &&
212 native_mode->vdisplay != 0) {
213 /* mac laptops without an edid */
214 /* Note that this is not necessarily the exact panel mode,
215 * but an approximation based on the cvt formula. For these
216 * systems we should ideally read the mode info out of the
217 * registers or add a mode table, but this works and is much
218 * simpler.
219 */
220 mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
221 mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
222 DRM_DEBUG("Adding cvt approximation of native panel mode %s\n", mode->name);
211 } 223 }
212 return mode; 224 return mode;
213} 225}
@@ -1171,7 +1183,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1171 1); 1183 1);
1172 drm_connector_attach_property(&radeon_connector->base, 1184 drm_connector_attach_property(&radeon_connector->base,
1173 rdev->mode_info.tv_std_property, 1185 rdev->mode_info.tv_std_property,
1174 1); 1186 radeon_atombios_get_tv_info(rdev));
1175 } 1187 }
1176 break; 1188 break;
1177 case DRM_MODE_CONNECTOR_LVDS: 1189 case DRM_MODE_CONNECTOR_LVDS:
@@ -1315,7 +1327,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1315 1); 1327 1);
1316 drm_connector_attach_property(&radeon_connector->base, 1328 drm_connector_attach_property(&radeon_connector->base,
1317 rdev->mode_info.tv_std_property, 1329 rdev->mode_info.tv_std_property,
1318 1); 1330 radeon_combios_get_tv_info(rdev));
1319 } 1331 }
1320 break; 1332 break;
1321 case DRM_MODE_CONNECTOR_LVDS: 1333 case DRM_MODE_CONNECTOR_LVDS:
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 02bcdb1240c0..7c6848096bcd 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -391,6 +391,12 @@ int radeon_asic_init(struct radeon_device *rdev)
391 /* FIXME: not supported yet */ 391 /* FIXME: not supported yet */
392 return -EINVAL; 392 return -EINVAL;
393 } 393 }
394
395 if (rdev->flags & RADEON_IS_IGP) {
396 rdev->asic->get_memory_clock = NULL;
397 rdev->asic->set_memory_clock = NULL;
398 }
399
394 return 0; 400 return 0;
395} 401}
396 402
@@ -481,6 +487,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
481 atom_card_info->pll_write = cail_pll_write; 487 atom_card_info->pll_write = cail_pll_write;
482 488
483 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); 489 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
490 mutex_init(&rdev->mode_info.atom_context->mutex);
484 radeon_atom_initialize_bios_scratch_regs(rdev->ddev); 491 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
485 atom_allocate_fb_scratch(rdev->mode_info.atom_context); 492 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
486 return 0; 493 return 0;
@@ -539,9 +546,72 @@ void radeon_agp_disable(struct radeon_device *rdev)
539 } 546 }
540} 547}
541 548
542/* 549void radeon_check_arguments(struct radeon_device *rdev)
543 * Radeon device. 550{
544 */ 551 /* vramlimit must be a power of two */
552 switch (radeon_vram_limit) {
553 case 0:
554 case 4:
555 case 8:
556 case 16:
557 case 32:
558 case 64:
559 case 128:
560 case 256:
561 case 512:
562 case 1024:
563 case 2048:
564 case 4096:
565 break;
566 default:
567 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
568 radeon_vram_limit);
569 radeon_vram_limit = 0;
570 break;
571 }
572 radeon_vram_limit = radeon_vram_limit << 20;
573 /* gtt size must be power of two and greater or equal to 32M */
574 switch (radeon_gart_size) {
575 case 4:
576 case 8:
577 case 16:
578 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
579 radeon_gart_size);
580 radeon_gart_size = 512;
581 break;
582 case 32:
583 case 64:
584 case 128:
585 case 256:
586 case 512:
587 case 1024:
588 case 2048:
589 case 4096:
590 break;
591 default:
592 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
593 radeon_gart_size);
594 radeon_gart_size = 512;
595 break;
596 }
597 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
598 /* AGP mode can only be -1, 1, 2, 4, 8 */
599 switch (radeon_agpmode) {
600 case -1:
601 case 0:
602 case 1:
603 case 2:
604 case 4:
605 case 8:
606 break;
607 default:
608 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
609 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
610 radeon_agpmode = 0;
611 break;
612 }
613}
614
545int radeon_device_init(struct radeon_device *rdev, 615int radeon_device_init(struct radeon_device *rdev,
546 struct drm_device *ddev, 616 struct drm_device *ddev,
547 struct pci_dev *pdev, 617 struct pci_dev *pdev,
@@ -580,9 +650,9 @@ int radeon_device_init(struct radeon_device *rdev,
580 650
581 /* Set asic functions */ 651 /* Set asic functions */
582 r = radeon_asic_init(rdev); 652 r = radeon_asic_init(rdev);
583 if (r) { 653 if (r)
584 return r; 654 return r;
585 } 655 radeon_check_arguments(rdev);
586 656
587 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { 657 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
588 radeon_agp_disable(rdev); 658 radeon_agp_disable(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a133b833e45d..91d72b70abc9 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -739,7 +739,7 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
739 { TV_STD_SECAM, "secam" }, 739 { TV_STD_SECAM, "secam" },
740}; 740};
741 741
742int radeon_modeset_create_props(struct radeon_device *rdev) 742static int radeon_modeset_create_props(struct radeon_device *rdev)
743{ 743{
744 int i, sz; 744 int i, sz;
745 745
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index dbd56ef82f9c..8ba3de7994d4 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -196,7 +196,7 @@ static struct drm_driver driver_old = {
196 .owner = THIS_MODULE, 196 .owner = THIS_MODULE,
197 .open = drm_open, 197 .open = drm_open,
198 .release = drm_release, 198 .release = drm_release,
199 .ioctl = drm_ioctl, 199 .unlocked_ioctl = drm_ioctl,
200 .mmap = drm_mmap, 200 .mmap = drm_mmap,
201 .poll = drm_poll, 201 .poll = drm_poll,
202 .fasync = drm_fasync, 202 .fasync = drm_fasync,
@@ -284,7 +284,7 @@ static struct drm_driver kms_driver = {
284 .owner = THIS_MODULE, 284 .owner = THIS_MODULE,
285 .open = drm_open, 285 .open = drm_open,
286 .release = drm_release, 286 .release = drm_release,
287 .ioctl = drm_ioctl, 287 .unlocked_ioctl = drm_ioctl,
288 .mmap = radeon_mmap, 288 .mmap = radeon_mmap,
289 .poll = drm_poll, 289 .poll = drm_poll,
290 .fasync = drm_fasync, 290 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 0d1d908e5225..ccba95f83d11 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -233,6 +233,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
233 if (!ASIC_IS_AVIVO(rdev)) { 233 if (!ASIC_IS_AVIVO(rdev)) {
234 adjusted_mode->hdisplay = mode->hdisplay; 234 adjusted_mode->hdisplay = mode->hdisplay;
235 adjusted_mode->vdisplay = mode->vdisplay; 235 adjusted_mode->vdisplay = mode->vdisplay;
236 adjusted_mode->crtc_hdisplay = mode->hdisplay;
237 adjusted_mode->crtc_vdisplay = mode->vdisplay;
236 } 238 }
237 adjusted_mode->base.id = mode_id; 239 adjusted_mode->base.id = mode_id;
238 } 240 }
@@ -495,9 +497,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
495 args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; 497 args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
496 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 498 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
497 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 499 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
498 if (dig->lvds_misc & (1 << 0)) 500 if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
499 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; 501 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
500 if (dig->lvds_misc & (1 << 1)) 502 if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
501 args.v1.ucMisc |= (1 << 1); 503 args.v1.ucMisc |= (1 << 1);
502 } else { 504 } else {
503 if (dig_connector->linkb) 505 if (dig_connector->linkb)
@@ -524,18 +526,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
524 args.v2.ucTemporal = 0; 526 args.v2.ucTemporal = 0;
525 args.v2.ucFRC = 0; 527 args.v2.ucFRC = 0;
526 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 528 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
527 if (dig->lvds_misc & (1 << 0)) 529 if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
528 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; 530 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
529 if (dig->lvds_misc & (1 << 5)) { 531 if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) {
530 args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; 532 args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
531 if (dig->lvds_misc & (1 << 1)) 533 if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
532 args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; 534 args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
533 } 535 }
534 if (dig->lvds_misc & (1 << 6)) { 536 if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) {
535 args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; 537 args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
536 if (dig->lvds_misc & (1 << 1)) 538 if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
537 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; 539 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
538 if (((dig->lvds_misc >> 2) & 0x3) == 2) 540 if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
539 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; 541 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
540 } 542 }
541 } else { 543 } else {
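
This radeon_encoders.c hunk swaps raw (1 << n) tests of dig->lvds_misc for named ATOM_PANEL_MISC_* constants. The bit positions can be read back from the old magic numbers; purely as an illustration (these defines mirror the hunk, they are not quoted from the ATOM headers):

    #define PANEL_MISC_DUAL              (1 << 0)
    #define PANEL_MISC_888RGB            (1 << 1)
    #define PANEL_MISC_GREY_LEVEL_SHIFT  2      /* two-bit grey-level field */
    #define PANEL_MISC_SPATIAL           (1 << 5)
    #define PANEL_MISC_TEMPORAL          (1 << 6)

so a test such as "dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL" states its intent instead of leaving the reader to decode bit 5.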
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index cb4cd97ae39f..4cdd8b4f7549 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -324,7 +324,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
324 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); 324 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
325 r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg); 325 r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
326 if (r) { 326 if (r) {
327 DRM_ERROR("Fence failed to get a scratch register."); 327 dev_err(rdev->dev, "fence failed to get scratch register\n");
328 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); 328 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
329 return r; 329 return r;
330 } 330 }
@@ -335,9 +335,10 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
335 INIT_LIST_HEAD(&rdev->fence_drv.signaled); 335 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
336 rdev->fence_drv.count_timeout = 0; 336 rdev->fence_drv.count_timeout = 0;
337 init_waitqueue_head(&rdev->fence_drv.queue); 337 init_waitqueue_head(&rdev->fence_drv.queue);
338 rdev->fence_drv.initialized = true;
338 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); 339 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
339 if (radeon_debugfs_fence_init(rdev)) { 340 if (radeon_debugfs_fence_init(rdev)) {
340 DRM_ERROR("Failed to register debugfs file for fence !\n"); 341 dev_err(rdev->dev, "fence debugfs file creation failed\n");
341 } 342 }
342 return 0; 343 return 0;
343} 344}
@@ -346,11 +347,13 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
346{ 347{
347 unsigned long irq_flags; 348 unsigned long irq_flags;
348 349
350 if (!rdev->fence_drv.initialized)
351 return;
349 wake_up_all(&rdev->fence_drv.queue); 352 wake_up_all(&rdev->fence_drv.queue);
350 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); 353 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
351 radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg); 354 radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
352 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); 355 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
353 DRM_INFO("radeon: fence finalized\n"); 356 rdev->fence_drv.initialized = false;
354} 357}
355 358
356 359
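
This radeon_fence.c change, like the radeon_ttm.c change below, introduces an "initialized" flag so the _fini path becomes a no-op when the corresponding _init never ran or failed early. The pattern in rough form (foo_* names are placeholders):

    int foo_init(struct foo *f)
    {
            /* ... acquire resources ... */
            f->initialized = true;     /* only once setup has succeeded */
            return 0;
    }

    void foo_fini(struct foo *f)
    {
            if (!f->initialized)       /* tolerate teardown without init */
                    return;
            /* ... release resources ... */
            f->initialized = false;
    }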
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index a1bf11de308a..48b7cea31e08 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -92,8 +92,7 @@ static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
92 &init->gart_textures_offset)) 92 &init->gart_textures_offset))
93 return -EFAULT; 93 return -EFAULT;
94 94
95 return drm_ioctl(file->f_path.dentry->d_inode, file, 95 return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
96 DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
97} 96}
98 97
99typedef struct drm_radeon_clear32 { 98typedef struct drm_radeon_clear32 {
@@ -125,8 +124,7 @@ static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
125 &clr->depth_boxes)) 124 &clr->depth_boxes))
126 return -EFAULT; 125 return -EFAULT;
127 126
128 return drm_ioctl(file->f_path.dentry->d_inode, file, 127 return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
129 DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
130} 128}
131 129
132typedef struct drm_radeon_stipple32 { 130typedef struct drm_radeon_stipple32 {
@@ -149,8 +147,7 @@ static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
149 &request->mask)) 147 &request->mask))
150 return -EFAULT; 148 return -EFAULT;
151 149
152 return drm_ioctl(file->f_path.dentry->d_inode, file, 150 return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
153 DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
154} 151}
155 152
156typedef struct drm_radeon_tex_image32 { 153typedef struct drm_radeon_tex_image32 {
@@ -204,8 +201,7 @@ static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
204 &image->data)) 201 &image->data))
205 return -EFAULT; 202 return -EFAULT;
206 203
207 return drm_ioctl(file->f_path.dentry->d_inode, file, 204 return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
208 DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
209} 205}
210 206
211typedef struct drm_radeon_vertex2_32 { 207typedef struct drm_radeon_vertex2_32 {
@@ -238,8 +234,7 @@ static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
238 &request->prim)) 234 &request->prim))
239 return -EFAULT; 235 return -EFAULT;
240 236
241 return drm_ioctl(file->f_path.dentry->d_inode, file, 237 return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
242 DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
243} 238}
244 239
245typedef struct drm_radeon_cmd_buffer32 { 240typedef struct drm_radeon_cmd_buffer32 {
@@ -268,8 +263,7 @@ static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
268 &request->boxes)) 263 &request->boxes))
269 return -EFAULT; 264 return -EFAULT;
270 265
271 return drm_ioctl(file->f_path.dentry->d_inode, file, 266 return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
272 DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
273} 267}
274 268
275typedef struct drm_radeon_getparam32 { 269typedef struct drm_radeon_getparam32 {
@@ -293,8 +287,7 @@ static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
293 &request->value)) 287 &request->value))
294 return -EFAULT; 288 return -EFAULT;
295 289
296 return drm_ioctl(file->f_path.dentry->d_inode, file, 290 return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
297 DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
298} 291}
299 292
300typedef struct drm_radeon_mem_alloc32 { 293typedef struct drm_radeon_mem_alloc32 {
@@ -322,8 +315,7 @@ static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
322 &request->region_offset)) 315 &request->region_offset))
323 return -EFAULT; 316 return -EFAULT;
324 317
325 return drm_ioctl(file->f_path.dentry->d_inode, file, 318 return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
326 DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
327} 319}
328 320
329typedef struct drm_radeon_irq_emit32 { 321typedef struct drm_radeon_irq_emit32 {
@@ -345,8 +337,7 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
345 &request->irq_seq)) 337 &request->irq_seq))
346 return -EFAULT; 338 return -EFAULT;
347 339
348 return drm_ioctl(file->f_path.dentry->d_inode, file, 340 return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
349 DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
350} 341}
351 342
352/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */ 343/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
@@ -372,8 +363,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
372 &request->value)) 363 &request->value))
373 return -EFAULT; 364 return -EFAULT;
374 365
375 return drm_ioctl(file->f_dentry->d_inode, file, 366 return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
376 DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
377} 367}
378#else 368#else
379#define compat_radeon_cp_setparam NULL 369#define compat_radeon_cp_setparam NULL
@@ -413,12 +403,10 @@ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
413 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) 403 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
414 fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; 404 fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
415 405
416 lock_kernel(); /* XXX for now */
417 if (fn != NULL) 406 if (fn != NULL)
418 ret = (*fn) (filp, cmd, arg); 407 ret = (*fn) (filp, cmd, arg);
419 else 408 else
420 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); 409 ret = drm_ioctl(filp, cmd, arg);
421 unlock_kernel();
422 410
423 return ret; 411 return ret;
424} 412}
@@ -431,9 +419,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long
431 if (nr < DRM_COMMAND_BASE) 419 if (nr < DRM_COMMAND_BASE)
432 return drm_compat_ioctl(filp, cmd, arg); 420 return drm_compat_ioctl(filp, cmd, arg);
433 421
434 lock_kernel(); /* XXX for now */ 422 ret = drm_ioctl(filp, cmd, arg);
435 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
436 unlock_kernel();
437 423
438 return ret; 424 return ret;
439} 425}
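
Every radeon_ioc32.c hunk is the same mechanical conversion: drm_ioctl() now takes the struct file directly, so the dentry/inode dance and the lock_kernel()/unlock_kernel() bracketing around the compat path disappear. A condensed sketch of one 32-bit thunk after the conversion (drm_foo32_t, drm_foo_t and DRM_IOCTL_FOO are stand-ins):

    static int compat_foo(struct file *file, unsigned int cmd, unsigned long arg)
    {
            drm_foo32_t req32;
            drm_foo_t __user *req;

            if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
                    return -EFAULT;

            req = compat_alloc_user_space(sizeof(*req));
            if (!access_ok(VERIFY_WRITE, req, sizeof(*req)) ||
                __put_user(req32.value, &req->value))
                    return -EFAULT;

            return drm_ioctl(file, DRM_IOCTL_FOO, (unsigned long)req);
    }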
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index b82ede98e152..cc27485a07ad 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -43,8 +43,7 @@ static void radeon_overscan_setup(struct drm_crtc *crtc,
43} 43}
44 44
45static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, 45static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
46 struct drm_display_mode *mode, 46 struct drm_display_mode *mode)
47 struct drm_display_mode *adjusted_mode)
48{ 47{
49 struct drm_device *dev = crtc->dev; 48 struct drm_device *dev = crtc->dev;
50 struct radeon_device *rdev = dev->dev_private; 49 struct radeon_device *rdev = dev->dev_private;
@@ -1059,7 +1058,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
1059 radeon_set_pll(crtc, adjusted_mode); 1058 radeon_set_pll(crtc, adjusted_mode);
1060 radeon_overscan_setup(crtc, adjusted_mode); 1059 radeon_overscan_setup(crtc, adjusted_mode);
1061 if (radeon_crtc->crtc_id == 0) { 1060 if (radeon_crtc->crtc_id == 0) {
1062 radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); 1061 radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
1063 } else { 1062 } else {
1064 if (radeon_crtc->rmx_type != RMX_OFF) { 1063 if (radeon_crtc->rmx_type != RMX_OFF) {
1065 /* FIXME: only first crtc has rmx what should we 1064 /* FIXME: only first crtc has rmx what should we
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index df00515e81fa..981508ff7037 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -207,6 +207,8 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
207 *adjusted_mode = *native_mode; 207 *adjusted_mode = *native_mode;
208 adjusted_mode->hdisplay = mode->hdisplay; 208 adjusted_mode->hdisplay = mode->hdisplay;
209 adjusted_mode->vdisplay = mode->vdisplay; 209 adjusted_mode->vdisplay = mode->vdisplay;
210 adjusted_mode->crtc_hdisplay = mode->hdisplay;
211 adjusted_mode->crtc_vdisplay = mode->vdisplay;
210 adjusted_mode->base.id = mode_id; 212 adjusted_mode->base.id = mode_id;
211 } 213 }
212 214
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 3dcbe130c422..402369db5ba0 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -88,6 +88,7 @@ enum radeon_tv_std {
88 TV_STD_SCART_PAL, 88 TV_STD_SCART_PAL,
89 TV_STD_SECAM, 89 TV_STD_SECAM,
90 TV_STD_PAL_CN, 90 TV_STD_PAL_CN,
91 TV_STD_PAL_N,
91}; 92};
92 93
93/* radeon gpio-based i2c 94/* radeon gpio-based i2c
@@ -395,6 +396,11 @@ struct radeon_framebuffer {
395 struct drm_gem_object *obj; 396 struct drm_gem_object *obj;
396}; 397};
397 398
399extern enum radeon_tv_std
400radeon_combios_get_tv_info(struct radeon_device *rdev);
401extern enum radeon_tv_std
402radeon_atombios_get_tv_info(struct radeon_device *rdev);
403
398extern void radeon_connector_hotplug(struct drm_connector *connector); 404extern void radeon_connector_hotplug(struct drm_connector *connector);
399extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); 405extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
400extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, 406extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 391c973ec4db..9f5e2f929da9 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev)
42 /* Number of tests = 42 /* Number of tests =
43 * (Total GTT - IB pool - writeback page - ring buffer) / test size 43 * (Total GTT - IB pool - writeback page - ring buffer) / test size
44 */ 44 */
45 n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - 45 n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
46 rdev->cp.ring_size) / size; 46 rdev->cp.ring_size)) / size;
47 47
48 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); 48 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
49 if (!gtt_obj) { 49 if (!gtt_obj) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d7fd160cc671..3b0c07b444a2 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -494,6 +494,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
494 DRM_ERROR("failed initializing buffer object driver(%d).\n", r); 494 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
495 return r; 495 return r;
496 } 496 }
497 rdev->mman.initialized = true;
497 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 498 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
498 rdev->mc.real_vram_size >> PAGE_SHIFT); 499 rdev->mc.real_vram_size >> PAGE_SHIFT);
499 if (r) { 500 if (r) {
@@ -541,6 +542,8 @@ void radeon_ttm_fini(struct radeon_device *rdev)
541{ 542{
542 int r; 543 int r;
543 544
545 if (!rdev->mman.initialized)
546 return;
544 if (rdev->stollen_vga_memory) { 547 if (rdev->stollen_vga_memory) {
545 r = radeon_bo_reserve(rdev->stollen_vga_memory, false); 548 r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
546 if (r == 0) { 549 if (r == 0) {
@@ -554,6 +557,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
554 ttm_bo_device_release(&rdev->mman.bdev); 557 ttm_bo_device_release(&rdev->mman.bdev);
555 radeon_gart_fini(rdev); 558 radeon_gart_fini(rdev);
556 radeon_ttm_global_fini(rdev); 559 radeon_ttm_global_fini(rdev);
560 rdev->mman.initialized = false;
557 DRM_INFO("radeon: ttm finalized\n"); 561 DRM_INFO("radeon: ttm finalized\n");
558} 562}
559 563
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index eee52aa92a7c..021de44c15ab 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -50,7 +50,7 @@ static struct drm_driver driver = {
50 .owner = THIS_MODULE, 50 .owner = THIS_MODULE,
51 .open = drm_open, 51 .open = drm_open,
52 .release = drm_release, 52 .release = drm_release,
53 .ioctl = drm_ioctl, 53 .unlocked_ioctl = drm_ioctl,
54 .mmap = drm_mmap, 54 .mmap = drm_mmap,
55 .poll = drm_poll, 55 .poll = drm_poll,
56 .fasync = drm_fasync, 56 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index e725cc0b1155..4fd1f067d380 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -80,7 +80,7 @@ static struct drm_driver driver = {
80 .owner = THIS_MODULE, 80 .owner = THIS_MODULE,
81 .open = drm_open, 81 .open = drm_open,
82 .release = drm_release, 82 .release = drm_release,
83 .ioctl = drm_ioctl, 83 .unlocked_ioctl = drm_ioctl,
84 .mmap = drm_mmap, 84 .mmap = drm_mmap,
85 .poll = drm_poll, 85 .poll = drm_poll,
86 .fasync = drm_fasync, 86 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 012ff2e356b2..ec5a43e65722 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -48,7 +48,7 @@ static struct drm_driver driver = {
48 .owner = THIS_MODULE, 48 .owner = THIS_MODULE,
49 .open = drm_open, 49 .open = drm_open,
50 .release = drm_release, 50 .release = drm_release,
51 .ioctl = drm_ioctl, 51 .unlocked_ioctl = drm_ioctl,
52 .mmap = drm_mmap, 52 .mmap = drm_mmap,
53 .poll = drm_poll, 53 .poll = drm_poll,
54 .fasync = drm_fasync, 54 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index bc2f51843005..7a1b210401e0 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -58,7 +58,7 @@ static struct drm_driver driver = {
58 .owner = THIS_MODULE, 58 .owner = THIS_MODULE,
59 .open = drm_open, 59 .open = drm_open,
60 .release = drm_release, 60 .release = drm_release,
61 .ioctl = drm_ioctl, 61 .unlocked_ioctl = drm_ioctl,
62 .mmap = drm_mmap, 62 .mmap = drm_mmap,
63 .poll = drm_poll, 63 .poll = drm_poll,
64 .fasync = drm_fasync, 64 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 7b48bb3b63b2..1db1ef30be2b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -103,37 +103,39 @@
103 */ 103 */
104 104
105static struct drm_ioctl_desc vmw_ioctls[] = { 105static struct drm_ioctl_desc vmw_ioctls[] = {
106 VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, 0), 106 VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
107 DRM_AUTH | DRM_UNLOCKED),
107 VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, 108 VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
108 0), 109 DRM_AUTH | DRM_UNLOCKED),
109 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, 110 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
110 0), 111 DRM_AUTH | DRM_UNLOCKED),
111 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS, 112 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
112 vmw_kms_cursor_bypass_ioctl, 0), 113 vmw_kms_cursor_bypass_ioctl,
114 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
113 115
114 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl, 116 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
115 0), 117 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
116 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, 118 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
117 0), 119 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
118 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl, 120 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
119 0), 121 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
120 122
121 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl, 123 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
122 0), 124 DRM_AUTH | DRM_UNLOCKED),
123 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, 125 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
124 0), 126 DRM_AUTH | DRM_UNLOCKED),
125 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl, 127 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
126 0), 128 DRM_AUTH | DRM_UNLOCKED),
127 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, 129 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
128 0), 130 DRM_AUTH | DRM_UNLOCKED),
129 VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl, 131 VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
130 0), 132 DRM_AUTH | DRM_UNLOCKED),
131 VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl, 133 VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
132 0), 134 DRM_AUTH | DRM_UNLOCKED),
133 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, 135 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
134 0), 136 DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
135 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, 137 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
136 0) 138 DRM_AUTH | DRM_UNLOCKED)
137}; 139};
138 140
139static struct pci_device_id vmw_pci_id_list[] = { 141static struct pci_device_id vmw_pci_id_list[] = {
@@ -460,11 +462,9 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
460 struct drm_file *file_priv = filp->private_data; 462 struct drm_file *file_priv = filp->private_data;
461 struct drm_device *dev = file_priv->minor->dev; 463 struct drm_device *dev = file_priv->minor->dev;
462 unsigned int nr = DRM_IOCTL_NR(cmd); 464 unsigned int nr = DRM_IOCTL_NR(cmd);
463 long ret;
464 465
465 /* 466 /*
466 * The driver private ioctls and TTM ioctls should be 467 * Do extra checking on driver private ioctls.
467 * thread-safe.
468 */ 468 */
469 469
470 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) 470 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
@@ -477,18 +477,9 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
477 nr - DRM_COMMAND_BASE); 477 nr - DRM_COMMAND_BASE);
478 return -EINVAL; 478 return -EINVAL;
479 } 479 }
480 return drm_ioctl(filp->f_path.dentry->d_inode,
481 filp, cmd, arg);
482 } 480 }
483 481
484 /* 482 return drm_ioctl(filp, cmd, arg);
485 * Not all old drm ioctls are thread-safe.
486 */
487
488 lock_kernel();
489 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
490 unlock_kernel();
491 return ret;
492} 483}
493 484
494static int vmw_firstopen(struct drm_device *dev) 485static int vmw_firstopen(struct drm_device *dev)
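
The vmw_ioctls[] table above replaces its bare 0 flags with DRM_AUTH, DRM_MASTER, DRM_CONTROL_ALLOW, DRM_ROOT_ONLY and DRM_UNLOCKED, and vmw_unlocked_ioctl() stops wrapping the generic path in the BKL. The access flags are enforced before a private handler runs; roughly (illustrative only, not the DRM core's literal code):

    if ((flags & DRM_AUTH) && !file_priv->authenticated)
            return -EACCES;                  /* unauthenticated client */
    if ((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))
            return -EACCES;
    if ((flags & DRM_MASTER) && !file_priv->is_master)
            return -EACCES;
    /* DRM_UNLOCKED marks the handler safe to run without the legacy lock */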
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 43546d09d1b0..e61bd85b6975 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -123,6 +123,7 @@ struct vmw_sw_context{
123 uint32_t last_cid; 123 uint32_t last_cid;
124 bool cid_valid; 124 bool cid_valid;
125 uint32_t last_sid; 125 uint32_t last_sid;
126 uint32_t sid_translation;
126 bool sid_valid; 127 bool sid_valid;
127 struct ttm_object_file *tfile; 128 struct ttm_object_file *tfile;
128 struct list_head validate_nodes; 129 struct list_head validate_nodes;
@@ -317,9 +318,10 @@ extern void vmw_surface_res_free(struct vmw_resource *res);
317extern int vmw_surface_init(struct vmw_private *dev_priv, 318extern int vmw_surface_init(struct vmw_private *dev_priv,
318 struct vmw_surface *srf, 319 struct vmw_surface *srf,
319 void (*res_free) (struct vmw_resource *res)); 320 void (*res_free) (struct vmw_resource *res));
320extern int vmw_user_surface_lookup(struct vmw_private *dev_priv, 321extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
321 struct ttm_object_file *tfile, 322 struct ttm_object_file *tfile,
322 int sid, struct vmw_surface **out); 323 uint32_t handle,
324 struct vmw_surface **out);
323extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, 325extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
324 struct drm_file *file_priv); 326 struct drm_file *file_priv);
325extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, 327extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -328,7 +330,7 @@ extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
328 struct drm_file *file_priv); 330 struct drm_file *file_priv);
329extern int vmw_surface_check(struct vmw_private *dev_priv, 331extern int vmw_surface_check(struct vmw_private *dev_priv,
330 struct ttm_object_file *tfile, 332 struct ttm_object_file *tfile,
331 int id); 333 uint32_t handle, int *id);
332extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); 334extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
333extern int vmw_dmabuf_init(struct vmw_private *dev_priv, 335extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
334 struct vmw_dma_buffer *vmw_bo, 336 struct vmw_dma_buffer *vmw_bo,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7a39f3e6dc2c..2e92da567403 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -73,21 +73,32 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
73 73
74static int vmw_cmd_sid_check(struct vmw_private *dev_priv, 74static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
75 struct vmw_sw_context *sw_context, 75 struct vmw_sw_context *sw_context,
76 uint32_t sid) 76 uint32_t *sid)
77{ 77{
78 if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) && 78 if (*sid == SVGA3D_INVALID_ID)
79 sid != SVGA3D_INVALID_ID)) { 79 return 0;
80 int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid); 80
81 if (unlikely((!sw_context->sid_valid ||
82 *sid != sw_context->last_sid))) {
83 int real_id;
84 int ret = vmw_surface_check(dev_priv, sw_context->tfile,
85 *sid, &real_id);
81 86
82 if (unlikely(ret != 0)) { 87 if (unlikely(ret != 0)) {
83 DRM_ERROR("Could ot find or use surface %u\n", 88 DRM_ERROR("Could ot find or use surface 0x%08x "
84 (unsigned) sid); 89 "address 0x%08lx\n",
90 (unsigned int) *sid,
91 (unsigned long) sid);
85 return ret; 92 return ret;
86 } 93 }
87 94
88 sw_context->last_sid = sid; 95 sw_context->last_sid = *sid;
89 sw_context->sid_valid = true; 96 sw_context->sid_valid = true;
90 } 97 *sid = real_id;
98 sw_context->sid_translation = real_id;
99 } else
100 *sid = sw_context->sid_translation;
101
91 return 0; 102 return 0;
92} 103}
93 104
@@ -107,7 +118,8 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
107 return ret; 118 return ret;
108 119
109 cmd = container_of(header, struct vmw_sid_cmd, header); 120 cmd = container_of(header, struct vmw_sid_cmd, header);
110 return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid); 121 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
122 return ret;
111} 123}
112 124
113static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, 125static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
@@ -121,10 +133,10 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
121 int ret; 133 int ret;
122 134
123 cmd = container_of(header, struct vmw_sid_cmd, header); 135 cmd = container_of(header, struct vmw_sid_cmd, header);
124 ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid); 136 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
125 if (unlikely(ret != 0)) 137 if (unlikely(ret != 0))
126 return ret; 138 return ret;
127 return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid); 139 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
128} 140}
129 141
130static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, 142static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -138,10 +150,10 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
138 int ret; 150 int ret;
139 151
140 cmd = container_of(header, struct vmw_sid_cmd, header); 152 cmd = container_of(header, struct vmw_sid_cmd, header);
141 ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid); 153 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
142 if (unlikely(ret != 0)) 154 if (unlikely(ret != 0))
143 return ret; 155 return ret;
144 return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid); 156 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
145} 157}
146 158
147static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, 159static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -154,7 +166,7 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
154 } *cmd; 166 } *cmd;
155 167
156 cmd = container_of(header, struct vmw_sid_cmd, header); 168 cmd = container_of(header, struct vmw_sid_cmd, header);
157 return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid); 169 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
158} 170}
159 171
160static int vmw_cmd_present_check(struct vmw_private *dev_priv, 172static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -167,7 +179,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
167 } *cmd; 179 } *cmd;
168 180
169 cmd = container_of(header, struct vmw_sid_cmd, header); 181 cmd = container_of(header, struct vmw_sid_cmd, header);
170 return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid); 182 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
171} 183}
172 184
173static int vmw_cmd_dma(struct vmw_private *dev_priv, 185static int vmw_cmd_dma(struct vmw_private *dev_priv,
@@ -187,12 +199,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
187 uint32_t cur_validate_node; 199 uint32_t cur_validate_node;
188 struct ttm_validate_buffer *val_buf; 200 struct ttm_validate_buffer *val_buf;
189 201
190
191 cmd = container_of(header, struct vmw_dma_cmd, header); 202 cmd = container_of(header, struct vmw_dma_cmd, header);
192 ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid);
193 if (unlikely(ret != 0))
194 return ret;
195
196 handle = cmd->dma.guest.ptr.gmrId; 203 handle = cmd->dma.guest.ptr.gmrId;
197 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); 204 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
198 if (unlikely(ret != 0)) { 205 if (unlikely(ret != 0)) {
@@ -228,14 +235,23 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
228 ++sw_context->cur_val_buf; 235 ++sw_context->cur_val_buf;
229 } 236 }
230 237
231 ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile, 238 ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
232 cmd->dma.host.sid, &srf); 239 cmd->dma.host.sid, &srf);
233 if (ret) { 240 if (ret) {
234 DRM_ERROR("could not find surface\n"); 241 DRM_ERROR("could not find surface\n");
235 goto out_no_reloc; 242 goto out_no_reloc;
236 } 243 }
237 244
245 /**
246 * Patch command stream with device SID.
247 */
248
249 cmd->dma.host.sid = srf->res.id;
238 vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header); 250 vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
251 /**
252 * FIXME: May deadlock here when called from the
253 * command parsing code.
254 */
239 vmw_surface_unreference(&srf); 255 vmw_surface_unreference(&srf);
240 256
241out_no_reloc: 257out_no_reloc:
@@ -243,6 +259,90 @@ out_no_reloc:
243 return ret; 259 return ret;
244} 260}
245 261
262static int vmw_cmd_draw(struct vmw_private *dev_priv,
263 struct vmw_sw_context *sw_context,
264 SVGA3dCmdHeader *header)
265{
266 struct vmw_draw_cmd {
267 SVGA3dCmdHeader header;
268 SVGA3dCmdDrawPrimitives body;
269 } *cmd;
270 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
271 (unsigned long)header + sizeof(*cmd));
272 SVGA3dPrimitiveRange *range;
273 uint32_t i;
274 uint32_t maxnum;
275 int ret;
276
277 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
278 if (unlikely(ret != 0))
279 return ret;
280
281 cmd = container_of(header, struct vmw_draw_cmd, header);
282 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
283
284 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
285 DRM_ERROR("Illegal number of vertex declarations.\n");
286 return -EINVAL;
287 }
288
289 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
290 ret = vmw_cmd_sid_check(dev_priv, sw_context,
291 &decl->array.surfaceId);
292 if (unlikely(ret != 0))
293 return ret;
294 }
295
296 maxnum = (header->size - sizeof(cmd->body) -
297 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
298 if (unlikely(cmd->body.numRanges > maxnum)) {
299 DRM_ERROR("Illegal number of index ranges.\n");
300 return -EINVAL;
301 }
302
303 range = (SVGA3dPrimitiveRange *) decl;
304 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
305 ret = vmw_cmd_sid_check(dev_priv, sw_context,
306 &range->indexArray.surfaceId);
307 if (unlikely(ret != 0))
308 return ret;
309 }
310 return 0;
311}
312
313
314static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
315 struct vmw_sw_context *sw_context,
316 SVGA3dCmdHeader *header)
317{
318 struct vmw_tex_state_cmd {
319 SVGA3dCmdHeader header;
320 SVGA3dCmdSetTextureState state;
321 };
322
323 SVGA3dTextureState *last_state = (SVGA3dTextureState *)
324 ((unsigned long) header + header->size + sizeof(header));
325 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
326 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
327 int ret;
328
329 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
330 if (unlikely(ret != 0))
331 return ret;
332
333 for (; cur_state < last_state; ++cur_state) {
334 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
335 continue;
336
337 ret = vmw_cmd_sid_check(dev_priv, sw_context,
338 &cur_state->value);
339 if (unlikely(ret != 0))
340 return ret;
341 }
342
343 return 0;
344}
345
246 346
247typedef int (*vmw_cmd_func) (struct vmw_private *, 347typedef int (*vmw_cmd_func) (struct vmw_private *,
248 struct vmw_sw_context *, 348 struct vmw_sw_context *,
@@ -264,7 +364,7 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
264 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check), 364 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
265 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, 365 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
266 &vmw_cmd_set_render_target_check), 366 &vmw_cmd_set_render_target_check),
267 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check), 367 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
268 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check), 368 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
269 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check), 369 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
270 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check), 370 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
@@ -276,7 +376,7 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
276 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), 376 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
277 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check), 377 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
278 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), 378 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
279 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check), 379 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
280 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), 380 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
281 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), 381 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
282 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check), 382 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
@@ -291,6 +391,7 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
291 void *buf, uint32_t *size) 391 void *buf, uint32_t *size)
292{ 392{
293 uint32_t cmd_id; 393 uint32_t cmd_id;
394 uint32_t size_remaining = *size;
294 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; 395 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
295 int ret; 396 int ret;
296 397
@@ -304,6 +405,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
304 *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); 405 *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
305 406
306 cmd_id -= SVGA_3D_CMD_BASE; 407 cmd_id -= SVGA_3D_CMD_BASE;
408 if (unlikely(*size > size_remaining))
409 goto out_err;
410
307 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) 411 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
308 goto out_err; 412 goto out_err;
309 413
@@ -326,6 +430,7 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
326 int ret; 430 int ret;
327 431
328 while (cur_size > 0) { 432 while (cur_size > 0) {
433 size = cur_size;
329 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); 434 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
330 if (unlikely(ret != 0)) 435 if (unlikely(ret != 0))
331 return ret; 436 return ret;
@@ -386,7 +491,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
386 return 0; 491 return 0;
387 492
388 ret = vmw_gmr_bind(dev_priv, bo); 493 ret = vmw_gmr_bind(dev_priv, bo);
389 if (likely(ret == 0 || ret == -ERESTART)) 494 if (likely(ret == 0 || ret == -ERESTARTSYS))
390 return ret; 495 return ret;
391 496
392 497
@@ -429,7 +534,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
429 534
430 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); 535 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
431 if (unlikely(ret != 0)) { 536 if (unlikely(ret != 0)) {
432 ret = -ERESTART; 537 ret = -ERESTARTSYS;
433 goto out_no_cmd_mutex; 538 goto out_no_cmd_mutex;
434 } 539 }
435 540
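
Taken together, the vmwgfx_execbuf.c hunks harden command-stream parsing: vmw_cmd_check() rejects headers whose declared size exceeds what is left in the submission, vmw_cmd_draw() and vmw_cmd_tex_state() bound their variable-length payloads before walking them, and vmw_cmd_sid_check() patches user surface handles to device IDs in place. The new size check, reduced to its essentials (names simplified):

    static int check_one_cmd(void *buf, uint32_t *size)
    {
            SVGA3dCmdHeader *header = buf;
            uint32_t remaining = *size;   /* bytes left in the submission */

            *size = le32_to_cpu(header->size) + sizeof(*header);
            if (*size > remaining)        /* header claims more than we have */
                    return -EINVAL;
            /* ... look up and run the per-command validator ... */
            return 0;
    }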
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 76b0693e2458..01feb48af333 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -191,7 +191,7 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
191 } 191 }
192 schedule_timeout(1); 192 schedule_timeout(1);
193 if (interruptible && signal_pending(current)) { 193 if (interruptible && signal_pending(current)) {
194 ret = -ERESTART; 194 ret = -ERESTARTSYS;
195 break; 195 break;
196 } 196 }
197 } 197 }
@@ -237,9 +237,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
237 (dev_priv->fifo_queue, 237 (dev_priv->fifo_queue,
238 !vmw_fifo_is_full(dev_priv, bytes), timeout); 238 !vmw_fifo_is_full(dev_priv, bytes), timeout);
239 239
240 if (unlikely(ret == -ERESTARTSYS)) 240 if (unlikely(ret == 0))
241 ret = -ERESTART;
242 else if (unlikely(ret == 0))
243 ret = -EBUSY; 241 ret = -EBUSY;
244 else if (likely(ret > 0)) 242 else if (likely(ret > 0))
245 ret = 0; 243 ret = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 9e0f0306eedb..d40086fc8647 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -155,7 +155,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
155 TASK_UNINTERRUPTIBLE); 155 TASK_UNINTERRUPTIBLE);
156 } 156 }
157 if (interruptible && signal_pending(current)) { 157 if (interruptible && signal_pending(current)) {
158 ret = -ERESTART; 158 ret = -ERESTARTSYS;
159 break; 159 break;
160 } 160 }
161 } 161 }
@@ -218,9 +218,7 @@ int vmw_wait_fence(struct vmw_private *dev_priv,
218 vmw_fence_signaled(dev_priv, sequence), 218 vmw_fence_signaled(dev_priv, sequence),
219 timeout); 219 timeout);
220 220
221 if (unlikely(ret == -ERESTARTSYS)) 221 if (unlikely(ret == 0))
222 ret = -ERESTART;
223 else if (unlikely(ret == 0))
224 ret = -EBUSY; 222 ret = -EBUSY;
225 else if (likely(ret > 0)) 223 else if (likely(ret > 0))
226 ret = 0; 224 ret = 0;
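
With the driver-private -ERESTART gone, both of these waits (here and in vmwgfx_fifo.c above) simply propagate -ERESTARTSYS and only translate the timeout and success cases of wait_event_interruptible_timeout(). The resulting idiom:

    ret = wait_event_interruptible_timeout(queue, condition, timeout);
    if (ret == 0)
            ret = -EBUSY;     /* timed out */
    else if (ret > 0)
            ret = 0;          /* condition became true in time */
    /* ret < 0 (-ERESTARTSYS) is passed through unchanged */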
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e9403be446fe..b1af76e371c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -106,8 +106,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
106 int ret; 106 int ret;
107 107
108 if (handle) { 108 if (handle) {
109 ret = vmw_user_surface_lookup(dev_priv, tfile, 109 ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
110 handle, &surface); 110 handle, &surface);
111 if (!ret) { 111 if (!ret) {
112 if (!surface->snooper.image) { 112 if (!surface->snooper.image) {
113 DRM_ERROR("surface not suitable for cursor\n"); 113 DRM_ERROR("surface not suitable for cursor\n");
@@ -704,8 +704,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
704 struct vmw_dma_buffer *bo = NULL; 704 struct vmw_dma_buffer *bo = NULL;
705 int ret; 705 int ret;
706 706
707 ret = vmw_user_surface_lookup(dev_priv, tfile, 707 ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
708 mode_cmd->handle, &surface); 708 mode_cmd->handle, &surface);
709 if (ret) 709 if (ret)
710 goto try_dmabuf; 710 goto try_dmabuf;
711 711
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a1ceed0c8e07..c012d5927f65 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -488,28 +488,44 @@ static void vmw_user_surface_free(struct vmw_resource *res)
488 kfree(user_srf); 488 kfree(user_srf);
489} 489}
490 490
491int vmw_user_surface_lookup(struct vmw_private *dev_priv, 491int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
492 struct ttm_object_file *tfile, 492 struct ttm_object_file *tfile,
493 int sid, struct vmw_surface **out) 493 uint32_t handle, struct vmw_surface **out)
494{ 494{
495 struct vmw_resource *res; 495 struct vmw_resource *res;
496 struct vmw_surface *srf; 496 struct vmw_surface *srf;
497 struct vmw_user_surface *user_srf; 497 struct vmw_user_surface *user_srf;
498 struct ttm_base_object *base;
499 int ret = -EINVAL;
498 500
499 res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, sid); 501 base = ttm_base_object_lookup(tfile, handle);
500 if (unlikely(res == NULL)) 502 if (unlikely(base == NULL))
501 return -EINVAL; 503 return -EINVAL;
502 504
503 if (res->res_free != &vmw_user_surface_free) 505 if (unlikely(base->object_type != VMW_RES_SURFACE))
504 return -EINVAL; 506 goto out_bad_resource;
505 507
506 srf = container_of(res, struct vmw_surface, res); 508 user_srf = container_of(base, struct vmw_user_surface, base);
507 user_srf = container_of(srf, struct vmw_user_surface, srf); 509 srf = &user_srf->srf;
508 if (user_srf->base.tfile != tfile && !user_srf->base.shareable) 510 res = &srf->res;
509 return -EPERM; 511
512 read_lock(&dev_priv->resource_lock);
513
514 if (!res->avail || res->res_free != &vmw_user_surface_free) {
515 read_unlock(&dev_priv->resource_lock);
516 goto out_bad_resource;
517 }
518
519 kref_get(&res->kref);
520 read_unlock(&dev_priv->resource_lock);
510 521
511 *out = srf; 522 *out = srf;
512 return 0; 523 ret = 0;
524
525out_bad_resource:
526 ttm_base_object_unref(&base);
527
528 return ret;
513} 529}
514 530
515static void vmw_user_surface_base_release(struct ttm_base_object **p_base) 531static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
@@ -526,35 +542,10 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
526int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, 542int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
527 struct drm_file *file_priv) 543 struct drm_file *file_priv)
528{ 544{
529 struct vmw_private *dev_priv = vmw_priv(dev);
530 struct vmw_resource *res;
531 struct vmw_surface *srf;
532 struct vmw_user_surface *user_srf;
533 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; 545 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
534 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 546 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
535 int ret = 0;
536
537 res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, arg->sid);
538 if (unlikely(res == NULL))
539 return -EINVAL;
540
541 if (res->res_free != &vmw_user_surface_free) {
542 ret = -EINVAL;
543 goto out;
544 }
545 547
546 srf = container_of(res, struct vmw_surface, res); 548 return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
547 user_srf = container_of(srf, struct vmw_user_surface, srf);
548 if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
549 ret = -EPERM;
550 goto out;
551 }
552
553 ttm_ref_object_base_unref(tfile, user_srf->base.hash.key,
554 TTM_REF_USAGE);
555out:
556 vmw_resource_unreference(&res);
557 return ret;
558} 549}
559 550
560int vmw_surface_define_ioctl(struct drm_device *dev, void *data, 551int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -649,7 +640,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
649 } 640 }
650 srf->snooper.crtc = NULL; 641 srf->snooper.crtc = NULL;
651 642
652 rep->sid = res->id; 643 rep->sid = user_srf->base.hash.key;
644 if (rep->sid == SVGA3D_INVALID_ID)
645 DRM_ERROR("Created bad Surface ID.\n");
646
653 vmw_resource_unreference(&res); 647 vmw_resource_unreference(&res);
654 return 0; 648 return 0;
655out_err1: 649out_err1:
@@ -662,39 +656,33 @@ out_err0:
662int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, 656int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
663 struct drm_file *file_priv) 657 struct drm_file *file_priv)
664{ 658{
665 struct vmw_private *dev_priv = vmw_priv(dev);
666 union drm_vmw_surface_reference_arg *arg = 659 union drm_vmw_surface_reference_arg *arg =
667 (union drm_vmw_surface_reference_arg *)data; 660 (union drm_vmw_surface_reference_arg *)data;
668 struct drm_vmw_surface_arg *req = &arg->req; 661 struct drm_vmw_surface_arg *req = &arg->req;
669 struct drm_vmw_surface_create_req *rep = &arg->rep; 662 struct drm_vmw_surface_create_req *rep = &arg->rep;
670 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 663 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
671 struct vmw_resource *res;
672 struct vmw_surface *srf; 664 struct vmw_surface *srf;
673 struct vmw_user_surface *user_srf; 665 struct vmw_user_surface *user_srf;
674 struct drm_vmw_size __user *user_sizes; 666 struct drm_vmw_size __user *user_sizes;
675 int ret; 667 struct ttm_base_object *base;
668 int ret = -EINVAL;
676 669
677 res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, req->sid); 670 base = ttm_base_object_lookup(tfile, req->sid);
678 if (unlikely(res == NULL)) 671 if (unlikely(base == NULL)) {
672 DRM_ERROR("Could not find surface to reference.\n");
679 return -EINVAL; 673 return -EINVAL;
680
681 if (res->res_free != &vmw_user_surface_free) {
682 ret = -EINVAL;
683 goto out;
684 } 674 }
685 675
686 srf = container_of(res, struct vmw_surface, res); 676 if (unlikely(base->object_type != VMW_RES_SURFACE))
687 user_srf = container_of(srf, struct vmw_user_surface, srf); 677 goto out_bad_resource;
688 if (user_srf->base.tfile != tfile && !user_srf->base.shareable) { 678
689 DRM_ERROR("Tried to reference none shareable surface\n"); 679 user_srf = container_of(base, struct vmw_user_surface, base);
690 ret = -EPERM; 680 srf = &user_srf->srf;
691 goto out;
692 }
693 681
694 ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); 682 ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
695 if (unlikely(ret != 0)) { 683 if (unlikely(ret != 0)) {
696 DRM_ERROR("Could not add a reference to a surface.\n"); 684 DRM_ERROR("Could not add a reference to a surface.\n");
697 goto out; 685 goto out_no_reference;
698 } 686 }
699 687
700 rep->flags = srf->flags; 688 rep->flags = srf->flags;
@@ -706,40 +694,43 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
706 if (user_sizes) 694 if (user_sizes)
707 ret = copy_to_user(user_sizes, srf->sizes, 695 ret = copy_to_user(user_sizes, srf->sizes,
708 srf->num_sizes * sizeof(*srf->sizes)); 696 srf->num_sizes * sizeof(*srf->sizes));
709 if (unlikely(ret != 0)) { 697 if (unlikely(ret != 0))
710 DRM_ERROR("copy_to_user failed %p %u\n", 698 DRM_ERROR("copy_to_user failed %p %u\n",
711 user_sizes, srf->num_sizes); 699 user_sizes, srf->num_sizes);
712 /** 700out_bad_resource:
713 * FIXME: Unreference surface here? 701out_no_reference:
714 */ 702 ttm_base_object_unref(&base);
715 goto out; 703
716 }
717out:
718 vmw_resource_unreference(&res);
719 return ret; 704 return ret;
720} 705}
721 706
722int vmw_surface_check(struct vmw_private *dev_priv, 707int vmw_surface_check(struct vmw_private *dev_priv,
723 struct ttm_object_file *tfile, 708 struct ttm_object_file *tfile,
724 int id) 709 uint32_t handle, int *id)
725{ 710{
726 struct vmw_resource *res; 711 struct ttm_base_object *base;
727 int ret = 0; 712 struct vmw_user_surface *user_srf;
728 713
729 read_lock(&dev_priv->resource_lock); 714 int ret = -EPERM;
730 res = idr_find(&dev_priv->surface_idr, id);
731 if (res && res->avail) {
732 struct vmw_surface *srf =
733 container_of(res, struct vmw_surface, res);
734 struct vmw_user_surface *usrf =
735 container_of(srf, struct vmw_user_surface, srf);
736 715
737 if (usrf->base.tfile != tfile && !usrf->base.shareable) 716 base = ttm_base_object_lookup(tfile, handle);
738 ret = -EPERM; 717 if (unlikely(base == NULL))
739 } else 718 return -EINVAL;
740 ret = -EINVAL; 719
741 read_unlock(&dev_priv->resource_lock); 720 if (unlikely(base->object_type != VMW_RES_SURFACE))
721 goto out_bad_surface;
742 722
723 user_srf = container_of(base, struct vmw_user_surface, base);
724 *id = user_srf->srf.res.id;
725 ret = 0;
726
727out_bad_surface:
728 /**
729 * FIXME: May deadlock here when called from the
730 * command parsing code.
731 */
732
733 ttm_base_object_unref(&base);
743 return ret; 734 return ret;
744} 735}
745 736
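
The vmwgfx_resource.c rework replaces raw idr-based SID lookups with TTM base-object handles: ttm_base_object_lookup() already enforces per-file visibility, so the open-coded tfile/shareable checks go away and callers receive a referenced surface. The new lookup flow, stripped down (the resource_lock/avail check is condensed out):

    ret = -EINVAL;
    base = ttm_base_object_lookup(tfile, handle);
    if (base == NULL)
            return -EINVAL;
    if (base->object_type != VMW_RES_SURFACE)
            goto out_unref;                       /* wrong kind of object */

    user_srf = container_of(base, struct vmw_user_surface, base);
    kref_get(&user_srf->srf.res.kref);            /* reference for the caller */
    *out = &user_srf->srf;
    ret = 0;
out_unref:
    ttm_base_object_unref(&base);
    return ret;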
diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h
index 27ae750ca878..bf31592eaf79 100644
--- a/drivers/hid/hid-lg.h
+++ b/drivers/hid/hid-lg.h
@@ -1,8 +1,6 @@
1#ifndef __HID_LG_H 1#ifndef __HID_LG_H
2#define __HID_LG_H 2#define __HID_LG_H
3 3
4#include <linux/autoconf.h>
5
6#ifdef CONFIG_LOGITECH_FF 4#ifdef CONFIG_LOGITECH_FF
7int lgff_init(struct hid_device *hdev); 5int lgff_init(struct hid_device *hdev);
8#else 6#else
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 95ccbe377f9c..46c3c566307e 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -228,6 +228,18 @@ config SENSORS_K8TEMP
228 This driver can also be built as a module. If so, the module 228 This driver can also be built as a module. If so, the module
229 will be called k8temp. 229 will be called k8temp.
230 230
231config SENSORS_K10TEMP
232 tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor"
233 depends on X86 && PCI
234 help
235 If you say yes here you get support for the temperature
236 sensor(s) inside your CPU. Supported are later revisions of
237 the AMD Family 10h and all revisions of the AMD Family 11h
238 microarchitectures.
239
240 This driver can also be built as a module. If so, the module
241 will be called k10temp.
242
231config SENSORS_AMS 243config SENSORS_AMS
232 tristate "Apple Motion Sensor driver" 244 tristate "Apple Motion Sensor driver"
233 depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C) && EXPERIMENTAL 245 depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C) && EXPERIMENTAL
@@ -810,6 +822,14 @@ config SENSORS_TMP421
810 This driver can also be built as a module. If so, the module 822 This driver can also be built as a module. If so, the module
811 will be called tmp421. 823 will be called tmp421.
812 824
825config SENSORS_VIA_CPUTEMP
826 tristate "VIA CPU temperature sensor"
827 depends on X86
828 help
829 If you say yes here you get support for the temperature
830 sensor inside your CPU. Supported are all known variants of
831 the VIA C7 and Nano.
832
813config SENSORS_VIA686A 833config SENSORS_VIA686A
814 tristate "VIA686A" 834 tristate "VIA686A"
815 depends on PCI 835 depends on PCI
@@ -998,6 +1018,23 @@ config SENSORS_LIS3_SPI
998 will be called lis3lv02d and a specific module for the SPI transport 1018 will be called lis3lv02d and a specific module for the SPI transport
999 is called lis3lv02d_spi. 1019 is called lis3lv02d_spi.
1000 1020
1021config SENSORS_LIS3_I2C
 1022	tristate "STMicroelectronics LIS3LV02Dx three-axis digital accelerometer (I2C)"

1023 depends on I2C && INPUT
1024 select INPUT_POLLDEV
1025 default n
1026 help
1027 This driver provides support for the LIS3LV02Dx accelerometer connected
1028 via I2C. The accelerometer data is readable via
1029 /sys/devices/platform/lis3lv02d.
1030
1031 This driver also provides an absolute input class device, allowing
1032 the device to act as a pinball machine-esque joystick.
1033
1034 This driver can also be built as modules. If so, the core module
1035 will be called lis3lv02d and a specific module for the I2C transport
1036 is called lis3lv02d_i2c.
1037
1001config SENSORS_APPLESMC 1038config SENSORS_APPLESMC
1002 tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)" 1039 tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
1003 depends on INPUT && X86 1040 depends on INPUT && X86
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 33c2ee105284..450c8e894277 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -53,8 +53,10 @@ obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
53obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o 53obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
54obj-$(CONFIG_SENSORS_IT87) += it87.o 54obj-$(CONFIG_SENSORS_IT87) += it87.o
55obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o 55obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
56obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
56obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o hp_accel.o 57obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o hp_accel.o
57obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d.o lis3lv02d_spi.o 58obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d.o lis3lv02d_spi.o
59obj-$(CONFIG_SENSORS_LIS3_I2C) += lis3lv02d.o lis3lv02d_i2c.o
58obj-$(CONFIG_SENSORS_LM63) += lm63.o 60obj-$(CONFIG_SENSORS_LM63) += lm63.o
59obj-$(CONFIG_SENSORS_LM70) += lm70.o 61obj-$(CONFIG_SENSORS_LM70) += lm70.o
60obj-$(CONFIG_SENSORS_LM73) += lm73.o 62obj-$(CONFIG_SENSORS_LM73) += lm73.o
@@ -87,6 +89,7 @@ obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
87obj-$(CONFIG_SENSORS_THMC50) += thmc50.o 89obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
88obj-$(CONFIG_SENSORS_TMP401) += tmp401.o 90obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
89obj-$(CONFIG_SENSORS_TMP421) += tmp421.o 91obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
92obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
90obj-$(CONFIG_SENSORS_VIA686A) += via686a.o 93obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
91obj-$(CONFIG_SENSORS_VT1211) += vt1211.o 94obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
92obj-$(CONFIG_SENSORS_VT8231) += vt8231.o 95obj-$(CONFIG_SENSORS_VT8231) += vt8231.o
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
new file mode 100644
index 000000000000..d8a26d16d948
--- /dev/null
+++ b/drivers/hwmon/k10temp.c
@@ -0,0 +1,197 @@
1/*
2 * k10temp.c - AMD Family 10h/11h processor hardware monitoring
3 *
4 * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
5 *
6 *
7 * This driver is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This driver is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14 * See the GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this driver; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/err.h>
21#include <linux/hwmon.h>
22#include <linux/hwmon-sysfs.h>
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/pci.h>
26#include <asm/processor.h>
27
28MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor");
29MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
30MODULE_LICENSE("GPL");
31
32static bool force;
33module_param(force, bool, 0444);
34MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
35
36#define REG_HARDWARE_THERMAL_CONTROL 0x64
37#define HTC_ENABLE 0x00000001
38
39#define REG_REPORTED_TEMPERATURE 0xa4
40
41#define REG_NORTHBRIDGE_CAPABILITIES 0xe8
42#define NB_CAP_HTC 0x00000400
43
44static ssize_t show_temp(struct device *dev,
45 struct device_attribute *attr, char *buf)
46{
47 u32 regval;
48
49 pci_read_config_dword(to_pci_dev(dev),
50 REG_REPORTED_TEMPERATURE, &regval);
51 return sprintf(buf, "%u\n", (regval >> 21) * 125);
52}
53
54static ssize_t show_temp_max(struct device *dev,
55 struct device_attribute *attr, char *buf)
56{
57 return sprintf(buf, "%d\n", 70 * 1000);
58}
59
60static ssize_t show_temp_crit(struct device *dev,
61 struct device_attribute *devattr, char *buf)
62{
63 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
64 int show_hyst = attr->index;
65 u32 regval;
66 int value;
67
68 pci_read_config_dword(to_pci_dev(dev),
69 REG_HARDWARE_THERMAL_CONTROL, &regval);
70 value = ((regval >> 16) & 0x7f) * 500 + 52000;
71 if (show_hyst)
72 value -= ((regval >> 24) & 0xf) * 500;
73 return sprintf(buf, "%d\n", value);
74}
75
76static ssize_t show_name(struct device *dev,
77 struct device_attribute *attr, char *buf)
78{
79 return sprintf(buf, "k10temp\n");
80}
81
82static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
83static DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL);
84static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
85static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1);
86static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
87
88static bool __devinit has_erratum_319(void)
89{
90 /*
91 * Erratum 319: The thermal sensor of older Family 10h processors
92 * (B steppings) may be unreliable.
93 */
94 return boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model <= 2;
95}
96
97static int __devinit k10temp_probe(struct pci_dev *pdev,
98 const struct pci_device_id *id)
99{
100 struct device *hwmon_dev;
101 u32 reg_caps, reg_htc;
102 int err;
103
104 if (has_erratum_319() && !force) {
105 dev_err(&pdev->dev,
106 "unreliable CPU thermal sensor; monitoring disabled\n");
107 err = -ENODEV;
108 goto exit;
109 }
110
111 err = device_create_file(&pdev->dev, &dev_attr_temp1_input);
112 if (err)
113 goto exit;
114 err = device_create_file(&pdev->dev, &dev_attr_temp1_max);
115 if (err)
116 goto exit_remove;
117
118 pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, &reg_caps);
119 pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, &reg_htc);
120 if ((reg_caps & NB_CAP_HTC) && (reg_htc & HTC_ENABLE)) {
121 err = device_create_file(&pdev->dev,
122 &sensor_dev_attr_temp1_crit.dev_attr);
123 if (err)
124 goto exit_remove;
125 err = device_create_file(&pdev->dev,
126 &sensor_dev_attr_temp1_crit_hyst.dev_attr);
127 if (err)
128 goto exit_remove;
129 }
130
131 err = device_create_file(&pdev->dev, &dev_attr_name);
132 if (err)
133 goto exit_remove;
134
135 hwmon_dev = hwmon_device_register(&pdev->dev);
136 if (IS_ERR(hwmon_dev)) {
137 err = PTR_ERR(hwmon_dev);
138 goto exit_remove;
139 }
140 dev_set_drvdata(&pdev->dev, hwmon_dev);
141
142 if (has_erratum_319() && force)
143 dev_warn(&pdev->dev,
144 "unreliable CPU thermal sensor; check erratum 319\n");
145 return 0;
146
147exit_remove:
148 device_remove_file(&pdev->dev, &dev_attr_name);
149 device_remove_file(&pdev->dev, &dev_attr_temp1_input);
150 device_remove_file(&pdev->dev, &dev_attr_temp1_max);
151 device_remove_file(&pdev->dev,
152 &sensor_dev_attr_temp1_crit.dev_attr);
153 device_remove_file(&pdev->dev,
154 &sensor_dev_attr_temp1_crit_hyst.dev_attr);
155exit:
156 return err;
157}
158
159static void __devexit k10temp_remove(struct pci_dev *pdev)
160{
161 hwmon_device_unregister(dev_get_drvdata(&pdev->dev));
162 device_remove_file(&pdev->dev, &dev_attr_name);
163 device_remove_file(&pdev->dev, &dev_attr_temp1_input);
164 device_remove_file(&pdev->dev, &dev_attr_temp1_max);
165 device_remove_file(&pdev->dev,
166 &sensor_dev_attr_temp1_crit.dev_attr);
167 device_remove_file(&pdev->dev,
168 &sensor_dev_attr_temp1_crit_hyst.dev_attr);
169 dev_set_drvdata(&pdev->dev, NULL);
170}
171
172static struct pci_device_id k10temp_id_table[] = {
173 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
174 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
175 {}
176};
177MODULE_DEVICE_TABLE(pci, k10temp_id_table);
178
179static struct pci_driver k10temp_driver = {
180 .name = "k10temp",
181 .id_table = k10temp_id_table,
182 .probe = k10temp_probe,
183 .remove = __devexit_p(k10temp_remove),
184};
185
186static int __init k10temp_init(void)
187{
188 return pci_register_driver(&k10temp_driver);
189}
190
191static void __exit k10temp_exit(void)
192{
193 pci_unregister_driver(&k10temp_driver);
194}
195
196module_init(k10temp_init)
197module_exit(k10temp_exit)
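For reference, temp1_input above comes straight from the CurTmp field in bits 31:21 of register 0xa4 (0.125 degC per LSB), and temp1_crit from the 7-bit HTC limit in register 0x64 (0.5 degC per LSB above 52 degC). A minimal userspace sketch of the same decoding, using made-up raw register values purely as an illustration:

	/* sketch: decode k10temp register values the same way the driver does */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t reported = 0x3e000000;	/* assumed raw value of reg 0xa4 */
		uint32_t htc      = 0x00240000;	/* assumed raw value of reg 0x64 */

		/* CurTmp, bits 31:21, 0.125 degC per LSB -> millidegrees */
		printf("temp1_input: %u\n", (reported >> 21) * 125);
		/* HtcTmpLmt, bits 22:16, 0.5 degC per LSB above 52 degC */
		printf("temp1_crit:  %u\n", ((htc >> 16) & 0x7f) * 500 + 52000);
		return 0;
	}

With these values the sketch prints 62000 and 70000, i.e. a 62.0 degC reading and a 70 degC critical limit, matching the sysfs millidegree convention.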
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c
new file mode 100644
index 000000000000..dc1f5402c1d7
--- /dev/null
+++ b/drivers/hwmon/lis3lv02d_i2c.c
@@ -0,0 +1,183 @@
1/*
2 * drivers/hwmon/lis3lv02d_i2c.c
3 *
4 * Implements I2C interface for lis3lv02d (STMicroelectronics) accelerometer.
5 * Driver is based on corresponding SPI driver written by Daniel Mack
6 * (lis3lv02d_spi.c (C) 2009 Daniel Mack <daniel@caiaq.de> ).
7 *
8 * Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
9 *
10 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * version 2 as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 * 02110-1301 USA
25 */
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/init.h>
30#include <linux/err.h>
31#include <linux/i2c.h>
32#include "lis3lv02d.h"
33
34#define DRV_NAME "lis3lv02d_i2c"
35
36static inline s32 lis3_i2c_write(struct lis3lv02d *lis3, int reg, u8 value)
37{
38 struct i2c_client *c = lis3->bus_priv;
39 return i2c_smbus_write_byte_data(c, reg, value);
40}
41
42static inline s32 lis3_i2c_read(struct lis3lv02d *lis3, int reg, u8 *v)
43{
44 struct i2c_client *c = lis3->bus_priv;
45 *v = i2c_smbus_read_byte_data(c, reg);
46 return 0;
47}
48
49static int lis3_i2c_init(struct lis3lv02d *lis3)
50{
51 u8 reg;
52 int ret;
53
54 /* power up the device */
55 ret = lis3->read(lis3, CTRL_REG1, &reg);
56 if (ret < 0)
57 return ret;
58
59 reg |= CTRL1_PD0;
60 return lis3->write(lis3, CTRL_REG1, reg);
61}
62
63/* Default axis mapping but it can be overwritten by platform data */
64static struct axis_conversion lis3lv02d_axis_map = { LIS3_DEV_X,
65 LIS3_DEV_Y,
66 LIS3_DEV_Z };
67
68static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
69 const struct i2c_device_id *id)
70{
71 int ret = 0;
72 struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
73
74 if (pdata) {
75 if (pdata->axis_x)
76 lis3lv02d_axis_map.x = pdata->axis_x;
77
78 if (pdata->axis_y)
79 lis3lv02d_axis_map.y = pdata->axis_y;
80
81 if (pdata->axis_z)
82 lis3lv02d_axis_map.z = pdata->axis_z;
83
84 if (pdata->setup_resources)
85 ret = pdata->setup_resources();
86
87 if (ret)
88 goto fail;
89 }
90
91 lis3_dev.pdata = pdata;
92 lis3_dev.bus_priv = client;
93 lis3_dev.init = lis3_i2c_init;
94 lis3_dev.read = lis3_i2c_read;
95 lis3_dev.write = lis3_i2c_write;
96 lis3_dev.irq = client->irq;
97 lis3_dev.ac = lis3lv02d_axis_map;
98
99 i2c_set_clientdata(client, &lis3_dev);
100 ret = lis3lv02d_init_device(&lis3_dev);
101fail:
102 return ret;
103}
104
105static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client)
106{
107 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
108 struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
109
110 if (pdata && pdata->release_resources)
111 pdata->release_resources();
112
113 lis3lv02d_joystick_disable();
114 lis3lv02d_poweroff(lis3);
115
116 return lis3lv02d_remove_fs(&lis3_dev);
117}
118
119#ifdef CONFIG_PM
120static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
121{
122 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
123
124 if (!lis3->pdata->wakeup_flags)
125 lis3lv02d_poweroff(lis3);
126 return 0;
127}
128
129static int lis3lv02d_i2c_resume(struct i2c_client *client)
130{
131 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
132
133 if (!lis3->pdata->wakeup_flags)
134 lis3lv02d_poweron(lis3);
135 return 0;
136}
137
138static void lis3lv02d_i2c_shutdown(struct i2c_client *client)
139{
140 lis3lv02d_i2c_suspend(client, PMSG_SUSPEND);
141}
142#else
143#define lis3lv02d_i2c_suspend NULL
144#define lis3lv02d_i2c_resume NULL
145#define lis3lv02d_i2c_shutdown NULL
146#endif
147
148static const struct i2c_device_id lis3lv02d_id[] = {
149 {"lis3lv02d", 0 },
150 {}
151};
152
153MODULE_DEVICE_TABLE(i2c, lis3lv02d_id);
154
155static struct i2c_driver lis3lv02d_i2c_driver = {
156 .driver = {
157 .name = DRV_NAME,
158 .owner = THIS_MODULE,
159 },
160 .suspend = lis3lv02d_i2c_suspend,
161 .shutdown = lis3lv02d_i2c_shutdown,
162 .resume = lis3lv02d_i2c_resume,
163 .probe = lis3lv02d_i2c_probe,
164 .remove = __devexit_p(lis3lv02d_i2c_remove),
165 .id_table = lis3lv02d_id,
166};
167
168static int __init lis3lv02d_init(void)
169{
170 return i2c_add_driver(&lis3lv02d_i2c_driver);
171}
172
173static void __exit lis3lv02d_exit(void)
174{
175 i2c_del_driver(&lis3lv02d_i2c_driver);
176}
177
178MODULE_AUTHOR("Nokia Corporation");
179MODULE_DESCRIPTION("lis3lv02d I2C interface");
180MODULE_LICENSE("GPL");
181
182module_init(lis3lv02d_init);
183module_exit(lis3lv02d_exit);
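A board file still has to declare the accelerometer on the right I2C bus for the probe above to run. A hedged sketch of such glue code follows; the bus number, the 0x1d slave address and the <linux/lis3lv02d.h> platform-data header are assumptions for illustration, not taken from this patch:

	/* sketch: board code registering a lis3lv02d I2C device */
	#include <linux/i2c.h>
	#include <linux/lis3lv02d.h>

	static struct lis3lv02d_platform_data board_lis3_pdata = {
		/* axis_x/axis_y/axis_z may be set here to override the default mapping */
	};

	static struct i2c_board_info board_i2c_devs[] __initdata = {
		{
			I2C_BOARD_INFO("lis3lv02d", 0x1d),	/* name must match the id table above */
			.platform_data = &board_lis3_pdata,
		},
	};

	/* in the board init code: */
	/* i2c_register_board_info(1, board_i2c_devs, ARRAY_SIZE(board_i2c_devs)); */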
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index ebe38b680ee3..864a371f6eb9 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -305,7 +305,7 @@ static inline int sht15_calc_temp(struct sht15_data *data)
305 int d1 = 0; 305 int d1 = 0;
306 int i; 306 int i;
307 307
308 for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++) 308 for (i = 1; i < ARRAY_SIZE(temppoints); i++)
309 /* Find pointer to interpolate */ 309 /* Find pointer to interpolate */
310 if (data->supply_uV > temppoints[i - 1].vdd) { 310 if (data->supply_uV > temppoints[i - 1].vdd) {
311 d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd) 311 d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
@@ -332,12 +332,12 @@ static inline int sht15_calc_humid(struct sht15_data *data)
332 332
333 const int c1 = -4; 333 const int c1 = -4;
334 const int c2 = 40500; /* x 10 ^ -6 */ 334 const int c2 = 40500; /* x 10 ^ -6 */
335 const int c3 = 2800; /* x10 ^ -9 */ 335 const int c3 = -2800; /* x10 ^ -9 */
336 336
337 RHlinear = c1*1000 337 RHlinear = c1*1000
338 + c2 * data->val_humid/1000 338 + c2 * data->val_humid/1000
339 + (data->val_humid * data->val_humid * c3)/1000000; 339 + (data->val_humid * data->val_humid * c3)/1000000;
340 return (temp - 25000) * (10000 + 800 * data->val_humid) 340 return (temp - 25000) * (10000 + 80 * data->val_humid)
341 / 1000000 + RHlinear; 341 / 1000000 + RHlinear;
342} 342}
343 343
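The two constant changes above (the sign of c3, and 800 becoming 80) bring sht15_calc_humid() in line with the usual Sensirion SHT1x conversion. Written out, with SO_RH the raw 12-bit humidity reading and T the temperature in degrees Celsius:

	RH_linear = -4 + 0.0405 * SO_RH - 2.8e-6 * SO_RH^2
	RH_true   = (T - 25) * (0.01 + 0.00008 * SO_RH) + RH_linear

which is exactly what the fixed-point code now computes: c2 = 40500 x 10^-6, c3 = -2800 x 10^-9, and (10000 + 80 * SO_RH) / 1000000 = 0.01 + 0.00008 * SO_RH.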
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 8ad50fdba00d..9ca97818bd4b 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -136,11 +136,11 @@ struct smsc47m1_data {
136 136
137struct smsc47m1_sio_data { 137struct smsc47m1_sio_data {
138 enum chips type; 138 enum chips type;
139 u8 activate; /* Remember initial device state */
139}; 140};
140 141
141 142
142static int smsc47m1_probe(struct platform_device *pdev); 143static int __exit smsc47m1_remove(struct platform_device *pdev);
143static int __devexit smsc47m1_remove(struct platform_device *pdev);
144static struct smsc47m1_data *smsc47m1_update_device(struct device *dev, 144static struct smsc47m1_data *smsc47m1_update_device(struct device *dev,
145 int init); 145 int init);
146 146
@@ -160,8 +160,7 @@ static struct platform_driver smsc47m1_driver = {
160 .owner = THIS_MODULE, 160 .owner = THIS_MODULE,
161 .name = DRVNAME, 161 .name = DRVNAME,
162 }, 162 },
163 .probe = smsc47m1_probe, 163 .remove = __exit_p(smsc47m1_remove),
164 .remove = __devexit_p(smsc47m1_remove),
165}; 164};
166 165
167static ssize_t get_fan(struct device *dev, struct device_attribute 166static ssize_t get_fan(struct device *dev, struct device_attribute
@@ -470,24 +469,126 @@ static int __init smsc47m1_find(unsigned short *addr,
470 superio_select(); 469 superio_select();
471 *addr = (superio_inb(SUPERIO_REG_BASE) << 8) 470 *addr = (superio_inb(SUPERIO_REG_BASE) << 8)
472 | superio_inb(SUPERIO_REG_BASE + 1); 471 | superio_inb(SUPERIO_REG_BASE + 1);
473 val = superio_inb(SUPERIO_REG_ACT); 472 if (*addr == 0) {
474 if (*addr == 0 || (val & 0x01) == 0) { 473 pr_info(DRVNAME ": Device address not set, will not use\n");
475 pr_info(DRVNAME ": Device is disabled, will not use\n");
476 superio_exit(); 474 superio_exit();
477 return -ENODEV; 475 return -ENODEV;
478 } 476 }
479 477
478 /* Enable only if address is set (needed at least on the
479 * Compaq Presario S4000NX) */
480 sio_data->activate = superio_inb(SUPERIO_REG_ACT);
481 if ((sio_data->activate & 0x01) == 0) {
482 pr_info(DRVNAME ": Enabling device\n");
483 superio_outb(SUPERIO_REG_ACT, sio_data->activate | 0x01);
484 }
485
480 superio_exit(); 486 superio_exit();
481 return 0; 487 return 0;
482} 488}
483 489
484static int __devinit smsc47m1_probe(struct platform_device *pdev) 490/* Restore device to its initial state */
491static void __init smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
492{
493 if ((sio_data->activate & 0x01) == 0) {
494 superio_enter();
495 superio_select();
496
497 pr_info(DRVNAME ": Disabling device\n");
498 superio_outb(SUPERIO_REG_ACT, sio_data->activate);
499
500 superio_exit();
501 }
502}
503
504#define CHECK 1
505#define REQUEST 2
506#define RELEASE 3
507
508/*
509 * This function can be used to:
510 * - test for resource conflicts with ACPI
511 * - request the resources
512 * - release the resources
513 * We only allocate the I/O ports we really need, to minimize the risk of
514 * conflicts with ACPI or with other drivers.
515 */
516static int smsc47m1_handle_resources(unsigned short address, enum chips type,
517 int action, struct device *dev)
518{
519 static const u8 ports_m1[] = {
520 /* register, region length */
521 0x04, 1,
522 0x33, 4,
523 0x56, 7,
524 };
525
526 static const u8 ports_m2[] = {
527 /* register, region length */
528 0x04, 1,
529 0x09, 1,
530 0x2c, 2,
531 0x35, 4,
532 0x56, 7,
533 0x69, 4,
534 };
535
536 int i, ports_size, err;
537 const u8 *ports;
538
539 switch (type) {
540 case smsc47m1:
541 default:
542 ports = ports_m1;
543 ports_size = ARRAY_SIZE(ports_m1);
544 break;
545 case smsc47m2:
546 ports = ports_m2;
547 ports_size = ARRAY_SIZE(ports_m2);
548 break;
549 }
550
551 for (i = 0; i + 1 < ports_size; i += 2) {
552 unsigned short start = address + ports[i];
553 unsigned short len = ports[i + 1];
554
555 switch (action) {
556 case CHECK:
557 /* Only check for conflicts */
558 err = acpi_check_region(start, len, DRVNAME);
559 if (err)
560 return err;
561 break;
562 case REQUEST:
563 /* Request the resources */
564 if (!request_region(start, len, DRVNAME)) {
565 dev_err(dev, "Region 0x%hx-0x%hx already in "
566 "use!\n", start, start + len);
567
568 /* Undo all requests */
569 for (i -= 2; i >= 0; i -= 2)
570 release_region(address + ports[i],
571 ports[i + 1]);
572 return -EBUSY;
573 }
574 break;
575 case RELEASE:
576 /* Release the resources */
577 release_region(start, len);
578 break;
579 }
580 }
581
582 return 0;
583}
584
585static int __init smsc47m1_probe(struct platform_device *pdev)
485{ 586{
486 struct device *dev = &pdev->dev; 587 struct device *dev = &pdev->dev;
487 struct smsc47m1_sio_data *sio_data = dev->platform_data; 588 struct smsc47m1_sio_data *sio_data = dev->platform_data;
488 struct smsc47m1_data *data; 589 struct smsc47m1_data *data;
489 struct resource *res; 590 struct resource *res;
490 int err = 0; 591 int err;
491 int fan1, fan2, fan3, pwm1, pwm2, pwm3; 592 int fan1, fan2, fan3, pwm1, pwm2, pwm3;
492 593
493 static const char *names[] = { 594 static const char *names[] = {
@@ -496,12 +597,10 @@ static int __devinit smsc47m1_probe(struct platform_device *pdev)
496 }; 597 };
497 598
498 res = platform_get_resource(pdev, IORESOURCE_IO, 0); 599 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
499 if (!request_region(res->start, SMSC_EXTENT, DRVNAME)) { 600 err = smsc47m1_handle_resources(res->start, sio_data->type,
500 dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", 601 REQUEST, dev);
501 (unsigned long)res->start, 602 if (err < 0)
502 (unsigned long)res->end); 603 return err;
503 return -EBUSY;
504 }
505 604
506 if (!(data = kzalloc(sizeof(struct smsc47m1_data), GFP_KERNEL))) { 605 if (!(data = kzalloc(sizeof(struct smsc47m1_data), GFP_KERNEL))) {
507 err = -ENOMEM; 606 err = -ENOMEM;
@@ -637,11 +736,11 @@ error_free:
637 platform_set_drvdata(pdev, NULL); 736 platform_set_drvdata(pdev, NULL);
638 kfree(data); 737 kfree(data);
639error_release: 738error_release:
640 release_region(res->start, SMSC_EXTENT); 739 smsc47m1_handle_resources(res->start, sio_data->type, RELEASE, dev);
641 return err; 740 return err;
642} 741}
643 742
644static int __devexit smsc47m1_remove(struct platform_device *pdev) 743static int __exit smsc47m1_remove(struct platform_device *pdev)
645{ 744{
646 struct smsc47m1_data *data = platform_get_drvdata(pdev); 745 struct smsc47m1_data *data = platform_get_drvdata(pdev);
647 struct resource *res; 746 struct resource *res;
@@ -650,7 +749,7 @@ static int __devexit smsc47m1_remove(struct platform_device *pdev)
650 sysfs_remove_group(&pdev->dev.kobj, &smsc47m1_group); 749 sysfs_remove_group(&pdev->dev.kobj, &smsc47m1_group);
651 750
652 res = platform_get_resource(pdev, IORESOURCE_IO, 0); 751 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
653 release_region(res->start, SMSC_EXTENT); 752 smsc47m1_handle_resources(res->start, data->type, RELEASE, &pdev->dev);
654 platform_set_drvdata(pdev, NULL); 753 platform_set_drvdata(pdev, NULL);
655 kfree(data); 754 kfree(data);
656 755
@@ -717,7 +816,7 @@ static int __init smsc47m1_device_add(unsigned short address,
717 }; 816 };
718 int err; 817 int err;
719 818
720 err = acpi_check_resource_conflict(&res); 819 err = smsc47m1_handle_resources(address, sio_data->type, CHECK, NULL);
721 if (err) 820 if (err)
722 goto exit; 821 goto exit;
723 822
@@ -766,27 +865,29 @@ static int __init sm_smsc47m1_init(void)
766 if (smsc47m1_find(&address, &sio_data)) 865 if (smsc47m1_find(&address, &sio_data))
767 return -ENODEV; 866 return -ENODEV;
768 867
769 err = platform_driver_register(&smsc47m1_driver); 868 /* Sets global pdev as a side effect */
869 err = smsc47m1_device_add(address, &sio_data);
770 if (err) 870 if (err)
771 goto exit; 871 goto exit;
772 872
773 /* Sets global pdev as a side effect */ 873 err = platform_driver_probe(&smsc47m1_driver, smsc47m1_probe);
774 err = smsc47m1_device_add(address, &sio_data);
775 if (err) 874 if (err)
776 goto exit_driver; 875 goto exit_device;
777 876
778 return 0; 877 return 0;
779 878
780exit_driver: 879exit_device:
781 platform_driver_unregister(&smsc47m1_driver); 880 platform_device_unregister(pdev);
881 smsc47m1_restore(&sio_data);
782exit: 882exit:
783 return err; 883 return err;
784} 884}
785 885
786static void __exit sm_smsc47m1_exit(void) 886static void __exit sm_smsc47m1_exit(void)
787{ 887{
788 platform_device_unregister(pdev);
789 platform_driver_unregister(&smsc47m1_driver); 888 platform_driver_unregister(&smsc47m1_driver);
889 smsc47m1_restore(pdev->dev.platform_data);
890 platform_device_unregister(pdev);
790} 891}
791 892
792MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>"); 893MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
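As a worked example of the register/length table in smsc47m1_handle_resources(): assuming a Super-I/O base address of 0x800, the smsc47m1 entries {0x04,1}, {0x33,4} and {0x56,7} translate into requests for 0x804, 0x833-0x836 and 0x856-0x85c only, rather than the whole SMSC_EXTENT window, which is what keeps the driver clear of ports that ACPI has already claimed.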
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
new file mode 100644
index 000000000000..7442cf754856
--- /dev/null
+++ b/drivers/hwmon/via-cputemp.c
@@ -0,0 +1,356 @@
1/*
2 * via-cputemp.c - Driver for VIA CPU core temperature monitoring
3 * Copyright (C) 2009 VIA Technologies, Inc.
4 *
5 * based on existing coretemp.c, which is
6 *
7 * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301 USA.
22 */
23
24#include <linux/module.h>
25#include <linux/delay.h>
26#include <linux/init.h>
27#include <linux/slab.h>
28#include <linux/jiffies.h>
29#include <linux/hwmon.h>
30#include <linux/sysfs.h>
31#include <linux/hwmon-sysfs.h>
32#include <linux/err.h>
33#include <linux/mutex.h>
34#include <linux/list.h>
35#include <linux/platform_device.h>
36#include <linux/cpu.h>
37#include <asm/msr.h>
38#include <asm/processor.h>
39
40#define DRVNAME "via_cputemp"
41
42enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME } SHOW;
43
44/*
45 * Functions declaration
46 */
47
48struct via_cputemp_data {
49 struct device *hwmon_dev;
50 const char *name;
51 u32 id;
52 u32 msr;
53};
54
55/*
56 * Sysfs stuff
57 */
58
59static ssize_t show_name(struct device *dev, struct device_attribute
60 *devattr, char *buf)
61{
62 int ret;
63 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
64 struct via_cputemp_data *data = dev_get_drvdata(dev);
65
66 if (attr->index == SHOW_NAME)
67 ret = sprintf(buf, "%s\n", data->name);
68 else /* show label */
69 ret = sprintf(buf, "Core %d\n", data->id);
70 return ret;
71}
72
73static ssize_t show_temp(struct device *dev,
74 struct device_attribute *devattr, char *buf)
75{
76 struct via_cputemp_data *data = dev_get_drvdata(dev);
77 u32 eax, edx;
78 int err;
79
80 err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx);
81 if (err)
82 return -EAGAIN;
83
84 return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000);
85}
86
87static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL,
88 SHOW_TEMP);
89static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL);
90static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME);
91
92static struct attribute *via_cputemp_attributes[] = {
93 &sensor_dev_attr_name.dev_attr.attr,
94 &sensor_dev_attr_temp1_label.dev_attr.attr,
95 &sensor_dev_attr_temp1_input.dev_attr.attr,
96 NULL
97};
98
99static const struct attribute_group via_cputemp_group = {
100 .attrs = via_cputemp_attributes,
101};
102
103static int __devinit via_cputemp_probe(struct platform_device *pdev)
104{
105 struct via_cputemp_data *data;
106 struct cpuinfo_x86 *c = &cpu_data(pdev->id);
107 int err;
108 u32 eax, edx;
109
110 data = kzalloc(sizeof(struct via_cputemp_data), GFP_KERNEL);
111 if (!data) {
112 err = -ENOMEM;
113 dev_err(&pdev->dev, "Out of memory\n");
114 goto exit;
115 }
116
117 data->id = pdev->id;
118 data->name = "via_cputemp";
119
120 switch (c->x86_model) {
121 case 0xA:
122 /* C7 A */
123 case 0xD:
124 /* C7 D */
125 data->msr = 0x1169;
126 break;
127 case 0xF:
128 /* Nano */
129 data->msr = 0x1423;
130 break;
131 default:
132 err = -ENODEV;
133 goto exit_free;
134 }
135
136 /* test if we can access the TEMPERATURE MSR */
137 err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx);
138 if (err) {
139 dev_err(&pdev->dev,
140 "Unable to access TEMPERATURE MSR, giving up\n");
141 goto exit_free;
142 }
143
144 platform_set_drvdata(pdev, data);
145
146 err = sysfs_create_group(&pdev->dev.kobj, &via_cputemp_group);
147 if (err)
148 goto exit_free;
149
150 data->hwmon_dev = hwmon_device_register(&pdev->dev);
151 if (IS_ERR(data->hwmon_dev)) {
152 err = PTR_ERR(data->hwmon_dev);
153 dev_err(&pdev->dev, "Class registration failed (%d)\n",
154 err);
155 goto exit_remove;
156 }
157
158 return 0;
159
160exit_remove:
161 sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
162exit_free:
163 platform_set_drvdata(pdev, NULL);
164 kfree(data);
165exit:
166 return err;
167}
168
169static int __devexit via_cputemp_remove(struct platform_device *pdev)
170{
171 struct via_cputemp_data *data = platform_get_drvdata(pdev);
172
173 hwmon_device_unregister(data->hwmon_dev);
174 sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
175 platform_set_drvdata(pdev, NULL);
176 kfree(data);
177 return 0;
178}
179
180static struct platform_driver via_cputemp_driver = {
181 .driver = {
182 .owner = THIS_MODULE,
183 .name = DRVNAME,
184 },
185 .probe = via_cputemp_probe,
186 .remove = __devexit_p(via_cputemp_remove),
187};
188
189struct pdev_entry {
190 struct list_head list;
191 struct platform_device *pdev;
192 unsigned int cpu;
193};
194
195static LIST_HEAD(pdev_list);
196static DEFINE_MUTEX(pdev_list_mutex);
197
198static int __cpuinit via_cputemp_device_add(unsigned int cpu)
199{
200 int err;
201 struct platform_device *pdev;
202 struct pdev_entry *pdev_entry;
203
204 pdev = platform_device_alloc(DRVNAME, cpu);
205 if (!pdev) {
206 err = -ENOMEM;
207 printk(KERN_ERR DRVNAME ": Device allocation failed\n");
208 goto exit;
209 }
210
211 pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
212 if (!pdev_entry) {
213 err = -ENOMEM;
214 goto exit_device_put;
215 }
216
217 err = platform_device_add(pdev);
218 if (err) {
219 printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
220 err);
221 goto exit_device_free;
222 }
223
224 pdev_entry->pdev = pdev;
225 pdev_entry->cpu = cpu;
226 mutex_lock(&pdev_list_mutex);
227 list_add_tail(&pdev_entry->list, &pdev_list);
228 mutex_unlock(&pdev_list_mutex);
229
230 return 0;
231
232exit_device_free:
233 kfree(pdev_entry);
234exit_device_put:
235 platform_device_put(pdev);
236exit:
237 return err;
238}
239
240#ifdef CONFIG_HOTPLUG_CPU
241static void via_cputemp_device_remove(unsigned int cpu)
242{
243 struct pdev_entry *p, *n;
244 mutex_lock(&pdev_list_mutex);
245 list_for_each_entry_safe(p, n, &pdev_list, list) {
246 if (p->cpu == cpu) {
247 platform_device_unregister(p->pdev);
248 list_del(&p->list);
249 kfree(p);
250 }
251 }
252 mutex_unlock(&pdev_list_mutex);
253}
254
255static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
256 unsigned long action, void *hcpu)
257{
258 unsigned int cpu = (unsigned long) hcpu;
259
260 switch (action) {
261 case CPU_ONLINE:
262 case CPU_DOWN_FAILED:
263 via_cputemp_device_add(cpu);
264 break;
265 case CPU_DOWN_PREPARE:
266 via_cputemp_device_remove(cpu);
267 break;
268 }
269 return NOTIFY_OK;
270}
271
272static struct notifier_block via_cputemp_cpu_notifier __refdata = {
273 .notifier_call = via_cputemp_cpu_callback,
274};
275#endif /* !CONFIG_HOTPLUG_CPU */
276
277static int __init via_cputemp_init(void)
278{
279 int i, err;
280 struct pdev_entry *p, *n;
281
282 if (cpu_data(0).x86_vendor != X86_VENDOR_CENTAUR) {
283 printk(KERN_DEBUG DRVNAME ": Not a VIA CPU\n");
284 err = -ENODEV;
285 goto exit;
286 }
287
288 err = platform_driver_register(&via_cputemp_driver);
289 if (err)
290 goto exit;
291
292 for_each_online_cpu(i) {
293 struct cpuinfo_x86 *c = &cpu_data(i);
294
295 if (c->x86 != 6)
296 continue;
297
298 if (c->x86_model < 0x0a)
299 continue;
300
301 if (c->x86_model > 0x0f) {
302 printk(KERN_WARNING DRVNAME ": Unknown CPU "
303 "model 0x%x\n", c->x86_model);
304 continue;
305 }
306
307 err = via_cputemp_device_add(i);
308 if (err)
309 goto exit_devices_unreg;
310 }
311 if (list_empty(&pdev_list)) {
312 err = -ENODEV;
313 goto exit_driver_unreg;
314 }
315
316#ifdef CONFIG_HOTPLUG_CPU
317 register_hotcpu_notifier(&via_cputemp_cpu_notifier);
318#endif
319 return 0;
320
321exit_devices_unreg:
322 mutex_lock(&pdev_list_mutex);
323 list_for_each_entry_safe(p, n, &pdev_list, list) {
324 platform_device_unregister(p->pdev);
325 list_del(&p->list);
326 kfree(p);
327 }
328 mutex_unlock(&pdev_list_mutex);
329exit_driver_unreg:
330 platform_driver_unregister(&via_cputemp_driver);
331exit:
332 return err;
333}
334
335static void __exit via_cputemp_exit(void)
336{
337 struct pdev_entry *p, *n;
338#ifdef CONFIG_HOTPLUG_CPU
339 unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
340#endif
341 mutex_lock(&pdev_list_mutex);
342 list_for_each_entry_safe(p, n, &pdev_list, list) {
343 platform_device_unregister(p->pdev);
344 list_del(&p->list);
345 kfree(p);
346 }
347 mutex_unlock(&pdev_list_mutex);
348 platform_driver_unregister(&via_cputemp_driver);
349}
350
351MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
352MODULE_DESCRIPTION("VIA CPU temperature monitor");
353MODULE_LICENSE("GPL");
354
355module_init(via_cputemp_init)
356module_exit(via_cputemp_exit)
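The MSR read in show_temp() returns the die temperature in plain degrees Celsius in the low 24 bits of EAX, which the driver multiplies by 1000 for sysfs. A hedged userspace sketch of the same read through the msr character device (the device path and CPU number are assumptions and require the msr module to be loaded):

	/* sketch: read the VIA C7/Nano temperature MSR from userspace */
	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		uint64_t val;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		if (fd < 0)
			return 1;
		/* 0x1169 for C7 A/D, 0x1423 for Nano, as selected in the probe above */
		if (pread(fd, &val, sizeof(val), 0x1169) == sizeof(val))
			printf("%lu\n", (unsigned long)(val & 0xffffff) * 1000);
		close(fd);
		return 0;
	}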
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index b257c7223733..38e280523071 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -1135,6 +1135,7 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
1135 "W83687THF", 1135 "W83687THF",
1136 }; 1136 };
1137 1137
1138 sio_data->sioaddr = sioaddr;
1138 superio_enter(sio_data); 1139 superio_enter(sio_data);
1139 val = force_id ? force_id : superio_inb(sio_data, DEVID); 1140 val = force_id ? force_id : superio_inb(sio_data, DEVID);
1140 switch (val) { 1141 switch (val) {
@@ -1177,7 +1178,6 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
1177 } 1178 }
1178 1179
1179 err = 0; 1180 err = 0;
1180 sio_data->sioaddr = sioaddr;
1181 pr_info(DRVNAME ": Found %s chip at %#x\n", 1181 pr_info(DRVNAME ": Found %s chip at %#x\n",
1182 names[sio_data->type], *addr); 1182 names[sio_data->type], *addr);
1183 1183
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index 1f20a042a4f5..dd253002cd50 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -81,7 +81,7 @@ static u8 i7300_idle_thrtctl_saved;
81static u8 i7300_idle_thrtlow_saved; 81static u8 i7300_idle_thrtlow_saved;
82static u32 i7300_idle_mc_saved; 82static u32 i7300_idle_mc_saved;
83 83
84static cpumask_t idle_cpumask; 84static cpumask_var_t idle_cpumask;
85static ktime_t start_ktime; 85static ktime_t start_ktime;
86static unsigned long avg_idle_us; 86static unsigned long avg_idle_us;
87 87
@@ -459,9 +459,9 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
459 spin_lock_irqsave(&i7300_idle_lock, flags); 459 spin_lock_irqsave(&i7300_idle_lock, flags);
460 if (val == IDLE_START) { 460 if (val == IDLE_START) {
461 461
462 cpu_set(smp_processor_id(), idle_cpumask); 462 cpumask_set_cpu(smp_processor_id(), idle_cpumask);
463 463
464 if (cpus_weight(idle_cpumask) != num_online_cpus()) 464 if (cpumask_weight(idle_cpumask) != num_online_cpus())
465 goto end; 465 goto end;
466 466
467 now_ktime = ktime_get(); 467 now_ktime = ktime_get();
@@ -478,8 +478,8 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
478 i7300_idle_ioat_start(); 478 i7300_idle_ioat_start();
479 479
480 } else if (val == IDLE_END) { 480 } else if (val == IDLE_END) {
481 cpu_clear(smp_processor_id(), idle_cpumask); 481 cpumask_clear_cpu(smp_processor_id(), idle_cpumask);
482 if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) { 482 if (cpumask_weight(idle_cpumask) == (num_online_cpus() - 1)) {
483 /* First CPU coming out of idle */ 483 /* First CPU coming out of idle */
484 u64 idle_duration_us; 484 u64 idle_duration_us;
485 485
@@ -553,7 +553,6 @@ struct debugfs_file_info {
553static int __init i7300_idle_init(void) 553static int __init i7300_idle_init(void)
554{ 554{
555 spin_lock_init(&i7300_idle_lock); 555 spin_lock_init(&i7300_idle_lock);
556 cpus_clear(idle_cpumask);
557 total_us = 0; 556 total_us = 0;
558 557
559 if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload)) 558 if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
@@ -565,6 +564,9 @@ static int __init i7300_idle_init(void)
565 if (i7300_idle_ioat_init()) 564 if (i7300_idle_ioat_init())
566 return -ENODEV; 565 return -ENODEV;
567 566
567 if (!zalloc_cpumask_var(&idle_cpumask, GFP_KERNEL))
568 return -ENOMEM;
569
568 debugfs_dir = debugfs_create_dir("i7300_idle", NULL); 570 debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
569 if (debugfs_dir) { 571 if (debugfs_dir) {
570 int i = 0; 572 int i = 0;
@@ -589,6 +591,7 @@ static int __init i7300_idle_init(void)
589static void __exit i7300_idle_exit(void) 591static void __exit i7300_idle_exit(void)
590{ 592{
591 idle_notifier_unregister(&i7300_idle_nb); 593 idle_notifier_unregister(&i7300_idle_nb);
594 free_cpumask_var(idle_cpumask);
592 595
593 if (debugfs_dir) { 596 if (debugfs_dir) {
594 int i = 0; 597 int i = 0;
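The i7300_idle change above is the standard conversion from a static cpumask_t to cpumask_var_t: allocate a zeroed mask at init (so the old cpus_clear() call goes away), use the cpumask_* accessors instead of cpu_set()/cpus_weight(), and free it at exit. A minimal sketch of that lifecycle, with hypothetical names:

	/* sketch: cpumask_var_t allocation pattern used by the patch above */
	#include <linux/cpumask.h>
	#include <linux/gfp.h>
	#include <linux/init.h>

	static cpumask_var_t my_mask;

	static int __init my_init(void)
	{
		if (!zalloc_cpumask_var(&my_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_set_cpu(0, my_mask);	/* replaces cpu_set(0, mask) */
		return 0;
	}

	static void __exit my_exit(void)
	{
		free_cpumask_var(my_mask);
	}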
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index bfd03bf8be54..f3d440cc68f2 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -34,6 +34,7 @@
34 34
35#include <linux/list.h> 35#include <linux/list.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/kfifo.h>
37 38
38#include "t3_cpl.h" 39#include "t3_cpl.h"
39#include "t3cdev.h" 40#include "t3cdev.h"
@@ -75,13 +76,13 @@ struct cxio_hal_ctrl_qp {
75}; 76};
76 77
77struct cxio_hal_resource { 78struct cxio_hal_resource {
78 struct kfifo *tpt_fifo; 79 struct kfifo tpt_fifo;
79 spinlock_t tpt_fifo_lock; 80 spinlock_t tpt_fifo_lock;
80 struct kfifo *qpid_fifo; 81 struct kfifo qpid_fifo;
81 spinlock_t qpid_fifo_lock; 82 spinlock_t qpid_fifo_lock;
82 struct kfifo *cqid_fifo; 83 struct kfifo cqid_fifo;
83 spinlock_t cqid_fifo_lock; 84 spinlock_t cqid_fifo_lock;
84 struct kfifo *pdid_fifo; 85 struct kfifo pdid_fifo;
85 spinlock_t pdid_fifo_lock; 86 spinlock_t pdid_fifo_lock;
86}; 87};
87 88
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index bd233c087653..31f9201b2980 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -39,12 +39,12 @@
39#include "cxio_resource.h" 39#include "cxio_resource.h"
40#include "cxio_hal.h" 40#include "cxio_hal.h"
41 41
42static struct kfifo *rhdl_fifo; 42static struct kfifo rhdl_fifo;
43static spinlock_t rhdl_fifo_lock; 43static spinlock_t rhdl_fifo_lock;
44 44
45#define RANDOM_SIZE 16 45#define RANDOM_SIZE 16
46 46
47static int __cxio_init_resource_fifo(struct kfifo **fifo, 47static int __cxio_init_resource_fifo(struct kfifo *fifo,
48 spinlock_t *fifo_lock, 48 spinlock_t *fifo_lock,
49 u32 nr, u32 skip_low, 49 u32 nr, u32 skip_low,
50 u32 skip_high, 50 u32 skip_high,
@@ -55,12 +55,11 @@ static int __cxio_init_resource_fifo(struct kfifo **fifo,
55 u32 rarray[16]; 55 u32 rarray[16];
56 spin_lock_init(fifo_lock); 56 spin_lock_init(fifo_lock);
57 57
58 *fifo = kfifo_alloc(nr * sizeof(u32), GFP_KERNEL, fifo_lock); 58 if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
59 if (IS_ERR(*fifo))
60 return -ENOMEM; 59 return -ENOMEM;
61 60
62 for (i = 0; i < skip_low + skip_high; i++) 61 for (i = 0; i < skip_low + skip_high; i++)
63 __kfifo_put(*fifo, (unsigned char *) &entry, sizeof(u32)); 62 kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
64 if (random) { 63 if (random) {
65 j = 0; 64 j = 0;
66 random_bytes = random32(); 65 random_bytes = random32();
@@ -72,33 +71,35 @@ static int __cxio_init_resource_fifo(struct kfifo **fifo,
72 random_bytes = random32(); 71 random_bytes = random32();
73 } 72 }
74 idx = (random_bytes >> (j * 2)) & 0xF; 73 idx = (random_bytes >> (j * 2)) & 0xF;
75 __kfifo_put(*fifo, 74 kfifo_in(fifo,
76 (unsigned char *) &rarray[idx], 75 (unsigned char *) &rarray[idx],
77 sizeof(u32)); 76 sizeof(u32));
78 rarray[idx] = i; 77 rarray[idx] = i;
79 j++; 78 j++;
80 } 79 }
81 for (i = 0; i < RANDOM_SIZE; i++) 80 for (i = 0; i < RANDOM_SIZE; i++)
82 __kfifo_put(*fifo, 81 kfifo_in(fifo,
83 (unsigned char *) &rarray[i], 82 (unsigned char *) &rarray[i],
84 sizeof(u32)); 83 sizeof(u32));
85 } else 84 } else
86 for (i = skip_low; i < nr - skip_high; i++) 85 for (i = skip_low; i < nr - skip_high; i++)
87 __kfifo_put(*fifo, (unsigned char *) &i, sizeof(u32)); 86 kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
88 87
89 for (i = 0; i < skip_low + skip_high; i++) 88 for (i = 0; i < skip_low + skip_high; i++)
90 kfifo_get(*fifo, (unsigned char *) &entry, sizeof(u32)); 89 if (kfifo_out_locked(fifo, (unsigned char *) &entry,
90 sizeof(u32), fifo_lock) != sizeof(u32))
91 break;
91 return 0; 92 return 0;
92} 93}
93 94
94static int cxio_init_resource_fifo(struct kfifo **fifo, spinlock_t * fifo_lock, 95static int cxio_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
95 u32 nr, u32 skip_low, u32 skip_high) 96 u32 nr, u32 skip_low, u32 skip_high)
96{ 97{
97 return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low, 98 return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
98 skip_high, 0)); 99 skip_high, 0));
99} 100}
100 101
101static int cxio_init_resource_fifo_random(struct kfifo **fifo, 102static int cxio_init_resource_fifo_random(struct kfifo *fifo,
102 spinlock_t * fifo_lock, 103 spinlock_t * fifo_lock,
103 u32 nr, u32 skip_low, u32 skip_high) 104 u32 nr, u32 skip_low, u32 skip_high)
104{ 105{
@@ -113,15 +114,13 @@ static int cxio_init_qpid_fifo(struct cxio_rdev *rdev_p)
113 114
114 spin_lock_init(&rdev_p->rscp->qpid_fifo_lock); 115 spin_lock_init(&rdev_p->rscp->qpid_fifo_lock);
115 116
116 rdev_p->rscp->qpid_fifo = kfifo_alloc(T3_MAX_NUM_QP * sizeof(u32), 117 if (kfifo_alloc(&rdev_p->rscp->qpid_fifo, T3_MAX_NUM_QP * sizeof(u32),
117 GFP_KERNEL, 118 GFP_KERNEL))
118 &rdev_p->rscp->qpid_fifo_lock);
119 if (IS_ERR(rdev_p->rscp->qpid_fifo))
120 return -ENOMEM; 119 return -ENOMEM;
121 120
122 for (i = 16; i < T3_MAX_NUM_QP; i++) 121 for (i = 16; i < T3_MAX_NUM_QP; i++)
123 if (!(i & rdev_p->qpmask)) 122 if (!(i & rdev_p->qpmask))
124 __kfifo_put(rdev_p->rscp->qpid_fifo, 123 kfifo_in(&rdev_p->rscp->qpid_fifo,
125 (unsigned char *) &i, sizeof(u32)); 124 (unsigned char *) &i, sizeof(u32));
126 return 0; 125 return 0;
127} 126}
@@ -134,7 +133,7 @@ int cxio_hal_init_rhdl_resource(u32 nr_rhdl)
134 133
135void cxio_hal_destroy_rhdl_resource(void) 134void cxio_hal_destroy_rhdl_resource(void)
136{ 135{
137 kfifo_free(rhdl_fifo); 136 kfifo_free(&rhdl_fifo);
138} 137}
139 138
140/* nr_* must be power of 2 */ 139/* nr_* must be power of 2 */
@@ -167,11 +166,11 @@ int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
167 goto pdid_err; 166 goto pdid_err;
168 return 0; 167 return 0;
169pdid_err: 168pdid_err:
170 kfifo_free(rscp->cqid_fifo); 169 kfifo_free(&rscp->cqid_fifo);
171cqid_err: 170cqid_err:
172 kfifo_free(rscp->qpid_fifo); 171 kfifo_free(&rscp->qpid_fifo);
173qpid_err: 172qpid_err:
174 kfifo_free(rscp->tpt_fifo); 173 kfifo_free(&rscp->tpt_fifo);
175tpt_err: 174tpt_err:
176 return -ENOMEM; 175 return -ENOMEM;
177} 176}
@@ -179,33 +178,37 @@ tpt_err:
179/* 178/*
180 * returns 0 if no resource available 179 * returns 0 if no resource available
181 */ 180 */
182static u32 cxio_hal_get_resource(struct kfifo *fifo) 181static u32 cxio_hal_get_resource(struct kfifo *fifo, spinlock_t * lock)
183{ 182{
184 u32 entry; 183 u32 entry;
185 if (kfifo_get(fifo, (unsigned char *) &entry, sizeof(u32))) 184 if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
186 return entry; 185 return entry;
187 else 186 else
 188 		return 0;  /* fifo empty */		 187 		return 0;  /* fifo empty */
189} 188}
190 189
191static void cxio_hal_put_resource(struct kfifo *fifo, u32 entry) 190static void cxio_hal_put_resource(struct kfifo *fifo, spinlock_t * lock,
191 u32 entry)
192{ 192{
193 BUG_ON(kfifo_put(fifo, (unsigned char *) &entry, sizeof(u32)) == 0); 193 BUG_ON(
194 kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock)
195 == 0);
194} 196}
195 197
196u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp) 198u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
197{ 199{
198 return cxio_hal_get_resource(rscp->tpt_fifo); 200 return cxio_hal_get_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock);
199} 201}
200 202
201void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag) 203void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
202{ 204{
203 cxio_hal_put_resource(rscp->tpt_fifo, stag); 205 cxio_hal_put_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock, stag);
204} 206}
205 207
206u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp) 208u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
207{ 209{
208 u32 qpid = cxio_hal_get_resource(rscp->qpid_fifo); 210 u32 qpid = cxio_hal_get_resource(&rscp->qpid_fifo,
211 &rscp->qpid_fifo_lock);
209 PDBG("%s qpid 0x%x\n", __func__, qpid); 212 PDBG("%s qpid 0x%x\n", __func__, qpid);
210 return qpid; 213 return qpid;
211} 214}
@@ -213,35 +216,35 @@ u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
213void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid) 216void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
214{ 217{
215 PDBG("%s qpid 0x%x\n", __func__, qpid); 218 PDBG("%s qpid 0x%x\n", __func__, qpid);
216 cxio_hal_put_resource(rscp->qpid_fifo, qpid); 219 cxio_hal_put_resource(&rscp->qpid_fifo, &rscp->qpid_fifo_lock, qpid);
217} 220}
218 221
219u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp) 222u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp)
220{ 223{
221 return cxio_hal_get_resource(rscp->cqid_fifo); 224 return cxio_hal_get_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock);
222} 225}
223 226
224void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid) 227void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid)
225{ 228{
226 cxio_hal_put_resource(rscp->cqid_fifo, cqid); 229 cxio_hal_put_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock, cqid);
227} 230}
228 231
229u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp) 232u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp)
230{ 233{
231 return cxio_hal_get_resource(rscp->pdid_fifo); 234 return cxio_hal_get_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock);
232} 235}
233 236
234void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid) 237void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid)
235{ 238{
236 cxio_hal_put_resource(rscp->pdid_fifo, pdid); 239 cxio_hal_put_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock, pdid);
237} 240}
238 241
239void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp) 242void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
240{ 243{
241 kfifo_free(rscp->tpt_fifo); 244 kfifo_free(&rscp->tpt_fifo);
242 kfifo_free(rscp->cqid_fifo); 245 kfifo_free(&rscp->cqid_fifo);
243 kfifo_free(rscp->qpid_fifo); 246 kfifo_free(&rscp->qpid_fifo);
244 kfifo_free(rscp->pdid_fifo); 247 kfifo_free(&rscp->pdid_fifo);
245 kfree(rscp); 248 kfree(rscp);
246} 249}
247 250
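The cxgb3 changes track the 2.6.33 kfifo rework: the fifo is now embedded in its owning structure instead of being a separately allocated pointer, kfifo_alloc() returns an error code rather than a pointer, and the lock is no longer stored inside the fifo but passed explicitly to kfifo_in_locked()/kfifo_out_locked(). A minimal hedged sketch of the new calling convention (the names are hypothetical):

	/* sketch: embedded kfifo used as a simple id pool, new-style API */
	#include <linux/kfifo.h>
	#include <linux/spinlock.h>
	#include <linux/gfp.h>

	struct my_pool {
		struct kfifo	fifo;
		spinlock_t	lock;
	};

	static int my_pool_init(struct my_pool *p, u32 nr)
	{
		u32 i;

		spin_lock_init(&p->lock);
		if (kfifo_alloc(&p->fifo, nr * sizeof(u32), GFP_KERNEL))
			return -ENOMEM;
		for (i = 1; i < nr; i++)
			kfifo_in(&p->fifo, (unsigned char *)&i, sizeof(u32));
		return 0;
	}

	static u32 my_pool_get(struct my_pool *p)
	{
		u32 id;

		if (kfifo_out_locked(&p->fifo, (unsigned char *)&id,
				     sizeof(u32), &p->lock) != sizeof(u32))
			return 0;	/* pool empty */
		return id;
	}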
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 7e5f30dbc0a0..f1e8af54dff0 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -661,7 +661,7 @@ l1oip_socket_thread(void *data)
661 size_t recvbuf_size = 1500; 661 size_t recvbuf_size = 1500;
662 int recvlen; 662 int recvlen;
663 struct socket *socket = NULL; 663 struct socket *socket = NULL;
664 DECLARE_COMPLETION(wait); 664 DECLARE_COMPLETION_ONSTACK(wait);
665 665
666 /* allocate buffer memory */ 666 /* allocate buffer memory */
667 recvbuf = kmalloc(recvbuf_size, GFP_KERNEL); 667 recvbuf = kmalloc(recvbuf_size, GFP_KERNEL);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index e4f599f20e38..8a0e1ec95e4a 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -229,6 +229,12 @@ config LEDS_PWM
229 help 229 help
230 This option enables support for pwm driven LEDs 230 This option enables support for pwm driven LEDs
231 231
232config LEDS_REGULATOR
233 tristate "REGULATOR driven LED support"
234 depends on LEDS_CLASS && REGULATOR
235 help
236 This option enables support for regulator driven LEDs.
237
232config LEDS_BD2802 238config LEDS_BD2802
233 tristate "LED driver for BD2802 RGB LED" 239 tristate "LED driver for BD2802 RGB LED"
234 depends on LEDS_CLASS && I2C 240 depends on LEDS_CLASS && I2C
@@ -236,6 +242,33 @@ config LEDS_BD2802
236 This option enables support for BD2802GU RGB LED driver chips 242 This option enables support for BD2802GU RGB LED driver chips
237 accessed via the I2C bus. 243 accessed via the I2C bus.
238 244
245config LEDS_INTEL_SS4200
246 tristate "LED driver for Intel NAS SS4200 series"
247 depends on LEDS_CLASS && PCI && DMI
248 help
249 This option enables support for the Intel SS4200 series of
250 Network Attached Storage servers. You may control the hard
251 drive or power LEDs on the front panel. Using this driver
252 can stop the front LED from blinking after startup.
253
254config LEDS_LT3593
255 tristate "LED driver for LT3593 controllers"
256 depends on LEDS_CLASS && GENERIC_GPIO
257 help
258 This option enables support for LEDs driven by a Linear Technology
259 LT3593 controller. This controller uses a special one-wire pulse
260 coding protocol to set the brightness.
261
262config LEDS_ADP5520
263 tristate "LED Support for ADP5520/ADP5501 PMIC"
264 depends on LEDS_CLASS && PMIC_ADP5520
265 help
266 This option enables support for on-chip LED drivers found
267 on Analog Devices ADP5520/ADP5501 PMICs.
268
269 To compile this driver as a module, choose M here: the module will
270 be called leds-adp5520.
271
239comment "LED Triggers" 272comment "LED Triggers"
240 273
241config LEDS_TRIGGERS 274config LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 46d72704d606..9e63869d7c0d 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -29,6 +29,10 @@ obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
29obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o 29obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
30obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o 30obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
31obj-$(CONFIG_LEDS_PWM) += leds-pwm.o 31obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
32obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
33obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o
34obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
35obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
32 36
33# LED SPI Drivers 37# LED SPI Drivers
34obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o 38obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
new file mode 100644
index 000000000000..a8f315902131
--- /dev/null
+++ b/drivers/leds/leds-adp5520.c
@@ -0,0 +1,230 @@
1/*
2 * LEDs driver for Analog Devices ADP5520/ADP5501 MFD PMICs
3 *
4 * Copyright 2009 Analog Devices Inc.
5 *
6 * Loosely derived from leds-da903x:
7 * Copyright (C) 2008 Compulab, Ltd.
8 * Mike Rapoport <mike@compulab.co.il>
9 *
10 * Copyright (C) 2006-2008 Marvell International Ltd.
11 * Eric Miao <eric.miao@marvell.com>
12 *
13 * Licensed under the GPL-2 or later.
14 */
15
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/platform_device.h>
20#include <linux/leds.h>
21#include <linux/workqueue.h>
22#include <linux/mfd/adp5520.h>
23
24struct adp5520_led {
25 struct led_classdev cdev;
26 struct work_struct work;
27 struct device *master;
28 enum led_brightness new_brightness;
29 int id;
30 int flags;
31};
32
33static void adp5520_led_work(struct work_struct *work)
34{
35 struct adp5520_led *led = container_of(work, struct adp5520_led, work);
36 adp5520_write(led->master, ADP5520_LED1_CURRENT + led->id - 1,
37 led->new_brightness >> 2);
38}
39
40static void adp5520_led_set(struct led_classdev *led_cdev,
41 enum led_brightness value)
42{
43 struct adp5520_led *led;
44
45 led = container_of(led_cdev, struct adp5520_led, cdev);
46 led->new_brightness = value;
47 schedule_work(&led->work);
48}
49
50static int adp5520_led_setup(struct adp5520_led *led)
51{
52 struct device *dev = led->master;
53 int flags = led->flags;
54 int ret = 0;
55
56 switch (led->id) {
57 case FLAG_ID_ADP5520_LED1_ADP5501_LED0:
58 ret |= adp5520_set_bits(dev, ADP5520_LED_TIME,
59 (flags >> ADP5520_FLAG_OFFT_SHIFT) &
60 ADP5520_FLAG_OFFT_MASK);
61 ret |= adp5520_set_bits(dev, ADP5520_LED_CONTROL,
62 ADP5520_LED1_EN);
63 break;
64 case FLAG_ID_ADP5520_LED2_ADP5501_LED1:
65 ret |= adp5520_set_bits(dev, ADP5520_LED_TIME,
66 ((flags >> ADP5520_FLAG_OFFT_SHIFT) &
67 ADP5520_FLAG_OFFT_MASK) << 2);
68 ret |= adp5520_clr_bits(dev, ADP5520_LED_CONTROL,
69 ADP5520_R3_MODE);
70 ret |= adp5520_set_bits(dev, ADP5520_LED_CONTROL,
71 ADP5520_LED2_EN);
72 break;
73 case FLAG_ID_ADP5520_LED3_ADP5501_LED2:
74 ret |= adp5520_set_bits(dev, ADP5520_LED_TIME,
75 ((flags >> ADP5520_FLAG_OFFT_SHIFT) &
76 ADP5520_FLAG_OFFT_MASK) << 4);
77 ret |= adp5520_clr_bits(dev, ADP5520_LED_CONTROL,
78 ADP5520_C3_MODE);
79 ret |= adp5520_set_bits(dev, ADP5520_LED_CONTROL,
80 ADP5520_LED3_EN);
81 break;
82 }
83
84 return ret;
85}
86
87static int __devinit adp5520_led_prepare(struct platform_device *pdev)
88{
89 struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
90 struct device *dev = pdev->dev.parent;
91 int ret = 0;
92
93 ret |= adp5520_write(dev, ADP5520_LED1_CURRENT, 0);
94 ret |= adp5520_write(dev, ADP5520_LED2_CURRENT, 0);
95 ret |= adp5520_write(dev, ADP5520_LED3_CURRENT, 0);
96 ret |= adp5520_write(dev, ADP5520_LED_TIME, pdata->led_on_time << 6);
97 ret |= adp5520_write(dev, ADP5520_LED_FADE, FADE_VAL(pdata->fade_in,
98 pdata->fade_out));
99
100 return ret;
101}
102
103static int __devinit adp5520_led_probe(struct platform_device *pdev)
104{
105 struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
106 struct adp5520_led *led, *led_dat;
107 struct led_info *cur_led;
108 int ret, i;
109
110 if (pdata == NULL) {
111 dev_err(&pdev->dev, "missing platform data\n");
112 return -ENODEV;
113 }
114
115 if (pdata->num_leds > ADP5520_01_MAXLEDS) {
116 dev_err(&pdev->dev, "can't handle more than %d LEDS\n",
117 ADP5520_01_MAXLEDS);
118 return -EFAULT;
119 }
120
121 led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
122 if (led == NULL) {
123 dev_err(&pdev->dev, "failed to alloc memory\n");
124 return -ENOMEM;
125 }
126
127 ret = adp5520_led_prepare(pdev);
128
129 if (ret) {
130 dev_err(&pdev->dev, "failed to write\n");
131 goto err_free;
132 }
133
134 for (i = 0; i < pdata->num_leds; ++i) {
135 cur_led = &pdata->leds[i];
136 led_dat = &led[i];
137
138 led_dat->cdev.name = cur_led->name;
139 led_dat->cdev.default_trigger = cur_led->default_trigger;
140 led_dat->cdev.brightness_set = adp5520_led_set;
141 led_dat->cdev.brightness = LED_OFF;
142
143 if (cur_led->flags & ADP5520_FLAG_LED_MASK)
144 led_dat->flags = cur_led->flags;
145 else
146 led_dat->flags = i + 1;
147
148 led_dat->id = led_dat->flags & ADP5520_FLAG_LED_MASK;
149
150 led_dat->master = pdev->dev.parent;
151 led_dat->new_brightness = LED_OFF;
152
153 INIT_WORK(&led_dat->work, adp5520_led_work);
154
155 ret = led_classdev_register(led_dat->master, &led_dat->cdev);
156 if (ret) {
157 dev_err(&pdev->dev, "failed to register LED %d\n",
158 led_dat->id);
159 goto err;
160 }
161
162 ret = adp5520_led_setup(led_dat);
163 if (ret) {
164 dev_err(&pdev->dev, "failed to write\n");
165 i++;
166 goto err;
167 }
168 }
169
170 platform_set_drvdata(pdev, led);
171 return 0;
172
173err:
174 if (i > 0) {
175 for (i = i - 1; i >= 0; i--) {
176 led_classdev_unregister(&led[i].cdev);
177 cancel_work_sync(&led[i].work);
178 }
179 }
180
181err_free:
182 kfree(led);
183 return ret;
184}
185
186static int __devexit adp5520_led_remove(struct platform_device *pdev)
187{
188 struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
189 struct adp5520_led *led;
190 int i;
191
192 led = platform_get_drvdata(pdev);
193
194 adp5520_clr_bits(led->master, ADP5520_LED_CONTROL,
195 ADP5520_LED1_EN | ADP5520_LED2_EN | ADP5520_LED3_EN);
196
197 for (i = 0; i < pdata->num_leds; i++) {
198 led_classdev_unregister(&led[i].cdev);
199 cancel_work_sync(&led[i].work);
200 }
201
202 kfree(led);
203 return 0;
204}
205
206static struct platform_driver adp5520_led_driver = {
207 .driver = {
208 .name = "adp5520-led",
209 .owner = THIS_MODULE,
210 },
211 .probe = adp5520_led_probe,
212 .remove = __devexit_p(adp5520_led_remove),
213};
214
215static int __init adp5520_led_init(void)
216{
217 return platform_driver_register(&adp5520_led_driver);
218}
219module_init(adp5520_led_init);
220
221static void __exit adp5520_led_exit(void)
222{
223 platform_driver_unregister(&adp5520_led_driver);
224}
225module_exit(adp5520_led_exit);
226
227MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
228MODULE_DESCRIPTION("LEDS ADP5520(01) Driver");
229MODULE_LICENSE("GPL");
230MODULE_ALIAS("platform:adp5520-led");
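One detail worth spelling out in adp5520_led_work(): led_classdev brightness runs 0-255 (LED_OFF to LED_FULL), while the ADP5520 LEDx_CURRENT registers take a narrower value, so the handler writes new_brightness >> 2. For example, a requested brightness of 255 becomes 63 (maximum current) and 128 becomes 32. The register width is inferred here from that shift, not taken from the datasheet.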
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
index 731d4eef3425..f59ffadf5125 100644
--- a/drivers/leds/leds-alix2.c
+++ b/drivers/leds/leds-alix2.c
@@ -11,11 +11,24 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/platform_device.h> 12#include <linux/platform_device.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <linux/pci.h>
14 15
15static int force = 0; 16static int force = 0;
16module_param(force, bool, 0444); 17module_param(force, bool, 0444);
17MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs"); 18MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs");
18 19
20#define MSR_LBAR_GPIO 0x5140000C
21#define CS5535_GPIO_SIZE 256
22
23static u32 gpio_base;
24
25static struct pci_device_id divil_pci[] = {
26 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
27 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
28 { } /* NULL entry */
29};
30MODULE_DEVICE_TABLE(pci, divil_pci);
31
19struct alix_led { 32struct alix_led {
20 struct led_classdev cdev; 33 struct led_classdev cdev;
21 unsigned short port; 34 unsigned short port;
@@ -30,9 +43,9 @@ static void alix_led_set(struct led_classdev *led_cdev,
30 container_of(led_cdev, struct alix_led, cdev); 43 container_of(led_cdev, struct alix_led, cdev);
31 44
32 if (brightness) 45 if (brightness)
33 outl(led_dev->on_value, led_dev->port); 46 outl(led_dev->on_value, gpio_base + led_dev->port);
34 else 47 else
35 outl(led_dev->off_value, led_dev->port); 48 outl(led_dev->off_value, gpio_base + led_dev->port);
36} 49}
37 50
38static struct alix_led alix_leds[] = { 51static struct alix_led alix_leds[] = {
@@ -41,7 +54,7 @@ static struct alix_led alix_leds[] = {
41 .name = "alix:1", 54 .name = "alix:1",
42 .brightness_set = alix_led_set, 55 .brightness_set = alix_led_set,
43 }, 56 },
44 .port = 0x6100, 57 .port = 0x00,
45 .on_value = 1 << 22, 58 .on_value = 1 << 22,
46 .off_value = 1 << 6, 59 .off_value = 1 << 6,
47 }, 60 },
@@ -50,7 +63,7 @@ static struct alix_led alix_leds[] = {
50 .name = "alix:2", 63 .name = "alix:2",
51 .brightness_set = alix_led_set, 64 .brightness_set = alix_led_set,
52 }, 65 },
53 .port = 0x6180, 66 .port = 0x80,
54 .on_value = 1 << 25, 67 .on_value = 1 << 25,
55 .off_value = 1 << 9, 68 .off_value = 1 << 9,
56 }, 69 },
@@ -59,7 +72,7 @@ static struct alix_led alix_leds[] = {
59 .name = "alix:3", 72 .name = "alix:3",
60 .brightness_set = alix_led_set, 73 .brightness_set = alix_led_set,
61 }, 74 },
62 .port = 0x6180, 75 .port = 0x80,
63 .on_value = 1 << 27, 76 .on_value = 1 << 27,
64 .off_value = 1 << 11, 77 .off_value = 1 << 11,
65 }, 78 },
@@ -101,64 +114,104 @@ static struct platform_driver alix_led_driver = {
101 }, 114 },
102}; 115};
103 116
104static int __init alix_present(void) 117static int __init alix_present(unsigned long bios_phys,
118 const char *alix_sig,
119 size_t alix_sig_len)
105{ 120{
106 const unsigned long bios_phys = 0x000f0000;
107 const size_t bios_len = 0x00010000; 121 const size_t bios_len = 0x00010000;
108 const char alix_sig[] = "PC Engines ALIX.";
109 const size_t alix_sig_len = sizeof(alix_sig) - 1;
110
111 const char *bios_virt; 122 const char *bios_virt;
112 const char *scan_end; 123 const char *scan_end;
113 const char *p; 124 const char *p;
114 int ret = 0; 125 char name[64];
115 126
116 if (force) { 127 if (force) {
117 printk(KERN_NOTICE "%s: forced to skip BIOS test, " 128 printk(KERN_NOTICE "%s: forced to skip BIOS test, "
118 "assume system has ALIX.2 style LEDs\n", 129 "assume system has ALIX.2 style LEDs\n",
119 KBUILD_MODNAME); 130 KBUILD_MODNAME);
120 ret = 1; 131 return 1;
121 goto out;
122 } 132 }
123 133
124 bios_virt = phys_to_virt(bios_phys); 134 bios_virt = phys_to_virt(bios_phys);
125 scan_end = bios_virt + bios_len - (alix_sig_len + 2); 135 scan_end = bios_virt + bios_len - (alix_sig_len + 2);
126 for (p = bios_virt; p < scan_end; p++) { 136 for (p = bios_virt; p < scan_end; p++) {
127 const char *tail; 137 const char *tail;
138 char *a;
128 139
129 if (memcmp(p, alix_sig, alix_sig_len) != 0) { 140 if (memcmp(p, alix_sig, alix_sig_len) != 0)
130 continue; 141 continue;
131 } 142
143 memcpy(name, p, sizeof(name));
144
145 /* remove the first \0 character from string */
146 a = strchr(name, '\0');
147 if (a)
148 *a = ' ';
149
150 /* cut the string at a newline */
151 a = strchr(name, '\r');
152 if (a)
153 *a = '\0';
132 154
133 tail = p + alix_sig_len; 155 tail = p + alix_sig_len;
134 if ((tail[0] == '2' || tail[0] == '3') && tail[1] == '\0') { 156 if ((tail[0] == '2' || tail[0] == '3')) {
135 printk(KERN_INFO 157 printk(KERN_INFO
136 "%s: system is recognized as \"%s\"\n", 158 "%s: system is recognized as \"%s\"\n",
137 KBUILD_MODNAME, p); 159 KBUILD_MODNAME, name);
138 ret = 1; 160 return 1;
139 break;
140 } 161 }
141 } 162 }
142 163
143out: 164 return 0;
144 return ret;
145} 165}
146 166
147static struct platform_device *pdev; 167static struct platform_device *pdev;
148 168
149static int __init alix_led_init(void) 169static int __init alix_pci_led_init(void)
150{ 170{
151 int ret; 171 u32 low, hi;
152 172
153 if (!alix_present()) { 173 if (pci_dev_present(divil_pci) == 0) {
154 ret = -ENODEV; 174 printk(KERN_WARNING KBUILD_MODNAME": DIVIL not found\n");
155 goto out; 175 return -ENODEV;
156 } 176 }
157 177
158 /* enable output on GPIO for LED 1,2,3 */ 178 /* Grab the GPIO I/O range */
159 outl(1 << 6, 0x6104); 179 rdmsr(MSR_LBAR_GPIO, low, hi);
160 outl(1 << 9, 0x6184); 180
161 outl(1 << 11, 0x6184); 181 /* Check the mask and whether GPIO is enabled (sanity check) */
182 if (hi != 0x0000f001) {
183 printk(KERN_WARNING KBUILD_MODNAME": GPIO not enabled\n");
184 return -ENODEV;
185 }
186
187 /* Mask off the IO base address */
188 gpio_base = low & 0x0000ff00;
189
190 if (!request_region(gpio_base, CS5535_GPIO_SIZE, KBUILD_MODNAME)) {
191 printk(KERN_ERR KBUILD_MODNAME": can't allocate I/O for GPIO\n");
192 return -ENODEV;
193 }
194
195 /* Set GPIO function to output */
196 outl(1 << 6, gpio_base + 0x04);
197 outl(1 << 9, gpio_base + 0x84);
198 outl(1 << 11, gpio_base + 0x84);
199
200 return 0;
201}
202
203static int __init alix_led_init(void)
204{
205 int ret = -ENODEV;
206 const char tinybios_sig[] = "PC Engines ALIX.";
207 const char coreboot_sig[] = "PC Engines\0ALIX.";
208
209 if (alix_present(0xf0000, tinybios_sig, sizeof(tinybios_sig) - 1) ||
210 alix_present(0x500, coreboot_sig, sizeof(coreboot_sig) - 1))
211 ret = alix_pci_led_init();
212
213 if (ret < 0)
214 return ret;
162 215
163 pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0); 216 pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
164 if (!IS_ERR(pdev)) { 217 if (!IS_ERR(pdev)) {
@@ -168,7 +221,6 @@ static int __init alix_led_init(void)
168 } else 221 } else
169 ret = PTR_ERR(pdev); 222 ret = PTR_ERR(pdev);
170 223
171out:
172 return ret; 224 return ret;
173} 225}
174 226
@@ -176,6 +228,7 @@ static void __exit alix_led_exit(void)
176{ 228{
177 platform_device_unregister(pdev); 229 platform_device_unregister(pdev);
178 platform_driver_unregister(&alix_led_driver); 230 platform_driver_unregister(&alix_led_driver);
231 release_region(gpio_base, CS5535_GPIO_SIZE);
179} 232}
180 233
181module_init(alix_led_init); 234module_init(alix_led_init);
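Note on the leds-alix2 hunks above: the driver no longer assumes the CS5535/CS5536 GPIO block sits at fixed I/O ports (0x6100/0x6180); it reads the DIVIL GPIO LBAR MSR and masks out the base at runtime. A minimal stand-alone sketch of that discovery pattern, using the same MSR number and masks as the patch (the function and region names here are illustrative, not part of the patch):

/* Sketch: locate the CS5535/CS5536 GPIO I/O range before touching it.
 * Constants mirror the hunk above; the function name is illustrative. */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <asm/msr.h>

#define MSR_LBAR_GPIO		0x5140000C
#define CS5535_GPIO_SIZE	256

static u32 gpio_base;

static int __init find_cs5535_gpio(void)
{
	u32 low, hi;

	rdmsr(MSR_LBAR_GPIO, low, hi);	/* LBAR MSR: high half carries the enable bit */
	if (hi != 0x0000f001)		/* GPIO region not enabled by the firmware */
		return -ENODEV;

	gpio_base = low & 0x0000ff00;	/* I/O base lives in the low half, bits 8..15 */
	if (!request_region(gpio_base, CS5535_GPIO_SIZE, "cs5535-gpio-sketch"))
		return -EBUSY;

	return 0;
}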
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index 8816806accd2..da5fb016b1a5 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -31,7 +31,7 @@ static struct led_classdev qube_front_led = {
31 .name = "qube::front", 31 .name = "qube::front",
32 .brightness = LED_FULL, 32 .brightness = LED_FULL,
33 .brightness_set = qube_front_led_set, 33 .brightness_set = qube_front_led_set,
34 .default_trigger = "ide-disk", 34 .default_trigger = "default-on",
35}; 35};
36 36
37static int __devinit cobalt_qube_led_probe(struct platform_device *pdev) 37static int __devinit cobalt_qube_led_probe(struct platform_device *pdev)
@@ -43,7 +43,7 @@ static int __devinit cobalt_qube_led_probe(struct platform_device *pdev)
43 if (!res) 43 if (!res)
44 return -EBUSY; 44 return -EBUSY;
45 45
46 led_port = ioremap(res->start, res->end - res->start + 1); 46 led_port = ioremap(res->start, resource_size(res));
47 if (!led_port) 47 if (!led_port)
48 return -ENOMEM; 48 return -ENOMEM;
49 49
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index defc212105f3..438d48384636 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -84,7 +84,7 @@ static int __devinit cobalt_raq_led_probe(struct platform_device *pdev)
84 if (!res) 84 if (!res)
85 return -EBUSY; 85 return -EBUSY;
86 86
87 led_port = ioremap(res->start, res->end - res->start + 1); 87 led_port = ioremap(res->start, resource_size(res));
88 if (!led_port) 88 if (!led_port)
89 return -ENOMEM; 89 return -ENOMEM;
90 90
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
new file mode 100644
index 000000000000..fee40a841959
--- /dev/null
+++ b/drivers/leds/leds-lt3593.c
@@ -0,0 +1,217 @@
1/*
2 * LEDs driver for LT3593 controllers
3 *
4 * See the datasheet at http://cds.linear.com/docs/Datasheet/3593f.pdf
5 *
6 * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
7 *
8 * Based on leds-gpio.c,
9 *
10 * Copyright (C) 2007 8D Technologies inc.
11 * Raphael Assenat <raph@8d.com>
12 * Copyright (C) 2008 Freescale Semiconductor, Inc.
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/platform_device.h>
22#include <linux/leds.h>
23#include <linux/workqueue.h>
24#include <linux/delay.h>
25#include <linux/gpio.h>
26
27struct lt3593_led_data {
28 struct led_classdev cdev;
29 unsigned gpio;
30 struct work_struct work;
31 u8 new_level;
32};
33
34static void lt3593_led_work(struct work_struct *work)
35{
36 int pulses;
37 struct lt3593_led_data *led_dat =
38 container_of(work, struct lt3593_led_data, work);
39
40 /*
41 * The LT3593 resets its internal current level register to the maximum
42 * level on the first falling edge on the control pin. Each following
43 * falling edge decreases the current level by 625uA. Up to 32 pulses
44 * can be sent, so the maximum power reduction is 20mA.
45 * After a timeout of 128us, the value is taken from the register and
 46 * applied to the output driver.
47 */
48
49 if (led_dat->new_level == 0) {
50 gpio_set_value_cansleep(led_dat->gpio, 0);
51 return;
52 }
53
54 pulses = 32 - (led_dat->new_level * 32) / 255;
55
56 if (pulses == 0) {
57 gpio_set_value_cansleep(led_dat->gpio, 0);
58 mdelay(1);
59 gpio_set_value_cansleep(led_dat->gpio, 1);
60 return;
61 }
62
63 gpio_set_value_cansleep(led_dat->gpio, 1);
64
65 while (pulses--) {
66 gpio_set_value_cansleep(led_dat->gpio, 0);
67 udelay(1);
68 gpio_set_value_cansleep(led_dat->gpio, 1);
69 udelay(1);
70 }
71}
72
73static void lt3593_led_set(struct led_classdev *led_cdev,
74 enum led_brightness value)
75{
76 struct lt3593_led_data *led_dat =
77 container_of(led_cdev, struct lt3593_led_data, cdev);
78
79 led_dat->new_level = value;
80 schedule_work(&led_dat->work);
81}
82
83static int __devinit create_lt3593_led(const struct gpio_led *template,
84 struct lt3593_led_data *led_dat, struct device *parent)
85{
86 int ret, state;
87
88 /* skip leds on GPIOs that aren't available */
89 if (!gpio_is_valid(template->gpio)) {
90 printk(KERN_INFO "%s: skipping unavailable LT3593 LED at gpio %d (%s)\n",
91 KBUILD_MODNAME, template->gpio, template->name);
92 return 0;
93 }
94
95 ret = gpio_request(template->gpio, template->name);
96 if (ret < 0)
97 return ret;
98
99 led_dat->cdev.name = template->name;
100 led_dat->cdev.default_trigger = template->default_trigger;
101 led_dat->gpio = template->gpio;
102
103 led_dat->cdev.brightness_set = lt3593_led_set;
104
105 state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
106 led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
107
108 if (!template->retain_state_suspended)
109 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
110
111 ret = gpio_direction_output(led_dat->gpio, state);
112 if (ret < 0)
113 goto err;
114
115 INIT_WORK(&led_dat->work, lt3593_led_work);
116
117 ret = led_classdev_register(parent, &led_dat->cdev);
118 if (ret < 0)
119 goto err;
120
121 printk(KERN_INFO "%s: registered LT3593 LED '%s' at GPIO %d\n",
122 KBUILD_MODNAME, template->name, template->gpio);
123
124 return 0;
125
126err:
127 gpio_free(led_dat->gpio);
128 return ret;
129}
130
131static void delete_lt3593_led(struct lt3593_led_data *led)
132{
133 if (!gpio_is_valid(led->gpio))
134 return;
135
136 led_classdev_unregister(&led->cdev);
137 cancel_work_sync(&led->work);
138 gpio_free(led->gpio);
139}
140
141static int __devinit lt3593_led_probe(struct platform_device *pdev)
142{
143 struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
144 struct lt3593_led_data *leds_data;
145 int i, ret = 0;
146
147 if (!pdata)
148 return -EBUSY;
149
150 leds_data = kzalloc(sizeof(struct lt3593_led_data) * pdata->num_leds,
151 GFP_KERNEL);
152 if (!leds_data)
153 return -ENOMEM;
154
155 for (i = 0; i < pdata->num_leds; i++) {
156 ret = create_lt3593_led(&pdata->leds[i], &leds_data[i],
157 &pdev->dev);
158 if (ret < 0)
159 goto err;
160 }
161
162 platform_set_drvdata(pdev, leds_data);
163
164 return 0;
165
166err:
167 for (i = i - 1; i >= 0; i--)
168 delete_lt3593_led(&leds_data[i]);
169
170 kfree(leds_data);
171
172 return ret;
173}
174
175static int __devexit lt3593_led_remove(struct platform_device *pdev)
176{
177 int i;
178 struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
179 struct lt3593_led_data *leds_data;
180
181 leds_data = platform_get_drvdata(pdev);
182
183 for (i = 0; i < pdata->num_leds; i++)
184 delete_lt3593_led(&leds_data[i]);
185
186 kfree(leds_data);
187
188 return 0;
189}
190
191static struct platform_driver lt3593_led_driver = {
192 .probe = lt3593_led_probe,
193 .remove = __devexit_p(lt3593_led_remove),
194 .driver = {
195 .name = "leds-lt3593",
196 .owner = THIS_MODULE,
197 },
198};
199
200MODULE_ALIAS("platform:leds-lt3593");
201
202static int __init lt3593_led_init(void)
203{
204 return platform_driver_register(&lt3593_led_driver);
205}
206
207static void __exit lt3593_led_exit(void)
208{
209 platform_driver_unregister(&lt3593_led_driver);
210}
211
212module_init(lt3593_led_init);
213module_exit(lt3593_led_exit);
214
215MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
216MODULE_DESCRIPTION("LED driver for LT3593 controllers");
217MODULE_LICENSE("GPL");
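The comment in lt3593_led_work() above describes the control protocol: every falling edge after the first lowers the LED current by 625 uA, and up to 32 pulses can be sent. The brightness-to-pulse mapping therefore works out as in this illustrative helper (not part of the driver): a brightness of 128 yields 16 pulses, cutting roughly 10 mA from the maximum current.

/* Illustration of the brightness -> pulse-count mapping in lt3593_led_work().
 * 255 -> 0 pulses (handled by the off/on toggle branch, i.e. full current),
 * 128 -> 16 pulses (current reduced by 16 * 625 uA = 10 mA),
 *   8 -> 31 pulses (current reduced by 19.375 mA). */
static int lt3593_pulses(int brightness)	/* 0..255 from the LED class */
{
	return 32 - (brightness * 32) / 255;
}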
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index cdfdc8714e10..88b1dd091cfb 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -27,7 +27,6 @@ struct led_pwm_data {
27 struct pwm_device *pwm; 27 struct pwm_device *pwm;
28 unsigned int active_low; 28 unsigned int active_low;
29 unsigned int period; 29 unsigned int period;
30 unsigned int max_brightness;
31}; 30};
32 31
33static void led_pwm_set(struct led_classdev *led_cdev, 32static void led_pwm_set(struct led_classdev *led_cdev,
@@ -35,7 +34,7 @@ static void led_pwm_set(struct led_classdev *led_cdev,
35{ 34{
36 struct led_pwm_data *led_dat = 35 struct led_pwm_data *led_dat =
37 container_of(led_cdev, struct led_pwm_data, cdev); 36 container_of(led_cdev, struct led_pwm_data, cdev);
38 unsigned int max = led_dat->max_brightness; 37 unsigned int max = led_dat->cdev.max_brightness;
39 unsigned int period = led_dat->period; 38 unsigned int period = led_dat->period;
40 39
41 if (brightness == 0) { 40 if (brightness == 0) {
@@ -77,10 +76,10 @@ static int led_pwm_probe(struct platform_device *pdev)
77 led_dat->cdev.name = cur_led->name; 76 led_dat->cdev.name = cur_led->name;
78 led_dat->cdev.default_trigger = cur_led->default_trigger; 77 led_dat->cdev.default_trigger = cur_led->default_trigger;
79 led_dat->active_low = cur_led->active_low; 78 led_dat->active_low = cur_led->active_low;
80 led_dat->max_brightness = cur_led->max_brightness;
81 led_dat->period = cur_led->pwm_period_ns; 79 led_dat->period = cur_led->pwm_period_ns;
82 led_dat->cdev.brightness_set = led_pwm_set; 80 led_dat->cdev.brightness_set = led_pwm_set;
83 led_dat->cdev.brightness = LED_OFF; 81 led_dat->cdev.brightness = LED_OFF;
82 led_dat->cdev.max_brightness = cur_led->max_brightness;
84 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; 83 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
85 84
86 ret = led_classdev_register(&pdev->dev, &led_dat->cdev); 85 ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
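The leds-pwm change above moves max_brightness from the driver-private structure into the generic led_classdev, so the LED core and its sysfs interface see the real range. A hypothetical board-code hookup for one PWM LED follows; the field names are assumed from include/linux/leds_pwm.h and the platform device name is assumed to match the driver's, so treat every identifier as illustrative rather than authoritative.

/* Hypothetical board description for one PWM-driven LED. */
#include <linux/kernel.h>
#include <linux/leds_pwm.h>
#include <linux/platform_device.h>

static struct led_pwm board_pwm_leds[] = {
	{
		.name		= "board:green:status",
		.pwm_id		= 0,
		.max_brightness	= 127,		/* now reported through cdev.max_brightness */
		.pwm_period_ns	= 1000000,	/* 1 ms PWM period */
	},
};

static struct led_pwm_platform_data board_pwm_led_data = {
	.num_leds	= ARRAY_SIZE(board_pwm_leds),
	.leds		= board_pwm_leds,
};

static struct platform_device board_leds_pwm_device = {
	.name		= "leds_pwm",	/* assumed driver/device name */
	.id		= -1,
	.dev		= { .platform_data = &board_pwm_led_data },
};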
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
new file mode 100644
index 000000000000..7f00de3ef922
--- /dev/null
+++ b/drivers/leds/leds-regulator.c
@@ -0,0 +1,242 @@
1/*
2 * leds-regulator.c - LED class driver for regulator driven LEDs.
3 *
4 * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
5 *
6 * Inspired by leds-wm8350 driver.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/err.h>
16#include <linux/workqueue.h>
17#include <linux/leds.h>
18#include <linux/leds-regulator.h>
19#include <linux/platform_device.h>
20#include <linux/regulator/consumer.h>
21
22#define to_regulator_led(led_cdev) \
23 container_of(led_cdev, struct regulator_led, cdev)
24
25struct regulator_led {
26 struct led_classdev cdev;
27 enum led_brightness value;
28 int enabled;
29 struct mutex mutex;
30 struct work_struct work;
31
32 struct regulator *vcc;
33};
34
35static inline int led_regulator_get_max_brightness(struct regulator *supply)
36{
37 int ret;
38 int voltage = regulator_list_voltage(supply, 0);
39
40 if (voltage <= 0)
41 return 1;
42
 43 /* even if the regulator can't change voltages,
44 * we still assume it can change status
45 * and the LED can be turned on and off.
46 */
47 ret = regulator_set_voltage(supply, voltage, voltage);
48 if (ret < 0)
49 return 1;
50
51 return regulator_count_voltages(supply);
52}
53
54static int led_regulator_get_voltage(struct regulator *supply,
55 enum led_brightness brightness)
56{
57 if (brightness == 0)
58 return -EINVAL;
59
60 return regulator_list_voltage(supply, brightness - 1);
61}
62
63
64static void regulator_led_enable(struct regulator_led *led)
65{
66 int ret;
67
68 if (led->enabled)
69 return;
70
71 ret = regulator_enable(led->vcc);
72 if (ret != 0) {
73 dev_err(led->cdev.dev, "Failed to enable vcc: %d\n", ret);
74 return;
75 }
76
77 led->enabled = 1;
78}
79
80static void regulator_led_disable(struct regulator_led *led)
81{
82 int ret;
83
84 if (!led->enabled)
85 return;
86
87 ret = regulator_disable(led->vcc);
88 if (ret != 0) {
89 dev_err(led->cdev.dev, "Failed to disable vcc: %d\n", ret);
90 return;
91 }
92
93 led->enabled = 0;
94}
95
96static void regulator_led_set_value(struct regulator_led *led)
97{
98 int voltage;
99 int ret;
100
101 mutex_lock(&led->mutex);
102
103 if (led->value == LED_OFF) {
104 regulator_led_disable(led);
105 goto out;
106 }
107
108 if (led->cdev.max_brightness > 1) {
109 voltage = led_regulator_get_voltage(led->vcc, led->value);
110 dev_dbg(led->cdev.dev, "brightness: %d voltage: %d\n",
111 led->value, voltage);
112
113 ret = regulator_set_voltage(led->vcc, voltage, voltage);
114 if (ret != 0)
115 dev_err(led->cdev.dev, "Failed to set voltage %d: %d\n",
116 voltage, ret);
117 }
118
119 regulator_led_enable(led);
120
121out:
122 mutex_unlock(&led->mutex);
123}
124
125static void led_work(struct work_struct *work)
126{
127 struct regulator_led *led;
128
129 led = container_of(work, struct regulator_led, work);
130 regulator_led_set_value(led);
131}
132
133static void regulator_led_brightness_set(struct led_classdev *led_cdev,
134 enum led_brightness value)
135{
136 struct regulator_led *led = to_regulator_led(led_cdev);
137
138 led->value = value;
139 schedule_work(&led->work);
140}
141
142static int __devinit regulator_led_probe(struct platform_device *pdev)
143{
144 struct led_regulator_platform_data *pdata = pdev->dev.platform_data;
145 struct regulator_led *led;
146 struct regulator *vcc;
147 int ret = 0;
148
149 if (pdata == NULL) {
150 dev_err(&pdev->dev, "no platform data\n");
151 return -ENODEV;
152 }
153
154 vcc = regulator_get_exclusive(&pdev->dev, "vled");
155 if (IS_ERR(vcc)) {
156 dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name);
157 return PTR_ERR(vcc);
158 }
159
160 led = kzalloc(sizeof(*led), GFP_KERNEL);
161 if (led == NULL) {
162 ret = -ENOMEM;
163 goto err_vcc;
164 }
165
166 led->cdev.max_brightness = led_regulator_get_max_brightness(vcc);
167 if (pdata->brightness > led->cdev.max_brightness) {
168 dev_err(&pdev->dev, "Invalid default brightness %d\n",
169 pdata->brightness);
170 ret = -EINVAL;
171 goto err_led;
172 }
173 led->value = pdata->brightness;
174
175 led->cdev.brightness_set = regulator_led_brightness_set;
176 led->cdev.name = pdata->name;
177 led->cdev.flags |= LED_CORE_SUSPENDRESUME;
178 led->vcc = vcc;
179
180 mutex_init(&led->mutex);
181 INIT_WORK(&led->work, led_work);
182
183 platform_set_drvdata(pdev, led);
184
185 ret = led_classdev_register(&pdev->dev, &led->cdev);
186 if (ret < 0) {
187 cancel_work_sync(&led->work);
188 goto err_led;
189 }
190
191 /* to expose the default value to userspace */
192 led->cdev.brightness = led->value;
193
194 /* Set the default led status */
195 regulator_led_set_value(led);
196
197 return 0;
198
199err_led:
200 kfree(led);
201err_vcc:
202 regulator_put(vcc);
203 return ret;
204}
205
206static int __devexit regulator_led_remove(struct platform_device *pdev)
207{
208 struct regulator_led *led = platform_get_drvdata(pdev);
209
210 led_classdev_unregister(&led->cdev);
211 cancel_work_sync(&led->work);
212 regulator_led_disable(led);
213 regulator_put(led->vcc);
214 kfree(led);
215 return 0;
216}
217
218static struct platform_driver regulator_led_driver = {
219 .driver = {
220 .name = "leds-regulator",
221 .owner = THIS_MODULE,
222 },
223 .probe = regulator_led_probe,
224 .remove = __devexit_p(regulator_led_remove),
225};
226
227static int __init regulator_led_init(void)
228{
229 return platform_driver_register(&regulator_led_driver);
230}
231module_init(regulator_led_init);
232
233static void __exit regulator_led_exit(void)
234{
235 platform_driver_unregister(&regulator_led_driver);
236}
237module_exit(regulator_led_exit);
238
239MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
240MODULE_DESCRIPTION("Regulator driven LED driver");
241MODULE_LICENSE("GPL");
242MODULE_ALIAS("platform:leds-regulator");
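For the new leds-regulator driver above, board code is expected to supply an exclusive "vled" regulator consumer plus platform data carrying a LED name and a default brightness. A hedged sketch of that wiring follows; the platform-data field names are assumed from include/linux/leds-regulator.h and every board identifier is invented for illustration.

/* Hypothetical board hookup for the leds-regulator driver. */
#include <linux/leds.h>
#include <linux/leds-regulator.h>
#include <linux/platform_device.h>

static struct led_regulator_platform_data board_vled_pdata = {
	.name		= "board:white:backlight",
	.brightness	= 1,	/* default level; must not exceed the probed max_brightness */
};

static struct platform_device board_vled_device = {
	.name		= "leds-regulator",
	.id		= -1,
	.dev		= { .platform_data = &board_vled_pdata },
};

The board's regulator machine data must additionally route a consumer supply named "vled" to this platform device, since the probe above calls regulator_get_exclusive(&pdev->dev, "vled").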
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
new file mode 100644
index 000000000000..97f04984c1ca
--- /dev/null
+++ b/drivers/leds/leds-ss4200.c
@@ -0,0 +1,556 @@
1/*
2 * SS4200-E Hardware API
3 * Copyright (c) 2009, Intel Corporation.
4 * Copyright IBM Corporation, 2009
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Author: Dave Hansen <dave@sr71.net>
20 */
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/dmi.h>
25#include <linux/init.h>
26#include <linux/ioport.h>
27#include <linux/kernel.h>
28#include <linux/leds.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/types.h>
32#include <linux/uaccess.h>
33
34MODULE_AUTHOR("Rodney Girod <rgirod@confocus.com>, Dave Hansen <dave@sr71.net>");
35MODULE_DESCRIPTION("Intel NAS/Home Server ICH7 GPIO Driver");
36MODULE_LICENSE("GPL");
37
38/*
39 * ICH7 LPC/GPIO PCI Config register offsets
40 */
41#define PMBASE 0x040
42#define GPIO_BASE 0x048
43#define GPIO_CTRL 0x04c
44#define GPIO_EN 0x010
45
46/*
47 * The ICH7 GPIO register block is 64 bytes in size.
48 */
49#define ICH7_GPIO_SIZE 64
50
51/*
52 * Define register offsets within the ICH7 register block.
53 */
54#define GPIO_USE_SEL 0x000
55#define GP_IO_SEL 0x004
56#define GP_LVL 0x00c
57#define GPO_BLINK 0x018
58#define GPI_INV 0x030
59#define GPIO_USE_SEL2 0x034
60#define GP_IO_SEL2 0x038
61#define GP_LVL2 0x03c
62
63/*
64 * PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives.
65 */
66static struct pci_device_id ich7_lpc_pci_id[] =
67{
68 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) },
69 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) },
70 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_30) },
71 { } /* NULL entry */
72};
73
74MODULE_DEVICE_TABLE(pci, ich7_lpc_pci_id);
75
76static int __init ss4200_led_dmi_callback(const struct dmi_system_id *id)
77{
78 pr_info("detected '%s'\n", id->ident);
79 return 1;
80}
81
82static unsigned int __initdata nodetect;
83module_param_named(nodetect, nodetect, bool, 0);
84MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
85
86/*
87 * struct nas_led_whitelist - List of known good models
88 *
89 * Contains the known good models this driver is compatible with.
90 * When adding a new model try to be as strict as possible. This
91 * makes it possible to keep the false positives (the model is
92 * detected as working, but in reality it is not) as low as
93 * possible.
94 */
95static struct dmi_system_id __initdata nas_led_whitelist[] = {
96 {
97 .callback = ss4200_led_dmi_callback,
98 .ident = "Intel SS4200-E",
99 .matches = {
100 DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
101 DMI_MATCH(DMI_PRODUCT_NAME, "SS4200-E"),
102 DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
103 }
104 },
105};
106
107/*
108 * Base I/O address assigned to the Power Management register block
109 */
110static u32 g_pm_io_base;
111
112/*
113 * Base I/O address assigned to the ICH7 GPIO register block
114 */
115static u32 nas_gpio_io_base;
116
117/*
118 * When we successfully register a region, we are returned a resource.
119 * We use these to identify which regions we need to release on our way
120 * back out.
121 */
122static struct resource *gp_gpio_resource;
123
124struct nasgpio_led {
125 char *name;
126 u32 gpio_bit;
127 struct led_classdev led_cdev;
128};
129
130/*
131 * gpio_bit(s) are the ICH7 GPIO bit assignments
132 */
133static struct nasgpio_led nasgpio_leds[] = {
134 { .name = "hdd1:blue:sata", .gpio_bit = 0 },
135 { .name = "hdd1:amber:sata", .gpio_bit = 1 },
136 { .name = "hdd2:blue:sata", .gpio_bit = 2 },
137 { .name = "hdd2:amber:sata", .gpio_bit = 3 },
138 { .name = "hdd3:blue:sata", .gpio_bit = 4 },
139 { .name = "hdd3:amber:sata", .gpio_bit = 5 },
140 { .name = "hdd4:blue:sata", .gpio_bit = 6 },
141 { .name = "hdd4:amber:sata", .gpio_bit = 7 },
142 { .name = "power:blue:power", .gpio_bit = 27},
143 { .name = "power:amber:power", .gpio_bit = 28},
144};
145
146#define NAS_RECOVERY 0x00000400 /* GPIO10 */
147
148static struct nasgpio_led *
149led_classdev_to_nasgpio_led(struct led_classdev *led_cdev)
150{
151 return container_of(led_cdev, struct nasgpio_led, led_cdev);
152}
153
154static struct nasgpio_led *get_led_named(char *name)
155{
156 int i;
157 for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) {
158 if (strcmp(nasgpio_leds[i].name, name))
159 continue;
160 return &nasgpio_leds[i];
161 }
162 return NULL;
163}
164
165/*
166 * This protects access to the gpio ports.
167 */
168static DEFINE_SPINLOCK(nasgpio_gpio_lock);
169
170/*
171 * There are two gpio ports, one for blinking and the other
172 * for power. @port tells us if we're doing blinking or
173 * power control.
174 *
175 * Caller must hold nasgpio_gpio_lock
176 */
177static void __nasgpio_led_set_attr(struct led_classdev *led_cdev,
178 u32 port, u32 value)
179{
180 struct nasgpio_led *led = led_classdev_to_nasgpio_led(led_cdev);
181 u32 gpio_out;
182
183 gpio_out = inl(nas_gpio_io_base + port);
184 if (value)
185 gpio_out |= (1<<led->gpio_bit);
186 else
187 gpio_out &= ~(1<<led->gpio_bit);
188
189 outl(gpio_out, nas_gpio_io_base + port);
190}
191
192static void nasgpio_led_set_attr(struct led_classdev *led_cdev,
193 u32 port, u32 value)
194{
195 spin_lock(&nasgpio_gpio_lock);
196 __nasgpio_led_set_attr(led_cdev, port, value);
197 spin_unlock(&nasgpio_gpio_lock);
198}
199
200u32 nasgpio_led_get_attr(struct led_classdev *led_cdev, u32 port)
201{
202 struct nasgpio_led *led = led_classdev_to_nasgpio_led(led_cdev);
203 u32 gpio_in;
204
205 spin_lock(&nasgpio_gpio_lock);
206 gpio_in = inl(nas_gpio_io_base + port);
207 spin_unlock(&nasgpio_gpio_lock);
208 if (gpio_in & (1<<led->gpio_bit))
209 return 1;
210 return 0;
211}
212
213/*
214 * There is actual brightness control in the hardware,
215 * but it is via smbus commands and not implemented
216 * in this driver.
217 */
218static void nasgpio_led_set_brightness(struct led_classdev *led_cdev,
219 enum led_brightness brightness)
220{
221 u32 setting = 0;
222 if (brightness >= LED_HALF)
223 setting = 1;
224 /*
225 * Hold the lock across both operations. This ensures
226 * consistency so that both the "turn off blinking"
227 * and "turn light off" operations complete as a set.
228 */
229 spin_lock(&nasgpio_gpio_lock);
230 /*
231 * LED class documentation asks that past blink state
232 * be disabled when brightness is turned to zero.
233 */
234 if (brightness == 0)
235 __nasgpio_led_set_attr(led_cdev, GPO_BLINK, 0);
236 __nasgpio_led_set_attr(led_cdev, GP_LVL, setting);
237 spin_unlock(&nasgpio_gpio_lock);
238}
239
240static int nasgpio_led_set_blink(struct led_classdev *led_cdev,
241 unsigned long *delay_on,
242 unsigned long *delay_off)
243{
244 u32 setting = 1;
245 if (!(*delay_on == 0 && *delay_off == 0) &&
246 !(*delay_on == 500 && *delay_off == 500))
247 return -EINVAL;
248 /*
249 * These are very approximate.
250 */
251 *delay_on = 500;
252 *delay_off = 500;
253
254 nasgpio_led_set_attr(led_cdev, GPO_BLINK, setting);
255
256 return 0;
257}
258
259
260/*
261 * Initialize the ICH7 GPIO registers for NAS usage. The BIOS should have
 262 * already taken care of this, but we will do so in a non-destructive manner
263 * so that we have what we need whether the BIOS did it or not.
264 */
265static int __devinit ich7_gpio_init(struct device *dev)
266{
267 int i;
268 u32 config_data = 0;
269 u32 all_nas_led = 0;
270
271 for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++)
272 all_nas_led |= (1<<nasgpio_leds[i].gpio_bit);
273
274 spin_lock(&nasgpio_gpio_lock);
275 /*
276 * We need to enable all of the GPIO lines used by the NAS box,
277 * so we will read the current Use Selection and add our usage
278 * to it. This should be benign with regard to the original
279 * BIOS configuration.
280 */
281 config_data = inl(nas_gpio_io_base + GPIO_USE_SEL);
282 dev_dbg(dev, ": Data read from GPIO_USE_SEL = 0x%08x\n", config_data);
283 config_data |= all_nas_led + NAS_RECOVERY;
284 outl(config_data, nas_gpio_io_base + GPIO_USE_SEL);
285 config_data = inl(nas_gpio_io_base + GPIO_USE_SEL);
286 dev_dbg(dev, ": GPIO_USE_SEL = 0x%08x\n\n", config_data);
287
288 /*
289 * The LED GPIO outputs need to be configured for output, so we
290 * will ensure that all LED lines are cleared for output and the
291 * RECOVERY line ready for input. This too should be benign with
292 * regard to BIOS configuration.
293 */
294 config_data = inl(nas_gpio_io_base + GP_IO_SEL);
295 dev_dbg(dev, ": Data read from GP_IO_SEL = 0x%08x\n",
296 config_data);
297 config_data &= ~all_nas_led;
298 config_data |= NAS_RECOVERY;
299 outl(config_data, nas_gpio_io_base + GP_IO_SEL);
300 config_data = inl(nas_gpio_io_base + GP_IO_SEL);
301 dev_dbg(dev, ": GP_IO_SEL = 0x%08x\n", config_data);
302
303 /*
304 * In our final system, the BIOS will initialize the state of all
305 * of the LEDs. For now, we turn them all off (or Low).
306 */
307 config_data = inl(nas_gpio_io_base + GP_LVL);
308 dev_dbg(dev, ": Data read from GP_LVL = 0x%08x\n", config_data);
309 /*
310 * In our final system, the BIOS will initialize the blink state of all
311 * of the LEDs. For now, we turn blink off for all of them.
312 */
313 config_data = inl(nas_gpio_io_base + GPO_BLINK);
314 dev_dbg(dev, ": Data read from GPO_BLINK = 0x%08x\n", config_data);
315
316 /*
317 * At this moment, I am unsure if anything needs to happen with GPI_INV
318 */
319 config_data = inl(nas_gpio_io_base + GPI_INV);
320 dev_dbg(dev, ": Data read from GPI_INV = 0x%08x\n", config_data);
321
322 spin_unlock(&nasgpio_gpio_lock);
323 return 0;
324}
325
326static void ich7_lpc_cleanup(struct device *dev)
327{
328 /*
329 * If we were given exclusive use of the GPIO
330 * I/O Address range, we must return it.
331 */
332 if (gp_gpio_resource) {
333 dev_dbg(dev, ": Releasing GPIO I/O addresses\n");
334 release_region(nas_gpio_io_base, ICH7_GPIO_SIZE);
335 gp_gpio_resource = NULL;
336 }
337}
338
339/*
340 * The OS has determined that the LPC of the Intel ICH7 Southbridge is present
 341 * so we can retrieve the required operational information and prepare the GPIO.
342 */
343static struct pci_dev *nas_gpio_pci_dev;
344static int __devinit ich7_lpc_probe(struct pci_dev *dev,
345 const struct pci_device_id *id)
346{
347 int status;
348 u32 gc = 0;
349
350 status = pci_enable_device(dev);
351 if (status) {
352 dev_err(&dev->dev, "pci_enable_device failed\n");
353 return -EIO;
354 }
355
356 nas_gpio_pci_dev = dev;
357 status = pci_read_config_dword(dev, PMBASE, &g_pm_io_base);
358 if (status)
359 goto out;
360 g_pm_io_base &= 0x00000ff80;
361
362 status = pci_read_config_dword(dev, GPIO_CTRL, &gc);
363 if (!(GPIO_EN & gc)) {
364 status = -EEXIST;
365 dev_info(&dev->dev,
366 "ERROR: The LPC GPIO Block has not been enabled.\n");
367 goto out;
368 }
369
370 status = pci_read_config_dword(dev, GPIO_BASE, &nas_gpio_io_base);
371 if (0 > status) {
372 dev_info(&dev->dev, "Unable to read GPIOBASE.\n");
373 goto out;
374 }
375 dev_dbg(&dev->dev, ": GPIOBASE = 0x%08x\n", nas_gpio_io_base);
376 nas_gpio_io_base &= 0x00000ffc0;
377
378 /*
 378 * Ensure that we have exclusive access to the GPIO I/O address range.
380 */
381 gp_gpio_resource = request_region(nas_gpio_io_base, ICH7_GPIO_SIZE,
382 KBUILD_MODNAME);
383 if (NULL == gp_gpio_resource) {
384 dev_info(&dev->dev,
385 "ERROR Unable to register GPIO I/O addresses.\n");
386 status = -1;
387 goto out;
388 }
389
390 /*
391 * Initialize the GPIO for NAS/Home Server Use
392 */
393 ich7_gpio_init(&dev->dev);
394
395out:
396 if (status) {
397 ich7_lpc_cleanup(&dev->dev);
398 pci_disable_device(dev);
399 }
400 return status;
401}
402
403static void ich7_lpc_remove(struct pci_dev *dev)
404{
405 ich7_lpc_cleanup(&dev->dev);
406 pci_disable_device(dev);
407}
408
409/*
410 * pci_driver structure passed to the PCI modules
411 */
412static struct pci_driver nas_gpio_pci_driver = {
413 .name = KBUILD_MODNAME,
414 .id_table = ich7_lpc_pci_id,
415 .probe = ich7_lpc_probe,
416 .remove = ich7_lpc_remove,
417};
418
419static struct led_classdev *get_classdev_for_led_nr(int nr)
420{
421 struct nasgpio_led *nas_led = &nasgpio_leds[nr];
422 struct led_classdev *led = &nas_led->led_cdev;
423 return led;
424}
425
426
427static void set_power_light_amber_noblink(void)
428{
429 struct nasgpio_led *amber = get_led_named("power:amber:power");
430 struct nasgpio_led *blue = get_led_named("power:blue:power");
431
432 if (!amber || !blue)
433 return;
434 /*
435 * LED_OFF implies disabling future blinking
436 */
437 pr_debug("setting blue off and amber on\n");
438
439 nasgpio_led_set_brightness(&blue->led_cdev, LED_OFF);
440 nasgpio_led_set_brightness(&amber->led_cdev, LED_FULL);
441}
442
443static ssize_t nas_led_blink_show(struct device *dev,
444 struct device_attribute *attr, char *buf)
445{
446 struct led_classdev *led = dev_get_drvdata(dev);
447 int blinking = 0;
448 if (nasgpio_led_get_attr(led, GPO_BLINK))
449 blinking = 1;
450 return sprintf(buf, "%u\n", blinking);
451}
452
453static ssize_t nas_led_blink_store(struct device *dev,
454 struct device_attribute *attr,
455 const char *buf, size_t size)
456{
457 int ret;
458 struct led_classdev *led = dev_get_drvdata(dev);
459 unsigned long blink_state;
460
461 ret = strict_strtoul(buf, 10, &blink_state);
462 if (ret)
463 return ret;
464
465 nasgpio_led_set_attr(led, GPO_BLINK, blink_state);
466
467 return size;
468}
469
470static DEVICE_ATTR(blink, 0644, nas_led_blink_show, nas_led_blink_store);
471
472static int register_nasgpio_led(int led_nr)
473{
474 int ret;
475 struct nasgpio_led *nas_led = &nasgpio_leds[led_nr];
476 struct led_classdev *led = get_classdev_for_led_nr(led_nr);
477
478 led->name = nas_led->name;
479 led->brightness = LED_OFF;
480 if (nasgpio_led_get_attr(led, GP_LVL))
481 led->brightness = LED_FULL;
482 led->brightness_set = nasgpio_led_set_brightness;
483 led->blink_set = nasgpio_led_set_blink;
484 ret = led_classdev_register(&nas_gpio_pci_dev->dev, led);
485 if (ret)
486 return ret;
487 ret = device_create_file(led->dev, &dev_attr_blink);
488 if (ret)
489 led_classdev_unregister(led);
490 return ret;
491}
492
493static void unregister_nasgpio_led(int led_nr)
494{
495 struct led_classdev *led = get_classdev_for_led_nr(led_nr);
496 led_classdev_unregister(led);
497 device_remove_file(led->dev, &dev_attr_blink);
498}
499/*
500 * module load/initialization
501 */
502static int __init nas_gpio_init(void)
503{
504 int i;
505 int ret = 0;
506 int nr_devices = 0;
507
508 nr_devices = dmi_check_system(nas_led_whitelist);
509 if (nodetect) {
510 pr_info("skipping hardware autodetection\n");
511 pr_info("Please send 'dmidecode' output to dave@sr71.net\n");
512 nr_devices++;
513 }
514
515 if (nr_devices <= 0) {
516 pr_info("no LED devices found\n");
517 return -ENODEV;
518 }
519
520 pr_info("registering PCI driver\n");
521 ret = pci_register_driver(&nas_gpio_pci_driver);
522 if (ret)
523 return ret;
524 for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) {
525 ret = register_nasgpio_led(i);
526 if (ret)
527 goto out_err;
528 }
529 /*
530 * When the system powers on, the BIOS leaves the power
531 * light blue and blinking. This will turn it solid
532 * amber once the driver is loaded.
533 */
534 set_power_light_amber_noblink();
535 return 0;
536out_err:
537 for (; i >= 0; i--)
538 unregister_nasgpio_led(i);
539 pci_unregister_driver(&nas_gpio_pci_driver);
540 return ret;
541}
542
543/*
544 * module unload
545 */
546static void __exit nas_gpio_exit(void)
547{
548 int i;
549 pr_info("Unregistering driver\n");
550 for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++)
551 unregister_nasgpio_led(i);
552 pci_unregister_driver(&nas_gpio_pci_driver);
553}
554
555module_init(nas_gpio_init);
556module_exit(nas_gpio_exit);
diff --git a/drivers/media/video/cx23885/cx23888-ir.c b/drivers/media/video/cx23885/cx23888-ir.c
index 3ccc8afeccf3..2bf57a4527d3 100644
--- a/drivers/media/video/cx23885/cx23888-ir.c
+++ b/drivers/media/video/cx23885/cx23888-ir.c
@@ -124,15 +124,12 @@ struct cx23888_ir_state {
124 atomic_t rxclk_divider; 124 atomic_t rxclk_divider;
125 atomic_t rx_invert; 125 atomic_t rx_invert;
126 126
127 struct kfifo *rx_kfifo; 127 struct kfifo rx_kfifo;
128 spinlock_t rx_kfifo_lock; 128 spinlock_t rx_kfifo_lock;
129 129
130 struct v4l2_subdev_ir_parameters tx_params; 130 struct v4l2_subdev_ir_parameters tx_params;
131 struct mutex tx_params_lock; 131 struct mutex tx_params_lock;
132 atomic_t txclk_divider; 132 atomic_t txclk_divider;
133
134 struct kfifo *tx_kfifo;
135 spinlock_t tx_kfifo_lock;
136}; 133};
137 134
138static inline struct cx23888_ir_state *to_state(struct v4l2_subdev *sd) 135static inline struct cx23888_ir_state *to_state(struct v4l2_subdev *sd)
@@ -522,6 +519,7 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status,
522{ 519{
523 struct cx23888_ir_state *state = to_state(sd); 520 struct cx23888_ir_state *state = to_state(sd);
524 struct cx23885_dev *dev = state->dev; 521 struct cx23885_dev *dev = state->dev;
522 unsigned long flags;
525 523
526 u32 cntrl = cx23888_ir_read4(dev, CX23888_IR_CNTRL_REG); 524 u32 cntrl = cx23888_ir_read4(dev, CX23888_IR_CNTRL_REG);
527 u32 irqen = cx23888_ir_read4(dev, CX23888_IR_IRQEN_REG); 525 u32 irqen = cx23888_ir_read4(dev, CX23888_IR_IRQEN_REG);
@@ -594,8 +592,9 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status,
594 if (i == 0) 592 if (i == 0)
595 break; 593 break;
596 j = i * sizeof(u32); 594 j = i * sizeof(u32);
597 k = kfifo_put(state->rx_kfifo, 595 k = kfifo_in_locked(&state->rx_kfifo,
598 (unsigned char *) rx_data, j); 596 (unsigned char *) rx_data, j,
597 &state->rx_kfifo_lock);
599 if (k != j) 598 if (k != j)
600 kror++; /* rx_kfifo over run */ 599 kror++; /* rx_kfifo over run */
601 } 600 }
@@ -631,8 +630,11 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status,
631 cx23888_ir_write4(dev, CX23888_IR_CNTRL_REG, cntrl); 630 cx23888_ir_write4(dev, CX23888_IR_CNTRL_REG, cntrl);
632 *handled = true; 631 *handled = true;
633 } 632 }
634 if (kfifo_len(state->rx_kfifo) >= CX23888_IR_RX_KFIFO_SIZE / 2) 633
634 spin_lock_irqsave(&state->rx_kfifo_lock, flags);
635 if (kfifo_len(&state->rx_kfifo) >= CX23888_IR_RX_KFIFO_SIZE / 2)
635 events |= V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ; 636 events |= V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ;
637 spin_unlock_irqrestore(&state->rx_kfifo_lock, flags);
636 638
637 if (events) 639 if (events)
638 v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events); 640 v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events);
@@ -657,7 +659,7 @@ static int cx23888_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
657 return 0; 659 return 0;
658 } 660 }
659 661
660 n = kfifo_get(state->rx_kfifo, buf, n); 662 n = kfifo_out_locked(&state->rx_kfifo, buf, n, &state->rx_kfifo_lock);
661 663
662 n /= sizeof(u32); 664 n /= sizeof(u32);
663 *num = n * sizeof(u32); 665 *num = n * sizeof(u32);
@@ -785,7 +787,12 @@ static int cx23888_ir_rx_s_parameters(struct v4l2_subdev *sd,
785 o->interrupt_enable = p->interrupt_enable; 787 o->interrupt_enable = p->interrupt_enable;
786 o->enable = p->enable; 788 o->enable = p->enable;
787 if (p->enable) { 789 if (p->enable) {
788 kfifo_reset(state->rx_kfifo); 790 unsigned long flags;
791
792 spin_lock_irqsave(&state->rx_kfifo_lock, flags);
793 kfifo_reset(&state->rx_kfifo);
794 /* reset tx_fifo too if there is one... */
795 spin_unlock_irqrestore(&state->rx_kfifo_lock, flags);
789 if (p->interrupt_enable) 796 if (p->interrupt_enable)
790 irqenable_rx(dev, IRQEN_RSE | IRQEN_RTE | IRQEN_ROE); 797 irqenable_rx(dev, IRQEN_RSE | IRQEN_RTE | IRQEN_ROE);
791 control_rx_enable(dev, p->enable); 798 control_rx_enable(dev, p->enable);
@@ -892,7 +899,6 @@ static int cx23888_ir_tx_s_parameters(struct v4l2_subdev *sd,
892 o->interrupt_enable = p->interrupt_enable; 899 o->interrupt_enable = p->interrupt_enable;
893 o->enable = p->enable; 900 o->enable = p->enable;
894 if (p->enable) { 901 if (p->enable) {
895 kfifo_reset(state->tx_kfifo);
896 if (p->interrupt_enable) 902 if (p->interrupt_enable)
897 irqenable_tx(dev, IRQEN_TSE); 903 irqenable_tx(dev, IRQEN_TSE);
898 control_tx_enable(dev, p->enable); 904 control_tx_enable(dev, p->enable);
@@ -1168,18 +1174,8 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
1168 return -ENOMEM; 1174 return -ENOMEM;
1169 1175
1170 spin_lock_init(&state->rx_kfifo_lock); 1176 spin_lock_init(&state->rx_kfifo_lock);
1171 state->rx_kfifo = kfifo_alloc(CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL, 1177 if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL))
1172 &state->rx_kfifo_lock);
1173 if (state->rx_kfifo == NULL)
1174 return -ENOMEM;
1175
1176 spin_lock_init(&state->tx_kfifo_lock);
1177 state->tx_kfifo = kfifo_alloc(CX23888_IR_TX_KFIFO_SIZE, GFP_KERNEL,
1178 &state->tx_kfifo_lock);
1179 if (state->tx_kfifo == NULL) {
1180 kfifo_free(state->rx_kfifo);
1181 return -ENOMEM; 1178 return -ENOMEM;
1182 }
1183 1179
1184 state->dev = dev; 1180 state->dev = dev;
1185 state->id = V4L2_IDENT_CX23888_IR; 1181 state->id = V4L2_IDENT_CX23888_IR;
@@ -1211,8 +1207,7 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
1211 sizeof(struct v4l2_subdev_ir_parameters)); 1207 sizeof(struct v4l2_subdev_ir_parameters));
1212 v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params); 1208 v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params);
1213 } else { 1209 } else {
1214 kfifo_free(state->rx_kfifo); 1210 kfifo_free(&state->rx_kfifo);
1215 kfifo_free(state->tx_kfifo);
1216 } 1211 }
1217 return ret; 1212 return ret;
1218} 1213}
@@ -1231,8 +1226,7 @@ int cx23888_ir_remove(struct cx23885_dev *dev)
1231 1226
1232 state = to_state(sd); 1227 state = to_state(sd);
1233 v4l2_device_unregister_subdev(sd); 1228 v4l2_device_unregister_subdev(sd);
1234 kfifo_free(state->rx_kfifo); 1229 kfifo_free(&state->rx_kfifo);
1235 kfifo_free(state->tx_kfifo);
1236 kfree(state); 1230 kfree(state);
1237 /* Nothing more to free() as state held the actual v4l2_subdev object */ 1231 /* Nothing more to free() as state held the actual v4l2_subdev object */
1238 return 0; 1232 return 0;
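This cx23888-ir conversion (and the meye one below) follows the 2.6.33 kfifo rework: the fifo is now embedded in its owning structure instead of being returned as a pointer, kfifo_alloc() returns an error code, and the lock is passed explicitly on each access. A minimal sketch of the new idiom as used in the hunks above; "struct foo" and the helpers are placeholders, not real kernel symbols.

/* Minimal sketch of the reworked kfifo usage. */
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	struct kfifo	fifo;	/* embedded, no longer a pointer from kfifo_alloc() */
	spinlock_t	lock;	/* supplied explicitly on each locked access */
};

static int foo_init(struct foo *f)
{
	spin_lock_init(&f->lock);
	return kfifo_alloc(&f->fifo, 4096, GFP_KERNEL);	/* 0 on success */
}

static unsigned int foo_push(struct foo *f, const void *buf, unsigned int len)
{
	return kfifo_in_locked(&f->fifo, buf, len, &f->lock);
}

static unsigned int foo_pop(struct foo *f, void *buf, unsigned int len)
{
	return kfifo_out_locked(&f->fifo, buf, len, &f->lock);
}

static void foo_exit(struct foo *f)
{
	kfifo_free(&f->fifo);
}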
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 6ffa64cd1c6d..b421858ccf90 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -800,8 +800,8 @@ again:
800 return IRQ_HANDLED; 800 return IRQ_HANDLED;
801 801
802 if (meye.mchip_mode == MCHIP_HIC_MODE_CONT_OUT) { 802 if (meye.mchip_mode == MCHIP_HIC_MODE_CONT_OUT) {
803 if (kfifo_get(meye.grabq, (unsigned char *)&reqnr, 803 if (kfifo_out_locked(&meye.grabq, (unsigned char *)&reqnr,
804 sizeof(int)) != sizeof(int)) { 804 sizeof(int), &meye.grabq_lock) != sizeof(int)) {
805 mchip_free_frame(); 805 mchip_free_frame();
806 return IRQ_HANDLED; 806 return IRQ_HANDLED;
807 } 807 }
@@ -811,7 +811,8 @@ again:
811 meye.grab_buffer[reqnr].state = MEYE_BUF_DONE; 811 meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
812 do_gettimeofday(&meye.grab_buffer[reqnr].timestamp); 812 do_gettimeofday(&meye.grab_buffer[reqnr].timestamp);
813 meye.grab_buffer[reqnr].sequence = sequence++; 813 meye.grab_buffer[reqnr].sequence = sequence++;
814 kfifo_put(meye.doneq, (unsigned char *)&reqnr, sizeof(int)); 814 kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr,
815 sizeof(int), &meye.doneq_lock);
815 wake_up_interruptible(&meye.proc_list); 816 wake_up_interruptible(&meye.proc_list);
816 } else { 817 } else {
817 int size; 818 int size;
@@ -820,8 +821,8 @@ again:
820 mchip_free_frame(); 821 mchip_free_frame();
821 goto again; 822 goto again;
822 } 823 }
823 if (kfifo_get(meye.grabq, (unsigned char *)&reqnr, 824 if (kfifo_out_locked(&meye.grabq, (unsigned char *)&reqnr,
824 sizeof(int)) != sizeof(int)) { 825 sizeof(int), &meye.grabq_lock) != sizeof(int)) {
825 mchip_free_frame(); 826 mchip_free_frame();
826 goto again; 827 goto again;
827 } 828 }
@@ -831,7 +832,8 @@ again:
831 meye.grab_buffer[reqnr].state = MEYE_BUF_DONE; 832 meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
832 do_gettimeofday(&meye.grab_buffer[reqnr].timestamp); 833 do_gettimeofday(&meye.grab_buffer[reqnr].timestamp);
833 meye.grab_buffer[reqnr].sequence = sequence++; 834 meye.grab_buffer[reqnr].sequence = sequence++;
834 kfifo_put(meye.doneq, (unsigned char *)&reqnr, sizeof(int)); 835 kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr,
836 sizeof(int), &meye.doneq_lock);
835 wake_up_interruptible(&meye.proc_list); 837 wake_up_interruptible(&meye.proc_list);
836 } 838 }
837 mchip_free_frame(); 839 mchip_free_frame();
@@ -859,8 +861,8 @@ static int meye_open(struct file *file)
859 861
860 for (i = 0; i < MEYE_MAX_BUFNBRS; i++) 862 for (i = 0; i < MEYE_MAX_BUFNBRS; i++)
861 meye.grab_buffer[i].state = MEYE_BUF_UNUSED; 863 meye.grab_buffer[i].state = MEYE_BUF_UNUSED;
862 kfifo_reset(meye.grabq); 864 kfifo_reset(&meye.grabq);
863 kfifo_reset(meye.doneq); 865 kfifo_reset(&meye.doneq);
864 return 0; 866 return 0;
865} 867}
866 868
@@ -933,7 +935,8 @@ static int meyeioc_qbuf_capt(int *nb)
933 mchip_cont_compression_start(); 935 mchip_cont_compression_start();
934 936
935 meye.grab_buffer[*nb].state = MEYE_BUF_USING; 937 meye.grab_buffer[*nb].state = MEYE_BUF_USING;
936 kfifo_put(meye.grabq, (unsigned char *)nb, sizeof(int)); 938 kfifo_in_locked(&meye.grabq, (unsigned char *)nb, sizeof(int),
939 &meye.grabq_lock);
937 mutex_unlock(&meye.lock); 940 mutex_unlock(&meye.lock);
938 941
939 return 0; 942 return 0;
@@ -965,7 +968,9 @@ static int meyeioc_sync(struct file *file, void *fh, int *i)
965 /* fall through */ 968 /* fall through */
966 case MEYE_BUF_DONE: 969 case MEYE_BUF_DONE:
967 meye.grab_buffer[*i].state = MEYE_BUF_UNUSED; 970 meye.grab_buffer[*i].state = MEYE_BUF_UNUSED;
968 kfifo_get(meye.doneq, (unsigned char *)&unused, sizeof(int)); 971 if (kfifo_out_locked(&meye.doneq, (unsigned char *)&unused,
972 sizeof(int), &meye.doneq_lock) != sizeof(int))
973 break;
969 } 974 }
970 *i = meye.grab_buffer[*i].size; 975 *i = meye.grab_buffer[*i].size;
971 mutex_unlock(&meye.lock); 976 mutex_unlock(&meye.lock);
@@ -1452,7 +1457,8 @@ static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
1452 buf->flags |= V4L2_BUF_FLAG_QUEUED; 1457 buf->flags |= V4L2_BUF_FLAG_QUEUED;
1453 buf->flags &= ~V4L2_BUF_FLAG_DONE; 1458 buf->flags &= ~V4L2_BUF_FLAG_DONE;
1454 meye.grab_buffer[buf->index].state = MEYE_BUF_USING; 1459 meye.grab_buffer[buf->index].state = MEYE_BUF_USING;
1455 kfifo_put(meye.grabq, (unsigned char *)&buf->index, sizeof(int)); 1460 kfifo_in_locked(&meye.grabq, (unsigned char *)&buf->index,
1461 sizeof(int), &meye.grabq_lock);
1456 mutex_unlock(&meye.lock); 1462 mutex_unlock(&meye.lock);
1457 1463
1458 return 0; 1464 return 0;
@@ -1467,19 +1473,19 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
1467 1473
1468 mutex_lock(&meye.lock); 1474 mutex_lock(&meye.lock);
1469 1475
1470 if (kfifo_len(meye.doneq) == 0 && file->f_flags & O_NONBLOCK) { 1476 if (kfifo_len(&meye.doneq) == 0 && file->f_flags & O_NONBLOCK) {
1471 mutex_unlock(&meye.lock); 1477 mutex_unlock(&meye.lock);
1472 return -EAGAIN; 1478 return -EAGAIN;
1473 } 1479 }
1474 1480
1475 if (wait_event_interruptible(meye.proc_list, 1481 if (wait_event_interruptible(meye.proc_list,
1476 kfifo_len(meye.doneq) != 0) < 0) { 1482 kfifo_len(&meye.doneq) != 0) < 0) {
1477 mutex_unlock(&meye.lock); 1483 mutex_unlock(&meye.lock);
1478 return -EINTR; 1484 return -EINTR;
1479 } 1485 }
1480 1486
1481 if (!kfifo_get(meye.doneq, (unsigned char *)&reqnr, 1487 if (!kfifo_out_locked(&meye.doneq, (unsigned char *)&reqnr,
1482 sizeof(int))) { 1488 sizeof(int), &meye.doneq_lock)) {
1483 mutex_unlock(&meye.lock); 1489 mutex_unlock(&meye.lock);
1484 return -EBUSY; 1490 return -EBUSY;
1485 } 1491 }
@@ -1529,8 +1535,8 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
1529{ 1535{
1530 mutex_lock(&meye.lock); 1536 mutex_lock(&meye.lock);
1531 mchip_hic_stop(); 1537 mchip_hic_stop();
1532 kfifo_reset(meye.grabq); 1538 kfifo_reset(&meye.grabq);
1533 kfifo_reset(meye.doneq); 1539 kfifo_reset(&meye.doneq);
1534 1540
1535 for (i = 0; i < MEYE_MAX_BUFNBRS; i++) 1541 for (i = 0; i < MEYE_MAX_BUFNBRS; i++)
1536 meye.grab_buffer[i].state = MEYE_BUF_UNUSED; 1542 meye.grab_buffer[i].state = MEYE_BUF_UNUSED;
@@ -1572,7 +1578,7 @@ static unsigned int meye_poll(struct file *file, poll_table *wait)
1572 1578
1573 mutex_lock(&meye.lock); 1579 mutex_lock(&meye.lock);
1574 poll_wait(file, &meye.proc_list, wait); 1580 poll_wait(file, &meye.proc_list, wait);
1575 if (kfifo_len(meye.doneq)) 1581 if (kfifo_len(&meye.doneq))
1576 res = POLLIN | POLLRDNORM; 1582 res = POLLIN | POLLRDNORM;
1577 mutex_unlock(&meye.lock); 1583 mutex_unlock(&meye.lock);
1578 return res; 1584 return res;
@@ -1745,16 +1751,14 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
1745 } 1751 }
1746 1752
1747 spin_lock_init(&meye.grabq_lock); 1753 spin_lock_init(&meye.grabq_lock);
1748 meye.grabq = kfifo_alloc(sizeof(int) * MEYE_MAX_BUFNBRS, GFP_KERNEL, 1754 if (kfifo_alloc(&meye.grabq, sizeof(int) * MEYE_MAX_BUFNBRS,
1749 &meye.grabq_lock); 1755 GFP_KERNEL)) {
1750 if (IS_ERR(meye.grabq)) {
1751 printk(KERN_ERR "meye: fifo allocation failed\n"); 1756 printk(KERN_ERR "meye: fifo allocation failed\n");
1752 goto outkfifoalloc1; 1757 goto outkfifoalloc1;
1753 } 1758 }
1754 spin_lock_init(&meye.doneq_lock); 1759 spin_lock_init(&meye.doneq_lock);
1755 meye.doneq = kfifo_alloc(sizeof(int) * MEYE_MAX_BUFNBRS, GFP_KERNEL, 1760 if (kfifo_alloc(&meye.doneq, sizeof(int) * MEYE_MAX_BUFNBRS,
1756 &meye.doneq_lock); 1761 GFP_KERNEL)) {
1757 if (IS_ERR(meye.doneq)) {
1758 printk(KERN_ERR "meye: fifo allocation failed\n"); 1762 printk(KERN_ERR "meye: fifo allocation failed\n");
1759 goto outkfifoalloc2; 1763 goto outkfifoalloc2;
1760 } 1764 }
@@ -1868,9 +1872,9 @@ outregions:
1868outenabledev: 1872outenabledev:
1869 sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0); 1873 sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0);
1870outsonypienable: 1874outsonypienable:
1871 kfifo_free(meye.doneq); 1875 kfifo_free(&meye.doneq);
1872outkfifoalloc2: 1876outkfifoalloc2:
1873 kfifo_free(meye.grabq); 1877 kfifo_free(&meye.grabq);
1874outkfifoalloc1: 1878outkfifoalloc1:
1875 vfree(meye.grab_temp); 1879 vfree(meye.grab_temp);
1876outvmalloc: 1880outvmalloc:
@@ -1901,8 +1905,8 @@ static void __devexit meye_remove(struct pci_dev *pcidev)
1901 1905
1902 sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0); 1906 sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0);
1903 1907
1904 kfifo_free(meye.doneq); 1908 kfifo_free(&meye.doneq);
1905 kfifo_free(meye.grabq); 1909 kfifo_free(&meye.grabq);
1906 1910
1907 vfree(meye.grab_temp); 1911 vfree(meye.grab_temp);
1908 1912
diff --git a/drivers/media/video/meye.h b/drivers/media/video/meye.h
index 5f70a106ba2b..1321ad5d6597 100644
--- a/drivers/media/video/meye.h
+++ b/drivers/media/video/meye.h
@@ -303,9 +303,9 @@ struct meye {
303 struct meye_grab_buffer grab_buffer[MEYE_MAX_BUFNBRS]; 303 struct meye_grab_buffer grab_buffer[MEYE_MAX_BUFNBRS];
304 int vma_use_count[MEYE_MAX_BUFNBRS]; /* mmap count */ 304 int vma_use_count[MEYE_MAX_BUFNBRS]; /* mmap count */
305 struct mutex lock; /* mutex for open/mmap... */ 305 struct mutex lock; /* mutex for open/mmap... */
306 struct kfifo *grabq; /* queue for buffers to be grabbed */ 306 struct kfifo grabq; /* queue for buffers to be grabbed */
307 spinlock_t grabq_lock; /* lock protecting the queue */ 307 spinlock_t grabq_lock; /* lock protecting the queue */
308 struct kfifo *doneq; /* queue for grabbed buffers */ 308 struct kfifo doneq; /* queue for grabbed buffers */
309 spinlock_t doneq_lock; /* lock protecting the queue */ 309 spinlock_t doneq_lock; /* lock protecting the queue */
310 wait_queue_head_t proc_list; /* wait queue */ 310 wait_queue_head_t proc_list; /* wait queue */
311 struct video_device *video_dev; /* video device parameters */ 311 struct video_device *video_dev; /* video device parameters */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 610e914abe6c..85bc6a685e36 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1587,7 +1587,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1587{ 1587{
1588 u8 __iomem *mem; 1588 u8 __iomem *mem;
1589 int ii; 1589 int ii;
1590 unsigned long mem_phys; 1590 resource_size_t mem_phys;
1591 unsigned long port; 1591 unsigned long port;
1592 u32 msize; 1592 u32 msize;
1593 u32 psize; 1593 u32 psize;
@@ -1677,8 +1677,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1677 return -EINVAL; 1677 return -EINVAL;
1678 } 1678 }
1679 ioc->memmap = mem; 1679 ioc->memmap = mem;
1680 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %lx\n", 1680 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
1681 ioc->name, mem, mem_phys)); 1681 ioc->name, mem, (unsigned long long)mem_phys));
1682 1682
1683 ioc->mem_phys = mem_phys; 1683 ioc->mem_phys = mem_phys;
1684 ioc->chip = (SYSIF_REGS __iomem *)mem; 1684 ioc->chip = (SYSIF_REGS __iomem *)mem;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 1a7a9fc50ea1..e3551d20464f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -203,6 +203,7 @@ config CS5535_MFGPT
203 203
204config CS5535_MFGPT_DEFAULT_IRQ 204config CS5535_MFGPT_DEFAULT_IRQ
205 int 205 int
206 depends on CS5535_MFGPT
206 default 7 207 default 7
207 help 208 help
208 MFGPTs on the CS5535 require an interrupt. The selected IRQ 209 MFGPTs on the CS5535 require an interrupt. The selected IRQ
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index e9eae4a78402..1eac626e710a 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -391,6 +391,7 @@ static const char *const enclosure_status [] = {
391 [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed", 391 [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
392 [ENCLOSURE_STATUS_UNKNOWN] = "unknown", 392 [ENCLOSURE_STATUS_UNKNOWN] = "unknown",
393 [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable", 393 [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
394 [ENCLOSURE_STATUS_MAX] = NULL,
394}; 395};
395 396
396static const char *const enclosure_type [] = { 397static const char *const enclosure_type [] = {
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index cdb845b68ab5..06b64085a355 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -516,7 +516,8 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
516 * The number of functions on the card is encoded inside 516 * The number of functions on the card is encoded inside
517 * the ocr. 517 * the ocr.
518 */ 518 */
519 card->sdio_funcs = funcs = (ocr & 0x70000000) >> 28; 519 funcs = (ocr & 0x70000000) >> 28;
520 card->sdio_funcs = 0;
520 521
521 /* 522 /*
522 * If needed, disconnect card detection pull-up resistor. 523 * If needed, disconnect card detection pull-up resistor.
@@ -528,7 +529,7 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
528 /* 529 /*
529 * Initialize (but don't add) all present functions. 530 * Initialize (but don't add) all present functions.
530 */ 531 */
531 for (i = 0;i < funcs;i++) { 532 for (i = 0; i < funcs; i++, card->sdio_funcs++) {
532 err = sdio_init_func(host->card, i + 1); 533 err = sdio_init_func(host->card, i + 1);
533 if (err) 534 if (err)
534 goto remove; 535 goto remove;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d37464e296a5..9e060c87e64d 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -248,12 +248,15 @@ int sdio_add_func(struct sdio_func *func)
248/* 248/*
 249 * Unregister an SDIO function with the driver model, and 249
250 * (eventually) free it. 250 * (eventually) free it.
251 * This function can be called through error paths where sdio_add_func() was
252 * never executed (because a failure occurred at an earlier point).
251 */ 253 */
252void sdio_remove_func(struct sdio_func *func) 254void sdio_remove_func(struct sdio_func *func)
253{ 255{
254 if (sdio_func_present(func)) 256 if (!sdio_func_present(func))
255 device_del(&func->dev); 257 return;
256 258
259 device_del(&func->dev);
257 put_device(&func->dev); 260 put_device(&func->dev);
258} 261}
259 262
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 9d405b181781..ce1d28884e29 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -44,6 +44,19 @@ config MMC_SDHCI_IO_ACCESSORS
44 This is silent Kconfig symbol that is selected by the drivers that 44 This is silent Kconfig symbol that is selected by the drivers that
45 need to overwrite SDHCI IO memory accessors. 45 need to overwrite SDHCI IO memory accessors.
46 46
47config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
48 bool
49 select MMC_SDHCI_IO_ACCESSORS
50 help
51 This option is selected by drivers running on big endian hosts
52 and performing I/O to a SDHCI controller through a bus that
53 implements a hardware byte swapper using a 32-bit datum.
54 This endian mapping mode is called "data invariance" and
55 has the effect of scrambling the addresses and formats of data
56 accessed in sizes other than the datum size.
57
58 This is the case for the Freescale eSDHC and Nintendo Wii SDHCI.
59
47config MMC_SDHCI_PCI 60config MMC_SDHCI_PCI
48 tristate "SDHCI support on PCI bus" 61 tristate "SDHCI support on PCI bus"
49 depends on MMC_SDHCI && PCI 62 depends on MMC_SDHCI && PCI
@@ -75,11 +88,29 @@ config MMC_RICOH_MMC
75config MMC_SDHCI_OF 88config MMC_SDHCI_OF
76 tristate "SDHCI support on OpenFirmware platforms" 89 tristate "SDHCI support on OpenFirmware platforms"
77 depends on MMC_SDHCI && PPC_OF 90 depends on MMC_SDHCI && PPC_OF
78 select MMC_SDHCI_IO_ACCESSORS
79 help 91 help
80 This selects the OF support for Secure Digital Host Controller 92 This selects the OF support for Secure Digital Host Controller
81 Interfaces. So far, only the Freescale eSDHC controller is known 93 Interfaces.
82 to exist on OF platforms. 94
95 If unsure, say N.
96
97config MMC_SDHCI_OF_ESDHC
98 bool "SDHCI OF support for the Freescale eSDHC controller"
99 depends on MMC_SDHCI_OF
100 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
101 help
102 This selects the Freescale eSDHC controller support.
103
104 If unsure, say N.
105
106config MMC_SDHCI_OF_HLWD
107 bool "SDHCI OF support for the Nintendo Wii SDHCI controllers"
108 depends on MMC_SDHCI_OF
109 select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
110 help
111 This selects the Secure Digital Host Controller Interface (SDHCI)
112 found in the "Hollywood" chipset of the Nintendo Wii video game
113 console.
83 114
84 If unsure, say N. 115 If unsure, say N.
85 116
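
The help text above describes the "data invariance" scheme: a 32-bit hardware byte swapper makes full-word accesses look normal to a big-endian CPU, but 8- and 16-bit accesses then land on the wrong byte lanes, so the low address bits have to be flipped to compensate. That is exactly what the sdhci_be32bs_readw()/readb() accessors later in this diff do with reg ^ 0x2 and reg ^ 0x3. A small standalone illustration of the address fix-up (plain C, no MMIO; the function names are ours):

    #include <stdint.h>
    #include <stdio.h>

    /* Sub-word accesses through a 32-bit byte swapper must flip the low
     * address bits to hit the intended byte lanes. */
    static uintptr_t fixup16(uintptr_t reg) { return reg ^ 0x2; }
    static uintptr_t fixup8(uintptr_t reg)  { return reg ^ 0x3; }

    int main(void)
    {
            printf("16-bit reg 0x04 -> 0x%02lx\n", (unsigned long)fixup16(0x04)); /* 0x06 */
            printf(" 8-bit reg 0x29 -> 0x%02lx\n", (unsigned long)fixup8(0x29));  /* 0x2a */
            return 0;
    }
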
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index ded4d8cdd9d7..3d253dd4240f 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
13obj-$(CONFIG_MMC_SDHCI) += sdhci.o 13obj-$(CONFIG_MMC_SDHCI) += sdhci.o
14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
15obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o 15obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
16obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
17obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o 16obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
18obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o 17obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
19obj-$(CONFIG_MMC_WBSD) += wbsd.o 18obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -37,6 +36,11 @@ obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
37obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 36obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
38obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 37obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
39 38
39obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
40sdhci-of-y := sdhci-of-core.o
41sdhci-of-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
42sdhci-of-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
43
40ifeq ($(CONFIG_CB710_DEBUG),y) 44ifeq ($(CONFIG_CB710_DEBUG),y)
41 CFLAGS-cb710-mmc += -DDEBUG 45 CFLAGS-cb710-mmc += -DDEBUG
42endif 46endif
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of-core.c
index 01ab916c2802..55e33135edb4 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -22,62 +22,37 @@
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25#include "sdhci-of.h"
25#include "sdhci.h" 26#include "sdhci.h"
26 27
27struct sdhci_of_data { 28#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
28 unsigned int quirks;
29 struct sdhci_ops ops;
30};
31
32struct sdhci_of_host {
33 unsigned int clock;
34 u16 xfer_mode_shadow;
35};
36 29
37/* 30/*
38 * Ops and quirks for the Freescale eSDHC controller. 31 * These accessors are designed for big endian hosts doing I/O to
32 * little endian controllers incorporating a 32-bit hardware byte swapper.
39 */ 33 */
40 34
41#define ESDHC_DMA_SYSCTL 0x40c 35u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg)
42#define ESDHC_DMA_SNOOP 0x00000040
43
44#define ESDHC_SYSTEM_CONTROL 0x2c
45#define ESDHC_CLOCK_MASK 0x0000fff0
46#define ESDHC_PREDIV_SHIFT 8
47#define ESDHC_DIVIDER_SHIFT 4
48#define ESDHC_CLOCK_PEREN 0x00000004
49#define ESDHC_CLOCK_HCKEN 0x00000002
50#define ESDHC_CLOCK_IPGEN 0x00000001
51
52#define ESDHC_HOST_CONTROL_RES 0x05
53
54static u32 esdhc_readl(struct sdhci_host *host, int reg)
55{ 36{
56 return in_be32(host->ioaddr + reg); 37 return in_be32(host->ioaddr + reg);
57} 38}
58 39
59static u16 esdhc_readw(struct sdhci_host *host, int reg) 40u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg)
60{ 41{
61 u16 ret; 42 return in_be16(host->ioaddr + (reg ^ 0x2));
62
63 if (unlikely(reg == SDHCI_HOST_VERSION))
64 ret = in_be16(host->ioaddr + reg);
65 else
66 ret = in_be16(host->ioaddr + (reg ^ 0x2));
67 return ret;
68} 43}
69 44
70static u8 esdhc_readb(struct sdhci_host *host, int reg) 45u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg)
71{ 46{
72 return in_8(host->ioaddr + (reg ^ 0x3)); 47 return in_8(host->ioaddr + (reg ^ 0x3));
73} 48}
74 49
75static void esdhc_writel(struct sdhci_host *host, u32 val, int reg) 50void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg)
76{ 51{
77 out_be32(host->ioaddr + reg, val); 52 out_be32(host->ioaddr + reg, val);
78} 53}
79 54
80static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) 55void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg)
81{ 56{
82 struct sdhci_of_host *of_host = sdhci_priv(host); 57 struct sdhci_of_host *of_host = sdhci_priv(host);
83 int base = reg & ~0x3; 58 int base = reg & ~0x3;
@@ -92,106 +67,21 @@ static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
92 of_host->xfer_mode_shadow = val; 67 of_host->xfer_mode_shadow = val;
93 return; 68 return;
94 case SDHCI_COMMAND: 69 case SDHCI_COMMAND:
95 esdhc_writel(host, val << 16 | of_host->xfer_mode_shadow, 70 sdhci_be32bs_writel(host, val << 16 | of_host->xfer_mode_shadow,
96 SDHCI_TRANSFER_MODE); 71 SDHCI_TRANSFER_MODE);
97 return; 72 return;
98 case SDHCI_BLOCK_SIZE:
99 /*
100 * Two last DMA bits are reserved, and first one is used for
101 * non-standard blksz of 4096 bytes that we don't support
102 * yet. So clear the DMA boundary bits.
103 */
104 val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
105 /* fall through */
106 } 73 }
107 clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift); 74 clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift);
108} 75}
109 76
110static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) 77void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
111{ 78{
112 int base = reg & ~0x3; 79 int base = reg & ~0x3;
113 int shift = (reg & 0x3) * 8; 80 int shift = (reg & 0x3) * 8;
114 81
115 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
116 if (reg == SDHCI_HOST_CONTROL)
117 val &= ~ESDHC_HOST_CONTROL_RES;
118
119 clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift); 82 clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift);
120} 83}
121 84#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */
122static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
123{
124 int pre_div = 2;
125 int div = 1;
126
127 clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
128 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
129
130 if (clock == 0)
131 goto out;
132
133 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
134 pre_div *= 2;
135
136 while (host->max_clk / pre_div / div > clock && div < 16)
137 div++;
138
139 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
140 clock, host->max_clk / pre_div / div);
141
142 pre_div >>= 1;
143 div--;
144
145 setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
146 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
147 div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);
148 mdelay(100);
149out:
150 host->clock = clock;
151}
152
153static int esdhc_enable_dma(struct sdhci_host *host)
154{
155 setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
156 return 0;
157}
158
159static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
160{
161 struct sdhci_of_host *of_host = sdhci_priv(host);
162
163 return of_host->clock;
164}
165
166static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
167{
168 struct sdhci_of_host *of_host = sdhci_priv(host);
169
170 return of_host->clock / 256 / 16;
171}
172
173static struct sdhci_of_data sdhci_esdhc = {
174 .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
175 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
176 SDHCI_QUIRK_NO_BUSY_IRQ |
177 SDHCI_QUIRK_NONSTANDARD_CLOCK |
178 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
179 SDHCI_QUIRK_PIO_NEEDS_DELAY |
180 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
181 SDHCI_QUIRK_NO_CARD_NO_RESET,
182 .ops = {
183 .readl = esdhc_readl,
184 .readw = esdhc_readw,
185 .readb = esdhc_readb,
186 .writel = esdhc_writel,
187 .writew = esdhc_writew,
188 .writeb = esdhc_writeb,
189 .set_clock = esdhc_set_clock,
190 .enable_dma = esdhc_enable_dma,
191 .get_max_clock = esdhc_get_max_clock,
192 .get_min_clock = esdhc_get_min_clock,
193 },
194};
195 85
196#ifdef CONFIG_PM 86#ifdef CONFIG_PM
197 87
@@ -301,9 +191,14 @@ static int __devexit sdhci_of_remove(struct of_device *ofdev)
301} 191}
302 192
303static const struct of_device_id sdhci_of_match[] = { 193static const struct of_device_id sdhci_of_match[] = {
194#ifdef CONFIG_MMC_SDHCI_OF_ESDHC
304 { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, }, 195 { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, },
305 { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, }, 196 { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, },
306 { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, }, 197 { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, },
198#endif
199#ifdef CONFIG_MMC_SDHCI_OF_HLWD
200 { .compatible = "nintendo,hollywood-sdhci", .data = &sdhci_hlwd, },
201#endif
307 { .compatible = "generic-sdhci", }, 202 { .compatible = "generic-sdhci", },
308 {}, 203 {},
309}; 204};
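
One detail worth spelling out in sdhci_be32bs_writew() above: apart from shadowing SDHCI_TRANSFER_MODE (flushed together with SDHCI_COMMAND as one 32-bit write), 16-bit writes are emulated as a read-modify-write of the containing aligned 32-bit word via clrsetbits_be32(). A hedged, host-only sketch of that read-modify-write step, assuming the shift is (reg & 0x2) * 8 as in the rest of this driver:

    #include <stdint.h>

    /* Replace one 16-bit field inside its containing 32-bit register image.
     * 'word' stands in for the value read back from the device. */
    static uint32_t rmw_write16(uint32_t word, int reg, uint16_t val)
    {
            int shift = (reg & 0x2) * 8;    /* assumption: 0 or 16 */

            word &= ~((uint32_t)0xffff << shift);
            word |= (uint32_t)val << shift;
            return word;
    }

    int main(void)
    {
            /* 0xdeadbeef with the upper halfword replaced -> 0x1234beef */
            return rmw_write16(0xdeadbeefu, 0x06, 0x1234) == 0x1234beefu ? 0 : 1;
    }
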
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
new file mode 100644
index 000000000000..d5b11a17e648
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -0,0 +1,143 @@
1/*
2 * Freescale eSDHC controller driver.
3 *
4 * Copyright (c) 2007 Freescale Semiconductor, Inc.
5 * Copyright (c) 2009 MontaVista Software, Inc.
6 *
7 * Authors: Xiaobo Xie <X.Xie@freescale.com>
8 * Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 */
15
16#include <linux/io.h>
17#include <linux/delay.h>
18#include <linux/mmc/host.h>
19#include "sdhci-of.h"
20#include "sdhci.h"
21
22/*
23 * Ops and quirks for the Freescale eSDHC controller.
24 */
25
26#define ESDHC_DMA_SYSCTL 0x40c
27#define ESDHC_DMA_SNOOP 0x00000040
28
29#define ESDHC_SYSTEM_CONTROL 0x2c
30#define ESDHC_CLOCK_MASK 0x0000fff0
31#define ESDHC_PREDIV_SHIFT 8
32#define ESDHC_DIVIDER_SHIFT 4
33#define ESDHC_CLOCK_PEREN 0x00000004
34#define ESDHC_CLOCK_HCKEN 0x00000002
35#define ESDHC_CLOCK_IPGEN 0x00000001
36
37#define ESDHC_HOST_CONTROL_RES 0x05
38
39static u16 esdhc_readw(struct sdhci_host *host, int reg)
40{
41 u16 ret;
42
43 if (unlikely(reg == SDHCI_HOST_VERSION))
44 ret = in_be16(host->ioaddr + reg);
45 else
46 ret = sdhci_be32bs_readw(host, reg);
47 return ret;
48}
49
50static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
51{
52 if (reg == SDHCI_BLOCK_SIZE) {
53 /*
54 * Two last DMA bits are reserved, and first one is used for
55 * non-standard blksz of 4096 bytes that we don't support
56 * yet. So clear the DMA boundary bits.
57 */
58 val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
59 }
60 sdhci_be32bs_writew(host, val, reg);
61}
62
63static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
64{
65 /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
66 if (reg == SDHCI_HOST_CONTROL)
67 val &= ~ESDHC_HOST_CONTROL_RES;
68 sdhci_be32bs_writeb(host, val, reg);
69}
70
71static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
72{
73 int pre_div = 2;
74 int div = 1;
75
76 clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
77 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
78
79 if (clock == 0)
80 goto out;
81
82 while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
83 pre_div *= 2;
84
85 while (host->max_clk / pre_div / div > clock && div < 16)
86 div++;
87
88 dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
89 clock, host->max_clk / pre_div / div);
90
91 pre_div >>= 1;
92 div--;
93
94 setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
95 ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
96 div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);
97 mdelay(100);
98out:
99 host->clock = clock;
100}
101
102static int esdhc_enable_dma(struct sdhci_host *host)
103{
104 setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
105 return 0;
106}
107
108static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
109{
110 struct sdhci_of_host *of_host = sdhci_priv(host);
111
112 return of_host->clock;
113}
114
115static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
116{
117 struct sdhci_of_host *of_host = sdhci_priv(host);
118
119 return of_host->clock / 256 / 16;
120}
121
122struct sdhci_of_data sdhci_esdhc = {
123 .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
124 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
125 SDHCI_QUIRK_NO_BUSY_IRQ |
126 SDHCI_QUIRK_NONSTANDARD_CLOCK |
127 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
128 SDHCI_QUIRK_PIO_NEEDS_DELAY |
129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
130 SDHCI_QUIRK_NO_CARD_NO_RESET,
131 .ops = {
132 .readl = sdhci_be32bs_readl,
133 .readw = esdhc_readw,
134 .readb = sdhci_be32bs_readb,
135 .writel = sdhci_be32bs_writel,
136 .writew = esdhc_writew,
137 .writeb = esdhc_writeb,
138 .set_clock = esdhc_set_clock,
139 .enable_dma = esdhc_enable_dma,
140 .get_max_clock = esdhc_get_max_clock,
141 .get_min_clock = esdhc_get_min_clock,
142 },
143};
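
esdhc_set_clock() above picks two divisors: a prescaler that doubles from 2 up to 256 and a divider that counts from 1 up to 16, stopping once max_clk / pre_div / div no longer exceeds the requested rate (the register then takes pre_div >> 1 and div - 1). A standalone sketch of that search, with an illustrative 133 MHz base clock and the 400 kHz identification rate:

    #include <stdio.h>

    /* Mirror of the eSDHC divisor search: prescaler is a power of two up to
     * 256, divider runs 1..16, and the result must not exceed 'clock'. */
    static void esdhc_pick_divisors(unsigned int max_clk, unsigned int clock,
                                    int *pre_div, int *div)
    {
            *pre_div = 2;
            *div = 1;

            while (max_clk / *pre_div / 16 > clock && *pre_div < 256)
                    *pre_div *= 2;
            while (max_clk / *pre_div / *div > clock && *div < 16)
                    (*div)++;
    }

    int main(void)
    {
            int pre_div, div;

            esdhc_pick_divisors(133000000, 400000, &pre_div, &div);
            printf("pre_div=%d div=%d -> %u Hz\n", pre_div, div,
                   133000000u / pre_div / div);
            return 0;
    }
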
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
new file mode 100644
index 000000000000..35117f3ed757
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -0,0 +1,65 @@
1/*
2 * drivers/mmc/host/sdhci-of-hlwd.c
3 *
4 * Nintendo Wii Secure Digital Host Controller Interface.
5 * Copyright (C) 2009 The GameCube Linux Team
6 * Copyright (C) 2009 Albert Herranz
7 *
8 * Based on sdhci-of-esdhc.c
9 *
10 * Copyright (c) 2007 Freescale Semiconductor, Inc.
11 * Copyright (c) 2009 MontaVista Software, Inc.
12 *
13 * Authors: Xiaobo Xie <X.Xie@freescale.com>
14 * Anton Vorontsov <avorontsov@ru.mvista.com>
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or (at
19 * your option) any later version.
20 */
21
22#include <linux/delay.h>
23#include <linux/mmc/host.h>
24#include "sdhci-of.h"
25#include "sdhci.h"
26
27/*
28 * Ops and quirks for the Nintendo Wii SDHCI controllers.
29 */
30
31/*
32 * We need a small delay after each write, or things go horribly wrong.
33 */
34#define SDHCI_HLWD_WRITE_DELAY 5 /* usecs */
35
36static void sdhci_hlwd_writel(struct sdhci_host *host, u32 val, int reg)
37{
38 sdhci_be32bs_writel(host, val, reg);
39 udelay(SDHCI_HLWD_WRITE_DELAY);
40}
41
42static void sdhci_hlwd_writew(struct sdhci_host *host, u16 val, int reg)
43{
44 sdhci_be32bs_writew(host, val, reg);
45 udelay(SDHCI_HLWD_WRITE_DELAY);
46}
47
48static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)
49{
50 sdhci_be32bs_writeb(host, val, reg);
51 udelay(SDHCI_HLWD_WRITE_DELAY);
52}
53
54struct sdhci_of_data sdhci_hlwd = {
55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
56 SDHCI_QUIRK_32BIT_DMA_SIZE,
57 .ops = {
58 .readl = sdhci_be32bs_readl,
59 .readw = sdhci_be32bs_readw,
60 .readb = sdhci_be32bs_readb,
61 .writel = sdhci_hlwd_writel,
62 .writew = sdhci_hlwd_writew,
63 .writeb = sdhci_hlwd_writeb,
64 },
65};
diff --git a/drivers/mmc/host/sdhci-of.h b/drivers/mmc/host/sdhci-of.h
new file mode 100644
index 000000000000..ad09ad9915d8
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of.h
@@ -0,0 +1,42 @@
1/*
2 * OpenFirmware bindings for Secure Digital Host Controller Interface.
3 *
4 * Copyright (c) 2007 Freescale Semiconductor, Inc.
5 * Copyright (c) 2009 MontaVista Software, Inc.
6 *
7 * Authors: Xiaobo Xie <X.Xie@freescale.com>
8 * Anton Vorontsov <avorontsov@ru.mvista.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 */
15
16#ifndef __SDHCI_OF_H
17#define __SDHCI_OF_H
18
19#include <linux/types.h>
20#include "sdhci.h"
21
22struct sdhci_of_data {
23 unsigned int quirks;
24 struct sdhci_ops ops;
25};
26
27struct sdhci_of_host {
28 unsigned int clock;
29 u16 xfer_mode_shadow;
30};
31
32extern u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg);
33extern u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg);
34extern u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg);
35extern void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg);
36extern void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg);
37extern void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg);
38
39extern struct sdhci_of_data sdhci_esdhc;
40extern struct sdhci_of_data sdhci_hlwd;
41
42#endif /* __SDHCI_OF_H */
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index ce5f1d73dc04..842f46f94284 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -8,6 +8,8 @@
8 * the Free Software Foundation; either version 2 of the License, or (at 8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version. 9 * your option) any later version.
10 */ 10 */
11#ifndef __SDHCI_H
12#define __SDHCI_H
11 13
12#include <linux/scatterlist.h> 14#include <linux/scatterlist.h>
13#include <linux/compiler.h> 15#include <linux/compiler.h>
@@ -408,3 +410,5 @@ extern void sdhci_remove_host(struct sdhci_host *host, int dead);
408extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); 410extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
409extern int sdhci_resume_host(struct sdhci_host *host); 411extern int sdhci_resume_host(struct sdhci_host *host);
410#endif 412#endif
413
414#endif /* __SDHCI_H */
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 74fa075c838a..b13f6417b5b2 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -20,14 +20,23 @@
20 20
21#include <asm/io.h> 21#include <asm/io.h>
22#include <mach/hardware.h> 22#include <mach/hardware.h>
23#include <asm/cacheflush.h>
24 23
25#include <asm/mach/flash.h> 24#include <asm/mach/flash.h>
26 25
26#define CACHELINESIZE 32
27
27static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from, 28static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
28 ssize_t len) 29 ssize_t len)
29{ 30{
30 flush_ioremap_region(map->phys, map->cached, from, len); 31 unsigned long start = (unsigned long)map->cached + from;
32 unsigned long end = start + len;
33
34 start &= ~(CACHELINESIZE - 1);
35 while (start < end) {
36 /* invalidate D cache line */
37 asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start));
38 start += CACHELINESIZE;
39 }
31} 40}
32 41
33struct pxa2xx_flash_info { 42struct pxa2xx_flash_info {
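
The replacement above open-codes the D-cache invalidation that flush_ioremap_region() used to provide: the start address is rounded down to a 32-byte cache line and every line covering the range is invalidated with an mcr to cp15 c7, c6, 1. The alignment arithmetic on its own, with illustrative addresses (no cache-maintenance instruction here, only the loop bounds):

    #include <stdint.h>
    #include <stdio.h>

    #define CACHELINESIZE 32        /* line size assumed by the patch */

    int main(void)
    {
            uintptr_t start = 0x4001234d, end = start + 100;

            start &= ~(uintptr_t)(CACHELINESIZE - 1);   /* round down to a line */
            for (; start < end; start += CACHELINESIZE)
                    printf("invalidate line at 0x%lx\n", (unsigned long)start);
            return 0;
    }
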
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7678538344f4..677cd53f18c3 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -291,14 +291,6 @@ config MTD_NAND_SHARPSL
291 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)" 291 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
292 depends on ARCH_PXA 292 depends on ARCH_PXA
293 293
294config MTD_NAND_BASLER_EXCITE
295 tristate "Support for NAND Flash on Basler eXcite"
296 depends on BASLER_EXCITE
297 help
298 This enables the driver for the NAND flash device found on the
299 Basler eXcite Smart Camera. If built as a module, the driver
300 will be named excite_nandflash.
301
302config MTD_NAND_CAFE 294config MTD_NAND_CAFE
303 tristate "NAND support for OLPC CAFÉ chip" 295 tristate "NAND support for OLPC CAFÉ chip"
304 depends on PCI 296 depends on PCI
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 460a1f39a8d1..1407bd144015 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
27obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o 27obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
28obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o 28obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
29obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o 29obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
30obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
31obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o 30obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
32obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o 31obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
33obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o 32obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
diff --git a/drivers/mtd/nand/excite_nandflash.c b/drivers/mtd/nand/excite_nandflash.c
deleted file mode 100644
index af6a6a5399e1..000000000000
--- a/drivers/mtd/nand/excite_nandflash.c
+++ /dev/null
@@ -1,248 +0,0 @@
1/*
2* Copyright (C) 2005 - 2007 by Basler Vision Technologies AG
3* Author: Thomas Koeller <thomas.koeller.qbaslerweb.com>
4* Original code by Thies Moeller <thies.moeller@baslerweb.com>
5*
6* This program is free software; you can redistribute it and/or modify
7* it under the terms of the GNU General Public License as published by
8* the Free Software Foundation; either version 2 of the License, or
9* (at your option) any later version.
10*
11* This program is distributed in the hope that it will be useful,
12* but WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14* GNU General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19*/
20
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/init.h>
24#include <linux/kernel.h>
25#include <linux/string.h>
26#include <linux/ioport.h>
27#include <linux/platform_device.h>
28#include <linux/delay.h>
29#include <linux/err.h>
30
31#include <linux/mtd/mtd.h>
32#include <linux/mtd/nand.h>
33#include <linux/mtd/nand_ecc.h>
34#include <linux/mtd/partitions.h>
35
36#include <asm/io.h>
37#include <asm/rm9k-ocd.h>
38
39#include <excite_nandflash.h>
40
41#define EXCITE_NANDFLASH_VERSION "0.1"
42
43/* I/O register offsets */
44#define EXCITE_NANDFLASH_DATA_BYTE 0x00
45#define EXCITE_NANDFLASH_STATUS_BYTE 0x0c
46#define EXCITE_NANDFLASH_ADDR_BYTE 0x10
47#define EXCITE_NANDFLASH_CMD_BYTE 0x14
48
49/* prefix for debug output */
50static const char module_id[] = "excite_nandflash";
51
52/*
53 * partition definition
54 */
55static const struct mtd_partition partition_info[] = {
56 {
57 .name = "eXcite RootFS",
58 .offset = 0,
59 .size = MTDPART_SIZ_FULL
60 }
61};
62
63static inline const struct resource *
64excite_nand_get_resource(struct platform_device *d, unsigned long flags,
65 const char *basename)
66{
67 char buf[80];
68
69 if (snprintf(buf, sizeof buf, "%s_%u", basename, d->id) >= sizeof buf)
70 return NULL;
71 return platform_get_resource_byname(d, flags, buf);
72}
73
74static inline void __iomem *
75excite_nand_map_regs(struct platform_device *d, const char *basename)
76{
77 void *result = NULL;
78 const struct resource *const r =
79 excite_nand_get_resource(d, IORESOURCE_MEM, basename);
80
81 if (r)
82 result = ioremap_nocache(r->start, r->end + 1 - r->start);
83 return result;
84}
85
86/* controller and mtd information */
87struct excite_nand_drvdata {
88 struct mtd_info board_mtd;
89 struct nand_chip board_chip;
90 void __iomem *regs;
91 void __iomem *tgt;
92};
93
94/* Control function */
95static void excite_nand_control(struct mtd_info *mtd, int cmd,
96 unsigned int ctrl)
97{
98 struct excite_nand_drvdata * const d =
99 container_of(mtd, struct excite_nand_drvdata, board_mtd);
100
101 switch (ctrl) {
102 case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
103 d->tgt = d->regs + EXCITE_NANDFLASH_CMD_BYTE;
104 break;
105 case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
106 d->tgt = d->regs + EXCITE_NANDFLASH_ADDR_BYTE;
107 break;
108 case NAND_CTRL_CHANGE | NAND_NCE:
109 d->tgt = d->regs + EXCITE_NANDFLASH_DATA_BYTE;
110 break;
111 }
112
113 if (cmd != NAND_CMD_NONE)
114 __raw_writeb(cmd, d->tgt);
115}
116
117/* Return 0 if flash is busy, 1 if ready */
118static int excite_nand_devready(struct mtd_info *mtd)
119{
120 struct excite_nand_drvdata * const drvdata =
121 container_of(mtd, struct excite_nand_drvdata, board_mtd);
122
123 return __raw_readb(drvdata->regs + EXCITE_NANDFLASH_STATUS_BYTE);
124}
125
126/*
127 * Called by device layer to remove the driver.
128 * The binding to the mtd and all allocated
129 * resources are released.
130 */
131static int __devexit excite_nand_remove(struct platform_device *dev)
132{
133 struct excite_nand_drvdata * const this = platform_get_drvdata(dev);
134
135 platform_set_drvdata(dev, NULL);
136
137 if (unlikely(!this)) {
138 printk(KERN_ERR "%s: called %s without private data!!",
139 module_id, __func__);
140 return -EINVAL;
141 }
142
143 /* first thing we need to do is release our mtd
144 * then go through freeing the resource used
145 */
146 nand_release(&this->board_mtd);
147
148 /* free the common resources */
149 iounmap(this->regs);
150 kfree(this);
151
152 DEBUG(MTD_DEBUG_LEVEL1, "%s: removed\n", module_id);
153 return 0;
154}
155
156/*
157 * Called by device layer when it finds a device matching
158 * one our driver can handle. This code checks to see if
159 * it can allocate all necessary resources then calls the
160 * nand layer to look for devices.
161*/
162static int __init excite_nand_probe(struct platform_device *pdev)
163{
164 struct excite_nand_drvdata *drvdata; /* private driver data */
165 struct nand_chip *board_chip; /* private flash chip data */
166 struct mtd_info *board_mtd; /* mtd info for this board */
167 int scan_res;
168
169 drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
170 if (unlikely(!drvdata)) {
171 printk(KERN_ERR "%s: no memory for drvdata\n",
172 module_id);
173 return -ENOMEM;
174 }
175
176 /* bind private data into driver */
177 platform_set_drvdata(pdev, drvdata);
178
179 /* allocate and map the resource */
180 drvdata->regs =
181 excite_nand_map_regs(pdev, EXCITE_NANDFLASH_RESOURCE_REGS);
182
183 if (unlikely(!drvdata->regs)) {
184 printk(KERN_ERR "%s: cannot reserve register region\n",
185 module_id);
186 kfree(drvdata);
187 return -ENXIO;
188 }
189
190 drvdata->tgt = drvdata->regs + EXCITE_NANDFLASH_DATA_BYTE;
191
192 /* initialise our chip */
193 board_chip = &drvdata->board_chip;
194 board_chip->IO_ADDR_R = board_chip->IO_ADDR_W =
195 drvdata->regs + EXCITE_NANDFLASH_DATA_BYTE;
196 board_chip->cmd_ctrl = excite_nand_control;
197 board_chip->dev_ready = excite_nand_devready;
198 board_chip->chip_delay = 25;
199 board_chip->ecc.mode = NAND_ECC_SOFT;
200
201 /* link chip to mtd */
202 board_mtd = &drvdata->board_mtd;
203 board_mtd->priv = board_chip;
204
205 DEBUG(MTD_DEBUG_LEVEL2, "%s: device scan\n", module_id);
206 scan_res = nand_scan(&drvdata->board_mtd, 1);
207
208 if (likely(!scan_res)) {
209 DEBUG(MTD_DEBUG_LEVEL2, "%s: register partitions\n", module_id);
210 add_mtd_partitions(&drvdata->board_mtd, partition_info,
211 ARRAY_SIZE(partition_info));
212 } else {
213 iounmap(drvdata->regs);
214 kfree(drvdata);
215 printk(KERN_ERR "%s: device scan failed\n", module_id);
216 return -EIO;
217 }
218 return 0;
219}
220
221static struct platform_driver excite_nand_driver = {
222 .driver = {
223 .name = "excite_nand",
224 .owner = THIS_MODULE,
225 },
226 .probe = excite_nand_probe,
227 .remove = __devexit_p(excite_nand_remove)
228};
229
230static int __init excite_nand_init(void)
231{
232 pr_info("Basler eXcite nand flash driver Version "
233 EXCITE_NANDFLASH_VERSION "\n");
234 return platform_driver_register(&excite_nand_driver);
235}
236
237static void __exit excite_nand_exit(void)
238{
239 platform_driver_unregister(&excite_nand_driver);
240}
241
242module_init(excite_nand_init);
243module_exit(excite_nand_exit);
244
245MODULE_AUTHOR("Thomas Koeller <thomas.koeller@baslerweb.com>");
246MODULE_DESCRIPTION("Basler eXcite NAND-Flash driver");
247MODULE_LICENSE("GPL");
248MODULE_VERSION(EXCITE_NANDFLASH_VERSION)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a5be9ac6405c..e58a65391ad2 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1953,6 +1953,8 @@ config BCM63XX_ENET
1953 1953
1954source "drivers/net/fs_enet/Kconfig" 1954source "drivers/net/fs_enet/Kconfig"
1955 1955
1956source "drivers/net/octeon/Kconfig"
1957
1956endif # NET_ETHERNET 1958endif # NET_ETHERNET
1957 1959
1958# 1960#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 246323d7f161..ad1346dd9da9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -285,3 +285,5 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
285obj-$(CONFIG_SFC) += sfc/ 285obj-$(CONFIG_SFC) += sfc/
286 286
287obj-$(CONFIG_WIMAX) += wimax/ 287obj-$(CONFIG_WIMAX) += wimax/
288
289obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 4bfc80812926..65df1de447e4 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -653,12 +653,20 @@ static void
653bnx2_netif_stop(struct bnx2 *bp) 653bnx2_netif_stop(struct bnx2 *bp)
654{ 654{
655 bnx2_cnic_stop(bp); 655 bnx2_cnic_stop(bp);
656 bnx2_disable_int_sync(bp);
657 if (netif_running(bp->dev)) { 656 if (netif_running(bp->dev)) {
657 int i;
658
658 bnx2_napi_disable(bp); 659 bnx2_napi_disable(bp);
659 netif_tx_disable(bp->dev); 660 netif_tx_disable(bp->dev);
660 bp->dev->trans_start = jiffies; /* prevent tx timeout */ 661 /* prevent tx timeout */
662 for (i = 0; i < bp->dev->num_tx_queues; i++) {
663 struct netdev_queue *txq;
664
665 txq = netdev_get_tx_queue(bp->dev, i);
666 txq->trans_start = jiffies;
667 }
661 } 668 }
669 bnx2_disable_int_sync(bp);
662} 670}
663 671
664static void 672static void
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index d0ec17878ffc..166cc7e579c0 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1037,7 +1037,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
1037 1037
1038 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1038 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1039 irq = platform_get_irq(pdev, 0); 1039 irq = platform_get_irq(pdev, 0);
1040 if (!res || !irq) { 1040 if (!res || irq <= 0) {
1041 err = -ENODEV; 1041 err = -ENODEV;
1042 goto exit_put; 1042 goto exit_put;
1043 } 1043 }
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 8edac8915ea8..34e03104c3c1 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2272,7 +2272,7 @@ static int emac_mii_reset(struct mii_bus *bus)
2272 unsigned int clk_div; 2272 unsigned int clk_div;
2273 int mdio_bus_freq = emac_bus_frequency; 2273 int mdio_bus_freq = emac_bus_frequency;
2274 2274
2275 if (mdio_max_freq & mdio_bus_freq) 2275 if (mdio_max_freq && mdio_bus_freq)
2276 clk_div = ((mdio_bus_freq / mdio_max_freq) - 1); 2276 clk_div = ((mdio_bus_freq / mdio_max_freq) - 1);
2277 else 2277 else
2278 clk_div = 0xFF; 2278 clk_div = 0xFF;
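
The one-character fix above is the difference between bitwise and logical AND: with &, two perfectly valid, non-zero frequencies whose bit patterns happen not to overlap would be treated as unset and the MDIO clock divider would fall back to 0xFF. A tiny demonstration (the two values are chosen only to have disjoint bits; they are not real clock rates):

    #include <stdio.h>

    int main(void)
    {
            unsigned int max_freq = 0x2000000;  /* non-zero, illustrative */
            unsigned int bus_freq = 0x0180000;  /* non-zero, no common bits */

            printf("bitwise AND: %u\n", max_freq & bus_freq);   /* 0: wrongly "unset" */
            printf("logical AND: %d\n", max_freq && bus_freq);  /* 1: both are set    */
            return 0;
    }
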
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 929701ca07d3..839fb2b136d3 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1829,6 +1829,7 @@ static int e100_alloc_cbs(struct nic *nic)
1829 &nic->cbs_dma_addr); 1829 &nic->cbs_dma_addr);
1830 if (!nic->cbs) 1830 if (!nic->cbs)
1831 return -ENOMEM; 1831 return -ENOMEM;
1832 memset(nic->cbs, 0, count * sizeof(struct cb));
1832 1833
1833 for (cb = nic->cbs, i = 0; i < count; cb++, i++) { 1834 for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
1834 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; 1835 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
@@ -1837,7 +1838,6 @@ static int e100_alloc_cbs(struct nic *nic)
1837 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); 1838 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1838 cb->link = cpu_to_le32(nic->cbs_dma_addr + 1839 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1839 ((i+1) % count) * sizeof(struct cb)); 1840 ((i+1) % count) * sizeof(struct cb));
1840 cb->skb = NULL;
1841 } 1841 }
1842 1842
1843 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; 1843 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index c1a42cfc80ba..b979464091bb 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1290,7 +1290,6 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
1290static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) 1290static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
1291{ 1291{
1292 u32 ctrl; 1292 u32 ctrl;
1293 u32 led_ctrl;
1294 s32 ret_val; 1293 s32 ret_val;
1295 1294
1296 ctrl = er32(CTRL); 1295 ctrl = er32(CTRL);
@@ -1305,11 +1304,6 @@ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
1305 break; 1304 break;
1306 case e1000_phy_igp_2: 1305 case e1000_phy_igp_2:
1307 ret_val = e1000e_copper_link_setup_igp(hw); 1306 ret_val = e1000e_copper_link_setup_igp(hw);
1308 /* Setup activity LED */
1309 led_ctrl = er32(LEDCTL);
1310 led_ctrl &= IGP_ACTIVITY_LED_MASK;
1311 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
1312 ew32(LEDCTL, led_ctrl);
1313 break; 1307 break;
1314 default: 1308 default:
1315 return -E1000_ERR_PHY; 1309 return -E1000_ERR_PHY;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 6850dc0a7b91..e0620d084644 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -357,8 +357,11 @@ static void gfar_init_mac(struct net_device *ndev)
357 /* Configure the coalescing support */ 357 /* Configure the coalescing support */
358 gfar_configure_coalescing(priv, 0xFF, 0xFF); 358 gfar_configure_coalescing(priv, 0xFF, 0xFF);
359 359
360 if (priv->rx_filer_enable) 360 if (priv->rx_filer_enable) {
361 rctrl |= RCTRL_FILREN; 361 rctrl |= RCTRL_FILREN;
362 /* Program the RIR0 reg with the required distribution */
363 gfar_write(&regs->rir0, DEFAULT_RIR0);
364 }
362 365
363 if (priv->rx_csum_enable) 366 if (priv->rx_csum_enable)
364 rctrl |= RCTRL_CHECKSUMMING; 367 rctrl |= RCTRL_CHECKSUMMING;
@@ -414,6 +417,36 @@ static void gfar_init_mac(struct net_device *ndev)
414 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off); 417 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
415} 418}
416 419
420static struct net_device_stats *gfar_get_stats(struct net_device *dev)
421{
422 struct gfar_private *priv = netdev_priv(dev);
423 struct netdev_queue *txq;
424 unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
425 unsigned long tx_packets = 0, tx_bytes = 0;
426 int i = 0;
427
428 for (i = 0; i < priv->num_rx_queues; i++) {
429 rx_packets += priv->rx_queue[i]->stats.rx_packets;
430 rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
431 rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
432 }
433
434 dev->stats.rx_packets = rx_packets;
435 dev->stats.rx_bytes = rx_bytes;
436 dev->stats.rx_dropped = rx_dropped;
437
438 for (i = 0; i < priv->num_tx_queues; i++) {
439 txq = netdev_get_tx_queue(dev, i);
440 tx_bytes += txq->tx_bytes;
441 tx_packets += txq->tx_packets;
442 }
443
444 dev->stats.tx_bytes = tx_bytes;
445 dev->stats.tx_packets = tx_packets;
446
447 return &dev->stats;
448}
449
417static const struct net_device_ops gfar_netdev_ops = { 450static const struct net_device_ops gfar_netdev_ops = {
418 .ndo_open = gfar_enet_open, 451 .ndo_open = gfar_enet_open,
419 .ndo_start_xmit = gfar_start_xmit, 452 .ndo_start_xmit = gfar_start_xmit,
@@ -423,6 +456,7 @@ static const struct net_device_ops gfar_netdev_ops = {
423 .ndo_tx_timeout = gfar_timeout, 456 .ndo_tx_timeout = gfar_timeout,
424 .ndo_do_ioctl = gfar_ioctl, 457 .ndo_do_ioctl = gfar_ioctl,
425 .ndo_select_queue = gfar_select_queue, 458 .ndo_select_queue = gfar_select_queue,
459 .ndo_get_stats = gfar_get_stats,
426 .ndo_vlan_rx_register = gfar_vlan_rx_register, 460 .ndo_vlan_rx_register = gfar_vlan_rx_register,
427 .ndo_set_mac_address = eth_mac_addr, 461 .ndo_set_mac_address = eth_mac_addr,
428 .ndo_validate_addr = eth_validate_addr, 462 .ndo_validate_addr = eth_validate_addr,
@@ -1022,6 +1056,9 @@ static int gfar_probe(struct of_device *ofdev,
1022 priv->rx_queue[i]->rxic = DEFAULT_RXIC; 1056 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1023 } 1057 }
1024 1058
1059 /* enable filer if using multiple RX queues*/
1060 if(priv->num_rx_queues > 1)
1061 priv->rx_filer_enable = 1;
1025 /* Enable most messages by default */ 1062 /* Enable most messages by default */
1026 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1063 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
1027 1064
@@ -1937,7 +1974,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1937 } 1974 }
1938 1975
1939 /* Update transmit stats */ 1976 /* Update transmit stats */
1940 dev->stats.tx_bytes += skb->len; 1977 txq->tx_bytes += skb->len;
1978 txq->tx_packets ++;
1941 1979
1942 txbdp = txbdp_start = tx_queue->cur_tx; 1980 txbdp = txbdp_start = tx_queue->cur_tx;
1943 1981
@@ -2295,8 +2333,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2295 tx_queue->skb_dirtytx = skb_dirtytx; 2333 tx_queue->skb_dirtytx = skb_dirtytx;
2296 tx_queue->dirty_tx = bdp; 2334 tx_queue->dirty_tx = bdp;
2297 2335
2298 dev->stats.tx_packets += howmany;
2299
2300 return howmany; 2336 return howmany;
2301} 2337}
2302 2338
@@ -2510,14 +2546,14 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2510 } 2546 }
2511 } else { 2547 } else {
2512 /* Increment the number of packets */ 2548 /* Increment the number of packets */
2513 dev->stats.rx_packets++; 2549 rx_queue->stats.rx_packets++;
2514 howmany++; 2550 howmany++;
2515 2551
2516 if (likely(skb)) { 2552 if (likely(skb)) {
2517 pkt_len = bdp->length - ETH_FCS_LEN; 2553 pkt_len = bdp->length - ETH_FCS_LEN;
2518 /* Remove the FCS from the packet length */ 2554 /* Remove the FCS from the packet length */
2519 skb_put(skb, pkt_len); 2555 skb_put(skb, pkt_len);
2520 dev->stats.rx_bytes += pkt_len; 2556 rx_queue->stats.rx_bytes += pkt_len;
2521 2557
2522 gfar_process_frame(dev, skb, amount_pull); 2558 gfar_process_frame(dev, skb, amount_pull);
2523 2559
@@ -2525,7 +2561,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2525 if (netif_msg_rx_err(priv)) 2561 if (netif_msg_rx_err(priv))
2526 printk(KERN_WARNING 2562 printk(KERN_WARNING
2527 "%s: Missing skb!\n", dev->name); 2563 "%s: Missing skb!\n", dev->name);
2528 dev->stats.rx_dropped++; 2564 rx_queue->stats.rx_dropped++;
2529 priv->extra_stats.rx_skbmissing++; 2565 priv->extra_stats.rx_skbmissing++;
2530 } 2566 }
2531 2567
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index cbb451011cb5..3d72dc43dca5 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -333,7 +333,7 @@ extern const char gfar_driver_version[];
333#define IMASK_BSY 0x20000000 333#define IMASK_BSY 0x20000000
334#define IMASK_EBERR 0x10000000 334#define IMASK_EBERR 0x10000000
335#define IMASK_MSRO 0x04000000 335#define IMASK_MSRO 0x04000000
336#define IMASK_GRSC 0x02000000 336#define IMASK_GTSC 0x02000000
337#define IMASK_BABT 0x01000000 337#define IMASK_BABT 0x01000000
338#define IMASK_TXC 0x00800000 338#define IMASK_TXC 0x00800000
339#define IMASK_TXEEN 0x00400000 339#define IMASK_TXEEN 0x00400000
@@ -344,7 +344,7 @@ extern const char gfar_driver_version[];
344#define IMASK_XFUN 0x00010000 344#define IMASK_XFUN 0x00010000
345#define IMASK_RXB0 0x00008000 345#define IMASK_RXB0 0x00008000
346#define IMASK_MAG 0x00000800 346#define IMASK_MAG 0x00000800
347#define IMASK_GTSC 0x00000100 347#define IMASK_GRSC 0x00000100
348#define IMASK_RXFEN0 0x00000080 348#define IMASK_RXFEN0 0x00000080
349#define IMASK_FIR 0x00000008 349#define IMASK_FIR 0x00000008
350#define IMASK_FIQ 0x00000004 350#define IMASK_FIQ 0x00000004
@@ -401,6 +401,10 @@ extern const char gfar_driver_version[];
401#define FPR_FILER_MASK 0xFFFFFFFF 401#define FPR_FILER_MASK 0xFFFFFFFF
402#define MAX_FILER_IDX 0xFF 402#define MAX_FILER_IDX 0xFF
403 403
404/* This default RIR value directly corresponds
405 * to the 3-bit hash value generated */
406#define DEFAULT_RIR0 0x05397700
407
404/* RQFCR register bits */ 408/* RQFCR register bits */
405#define RQFCR_GPI 0x80000000 409#define RQFCR_GPI 0x80000000
406#define RQFCR_HASHTBL_Q 0x00000000 410#define RQFCR_HASHTBL_Q 0x00000000
@@ -936,6 +940,15 @@ struct gfar_priv_tx_q {
936 unsigned short txtime; 940 unsigned short txtime;
937}; 941};
938 942
943/*
944 * Per RX queue stats
945 */
946struct rx_q_stats {
947 unsigned long rx_packets;
948 unsigned long rx_bytes;
949 unsigned long rx_dropped;
950};
951
939/** 952/**
940 * struct gfar_priv_rx_q - per rx queue structure 953 * struct gfar_priv_rx_q - per rx queue structure
941 * @rxlock: per queue rx spin lock 954 * @rxlock: per queue rx spin lock
@@ -958,6 +971,7 @@ struct gfar_priv_rx_q {
958 struct rxbd8 *cur_rx; 971 struct rxbd8 *cur_rx;
959 struct net_device *dev; 972 struct net_device *dev;
960 struct gfar_priv_grp *grp; 973 struct gfar_priv_grp *grp;
974 struct rx_q_stats stats;
961 u16 skb_currx; 975 u16 skb_currx;
962 u16 qindex; 976 u16 qindex;
963 unsigned int rx_ring_size; 977 unsigned int rx_ring_size;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index f4996846a234..6cae26a5bd67 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -57,7 +57,9 @@ static int use_msi = 1;
57 57
58static int use_msi_x = 1; 58static int use_msi_x = 1;
59 59
60static unsigned long auto_fw_reset = AUTO_FW_RESET_ENABLED; 60static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
61module_param(auto_fw_reset, int, 0644);
 62MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
61 63
62static int __devinit netxen_nic_probe(struct pci_dev *pdev, 64static int __devinit netxen_nic_probe(struct pci_dev *pdev,
63 const struct pci_device_id *ent); 65 const struct pci_device_id *ent);
@@ -2534,42 +2536,6 @@ static struct bin_attribute bin_attr_mem = {
2534 .write = netxen_sysfs_write_mem, 2536 .write = netxen_sysfs_write_mem,
2535}; 2537};
2536 2538
2537#ifdef CONFIG_MODULES
2538static ssize_t
2539netxen_store_auto_fw_reset(struct module_attribute *mattr,
2540 struct module *mod, const char *buf, size_t count)
2541
2542{
2543 unsigned long new;
2544
2545 if (strict_strtoul(buf, 16, &new))
2546 return -EINVAL;
2547
2548 if ((new == AUTO_FW_RESET_ENABLED) || (new == AUTO_FW_RESET_DISABLED)) {
2549 auto_fw_reset = new;
2550 return count;
2551 }
2552
2553 return -EINVAL;
2554}
2555
2556static ssize_t
2557netxen_show_auto_fw_reset(struct module_attribute *mattr,
2558 struct module *mod, char *buf)
2559
2560{
2561 if (auto_fw_reset == AUTO_FW_RESET_ENABLED)
2562 return sprintf(buf, "enabled\n");
2563 else
2564 return sprintf(buf, "disabled\n");
2565}
2566
2567static struct module_attribute mod_attr_fw_reset = {
2568 .attr = {.name = "auto_fw_reset", .mode = (S_IRUGO | S_IWUSR)},
2569 .show = netxen_show_auto_fw_reset,
2570 .store = netxen_store_auto_fw_reset,
2571};
2572#endif
2573 2539
2574static void 2540static void
2575netxen_create_sysfs_entries(struct netxen_adapter *adapter) 2541netxen_create_sysfs_entries(struct netxen_adapter *adapter)
@@ -2775,23 +2741,12 @@ static struct pci_driver netxen_driver = {
2775 2741
2776static int __init netxen_init_module(void) 2742static int __init netxen_init_module(void)
2777{ 2743{
2778#ifdef CONFIG_MODULES
2779 struct module *mod = THIS_MODULE;
2780#endif
2781
2782 printk(KERN_INFO "%s\n", netxen_nic_driver_string); 2744 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
2783 2745
2784#ifdef CONFIG_INET 2746#ifdef CONFIG_INET
2785 register_netdevice_notifier(&netxen_netdev_cb); 2747 register_netdevice_notifier(&netxen_netdev_cb);
2786 register_inetaddr_notifier(&netxen_inetaddr_cb); 2748 register_inetaddr_notifier(&netxen_inetaddr_cb);
2787#endif 2749#endif
2788
2789#ifdef CONFIG_MODULES
2790 if (sysfs_create_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr))
2791 printk(KERN_ERR "%s: Failed to create auto_fw_reset "
2792 "sysfs entry.", netxen_nic_driver_name);
2793#endif
2794
2795 return pci_register_driver(&netxen_driver); 2750 return pci_register_driver(&netxen_driver);
2796} 2751}
2797 2752
@@ -2799,12 +2754,6 @@ module_init(netxen_init_module);
2799 2754
2800static void __exit netxen_exit_module(void) 2755static void __exit netxen_exit_module(void)
2801{ 2756{
2802#ifdef CONFIG_MODULES
2803 struct module *mod = THIS_MODULE;
2804
2805 sysfs_remove_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr);
2806#endif
2807
2808 pci_unregister_driver(&netxen_driver); 2757 pci_unregister_driver(&netxen_driver);
2809 2758
2810#ifdef CONFIG_INET 2759#ifdef CONFIG_INET
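
The netxen change above replaces roughly forty lines of hand-rolled module_attribute plumbing with a single module_param() declaration; with a 0644 mode the value remains root-writable at runtime and shows up under /sys/module/<module>/parameters/ instead of a custom attribute. A minimal, hypothetical module showing the same pattern (a generic sketch, not the netxen driver itself):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* Hypothetical module demonstrating the module_param() pattern. */
    static int auto_fw_reset = 1;
    module_param(auto_fw_reset, int, 0644);
    MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");

    static int __init demo_init(void) { return 0; }
    static void __exit demo_exit(void) { }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
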
diff --git a/drivers/net/octeon/Kconfig b/drivers/net/octeon/Kconfig
new file mode 100644
index 000000000000..1e56bbf3f5c0
--- /dev/null
+++ b/drivers/net/octeon/Kconfig
@@ -0,0 +1,10 @@
1config OCTEON_MGMT_ETHERNET
2 tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)"
3 depends on CPU_CAVIUM_OCTEON
4 select PHYLIB
5 select MDIO_OCTEON
6 default y
7 help
8 This option enables the ethernet driver for the management
9 port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
10 CN54XX, CN52XX, and CN6XXX chips.
diff --git a/drivers/net/octeon/Makefile b/drivers/net/octeon/Makefile
new file mode 100644
index 000000000000..906edecacfd3
--- /dev/null
+++ b/drivers/net/octeon/Makefile
@@ -0,0 +1,2 @@
1
2obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
new file mode 100644
index 000000000000..050538bf155a
--- /dev/null
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -0,0 +1,1176 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2009 Cavium Networks
7 */
8
9#include <linux/capability.h>
10#include <linux/dma-mapping.h>
11#include <linux/init.h>
12#include <linux/platform_device.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/if_vlan.h>
16#include <linux/phy.h>
17#include <linux/spinlock.h>
18
19#include <asm/octeon/octeon.h>
20#include <asm/octeon/cvmx-mixx-defs.h>
21#include <asm/octeon/cvmx-agl-defs.h>
22
23#define DRV_NAME "octeon_mgmt"
24#define DRV_VERSION "2.0"
25#define DRV_DESCRIPTION \
26 "Cavium Networks Octeon MII (management) port Network Driver"
27
28#define OCTEON_MGMT_NAPI_WEIGHT 16
29
30/*
31 * Ring sizes that are powers of two allow for more efficient modulo
 32 * operations.
33 */
34#define OCTEON_MGMT_RX_RING_SIZE 512
35#define OCTEON_MGMT_TX_RING_SIZE 128
36
37/* Allow 8 bytes for vlan and FCS. */
38#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
39
40union mgmt_port_ring_entry {
41 u64 d64;
42 struct {
43 u64 reserved_62_63:2;
44 /* Length of the buffer/packet in bytes */
45 u64 len:14;
46 /* For TX, signals that the packet should be timestamped */
47 u64 tstamp:1;
48 /* The RX error code */
49 u64 code:7;
50#define RING_ENTRY_CODE_DONE 0xf
51#define RING_ENTRY_CODE_MORE 0x10
52 /* Physical address of the buffer */
53 u64 addr:40;
54 } s;
55};
56
57struct octeon_mgmt {
58 struct net_device *netdev;
59 int port;
60 int irq;
61 u64 *tx_ring;
62 dma_addr_t tx_ring_handle;
63 unsigned int tx_next;
64 unsigned int tx_next_clean;
65 unsigned int tx_current_fill;
66 /* The tx_list lock also protects the ring related variables */
67 struct sk_buff_head tx_list;
68
69 /* RX variables only touched in napi_poll. No locking necessary. */
70 u64 *rx_ring;
71 dma_addr_t rx_ring_handle;
72 unsigned int rx_next;
73 unsigned int rx_next_fill;
74 unsigned int rx_current_fill;
75 struct sk_buff_head rx_list;
76
77 spinlock_t lock;
78 unsigned int last_duplex;
79 unsigned int last_link;
80 struct device *dev;
81 struct napi_struct napi;
82 struct tasklet_struct tx_clean_tasklet;
83 struct phy_device *phydev;
84};
85
86static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
87{
88 int port = p->port;
89 union cvmx_mixx_intena mix_intena;
90 unsigned long flags;
91
92 spin_lock_irqsave(&p->lock, flags);
93 mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
94 mix_intena.s.ithena = enable ? 1 : 0;
95 cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
96 spin_unlock_irqrestore(&p->lock, flags);
97}
98
99static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
100{
101 int port = p->port;
102 union cvmx_mixx_intena mix_intena;
103 unsigned long flags;
104
105 spin_lock_irqsave(&p->lock, flags);
106 mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
107 mix_intena.s.othena = enable ? 1 : 0;
108 cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
109 spin_unlock_irqrestore(&p->lock, flags);
110}
111
112static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
113{
114 octeon_mgmt_set_rx_irq(p, 1);
115}
116
117static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
118{
119 octeon_mgmt_set_rx_irq(p, 0);
120}
121
122static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
123{
124 octeon_mgmt_set_tx_irq(p, 1);
125}
126
127static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
128{
129 octeon_mgmt_set_tx_irq(p, 0);
130}
131
132static unsigned int ring_max_fill(unsigned int ring_size)
133{
134 return ring_size - 8;
135}
136
137static unsigned int ring_size_to_bytes(unsigned int ring_size)
138{
139 return ring_size * sizeof(union mgmt_port_ring_entry);
140}
141
142static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
143{
144 struct octeon_mgmt *p = netdev_priv(netdev);
145 int port = p->port;
146
147 while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
148 unsigned int size;
149 union mgmt_port_ring_entry re;
150 struct sk_buff *skb;
151
152 /* CN56XX pass 1 needs 8 bytes of padding. */
153 size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
154
155 skb = netdev_alloc_skb(netdev, size);
156 if (!skb)
157 break;
158 skb_reserve(skb, NET_IP_ALIGN);
159 __skb_queue_tail(&p->rx_list, skb);
160
161 re.d64 = 0;
162 re.s.len = size;
163 re.s.addr = dma_map_single(p->dev, skb->data,
164 size,
165 DMA_FROM_DEVICE);
166
167 /* Put it in the ring. */
168 p->rx_ring[p->rx_next_fill] = re.d64;
169 dma_sync_single_for_device(p->dev, p->rx_ring_handle,
170 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
171 DMA_BIDIRECTIONAL);
172 p->rx_next_fill =
173 (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
174 p->rx_current_fill++;
175 /* Ring the bell. */
176 cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
177 }
178}
179
180static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
181{
182 int port = p->port;
183 union cvmx_mixx_orcnt mix_orcnt;
184 union mgmt_port_ring_entry re;
185 struct sk_buff *skb;
186 int cleaned = 0;
187 unsigned long flags;
188
189 mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
190 while (mix_orcnt.s.orcnt) {
191 dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
192 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
193 DMA_BIDIRECTIONAL);
194
195 spin_lock_irqsave(&p->tx_list.lock, flags);
196
197 re.d64 = p->tx_ring[p->tx_next_clean];
198 p->tx_next_clean =
199 (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
200 skb = __skb_dequeue(&p->tx_list);
201
202 mix_orcnt.u64 = 0;
203 mix_orcnt.s.orcnt = 1;
204
205 /* Acknowledge to hardware that we have the buffer. */
206 cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
207 p->tx_current_fill--;
208
209 spin_unlock_irqrestore(&p->tx_list.lock, flags);
210
211 dma_unmap_single(p->dev, re.s.addr, re.s.len,
212 DMA_TO_DEVICE);
213 dev_kfree_skb_any(skb);
214 cleaned++;
215
216 mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
217 }
218
219 if (cleaned && netif_queue_stopped(p->netdev))
220 netif_wake_queue(p->netdev);
221}
222
223static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
224{
225 struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
226 octeon_mgmt_clean_tx_buffers(p);
227 octeon_mgmt_enable_tx_irq(p);
228}
229
230static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
231{
232 struct octeon_mgmt *p = netdev_priv(netdev);
233 int port = p->port;
234 unsigned long flags;
235 u64 drop, bad;
236
237 /* These reads also clear the count registers. */
238 drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
239 bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));
240
241 if (drop || bad) {
242 /* Do an atomic update. */
243 spin_lock_irqsave(&p->lock, flags);
244 netdev->stats.rx_errors += bad;
245 netdev->stats.rx_dropped += drop;
246 spin_unlock_irqrestore(&p->lock, flags);
247 }
248}
249
250static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
251{
252 struct octeon_mgmt *p = netdev_priv(netdev);
253 int port = p->port;
254 unsigned long flags;
255
256 union cvmx_agl_gmx_txx_stat0 s0;
257 union cvmx_agl_gmx_txx_stat1 s1;
258
259 /* These reads also clear the count registers. */
260 s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
261 s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));
262
263 if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
264 /* Do an atomic update. */
265 spin_lock_irqsave(&p->lock, flags);
266 netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
267 netdev->stats.collisions += s1.s.scol + s1.s.mcol;
268 spin_unlock_irqrestore(&p->lock, flags);
269 }
270}
271
272/*
273 * Dequeue a receive skb and its corresponding ring entry. The ring
274 * entry is returned, *pskb is updated to point to the skb.
275 */
276static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
277 struct sk_buff **pskb)
278{
279 union mgmt_port_ring_entry re;
280
281 dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
282 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
283 DMA_BIDIRECTIONAL);
284
285 re.d64 = p->rx_ring[p->rx_next];
286 p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
287 p->rx_current_fill--;
288 *pskb = __skb_dequeue(&p->rx_list);
289
290 dma_unmap_single(p->dev, re.s.addr,
291 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
292 DMA_FROM_DEVICE);
293
294 return re.d64;
295}
296
297
298static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
299{
300 int port = p->port;
301 struct net_device *netdev = p->netdev;
302 union cvmx_mixx_ircnt mix_ircnt;
303 union mgmt_port_ring_entry re;
304 struct sk_buff *skb;
305 struct sk_buff *skb2;
306 struct sk_buff *skb_new;
307 union mgmt_port_ring_entry re2;
308 int rc = 1;
309
310
311 re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
312 if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
313 /* A good packet, send it up. */
314 skb_put(skb, re.s.len);
315good:
316 skb->protocol = eth_type_trans(skb, netdev);
317 netdev->stats.rx_packets++;
318 netdev->stats.rx_bytes += skb->len;
319 netdev->last_rx = jiffies;
320 netif_receive_skb(skb);
321 rc = 0;
322 } else if (re.s.code == RING_ENTRY_CODE_MORE) {
323 /*
324 * Packet split across skbs. This can happen if we
325 * increase the MTU. Buffers that are already in the
326 * rx ring can then end up being too small. As the rx
327 * ring is refilled, buffers sized for the new MTU
328 * will be used and we should go back to the normal
329 * non-split case.
330 */
331 skb_put(skb, re.s.len);
332 do {
333 re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
334 if (re2.s.code != RING_ENTRY_CODE_MORE
335 && re2.s.code != RING_ENTRY_CODE_DONE)
336 goto split_error;
337 skb_put(skb2, re2.s.len);
338 skb_new = skb_copy_expand(skb, 0, skb2->len,
339 GFP_ATOMIC);
340 if (!skb_new)
341 goto split_error;
342 if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
343 skb2->len))
344 goto split_error;
345 skb_put(skb_new, skb2->len);
346 dev_kfree_skb_any(skb);
347 dev_kfree_skb_any(skb2);
348 skb = skb_new;
349 } while (re2.s.code == RING_ENTRY_CODE_MORE);
350 goto good;
351 } else {
352 /* Some other error, discard it. */
353 dev_kfree_skb_any(skb);
354 /*
355 * Error statistics are accumulated in
356 * octeon_mgmt_update_rx_stats.
357 */
358 }
359 goto done;
360split_error:
361 /* Discard the whole mess. */
362 dev_kfree_skb_any(skb);
363 dev_kfree_skb_any(skb2);
364 while (re2.s.code == RING_ENTRY_CODE_MORE) {
365 re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
366 dev_kfree_skb_any(skb2);
367 }
368 netdev->stats.rx_errors++;
369
370done:
371 /* Tell the hardware we processed a packet. */
372 mix_ircnt.u64 = 0;
373 mix_ircnt.s.ircnt = 1;
374 cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
375 return rc;
376
377}
378
379static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
380{
381 int port = p->port;
382 unsigned int work_done = 0;
383 union cvmx_mixx_ircnt mix_ircnt;
384 int rc;
385
386
387 mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
388 while (work_done < budget && mix_ircnt.s.ircnt) {
389
390 rc = octeon_mgmt_receive_one(p);
391 if (!rc)
392 work_done++;
393
394 /* Check for more packets. */
395 mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
396 }
397
398 octeon_mgmt_rx_fill_ring(p->netdev);
399
400 return work_done;
401}
402
403static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
404{
405 struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
406 struct net_device *netdev = p->netdev;
407 unsigned int work_done = 0;
408
409 work_done = octeon_mgmt_receive_packets(p, budget);
410
411 if (work_done < budget) {
412 /* We stopped because no more packets were available. */
413 napi_complete(napi);
414 octeon_mgmt_enable_rx_irq(p);
415 }
416 octeon_mgmt_update_rx_stats(netdev);
417
418 return work_done;
419}
420
421/* Reset the hardware to clean state. */
422static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
423{
424 union cvmx_mixx_ctl mix_ctl;
425 union cvmx_mixx_bist mix_bist;
426 union cvmx_agl_gmx_bist agl_gmx_bist;
427
428 mix_ctl.u64 = 0;
429 cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
430 do {
431 mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
432 } while (mix_ctl.s.busy);
433 mix_ctl.s.reset = 1;
434 cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
435 cvmx_read_csr(CVMX_MIXX_CTL(p->port));
436 cvmx_wait(64);
437
438 mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
439 if (mix_bist.u64)
440 dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
441 (unsigned long long)mix_bist.u64);
442
443 agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
444 if (agl_gmx_bist.u64)
445 dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
446 (unsigned long long)agl_gmx_bist.u64);
447}
448
449struct octeon_mgmt_cam_state {
450 u64 cam[6];
451 u64 cam_mask;
452 int cam_index;
453};
454
455static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
456 unsigned char *addr)
457{
458 int i;
459
460 for (i = 0; i < 6; i++)
461 cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
462 cs->cam_mask |= (1ULL << cs->cam_index);
463 cs->cam_index++;
464}
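/*
 * Editor's illustration (not from the original source): the CAM is laid
 * out by byte lane, so cam[i] collects byte i of every stored address,
 * shifted left by 8 * entry index.  Adding 00:01:02:03:04:05 as entry 0
 * and 0a:0b:0c:0d:0e:0f as entry 1 yields cam[0] = 0x0a00, cam[1] =
 * 0x0b01, ..., cam[5] = 0x0f05, with cam_mask = 0x3 (entries 0 and 1
 * enabled).
 */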
465
466static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
467{
468 struct octeon_mgmt *p = netdev_priv(netdev);
469 int port = p->port;
470 int i;
471 union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
472 union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
473 unsigned long flags;
474 unsigned int prev_packet_enable;
475 unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
476 unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
477 struct octeon_mgmt_cam_state cam_state;
478 struct dev_addr_list *list;
479 struct list_head *pos;
480 int available_cam_entries;
481
482 memset(&cam_state, 0, sizeof(cam_state));
483
484 if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) {
485 cam_mode = 0;
486 available_cam_entries = 8;
487 } else {
488 /*
489		 * One CAM entry is used for the primary address, which leaves
490		 * seven for the secondary addresses.
491 */
492 available_cam_entries = 7 - netdev->dev_addrs.count;
493 }
494
495 if (netdev->flags & IFF_MULTICAST) {
496 if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI)
497 || netdev->mc_count > available_cam_entries)
498			multicast_mode = 2; /* 2 - Accept all multicast. */
499 else
500 multicast_mode = 0; /* 0 - Use CAM. */
501 }
502
503 if (cam_mode == 1) {
504 /* Add primary address. */
505 octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
506 list_for_each(pos, &netdev->dev_addrs.list) {
507 struct netdev_hw_addr *hw_addr;
508 hw_addr = list_entry(pos, struct netdev_hw_addr, list);
509 octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
510 list = list->next;
511 }
512 }
513 if (multicast_mode == 0) {
514 i = netdev->mc_count;
515 list = netdev->mc_list;
516 while (i--) {
517 octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
518 list = list->next;
519 }
520 }
521
522
523 spin_lock_irqsave(&p->lock, flags);
524
525 /* Disable packet I/O. */
526 agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
527 prev_packet_enable = agl_gmx_prtx.s.en;
528 agl_gmx_prtx.s.en = 0;
529 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
530
531
532 adr_ctl.u64 = 0;
533 adr_ctl.s.cam_mode = cam_mode;
534 adr_ctl.s.mcst = multicast_mode;
535 adr_ctl.s.bcst = 1; /* Allow broadcast */
536
537 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);
538
539 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
540 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
541 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
542 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
543 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
544 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
545 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);
546
547 /* Restore packet I/O. */
548 agl_gmx_prtx.s.en = prev_packet_enable;
549 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
550
551 spin_unlock_irqrestore(&p->lock, flags);
552}
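/*
 * Editor's worked example (not from the original source): the CAM has
 * eight entries in total.  When cam_mode stays 1, available_cam_entries
 * is 7 - netdev->dev_addrs.count, so e.g. a count of 2 leaves five slots
 * for exact multicast matches; if more multicast addresses than that are
 * configured, or IFF_ALLMULTI/IFF_PROMISC is set, the code above falls
 * back to multicast_mode = 2 (accept all multicast).
 */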
553
554static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
555{
556 struct sockaddr *sa = addr;
557
558 if (!is_valid_ether_addr(sa->sa_data))
559 return -EADDRNOTAVAIL;
560
561 memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
562
563 octeon_mgmt_set_rx_filtering(netdev);
564
565 return 0;
566}
567
568static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
569{
570 struct octeon_mgmt *p = netdev_priv(netdev);
571 int port = p->port;
572 int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
573
574 /*
575	 * Limit the MTU to make sure the Ethernet packets are between
576 * 64 bytes and 16383 bytes.
577 */
578 if (size_without_fcs < 64 || size_without_fcs > 16383) {
579 dev_warn(p->dev, "MTU must be between %d and %d.\n",
580 64 - OCTEON_MGMT_RX_HEADROOM,
581 16383 - OCTEON_MGMT_RX_HEADROOM);
582 return -EINVAL;
583 }
584
585 netdev->mtu = new_mtu;
586
587 cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
588 cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
589 (size_without_fcs + 7) & 0xfff8);
590
591 return 0;
592}
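/*
 * Editor's worked example (assumption: OCTEON_MGMT_RX_HEADROOM is
 * ETH_HLEN + ETH_FCS_LEN = 18 bytes, defined earlier in this file and
 * not visible in this excerpt).  The accepted MTU range is then
 * 46..16365, and an MTU of 1500 programs FRM_MAX to 1518 and JABBER to
 * (1518 + 7) & 0xfff8 = 1520.
 */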
593
594static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
595{
596 struct net_device *netdev = dev_id;
597 struct octeon_mgmt *p = netdev_priv(netdev);
598 int port = p->port;
599 union cvmx_mixx_isr mixx_isr;
600
601 mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
602
603 /* Clear any pending interrupts */
604 cvmx_write_csr(CVMX_MIXX_ISR(port),
605 cvmx_read_csr(CVMX_MIXX_ISR(port)));
606 cvmx_read_csr(CVMX_MIXX_ISR(port));
607
608 if (mixx_isr.s.irthresh) {
609 octeon_mgmt_disable_rx_irq(p);
610 napi_schedule(&p->napi);
611 }
612 if (mixx_isr.s.orthresh) {
613 octeon_mgmt_disable_tx_irq(p);
614 tasklet_schedule(&p->tx_clean_tasklet);
615 }
616
617 return IRQ_HANDLED;
618}
619
620static int octeon_mgmt_ioctl(struct net_device *netdev,
621 struct ifreq *rq, int cmd)
622{
623 struct octeon_mgmt *p = netdev_priv(netdev);
624
625 if (!netif_running(netdev))
626 return -EINVAL;
627
628 if (!p->phydev)
629 return -EINVAL;
630
631 return phy_mii_ioctl(p->phydev, if_mii(rq), cmd);
632}
633
634static void octeon_mgmt_adjust_link(struct net_device *netdev)
635{
636 struct octeon_mgmt *p = netdev_priv(netdev);
637 int port = p->port;
638 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
639 unsigned long flags;
640 int link_changed = 0;
641
642 spin_lock_irqsave(&p->lock, flags);
643 if (p->phydev->link) {
644 if (!p->last_link)
645 link_changed = 1;
646 if (p->last_duplex != p->phydev->duplex) {
647 p->last_duplex = p->phydev->duplex;
648 prtx_cfg.u64 =
649 cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
650 prtx_cfg.s.duplex = p->phydev->duplex;
651 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
652 prtx_cfg.u64);
653 }
654 } else {
655 if (p->last_link)
656 link_changed = -1;
657 }
658 p->last_link = p->phydev->link;
659 spin_unlock_irqrestore(&p->lock, flags);
660
661 if (link_changed != 0) {
662 if (link_changed > 0) {
663 netif_carrier_on(netdev);
664 pr_info("%s: Link is up - %d/%s\n", netdev->name,
665 p->phydev->speed,
666 DUPLEX_FULL == p->phydev->duplex ?
667 "Full" : "Half");
668 } else {
669 netif_carrier_off(netdev);
670 pr_info("%s: Link is down\n", netdev->name);
671 }
672 }
673}
674
675static int octeon_mgmt_init_phy(struct net_device *netdev)
676{
677 struct octeon_mgmt *p = netdev_priv(netdev);
678 char phy_id[20];
679
680 if (octeon_is_simulation()) {
681 /* No PHYs in the simulator. */
682 netif_carrier_on(netdev);
683 return 0;
684 }
685
686 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);
687
688 p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
689 PHY_INTERFACE_MODE_MII);
690
691 if (IS_ERR(p->phydev)) {
692 p->phydev = NULL;
693 return -1;
694 }
695
696 phy_start_aneg(p->phydev);
697
698 return 0;
699}
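/*
 * Editor's note (illustration, not from the original source): with
 * PHY_ID_FMT being "%s:%02x" in this kernel, the snprintf() above
 * produces ids such as "0:00" or "0:01", i.e. the PHY address equals
 * the management port number on MDIO bus "0", the bus id registered by
 * the mdio-octeon driver added later in this patch.
 */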
700
701static int octeon_mgmt_open(struct net_device *netdev)
702{
703 struct octeon_mgmt *p = netdev_priv(netdev);
704 int port = p->port;
705 union cvmx_mixx_ctl mix_ctl;
706 union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
707 union cvmx_mixx_oring1 oring1;
708 union cvmx_mixx_iring1 iring1;
709 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
710 union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
711 union cvmx_mixx_irhwm mix_irhwm;
712 union cvmx_mixx_orhwm mix_orhwm;
713 union cvmx_mixx_intena mix_intena;
714 struct sockaddr sa;
715
716 /* Allocate ring buffers. */
717 p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
718 GFP_KERNEL);
719 if (!p->tx_ring)
720 return -ENOMEM;
721 p->tx_ring_handle =
722 dma_map_single(p->dev, p->tx_ring,
723 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
724 DMA_BIDIRECTIONAL);
725 p->tx_next = 0;
726 p->tx_next_clean = 0;
727 p->tx_current_fill = 0;
728
729
730 p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
731 GFP_KERNEL);
732 if (!p->rx_ring)
733 goto err_nomem;
734 p->rx_ring_handle =
735 dma_map_single(p->dev, p->rx_ring,
736 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
737 DMA_BIDIRECTIONAL);
738
739 p->rx_next = 0;
740 p->rx_next_fill = 0;
741 p->rx_current_fill = 0;
742
743 octeon_mgmt_reset_hw(p);
744
745 mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
746
747 /* Bring it out of reset if needed. */
748 if (mix_ctl.s.reset) {
749 mix_ctl.s.reset = 0;
750 cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
751 do {
752 mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
753 } while (mix_ctl.s.reset);
754 }
755
756 agl_gmx_inf_mode.u64 = 0;
757 agl_gmx_inf_mode.s.en = 1;
758 cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
759
760 oring1.u64 = 0;
761 oring1.s.obase = p->tx_ring_handle >> 3;
762 oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
763 cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);
764
765 iring1.u64 = 0;
766 iring1.s.ibase = p->rx_ring_handle >> 3;
767 iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
768 cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
769
770 /* Disable packet I/O. */
771 prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
772 prtx_cfg.s.en = 0;
773 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
774
775 memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
776 octeon_mgmt_set_mac_address(netdev, &sa);
777
778 octeon_mgmt_change_mtu(netdev, netdev->mtu);
779
780 /*
781	 * Enable the port HW. Packets are not allowed until packet
782	 * I/O is enabled later in this function.
783 */
784 mix_ctl.u64 = 0;
785 mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
786 mix_ctl.s.en = 1; /* Enable the port */
787 mix_ctl.s.nbtarb = 0; /* Arbitration mode */
788 /* MII CB-request FIFO programmable high watermark */
789 mix_ctl.s.mrq_hwm = 1;
790 cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
791
792 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
793 || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
794 /*
795 * Force compensation values, as they are not
796 * determined properly by HW
797 */
798 union cvmx_agl_gmx_drv_ctl drv_ctl;
799
800 drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
801 if (port) {
802 drv_ctl.s.byp_en1 = 1;
803 drv_ctl.s.nctl1 = 6;
804 drv_ctl.s.pctl1 = 6;
805 } else {
806 drv_ctl.s.byp_en = 1;
807 drv_ctl.s.nctl = 6;
808 drv_ctl.s.pctl = 6;
809 }
810 cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
811 }
812
813 octeon_mgmt_rx_fill_ring(netdev);
814
815 /* Clear statistics. */
816 /* Clear on read. */
817 cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
818 cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
819 cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);
820
821 cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
822 cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
823 cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);
824
825 /* Clear any pending interrupts */
826 cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));
827
828 if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
829 netdev)) {
830 dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
831 goto err_noirq;
832 }
833
834	/* Interrupt on every single RX packet */
835 mix_irhwm.u64 = 0;
836 mix_irhwm.s.irhwm = 0;
837 cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
838
839 /* Interrupt when we have 5 or more packets to clean. */
840 mix_orhwm.u64 = 0;
841 mix_orhwm.s.orhwm = 5;
842 cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
843
844 /* Enable receive and transmit interrupts */
845 mix_intena.u64 = 0;
846 mix_intena.s.ithena = 1;
847 mix_intena.s.othena = 1;
848 cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
849
850
851 /* Enable packet I/O. */
852
853 rxx_frm_ctl.u64 = 0;
854 rxx_frm_ctl.s.pre_align = 1;
855 /*
856 * When set, disables the length check for non-min sized pkts
857 * with padding in the client data.
858 */
859 rxx_frm_ctl.s.pad_len = 1;
860 /* When set, disables the length check for VLAN pkts */
861 rxx_frm_ctl.s.vlan_len = 1;
862 /* When set, PREAMBLE checking is less strict */
863 rxx_frm_ctl.s.pre_free = 1;
864 /* Control Pause Frames can match station SMAC */
865 rxx_frm_ctl.s.ctl_smac = 0;
866	/* Control Pause Frames can match globally assigned Multicast address */
867 rxx_frm_ctl.s.ctl_mcst = 1;
868 /* Forward pause information to TX block */
869 rxx_frm_ctl.s.ctl_bck = 1;
870 /* Drop Control Pause Frames */
871 rxx_frm_ctl.s.ctl_drp = 1;
872 /* Strip off the preamble */
873 rxx_frm_ctl.s.pre_strp = 1;
874 /*
875 * This port is configured to send PREAMBLE+SFD to begin every
876 * frame. GMX checks that the PREAMBLE is sent correctly.
877 */
878 rxx_frm_ctl.s.pre_chk = 1;
879 cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
880
881 /* Enable the AGL block */
882 agl_gmx_inf_mode.u64 = 0;
883 agl_gmx_inf_mode.s.en = 1;
884 cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
885
886 /* Configure the port duplex and enables */
887 prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
888 prtx_cfg.s.tx_en = 1;
889 prtx_cfg.s.rx_en = 1;
890 prtx_cfg.s.en = 1;
891 p->last_duplex = 1;
892 prtx_cfg.s.duplex = p->last_duplex;
893 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
894
895 p->last_link = 0;
896 netif_carrier_off(netdev);
897
898 if (octeon_mgmt_init_phy(netdev)) {
899 dev_err(p->dev, "Cannot initialize PHY.\n");
900 goto err_noirq;
901 }
902
903 netif_wake_queue(netdev);
904 napi_enable(&p->napi);
905
906 return 0;
907err_noirq:
908 octeon_mgmt_reset_hw(p);
909 dma_unmap_single(p->dev, p->rx_ring_handle,
910 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
911 DMA_BIDIRECTIONAL);
912 kfree(p->rx_ring);
913err_nomem:
914 dma_unmap_single(p->dev, p->tx_ring_handle,
915 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
916 DMA_BIDIRECTIONAL);
917 kfree(p->tx_ring);
918 return -ENOMEM;
919}
920
921static int octeon_mgmt_stop(struct net_device *netdev)
922{
923 struct octeon_mgmt *p = netdev_priv(netdev);
924
925 napi_disable(&p->napi);
926 netif_stop_queue(netdev);
927
928 if (p->phydev)
929 phy_disconnect(p->phydev);
930
931 netif_carrier_off(netdev);
932
933 octeon_mgmt_reset_hw(p);
934
935
936 free_irq(p->irq, netdev);
937
938 /* dma_unmap is a nop on Octeon, so just free everything. */
939 skb_queue_purge(&p->tx_list);
940 skb_queue_purge(&p->rx_list);
941
942 dma_unmap_single(p->dev, p->rx_ring_handle,
943 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
944 DMA_BIDIRECTIONAL);
945 kfree(p->rx_ring);
946
947 dma_unmap_single(p->dev, p->tx_ring_handle,
948 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
949 DMA_BIDIRECTIONAL);
950 kfree(p->tx_ring);
951
952
953 return 0;
954}
955
956static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
957{
958 struct octeon_mgmt *p = netdev_priv(netdev);
959 int port = p->port;
960 union mgmt_port_ring_entry re;
961 unsigned long flags;
962
963 re.d64 = 0;
964 re.s.len = skb->len;
965 re.s.addr = dma_map_single(p->dev, skb->data,
966 skb->len,
967 DMA_TO_DEVICE);
968
969 spin_lock_irqsave(&p->tx_list.lock, flags);
970
971 if (unlikely(p->tx_current_fill >=
972 ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
973 spin_unlock_irqrestore(&p->tx_list.lock, flags);
974
975 dma_unmap_single(p->dev, re.s.addr, re.s.len,
976 DMA_TO_DEVICE);
977
978 netif_stop_queue(netdev);
979 return NETDEV_TX_BUSY;
980 }
981
982 __skb_queue_tail(&p->tx_list, skb);
983
984 /* Put it in the ring. */
985 p->tx_ring[p->tx_next] = re.d64;
986 p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
987 p->tx_current_fill++;
988
989 spin_unlock_irqrestore(&p->tx_list.lock, flags);
990
991 dma_sync_single_for_device(p->dev, p->tx_ring_handle,
992 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
993 DMA_BIDIRECTIONAL);
994
995 netdev->stats.tx_packets++;
996 netdev->stats.tx_bytes += skb->len;
997
998 /* Ring the bell. */
999 cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
1000
1001 netdev->trans_start = jiffies;
1002 octeon_mgmt_clean_tx_buffers(p);
1003 octeon_mgmt_update_tx_stats(netdev);
1004 return NETDEV_TX_OK;
1005}
1006
1007#ifdef CONFIG_NET_POLL_CONTROLLER
1008static void octeon_mgmt_poll_controller(struct net_device *netdev)
1009{
1010 struct octeon_mgmt *p = netdev_priv(netdev);
1011
1012 octeon_mgmt_receive_packets(p, 16);
1013 octeon_mgmt_update_rx_stats(netdev);
1014 return;
1015}
1016#endif
1017
1018static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
1019 struct ethtool_drvinfo *info)
1020{
1021 strncpy(info->driver, DRV_NAME, sizeof(info->driver));
1022 strncpy(info->version, DRV_VERSION, sizeof(info->version));
1023 strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
1024 strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
1025 info->n_stats = 0;
1026 info->testinfo_len = 0;
1027 info->regdump_len = 0;
1028 info->eedump_len = 0;
1029}
1030
1031static int octeon_mgmt_get_settings(struct net_device *netdev,
1032 struct ethtool_cmd *cmd)
1033{
1034 struct octeon_mgmt *p = netdev_priv(netdev);
1035
1036 if (p->phydev)
1037 return phy_ethtool_gset(p->phydev, cmd);
1038
1039 return -EINVAL;
1040}
1041
1042static int octeon_mgmt_set_settings(struct net_device *netdev,
1043 struct ethtool_cmd *cmd)
1044{
1045 struct octeon_mgmt *p = netdev_priv(netdev);
1046
1047 if (!capable(CAP_NET_ADMIN))
1048 return -EPERM;
1049
1050 if (p->phydev)
1051 return phy_ethtool_sset(p->phydev, cmd);
1052
1053 return -EINVAL;
1054}
1055
1056static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
1057 .get_drvinfo = octeon_mgmt_get_drvinfo,
1058 .get_link = ethtool_op_get_link,
1059 .get_settings = octeon_mgmt_get_settings,
1060 .set_settings = octeon_mgmt_set_settings
1061};
1062
1063static const struct net_device_ops octeon_mgmt_ops = {
1064 .ndo_open = octeon_mgmt_open,
1065 .ndo_stop = octeon_mgmt_stop,
1066 .ndo_start_xmit = octeon_mgmt_xmit,
1067 .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
1068 .ndo_set_multicast_list = octeon_mgmt_set_rx_filtering,
1069 .ndo_set_mac_address = octeon_mgmt_set_mac_address,
1070 .ndo_do_ioctl = octeon_mgmt_ioctl,
1071 .ndo_change_mtu = octeon_mgmt_change_mtu,
1072#ifdef CONFIG_NET_POLL_CONTROLLER
1073 .ndo_poll_controller = octeon_mgmt_poll_controller,
1074#endif
1075};
1076
1077static int __init octeon_mgmt_probe(struct platform_device *pdev)
1078{
1079 struct resource *res_irq;
1080 struct net_device *netdev;
1081 struct octeon_mgmt *p;
1082 int i;
1083
1084 netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
1085 if (netdev == NULL)
1086 return -ENOMEM;
1087
1088 dev_set_drvdata(&pdev->dev, netdev);
1089 p = netdev_priv(netdev);
1090 netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
1091 OCTEON_MGMT_NAPI_WEIGHT);
1092
1093 p->netdev = netdev;
1094 p->dev = &pdev->dev;
1095
1096 p->port = pdev->id;
1097 snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
1098
1099 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1100 if (!res_irq)
1101 goto err;
1102
1103 p->irq = res_irq->start;
1104 spin_lock_init(&p->lock);
1105
1106 skb_queue_head_init(&p->tx_list);
1107 skb_queue_head_init(&p->rx_list);
1108 tasklet_init(&p->tx_clean_tasklet,
1109 octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
1110
1111 netdev->netdev_ops = &octeon_mgmt_ops;
1112 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1113
1114
1115 /* The mgmt ports get the first N MACs. */
1116 for (i = 0; i < 6; i++)
1117 netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
1118 netdev->dev_addr[5] += p->port;
1119
1120 if (p->port >= octeon_bootinfo->mac_addr_count)
1121 dev_err(&pdev->dev,
1122 "Error %s: Using MAC outside of the assigned range: "
1123 "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name,
1124 netdev->dev_addr[0], netdev->dev_addr[1],
1125 netdev->dev_addr[2], netdev->dev_addr[3],
1126 netdev->dev_addr[4], netdev->dev_addr[5]);
1127
1128 if (register_netdev(netdev))
1129 goto err;
1130
1131 dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
1132 return 0;
1133err:
1134 free_netdev(netdev);
1135 return -ENOENT;
1136}
1137
1138static int __exit octeon_mgmt_remove(struct platform_device *pdev)
1139{
1140 struct net_device *netdev = dev_get_drvdata(&pdev->dev);
1141
1142 unregister_netdev(netdev);
1143 free_netdev(netdev);
1144 return 0;
1145}
1146
1147static struct platform_driver octeon_mgmt_driver = {
1148 .driver = {
1149 .name = "octeon_mgmt",
1150 .owner = THIS_MODULE,
1151 },
1152 .probe = octeon_mgmt_probe,
1153 .remove = __exit_p(octeon_mgmt_remove),
1154};
1155
1156extern void octeon_mdiobus_force_mod_depencency(void);
1157
1158static int __init octeon_mgmt_mod_init(void)
1159{
1160 /* Force our mdiobus driver module to be loaded first. */
1161 octeon_mdiobus_force_mod_depencency();
1162 return platform_driver_register(&octeon_mgmt_driver);
1163}
1164
1165static void __exit octeon_mgmt_mod_exit(void)
1166{
1167 platform_driver_unregister(&octeon_mgmt_driver);
1168}
1169
1170module_init(octeon_mgmt_mod_init);
1171module_exit(octeon_mgmt_mod_exit);
1172
1173MODULE_DESCRIPTION(DRV_DESCRIPTION);
1174MODULE_AUTHOR("David Daney");
1175MODULE_LICENSE("GPL");
1176MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index d5d8e1c5bc91..fc5938ba3d78 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -115,4 +115,15 @@ config MDIO_GPIO
115 To compile this driver as a module, choose M here: the module 115 To compile this driver as a module, choose M here: the module
116 will be called mdio-gpio. 116 will be called mdio-gpio.
117 117
118config MDIO_OCTEON
119 tristate "Support for MDIO buses on Octeon SOCs"
120 depends on CPU_CAVIUM_OCTEON
121 default y
122 help
123
124 This module provides a driver for the Octeon MDIO busses.
125 It is required by the Octeon Ethernet device drivers.
126
127 If in doubt, say Y.
128
118endif # PHYLIB 129endif # PHYLIB
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index edfaac48cbd5..1342585af381 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o 20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
21obj-$(CONFIG_NATIONAL_PHY) += national.o 21obj-$(CONFIG_NATIONAL_PHY) += national.o
22obj-$(CONFIG_STE10XP) += ste10Xp.o 22obj-$(CONFIG_STE10XP) += ste10Xp.o
23obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f63c96a4ecb4..c13cf64095b6 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -326,7 +326,8 @@ error:
326 326
327static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev) 327static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
328{ 328{
329 u32 val, orig; 329 u32 orig;
330 int val;
330 bool clk125en = true; 331 bool clk125en = true;
331 332
332 /* Abort if we are using an untested phy. */ 333 /* Abort if we are using an untested phy. */
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
new file mode 100644
index 000000000000..61a4461cbda5
--- /dev/null
+++ b/drivers/net/phy/mdio-octeon.c
@@ -0,0 +1,180 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2009 Cavium Networks
7 */
8
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/platform_device.h>
12#include <linux/phy.h>
13
14#include <asm/octeon/octeon.h>
15#include <asm/octeon/cvmx-smix-defs.h>
16
17#define DRV_VERSION "1.0"
18#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver"
19
20struct octeon_mdiobus {
21 struct mii_bus *mii_bus;
22 int unit;
23 int phy_irq[PHY_MAX_ADDR];
24};
25
26static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
27{
28 struct octeon_mdiobus *p = bus->priv;
29 union cvmx_smix_cmd smi_cmd;
30 union cvmx_smix_rd_dat smi_rd;
31 int timeout = 1000;
32
33 smi_cmd.u64 = 0;
34 smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */
35 smi_cmd.s.phy_adr = phy_id;
36 smi_cmd.s.reg_adr = regnum;
37 cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64);
38
39 do {
40 /*
41 * Wait 1000 clocks so we don't saturate the RSL bus
42 * doing reads.
43 */
44 cvmx_wait(1000);
45 smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(p->unit));
46 } while (smi_rd.s.pending && --timeout);
47
48 if (smi_rd.s.val)
49 return smi_rd.s.dat;
50 else
51 return -EIO;
52}
53
54static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
55 int regnum, u16 val)
56{
57 struct octeon_mdiobus *p = bus->priv;
58 union cvmx_smix_cmd smi_cmd;
59 union cvmx_smix_wr_dat smi_wr;
60 int timeout = 1000;
61
62 smi_wr.u64 = 0;
63 smi_wr.s.dat = val;
64 cvmx_write_csr(CVMX_SMIX_WR_DAT(p->unit), smi_wr.u64);
65
66 smi_cmd.u64 = 0;
67 smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */
68 smi_cmd.s.phy_adr = phy_id;
69 smi_cmd.s.reg_adr = regnum;
70 cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64);
71
72 do {
73 /*
74 * Wait 1000 clocks so we don't saturate the RSL bus
75 * doing reads.
76 */
77 cvmx_wait(1000);
78 smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(p->unit));
79 } while (smi_wr.s.pending && --timeout);
80
81 if (timeout <= 0)
82 return -EIO;
83
84 return 0;
85}
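/*
 * Editor's sketch (not part of the original patch): once the bus is
 * registered below, PHY drivers reach these accessors through the
 * standard phylib helpers; a hypothetical caller reading the basic
 * status register of the PHY at address 1 could look like this.
 */
#if 0	/* illustration only */
static int example_read_bmsr(struct mii_bus *bus)
{
	/* MII_BMSR is from <linux/mii.h>; the call dispatches to
	 * octeon_mdiobus_read() through bus->read. */
	return mdiobus_read(bus, 1, MII_BMSR);
}
#endif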
86
87static int __init octeon_mdiobus_probe(struct platform_device *pdev)
88{
89 struct octeon_mdiobus *bus;
90 int i;
91 int err = -ENOENT;
92
93 bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
94 if (!bus)
95 return -ENOMEM;
96
97 /* The platform_device id is our unit number. */
98 bus->unit = pdev->id;
99
100 bus->mii_bus = mdiobus_alloc();
101
102 if (!bus->mii_bus)
103 goto err;
104
105 /*
106 * Standard Octeon evaluation boards don't support phy
107 * interrupts, so we need to poll.
108 */
109 for (i = 0; i < PHY_MAX_ADDR; i++)
110 bus->phy_irq[i] = PHY_POLL;
111
112 bus->mii_bus->priv = bus;
113 bus->mii_bus->irq = bus->phy_irq;
114 bus->mii_bus->name = "mdio-octeon";
115 snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%x", bus->unit);
116 bus->mii_bus->parent = &pdev->dev;
117
118 bus->mii_bus->read = octeon_mdiobus_read;
119 bus->mii_bus->write = octeon_mdiobus_write;
120
121 dev_set_drvdata(&pdev->dev, bus);
122
123 err = mdiobus_register(bus->mii_bus);
124 if (err)
125 goto err_register;
126
127 dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
128
129 return 0;
130err_register:
131 mdiobus_free(bus->mii_bus);
132
133err:
134 devm_kfree(&pdev->dev, bus);
135 return err;
136}
137
138static int __exit octeon_mdiobus_remove(struct platform_device *pdev)
139{
140 struct octeon_mdiobus *bus;
141
142 bus = dev_get_drvdata(&pdev->dev);
143
144 mdiobus_unregister(bus->mii_bus);
145 mdiobus_free(bus->mii_bus);
146 return 0;
147}
148
149static struct platform_driver octeon_mdiobus_driver = {
150 .driver = {
151 .name = "mdio-octeon",
152 .owner = THIS_MODULE,
153 },
154 .probe = octeon_mdiobus_probe,
155 .remove = __exit_p(octeon_mdiobus_remove),
156};
157
158void octeon_mdiobus_force_mod_depencency(void)
159{
160 /* Let ethernet drivers force us to be loaded. */
161}
162EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency);
163
164static int __init octeon_mdiobus_mod_init(void)
165{
166 return platform_driver_register(&octeon_mdiobus_driver);
167}
168
169static void __exit octeon_mdiobus_mod_exit(void)
170{
171 platform_driver_unregister(&octeon_mdiobus_driver);
172}
173
174module_init(octeon_mdiobus_mod_init);
175module_exit(octeon_mdiobus_mod_exit);
176
177MODULE_DESCRIPTION(DRV_DESCRIPTION);
178MODULE_VERSION(DRV_VERSION);
179MODULE_AUTHOR("David Daney");
180MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 675b7df632fc..27ca859e7453 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -63,7 +63,7 @@
63#ifndef __iwl_core_h__ 63#ifndef __iwl_core_h__
64#define __iwl_core_h__ 64#define __iwl_core_h__
65 65
66#include <linux/utsrelease.h> 66#include <generated/utsrelease.h>
67 67
68/************************ 68/************************
69 * forward declarations * 69 * forward declarations *
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index b9b371bfa30f..42611bea76a3 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -1365,7 +1365,7 @@ static void lbs_send_confirmsleep(struct lbs_private *priv)
1365 priv->dnld_sent = DNLD_RES_RECEIVED; 1365 priv->dnld_sent = DNLD_RES_RECEIVED;
1366 1366
1367 /* If nothing to do, go back to sleep (?) */ 1367 /* If nothing to do, go back to sleep (?) */
1368 if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx]) 1368 if (!kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx])
1369 priv->psstate = PS_STATE_SLEEP; 1369 priv->psstate = PS_STATE_SLEEP;
1370 1370
1371 spin_unlock_irqrestore(&priv->driver_lock, flags); 1371 spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -1439,7 +1439,7 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv)
1439 } 1439 }
1440 1440
1441 /* Pending events or command responses? */ 1441 /* Pending events or command responses? */
1442 if (__kfifo_len(priv->event_fifo) || priv->resp_len[priv->resp_idx]) { 1442 if (kfifo_len(&priv->event_fifo) || priv->resp_len[priv->resp_idx]) {
1443 allowed = 0; 1443 allowed = 0;
1444 lbs_deb_host("pending events or command responses\n"); 1444 lbs_deb_host("pending events or command responses\n");
1445 } 1445 }
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 6a8d2b291d8c..05bb298dfae9 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -10,7 +10,7 @@
10#include "scan.h" 10#include "scan.h"
11#include "assoc.h" 11#include "assoc.h"
12 12
13 13#include <linux/kfifo.h>
14 14
15/** sleep_params */ 15/** sleep_params */
16struct sleep_params { 16struct sleep_params {
@@ -120,7 +120,7 @@ struct lbs_private {
120 u32 resp_len[2]; 120 u32 resp_len[2];
121 121
122 /* Events sent from hardware to driver */ 122 /* Events sent from hardware to driver */
123 struct kfifo *event_fifo; 123 struct kfifo event_fifo;
124 124
125 /** thread to service interrupts */ 125 /** thread to service interrupts */
126 struct task_struct *main_thread; 126 struct task_struct *main_thread;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index db38a5a719fa..c2975c8e2f21 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -459,7 +459,7 @@ static int lbs_thread(void *data)
459 else if (!list_empty(&priv->cmdpendingq) && 459 else if (!list_empty(&priv->cmdpendingq) &&
460 !(priv->wakeup_dev_required)) 460 !(priv->wakeup_dev_required))
461 shouldsleep = 0; /* We have a command to send */ 461 shouldsleep = 0; /* We have a command to send */
462 else if (__kfifo_len(priv->event_fifo)) 462 else if (kfifo_len(&priv->event_fifo))
463 shouldsleep = 0; /* We have an event to process */ 463 shouldsleep = 0; /* We have an event to process */
464 else 464 else
465 shouldsleep = 1; /* No command */ 465 shouldsleep = 1; /* No command */
@@ -511,10 +511,13 @@ static int lbs_thread(void *data)
511 511
512 /* Process hardware events, e.g. card removed, link lost */ 512 /* Process hardware events, e.g. card removed, link lost */
513 spin_lock_irq(&priv->driver_lock); 513 spin_lock_irq(&priv->driver_lock);
514 while (__kfifo_len(priv->event_fifo)) { 514 while (kfifo_len(&priv->event_fifo)) {
515 u32 event; 515 u32 event;
516 __kfifo_get(priv->event_fifo, (unsigned char *) &event, 516
517 sizeof(event)); 517 if (kfifo_out(&priv->event_fifo,
518 (unsigned char *) &event, sizeof(event)) !=
519 sizeof(event))
520 break;
518 spin_unlock_irq(&priv->driver_lock); 521 spin_unlock_irq(&priv->driver_lock);
519 lbs_process_event(priv, event); 522 lbs_process_event(priv, event);
520 spin_lock_irq(&priv->driver_lock); 523 spin_lock_irq(&priv->driver_lock);
@@ -883,10 +886,9 @@ static int lbs_init_adapter(struct lbs_private *priv)
883 priv->resp_len[0] = priv->resp_len[1] = 0; 886 priv->resp_len[0] = priv->resp_len[1] = 0;
884 887
885 /* Create the event FIFO */ 888 /* Create the event FIFO */
886 priv->event_fifo = kfifo_alloc(sizeof(u32) * 16, GFP_KERNEL, NULL); 889 ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL);
887 if (IS_ERR(priv->event_fifo)) { 890 if (ret) {
888 lbs_pr_err("Out of memory allocating event FIFO buffer\n"); 891 lbs_pr_err("Out of memory allocating event FIFO buffer\n");
889 ret = -ENOMEM;
890 goto out; 892 goto out;
891 } 893 }
892 894
@@ -901,8 +903,7 @@ static void lbs_free_adapter(struct lbs_private *priv)
901 lbs_deb_enter(LBS_DEB_MAIN); 903 lbs_deb_enter(LBS_DEB_MAIN);
902 904
903 lbs_free_cmd_buffer(priv); 905 lbs_free_cmd_buffer(priv);
904 if (priv->event_fifo) 906 kfifo_free(&priv->event_fifo);
905 kfifo_free(priv->event_fifo);
906 del_timer(&priv->command_timer); 907 del_timer(&priv->command_timer);
907 del_timer(&priv->auto_deepsleep_timer); 908 del_timer(&priv->auto_deepsleep_timer);
908 kfree(priv->networks); 909 kfree(priv->networks);
@@ -1177,7 +1178,7 @@ void lbs_queue_event(struct lbs_private *priv, u32 event)
1177 if (priv->psstate == PS_STATE_SLEEP) 1178 if (priv->psstate == PS_STATE_SLEEP)
1178 priv->psstate = PS_STATE_AWAKE; 1179 priv->psstate = PS_STATE_AWAKE;
1179 1180
1180 __kfifo_put(priv->event_fifo, (unsigned char *) &event, sizeof(u32)); 1181 kfifo_in(&priv->event_fifo, (unsigned char *) &event, sizeof(u32));
1181 1182
1182 wake_up_interruptible(&priv->waitq); 1183 wake_up_interruptible(&priv->waitq);
1183 1184
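The hunks above (and the fujitsu-laptop/sony-laptop ones further down) all apply the same kfifo API conversion. A minimal sketch of the new-style usage, assuming only <linux/kfifo.h> and an illustrative structure of the editor's own (names are not from the patch):

	struct example_priv {
		struct kfifo fifo;	/* embedded, no longer a pointer */
		spinlock_t fifo_lock;	/* caller-owned; init with spin_lock_init() */
	};

	static int example_init(struct example_priv *priv)
	{
		/* kfifo_alloc() now fills in the embedded fifo and returns
		 * 0 or a -errno instead of a pointer to check with IS_ERR() */
		return kfifo_alloc(&priv->fifo, 16 * sizeof(u32), GFP_KERNEL);
	}

	static void example_queue_event(struct example_priv *priv, u32 event)
	{
		/* the locked helpers take the caller's spinlock explicitly */
		kfifo_in_locked(&priv->fifo, (unsigned char *)&event,
				sizeof(event), &priv->fifo_lock);
	}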
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 3aabf1e37988..76e640bccde8 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -291,7 +291,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
291 skt->nr = ops->first + i; 291 skt->nr = ops->first + i;
292 skt->ops = ops; 292 skt->ops = ops;
293 skt->socket.owner = ops->owner; 293 skt->socket.owner = ops->owner;
294 skt->socket.dev.parent = dev; 294 skt->socket.dev.parent = &dev->dev;
295 skt->socket.pci_irq = NO_IRQ; 295 skt->socket.pci_irq = NO_IRQ;
296 296
297 ret = pxa2xx_drv_pcmcia_add_one(skt); 297 ret = pxa2xx_drv_pcmcia_add_one(skt);
@@ -304,8 +304,8 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
304 soc_pcmcia_remove_one(&sinfo->skt[i]); 304 soc_pcmcia_remove_one(&sinfo->skt[i]);
305 kfree(sinfo); 305 kfree(sinfo);
306 } else { 306 } else {
307 pxa2xx_configure_sockets(dev); 307 pxa2xx_configure_sockets(&dev->dev);
308 dev_set_drvdata(dev, sinfo); 308 dev_set_drvdata(&dev->dev, sinfo);
309 } 309 }
310 310
311 return ret; 311 return ret;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 11003bba10d3..1a387e79f719 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -51,7 +51,6 @@
51#include <linux/dmi.h> 51#include <linux/dmi.h>
52#include <linux/backlight.h> 52#include <linux/backlight.h>
53#include <linux/platform_device.h> 53#include <linux/platform_device.h>
54#include <linux/autoconf.h>
55 54
56#define COMPAL_DRIVER_VERSION "0.2.6" 55#define COMPAL_DRIVER_VERSION "0.2.6"
57 56
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index bcd4ba8be7db..b66029bd75d0 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -164,7 +164,7 @@ struct fujitsu_hotkey_t {
164 struct input_dev *input; 164 struct input_dev *input;
165 char phys[32]; 165 char phys[32];
166 struct platform_device *pf_device; 166 struct platform_device *pf_device;
167 struct kfifo *fifo; 167 struct kfifo fifo;
168 spinlock_t fifo_lock; 168 spinlock_t fifo_lock;
169 int rfkill_supported; 169 int rfkill_supported;
170 int rfkill_state; 170 int rfkill_state;
@@ -824,12 +824,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
824 824
825 /* kfifo */ 825 /* kfifo */
826 spin_lock_init(&fujitsu_hotkey->fifo_lock); 826 spin_lock_init(&fujitsu_hotkey->fifo_lock);
827 fujitsu_hotkey->fifo = 827 error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int),
828 kfifo_alloc(RINGBUFFERSIZE * sizeof(int), GFP_KERNEL, 828 GFP_KERNEL);
829 &fujitsu_hotkey->fifo_lock); 829 if (error) {
830 if (IS_ERR(fujitsu_hotkey->fifo)) {
831 printk(KERN_ERR "kfifo_alloc failed\n"); 830 printk(KERN_ERR "kfifo_alloc failed\n");
832 error = PTR_ERR(fujitsu_hotkey->fifo);
833 goto err_stop; 831 goto err_stop;
834 } 832 }
835 833
@@ -934,7 +932,7 @@ err_unregister_input_dev:
934err_free_input_dev: 932err_free_input_dev:
935 input_free_device(input); 933 input_free_device(input);
936err_free_fifo: 934err_free_fifo:
937 kfifo_free(fujitsu_hotkey->fifo); 935 kfifo_free(&fujitsu_hotkey->fifo);
938err_stop: 936err_stop:
939 return result; 937 return result;
940} 938}
@@ -956,7 +954,7 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type)
956 954
957 input_free_device(input); 955 input_free_device(input);
958 956
959 kfifo_free(fujitsu_hotkey->fifo); 957 kfifo_free(&fujitsu_hotkey->fifo);
960 958
961 fujitsu_hotkey->acpi_handle = NULL; 959 fujitsu_hotkey->acpi_handle = NULL;
962 960
@@ -1008,9 +1006,10 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
1008 vdbg_printk(FUJLAPTOP_DBG_TRACE, 1006 vdbg_printk(FUJLAPTOP_DBG_TRACE,
1009 "Push keycode into ringbuffer [%d]\n", 1007 "Push keycode into ringbuffer [%d]\n",
1010 keycode); 1008 keycode);
1011 status = kfifo_put(fujitsu_hotkey->fifo, 1009 status = kfifo_in_locked(&fujitsu_hotkey->fifo,
1012 (unsigned char *)&keycode, 1010 (unsigned char *)&keycode,
1013 sizeof(keycode)); 1011 sizeof(keycode),
1012 &fujitsu_hotkey->fifo_lock);
1014 if (status != sizeof(keycode)) { 1013 if (status != sizeof(keycode)) {
1015 vdbg_printk(FUJLAPTOP_DBG_WARN, 1014 vdbg_printk(FUJLAPTOP_DBG_WARN,
1016 "Could not push keycode [0x%x]\n", 1015 "Could not push keycode [0x%x]\n",
@@ -1021,11 +1020,12 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
1021 } 1020 }
1022 } else if (keycode == 0) { 1021 } else if (keycode == 0) {
1023 while ((status = 1022 while ((status =
1024 kfifo_get 1023 kfifo_out_locked(
1025 (fujitsu_hotkey->fifo, (unsigned char *) 1024 &fujitsu_hotkey->fifo,
1026 &keycode_r, 1025 (unsigned char *) &keycode_r,
1027 sizeof 1026 sizeof(keycode_r),
1028 (keycode_r))) == sizeof(keycode_r)) { 1027 &fujitsu_hotkey->fifo_lock))
1028 == sizeof(keycode_r)) {
1029 input_report_key(input, keycode_r, 0); 1029 input_report_key(input, keycode_r, 0);
1030 input_sync(input); 1030 input_sync(input);
1031 vdbg_printk(FUJLAPTOP_DBG_TRACE, 1031 vdbg_printk(FUJLAPTOP_DBG_TRACE,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 7a2cc8a5c975..2896ca4cd9ab 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -142,7 +142,7 @@ struct sony_laptop_input_s {
142 atomic_t users; 142 atomic_t users;
143 struct input_dev *jog_dev; 143 struct input_dev *jog_dev;
144 struct input_dev *key_dev; 144 struct input_dev *key_dev;
145 struct kfifo *fifo; 145 struct kfifo fifo;
146 spinlock_t fifo_lock; 146 spinlock_t fifo_lock;
147 struct workqueue_struct *wq; 147 struct workqueue_struct *wq;
148}; 148};
@@ -300,8 +300,9 @@ static void do_sony_laptop_release_key(struct work_struct *work)
300{ 300{
301 struct sony_laptop_keypress kp; 301 struct sony_laptop_keypress kp;
302 302
303 while (kfifo_get(sony_laptop_input.fifo, (unsigned char *)&kp, 303 while (kfifo_out_locked(&sony_laptop_input.fifo, (unsigned char *)&kp,
304 sizeof(kp)) == sizeof(kp)) { 304 sizeof(kp), &sony_laptop_input.fifo_lock)
305 == sizeof(kp)) {
305 msleep(10); 306 msleep(10);
306 input_report_key(kp.dev, kp.key, 0); 307 input_report_key(kp.dev, kp.key, 0);
307 input_sync(kp.dev); 308 input_sync(kp.dev);
@@ -362,8 +363,9 @@ static void sony_laptop_report_input_event(u8 event)
362 /* we emit the scancode so we can always remap the key */ 363 /* we emit the scancode so we can always remap the key */
363 input_event(kp.dev, EV_MSC, MSC_SCAN, event); 364 input_event(kp.dev, EV_MSC, MSC_SCAN, event);
364 input_sync(kp.dev); 365 input_sync(kp.dev);
365 kfifo_put(sony_laptop_input.fifo, 366 kfifo_in_locked(&sony_laptop_input.fifo,
366 (unsigned char *)&kp, sizeof(kp)); 367 (unsigned char *)&kp, sizeof(kp),
368 &sony_laptop_input.fifo_lock);
367 369
368 if (!work_pending(&sony_laptop_release_key_work)) 370 if (!work_pending(&sony_laptop_release_key_work))
369 queue_work(sony_laptop_input.wq, 371 queue_work(sony_laptop_input.wq,
@@ -385,12 +387,10 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
385 387
386 /* kfifo */ 388 /* kfifo */
387 spin_lock_init(&sony_laptop_input.fifo_lock); 389 spin_lock_init(&sony_laptop_input.fifo_lock);
388 sony_laptop_input.fifo = 390 error =
389 kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL, 391 kfifo_alloc(&sony_laptop_input.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
390 &sony_laptop_input.fifo_lock); 392 if (error) {
391 if (IS_ERR(sony_laptop_input.fifo)) {
392 printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); 393 printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
393 error = PTR_ERR(sony_laptop_input.fifo);
394 goto err_dec_users; 394 goto err_dec_users;
395 } 395 }
396 396
@@ -474,7 +474,7 @@ err_destroy_wq:
474 destroy_workqueue(sony_laptop_input.wq); 474 destroy_workqueue(sony_laptop_input.wq);
475 475
476err_free_kfifo: 476err_free_kfifo:
477 kfifo_free(sony_laptop_input.fifo); 477 kfifo_free(&sony_laptop_input.fifo);
478 478
479err_dec_users: 479err_dec_users:
480 atomic_dec(&sony_laptop_input.users); 480 atomic_dec(&sony_laptop_input.users);
@@ -500,7 +500,7 @@ static void sony_laptop_remove_input(void)
500 } 500 }
501 501
502 destroy_workqueue(sony_laptop_input.wq); 502 destroy_workqueue(sony_laptop_input.wq);
503 kfifo_free(sony_laptop_input.fifo); 503 kfifo_free(&sony_laptop_input.fifo);
504} 504}
505 505
506/*********** Platform Device ***********/ 506/*********** Platform Device ***********/
@@ -2079,7 +2079,7 @@ static struct attribute_group spic_attribute_group = {
2079 2079
2080struct sonypi_compat_s { 2080struct sonypi_compat_s {
2081 struct fasync_struct *fifo_async; 2081 struct fasync_struct *fifo_async;
2082 struct kfifo *fifo; 2082 struct kfifo fifo;
2083 spinlock_t fifo_lock; 2083 spinlock_t fifo_lock;
2084 wait_queue_head_t fifo_proc_list; 2084 wait_queue_head_t fifo_proc_list;
2085 atomic_t open_count; 2085 atomic_t open_count;
@@ -2104,12 +2104,12 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
2104 /* Flush input queue on first open */ 2104 /* Flush input queue on first open */
2105 unsigned long flags; 2105 unsigned long flags;
2106 2106
2107 spin_lock_irqsave(sonypi_compat.fifo->lock, flags); 2107 spin_lock_irqsave(&sonypi_compat.fifo_lock, flags);
2108 2108
2109 if (atomic_inc_return(&sonypi_compat.open_count) == 1) 2109 if (atomic_inc_return(&sonypi_compat.open_count) == 1)
2110 __kfifo_reset(sonypi_compat.fifo); 2110 kfifo_reset(&sonypi_compat.fifo);
2111 2111
2112 spin_unlock_irqrestore(sonypi_compat.fifo->lock, flags); 2112 spin_unlock_irqrestore(&sonypi_compat.fifo_lock, flags);
2113 2113
2114 return 0; 2114 return 0;
2115} 2115}
@@ -2120,17 +2120,18 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
2120 ssize_t ret; 2120 ssize_t ret;
2121 unsigned char c; 2121 unsigned char c;
2122 2122
2123 if ((kfifo_len(sonypi_compat.fifo) == 0) && 2123 if ((kfifo_len(&sonypi_compat.fifo) == 0) &&
2124 (file->f_flags & O_NONBLOCK)) 2124 (file->f_flags & O_NONBLOCK))
2125 return -EAGAIN; 2125 return -EAGAIN;
2126 2126
2127 ret = wait_event_interruptible(sonypi_compat.fifo_proc_list, 2127 ret = wait_event_interruptible(sonypi_compat.fifo_proc_list,
2128 kfifo_len(sonypi_compat.fifo) != 0); 2128 kfifo_len(&sonypi_compat.fifo) != 0);
2129 if (ret) 2129 if (ret)
2130 return ret; 2130 return ret;
2131 2131
2132 while (ret < count && 2132 while (ret < count &&
2133 (kfifo_get(sonypi_compat.fifo, &c, sizeof(c)) == sizeof(c))) { 2133 (kfifo_out_locked(&sonypi_compat.fifo, &c, sizeof(c),
2134 &sonypi_compat.fifo_lock) == sizeof(c))) {
2134 if (put_user(c, buf++)) 2135 if (put_user(c, buf++))
2135 return -EFAULT; 2136 return -EFAULT;
2136 ret++; 2137 ret++;
@@ -2147,7 +2148,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
2147static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) 2148static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
2148{ 2149{
2149 poll_wait(file, &sonypi_compat.fifo_proc_list, wait); 2150 poll_wait(file, &sonypi_compat.fifo_proc_list, wait);
2150 if (kfifo_len(sonypi_compat.fifo)) 2151 if (kfifo_len(&sonypi_compat.fifo))
2151 return POLLIN | POLLRDNORM; 2152 return POLLIN | POLLRDNORM;
2152 return 0; 2153 return 0;
2153} 2154}
@@ -2309,7 +2310,8 @@ static struct miscdevice sonypi_misc_device = {
2309 2310
2310static void sonypi_compat_report_event(u8 event) 2311static void sonypi_compat_report_event(u8 event)
2311{ 2312{
2312 kfifo_put(sonypi_compat.fifo, (unsigned char *)&event, sizeof(event)); 2313 kfifo_in_locked(&sonypi_compat.fifo, (unsigned char *)&event,
2314 sizeof(event), &sonypi_compat.fifo_lock);
2313 kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN); 2315 kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN);
2314 wake_up_interruptible(&sonypi_compat.fifo_proc_list); 2316 wake_up_interruptible(&sonypi_compat.fifo_proc_list);
2315} 2317}
@@ -2319,11 +2321,11 @@ static int sonypi_compat_init(void)
2319 int error; 2321 int error;
2320 2322
2321 spin_lock_init(&sonypi_compat.fifo_lock); 2323 spin_lock_init(&sonypi_compat.fifo_lock);
2322 sonypi_compat.fifo = kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL, 2324 error =
2323 &sonypi_compat.fifo_lock); 2325 kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
2324 if (IS_ERR(sonypi_compat.fifo)) { 2326 if (error) {
2325 printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); 2327 printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
2326 return PTR_ERR(sonypi_compat.fifo); 2328 return error;
2327 } 2329 }
2328 2330
2329 init_waitqueue_head(&sonypi_compat.fifo_proc_list); 2331 init_waitqueue_head(&sonypi_compat.fifo_proc_list);
@@ -2342,14 +2344,14 @@ static int sonypi_compat_init(void)
2342 return 0; 2344 return 0;
2343 2345
2344err_free_kfifo: 2346err_free_kfifo:
2345 kfifo_free(sonypi_compat.fifo); 2347 kfifo_free(&sonypi_compat.fifo);
2346 return error; 2348 return error;
2347} 2349}
2348 2350
2349static void sonypi_compat_exit(void) 2351static void sonypi_compat_exit(void)
2350{ 2352{
2351 misc_deregister(&sonypi_misc_device); 2353 misc_deregister(&sonypi_misc_device);
2352 kfifo_free(sonypi_compat.fifo); 2354 kfifo_free(&sonypi_compat.fifo);
2353} 2355}
2354#else 2356#else
2355static int sonypi_compat_init(void) { return 0; } 2357static int sonypi_compat_init(void) { return 0; }
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
new file mode 100644
index 000000000000..04719551381b
--- /dev/null
+++ b/drivers/regulator/88pm8607.c
@@ -0,0 +1,685 @@
1/*
2 * Regulators driver for Marvell 88PM8607
3 *
4 * Copyright (C) 2009 Marvell International Ltd.
5 * Haojian Zhuang <haojian.zhuang@marvell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/err.h>
14#include <linux/platform_device.h>
15#include <linux/regulator/driver.h>
16#include <linux/regulator/machine.h>
17#include <linux/mfd/88pm8607.h>
18
19struct pm8607_regulator_info {
20 struct regulator_desc desc;
21 struct pm8607_chip *chip;
22 struct regulator_dev *regulator;
23
24 int min_uV;
25 int max_uV;
26 int step_uV;
27 int vol_reg;
28 int vol_shift;
29 int vol_nbits;
30 int update_reg;
31 int update_bit;
32 int enable_reg;
33 int enable_bit;
34 int slope_double;
35};
36
37static inline int check_range(struct pm8607_regulator_info *info,
38 int min_uV, int max_uV)
39{
40 if (max_uV < info->min_uV || min_uV > info->max_uV || min_uV > max_uV)
41 return -EINVAL;
42
43 return 0;
44}
45
46static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
47{
48 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
49 uint8_t chip_id = info->chip->chip_id;
50 int ret = -EINVAL;
51
52 switch (info->desc.id) {
53 case PM8607_ID_BUCK1:
54 ret = (index < 0x1d) ? (index * 25000 + 800000) :
55 ((index < 0x20) ? 1500000 :
56 ((index < 0x40) ? ((index - 0x20) * 25000) :
57 -EINVAL));
58 break;
59 case PM8607_ID_BUCK3:
60 ret = (index < 0x3d) ? (index * 25000) :
61 ((index < 0x40) ? 1500000 : -EINVAL);
62 if (ret < 0)
63 break;
64 if (info->slope_double)
65 ret <<= 1;
66 break;
67 case PM8607_ID_LDO1:
68 ret = (index == 0) ? 1800000 :
69 ((index == 1) ? 1200000 :
70 ((index == 2) ? 2800000 : -EINVAL));
71 break;
72 case PM8607_ID_LDO5:
73 ret = (index == 0) ? 2900000 :
74 ((index == 1) ? 3000000 :
75 ((index == 2) ? 3100000 : 3300000));
76 break;
77 case PM8607_ID_LDO7:
78 case PM8607_ID_LDO8:
79 ret = (index < 3) ? (index * 50000 + 1800000) :
80 ((index < 8) ? (index * 50000 + 2550000) :
81 -EINVAL);
82 break;
83 case PM8607_ID_LDO12:
84 ret = (index < 2) ? (index * 100000 + 1800000) :
85 ((index < 7) ? (index * 100000 + 2500000) :
86 ((index == 7) ? 3300000 : 1200000));
87 break;
88 case PM8607_ID_LDO2:
89 case PM8607_ID_LDO3:
90 case PM8607_ID_LDO9:
91 switch (chip_id) {
92 case PM8607_CHIP_A0:
93 case PM8607_CHIP_A1:
94 ret = (index < 3) ? (index * 50000 + 1800000) :
95 ((index < 8) ? (index * 50000 + 2550000) :
96 -EINVAL);
97 break;
98 case PM8607_CHIP_B0:
99 ret = (index < 3) ? (index * 50000 + 1800000) :
100 ((index < 7) ? (index * 50000 + 2550000) :
101 3300000);
102 break;
103 }
104 break;
105 case PM8607_ID_LDO4:
106 switch (chip_id) {
107 case PM8607_CHIP_A0:
108 case PM8607_CHIP_A1:
109 ret = (index < 3) ? (index * 50000 + 1800000) :
110 ((index < 8) ? (index * 50000 + 2550000) :
111 -EINVAL);
112 break;
113 case PM8607_CHIP_B0:
114 ret = (index < 3) ? (index * 50000 + 1800000) :
115 ((index < 6) ? (index * 50000 + 2550000) :
116 ((index == 6) ? 2900000 : 3300000));
117 break;
118 }
119 break;
120 case PM8607_ID_LDO6:
121 switch (chip_id) {
122 case PM8607_CHIP_A0:
123 case PM8607_CHIP_A1:
124 ret = (index < 3) ? (index * 50000 + 1800000) :
125 ((index < 8) ? (index * 50000 + 2450000) :
126 -EINVAL);
127 break;
128 case PM8607_CHIP_B0:
129 ret = (index < 2) ? (index * 50000 + 1800000) :
130 ((index < 7) ? (index * 50000 + 2500000) :
131 3300000);
132 break;
133 }
134 break;
135 case PM8607_ID_LDO10:
136 switch (chip_id) {
137 case PM8607_CHIP_A0:
138 case PM8607_CHIP_A1:
139 ret = (index < 3) ? (index * 50000 + 1800000) :
140 ((index < 8) ? (index * 50000 + 2550000) :
141 1200000);
142 break;
143 case PM8607_CHIP_B0:
144 ret = (index < 3) ? (index * 50000 + 1800000) :
145 ((index < 7) ? (index * 50000 + 2550000) :
146 ((index == 7) ? 3300000 : 1200000));
147 break;
148 }
149 break;
150 case PM8607_ID_LDO14:
151 switch (chip_id) {
152 case PM8607_CHIP_A0:
153 case PM8607_CHIP_A1:
154 ret = (index < 3) ? (index * 50000 + 1800000) :
155 ((index < 8) ? (index * 50000 + 2550000) :
156 -EINVAL);
157 break;
158 case PM8607_CHIP_B0:
159 ret = (index < 2) ? (index * 50000 + 1800000) :
160 ((index < 7) ? (index * 50000 + 2600000) :
161 3300000);
162 break;
163 }
164 break;
165 }
166 return ret;
167}
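/*
 * Editor's worked example (not from the original source): for
 * PM8607_ID_BUCK1 the decoding above maps index 0x00 to 800000 uV,
 * index 0x1c to 1500000 uV, indexes 0x1d-0x1f to the 1500000 uV
 * plateau, and indexes 0x20-0x3f to 0..775000 uV in 25 mV steps;
 * anything at or above 0x40 returns -EINVAL.
 */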
168
169static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
170{
171 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
172 uint8_t chip_id = info->chip->chip_id;
173 int val = -ENOENT;
174 int ret;
175
176 switch (info->desc.id) {
177 case PM8607_ID_BUCK1:
178 if (min_uV >= 800000) /* 800mV ~ 1500mV / 25mV */
179 val = (min_uV - 775001) / 25000;
180 else { /* 25mV ~ 775mV / 25mV */
181 val = (min_uV + 249999) / 25000;
182 val += 32;
183 }
184 break;
185 case PM8607_ID_BUCK3:
186 if (info->slope_double)
187 min_uV = min_uV >> 1;
188 val = (min_uV + 249999) / 25000; /* 0mV ~ 1500mV / 25mV */
189
190 break;
191 case PM8607_ID_LDO1:
192 if (min_uV > 1800000)
193 val = 2;
194 else if (min_uV > 1200000)
195 val = 0;
196 else
197 val = 1;
198 break;
199 case PM8607_ID_LDO5:
200 if (min_uV > 3100000)
201 val = 3;
202 else /* 2900mV ~ 3100mV / 100mV */
203 val = (min_uV - 2800001) / 100000;
204 break;
205 case PM8607_ID_LDO7:
206 case PM8607_ID_LDO8:
207 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
208 if (min_uV <= 1800000)
209 val = 0; /* 1800mv */
210 else if (min_uV <= 1900000)
211 val = (min_uV - 1750001) / 50000;
212 else
213 val = 3; /* 2700mV */
214 } else { /* 2700mV ~ 2900mV / 50mV */
215 if (min_uV <= 2900000) {
216 val = (min_uV - 2650001) / 50000;
217 val += 3;
218 } else
219 val = -EINVAL;
220 }
221 break;
222 case PM8607_ID_LDO10:
223 if (min_uV > 2850000)
224 val = 7;
225 else if (min_uV <= 1200000)
226 val = 8;
227 else if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
228 val = (min_uV - 1750001) / 50000;
229 else { /* 2700mV ~ 2850mV / 50mV */
230 val = (min_uV - 2650001) / 50000;
231 val += 3;
232 }
233 break;
234 case PM8607_ID_LDO12:
235 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 100mV */
236 if (min_uV <= 1200000)
237 val = 8; /* 1200mV */
238 else if (min_uV <= 1800000)
239 val = 0; /* 1800mV */
240 else if (min_uV <= 1900000)
241 val = (min_uV - 1700001) / 100000;
242 else
243 val = 2; /* 2700mV */
244 } else { /* 2700mV ~ 3100mV / 100mV */
245 if (min_uV <= 3100000) {
246 val = (min_uV - 2600001) / 100000;
247 val += 2;
248 } else if (min_uV <= 3300000)
249 val = 7;
250 else
251 val = -EINVAL;
252 }
253 break;
254 case PM8607_ID_LDO2:
255 case PM8607_ID_LDO3:
256 case PM8607_ID_LDO9:
257 switch (chip_id) {
258 case PM8607_CHIP_A0:
259 case PM8607_CHIP_A1:
260 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
261 if (min_uV <= 1800000)
262 val = 0;
263 else if (min_uV <= 1900000)
264 val = (min_uV - 1750001) / 50000;
265 else
266 val = 3; /* 2700mV */
267 } else { /* 2700mV ~ 2900mV / 50mV */
268 if (min_uV <= 2900000) {
269 val = (min_uV - 2650001) / 50000;
270 val += 3;
271 } else
272 val = -EINVAL;
273 }
274 break;
275 case PM8607_CHIP_B0:
276 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
277 if (min_uV <= 1800000)
278 val = 0;
279 else if (min_uV <= 1900000)
280 val = (min_uV - 1750001) / 50000;
281 else
282 val = 3; /* 2700mV */
283 } else { /* 2700mV ~ 2850mV / 50mV */
284 if (min_uV <= 2850000) {
285 val = (min_uV - 2650001) / 50000;
286 val += 3;
287 } else if (min_uV <= 3300000)
288 val = 7;
289 else
290 val = -EINVAL;
291 }
292 break;
293 }
294 break;
295 case PM8607_ID_LDO4:
296 switch (chip_id) {
297 case PM8607_CHIP_A0:
298 case PM8607_CHIP_A1:
299 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
300 if (min_uV <= 1800000)
301 val = 0;
302 else if (min_uV <= 1900000)
303 val = (min_uV - 1750001) / 50000;
304 else
305 val = 3; /* 2700mV */
306 } else { /* 2700mV ~ 2900mV / 50mV */
307 if (min_uV <= 2900000) {
308 val = (min_uV - 2650001) / 50000;
309 val += 3;
310 } else
311 val = -EINVAL;
312 }
313 break;
314 case PM8607_CHIP_B0:
315 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
316 if (min_uV <= 1800000)
317 val = 0;
318 else if (min_uV <= 1900000)
319 val = (min_uV - 1750001) / 50000;
320 else
321 val = 3; /* 2700mV */
322 } else { /* 2700mV ~ 2800mV / 50mV */
323 if (min_uV <= 2850000) {
324 val = (min_uV - 2650001) / 50000;
325 val += 3;
326 } else if (min_uV <= 2900000)
327 val = 6;
328 else if (min_uV <= 3300000)
329 val = 7;
330 else
331 val = -EINVAL;
332 }
333 break;
334 }
335 break;
336 case PM8607_ID_LDO6:
337 switch (chip_id) {
338 case PM8607_CHIP_A0:
339 case PM8607_CHIP_A1:
340 if (min_uV < 2600000) { /* 1800mV ~ 1900mV / 50mV */
341 if (min_uV <= 1800000)
342 val = 0;
343 else if (min_uV <= 1900000)
344 val = (min_uV - 1750001) / 50000;
345 else
346 val = 3; /* 2600mV */
347 } else { /* 2600mV ~ 2800mV / 50mV */
348 if (min_uV <= 2800000) {
349 val = (min_uV - 2550001) / 50000;
350 val += 3;
351 } else
352 val = -EINVAL;
353 }
354 break;
355 case PM8607_CHIP_B0:
356 if (min_uV < 2600000) { /* 1800mV ~ 1850mV / 50mV */
357 if (min_uV <= 1800000)
358 val = 0;
359 else if (min_uV <= 1850000)
360 val = (min_uV - 1750001) / 50000;
361 else
362 val = 2; /* 2600mV */
363 } else { /* 2600mV ~ 2800mV / 50mV */
364 if (min_uV <= 2800000) {
365 val = (min_uV - 2550001) / 50000;
366 val += 2;
367 } else if (min_uV <= 3300000)
368 val = 7;
369 else
370 val = -EINVAL;
371 }
372 break;
373 }
374 break;
375 case PM8607_ID_LDO14:
376 switch (chip_id) {
377 case PM8607_CHIP_A0:
378 case PM8607_CHIP_A1:
379 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
380 if (min_uV <= 1800000)
381 val = 0;
382 else if (min_uV <= 1900000)
383 val = (min_uV - 1750001) / 50000;
384 else
385 val = 3; /* 2700mV */
386 } else { /* 2700mV ~ 2900mV / 50mV */
387 if (min_uV <= 2900000) {
388 val = (min_uV - 2650001) / 50000;
389 val += 3;
390 } else
391 val = -EINVAL;
392 }
393 break;
394 case PM8607_CHIP_B0:
395 if (min_uV < 2700000) { /* 1800mV ~ 1850mV / 50mV */
396 if (min_uV <= 1800000)
397 val = 0;
398 else if (min_uV <= 1850000)
399 val = (min_uV - 1750001) / 50000;
400 else
401 val = 2; /* 2700mV */
402 } else { /* 2700mV ~ 2900mV / 50mV */
403 if (min_uV <= 2900000) {
404 val = (min_uV - 2650001) / 50000;
405 val += 2;
406 } else if (min_uV <= 3300000)
407 val = 7;
408 else
409 val = -EINVAL;
410 }
411 break;
412 }
413 break;
414 }
415 if (val >= 0) {
416 ret = pm8607_list_voltage(rdev, val);
417 if (ret > max_uV) {
418 pr_err("voltage exceeds range (%d %d) uV\n",
419 min_uV, max_uV);
420 return -EINVAL;
421 }
422 } else
423 pr_err("invalid voltage range (%d %d) uV\n", min_uV, max_uV);
424 return val;
425}
426
427static int pm8607_set_voltage(struct regulator_dev *rdev,
428 int min_uV, int max_uV)
429{
430 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
431 struct pm8607_chip *chip = info->chip;
432 uint8_t val, mask;
433 int ret;
434
435 if (check_range(info, min_uV, max_uV)) {
436 pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
437 return -EINVAL;
438 }
439
440 ret = choose_voltage(rdev, min_uV, max_uV);
441 if (ret < 0)
442 return -EINVAL;
443 val = (uint8_t)(ret << info->vol_shift);
444 mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
445
446 ret = pm8607_set_bits(chip, info->vol_reg, mask, val);
447 if (ret)
448 return ret;
449 switch (info->desc.id) {
450 case PM8607_ID_BUCK1:
451 case PM8607_ID_BUCK3:
452 ret = pm8607_set_bits(chip, info->update_reg,
453 1 << info->update_bit,
454 1 << info->update_bit);
455 break;
456 }
457 return ret;
458}
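/*
 * Editorial sketch (not part of the patch): the recurring
 * "(min_uV - (base - step + 1)) / step" expressions in choose_voltage()
 * are integer ceiling divisions -- they pick the smallest selector whose
 * voltage is still >= min_uV, assuming the selector maps linearly to
 * base + selector * step over the commented range.  With the BUCK1
 * numbers above (base 800000 uV, step 25000 uV):
 *
 *   min_uV = 1210000  ->  (1210000 - 775001) / 25000 = 17
 *   800000 + 17 * 25000 = 1225000 uV, the first step not below 1210000
 *
 * base_uV/step_uV below are illustrative parameters, not driver fields.
 */
static inline int example_uV_to_index(int min_uV, int base_uV, int step_uV)
{
	/* smallest index with base_uV + index * step_uV >= min_uV,
	 * valid for min_uV >= base_uV */
	return (min_uV - (base_uV - step_uV + 1)) / step_uV;
}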
459
460static int pm8607_get_voltage(struct regulator_dev *rdev)
461{
462 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
463 struct pm8607_chip *chip = info->chip;
464 uint8_t val, mask;
465 int ret;
466
467 ret = pm8607_reg_read(chip, info->vol_reg);
468 if (ret < 0)
469 return ret;
470
471 mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
472 val = ((unsigned char)ret & mask) >> info->vol_shift;
473
474 return pm8607_list_voltage(rdev, val);
475}
476
477static int pm8607_enable(struct regulator_dev *rdev)
478{
479 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
480 struct pm8607_chip *chip = info->chip;
481
482 return pm8607_set_bits(chip, info->enable_reg,
483 1 << info->enable_bit,
484 1 << info->enable_bit);
485}
486
487static int pm8607_disable(struct regulator_dev *rdev)
488{
489 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
490 struct pm8607_chip *chip = info->chip;
491
492 return pm8607_set_bits(chip, info->enable_reg,
493 1 << info->enable_bit, 0);
494}
495
496static int pm8607_is_enabled(struct regulator_dev *rdev)
497{
498 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
499 struct pm8607_chip *chip = info->chip;
500 int ret;
501
502 ret = pm8607_reg_read(chip, info->enable_reg);
503 if (ret < 0)
504 return ret;
505
506 return !!((unsigned char)ret & (1 << info->enable_bit));
507}
508
509static struct regulator_ops pm8607_regulator_ops = {
510 .set_voltage = pm8607_set_voltage,
511 .get_voltage = pm8607_get_voltage,
512 .enable = pm8607_enable,
513 .disable = pm8607_disable,
514 .is_enabled = pm8607_is_enabled,
515};
516
517#define PM8607_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
518{ \
519 .desc = { \
520 .name = "BUCK" #_id, \
521 .ops = &pm8607_regulator_ops, \
522 .type = REGULATOR_VOLTAGE, \
523 .id = PM8607_ID_BUCK##_id, \
524 .owner = THIS_MODULE, \
525 }, \
526 .min_uV = (min) * 1000, \
527 .max_uV = (max) * 1000, \
528 .step_uV = (step) * 1000, \
529 .vol_reg = PM8607_##vreg, \
530 .vol_shift = (0), \
531 .vol_nbits = (nbits), \
532 .update_reg = PM8607_##ureg, \
533 .update_bit = (ubit), \
534 .enable_reg = PM8607_##ereg, \
535 .enable_bit = (ebit), \
536 .slope_double = (0), \
537}
538
539#define PM8607_LDO(_id, min, max, step, vreg, shift, nbits, ereg, ebit) \
540{ \
541 .desc = { \
542 .name = "LDO" #_id, \
543 .ops = &pm8607_regulator_ops, \
544 .type = REGULATOR_VOLTAGE, \
545 .id = PM8607_ID_LDO##_id, \
546 .owner = THIS_MODULE, \
547 }, \
548 .min_uV = (min) * 1000, \
549 .max_uV = (max) * 1000, \
550 .step_uV = (step) * 1000, \
551 .vol_reg = PM8607_##vreg, \
552 .vol_shift = (shift), \
553 .vol_nbits = (nbits), \
554 .enable_reg = PM8607_##ereg, \
555 .enable_bit = (ebit), \
556 .slope_double = (0), \
557}
558
559static struct pm8607_regulator_info pm8607_regulator_info[] = {
560 PM8607_DVC(1, 0, 1500, 25, BUCK1, 6, GO, 0, SUPPLIES_EN11, 0),
561 PM8607_DVC(3, 0, 1500, 25, BUCK3, 6, GO, 2, SUPPLIES_EN11, 2),
562
563 PM8607_LDO(1 , 1200, 2800, 0, LDO1 , 0, 2, SUPPLIES_EN11, 3),
564 PM8607_LDO(2 , 1800, 3300, 0, LDO2 , 0, 3, SUPPLIES_EN11, 4),
565 PM8607_LDO(3 , 1800, 3300, 0, LDO3 , 0, 3, SUPPLIES_EN11, 5),
566 PM8607_LDO(4 , 1800, 3300, 0, LDO4 , 0, 3, SUPPLIES_EN11, 6),
567 PM8607_LDO(5 , 2900, 3300, 0, LDO5 , 0, 2, SUPPLIES_EN11, 7),
568 PM8607_LDO(6 , 1800, 3300, 0, LDO6 , 0, 3, SUPPLIES_EN12, 0),
569 PM8607_LDO(7 , 1800, 2900, 0, LDO7 , 0, 3, SUPPLIES_EN12, 1),
570 PM8607_LDO(8 , 1800, 2900, 0, LDO8 , 0, 3, SUPPLIES_EN12, 2),
571 PM8607_LDO(9 , 1800, 3300, 0, LDO9 , 0, 3, SUPPLIES_EN12, 3),
572 PM8607_LDO(10, 1200, 3300, 0, LDO10, 0, 4, SUPPLIES_EN11, 4),
573 PM8607_LDO(12, 1200, 3300, 0, LDO12, 0, 4, SUPPLIES_EN11, 5),
574 PM8607_LDO(14, 1800, 3300, 0, LDO14, 0, 3, SUPPLIES_EN11, 6),
575};
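/*
 * Editorial note (not part of the patch): each table entry above is just
 * the corresponding macro expanded, e.g. PM8607_LDO(5, 2900, 3300, 0,
 * LDO5, 0, 2, SUPPLIES_EN11, 7) becomes roughly:
 *
 *   {
 *       .desc = {
 *           .name  = "LDO5",
 *           .ops   = &pm8607_regulator_ops,
 *           .type  = REGULATOR_VOLTAGE,
 *           .id    = PM8607_ID_LDO5,
 *           .owner = THIS_MODULE,
 *       },
 *       .min_uV       = 2900000,
 *       .max_uV       = 3300000,
 *       .step_uV      = 0,
 *       .vol_reg      = PM8607_LDO5,
 *       .vol_shift    = 0,
 *       .vol_nbits    = 2,
 *       .enable_reg   = PM8607_SUPPLIES_EN11,
 *       .enable_bit   = 7,
 *       .slope_double = 0,
 *   }
 */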
576
577static inline struct pm8607_regulator_info *find_regulator_info(int id)
578{
579 struct pm8607_regulator_info *info;
580 int i;
581
582 for (i = 0; i < ARRAY_SIZE(pm8607_regulator_info); i++) {
583 info = &pm8607_regulator_info[i];
584 if (info->desc.id == id)
585 return info;
586 }
587 return NULL;
588}
589
590static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
591{
592 struct pm8607_chip *chip = dev_get_drvdata(pdev->dev.parent);
593 struct pm8607_platform_data *pdata = chip->dev->platform_data;
594 struct pm8607_regulator_info *info = NULL;
595
596 info = find_regulator_info(pdev->id);
597 if (info == NULL) {
598 dev_err(&pdev->dev, "invalid regulator ID specified\n");
599 return -EINVAL;
600 }
601
602 info->chip = chip;
603
604 info->regulator = regulator_register(&info->desc, &pdev->dev,
605 pdata->regulator[pdev->id], info);
606 if (IS_ERR(info->regulator)) {
607 dev_err(&pdev->dev, "failed to register regulator %s\n",
608 info->desc.name);
609 return PTR_ERR(info->regulator);
610 }
611
612 /* check DVC ramp slope double */
613 if (info->desc.id == PM8607_ID_BUCK3)
614 if (info->chip->buck3_double)
615 info->slope_double = 1;
616
617 platform_set_drvdata(pdev, info);
618 return 0;
619}
620
621static int __devexit pm8607_regulator_remove(struct platform_device *pdev)
622{
623 struct pm8607_regulator_info *info = platform_get_drvdata(pdev);
624
625 regulator_unregister(info->regulator);
626 return 0;
627}
628
629#define PM8607_REGULATOR_DRIVER(_name) \
630{ \
631 .driver = { \
632 .name = "88pm8607-" #_name, \
633 .owner = THIS_MODULE, \
634 }, \
635 .probe = pm8607_regulator_probe, \
636 .remove = __devexit_p(pm8607_regulator_remove), \
637}
638
639static struct platform_driver pm8607_regulator_driver[] = {
640 PM8607_REGULATOR_DRIVER(buck1),
641 PM8607_REGULATOR_DRIVER(buck2),
642 PM8607_REGULATOR_DRIVER(buck3),
643 PM8607_REGULATOR_DRIVER(ldo1),
644 PM8607_REGULATOR_DRIVER(ldo2),
645 PM8607_REGULATOR_DRIVER(ldo3),
646 PM8607_REGULATOR_DRIVER(ldo4),
647 PM8607_REGULATOR_DRIVER(ldo5),
648 PM8607_REGULATOR_DRIVER(ldo6),
649 PM8607_REGULATOR_DRIVER(ldo7),
650 PM8607_REGULATOR_DRIVER(ldo8),
651 PM8607_REGULATOR_DRIVER(ldo9),
652 PM8607_REGULATOR_DRIVER(ldo10),
653 PM8607_REGULATOR_DRIVER(ldo12),
654 PM8607_REGULATOR_DRIVER(ldo14),
655};
656
657static int __init pm8607_regulator_init(void)
658{
659 int i, count, ret;
660
661 count = ARRAY_SIZE(pm8607_regulator_driver);
662 for (i = 0; i < count; i++) {
663 ret = platform_driver_register(&pm8607_regulator_driver[i]);
664 if (ret != 0)
665 pr_err("Failed to register regulator driver: %d\n",
666 ret);
667 }
668 return 0;
669}
670subsys_initcall(pm8607_regulator_init);
671
672static void __exit pm8607_regulator_exit(void)
673{
674 int i, count;
675
676 count = ARRAY_SIZE(pm8607_regulator_driver);
677 for (i = 0; i < count; i++)
678 platform_driver_unregister(&pm8607_regulator_driver[i]);
679}
680module_exit(pm8607_regulator_exit);
681
682MODULE_LICENSE("GPL");
683MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
684MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM8607 PMIC");
685MODULE_ALIAS("platform:88pm8607-regulator");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 7cfdd65bebb4..262f62eec837 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -69,6 +69,13 @@ config REGULATOR_MAX1586
69 regulator via I2C bus. The provided regulator is suitable 69 regulator via I2C bus. The provided regulator is suitable
70 for PXA27x chips to control VCC_CORE and VCC_USIM voltages. 70 for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
71 71
72config REGULATOR_MAX8660
73 tristate "Maxim 8660/8661 voltage regulator"
74 depends on I2C
75 help
76 This driver controls a Maxim 8660/8661 voltage output
77 regulator via I2C bus.
78
72config REGULATOR_TWL4030 79config REGULATOR_TWL4030
73 bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC" 80 bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC"
74 depends on TWL4030_CORE 81 depends on TWL4030_CORE
@@ -157,5 +164,11 @@ config REGULATOR_TPS6507X
157 three step-down converters and two general-purpose LDO voltage regulators. 164 three step-down converters and two general-purpose LDO voltage regulators.
158 It supports TI's software based Class-2 SmartReflex implementation. 165 It supports TI's software based Class-2 SmartReflex implementation.
159 166
167config REGULATOR_88PM8607
168 bool "Marvell 88PM8607 Power regulators"
169 depends on MFD_88PM8607=y
170 help
171 This driver supports 88PM8607 voltage regulator chips.
172
160endif 173endif
161 174
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 9ae3cc44e668..b3c806c79415 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
12obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o 12obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
13obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o 13obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
14obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o 14obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
15obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
15obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o 16obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
16obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o 17obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
17obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o 18obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
@@ -20,10 +21,11 @@ obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
20obj-$(CONFIG_REGULATOR_DA903X) += da903x.o 21obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
21obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o 22obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
22obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o 23obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
23obj-$(CONFIG_REGULATOR_MC13783) += mc13783.o 24obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
24obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o 25obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
25 26
26obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o 27obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
27obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o 28obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
29obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
28 30
29ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG 31ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 49aeee823a25..b349db4504b7 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -81,7 +81,7 @@ static const u8 ab3100_reg_init_order[AB3100_NUM_REGULATORS+2] = {
81#define LDO_C_VOLTAGE 2650000 81#define LDO_C_VOLTAGE 2650000
82#define LDO_D_VOLTAGE 2650000 82#define LDO_D_VOLTAGE 2650000
83 83
84static const int const ldo_e_buck_typ_voltages[] = { 84static const int ldo_e_buck_typ_voltages[] = {
85 1800000, 85 1800000,
86 1400000, 86 1400000,
87 1300000, 87 1300000,
@@ -91,7 +91,7 @@ static const int const ldo_e_buck_typ_voltages[] = {
91 900000, 91 900000,
92}; 92};
93 93
94static const int const ldo_f_typ_voltages[] = { 94static const int ldo_f_typ_voltages[] = {
95 1800000, 95 1800000,
96 1400000, 96 1400000,
97 1300000, 97 1300000,
@@ -102,21 +102,21 @@ static const int const ldo_f_typ_voltages[] = {
102 2650000, 102 2650000,
103}; 103};
104 104
105static const int const ldo_g_typ_voltages[] = { 105static const int ldo_g_typ_voltages[] = {
106 2850000, 106 2850000,
107 2750000, 107 2750000,
108 1800000, 108 1800000,
109 1500000, 109 1500000,
110}; 110};
111 111
112static const int const ldo_h_typ_voltages[] = { 112static const int ldo_h_typ_voltages[] = {
113 2750000, 113 2750000,
114 1800000, 114 1800000,
115 1500000, 115 1500000,
116 1200000, 116 1200000,
117}; 117};
118 118
119static const int const ldo_k_typ_voltages[] = { 119static const int ldo_k_typ_voltages[] = {
120 2750000, 120 2750000,
121 1800000, 121 1800000,
122}; 122};
@@ -241,24 +241,12 @@ static int ab3100_disable_regulator(struct regulator_dev *reg)
241 * LDO D is a special regulator. When it is disabled, the entire 241 * LDO D is a special regulator. When it is disabled, the entire
242 * system is shut down. So this is handled specially. 242 * system is shut down. So this is handled specially.
243 */ 243 */
244 pr_info("Called ab3100_disable_regulator\n");
244 if (abreg->regreg == AB3100_LDO_D) { 245 if (abreg->regreg == AB3100_LDO_D) {
245 int i;
246
247 dev_info(&reg->dev, "disabling LDO D - shut down system\n"); 246 dev_info(&reg->dev, "disabling LDO D - shut down system\n");
248 /*
249 * Set regulators to default values, ignore any errors,
250 * we're going DOWN
251 */
252 for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) {
253 (void) ab3100_set_register_interruptible(abreg->ab3100,
254 ab3100_reg_init_order[i],
255 abreg->plfdata->reg_initvals[i]);
256 }
257
258 /* Setting LDO D to 0x00 cuts the power to the SoC */ 247 /* Setting LDO D to 0x00 cuts the power to the SoC */
259 return ab3100_set_register_interruptible(abreg->ab3100, 248 return ab3100_set_register_interruptible(abreg->ab3100,
260 AB3100_LDO_D, 0x00U); 249 AB3100_LDO_D, 0x00U);
261
262 } 250 }
263 251
264 /* 252 /*
@@ -607,13 +595,6 @@ static int __init ab3100_regulators_probe(struct platform_device *pdev)
607 } 595 }
608 } 596 }
609 597
610 if (err) {
611 dev_err(&pdev->dev,
612 "LDO D regulator initialization failed with error %d\n",
613 err);
614 return err;
615 }
616
617 /* Register the regulators */ 598 /* Register the regulators */
618 for (i = 0; i < AB3100_NUM_REGULATORS; i++) { 599 for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
619 struct ab3100_regulator *reg = &ab3100_regulators[i]; 600 struct ab3100_regulator *reg = &ab3100_regulators[i];
@@ -688,7 +669,7 @@ static __init int ab3100_regulators_init(void)
688 669
689static __exit void ab3100_regulators_exit(void) 670static __exit void ab3100_regulators_exit(void)
690{ 671{
691 platform_driver_register(&ab3100_regulators_driver); 672 platform_driver_unregister(&ab3100_regulators_driver);
692} 673}
693 674
694subsys_initcall(ab3100_regulators_init); 675subsys_initcall(ab3100_regulators_init);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index efe568deda12..686ef270ecf7 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -66,6 +66,16 @@ static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
66static void _notifier_call_chain(struct regulator_dev *rdev, 66static void _notifier_call_chain(struct regulator_dev *rdev,
67 unsigned long event, void *data); 67 unsigned long event, void *data);
68 68
69static const char *rdev_get_name(struct regulator_dev *rdev)
70{
71 if (rdev->constraints && rdev->constraints->name)
72 return rdev->constraints->name;
73 else if (rdev->desc->name)
74 return rdev->desc->name;
75 else
76 return "";
77}
78
69/* gets the regulator for a given consumer device */ 79/* gets the regulator for a given consumer device */
70static struct regulator *get_device_regulator(struct device *dev) 80static struct regulator *get_device_regulator(struct device *dev)
71{ 81{
@@ -96,12 +106,12 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
96 106
97 if (!rdev->constraints) { 107 if (!rdev->constraints) {
98 printk(KERN_ERR "%s: no constraints for %s\n", __func__, 108 printk(KERN_ERR "%s: no constraints for %s\n", __func__,
99 rdev->desc->name); 109 rdev_get_name(rdev));
100 return -ENODEV; 110 return -ENODEV;
101 } 111 }
102 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) { 112 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
103 printk(KERN_ERR "%s: operation not allowed for %s\n", 113 printk(KERN_ERR "%s: operation not allowed for %s\n",
104 __func__, rdev->desc->name); 114 __func__, rdev_get_name(rdev));
105 return -EPERM; 115 return -EPERM;
106 } 116 }
107 117
@@ -124,12 +134,12 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
124 134
125 if (!rdev->constraints) { 135 if (!rdev->constraints) {
126 printk(KERN_ERR "%s: no constraints for %s\n", __func__, 136 printk(KERN_ERR "%s: no constraints for %s\n", __func__,
127 rdev->desc->name); 137 rdev_get_name(rdev));
128 return -ENODEV; 138 return -ENODEV;
129 } 139 }
130 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) { 140 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
131 printk(KERN_ERR "%s: operation not allowed for %s\n", 141 printk(KERN_ERR "%s: operation not allowed for %s\n",
132 __func__, rdev->desc->name); 142 __func__, rdev_get_name(rdev));
133 return -EPERM; 143 return -EPERM;
134 } 144 }
135 145
@@ -159,17 +169,17 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode)
159 169
160 if (!rdev->constraints) { 170 if (!rdev->constraints) {
161 printk(KERN_ERR "%s: no constraints for %s\n", __func__, 171 printk(KERN_ERR "%s: no constraints for %s\n", __func__,
162 rdev->desc->name); 172 rdev_get_name(rdev));
163 return -ENODEV; 173 return -ENODEV;
164 } 174 }
165 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) { 175 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
166 printk(KERN_ERR "%s: operation not allowed for %s\n", 176 printk(KERN_ERR "%s: operation not allowed for %s\n",
167 __func__, rdev->desc->name); 177 __func__, rdev_get_name(rdev));
168 return -EPERM; 178 return -EPERM;
169 } 179 }
170 if (!(rdev->constraints->valid_modes_mask & mode)) { 180 if (!(rdev->constraints->valid_modes_mask & mode)) {
171 printk(KERN_ERR "%s: invalid mode %x for %s\n", 181 printk(KERN_ERR "%s: invalid mode %x for %s\n",
172 __func__, mode, rdev->desc->name); 182 __func__, mode, rdev_get_name(rdev));
173 return -EINVAL; 183 return -EINVAL;
174 } 184 }
175 return 0; 185 return 0;
@@ -180,12 +190,12 @@ static int regulator_check_drms(struct regulator_dev *rdev)
180{ 190{
181 if (!rdev->constraints) { 191 if (!rdev->constraints) {
182 printk(KERN_ERR "%s: no constraints for %s\n", __func__, 192 printk(KERN_ERR "%s: no constraints for %s\n", __func__,
183 rdev->desc->name); 193 rdev_get_name(rdev));
184 return -ENODEV; 194 return -ENODEV;
185 } 195 }
186 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) { 196 if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
187 printk(KERN_ERR "%s: operation not allowed for %s\n", 197 printk(KERN_ERR "%s: operation not allowed for %s\n",
188 __func__, rdev->desc->name); 198 __func__, rdev_get_name(rdev));
189 return -EPERM; 199 return -EPERM;
190 } 200 }
191 return 0; 201 return 0;
@@ -230,16 +240,8 @@ static ssize_t regulator_name_show(struct device *dev,
230 struct device_attribute *attr, char *buf) 240 struct device_attribute *attr, char *buf)
231{ 241{
232 struct regulator_dev *rdev = dev_get_drvdata(dev); 242 struct regulator_dev *rdev = dev_get_drvdata(dev);
233 const char *name;
234 243
235 if (rdev->constraints && rdev->constraints->name) 244 return sprintf(buf, "%s\n", rdev_get_name(rdev));
236 name = rdev->constraints->name;
237 else if (rdev->desc->name)
238 name = rdev->desc->name;
239 else
240 name = "";
241
242 return sprintf(buf, "%s\n", name);
243} 245}
244 246
245static ssize_t regulator_print_opmode(char *buf, int mode) 247static ssize_t regulator_print_opmode(char *buf, int mode)
@@ -388,7 +390,7 @@ static ssize_t regulator_total_uA_show(struct device *dev,
388 390
389 mutex_lock(&rdev->mutex); 391 mutex_lock(&rdev->mutex);
390 list_for_each_entry(regulator, &rdev->consumer_list, list) 392 list_for_each_entry(regulator, &rdev->consumer_list, list)
391 uA += regulator->uA_load; 393 uA += regulator->uA_load;
392 mutex_unlock(&rdev->mutex); 394 mutex_unlock(&rdev->mutex);
393 return sprintf(buf, "%d\n", uA); 395 return sprintf(buf, "%d\n", uA);
394} 396}
@@ -563,7 +565,7 @@ static void drms_uA_update(struct regulator_dev *rdev)
563 565
564 /* calc total requested load */ 566 /* calc total requested load */
565 list_for_each_entry(sibling, &rdev->consumer_list, list) 567 list_for_each_entry(sibling, &rdev->consumer_list, list)
566 current_uA += sibling->uA_load; 568 current_uA += sibling->uA_load;
567 569
568 /* now get the optimum mode for our new total regulator load */ 570 /* now get the optimum mode for our new total regulator load */
569 mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV, 571 mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV,
@@ -579,10 +581,29 @@ static int suspend_set_state(struct regulator_dev *rdev,
579 struct regulator_state *rstate) 581 struct regulator_state *rstate)
580{ 582{
581 int ret = 0; 583 int ret = 0;
584 bool can_set_state;
582 585
583 /* enable & disable are mandatory for suspend control */ 586 can_set_state = rdev->desc->ops->set_suspend_enable &&
584 if (!rdev->desc->ops->set_suspend_enable || 587 rdev->desc->ops->set_suspend_disable;
585 !rdev->desc->ops->set_suspend_disable) { 588
589 /* If we have no suspend mode configuration, don't set anything;
590 * only warn if the driver actually makes the suspend mode
591 * configurable.
592 */
593 if (!rstate->enabled && !rstate->disabled) {
594 if (can_set_state)
595 printk(KERN_WARNING "%s: No configuration for %s\n",
596 __func__, rdev_get_name(rdev));
597 return 0;
598 }
599
600 if (rstate->enabled && rstate->disabled) {
601 printk(KERN_ERR "%s: invalid configuration for %s\n",
602 __func__, rdev_get_name(rdev));
603 return -EINVAL;
604 }
605
606 if (!can_set_state) {
586 printk(KERN_ERR "%s: no way to set suspend state\n", 607 printk(KERN_ERR "%s: no way to set suspend state\n",
587 __func__); 608 __func__);
588 return -EINVAL; 609 return -EINVAL;
@@ -641,25 +662,43 @@ static void print_constraints(struct regulator_dev *rdev)
641{ 662{
642 struct regulation_constraints *constraints = rdev->constraints; 663 struct regulation_constraints *constraints = rdev->constraints;
643 char buf[80]; 664 char buf[80];
644 int count; 665 int count = 0;
666 int ret;
645 667
646 if (rdev->desc->type == REGULATOR_VOLTAGE) { 668 if (constraints->min_uV && constraints->max_uV) {
647 if (constraints->min_uV == constraints->max_uV) 669 if (constraints->min_uV == constraints->max_uV)
648 count = sprintf(buf, "%d mV ", 670 count += sprintf(buf + count, "%d mV ",
649 constraints->min_uV / 1000); 671 constraints->min_uV / 1000);
650 else 672 else
651 count = sprintf(buf, "%d <--> %d mV ", 673 count += sprintf(buf + count, "%d <--> %d mV ",
652 constraints->min_uV / 1000, 674 constraints->min_uV / 1000,
653 constraints->max_uV / 1000); 675 constraints->max_uV / 1000);
654 } else { 676 }
677
678 if (!constraints->min_uV ||
679 constraints->min_uV != constraints->max_uV) {
680 ret = _regulator_get_voltage(rdev);
681 if (ret > 0)
682 count += sprintf(buf + count, "at %d mV ", ret / 1000);
683 }
684
685 if (constraints->min_uA && constraints->max_uA) {
655 if (constraints->min_uA == constraints->max_uA) 686 if (constraints->min_uA == constraints->max_uA)
656 count = sprintf(buf, "%d mA ", 687 count += sprintf(buf + count, "%d mA ",
657 constraints->min_uA / 1000); 688 constraints->min_uA / 1000);
658 else 689 else
659 count = sprintf(buf, "%d <--> %d mA ", 690 count += sprintf(buf + count, "%d <--> %d mA ",
660 constraints->min_uA / 1000, 691 constraints->min_uA / 1000,
661 constraints->max_uA / 1000); 692 constraints->max_uA / 1000);
662 } 693 }
694
695 if (!constraints->min_uA ||
696 constraints->min_uA != constraints->max_uA) {
697 ret = _regulator_get_current_limit(rdev);
698 if (ret > 0)
699 count += sprintf(buf + count, "at %d mA ", ret / 1000);
700 }
701
663 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) 702 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
664 count += sprintf(buf + count, "fast "); 703 count += sprintf(buf + count, "fast ");
665 if (constraints->valid_modes_mask & REGULATOR_MODE_NORMAL) 704 if (constraints->valid_modes_mask & REGULATOR_MODE_NORMAL)
@@ -669,33 +708,30 @@ static void print_constraints(struct regulator_dev *rdev)
669 if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY) 708 if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
670 count += sprintf(buf + count, "standby"); 709 count += sprintf(buf + count, "standby");
671 710
672 printk(KERN_INFO "regulator: %s: %s\n", rdev->desc->name, buf); 711 printk(KERN_INFO "regulator: %s: %s\n", rdev_get_name(rdev), buf);
673} 712}
674 713
675/** 714static int machine_constraints_voltage(struct regulator_dev *rdev,
676 * set_machine_constraints - sets regulator constraints
677 * @rdev: regulator source
678 * @constraints: constraints to apply
679 *
680 * Allows platform initialisation code to define and constrain
681 * regulator circuits e.g. valid voltage/current ranges, etc. NOTE:
682 * Constraints *must* be set by platform code in order for some
683 * regulator operations to proceed i.e. set_voltage, set_current_limit,
684 * set_mode.
685 */
686static int set_machine_constraints(struct regulator_dev *rdev,
687 struct regulation_constraints *constraints) 715 struct regulation_constraints *constraints)
688{ 716{
689 int ret = 0;
690 const char *name;
691 struct regulator_ops *ops = rdev->desc->ops; 717 struct regulator_ops *ops = rdev->desc->ops;
718 const char *name = rdev_get_name(rdev);
719 int ret;
692 720
693 if (constraints->name) 721 /* do we need to apply the constraint voltage */
694 name = constraints->name; 722 if (rdev->constraints->apply_uV &&
695 else if (rdev->desc->name) 723 rdev->constraints->min_uV == rdev->constraints->max_uV &&
696 name = rdev->desc->name; 724 ops->set_voltage) {
697 else 725 ret = ops->set_voltage(rdev,
698 name = "regulator"; 726 rdev->constraints->min_uV, rdev->constraints->max_uV);
727 if (ret < 0) {
728 printk(KERN_ERR "%s: failed to apply %duV constraint to %s\n",
729 __func__,
730 rdev->constraints->min_uV, name);
731 rdev->constraints = NULL;
732 return ret;
733 }
734 }
699 735
700 /* constrain machine-level voltage specs to fit 736 /* constrain machine-level voltage specs to fit
701 * the actual range supported by this regulator. 737 * the actual range supported by this regulator.
@@ -719,14 +755,13 @@ static int set_machine_constraints(struct regulator_dev *rdev,
719 755
720 /* voltage constraints are optional */ 756 /* voltage constraints are optional */
721 if ((cmin == 0) && (cmax == 0)) 757 if ((cmin == 0) && (cmax == 0))
722 goto out; 758 return 0;
723 759
724 /* else require explicit machine-level constraints */ 760 /* else require explicit machine-level constraints */
725 if (cmin <= 0 || cmax <= 0 || cmax < cmin) { 761 if (cmin <= 0 || cmax <= 0 || cmax < cmin) {
726 pr_err("%s: %s '%s' voltage constraints\n", 762 pr_err("%s: %s '%s' voltage constraints\n",
727 __func__, "invalid", name); 763 __func__, "invalid", name);
728 ret = -EINVAL; 764 return -EINVAL;
729 goto out;
730 } 765 }
731 766
732 /* initial: [cmin..cmax] valid, [min_uV..max_uV] not */ 767 /* initial: [cmin..cmax] valid, [min_uV..max_uV] not */
@@ -748,8 +783,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
748 if (max_uV < min_uV) { 783 if (max_uV < min_uV) {
749 pr_err("%s: %s '%s' voltage constraints\n", 784 pr_err("%s: %s '%s' voltage constraints\n",
750 __func__, "unsupportable", name); 785 __func__, "unsupportable", name);
751 ret = -EINVAL; 786 return -EINVAL;
752 goto out;
753 } 787 }
754 788
755 /* use regulator's subset of machine constraints */ 789 /* use regulator's subset of machine constraints */
@@ -767,22 +801,34 @@ static int set_machine_constraints(struct regulator_dev *rdev,
767 } 801 }
768 } 802 }
769 803
804 return 0;
805}
806
807/**
808 * set_machine_constraints - sets regulator constraints
809 * @rdev: regulator source
810 * @constraints: constraints to apply
811 *
812 * Allows platform initialisation code to define and constrain
813 * regulator circuits e.g. valid voltage/current ranges, etc. NOTE:
814 * Constraints *must* be set by platform code in order for some
815 * regulator operations to proceed i.e. set_voltage, set_current_limit,
816 * set_mode.
817 */
818static int set_machine_constraints(struct regulator_dev *rdev,
819 struct regulation_constraints *constraints)
820{
821 int ret = 0;
822 const char *name;
823 struct regulator_ops *ops = rdev->desc->ops;
824
770 rdev->constraints = constraints; 825 rdev->constraints = constraints;
771 826
772 /* do we need to apply the constraint voltage */ 827 name = rdev_get_name(rdev);
773 if (rdev->constraints->apply_uV && 828
774 rdev->constraints->min_uV == rdev->constraints->max_uV && 829 ret = machine_constraints_voltage(rdev, constraints);
775 ops->set_voltage) { 830 if (ret != 0)
776 ret = ops->set_voltage(rdev, 831 goto out;
777 rdev->constraints->min_uV, rdev->constraints->max_uV);
778 if (ret < 0) {
779 printk(KERN_ERR "%s: failed to apply %duV constraint to %s\n",
780 __func__,
781 rdev->constraints->min_uV, name);
782 rdev->constraints = NULL;
783 goto out;
784 }
785 }
786 832
787 /* do we need to setup our suspend state */ 833 /* do we need to setup our suspend state */
788 if (constraints->initial_state) { 834 if (constraints->initial_state) {
@@ -903,7 +949,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
903 dev_name(&node->regulator->dev), 949 dev_name(&node->regulator->dev),
904 node->regulator->desc->name, 950 node->regulator->desc->name,
905 supply, 951 supply,
906 dev_name(&rdev->dev), rdev->desc->name); 952 dev_name(&rdev->dev), rdev_get_name(rdev));
907 return -EBUSY; 953 return -EBUSY;
908 } 954 }
909 955
@@ -1212,7 +1258,7 @@ static int _regulator_enable(struct regulator_dev *rdev)
1212 ret = _regulator_enable(rdev->supply); 1258 ret = _regulator_enable(rdev->supply);
1213 if (ret < 0) { 1259 if (ret < 0) {
1214 printk(KERN_ERR "%s: failed to enable %s: %d\n", 1260 printk(KERN_ERR "%s: failed to enable %s: %d\n",
1215 __func__, rdev->desc->name, ret); 1261 __func__, rdev_get_name(rdev), ret);
1216 return ret; 1262 return ret;
1217 } 1263 }
1218 } 1264 }
@@ -1238,7 +1284,7 @@ static int _regulator_enable(struct regulator_dev *rdev)
1238 } 1284 }
1239 } else if (ret < 0) { 1285 } else if (ret < 0) {
1240 printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n", 1286 printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
1241 __func__, rdev->desc->name, ret); 1287 __func__, rdev_get_name(rdev), ret);
1242 return ret; 1288 return ret;
1243 } 1289 }
1244 /* Fallthrough on positive return values - already enabled */ 1290 /* Fallthrough on positive return values - already enabled */
@@ -1279,7 +1325,7 @@ static int _regulator_disable(struct regulator_dev *rdev)
1279 1325
1280 if (WARN(rdev->use_count <= 0, 1326 if (WARN(rdev->use_count <= 0,
1281 "unbalanced disables for %s\n", 1327 "unbalanced disables for %s\n",
1282 rdev->desc->name)) 1328 rdev_get_name(rdev)))
1283 return -EIO; 1329 return -EIO;
1284 1330
1285 /* are we the last user and permitted to disable ? */ 1331 /* are we the last user and permitted to disable ? */
@@ -1292,7 +1338,7 @@ static int _regulator_disable(struct regulator_dev *rdev)
1292 ret = rdev->desc->ops->disable(rdev); 1338 ret = rdev->desc->ops->disable(rdev);
1293 if (ret < 0) { 1339 if (ret < 0) {
1294 printk(KERN_ERR "%s: failed to disable %s\n", 1340 printk(KERN_ERR "%s: failed to disable %s\n",
1295 __func__, rdev->desc->name); 1341 __func__, rdev_get_name(rdev));
1296 return ret; 1342 return ret;
1297 } 1343 }
1298 } 1344 }
@@ -1349,7 +1395,7 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
1349 ret = rdev->desc->ops->disable(rdev); 1395 ret = rdev->desc->ops->disable(rdev);
1350 if (ret < 0) { 1396 if (ret < 0) {
1351 printk(KERN_ERR "%s: failed to force disable %s\n", 1397 printk(KERN_ERR "%s: failed to force disable %s\n",
1352 __func__, rdev->desc->name); 1398 __func__, rdev_get_name(rdev));
1353 return ret; 1399 return ret;
1354 } 1400 }
1355 /* notify other consumers that power has been forced off */ 1401 /* notify other consumers that power has been forced off */
@@ -1766,7 +1812,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
1766 output_uV = rdev->desc->ops->get_voltage(rdev); 1812 output_uV = rdev->desc->ops->get_voltage(rdev);
1767 if (output_uV <= 0) { 1813 if (output_uV <= 0) {
1768 printk(KERN_ERR "%s: invalid output voltage found for %s\n", 1814 printk(KERN_ERR "%s: invalid output voltage found for %s\n",
1769 __func__, rdev->desc->name); 1815 __func__, rdev_get_name(rdev));
1770 goto out; 1816 goto out;
1771 } 1817 }
1772 1818
@@ -1777,13 +1823,13 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
1777 input_uV = rdev->constraints->input_uV; 1823 input_uV = rdev->constraints->input_uV;
1778 if (input_uV <= 0) { 1824 if (input_uV <= 0) {
1779 printk(KERN_ERR "%s: invalid input voltage found for %s\n", 1825 printk(KERN_ERR "%s: invalid input voltage found for %s\n",
1780 __func__, rdev->desc->name); 1826 __func__, rdev_get_name(rdev));
1781 goto out; 1827 goto out;
1782 } 1828 }
1783 1829
1784 /* calc total requested load for this regulator */ 1830 /* calc total requested load for this regulator */
1785 list_for_each_entry(consumer, &rdev->consumer_list, list) 1831 list_for_each_entry(consumer, &rdev->consumer_list, list)
1786 total_uA_load += consumer->uA_load; 1832 total_uA_load += consumer->uA_load;
1787 1833
1788 mode = rdev->desc->ops->get_optimum_mode(rdev, 1834 mode = rdev->desc->ops->get_optimum_mode(rdev,
1789 input_uV, output_uV, 1835 input_uV, output_uV,
@@ -1791,7 +1837,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
1791 ret = regulator_check_mode(rdev, mode); 1837 ret = regulator_check_mode(rdev, mode);
1792 if (ret < 0) { 1838 if (ret < 0) {
1793 printk(KERN_ERR "%s: failed to get optimum mode for %s @" 1839 printk(KERN_ERR "%s: failed to get optimum mode for %s @"
1794 " %d uA %d -> %d uV\n", __func__, rdev->desc->name, 1840 " %d uA %d -> %d uV\n", __func__, rdev_get_name(rdev),
1795 total_uA_load, input_uV, output_uV); 1841 total_uA_load, input_uV, output_uV);
1796 goto out; 1842 goto out;
1797 } 1843 }
@@ -1799,7 +1845,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
1799 ret = rdev->desc->ops->set_mode(rdev, mode); 1845 ret = rdev->desc->ops->set_mode(rdev, mode);
1800 if (ret < 0) { 1846 if (ret < 0) {
1801 printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n", 1847 printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n",
1802 __func__, mode, rdev->desc->name); 1848 __func__, mode, rdev_get_name(rdev));
1803 goto out; 1849 goto out;
1804 } 1850 }
1805 ret = mode; 1851 ret = mode;
@@ -1852,9 +1898,9 @@ static void _notifier_call_chain(struct regulator_dev *rdev,
1852 1898
1853 /* now notify regulator we supply */ 1899 /* now notify regulator we supply */
1854 list_for_each_entry(_rdev, &rdev->supply_list, slist) { 1900 list_for_each_entry(_rdev, &rdev->supply_list, slist) {
1855 mutex_lock(&_rdev->mutex); 1901 mutex_lock(&_rdev->mutex);
1856 _notifier_call_chain(_rdev, event, data); 1902 _notifier_call_chain(_rdev, event, data);
1857 mutex_unlock(&_rdev->mutex); 1903 mutex_unlock(&_rdev->mutex);
1858 } 1904 }
1859} 1905}
1860 1906
@@ -1885,9 +1931,9 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
1885 consumers[i].consumer = regulator_get(dev, 1931 consumers[i].consumer = regulator_get(dev,
1886 consumers[i].supply); 1932 consumers[i].supply);
1887 if (IS_ERR(consumers[i].consumer)) { 1933 if (IS_ERR(consumers[i].consumer)) {
1888 dev_err(dev, "Failed to get supply '%s'\n",
1889 consumers[i].supply);
1890 ret = PTR_ERR(consumers[i].consumer); 1934 ret = PTR_ERR(consumers[i].consumer);
1935 dev_err(dev, "Failed to get supply '%s': %d\n",
1936 consumers[i].supply, ret);
1891 consumers[i].consumer = NULL; 1937 consumers[i].consumer = NULL;
1892 goto err; 1938 goto err;
1893 } 1939 }
@@ -1930,8 +1976,8 @@ int regulator_bulk_enable(int num_consumers,
1930 return 0; 1976 return 0;
1931 1977
1932err: 1978err:
1933 printk(KERN_ERR "Failed to enable %s\n", consumers[i].supply); 1979 printk(KERN_ERR "Failed to enable %s: %d\n", consumers[i].supply, ret);
1934 for (i = 0; i < num_consumers; i++) 1980 for (--i; i >= 0; --i)
1935 regulator_disable(consumers[i].consumer); 1981 regulator_disable(consumers[i].consumer);
1936 1982
1937 return ret; 1983 return ret;
@@ -1965,8 +2011,9 @@ int regulator_bulk_disable(int num_consumers,
1965 return 0; 2011 return 0;
1966 2012
1967err: 2013err:
1968 printk(KERN_ERR "Failed to disable %s\n", consumers[i].supply); 2014 printk(KERN_ERR "Failed to disable %s: %d\n", consumers[i].supply,
1969 for (i = 0; i < num_consumers; i++) 2015 ret);
2016 for (--i; i >= 0; --i)
1970 regulator_enable(consumers[i].consumer); 2017 regulator_enable(consumers[i].consumer);
1971 2018
1972 return ret; 2019 return ret;
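[Editorial note, not part of the patch] The error-path changes in the regulator_bulk_enable()/regulator_bulk_disable() hunks above switch from re-walking every consumer to a partial unwind: on a failure at entry i, only entries 0..i-1 (the ones that were actually toggled) are rolled back. A minimal sketch of the pattern, with do_enable()/do_disable() as hypothetical stand-ins for the real calls:

static int do_enable(int i)   { return (i == 3) ? -1 : 0; }	/* pretend entry 3 fails */
static void do_disable(int i) { (void)i; }

static int enable_all(int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = do_enable(i);
		if (ret != 0)
			goto err;
	}
	return 0;

err:
	/* unwind only 0..i-1, i.e. the entries that really were enabled */
	for (--i; i >= 0; --i)
		do_disable(i);
	return ret;
}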
@@ -2316,7 +2363,7 @@ int regulator_suspend_prepare(suspend_state_t state)
2316 2363
2317 if (ret < 0) { 2364 if (ret < 0) {
2318 printk(KERN_ERR "%s: failed to prepare %s\n", 2365 printk(KERN_ERR "%s: failed to prepare %s\n",
2319 __func__, rdev->desc->name); 2366 __func__, rdev_get_name(rdev));
2320 goto out; 2367 goto out;
2321 } 2368 }
2322 } 2369 }
@@ -2429,12 +2476,7 @@ static int __init regulator_init_complete(void)
2429 ops = rdev->desc->ops; 2476 ops = rdev->desc->ops;
2430 c = rdev->constraints; 2477 c = rdev->constraints;
2431 2478
2432 if (c && c->name) 2479 name = rdev_get_name(rdev);
2433 name = c->name;
2434 else if (rdev->desc->name)
2435 name = rdev->desc->name;
2436 else
2437 name = "regulator";
2438 2480
2439 if (!ops->disable || (c && c->always_on)) 2481 if (!ops->disable || (c && c->always_on))
2440 continue; 2482 continue;
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index aa224d936e0d..f8c4661a7a81 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -331,7 +331,7 @@ static int da9034_get_ldo12_voltage(struct regulator_dev *rdev)
331static int da9034_list_ldo12_voltage(struct regulator_dev *rdev, 331static int da9034_list_ldo12_voltage(struct regulator_dev *rdev,
332 unsigned selector) 332 unsigned selector)
333{ 333{
334 if (selector > ARRAY_SIZE(da9034_ldo12_data)) 334 if (selector >= ARRAY_SIZE(da9034_ldo12_data))
335 return -EINVAL; 335 return -EINVAL;
336 return da9034_ldo12_data[selector] * 1000; 336 return da9034_ldo12_data[selector] * 1000;
337} 337}
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 7803a320543b..76d08c282f9c 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -446,8 +446,8 @@ static int setup_regulators(struct lp3971 *lp3971,
446 lp3971->rdev[i] = regulator_register(&regulators[id], 446 lp3971->rdev[i] = regulator_register(&regulators[id],
447 lp3971->dev, pdata->regulators[i].initdata, lp3971); 447 lp3971->dev, pdata->regulators[i].initdata, lp3971);
448 448
449 err = IS_ERR(lp3971->rdev[i]); 449 if (IS_ERR(lp3971->rdev[i])) {
450 if (err) { 450 err = PTR_ERR(lp3971->rdev[i]);
451 dev_err(lp3971->dev, "regulator init failed: %d\n", 451 dev_err(lp3971->dev, "regulator init failed: %d\n",
452 err); 452 err);
453 goto error; 453 goto error;
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
new file mode 100644
index 000000000000..acc2fb7b6087
--- /dev/null
+++ b/drivers/regulator/max8660.c
@@ -0,0 +1,510 @@
1/*
2 * max8660.c -- Voltage regulation for the Maxim 8660/8661
3 *
4 * based on max1586.c and wm8400-regulator.c
5 *
6 * Copyright (C) 2009 Wolfram Sang, Pengutronix e.K.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
19 * Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Some info:
22 *
23 * Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8660-MAX8661.pdf
24 *
25 * This chip is a bit nasty because it is a write-only device. Thus, the driver
26 * uses shadow registers to keep track of its values. The main problem appears
27 * to be the initialization: When Linux boots up, we cannot know if the chip is
28 * in the default state or not, so we would have to pass such information in
29 * platform_data. As this adds a bit of complexity to the driver, this is left
30 * out for now until it is really needed.
31 *
32 * The [A|S|M]DTV1 registers are currently not used; only [A|S|M]DTV2 are.
33 *
34 * Once the driver is feature-complete, it might be worth checking if one set of
35 * functions for V3-V7 is sufficient. For maximum flexibility during
36 * development, they are separated for now.
37 *
38 */
39
40#include <linux/module.h>
41#include <linux/err.h>
42#include <linux/i2c.h>
43#include <linux/platform_device.h>
44#include <linux/regulator/driver.h>
45#include <linux/regulator/max8660.h>
46
47#define MAX8660_DCDC_MIN_UV 725000
48#define MAX8660_DCDC_MAX_UV 1800000
49#define MAX8660_DCDC_STEP 25000
50#define MAX8660_DCDC_MAX_SEL 0x2b
51
52#define MAX8660_LDO5_MIN_UV 1700000
53#define MAX8660_LDO5_MAX_UV 2000000
54#define MAX8660_LDO5_STEP 25000
55#define MAX8660_LDO5_MAX_SEL 0x0c
56
57#define MAX8660_LDO67_MIN_UV 1800000
58#define MAX8660_LDO67_MAX_UV 3300000
59#define MAX8660_LDO67_STEP 100000
60#define MAX8660_LDO67_MAX_SEL 0x0f
61
62enum {
63 MAX8660_OVER1,
64 MAX8660_OVER2,
65 MAX8660_VCC1,
66 MAX8660_ADTV1,
67 MAX8660_ADTV2,
68 MAX8660_SDTV1,
69 MAX8660_SDTV2,
70 MAX8660_MDTV1,
71 MAX8660_MDTV2,
72 MAX8660_L12VCR,
73 MAX8660_FPWM,
74 MAX8660_N_REGS, /* not a real register */
75};
76
77struct max8660 {
78 struct i2c_client *client;
79 u8 shadow_regs[MAX8660_N_REGS]; /* as chip is write only */
80 struct regulator_dev *rdev[];
81};
82
83static int max8660_write(struct max8660 *max8660, u8 reg, u8 mask, u8 val)
84{
85 static const u8 max8660_addresses[MAX8660_N_REGS] =
86 { 0x10, 0x12, 0x20, 0x23, 0x24, 0x29, 0x2a, 0x32, 0x33, 0x39, 0x80 };
87
88 int ret;
89 u8 reg_val = (max8660->shadow_regs[reg] & mask) | val;
90 dev_vdbg(&max8660->client->dev, "Writing reg %02x with %02x\n",
91 max8660_addresses[reg], reg_val);
92
93 ret = i2c_smbus_write_byte_data(max8660->client,
94 max8660_addresses[reg], reg_val);
95 if (ret == 0)
96 max8660->shadow_regs[reg] = reg_val;
97
98 return ret;
99}
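/*
 * Editorial sketch (not part of the patch): max8660_write() applies
 * "(shadow & mask) | val" to the shadow copy, so callers clear bits by
 * zeroing them in the mask, and set bits by passing 0xff as the mask and
 * the bits to set in val.  Worked values:
 *
 *   shadow 0x05, mask 0xfe (~1), val 0x00  ->  0x04   (bit 0 cleared)
 *   shadow 0x04, mask 0xff,      val 0x01  ->  0x05   (bit 0 set)
 *
 * This is exactly how max8660_dcdc_disable()/max8660_dcdc_enable() below
 * drive the MAX8660_OVER1 shadow register.
 */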
100
101
102/*
103 * DCDC functions
104 */
105
106static int max8660_dcdc_is_enabled(struct regulator_dev *rdev)
107{
108 struct max8660 *max8660 = rdev_get_drvdata(rdev);
109 u8 val = max8660->shadow_regs[MAX8660_OVER1];
110 u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
111 return !!(val & mask);
112}
113
114static int max8660_dcdc_enable(struct regulator_dev *rdev)
115{
116 struct max8660 *max8660 = rdev_get_drvdata(rdev);
117 u8 bit = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
118 return max8660_write(max8660, MAX8660_OVER1, 0xff, bit);
119}
120
121static int max8660_dcdc_disable(struct regulator_dev *rdev)
122{
123 struct max8660 *max8660 = rdev_get_drvdata(rdev);
124 u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? ~1 : ~4;
125 return max8660_write(max8660, MAX8660_OVER1, mask, 0);
126}
127
128static int max8660_dcdc_list(struct regulator_dev *rdev, unsigned selector)
129{
130 if (selector > MAX8660_DCDC_MAX_SEL)
131 return -EINVAL;
132 return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
133}
134
135static int max8660_dcdc_get(struct regulator_dev *rdev)
136{
137 struct max8660 *max8660 = rdev_get_drvdata(rdev);
138 u8 reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2;
139 u8 selector = max8660->shadow_regs[reg];
140 return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
141}
142
143static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV)
144{
145 struct max8660 *max8660 = rdev_get_drvdata(rdev);
146 u8 reg, selector, bits;
147 int ret;
148
149 if (min_uV < MAX8660_DCDC_MIN_UV || min_uV > MAX8660_DCDC_MAX_UV)
150 return -EINVAL;
151 if (max_uV < MAX8660_DCDC_MIN_UV || max_uV > MAX8660_DCDC_MAX_UV)
152 return -EINVAL;
153
154 selector = (min_uV - (MAX8660_DCDC_MIN_UV - MAX8660_DCDC_STEP + 1))
155 / MAX8660_DCDC_STEP;
156
157 ret = max8660_dcdc_list(rdev, selector);
158 if (ret < 0 || ret > max_uV)
159 return -EINVAL;
160
161 reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2;
162 ret = max8660_write(max8660, reg, 0, selector);
163 if (ret)
164 return ret;
165
166 /* Select target voltage register and activate regulation */
167 bits = (rdev_get_id(rdev) == MAX8660_V3) ? 0x03 : 0x30;
168 return max8660_write(max8660, MAX8660_VCC1, 0xff, bits);
169}
170
171static struct regulator_ops max8660_dcdc_ops = {
172 .is_enabled = max8660_dcdc_is_enabled,
173 .list_voltage = max8660_dcdc_list,
174 .set_voltage = max8660_dcdc_set,
175 .get_voltage = max8660_dcdc_get,
176};
177
178
179/*
180 * LDO5 functions
181 */
182
183static int max8660_ldo5_list(struct regulator_dev *rdev, unsigned selector)
184{
185 if (selector > MAX8660_LDO5_MAX_SEL)
186 return -EINVAL;
187 return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
188}
189
190static int max8660_ldo5_get(struct regulator_dev *rdev)
191{
192 struct max8660 *max8660 = rdev_get_drvdata(rdev);
193 u8 selector = max8660->shadow_regs[MAX8660_MDTV2];
194
195 return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
196}
197
198static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV)
199{
200 struct max8660 *max8660 = rdev_get_drvdata(rdev);
201 u8 selector;
202 int ret;
203
204 if (min_uV < MAX8660_LDO5_MIN_UV || min_uV > MAX8660_LDO5_MAX_UV)
205 return -EINVAL;
206 if (max_uV < MAX8660_LDO5_MIN_UV || max_uV > MAX8660_LDO5_MAX_UV)
207 return -EINVAL;
208
209 selector = (min_uV - (MAX8660_LDO5_MIN_UV - MAX8660_LDO5_STEP + 1))
210 / MAX8660_LDO5_STEP;
211 ret = max8660_ldo5_list(rdev, selector);
212 if (ret < 0 || ret > max_uV)
213 return -EINVAL;
214
215 ret = max8660_write(max8660, MAX8660_MDTV2, 0, selector);
216 if (ret)
217 return ret;
218
219 /* Select target voltage register and activate regulation */
220 return max8660_write(max8660, MAX8660_VCC1, 0xff, 0xc0);
221}
222
223static struct regulator_ops max8660_ldo5_ops = {
224 .list_voltage = max8660_ldo5_list,
225 .set_voltage = max8660_ldo5_set,
226 .get_voltage = max8660_ldo5_get,
227};
228
229
230/*
231 * LDO67 functions
232 */
233
234static int max8660_ldo67_is_enabled(struct regulator_dev *rdev)
235{
236 struct max8660 *max8660 = rdev_get_drvdata(rdev);
237 u8 val = max8660->shadow_regs[MAX8660_OVER2];
238 u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
239 return !!(val & mask);
240}
241
242static int max8660_ldo67_enable(struct regulator_dev *rdev)
243{
244 struct max8660 *max8660 = rdev_get_drvdata(rdev);
245 u8 bit = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
246 return max8660_write(max8660, MAX8660_OVER2, 0xff, bit);
247}
248
249static int max8660_ldo67_disable(struct regulator_dev *rdev)
250{
251 struct max8660 *max8660 = rdev_get_drvdata(rdev);
252 u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? ~2 : ~4;
253 return max8660_write(max8660, MAX8660_OVER2, mask, 0);
254}
255
256static int max8660_ldo67_list(struct regulator_dev *rdev, unsigned selector)
257{
258 if (selector > MAX8660_LDO67_MAX_SEL)
259 return -EINVAL;
260 return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
261}
262
263static int max8660_ldo67_get(struct regulator_dev *rdev)
264{
265 struct max8660 *max8660 = rdev_get_drvdata(rdev);
266 u8 shift = (rdev_get_id(rdev) == MAX8660_V6) ? 0 : 4;
267 u8 selector = (max8660->shadow_regs[MAX8660_L12VCR] >> shift) & 0xf;
268
269 return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
270}
271
272static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV, int max_uV)
273{
274 struct max8660 *max8660 = rdev_get_drvdata(rdev);
275 u8 selector;
276 int ret;
277
278 if (min_uV < MAX8660_LDO67_MIN_UV || min_uV > MAX8660_LDO67_MAX_UV)
279 return -EINVAL;
280 if (max_uV < MAX8660_LDO67_MIN_UV || max_uV > MAX8660_LDO67_MAX_UV)
281 return -EINVAL;
282
283 selector = (min_uV - (MAX8660_LDO67_MIN_UV - MAX8660_LDO67_STEP + 1))
284 / MAX8660_LDO67_STEP;
285
286 ret = max8660_ldo67_list(rdev, selector);
287 if (ret < 0 || ret > max_uV)
288 return -EINVAL;
289
290 if (rdev_get_id(rdev) == MAX8660_V6)
291 return max8660_write(max8660, MAX8660_L12VCR, 0xf0, selector);
292 else
293 return max8660_write(max8660, MAX8660_L12VCR, 0x0f, selector << 4);
294}
295
296static struct regulator_ops max8660_ldo67_ops = {
297 .is_enabled = max8660_ldo67_is_enabled,
298 .enable = max8660_ldo67_enable,
299 .disable = max8660_ldo67_disable,
300 .list_voltage = max8660_ldo67_list,
301 .get_voltage = max8660_ldo67_get,
302 .set_voltage = max8660_ldo67_set,
303};
304
305static struct regulator_desc max8660_reg[] = {
306 {
307 .name = "V3(DCDC)",
308 .id = MAX8660_V3,
309 .ops = &max8660_dcdc_ops,
310 .type = REGULATOR_VOLTAGE,
311 .n_voltages = MAX8660_DCDC_MAX_SEL + 1,
312 .owner = THIS_MODULE,
313 },
314 {
315 .name = "V4(DCDC)",
316 .id = MAX8660_V4,
317 .ops = &max8660_dcdc_ops,
318 .type = REGULATOR_VOLTAGE,
319 .n_voltages = MAX8660_DCDC_MAX_SEL + 1,
320 .owner = THIS_MODULE,
321 },
322 {
323 .name = "V5(LDO)",
324 .id = MAX8660_V5,
325 .ops = &max8660_ldo5_ops,
326 .type = REGULATOR_VOLTAGE,
327 .n_voltages = MAX8660_LDO5_MAX_SEL + 1,
328 .owner = THIS_MODULE,
329 },
330 {
331 .name = "V6(LDO)",
332 .id = MAX8660_V6,
333 .ops = &max8660_ldo67_ops,
334 .type = REGULATOR_VOLTAGE,
335 .n_voltages = MAX8660_LDO67_MAX_SEL + 1,
336 .owner = THIS_MODULE,
337 },
338 {
339 .name = "V7(LDO)",
340 .id = MAX8660_V7,
341 .ops = &max8660_ldo67_ops,
342 .type = REGULATOR_VOLTAGE,
343 .n_voltages = MAX8660_LDO67_MAX_SEL + 1,
344 .owner = THIS_MODULE,
345 },
346};
347
348static int max8660_probe(struct i2c_client *client,
349 const struct i2c_device_id *i2c_id)
350{
351 struct regulator_dev **rdev;
352 struct max8660_platform_data *pdata = client->dev.platform_data;
353 struct max8660 *max8660;
354 int boot_on, i, id, ret = -EINVAL;
355
356 if (pdata->num_subdevs > MAX8660_V_END) {
357 dev_err(&client->dev, "Too many regulators found!\n");
358 goto out;
359 }
360
361 max8660 = kzalloc(sizeof(struct max8660) +
362 sizeof(struct regulator_dev *) * MAX8660_V_END,
363 GFP_KERNEL);
364 if (!max8660) {
365 ret = -ENOMEM;
366 goto out;
367 }
368
369 max8660->client = client;
370 rdev = max8660->rdev;
371
372 if (pdata->en34_is_high) {
373 /* Simulate always on */
374 max8660->shadow_regs[MAX8660_OVER1] = 5;
375 } else {
376 /* Otherwise devices can be toggled via software */
377 max8660_dcdc_ops.enable = max8660_dcdc_enable;
378 max8660_dcdc_ops.disable = max8660_dcdc_disable;
379 }
380
381 /*
382 * First, set up shadow registers to prevent glitches. As some
383 * registers are shared between regulators, everything must be properly
384 * set up for all regulators in advance.
385 */
386 max8660->shadow_regs[MAX8660_ADTV1] =
387 max8660->shadow_regs[MAX8660_ADTV2] =
388 max8660->shadow_regs[MAX8660_SDTV1] =
389 max8660->shadow_regs[MAX8660_SDTV2] = 0x1b;
390 max8660->shadow_regs[MAX8660_MDTV1] =
391 max8660->shadow_regs[MAX8660_MDTV2] = 0x04;
392
393 for (i = 0; i < pdata->num_subdevs; i++) {
394
395 if (!pdata->subdevs[i].platform_data)
396 goto err_free;
397
398 boot_on = pdata->subdevs[i].platform_data->constraints.boot_on;
399
400 switch (pdata->subdevs[i].id) {
401 case MAX8660_V3:
402 if (boot_on)
403 max8660->shadow_regs[MAX8660_OVER1] |= 1;
404 break;
405
406 case MAX8660_V4:
407 if (boot_on)
408 max8660->shadow_regs[MAX8660_OVER1] |= 4;
409 break;
410
411 case MAX8660_V5:
412 break;
413
414 case MAX8660_V6:
415 if (boot_on)
416 max8660->shadow_regs[MAX8660_OVER2] |= 2;
417 break;
418
419 case MAX8660_V7:
420 if (!strcmp(i2c_id->name, "max8661")) {
421 dev_err(&client->dev, "Regulator not on this chip!\n");
422 goto err_free;
423 }
424
425 if (boot_on)
426 max8660->shadow_regs[MAX8660_OVER2] |= 4;
427 break;
428
429 default:
430 dev_err(&client->dev, "invalid regulator %s\n",
431 pdata->subdevs[i].name);
432 goto err_free;
433 }
434 }
435
436 /* Finally register devices */
437 for (i = 0; i < pdata->num_subdevs; i++) {
438
439 id = pdata->subdevs[i].id;
440
441 rdev[i] = regulator_register(&max8660_reg[id], &client->dev,
442 pdata->subdevs[i].platform_data,
443 max8660);
444 if (IS_ERR(rdev[i])) {
445 ret = PTR_ERR(rdev[i]);
446 dev_err(&client->dev, "failed to register %s\n",
447 max8660_reg[id].name);
448 goto err_unregister;
449 }
450 }
451
452 i2c_set_clientdata(client, rdev);
453 dev_info(&client->dev, "Maxim 8660/8661 regulator driver loaded\n");
454 return 0;
455
456err_unregister:
457 while (--i >= 0)
458 regulator_unregister(rdev[i]);
459err_free:
460 kfree(max8660);
461out:
462 return ret;
463}
464
465static int max8660_remove(struct i2c_client *client)
466{
467 struct regulator_dev **rdev = i2c_get_clientdata(client);
468 int i;
469
470 for (i = 0; i < MAX8660_V_END; i++)
471 if (rdev[i])
472 regulator_unregister(rdev[i]);
473 kfree(rdev);
474 i2c_set_clientdata(client, NULL);
475
476 return 0;
477}
478
479static const struct i2c_device_id max8660_id[] = {
480 { "max8660", 0 },
481 { "max8661", 0 },
482 { }
483};
484MODULE_DEVICE_TABLE(i2c, max8660_id);
485
486static struct i2c_driver max8660_driver = {
487 .probe = max8660_probe,
488 .remove = max8660_remove,
489 .driver = {
490 .name = "max8660",
491 },
492 .id_table = max8660_id,
493};
494
495static int __init max8660_init(void)
496{
497 return i2c_add_driver(&max8660_driver);
498}
499subsys_initcall(max8660_init);
500
501static void __exit max8660_exit(void)
502{
503 i2c_del_driver(&max8660_driver);
504}
505module_exit(max8660_exit);
506
507/* Module information */
508MODULE_DESCRIPTION("MAXIM 8660/8661 voltage regulator driver");
509MODULE_AUTHOR("Wolfram Sang");
510MODULE_LICENSE("GPL v2");
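For context, boards describe the MAX8660/8661 to this driver through platform data. The snippet below is only an illustrative sketch: the field names mirror the pdata accesses in max8660_probe() above, but the struct type names are assumed from the driver's platform-data header, and the "vcc_core" supply name and constraint values are invented for the example.

/* hypothetical board support code */
static struct regulator_init_data max8660_v3_data = {
	.constraints = {
		.name		= "vcc_core",		/* invented name */
		.min_uV		=  725000,		/* example range only */
		.max_uV		= 1800000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
		.boot_on	= 1,			/* probe() then sets the V3 bit in OVER1 */
	},
};

static struct max8660_subdev_data max8660_subdevs[] = {	/* struct name assumed */
	{ .name = "vcc_core", .id = MAX8660_V3, .platform_data = &max8660_v3_data },
};

static struct max8660_platform_data max8660_pdata = {
	.num_subdevs	= ARRAY_SIZE(max8660_subdevs),
	.subdevs	= max8660_subdevs,
	.en34_is_high	= 0,	/* EN34 wired low: DC-DCs stay under software control */
};

This structure is then attached as the platform_data of the "max8660" (or "max8661") I2C board info entry; probe() walks subdevs[], folds each regulator's boot_on constraint into the shadow registers, and registers one regulator_dev per entry.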
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
new file mode 100644
index 000000000000..39c495300045
--- /dev/null
+++ b/drivers/regulator/mc13783-regulator.c
@@ -0,0 +1,245 @@
1/*
2 * Regulator Driver for Freescale MC13783 PMIC
3 *
4 * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/mfd/mc13783.h>
12#include <linux/regulator/machine.h>
13#include <linux/regulator/driver.h>
14#include <linux/platform_device.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/err.h>
18
19#define MC13783_REG_SWITCHERS4 28
20#define MC13783_REG_SWITCHERS4_PLLEN (1 << 18)
21
22#define MC13783_REG_SWITCHERS5 29
23#define MC13783_REG_SWITCHERS5_SW3EN (1 << 20)
24
25#define MC13783_REG_REGULATORMODE0 32
26#define MC13783_REG_REGULATORMODE0_VAUDIOEN (1 << 0)
27#define MC13783_REG_REGULATORMODE0_VIOHIEN (1 << 3)
28#define MC13783_REG_REGULATORMODE0_VIOLOEN (1 << 6)
29#define MC13783_REG_REGULATORMODE0_VDIGEN (1 << 9)
30#define MC13783_REG_REGULATORMODE0_VGENEN (1 << 12)
31#define MC13783_REG_REGULATORMODE0_VRFDIGEN (1 << 15)
32#define MC13783_REG_REGULATORMODE0_VRFREFEN (1 << 18)
33#define MC13783_REG_REGULATORMODE0_VRFCPEN (1 << 21)
34
35#define MC13783_REG_REGULATORMODE1 33
36#define MC13783_REG_REGULATORMODE1_VSIMEN (1 << 0)
37#define MC13783_REG_REGULATORMODE1_VESIMEN (1 << 3)
38#define MC13783_REG_REGULATORMODE1_VCAMEN (1 << 6)
39#define MC13783_REG_REGULATORMODE1_VRFBGEN (1 << 9)
40#define MC13783_REG_REGULATORMODE1_VVIBEN (1 << 11)
41#define MC13783_REG_REGULATORMODE1_VRF1EN (1 << 12)
42#define MC13783_REG_REGULATORMODE1_VRF2EN (1 << 15)
43#define MC13783_REG_REGULATORMODE1_VMMC1EN (1 << 18)
44#define MC13783_REG_REGULATORMODE1_VMMC2EN (1 << 21)
45
46#define MC13783_REG_POWERMISC 34
47#define MC13783_REG_POWERMISC_GPO1EN (1 << 6)
48#define MC13783_REG_POWERMISC_GPO2EN (1 << 8)
49#define MC13783_REG_POWERMISC_GPO3EN (1 << 10)
50#define MC13783_REG_POWERMISC_GPO4EN (1 << 12)
51
52struct mc13783_regulator {
53 struct regulator_desc desc;
54 int reg;
55 int enable_bit;
56};
57
58static struct regulator_ops mc13783_regulator_ops;
59
60#define MC13783_DEFINE(prefix, _name, _reg) \
61 [MC13783_ ## prefix ## _ ## _name] = { \
62 .desc = { \
63 .name = #prefix "_" #_name, \
64 .ops = &mc13783_regulator_ops, \
65 .type = REGULATOR_VOLTAGE, \
66 .id = MC13783_ ## prefix ## _ ## _name, \
67 .owner = THIS_MODULE, \
68 }, \
69 .reg = MC13783_REG_ ## _reg, \
70 .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
71 }
72
73#define MC13783_DEFINE_SW(_name, _reg) MC13783_DEFINE(SW, _name, _reg)
74#define MC13783_DEFINE_REGU(_name, _reg) MC13783_DEFINE(REGU, _name, _reg)
75
76static struct mc13783_regulator mc13783_regulators[] = {
77 MC13783_DEFINE_SW(SW3, SWITCHERS5),
78 MC13783_DEFINE_SW(PLL, SWITCHERS4),
79
80 MC13783_DEFINE_REGU(VAUDIO, REGULATORMODE0),
81 MC13783_DEFINE_REGU(VIOHI, REGULATORMODE0),
82 MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0),
83 MC13783_DEFINE_REGU(VDIG, REGULATORMODE0),
84 MC13783_DEFINE_REGU(VGEN, REGULATORMODE0),
85 MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0),
86 MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0),
87 MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0),
88 MC13783_DEFINE_REGU(VSIM, REGULATORMODE1),
89 MC13783_DEFINE_REGU(VESIM, REGULATORMODE1),
90 MC13783_DEFINE_REGU(VCAM, REGULATORMODE1),
91 MC13783_DEFINE_REGU(VRFBG, REGULATORMODE1),
92 MC13783_DEFINE_REGU(VVIB, REGULATORMODE1),
93 MC13783_DEFINE_REGU(VRF1, REGULATORMODE1),
94 MC13783_DEFINE_REGU(VRF2, REGULATORMODE1),
95 MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1),
96 MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1),
97 MC13783_DEFINE_REGU(GPO1, POWERMISC),
98 MC13783_DEFINE_REGU(GPO2, POWERMISC),
99 MC13783_DEFINE_REGU(GPO3, POWERMISC),
100 MC13783_DEFINE_REGU(GPO4, POWERMISC),
101};
102
103struct mc13783_regulator_priv {
104 struct mc13783 *mc13783;
105 struct regulator_dev *regulators[];
106};
107
108static int mc13783_regulator_enable(struct regulator_dev *rdev)
109{
110 struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
111 int id = rdev_get_id(rdev);
112 int ret;
113
114 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
115
116 mc13783_lock(priv->mc13783);
117 ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
118 mc13783_regulators[id].enable_bit,
119 mc13783_regulators[id].enable_bit);
120 mc13783_unlock(priv->mc13783);
121
122 return ret;
123}
124
125static int mc13783_regulator_disable(struct regulator_dev *rdev)
126{
127 struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
128 int id = rdev_get_id(rdev);
129 int ret;
130
131 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
132
133 mc13783_lock(priv->mc13783);
134 ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
135 mc13783_regulators[id].enable_bit, 0);
136 mc13783_unlock(priv->mc13783);
137
138 return ret;
139}
140
141static int mc13783_regulator_is_enabled(struct regulator_dev *rdev)
142{
143 struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
144 int ret, id = rdev_get_id(rdev);
145 unsigned int val;
146
147 mc13783_lock(priv->mc13783);
148 ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
149 mc13783_unlock(priv->mc13783);
150
151 if (ret)
152 return ret;
153
154 return (val & mc13783_regulators[id].enable_bit) != 0;
155}
156
157static struct regulator_ops mc13783_regulator_ops = {
158 .enable = mc13783_regulator_enable,
159 .disable = mc13783_regulator_disable,
160 .is_enabled = mc13783_regulator_is_enabled,
161};
162
163static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
164{
165 struct mc13783_regulator_priv *priv;
166 struct mc13783 *mc13783 = dev_get_drvdata(pdev->dev.parent);
167 struct mc13783_regulator_platform_data *pdata =
168 dev_get_platdata(&pdev->dev);
169 struct mc13783_regulator_init_data *init_data;
170 int i, ret;
171
172 dev_dbg(&pdev->dev, "mc13783_regulator_probe id %d\n", pdev->id);
173
174 priv = kzalloc(sizeof(*priv) +
175 pdata->num_regulators * sizeof(priv->regulators[0]),
176 GFP_KERNEL);
177 if (!priv)
178 return -ENOMEM;
179
180 priv->mc13783 = mc13783;
181
182 for (i = 0; i < pdata->num_regulators; i++) {
183 init_data = &pdata->regulators[i];
184 priv->regulators[i] = regulator_register(
185 &mc13783_regulators[init_data->id].desc,
186 &pdev->dev, init_data->init_data, priv);
187
188 if (IS_ERR(priv->regulators[i])) {
189 dev_err(&pdev->dev, "failed to register regulator %s\n",
190 mc13783_regulators[i].desc.name);
191 ret = PTR_ERR(priv->regulators[i]);
192 goto err;
193 }
194 }
195
196 platform_set_drvdata(pdev, priv);
197
198 return 0;
199err:
200 while (--i >= 0)
201 regulator_unregister(priv->regulators[i]);
202
203 kfree(priv);
204
205 return ret;
206}
207
208static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
209{
210 struct mc13783_regulator_priv *priv = platform_get_drvdata(pdev);
211 struct mc13783_regulator_platform_data *pdata =
212 dev_get_platdata(&pdev->dev);
213 int i;
214
215 for (i = 0; i < pdata->num_regulators; i++)
216 regulator_unregister(priv->regulators[i]);
217
218 return 0;
219}
220
221static struct platform_driver mc13783_regulator_driver = {
222 .driver = {
223 .name = "mc13783-regulator",
224 .owner = THIS_MODULE,
225 },
226 .remove = __devexit_p(mc13783_regulator_remove),
227 .probe = mc13783_regulator_probe,
228};
229
230static int __init mc13783_regulator_init(void)
231{
232 return platform_driver_register(&mc13783_regulator_driver);
233}
234subsys_initcall(mc13783_regulator_init);
235
236static void __exit mc13783_regulator_exit(void)
237{
238 platform_driver_unregister(&mc13783_regulator_driver);
239}
240module_exit(mc13783_regulator_exit);
241
242MODULE_LICENSE("GPL v2");
242MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
244MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC");
245MODULE_ALIAS("platform:mc13783-regulator");
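The MC13783_DEFINE() macro in this new file is a table-driven replacement for the long hand-written descriptor array in the old drivers/regulator/mc13783.c removed below. Expanding one entry by hand, MC13783_DEFINE_REGU(VAUDIO, REGULATORMODE0) becomes:

[MC13783_REGU_VAUDIO] = {
	.desc = {
		.name	= "REGU_VAUDIO",
		.ops	= &mc13783_regulator_ops,
		.type	= REGULATOR_VOLTAGE,
		.id	= MC13783_REGU_VAUDIO,
		.owner	= THIS_MODULE,
	},
	.reg		= MC13783_REG_REGULATORMODE0,
	.enable_bit	= MC13783_REG_REGULATORMODE0_VAUDIOEN,
},

which is equivalent to the old file's explicit VAUDIO entry, except that the register and enable-bit constants are now the local defines at the top of this file instead of the MC13783_REG_REGULATOR_MODE_0 / MC13783_REGCTRL_VAUDIO_EN names from mc13783-private.h.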
diff --git a/drivers/regulator/mc13783.c b/drivers/regulator/mc13783.c
deleted file mode 100644
index 710211f67449..000000000000
--- a/drivers/regulator/mc13783.c
+++ /dev/null
@@ -1,410 +0,0 @@
1/*
2 * Regulator Driver for Freescale MC13783 PMIC
3 *
4 * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/mfd/mc13783-private.h>
12#include <linux/regulator/machine.h>
13#include <linux/regulator/driver.h>
14#include <linux/platform_device.h>
15#include <linux/mfd/mc13783.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/err.h>
19
20struct mc13783_regulator {
21 struct regulator_desc desc;
22 int reg;
23 int enable_bit;
24};
25
26static struct regulator_ops mc13783_regulator_ops;
27
28static struct mc13783_regulator mc13783_regulators[] = {
29 [MC13783_SW_SW3] = {
30 .desc = {
31 .name = "SW_SW3",
32 .ops = &mc13783_regulator_ops,
33 .type = REGULATOR_VOLTAGE,
34 .id = MC13783_SW_SW3,
35 .owner = THIS_MODULE,
36 },
37 .reg = MC13783_REG_SWITCHERS_5,
38 .enable_bit = MC13783_SWCTRL_SW3_EN,
39 },
40 [MC13783_SW_PLL] = {
41 .desc = {
42 .name = "SW_PLL",
43 .ops = &mc13783_regulator_ops,
44 .type = REGULATOR_VOLTAGE,
45 .id = MC13783_SW_PLL,
46 .owner = THIS_MODULE,
47 },
48 .reg = MC13783_REG_SWITCHERS_4,
49 .enable_bit = MC13783_SWCTRL_PLL_EN,
50 },
51 [MC13783_REGU_VAUDIO] = {
52 .desc = {
53 .name = "REGU_VAUDIO",
54 .ops = &mc13783_regulator_ops,
55 .type = REGULATOR_VOLTAGE,
56 .id = MC13783_REGU_VAUDIO,
57 .owner = THIS_MODULE,
58 },
59 .reg = MC13783_REG_REGULATOR_MODE_0,
60 .enable_bit = MC13783_REGCTRL_VAUDIO_EN,
61 },
62 [MC13783_REGU_VIOHI] = {
63 .desc = {
64 .name = "REGU_VIOHI",
65 .ops = &mc13783_regulator_ops,
66 .type = REGULATOR_VOLTAGE,
67 .id = MC13783_REGU_VIOHI,
68 .owner = THIS_MODULE,
69 },
70 .reg = MC13783_REG_REGULATOR_MODE_0,
71 .enable_bit = MC13783_REGCTRL_VIOHI_EN,
72 },
73 [MC13783_REGU_VIOLO] = {
74 .desc = {
75 .name = "REGU_VIOLO",
76 .ops = &mc13783_regulator_ops,
77 .type = REGULATOR_VOLTAGE,
78 .id = MC13783_REGU_VIOLO,
79 .owner = THIS_MODULE,
80 },
81 .reg = MC13783_REG_REGULATOR_MODE_0,
82 .enable_bit = MC13783_REGCTRL_VIOLO_EN,
83 },
84 [MC13783_REGU_VDIG] = {
85 .desc = {
86 .name = "REGU_VDIG",
87 .ops = &mc13783_regulator_ops,
88 .type = REGULATOR_VOLTAGE,
89 .id = MC13783_REGU_VDIG,
90 .owner = THIS_MODULE,
91 },
92 .reg = MC13783_REG_REGULATOR_MODE_0,
93 .enable_bit = MC13783_REGCTRL_VDIG_EN,
94 },
95 [MC13783_REGU_VGEN] = {
96 .desc = {
97 .name = "REGU_VGEN",
98 .ops = &mc13783_regulator_ops,
99 .type = REGULATOR_VOLTAGE,
100 .id = MC13783_REGU_VGEN,
101 .owner = THIS_MODULE,
102 },
103 .reg = MC13783_REG_REGULATOR_MODE_0,
104 .enable_bit = MC13783_REGCTRL_VGEN_EN,
105 },
106 [MC13783_REGU_VRFDIG] = {
107 .desc = {
108 .name = "REGU_VRFDIG",
109 .ops = &mc13783_regulator_ops,
110 .type = REGULATOR_VOLTAGE,
111 .id = MC13783_REGU_VRFDIG,
112 .owner = THIS_MODULE,
113 },
114 .reg = MC13783_REG_REGULATOR_MODE_0,
115 .enable_bit = MC13783_REGCTRL_VRFDIG_EN,
116 },
117 [MC13783_REGU_VRFREF] = {
118 .desc = {
119 .name = "REGU_VRFREF",
120 .ops = &mc13783_regulator_ops,
121 .type = REGULATOR_VOLTAGE,
122 .id = MC13783_REGU_VRFREF,
123 .owner = THIS_MODULE,
124 },
125 .reg = MC13783_REG_REGULATOR_MODE_0,
126 .enable_bit = MC13783_REGCTRL_VRFREF_EN,
127 },
128 [MC13783_REGU_VRFCP] = {
129 .desc = {
130 .name = "REGU_VRFCP",
131 .ops = &mc13783_regulator_ops,
132 .type = REGULATOR_VOLTAGE,
133 .id = MC13783_REGU_VRFCP,
134 .owner = THIS_MODULE,
135 },
136 .reg = MC13783_REG_REGULATOR_MODE_0,
137 .enable_bit = MC13783_REGCTRL_VRFCP_EN,
138 },
139 [MC13783_REGU_VSIM] = {
140 .desc = {
141 .name = "REGU_VSIM",
142 .ops = &mc13783_regulator_ops,
143 .type = REGULATOR_VOLTAGE,
144 .id = MC13783_REGU_VSIM,
145 .owner = THIS_MODULE,
146 },
147 .reg = MC13783_REG_REGULATOR_MODE_1,
148 .enable_bit = MC13783_REGCTRL_VSIM_EN,
149 },
150 [MC13783_REGU_VESIM] = {
151 .desc = {
152 .name = "REGU_VESIM",
153 .ops = &mc13783_regulator_ops,
154 .type = REGULATOR_VOLTAGE,
155 .id = MC13783_REGU_VESIM,
156 .owner = THIS_MODULE,
157 },
158 .reg = MC13783_REG_REGULATOR_MODE_1,
159 .enable_bit = MC13783_REGCTRL_VESIM_EN,
160 },
161 [MC13783_REGU_VCAM] = {
162 .desc = {
163 .name = "REGU_VCAM",
164 .ops = &mc13783_regulator_ops,
165 .type = REGULATOR_VOLTAGE,
166 .id = MC13783_REGU_VCAM,
167 .owner = THIS_MODULE,
168 },
169 .reg = MC13783_REG_REGULATOR_MODE_1,
170 .enable_bit = MC13783_REGCTRL_VCAM_EN,
171 },
172 [MC13783_REGU_VRFBG] = {
173 .desc = {
174 .name = "REGU_VRFBG",
175 .ops = &mc13783_regulator_ops,
176 .type = REGULATOR_VOLTAGE,
177 .id = MC13783_REGU_VRFBG,
178 .owner = THIS_MODULE,
179 },
180 .reg = MC13783_REG_REGULATOR_MODE_1,
181 .enable_bit = MC13783_REGCTRL_VRFBG_EN,
182 },
183 [MC13783_REGU_VVIB] = {
184 .desc = {
185 .name = "REGU_VVIB",
186 .ops = &mc13783_regulator_ops,
187 .type = REGULATOR_VOLTAGE,
188 .id = MC13783_REGU_VVIB,
189 .owner = THIS_MODULE,
190 },
191 .reg = MC13783_REG_REGULATOR_MODE_1,
192 .enable_bit = MC13783_REGCTRL_VVIB_EN,
193 },
194 [MC13783_REGU_VRF1] = {
195 .desc = {
196 .name = "REGU_VRF1",
197 .ops = &mc13783_regulator_ops,
198 .type = REGULATOR_VOLTAGE,
199 .id = MC13783_REGU_VRF1,
200 .owner = THIS_MODULE,
201 },
202 .reg = MC13783_REG_REGULATOR_MODE_1,
203 .enable_bit = MC13783_REGCTRL_VRF1_EN,
204 },
205 [MC13783_REGU_VRF2] = {
206 .desc = {
207 .name = "REGU_VRF2",
208 .ops = &mc13783_regulator_ops,
209 .type = REGULATOR_VOLTAGE,
210 .id = MC13783_REGU_VRF2,
211 .owner = THIS_MODULE,
212 },
213 .reg = MC13783_REG_REGULATOR_MODE_1,
214 .enable_bit = MC13783_REGCTRL_VRF2_EN,
215 },
216 [MC13783_REGU_VMMC1] = {
217 .desc = {
218 .name = "REGU_VMMC1",
219 .ops = &mc13783_regulator_ops,
220 .type = REGULATOR_VOLTAGE,
221 .id = MC13783_REGU_VMMC1,
222 .owner = THIS_MODULE,
223 },
224 .reg = MC13783_REG_REGULATOR_MODE_1,
225 .enable_bit = MC13783_REGCTRL_VMMC1_EN,
226 },
227 [MC13783_REGU_VMMC2] = {
228 .desc = {
229 .name = "REGU_VMMC2",
230 .ops = &mc13783_regulator_ops,
231 .type = REGULATOR_VOLTAGE,
232 .id = MC13783_REGU_VMMC2,
233 .owner = THIS_MODULE,
234 },
235 .reg = MC13783_REG_REGULATOR_MODE_1,
236 .enable_bit = MC13783_REGCTRL_VMMC2_EN,
237 },
238 [MC13783_REGU_GPO1] = {
239 .desc = {
240 .name = "REGU_GPO1",
241 .ops = &mc13783_regulator_ops,
242 .type = REGULATOR_VOLTAGE,
243 .id = MC13783_REGU_GPO1,
244 .owner = THIS_MODULE,
245 },
246 .reg = MC13783_REG_POWER_MISCELLANEOUS,
247 .enable_bit = MC13783_REGCTRL_GPO1_EN,
248 },
249 [MC13783_REGU_GPO2] = {
250 .desc = {
251 .name = "REGU_GPO2",
252 .ops = &mc13783_regulator_ops,
253 .type = REGULATOR_VOLTAGE,
254 .id = MC13783_REGU_GPO2,
255 .owner = THIS_MODULE,
256 },
257 .reg = MC13783_REG_POWER_MISCELLANEOUS,
258 .enable_bit = MC13783_REGCTRL_GPO2_EN,
259 },
260 [MC13783_REGU_GPO3] = {
261 .desc = {
262 .name = "REGU_GPO3",
263 .ops = &mc13783_regulator_ops,
264 .type = REGULATOR_VOLTAGE,
265 .id = MC13783_REGU_GPO3,
266 .owner = THIS_MODULE,
267 },
268 .reg = MC13783_REG_POWER_MISCELLANEOUS,
269 .enable_bit = MC13783_REGCTRL_GPO3_EN,
270 },
271 [MC13783_REGU_GPO4] = {
272 .desc = {
273 .name = "REGU_GPO4",
274 .ops = &mc13783_regulator_ops,
275 .type = REGULATOR_VOLTAGE,
276 .id = MC13783_REGU_GPO4,
277 .owner = THIS_MODULE,
278 },
279 .reg = MC13783_REG_POWER_MISCELLANEOUS,
280 .enable_bit = MC13783_REGCTRL_GPO4_EN,
281 },
282};
283
284struct mc13783_priv {
285 struct regulator_desc desc[ARRAY_SIZE(mc13783_regulators)];
286 struct mc13783 *mc13783;
287 struct regulator_dev *regulators[0];
288};
289
290static int mc13783_enable(struct regulator_dev *rdev)
291{
292 struct mc13783_priv *priv = rdev_get_drvdata(rdev);
293 int id = rdev_get_id(rdev);
294
295 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
296
297 return mc13783_set_bits(priv->mc13783, mc13783_regulators[id].reg,
298 mc13783_regulators[id].enable_bit,
299 mc13783_regulators[id].enable_bit);
300}
301
302static int mc13783_disable(struct regulator_dev *rdev)
303{
304 struct mc13783_priv *priv = rdev_get_drvdata(rdev);
305 int id = rdev_get_id(rdev);
306
307 dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
308
309 return mc13783_set_bits(priv->mc13783, mc13783_regulators[id].reg,
310 mc13783_regulators[id].enable_bit, 0);
311}
312
313static int mc13783_is_enabled(struct regulator_dev *rdev)
314{
315 struct mc13783_priv *priv = rdev_get_drvdata(rdev);
316 int ret, id = rdev_get_id(rdev);
317 unsigned int val;
318
319 ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
320 if (ret)
321 return ret;
322
323 return (val & mc13783_regulators[id].enable_bit) != 0;
324}
325
326static struct regulator_ops mc13783_regulator_ops = {
327 .enable = mc13783_enable,
328 .disable = mc13783_disable,
329 .is_enabled = mc13783_is_enabled,
330};
331
332static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
333{
334 struct mc13783_priv *priv;
335 struct mc13783 *mc13783 = dev_get_drvdata(pdev->dev.parent);
336 struct mc13783_regulator_init_data *init_data;
337 int i, ret;
338
339 dev_dbg(&pdev->dev, "mc13783_regulator_probe id %d\n", pdev->id);
340
341 priv = kzalloc(sizeof(*priv) + mc13783->num_regulators * sizeof(void *),
342 GFP_KERNEL);
343 if (!priv)
344 return -ENOMEM;
345
346 priv->mc13783 = mc13783;
347
348 for (i = 0; i < mc13783->num_regulators; i++) {
349 init_data = &mc13783->regulators[i];
350 priv->regulators[i] = regulator_register(
351 &mc13783_regulators[init_data->id].desc,
352 &pdev->dev, init_data->init_data, priv);
353
354 if (IS_ERR(priv->regulators[i])) {
355 dev_err(&pdev->dev, "failed to register regulator %s\n",
356 mc13783_regulators[i].desc.name);
357 ret = PTR_ERR(priv->regulators[i]);
358 goto err;
359 }
360 }
361
362 platform_set_drvdata(pdev, priv);
363
364 return 0;
365err:
366 while (--i >= 0)
367 regulator_unregister(priv->regulators[i]);
368
369 kfree(priv);
370
371 return ret;
372}
373
374static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
375{
376 struct mc13783_priv *priv = platform_get_drvdata(pdev);
377 struct mc13783 *mc13783 = priv->mc13783;
378 int i;
379
380 for (i = 0; i < mc13783->num_regulators; i++)
381 regulator_unregister(priv->regulators[i]);
382
383 return 0;
384}
385
386static struct platform_driver mc13783_regulator_driver = {
387 .driver = {
388 .name = "mc13783-regulator",
389 .owner = THIS_MODULE,
390 },
391 .remove = __devexit_p(mc13783_regulator_remove),
392};
393
394static int __init mc13783_regulator_init(void)
395{
396 return platform_driver_probe(&mc13783_regulator_driver,
397 mc13783_regulator_probe);
398}
399subsys_initcall(mc13783_regulator_init);
400
401static void __exit mc13783_regulator_exit(void)
402{
403 platform_driver_unregister(&mc13783_regulator_driver);
404}
405module_exit(mc13783_regulator_exit);
406
407MODULE_LICENSE("GPL");
408MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
409MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC");
410MODULE_ALIAS("platform:mc13783-regulator");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 7ea1c3a31081..7e674859bd59 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/delay.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
16#include <linux/regulator/driver.h> 17#include <linux/regulator/driver.h>
17#include <linux/regulator/machine.h> 18#include <linux/regulator/machine.h>
@@ -40,6 +41,12 @@ struct twlreg_info {
40 u8 table_len; 41 u8 table_len;
41 const u16 *table; 42 const u16 *table;
42 43
44 /* regulator specific turn-on delay */
45 u16 delay;
46
47 /* State REMAP default configuration */
48 u8 remap;
49
43 /* chip constraints on regulator behavior */ 50 /* chip constraints on regulator behavior */
44 u16 min_mV; 51 u16 min_mV;
45 52
@@ -128,6 +135,7 @@ static int twlreg_enable(struct regulator_dev *rdev)
128{ 135{
129 struct twlreg_info *info = rdev_get_drvdata(rdev); 136 struct twlreg_info *info = rdev_get_drvdata(rdev);
130 int grp; 137 int grp;
138 int ret;
131 139
132 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP); 140 grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
133 if (grp < 0) 141 if (grp < 0)
@@ -138,7 +146,11 @@ static int twlreg_enable(struct regulator_dev *rdev)
138 else 146 else
139 grp |= P1_GRP_6030; 147 grp |= P1_GRP_6030;
140 148
141 return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); 149 ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
150
151 udelay(info->delay);
152
153 return ret;
142} 154}
143 155
144static int twlreg_disable(struct regulator_dev *rdev) 156static int twlreg_disable(struct regulator_dev *rdev)
@@ -151,9 +163,9 @@ static int twlreg_disable(struct regulator_dev *rdev)
151 return grp; 163 return grp;
152 164
153 if (twl_class_is_4030()) 165 if (twl_class_is_4030())
154 grp &= ~P1_GRP_4030; 166 grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
155 else 167 else
156 grp &= ~P1_GRP_6030; 168 grp &= ~(P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030);
157 169
158 return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); 170 return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
159} 171}
@@ -294,6 +306,18 @@ static const u16 VSIM_VSEL_table[] = {
294static const u16 VDAC_VSEL_table[] = { 306static const u16 VDAC_VSEL_table[] = {
295 1200, 1300, 1800, 1800, 307 1200, 1300, 1800, 1800,
296}; 308};
309static const u16 VDD1_VSEL_table[] = {
310 800, 1450,
311};
312static const u16 VDD2_VSEL_table[] = {
313 800, 1450, 1500,
314};
315static const u16 VIO_VSEL_table[] = {
316 1800, 1850,
317};
318static const u16 VINTANA2_VSEL_table[] = {
319 2500, 2750,
320};
297static const u16 VAUX1_6030_VSEL_table[] = { 321static const u16 VAUX1_6030_VSEL_table[] = {
298 1000, 1300, 1800, 2500, 322 1000, 1300, 1800, 2500,
299 2800, 2900, 3000, 3000, 323 2800, 2900, 3000, 3000,
@@ -414,20 +438,30 @@ static struct regulator_ops twlfixed_ops = {
414 438
415/*----------------------------------------------------------------------*/ 439/*----------------------------------------------------------------------*/
416 440
417#define TWL4030_ADJUSTABLE_LDO(label, offset, num) \ 441#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \
418 TWL_ADJUSTABLE_LDO(label, offset, num, TWL4030) 442 TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
419#define TWL4030_FIXED_LDO(label, offset, mVolts, num) \ 443 remap_conf, TWL4030)
420 TWL_FIXED_LDO(label, offset, mVolts, num, TWL4030) 444#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
421#define TWL6030_ADJUSTABLE_LDO(label, offset, num) \ 445 remap_conf) \
422 TWL_ADJUSTABLE_LDO(label, offset, num, TWL6030) 446 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
423#define TWL6030_FIXED_LDO(label, offset, mVolts, num) \ 447 remap_conf, TWL4030)
424 TWL_FIXED_LDO(label, offset, mVolts, num, TWL6030) 448#define TWL6030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
425 449 remap_conf) \
426#define TWL_ADJUSTABLE_LDO(label, offset, num, family) { \ 450 TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
451 remap_conf, TWL6030)
452#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
453 remap_conf) \
454 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
455 remap_conf, TWL6030)
456
457#define TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf, \
458 family) { \
427 .base = offset, \ 459 .base = offset, \
428 .id = num, \ 460 .id = num, \
429 .table_len = ARRAY_SIZE(label##_VSEL_table), \ 461 .table_len = ARRAY_SIZE(label##_VSEL_table), \
430 .table = label##_VSEL_table, \ 462 .table = label##_VSEL_table, \
463 .delay = turnon_delay, \
464 .remap = remap_conf, \
431 .desc = { \ 465 .desc = { \
432 .name = #label, \ 466 .name = #label, \
433 .id = family##_REG_##label, \ 467 .id = family##_REG_##label, \
@@ -438,10 +472,13 @@ static struct regulator_ops twlfixed_ops = {
438 }, \ 472 }, \
439 } 473 }
440 474
441#define TWL_FIXED_LDO(label, offset, mVolts, num, family) { \ 475#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
476 family) { \
442 .base = offset, \ 477 .base = offset, \
443 .id = num, \ 478 .id = num, \
444 .min_mV = mVolts, \ 479 .min_mV = mVolts, \
480 .delay = turnon_delay, \
481 .remap = remap_conf, \
445 .desc = { \ 482 .desc = { \
446 .name = #label, \ 483 .name = #label, \
447 .id = family##_REG_##label, \ 484 .id = family##_REG_##label, \
@@ -457,43 +494,41 @@ static struct regulator_ops twlfixed_ops = {
457 * software control over them after boot. 494 * software control over them after boot.
458 */ 495 */
459static struct twlreg_info twl_regs[] = { 496static struct twlreg_info twl_regs[] = {
460 TWL4030_ADJUSTABLE_LDO(VAUX1, 0x17, 1), 497 TWL4030_ADJUSTABLE_LDO(VAUX1, 0x17, 1, 100, 0x08),
461 TWL4030_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2), 498 TWL4030_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2, 100, 0x08),
462 TWL4030_ADJUSTABLE_LDO(VAUX2, 0x1b, 2), 499 TWL4030_ADJUSTABLE_LDO(VAUX2, 0x1b, 2, 100, 0x08),
463 TWL4030_ADJUSTABLE_LDO(VAUX3, 0x1f, 3), 500 TWL4030_ADJUSTABLE_LDO(VAUX3, 0x1f, 3, 100, 0x08),
464 TWL4030_ADJUSTABLE_LDO(VAUX4, 0x23, 4), 501 TWL4030_ADJUSTABLE_LDO(VAUX4, 0x23, 4, 100, 0x08),
465 TWL4030_ADJUSTABLE_LDO(VMMC1, 0x27, 5), 502 TWL4030_ADJUSTABLE_LDO(VMMC1, 0x27, 5, 100, 0x08),
466 TWL4030_ADJUSTABLE_LDO(VMMC2, 0x2b, 6), 503 TWL4030_ADJUSTABLE_LDO(VMMC2, 0x2b, 6, 100, 0x08),
467 /* 504 TWL4030_ADJUSTABLE_LDO(VPLL1, 0x2f, 7, 100, 0x00),
468 TWL4030_ADJUSTABLE_LDO(VPLL1, 0x2f, 7), 505 TWL4030_ADJUSTABLE_LDO(VPLL2, 0x33, 8, 100, 0x08),
469 */ 506 TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00),
470 TWL4030_ADJUSTABLE_LDO(VPLL2, 0x33, 8), 507 TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08),
471 TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9), 508 TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08),
472 TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10), 509 TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
473 /* 510 TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
474 TWL4030_ADJUSTABLE_LDO(VINTANA1, 0x3f, 11), 511 TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
475 TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12), 512 TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
476 TWL4030_ADJUSTABLE_LDO(VINTDIG, 0x47, 13), 513 TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
477 TWL4030_SMPS(VIO, 0x4b, 14), 514 TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
478 TWL4030_SMPS(VDD1, 0x55, 15), 515 TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
479 TWL4030_SMPS(VDD2, 0x63, 16), 516 TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
480 */
481 TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17),
482 TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18),
483 TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19),
484 /* VUSBCP is managed *only* by the USB subchip */ 517 /* VUSBCP is managed *only* by the USB subchip */
485 518
486 /* 6030 REG with base as PMC Slave Misc : 0x0030 */ 519 /* 6030 REG with base as PMC Slave Misc : 0x0030 */
487 TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1), 520 /* Turnon-delay and remap configuration values for 6030 are not
488 TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2), 521 verified since the specification is not public */
489 TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3), 522 TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x08),
490 TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4), 523 TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x08),
491 TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5), 524 TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x08),
492 TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7), 525 TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x08),
493 TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15), 526 TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x08),
494 TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16), 527 TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x08),
495 TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17), 528 TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x08),
496 TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18) 529 TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x08),
530 TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x08),
531 TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x08)
497}; 532};
498 533
499static int twlreg_probe(struct platform_device *pdev) 534static int twlreg_probe(struct platform_device *pdev)
@@ -525,6 +560,19 @@ static int twlreg_probe(struct platform_device *pdev)
525 c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE 560 c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE
526 | REGULATOR_CHANGE_MODE 561 | REGULATOR_CHANGE_MODE
527 | REGULATOR_CHANGE_STATUS; 562 | REGULATOR_CHANGE_STATUS;
563 switch (pdev->id) {
564 case TWL4030_REG_VIO:
565 case TWL4030_REG_VDD1:
566 case TWL4030_REG_VDD2:
567 case TWL4030_REG_VPLL1:
568 case TWL4030_REG_VINTANA1:
569 case TWL4030_REG_VINTANA2:
570 case TWL4030_REG_VINTDIG:
571 c->always_on = true;
572 break;
573 default:
574 break;
575 }
528 576
529 rdev = regulator_register(&info->desc, &pdev->dev, initdata, info); 577 rdev = regulator_register(&info->desc, &pdev->dev, initdata, info);
530 if (IS_ERR(rdev)) { 578 if (IS_ERR(rdev)) {
@@ -534,6 +582,9 @@ static int twlreg_probe(struct platform_device *pdev)
534 } 582 }
535 platform_set_drvdata(pdev, rdev); 583 platform_set_drvdata(pdev, rdev);
536 584
585 twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
586 info->remap);
587
537 /* NOTE: many regulators support short-circuit IRQs (presentable 588 /* NOTE: many regulators support short-circuit IRQs (presentable
538 * as REGULATOR_OVER_CURRENT notifications?) configured via: 589 * as REGULATOR_OVER_CURRENT notifications?) configured via:
539 * - SC_CONFIG 590 * - SC_CONFIG
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 2eefc1a0cf08..0a6577577e8d 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -19,6 +19,8 @@
19#include <linux/i2c.h> 19#include <linux/i2c.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/regulator/driver.h> 21#include <linux/regulator/driver.h>
22#include <linux/regulator/machine.h>
23#include <linux/gpio.h>
22 24
23#include <linux/mfd/wm831x/core.h> 25#include <linux/mfd/wm831x/core.h>
24#include <linux/mfd/wm831x/regulator.h> 26#include <linux/mfd/wm831x/regulator.h>
@@ -39,6 +41,7 @@
39#define WM831X_DCDC_CONTROL_2 1 41#define WM831X_DCDC_CONTROL_2 1
40#define WM831X_DCDC_ON_CONFIG 2 42#define WM831X_DCDC_ON_CONFIG 2
41#define WM831X_DCDC_SLEEP_CONTROL 3 43#define WM831X_DCDC_SLEEP_CONTROL 3
44#define WM831X_DCDC_DVS_CONTROL 4
42 45
43/* 46/*
44 * Shared 47 * Shared
@@ -50,6 +53,10 @@ struct wm831x_dcdc {
50 int base; 53 int base;
51 struct wm831x *wm831x; 54 struct wm831x *wm831x;
52 struct regulator_dev *regulator; 55 struct regulator_dev *regulator;
56 int dvs_gpio;
57 int dvs_gpio_state;
58 int on_vsel;
59 int dvs_vsel;
53}; 60};
54 61
55static int wm831x_dcdc_is_enabled(struct regulator_dev *rdev) 62static int wm831x_dcdc_is_enabled(struct regulator_dev *rdev)
@@ -240,11 +247,9 @@ static int wm831x_buckv_list_voltage(struct regulator_dev *rdev,
240 return -EINVAL; 247 return -EINVAL;
241} 248}
242 249
243static int wm831x_buckv_set_voltage_int(struct regulator_dev *rdev, int reg, 250static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev,
244 int min_uV, int max_uV) 251 int min_uV, int max_uV)
245{ 252{
246 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
247 struct wm831x *wm831x = dcdc->wm831x;
248 u16 vsel; 253 u16 vsel;
249 254
250 if (min_uV < 600000) 255 if (min_uV < 600000)
@@ -257,39 +262,126 @@ static int wm831x_buckv_set_voltage_int(struct regulator_dev *rdev, int reg,
257 if (wm831x_buckv_list_voltage(rdev, vsel) > max_uV) 262 if (wm831x_buckv_list_voltage(rdev, vsel) > max_uV)
258 return -EINVAL; 263 return -EINVAL;
259 264
260 return wm831x_set_bits(wm831x, reg, WM831X_DC1_ON_VSEL_MASK, vsel); 265 return vsel;
266}
267
268static int wm831x_buckv_select_max_voltage(struct regulator_dev *rdev,
269 int min_uV, int max_uV)
270{
271 u16 vsel;
272
273 if (max_uV < 600000 || max_uV > 1800000)
274 return -EINVAL;
275
276 vsel = ((max_uV - 600000) / 12500) + 8;
277
278 if (wm831x_buckv_list_voltage(rdev, vsel) < min_uV ||
279 wm831x_buckv_list_voltage(rdev, vsel) < max_uV)
280 return -EINVAL;
281
282 return vsel;
283}
284
285static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
286{
287 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
288
289 if (state == dcdc->dvs_gpio_state)
290 return 0;
291
292 dcdc->dvs_gpio_state = state;
293 gpio_set_value(dcdc->dvs_gpio, state);
294
295	/* We should wait for the DVS state change to be asserted if we
296	 * have a status GPIO for it; for now, assume the device is
297	 * configured for the fastest possible transition.
298	 */
299
300 return 0;
261} 301}
262 302
263static int wm831x_buckv_set_voltage(struct regulator_dev *rdev, 303static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
264 int min_uV, int max_uV) 304 int min_uV, int max_uV)
265{ 305{
266 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); 306 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
267 u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; 307 struct wm831x *wm831x = dcdc->wm831x;
308 int on_reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
309 int dvs_reg = dcdc->base + WM831X_DCDC_DVS_CONTROL;
310 int vsel, ret;
311
312 vsel = wm831x_buckv_select_min_voltage(rdev, min_uV, max_uV);
313 if (vsel < 0)
314 return vsel;
315
316 /* If this value is already set then do a GPIO update if we can */
317 if (dcdc->dvs_gpio && dcdc->on_vsel == vsel)
318 return wm831x_buckv_set_dvs(rdev, 0);
319
320 if (dcdc->dvs_gpio && dcdc->dvs_vsel == vsel)
321 return wm831x_buckv_set_dvs(rdev, 1);
322
323 /* Always set the ON status to the minimum voltage */
324 ret = wm831x_set_bits(wm831x, on_reg, WM831X_DC1_ON_VSEL_MASK, vsel);
325 if (ret < 0)
326 return ret;
327 dcdc->on_vsel = vsel;
328
329 if (!dcdc->dvs_gpio)
330 return ret;
331
332 /* Kick the voltage transition now */
333 ret = wm831x_buckv_set_dvs(rdev, 0);
334 if (ret < 0)
335 return ret;
336
337	/* Set the high voltage as the DVS voltage. This is optimised
338	 * for CPUfreq usage; most processors will keep the maximum
339	 * voltage constant and lower the minimum with the frequency. */
340 vsel = wm831x_buckv_select_max_voltage(rdev, min_uV, max_uV);
341 if (vsel < 0) {
342 /* This should never happen - at worst the same vsel
343 * should be chosen */
344 WARN_ON(vsel < 0);
345 return 0;
346 }
347
348 /* Don't bother if it's the same VSEL we're already using */
349 if (vsel == dcdc->on_vsel)
350 return 0;
268 351
269 return wm831x_buckv_set_voltage_int(rdev, reg, min_uV, max_uV); 352 ret = wm831x_set_bits(wm831x, dvs_reg, WM831X_DC1_DVS_VSEL_MASK, vsel);
353 if (ret == 0)
354 dcdc->dvs_vsel = vsel;
355 else
356 dev_warn(wm831x->dev, "Failed to set DCDC DVS VSEL: %d\n",
357 ret);
358
359 return 0;
270} 360}
271 361
272static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev, 362static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev,
273 int uV) 363 int uV)
274{ 364{
275 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); 365 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
366 struct wm831x *wm831x = dcdc->wm831x;
276 u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; 367 u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
368 int vsel;
369
370 vsel = wm831x_buckv_select_min_voltage(rdev, uV, uV);
371 if (vsel < 0)
372 return vsel;
277 373
278 return wm831x_buckv_set_voltage_int(rdev, reg, uV, uV); 374 return wm831x_set_bits(wm831x, reg, WM831X_DC1_SLP_VSEL_MASK, vsel);
279} 375}
280 376
281static int wm831x_buckv_get_voltage(struct regulator_dev *rdev) 377static int wm831x_buckv_get_voltage(struct regulator_dev *rdev)
282{ 378{
283 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); 379 struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
284 struct wm831x *wm831x = dcdc->wm831x;
285 u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
286 int val;
287 380
288 val = wm831x_reg_read(wm831x, reg); 381 if (dcdc->dvs_gpio && dcdc->dvs_gpio_state)
289 if (val < 0) 382 return wm831x_buckv_list_voltage(rdev, dcdc->dvs_vsel);
290 return val; 383 else
291 384 return wm831x_buckv_list_voltage(rdev, dcdc->on_vsel);
292 return wm831x_buckv_list_voltage(rdev, val & WM831X_DC1_ON_VSEL_MASK);
293} 385}
294 386
295/* Current limit options */ 387/* Current limit options */
@@ -346,6 +438,64 @@ static struct regulator_ops wm831x_buckv_ops = {
346 .set_suspend_mode = wm831x_dcdc_set_suspend_mode, 438 .set_suspend_mode = wm831x_dcdc_set_suspend_mode,
347}; 439};
348 440
441/*
442 * Set up DVS control. We just log errors since we can still run
443 * (with reduced performance) if we fail.
444 */
445static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
446 struct wm831x_buckv_pdata *pdata)
447{
448 struct wm831x *wm831x = dcdc->wm831x;
449 int ret;
450 u16 ctrl;
451
452 if (!pdata || !pdata->dvs_gpio)
453 return;
454
455 switch (pdata->dvs_control_src) {
456 case 1:
457 ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT;
458 break;
459 case 2:
460 ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT;
461 break;
462 default:
463 dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n",
464 pdata->dvs_control_src, dcdc->name);
465 return;
466 }
467
468 ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL,
469 WM831X_DC1_DVS_SRC_MASK, ctrl);
470 if (ret < 0) {
471 dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n",
472 dcdc->name, ret);
473 return;
474 }
475
476 ret = gpio_request(pdata->dvs_gpio, "DCDC DVS");
477 if (ret < 0) {
478 dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n",
479 dcdc->name, ret);
480 return;
481 }
482
483	/* gpiolib won't let us read back the current GPIO state, so
484	 * take the initial DVS state from platform data instead.
485	 */
486 dcdc->dvs_gpio_state = pdata->dvs_init_state;
487
488 ret = gpio_direction_output(pdata->dvs_gpio, dcdc->dvs_gpio_state);
489 if (ret < 0) {
490 dev_err(wm831x->dev, "Failed to enable %s DVS GPIO: %d\n",
491 dcdc->name, ret);
492 gpio_free(pdata->dvs_gpio);
493 return;
494 }
495
496 dcdc->dvs_gpio = pdata->dvs_gpio;
497}
498
349static __devinit int wm831x_buckv_probe(struct platform_device *pdev) 499static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
350{ 500{
351 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); 501 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
@@ -384,6 +534,23 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
384 dcdc->desc.ops = &wm831x_buckv_ops; 534 dcdc->desc.ops = &wm831x_buckv_ops;
385 dcdc->desc.owner = THIS_MODULE; 535 dcdc->desc.owner = THIS_MODULE;
386 536
537 ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
538 if (ret < 0) {
539 dev_err(wm831x->dev, "Failed to read ON VSEL: %d\n", ret);
540 goto err;
541 }
542 dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK;
543
544	ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL);
545 if (ret < 0) {
546 dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret);
547 goto err;
548 }
549 dcdc->dvs_vsel = ret & WM831X_DC1_DVS_VSEL_MASK;
550
551 if (pdata->dcdc[id])
552 wm831x_buckv_dvs_init(dcdc, pdata->dcdc[id]->driver_data);
553
387 dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev, 554 dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
388 pdata->dcdc[id], dcdc); 555 pdata->dcdc[id], dcdc);
389 if (IS_ERR(dcdc->regulator)) { 556 if (IS_ERR(dcdc->regulator)) {
@@ -422,6 +589,8 @@ err_uv:
422err_regulator: 589err_regulator:
423 regulator_unregister(dcdc->regulator); 590 regulator_unregister(dcdc->regulator);
424err: 591err:
592 if (dcdc->dvs_gpio)
593 gpio_free(dcdc->dvs_gpio);
425 kfree(dcdc); 594 kfree(dcdc);
426 return ret; 595 return ret;
427} 596}
@@ -434,6 +603,8 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
434 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc); 603 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc);
435 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); 604 wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
436 regulator_unregister(dcdc->regulator); 605 regulator_unregister(dcdc->regulator);
606 if (dcdc->dvs_gpio)
607 gpio_free(dcdc->dvs_gpio);
437 kfree(dcdc); 608 kfree(dcdc);
438 609
439 return 0; 610 return 0;
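As a rough worked example of the new DVS path (hypothetical request values, arithmetic only): with a DVS GPIO configured, a cpufreq-style call of wm831x_buckv_set_voltage(rdev, 1000000, 1200000) ends up programming two selectors. Both values sit exactly on a 12.5 mV step, so the selection formula shown in wm831x_buckv_select_max_voltage() gives:

	on_vsel  = ((1000000 - 600000) / 12500) + 8 = 40;	/* requested minimum -> ON_CONFIG   */
	dvs_vsel = ((1200000 - 600000) / 12500) + 8 = 56;	/* requested maximum -> DVS_CONTROL */

The ON selector always tracks the requested minimum while the DVS selector is parked at the requested maximum, so later transitions between these two operating points are served by toggling the DVS GPIO in wm831x_buckv_set_dvs() rather than by another register write.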
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 902db56ce099..61e02ac2fda3 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -470,7 +470,7 @@ static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev)
470 struct wm831x_ldo *ldo = rdev_get_drvdata(rdev); 470 struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
471 struct wm831x *wm831x = ldo->wm831x; 471 struct wm831x *wm831x = ldo->wm831x;
472 int on_reg = ldo->base + WM831X_LDO_ON_CONTROL; 472 int on_reg = ldo->base + WM831X_LDO_ON_CONTROL;
473 unsigned int ret; 473 int ret;
474 474
475 ret = wm831x_reg_read(wm831x, on_reg); 475 ret = wm831x_reg_read(wm831x, on_reg);
476 if (ret < 0) 476 if (ret < 0)
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index eb154dc57164..c8c12325e69b 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -686,7 +686,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
686 */ 686 */
687#if defined(CONFIG_ATARI) 687#if defined(CONFIG_ATARI)
688 address_space = 64; 688 address_space = 64;
689#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__sparc__) 689#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
690 || defined(__sparc__) || defined(__mips__)
690 address_space = 128; 691 address_space = 128;
691#else 692#else
692#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes. 693#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 259db7f3535b..9630e7d3314e 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -778,6 +778,8 @@ static int __devinit ds1305_probe(struct spi_device *spi)
778 spi->irq, status); 778 spi->irq, status);
779 goto fail1; 779 goto fail1;
780 } 780 }
781
782 device_set_wakeup_capable(&spi->dev, 1);
781 } 783 }
782 784
783 /* export NVRAM */ 785 /* export NVRAM */
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 8a99da6f2f24..c4ec5c158aa1 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -881,6 +881,8 @@ read_rtc:
881 "unable to request IRQ!\n"); 881 "unable to request IRQ!\n");
882 goto exit_irq; 882 goto exit_irq;
883 } 883 }
884
885 device_set_wakeup_capable(&client->dev, 1);
884 set_bit(HAS_ALARM, &ds1307->flags); 886 set_bit(HAS_ALARM, &ds1307->flags);
885 dev_dbg(&client->dev, "got IRQ %d\n", client->irq); 887 dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
886 } 888 }
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 713f7bf5afb3..5317bbcbc7a0 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -383,6 +383,8 @@ static int ds1374_probe(struct i2c_client *client,
383 dev_err(&client->dev, "unable to request IRQ\n"); 383 dev_err(&client->dev, "unable to request IRQ\n");
384 goto out_free; 384 goto out_free;
385 } 385 }
386
387 device_set_wakeup_capable(&client->dev, 1);
386 } 388 }
387 389
388 ds1374->rtc = rtc_device_register(client->name, &client->dev, 390 ds1374->rtc = rtc_device_register(client->name, &client->dev,
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index fd1231738ef4..148b1dd24070 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -218,7 +218,7 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
218 spin_unlock_irqrestore(&aliastree.lock, flags); 218 spin_unlock_irqrestore(&aliastree.lock, flags);
219 newlcu = _allocate_lcu(uid); 219 newlcu = _allocate_lcu(uid);
220 if (IS_ERR(newlcu)) 220 if (IS_ERR(newlcu))
221 return PTR_ERR(lcu); 221 return PTR_ERR(newlcu);
222 spin_lock_irqsave(&aliastree.lock, flags); 222 spin_lock_irqsave(&aliastree.lock, flags);
223 lcu = _find_lcu(server, uid); 223 lcu = _find_lcu(server, uid);
224 if (!lcu) { 224 if (!lcu) {
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index f64d0db881b4..6e14863f5c70 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,7 +8,7 @@
8 * 8 *
9 */ 9 */
10 10
11#define KMSG_COMPONENT "dasd-diag" 11#define KMSG_COMPONENT "dasd"
12 12
13#include <linux/stddef.h> 13#include <linux/stddef.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
@@ -146,16 +146,16 @@ dasd_diag_erp(struct dasd_device *device)
146 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL); 146 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
147 if (rc == 4) { 147 if (rc == 4) {
148 if (!(device->features & DASD_FEATURE_READONLY)) { 148 if (!(device->features & DASD_FEATURE_READONLY)) {
149 dev_warn(&device->cdev->dev, 149 pr_warning("%s: The access mode of a DIAG device "
150 "The access mode of a DIAG device changed" 150 "changed to read-only\n",
151 " to read-only"); 151 dev_name(&device->cdev->dev));
152 device->features |= DASD_FEATURE_READONLY; 152 device->features |= DASD_FEATURE_READONLY;
153 } 153 }
154 rc = 0; 154 rc = 0;
155 } 155 }
156 if (rc) 156 if (rc)
157 dev_warn(&device->cdev->dev, "DIAG ERP failed with " 157 pr_warning("%s: DIAG ERP failed with "
158 "rc=%d\n", rc); 158 "rc=%d\n", dev_name(&device->cdev->dev), rc);
159} 159}
160 160
161/* Start a given request at the device. Return zero on success, non-zero 161/* Start a given request at the device. Return zero on success, non-zero
@@ -371,8 +371,9 @@ dasd_diag_check_device(struct dasd_device *device)
371 private->pt_block = 2; 371 private->pt_block = 2;
372 break; 372 break;
373 default: 373 default:
374 dev_warn(&device->cdev->dev, "Device type %d is not supported " 374 pr_warning("%s: Device type %d is not supported "
375 "in DIAG mode\n", private->rdc_data.vdev_class); 375 "in DIAG mode\n", dev_name(&device->cdev->dev),
376 private->rdc_data.vdev_class);
376 rc = -EOPNOTSUPP; 377 rc = -EOPNOTSUPP;
377 goto out; 378 goto out;
378 } 379 }
@@ -413,8 +414,8 @@ dasd_diag_check_device(struct dasd_device *device)
413 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT; 414 private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
414 rc = dia250(&private->iob, RW_BIO); 415 rc = dia250(&private->iob, RW_BIO);
415 if (rc == 3) { 416 if (rc == 3) {
416 dev_warn(&device->cdev->dev, 417 pr_warning("%s: A 64-bit DIAG call failed\n",
417 "A 64-bit DIAG call failed\n"); 418 dev_name(&device->cdev->dev));
418 rc = -EOPNOTSUPP; 419 rc = -EOPNOTSUPP;
419 goto out_label; 420 goto out_label;
420 } 421 }
@@ -423,8 +424,9 @@ dasd_diag_check_device(struct dasd_device *device)
423 break; 424 break;
424 } 425 }
425 if (bsize > PAGE_SIZE) { 426 if (bsize > PAGE_SIZE) {
426 dev_warn(&device->cdev->dev, "Accessing the DASD failed because" 427 pr_warning("%s: Accessing the DASD failed because of an "
427 " of an incorrect format (rc=%d)\n", rc); 428 "incorrect format (rc=%d)\n",
429 dev_name(&device->cdev->dev), rc);
428 rc = -EIO; 430 rc = -EIO;
429 goto out_label; 431 goto out_label;
430 } 432 }
@@ -442,18 +444,18 @@ dasd_diag_check_device(struct dasd_device *device)
442 block->s2b_shift++; 444 block->s2b_shift++;
443 rc = mdsk_init_io(device, block->bp_block, 0, NULL); 445 rc = mdsk_init_io(device, block->bp_block, 0, NULL);
444 if (rc && (rc != 4)) { 446 if (rc && (rc != 4)) {
445 dev_warn(&device->cdev->dev, "DIAG initialization " 447 pr_warning("%s: DIAG initialization failed with rc=%d\n",
446 "failed with rc=%d\n", rc); 448 dev_name(&device->cdev->dev), rc);
447 rc = -EIO; 449 rc = -EIO;
448 } else { 450 } else {
449 if (rc == 4) 451 if (rc == 4)
450 device->features |= DASD_FEATURE_READONLY; 452 device->features |= DASD_FEATURE_READONLY;
451 dev_info(&device->cdev->dev, 453 pr_info("%s: New DASD with %ld byte/block, total size %ld "
452 "New DASD with %ld byte/block, total size %ld KB%s\n", 454 "KB%s\n", dev_name(&device->cdev->dev),
453 (unsigned long) block->bp_block, 455 (unsigned long) block->bp_block,
454 (unsigned long) (block->blocks << 456 (unsigned long) (block->blocks <<
455 block->s2b_shift) >> 1, 457 block->s2b_shift) >> 1,
456 (rc == 4) ? ", read-only device" : ""); 458 (rc == 4) ? ", read-only device" : "");
457 rc = 0; 459 rc = 0;
458 } 460 }
459out_label: 461out_label:
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 28e4649fa9e4..247b2b934728 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -467,7 +467,7 @@ fs3270_open(struct inode *inode, struct file *filp)
467 if (IS_ERR(ib)) { 467 if (IS_ERR(ib)) {
468 raw3270_put_view(&fp->view); 468 raw3270_put_view(&fp->view);
469 raw3270_del_view(&fp->view); 469 raw3270_del_view(&fp->view);
470 rc = PTR_ERR(fp); 470 rc = PTR_ERR(ib);
471 goto out; 471 goto out;
472 } 472 }
473 fp->rdbuf = ib; 473 fp->rdbuf = ib;
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 3657fe103c27..cb70fa1cf539 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#define KMSG_COMPONENT "tape_34xx" 11#define KMSG_COMPONENT "tape_34xx"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 13
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/init.h> 15#include <linux/init.h>
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 0c72aadb8391..9821c5886613 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#define KMSG_COMPONENT "tape_3590" 11#define KMSG_COMPONENT "tape_3590"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 13
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/init.h> 15#include <linux/init.h>
@@ -136,7 +137,7 @@ static void int_to_ext_kekl(struct tape3592_kekl *in,
136 out->type_on_tape = TAPE390_KEKL_TYPE_LABEL; 137 out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
137 memcpy(out->label, in->label, sizeof(in->label)); 138 memcpy(out->label, in->label, sizeof(in->label));
138 EBCASC(out->label, sizeof(in->label)); 139 EBCASC(out->label, sizeof(in->label));
139 strstrip(out->label); 140 strim(out->label);
140} 141}
141 142
142static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in, 143static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 4799cc2f73c3..96816149368a 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#define KMSG_COMPONENT "tape" 13#define KMSG_COMPONENT "tape"
14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14 15
15#include <linux/fs.h> 16#include <linux/fs.h>
16#include <linux/module.h> 17#include <linux/module.h>
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 23d773a0d113..2125ec7d95f0 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -10,6 +10,9 @@
10 * Martin Schwidefsky <schwidefsky@de.ibm.com> 10 * Martin Schwidefsky <schwidefsky@de.ibm.com>
11 */ 11 */
12 12
13#define KMSG_COMPONENT "tape"
14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
15
13#include <linux/module.h> 16#include <linux/module.h>
14#include <linux/types.h> 17#include <linux/types.h>
15#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index ddc914ccea8f..b2864e3edb6d 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -7,6 +7,10 @@
7 * Author: Stefan Bader <shbader@de.ibm.com> 7 * Author: Stefan Bader <shbader@de.ibm.com>
8 * Based on simple class device code by Greg K-H 8 * Based on simple class device code by Greg K-H
9 */ 9 */
10
11#define KMSG_COMPONENT "tape"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
10#include "tape_class.h" 14#include "tape_class.h"
11 15
12MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>"); 16MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index f5d6802dc5da..81b094e480e6 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -12,6 +12,8 @@
12 */ 12 */
13 13
14#define KMSG_COMPONENT "tape" 14#define KMSG_COMPONENT "tape"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
15#include <linux/module.h> 17#include <linux/module.h>
16#include <linux/init.h> // for kernel parameters 18#include <linux/init.h> // for kernel parameters
17#include <linux/kmod.h> // for requesting modules 19#include <linux/kmod.h> // for requesting modules
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index ebd820ccfb24..0ceb37984f77 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -11,6 +11,9 @@
11 * PROCFS Functions 11 * PROCFS Functions
12 */ 12 */
13 13
14#define KMSG_COMPONENT "tape"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
14#include <linux/module.h> 17#include <linux/module.h>
15#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
16#include <linux/seq_file.h> 19#include <linux/seq_file.h>
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 750354ad16e5..03f07e5dd6e9 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -11,6 +11,9 @@
11 * Stefan Bader <shbader@de.ibm.com> 11 * Stefan Bader <shbader@de.ibm.com>
12 */ 12 */
13 13
14#define KMSG_COMPONENT "tape"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
14#include <linux/stddef.h> 17#include <linux/stddef.h>
15#include <linux/kernel.h> 18#include <linux/kernel.h>
16#include <linux/bio.h> 19#include <linux/bio.h>
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 9509e3860934..7a28a3029a3f 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -49,7 +49,6 @@ static u16 ccwreq_next_path(struct ccw_device *cdev)
49 */ 49 */
50static void ccwreq_stop(struct ccw_device *cdev, int rc) 50static void ccwreq_stop(struct ccw_device *cdev, int rc)
51{ 51{
52 struct subchannel *sch = to_subchannel(cdev->dev.parent);
53 struct ccw_request *req = &cdev->private->req; 52 struct ccw_request *req = &cdev->private->req;
54 53
55 if (req->done) 54 if (req->done)
@@ -57,7 +56,6 @@ static void ccwreq_stop(struct ccw_device *cdev, int rc)
57 req->done = 1; 56 req->done = 1;
58 ccw_device_set_timeout(cdev, 0); 57 ccw_device_set_timeout(cdev, 0);
59 memset(&cdev->private->irb, 0, sizeof(struct irb)); 58 memset(&cdev->private->irb, 0, sizeof(struct irb));
60 sch->lpm = sch->schib.pmcw.pam;
61 if (rc && rc != -ENODEV && req->drc) 59 if (rc && rc != -ENODEV && req->drc)
62 rc = req->drc; 60 rc = req->drc;
63 req->callback(cdev, req->data, rc); 61 req->callback(cdev, req->data, rc);
@@ -80,7 +78,6 @@ static void ccwreq_do(struct ccw_device *cdev)
80 continue; 78 continue;
81 } 79 }
82 /* Perform start function. */ 80 /* Perform start function. */
83 sch->lpm = 0xff;
84 memset(&cdev->private->irb, 0, sizeof(struct irb)); 81 memset(&cdev->private->irb, 0, sizeof(struct irb));
85 rc = cio_start(sch, cp, (u8) req->mask); 82 rc = cio_start(sch, cp, (u8) req->mask);
86 if (rc == 0) { 83 if (rc == 0) {
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 73901c9e260f..a6c7d5426fb2 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1519,6 +1519,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
1519 sch->driver = &io_subchannel_driver; 1519 sch->driver = &io_subchannel_driver;
1520 /* Initialize the ccw_device structure. */ 1520 /* Initialize the ccw_device structure. */
1521 cdev->dev.parent= &sch->dev; 1521 cdev->dev.parent= &sch->dev;
1522 sch_set_cdev(sch, cdev);
1522 io_subchannel_recog(cdev, sch); 1523 io_subchannel_recog(cdev, sch);
1523 /* Now wait for the async. recognition to come to an end. */ 1524 /* Now wait for the async. recognition to come to an end. */
1524 spin_lock_irq(cdev->ccwlock); 1525 spin_lock_irq(cdev->ccwlock);
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index aad188e43b4f..6facb5499a65 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -142,7 +142,7 @@ static void spid_do(struct ccw_device *cdev)
142 u8 fn; 142 u8 fn;
143 143
144 /* Use next available path that is not already in correct state. */ 144 /* Use next available path that is not already in correct state. */
145 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & ~sch->vpm); 145 req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
146 if (!req->lpm) 146 if (!req->lpm)
147 goto out_nopath; 147 goto out_nopath;
148 /* Channel program setup. */ 148 /* Channel program setup. */
@@ -254,15 +254,15 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
254 *p = first; 254 *p = first;
255} 255}
256 256
257static u8 pgid_to_vpm(struct ccw_device *cdev) 257static u8 pgid_to_donepm(struct ccw_device *cdev)
258{ 258{
259 struct subchannel *sch = to_subchannel(cdev->dev.parent); 259 struct subchannel *sch = to_subchannel(cdev->dev.parent);
260 struct pgid *pgid; 260 struct pgid *pgid;
261 int i; 261 int i;
262 int lpm; 262 int lpm;
263 u8 vpm = 0; 263 u8 donepm = 0;
264 264
265 /* Set VPM bits for paths which are already in the target state. */ 265 /* Set bits for paths which are already in the target state. */
266 for (i = 0; i < 8; i++) { 266 for (i = 0; i < 8; i++) {
267 lpm = 0x80 >> i; 267 lpm = 0x80 >> i;
268 if ((cdev->private->pgid_valid_mask & lpm) == 0) 268 if ((cdev->private->pgid_valid_mask & lpm) == 0)
@@ -282,10 +282,10 @@ static u8 pgid_to_vpm(struct ccw_device *cdev)
282 if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH) 282 if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
283 continue; 283 continue;
284 } 284 }
285 vpm |= lpm; 285 donepm |= lpm;
286 } 286 }
287 287
288 return vpm; 288 return donepm;
289} 289}
290 290
291static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid) 291static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
@@ -307,6 +307,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
307 int mismatch = 0; 307 int mismatch = 0;
308 int reserved = 0; 308 int reserved = 0;
309 int reset = 0; 309 int reset = 0;
310 u8 donepm;
310 311
311 if (rc) 312 if (rc)
312 goto out; 313 goto out;
@@ -316,18 +317,20 @@ static void snid_done(struct ccw_device *cdev, int rc)
316 else if (mismatch) 317 else if (mismatch)
317 rc = -EOPNOTSUPP; 318 rc = -EOPNOTSUPP;
318 else { 319 else {
319 sch->vpm = pgid_to_vpm(cdev); 320 donepm = pgid_to_donepm(cdev);
321 sch->vpm = donepm & sch->opm;
322 cdev->private->pgid_todo_mask &= ~donepm;
320 pgid_fill(cdev, pgid); 323 pgid_fill(cdev, pgid);
321 } 324 }
322out: 325out:
323 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " 326 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
324 "mism=%d rsvd=%d reset=%d\n", id->ssid, id->devno, rc, 327 "todo=%02x mism=%d rsvd=%d reset=%d\n", id->ssid,
325 cdev->private->pgid_valid_mask, sch->vpm, mismatch, 328 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
326 reserved, reset); 329 cdev->private->pgid_todo_mask, mismatch, reserved, reset);
327 switch (rc) { 330 switch (rc) {
328 case 0: 331 case 0:
329 /* Anything left to do? */ 332 /* Anything left to do? */
330 if (sch->vpm == sch->schib.pmcw.pam) { 333 if (cdev->private->pgid_todo_mask == 0) {
331 verify_done(cdev, sch->vpm == 0 ? -EACCES : 0); 334 verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
332 return; 335 return;
333 } 336 }
@@ -411,6 +414,7 @@ static void verify_start(struct ccw_device *cdev)
411 struct ccw_dev_id *devid = &cdev->private->dev_id; 414 struct ccw_dev_id *devid = &cdev->private->dev_id;
412 415
413 sch->vpm = 0; 416 sch->vpm = 0;
417 sch->lpm = sch->schib.pmcw.pam;
414 /* Initialize request data. */ 418 /* Initialize request data. */
415 memset(req, 0, sizeof(*req)); 419 memset(req, 0, sizeof(*req));
416 req->timeout = PGID_TIMEOUT; 420 req->timeout = PGID_TIMEOUT;
@@ -442,11 +446,14 @@ static void verify_start(struct ccw_device *cdev)
442 */ 446 */
443void ccw_device_verify_start(struct ccw_device *cdev) 447void ccw_device_verify_start(struct ccw_device *cdev)
444{ 448{
449 struct subchannel *sch = to_subchannel(cdev->dev.parent);
450
445 CIO_TRACE_EVENT(4, "vrfy"); 451 CIO_TRACE_EVENT(4, "vrfy");
446 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); 452 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
447 /* Initialize PGID data. */ 453 /* Initialize PGID data. */
448 memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid)); 454 memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
449 cdev->private->pgid_valid_mask = 0; 455 cdev->private->pgid_valid_mask = 0;
456 cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
450 /* 457 /*
451 * Initialize pathgroup and multipath state with target values. 458 * Initialize pathgroup and multipath state with target values.
452 * They may change in the course of path verification. 459 * They may change in the course of path verification.
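The device_pgid.c hunks above replace the single vpm comparison with explicit mask bookkeeping: paths already in the target state are collected into donepm, only operational ones land in vpm, and pgid_todo_mask shrinks until verification can finish. A rough standalone sketch of that mask flow; the values and the program itself are illustrative, not the cio internals:

#include <stdio.h>

int main(void)
{
	/* Eight channel paths, one bit each (0x80 = path 0); values are made up. */
	unsigned char pam  = 0xc3;	/* physically available paths */
	unsigned char opm  = 0xc1;	/* operational paths */
	unsigned char todo = pam;	/* verify_start(): everything still to be adjusted */
	unsigned char donepm = 0x81;	/* one SNID round: paths already in target state */
	unsigned char vpm;

	vpm = donepm & opm;		/* only operational paths count as verified */
	todo &= ~donepm;		/* finished paths drop out of the todo mask */

	printf("vpm=%02x todo=%02x %s\n", vpm, todo,
	       todo ? "more rounds needed" : "verification done");
	return 0;
}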
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
index 61677dfbdc9b..ca5e9bb9d458 100644
--- a/drivers/s390/cio/fcx.c
+++ b/drivers/s390/cio/fcx.c
@@ -163,7 +163,7 @@ void tcw_finalize(struct tcw *tcw, int num_tidaws)
163 /* Add tcat to tccb. */ 163 /* Add tcat to tccb. */
164 tccb = tcw_get_tccb(tcw); 164 tccb = tcw_get_tccb(tcw);
165 tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)]; 165 tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
166 memset(tcat, 0, sizeof(tcat)); 166 memset(tcat, 0, sizeof(*tcat));
167 /* Calculate tcw input/output count and tcat transport count. */ 167 /* Calculate tcw input/output count and tcat transport count. */
168 count = calc_dcw_count(tccb); 168 count = calc_dcw_count(tccb);
169 if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA)) 169 if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(tccb_init);
269 */ 269 */
270void tsb_init(struct tsb *tsb) 270void tsb_init(struct tsb *tsb)
271{ 271{
272 memset(tsb, 0, sizeof(tsb)); 272 memset(tsb, 0, sizeof(*tsb));
273} 273}
274EXPORT_SYMBOL(tsb_init); 274EXPORT_SYMBOL(tsb_init);
275 275
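Both fcx.c hunks above fix the same pitfall: applying sizeof() to a pointer clears only sizeof(void *) bytes instead of the whole structure. A tiny self-contained illustration (the struct is made up):

#include <stdio.h>
#include <string.h>

struct tsb_like {			/* made-up stand-in for a status block */
	char data[64];
};

int main(void)
{
	struct tsb_like blk;
	struct tsb_like *tsb = &blk;

	printf("sizeof(tsb)  = %zu\n", sizeof(tsb));	/* pointer size, e.g. 8 */
	printf("sizeof(*tsb) = %zu\n", sizeof(*tsb));	/* 64, i.e. what memset() must clear */

	memset(tsb, 0, sizeof(*tsb));			/* the corrected form */
	return 0;
}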
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index d72ae4c93af9..b9ce712a7f25 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -150,6 +150,7 @@ struct ccw_device_private {
150 struct ccw_request req; /* internal I/O request */ 150 struct ccw_request req; /* internal I/O request */
151 int iretry; 151 int iretry;
152 u8 pgid_valid_mask; /* mask of valid PGIDs */ 152 u8 pgid_valid_mask; /* mask of valid PGIDs */
153 u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
153 struct { 154 struct {
154 unsigned int fast:1; /* post with "channel end" */ 155 unsigned int fast:1; /* post with "channel end" */
155 unsigned int repall:1; /* report every interrupt status */ 156 unsigned int repall:1; /* report every interrupt status */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4be6e84b9599..b2275c5000e7 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -486,7 +486,8 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
486 case SLSB_P_INPUT_PRIMED: 486 case SLSB_P_INPUT_PRIMED:
487 inbound_primed(q, count); 487 inbound_primed(q, count);
488 q->first_to_check = add_buf(q->first_to_check, count); 488 q->first_to_check = add_buf(q->first_to_check, count);
489 atomic_sub(count, &q->nr_buf_used); 489 if (atomic_sub(count, &q->nr_buf_used) == 0)
490 qdio_perf_stat_inc(&perf_stats.inbound_queue_full);
490 break; 491 break;
491 case SLSB_P_INPUT_ERROR: 492 case SLSB_P_INPUT_ERROR:
492 announce_buffer_error(q, count); 493 announce_buffer_error(q, count);
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
index 968e3c7c2632..54f7c325a3e6 100644
--- a/drivers/s390/cio/qdio_perf.c
+++ b/drivers/s390/cio/qdio_perf.c
@@ -64,6 +64,8 @@ static int qdio_perf_proc_show(struct seq_file *m, void *v)
64 (long)atomic_long_read(&perf_stats.fast_requeue)); 64 (long)atomic_long_read(&perf_stats.fast_requeue));
65 seq_printf(m, "Number of outbound target full condition\t: %li\n", 65 seq_printf(m, "Number of outbound target full condition\t: %li\n",
66 (long)atomic_long_read(&perf_stats.outbound_target_full)); 66 (long)atomic_long_read(&perf_stats.outbound_target_full));
67 seq_printf(m, "Number of inbound queue full condition\t\t: %li\n",
68 (long)atomic_long_read(&perf_stats.inbound_queue_full));
67 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", 69 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
68 (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); 70 (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
69 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", 71 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
index ff4504ce1e3c..12454231dc8b 100644
--- a/drivers/s390/cio/qdio_perf.h
+++ b/drivers/s390/cio/qdio_perf.h
@@ -36,6 +36,7 @@ struct qdio_perf_stats {
36 atomic_long_t outbound_handler; 36 atomic_long_t outbound_handler;
37 atomic_long_t fast_requeue; 37 atomic_long_t fast_requeue;
38 atomic_long_t outbound_target_full; 38 atomic_long_t outbound_target_full;
39 atomic_long_t inbound_queue_full;
39 40
40 /* for debugging */ 41 /* for debugging */
41 atomic_long_t debug_tl_out_timer; 42 atomic_long_t debug_tl_out_timer;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 18d54fc21ce9..8c2dea5fa2b4 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -48,7 +48,6 @@ static void set_impl_params(struct qdio_irq *irq_ptr,
48 if (!irq_ptr) 48 if (!irq_ptr)
49 return; 49 return;
50 50
51 WARN_ON((unsigned long)&irq_ptr->qib & 0xff);
52 irq_ptr->qib.pfmt = qib_param_field_format; 51 irq_ptr->qib.pfmt = qib_param_field_format;
53 if (qib_param_field) 52 if (qib_param_field)
54 memcpy(irq_ptr->qib.parm, qib_param_field, 53 memcpy(irq_ptr->qib.parm, qib_param_field,
@@ -82,14 +81,12 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
82 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); 81 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
83 if (!q) 82 if (!q)
84 return -ENOMEM; 83 return -ENOMEM;
85 WARN_ON((unsigned long)q & 0xff);
86 84
87 q->slib = (struct slib *) __get_free_page(GFP_KERNEL); 85 q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
88 if (!q->slib) { 86 if (!q->slib) {
89 kmem_cache_free(qdio_q_cache, q); 87 kmem_cache_free(qdio_q_cache, q);
90 return -ENOMEM; 88 return -ENOMEM;
91 } 89 }
92 WARN_ON((unsigned long)q->slib & 0x7ff);
93 irq_ptr_qs[i] = q; 90 irq_ptr_qs[i] = q;
94 } 91 }
95 return 0; 92 return 0;
@@ -131,7 +128,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
131 /* fill in sbal */ 128 /* fill in sbal */
132 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) { 129 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
133 q->sbal[j] = *sbals_array++; 130 q->sbal[j] = *sbals_array++;
134 WARN_ON((unsigned long)q->sbal[j] & 0xff); 131 BUG_ON((unsigned long)q->sbal[j] & 0xff);
135 } 132 }
136 133
137 /* fill in slib */ 134 /* fill in slib */
@@ -147,11 +144,6 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
147 /* fill in sl */ 144 /* fill in sl */
148 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) 145 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
149 q->sl->element[j].sbal = (unsigned long)q->sbal[j]; 146 q->sl->element[j].sbal = (unsigned long)q->sbal[j];
150
151 DBF_EVENT("sl-slsb-sbal");
152 DBF_HEX(q->sl, sizeof(void *));
153 DBF_HEX(&q->slsb, sizeof(void *));
154 DBF_HEX(q->sbal, sizeof(void *));
155} 147}
156 148
157static void setup_queues(struct qdio_irq *irq_ptr, 149static void setup_queues(struct qdio_irq *irq_ptr,
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 3bf75924741f..84d3bbaa95e7 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -76,6 +76,7 @@
76 Fix bug in twa_get_param() on 4GB+. 76 Fix bug in twa_get_param() on 4GB+.
77 Use pci_resource_len() for ioremap(). 77 Use pci_resource_len() for ioremap().
78 2.26.02.012 - Add power management support. 78 2.26.02.012 - Add power management support.
79 2.26.02.013 - Fix bug in twa_load_sgl().
79*/ 80*/
80 81
81#include <linux/module.h> 82#include <linux/module.h>
@@ -100,7 +101,7 @@
100#include "3w-9xxx.h" 101#include "3w-9xxx.h"
101 102
102/* Globals */ 103/* Globals */
103#define TW_DRIVER_VERSION "2.26.02.012" 104#define TW_DRIVER_VERSION "2.26.02.013"
104static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 105static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
105static unsigned int twa_device_extension_count; 106static unsigned int twa_device_extension_count;
106static int twa_major = -1; 107static int twa_major = -1;
@@ -1382,10 +1383,12 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
1382 newcommand = &full_command_packet->command.newcommand; 1383 newcommand = &full_command_packet->command.newcommand;
1383 newcommand->request_id__lunl = 1384 newcommand->request_id__lunl =
1384 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id)); 1385 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1385 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1); 1386 if (length) {
1386 newcommand->sg_list[0].length = cpu_to_le32(length); 1387 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1388 newcommand->sg_list[0].length = cpu_to_le32(length);
1389 }
1387 newcommand->sgl_entries__lunh = 1390 newcommand->sgl_entries__lunh =
1388 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), 1)); 1391 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1389 } else { 1392 } else {
1390 oldcommand = &full_command_packet->command.oldcommand; 1393 oldcommand = &full_command_packet->command.oldcommand;
1391 oldcommand->request_id = request_id; 1394 oldcommand->request_id = request_id;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 36900c71a592..9191d1ea6451 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -388,6 +388,16 @@ config BLK_DEV_3W_XXXX_RAID
388 Please read the comments at the top of 388 Please read the comments at the top of
389 <file:drivers/scsi/3w-xxxx.c>. 389 <file:drivers/scsi/3w-xxxx.c>.
390 390
391config SCSI_HPSA
392 tristate "HP Smart Array SCSI driver"
393 depends on PCI && SCSI
394 help
395 This driver supports HP Smart Array Controllers (circa 2009).
396 It is a SCSI alternative to the cciss driver, which is a block
397 driver. Anyone wishing to use HP Smart Array controllers who
398 would prefer the devices be presented to Linux as SCSI devices,
399 rather than as generic block devices, should say Y here.
400
391config SCSI_3W_9XXX 401config SCSI_3W_9XXX
392 tristate "3ware 9xxx SATA-RAID support" 402 tristate "3ware 9xxx SATA-RAID support"
393 depends on PCI && SCSI 403 depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 280d3c657d60..92a8c500b23d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_SCSI_BFA_FC) += bfa/
91obj-$(CONFIG_SCSI_PAS16) += pas16.o 91obj-$(CONFIG_SCSI_PAS16) += pas16.o
92obj-$(CONFIG_SCSI_T128) += t128.o 92obj-$(CONFIG_SCSI_T128) += t128.o
93obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o 93obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
94obj-$(CONFIG_SCSI_HPSA) += hpsa.o
94obj-$(CONFIG_SCSI_DTC3280) += dtc.o 95obj-$(CONFIG_SCSI_DTC3280) += dtc.o
95obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/ 96obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
96obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o 97obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 698a527d6cca..f008708f1b08 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -135,11 +135,15 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
135 while ((compl = be_mcc_compl_get(phba))) { 135 while ((compl = be_mcc_compl_get(phba))) {
136 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 136 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
137 /* Interpret flags as an async trailer */ 137 /* Interpret flags as an async trailer */
138 BUG_ON(!is_link_state_evt(compl->flags)); 138 if (is_link_state_evt(compl->flags))
139 /* Interpret compl as a async link evt */
140 beiscsi_async_link_state_process(phba,
141 (struct be_async_event_link_state *) compl);
142 else
143 SE_DEBUG(DBG_LVL_1,
144 " Unsupported Async Event, flags"
145 " = 0x%08x \n", compl->flags);
139 146
140 /* Interpret compl as a async link evt */
141 beiscsi_async_link_state_process(phba,
142 (struct be_async_event_link_state *) compl);
143 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 147 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
144 status = be_mcc_compl_process(ctrl, compl); 148 status = be_mcc_compl_process(ctrl, compl);
145 atomic_dec(&phba->ctrl.mcc_obj.q.used); 149 atomic_dec(&phba->ctrl.mcc_obj.q.used);
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 2b973f3c2eb2..6cf9dc37d78b 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -684,6 +684,7 @@ extern unsigned int error_mask1, error_mask2;
684extern u64 iscsi_error_mask; 684extern u64 iscsi_error_mask;
685extern unsigned int en_tcp_dack; 685extern unsigned int en_tcp_dack;
686extern unsigned int event_coal_div; 686extern unsigned int event_coal_div;
687extern unsigned int event_coal_min;
687 688
688extern struct scsi_transport_template *bnx2i_scsi_xport_template; 689extern struct scsi_transport_template *bnx2i_scsi_xport_template;
689extern struct iscsi_transport bnx2i_iscsi_transport; 690extern struct iscsi_transport bnx2i_iscsi_transport;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5c8d7630c13e..1af578dec276 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -133,20 +133,38 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
133{ 133{
134 struct bnx2i_5771x_cq_db *cq_db; 134 struct bnx2i_5771x_cq_db *cq_db;
135 u16 cq_index; 135 u16 cq_index;
136 u16 next_index;
137 u32 num_active_cmds;
136 138
139
140 /* Coalesce CQ entries only on 10G devices */
137 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) 141 if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
138 return; 142 return;
139 143
144 /* Do not update CQ DB multiple times before firmware writes
145 * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious
146 * interrupts and other unwanted results
147 */
148 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
149 if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
150 return;
151
140 if (action == CNIC_ARM_CQE) { 152 if (action == CNIC_ARM_CQE) {
141 cq_index = ep->qp.cqe_exp_seq_sn + 153 num_active_cmds = ep->num_active_cmds;
142 ep->num_active_cmds / event_coal_div; 154 if (num_active_cmds <= event_coal_min)
143 cq_index %= (ep->qp.cqe_size * 2 + 1); 155 next_index = 1;
144 if (!cq_index) { 156 else
157 next_index = event_coal_min +
158 (num_active_cmds - event_coal_min) / event_coal_div;
159 if (!next_index)
160 next_index = 1;
161 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
162 if (cq_index > ep->qp.cqe_size * 2)
163 cq_index -= ep->qp.cqe_size * 2;
164 if (!cq_index)
145 cq_index = 1; 165 cq_index = 1;
146 cq_db = (struct bnx2i_5771x_cq_db *) 166
147 ep->qp.cq_pgtbl_virt; 167 cq_db->sqn[0] = cq_index;
148 cq_db->sqn[0] = cq_index;
149 }
150 } 168 }
151} 169}
152 170
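The rewritten arming logic above reduces the CQ doorbell index to a small calculation driven by the new event_coal_min module parameter. A standalone sketch of that arithmetic with the default parameter values from bnx2i_init.c; the helper name is made up for illustration:

#include <stdio.h>

static unsigned int event_coal_min = 24;	/* default from bnx2i_init.c */
static unsigned int event_coal_div = 1;		/* default from bnx2i_init.c */

/* Hypothetical helper mirroring the index computed for cq_db->sqn[0] above. */
static unsigned short next_cq_arm_index(unsigned int num_active_cmds,
					unsigned short cqe_exp_seq_sn,
					unsigned int cqe_size)
{
	unsigned short next_index, cq_index;

	if (num_active_cmds <= event_coal_min)
		next_index = 1;
	else
		next_index = event_coal_min +
			(num_active_cmds - event_coal_min) / event_coal_div;
	if (!next_index)
		next_index = 1;

	cq_index = cqe_exp_seq_sn + next_index - 1;
	if (cq_index > cqe_size * 2)
		cq_index -= cqe_size * 2;
	if (!cq_index)
		cq_index = 1;
	return cq_index;
}

int main(void)
{
	/* e.g. 40 active commands, expected CQE sequence number 10, 128-entry CQ */
	printf("%u\n", (unsigned int)next_cq_arm_index(40, 10, 128));
	return 0;
}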
@@ -366,6 +384,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
366 struct bnx2i_cmd *bnx2i_cmd; 384 struct bnx2i_cmd *bnx2i_cmd;
367 struct bnx2i_tmf_request *tmfabort_wqe; 385 struct bnx2i_tmf_request *tmfabort_wqe;
368 u32 dword; 386 u32 dword;
387 u32 scsi_lun[2];
369 388
370 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; 389 bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
371 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; 390 tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
@@ -376,27 +395,35 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
376 tmfabort_wqe->op_attr = 0; 395 tmfabort_wqe->op_attr = 0;
377 tmfabort_wqe->op_attr = 396 tmfabort_wqe->op_attr =
378 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK; 397 ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
379 tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
380 tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
381 398
382 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14)); 399 tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
383 tmfabort_wqe->reserved2 = 0; 400 tmfabort_wqe->reserved2 = 0;
384 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn); 401 tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
385 402
386 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); 403 ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
387 if (!ctask || ctask->sc) 404 if (!ctask || !ctask->sc)
388 /* 405 /*
389 * the iscsi layer must have completed the cmd while this 406 * the iscsi layer must have completed the cmd while this
390 * was starting up. 407 * was starting up.
408 *
409 * Note: In the case of a SCSI cmd timeout, the task's sc
410 * is still active; hence ctask->sc != 0
411 * In this case, the task must be aborted
391 */ 412 */
392 return 0; 413 return 0;
414
393 ref_sc = ctask->sc; 415 ref_sc = ctask->sc;
394 416
417 /* Retrieve LUN directly from the ref_sc */
418 int_to_scsilun(ref_sc->device->lun, (struct scsi_lun *) scsi_lun);
419 tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
420 tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
421
395 if (ref_sc->sc_data_direction == DMA_TO_DEVICE) 422 if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
396 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); 423 dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
397 else 424 else
398 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); 425 dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
399 tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt); 426 tmfabort_wqe->ref_itt = (dword | (tmfabort_hdr->rtt & ISCSI_ITT_MASK));
400 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); 427 tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
401 428
402 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; 429 tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 0c4210d48ee8..6d8172e781cf 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
17static u32 adapter_count; 17static u32 adapter_count;
18 18
19#define DRV_MODULE_NAME "bnx2i" 19#define DRV_MODULE_NAME "bnx2i"
20#define DRV_MODULE_VERSION "2.0.1e" 20#define DRV_MODULE_VERSION "2.1.0"
21#define DRV_MODULE_RELDATE "June 22, 2009" 21#define DRV_MODULE_RELDATE "Dec 06, 2009"
22 22
23static char version[] __devinitdata = 23static char version[] __devinitdata =
24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ 24 "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -32,6 +32,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
32 32
33static DEFINE_MUTEX(bnx2i_dev_lock); 33static DEFINE_MUTEX(bnx2i_dev_lock);
34 34
35unsigned int event_coal_min = 24;
36module_param(event_coal_min, int, 0664);
37MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands");
38
35unsigned int event_coal_div = 1; 39unsigned int event_coal_div = 1;
36module_param(event_coal_div, int, 0664); 40module_param(event_coal_div, int, 0664);
37MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor"); 41MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
@@ -83,8 +87,12 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
83 set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); 87 set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
84 hba->mail_queue_access = BNX2I_MQ_BIN_MODE; 88 hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
85 } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || 89 } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
86 hba->pci_did == PCI_DEVICE_ID_NX2_57711) 90 hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
91 hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
87 set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); 92 set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
93 else
94 printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
95 hba->pci_did);
88} 96}
89 97
90 98
@@ -363,7 +371,7 @@ static int __init bnx2i_mod_init(void)
363 371
364 printk(KERN_INFO "%s", version); 372 printk(KERN_INFO "%s", version);
365 373
366 if (!is_power_of_2(sq_size)) 374 if (sq_size && !is_power_of_2(sq_size))
367 sq_size = roundup_pow_of_two(sq_size); 375 sq_size = roundup_pow_of_two(sq_size);
368 376
369 mutex_init(&bnx2i_dev_lock); 377 mutex_init(&bnx2i_dev_lock);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 132898c88d5e..33b2294625bb 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -485,7 +485,6 @@ static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
485 struct iscsi_task *task = session->cmds[i]; 485 struct iscsi_task *task = session->cmds[i];
486 struct bnx2i_cmd *cmd = task->dd_data; 486 struct bnx2i_cmd *cmd = task->dd_data;
487 487
488 /* Anil */
489 task->hdr = &cmd->hdr; 488 task->hdr = &cmd->hdr;
490 task->hdr_max = sizeof(struct iscsi_hdr); 489 task->hdr_max = sizeof(struct iscsi_hdr);
491 490
@@ -765,7 +764,6 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
765 hba->pci_svid = hba->pcidev->subsystem_vendor; 764 hba->pci_svid = hba->pcidev->subsystem_vendor;
766 hba->pci_func = PCI_FUNC(hba->pcidev->devfn); 765 hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
767 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn); 766 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
768 bnx2i_identify_device(hba);
769 767
770 bnx2i_identify_device(hba); 768 bnx2i_identify_device(hba);
771 bnx2i_setup_host_queue_size(hba, shost); 769 bnx2i_setup_host_queue_size(hba, shost);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index c1d5be4adf9c..26ffdcd5a437 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -291,7 +291,7 @@ static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
291 c3cn_hold(c3cn); 291 c3cn_hold(c3cn);
292 spin_lock_bh(&c3cn->lock); 292 spin_lock_bh(&c3cn->lock);
293 if (c3cn->state == C3CN_STATE_CONNECTING) 293 if (c3cn->state == C3CN_STATE_CONNECTING)
294 fail_act_open(c3cn, EHOSTUNREACH); 294 fail_act_open(c3cn, -EHOSTUNREACH);
295 spin_unlock_bh(&c3cn->lock); 295 spin_unlock_bh(&c3cn->lock);
296 c3cn_put(c3cn); 296 c3cn_put(c3cn);
297 __kfree_skb(skb); 297 __kfree_skb(skb);
@@ -792,18 +792,18 @@ static int act_open_rpl_status_to_errno(int status)
792{ 792{
793 switch (status) { 793 switch (status) {
794 case CPL_ERR_CONN_RESET: 794 case CPL_ERR_CONN_RESET:
795 return ECONNREFUSED; 795 return -ECONNREFUSED;
796 case CPL_ERR_ARP_MISS: 796 case CPL_ERR_ARP_MISS:
797 return EHOSTUNREACH; 797 return -EHOSTUNREACH;
798 case CPL_ERR_CONN_TIMEDOUT: 798 case CPL_ERR_CONN_TIMEDOUT:
799 return ETIMEDOUT; 799 return -ETIMEDOUT;
800 case CPL_ERR_TCAM_FULL: 800 case CPL_ERR_TCAM_FULL:
801 return ENOMEM; 801 return -ENOMEM;
802 case CPL_ERR_CONN_EXIST: 802 case CPL_ERR_CONN_EXIST:
803 cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n"); 803 cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
804 return EADDRINUSE; 804 return -EADDRINUSE;
805 default: 805 default:
806 return EIO; 806 return -EIO;
807 } 807 }
808} 808}
809 809
@@ -817,7 +817,7 @@ static void act_open_retry_timer(unsigned long data)
817 spin_lock_bh(&c3cn->lock); 817 spin_lock_bh(&c3cn->lock);
818 skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC); 818 skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
819 if (!skb) 819 if (!skb)
820 fail_act_open(c3cn, ENOMEM); 820 fail_act_open(c3cn, -ENOMEM);
821 else { 821 else {
822 skb->sk = (struct sock *)c3cn; 822 skb->sk = (struct sock *)c3cn;
823 set_arp_failure_handler(skb, act_open_req_arp_failure); 823 set_arp_failure_handler(skb, act_open_req_arp_failure);
@@ -966,14 +966,14 @@ static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
966 case CPL_ERR_BAD_SYN: /* fall through */ 966 case CPL_ERR_BAD_SYN: /* fall through */
967 case CPL_ERR_CONN_RESET: 967 case CPL_ERR_CONN_RESET:
968 return c3cn->state > C3CN_STATE_ESTABLISHED ? 968 return c3cn->state > C3CN_STATE_ESTABLISHED ?
969 EPIPE : ECONNRESET; 969 -EPIPE : -ECONNRESET;
970 case CPL_ERR_XMIT_TIMEDOUT: 970 case CPL_ERR_XMIT_TIMEDOUT:
971 case CPL_ERR_PERSIST_TIMEDOUT: 971 case CPL_ERR_PERSIST_TIMEDOUT:
972 case CPL_ERR_FINWAIT2_TIMEDOUT: 972 case CPL_ERR_FINWAIT2_TIMEDOUT:
973 case CPL_ERR_KEEPALIVE_TIMEDOUT: 973 case CPL_ERR_KEEPALIVE_TIMEDOUT:
974 return ETIMEDOUT; 974 return -ETIMEDOUT;
975 default: 975 default:
976 return EIO; 976 return -EIO;
977 } 977 }
978} 978}
979 979
@@ -1563,7 +1563,7 @@ free_tid:
1563 s3_free_atid(cdev, c3cn->tid); 1563 s3_free_atid(cdev, c3cn->tid);
1564 c3cn->tid = 0; 1564 c3cn->tid = 0;
1565out_err: 1565out_err:
1566 return -1; 1566 return -EINVAL;
1567} 1567}
1568 1568
1569 1569
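The cxgb3i_offload.c hunks above all make the same correction: error paths now return negative errno values, matching the kernel convention that callers check for "ret < 0". A small illustration of why the sign matters; the mapper function here is hypothetical:

#include <errno.h>
#include <stdio.h>

/* Hypothetical status-to-errno mapper following the negative-errno convention. */
static int status_to_errno(int timed_out)
{
	return timed_out ? -ETIMEDOUT : 0;
}

int main(void)
{
	int err = status_to_errno(1);

	/* Callers test "err < 0"; a positive ETIMEDOUT would be mistaken for success. */
	if (err < 0)
		printf("failed: errno %d\n", -err);
	else
		printf("ok\n");
	return 0;
}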
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index 709105071177..1fe3b0f1f3c9 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -388,8 +388,8 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
388 if (err > 0) { 388 if (err > 0) {
389 int pdulen = err; 389 int pdulen = err;
390 390
391 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n", 391 cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
392 task, skb, skb->len, skb->data_len, err); 392 task, skb, skb->len, skb->data_len, err);
393 393
394 if (task->conn->hdrdgst_en) 394 if (task->conn->hdrdgst_en)
395 pdulen += ISCSI_DIGEST_SIZE; 395 pdulen += ISCSI_DIGEST_SIZE;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 47cfe1c49c3e..1a660191a905 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -748,6 +748,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
748 {"IBM", "1724"}, 748 {"IBM", "1724"},
749 {"IBM", "1726"}, 749 {"IBM", "1726"},
750 {"IBM", "1742"}, 750 {"IBM", "1742"},
751 {"IBM", "1745"},
752 {"IBM", "1746"},
751 {"IBM", "1814"}, 753 {"IBM", "1814"},
752 {"IBM", "1815"}, 754 {"IBM", "1815"},
753 {"IBM", "1818"}, 755 {"IBM", "1818"},
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index a30ffaa1222c..10be9f36a4cc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -101,6 +101,8 @@ static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
101 101
102static int fcoe_create(const char *, struct kernel_param *); 102static int fcoe_create(const char *, struct kernel_param *);
103static int fcoe_destroy(const char *, struct kernel_param *); 103static int fcoe_destroy(const char *, struct kernel_param *);
104static int fcoe_enable(const char *, struct kernel_param *);
105static int fcoe_disable(const char *, struct kernel_param *);
104 106
105static struct fc_seq *fcoe_elsct_send(struct fc_lport *, 107static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
106 u32 did, struct fc_frame *, 108 u32 did, struct fc_frame *,
@@ -115,10 +117,16 @@ static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
115 117
116module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR); 118module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
117__MODULE_PARM_TYPE(create, "string"); 119__MODULE_PARM_TYPE(create, "string");
118MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in."); 120MODULE_PARM_DESC(create, " Creates fcoe instance on an Ethernet interface");
119module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); 121module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
120__MODULE_PARM_TYPE(destroy, "string"); 122__MODULE_PARM_TYPE(destroy, "string");
121MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe"); 123MODULE_PARM_DESC(destroy, " Destroys fcoe instance on an Ethernet interface");
124module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR);
125__MODULE_PARM_TYPE(enable, "string");
126MODULE_PARM_DESC(enable, " Enables fcoe on an Ethernet interface.");
127module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR);
128__MODULE_PARM_TYPE(disable, "string");
129MODULE_PARM_DESC(disable, " Disables fcoe on an Ethernet interface.");
122 130
123/* notification function for packets from net device */ 131/* notification function for packets from net device */
124static struct notifier_block fcoe_notifier = { 132static struct notifier_block fcoe_notifier = {
@@ -545,6 +553,23 @@ static void fcoe_queue_timer(ulong lport)
545} 553}
546 554
547/** 555/**
556 * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
557 * @netdev: the associated net device
558 * @wwn: the output WWN
559 * @type: the type of WWN (WWPN or WWNN)
560 *
561 * Returns: 0 for success
562 */
563static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
564{
565 const struct net_device_ops *ops = netdev->netdev_ops;
566
567 if (ops->ndo_fcoe_get_wwn)
568 return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
569 return -EINVAL;
570}
571
572/**
548 * fcoe_netdev_config() - Set up net device for SW FCoE 573 * fcoe_netdev_config() - Set up net device for SW FCoE
549 * @lport: The local port that is associated with the net device 574 * @lport: The local port that is associated with the net device
550 * @netdev: The associated net device 575 * @netdev: The associated net device
@@ -611,9 +636,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
611 */ 636 */
612 if (netdev->priv_flags & IFF_802_1Q_VLAN) 637 if (netdev->priv_flags & IFF_802_1Q_VLAN)
613 vid = vlan_dev_vlan_id(netdev); 638 vid = vlan_dev_vlan_id(netdev);
614 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0); 639
640 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
641 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
615 fc_set_wwnn(lport, wwnn); 642 fc_set_wwnn(lport, wwnn);
616 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 2, vid); 643 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
644 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
645 2, vid);
617 fc_set_wwpn(lport, wwpn); 646 fc_set_wwpn(lport, wwpn);
618 } 647 }
619 648
@@ -1231,7 +1260,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1231 "CPU.\n"); 1260 "CPU.\n");
1232 1261
1233 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1262 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1234 cpu = first_cpu(cpu_online_map); 1263 cpu = cpumask_first(cpu_online_mask);
1235 fps = &per_cpu(fcoe_percpu, cpu); 1264 fps = &per_cpu(fcoe_percpu, cpu);
1236 spin_lock_bh(&fps->fcoe_rx_list.lock); 1265 spin_lock_bh(&fps->fcoe_rx_list.lock);
1237 if (!fps->thread) { 1266 if (!fps->thread) {
@@ -1838,6 +1867,104 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer)
1838} 1867}
1839 1868
1840/** 1869/**
1870 * fcoe_disable() - Disables a FCoE interface
1871 * @buffer: The name of the Ethernet interface to be disabled
1872 * @kp: The associated kernel parameter
1873 *
1874 * Called from sysfs.
1875 *
1876 * Returns: 0 for success
1877 */
1878static int fcoe_disable(const char *buffer, struct kernel_param *kp)
1879{
1880 struct fcoe_interface *fcoe;
1881 struct net_device *netdev;
1882 int rc = 0;
1883
1884 mutex_lock(&fcoe_config_mutex);
1885#ifdef CONFIG_FCOE_MODULE
1886 /*
1887 * Make sure the module has been initialized, and is not about to be
1888 * removed. Module parameter sysfs files are writable before the
1889 * module_init function is called and after module_exit.
1890 */
1891 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1892 rc = -ENODEV;
1893 goto out_nodev;
1894 }
1895#endif
1896
1897 netdev = fcoe_if_to_netdev(buffer);
1898 if (!netdev) {
1899 rc = -ENODEV;
1900 goto out_nodev;
1901 }
1902
1903 rtnl_lock();
1904 fcoe = fcoe_hostlist_lookup_port(netdev);
1905 rtnl_unlock();
1906
1907 if (fcoe)
1908 fc_fabric_logoff(fcoe->ctlr.lp);
1909 else
1910 rc = -ENODEV;
1911
1912 dev_put(netdev);
1913out_nodev:
1914 mutex_unlock(&fcoe_config_mutex);
1915 return rc;
1916}
1917
1918/**
1919 * fcoe_enable() - Enables a FCoE interface
1920 * @buffer: The name of the Ethernet interface to be enabled
1921 * @kp: The associated kernel parameter
1922 *
1923 * Called from sysfs.
1924 *
1925 * Returns: 0 for success
1926 */
1927static int fcoe_enable(const char *buffer, struct kernel_param *kp)
1928{
1929 struct fcoe_interface *fcoe;
1930 struct net_device *netdev;
1931 int rc = 0;
1932
1933 mutex_lock(&fcoe_config_mutex);
1934#ifdef CONFIG_FCOE_MODULE
1935 /*
1936 * Make sure the module has been initialized, and is not about to be
1937 * removed. Module parameter sysfs files are writable before the
1938 * module_init function is called and after module_exit.
1939 */
1940 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1941 rc = -ENODEV;
1942 goto out_nodev;
1943 }
1944#endif
1945
1946 netdev = fcoe_if_to_netdev(buffer);
1947 if (!netdev) {
1948 rc = -ENODEV;
1949 goto out_nodev;
1950 }
1951
1952 rtnl_lock();
1953 fcoe = fcoe_hostlist_lookup_port(netdev);
1954 rtnl_unlock();
1955
1956 if (fcoe)
1957 rc = fc_fabric_login(fcoe->ctlr.lp);
1958 else
1959 rc = -ENODEV;
1960
1961 dev_put(netdev);
1962out_nodev:
1963 mutex_unlock(&fcoe_config_mutex);
1964 return rc;
1965}
1966
1967/**
1841 * fcoe_destroy() - Destroy a FCoE interface 1968 * fcoe_destroy() - Destroy a FCoE interface
1842 * @buffer: The name of the Ethernet interface to be destroyed 1969 * @buffer: The name of the Ethernet interface to be destroyed
1843 * @kp: The associated kernel parameter 1970 * @kp: The associated kernel parameter
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
new file mode 100644
index 000000000000..bb96fdd58e23
--- /dev/null
+++ b/drivers/scsi/hpsa.c
@@ -0,0 +1,3531 @@
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 *
20 */
21
22#include <linux/module.h>
23#include <linux/interrupt.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/delay.h>
29#include <linux/fs.h>
30#include <linux/timer.h>
31#include <linux/seq_file.h>
32#include <linux/init.h>
33#include <linux/spinlock.h>
34#include <linux/smp_lock.h>
35#include <linux/compat.h>
36#include <linux/blktrace_api.h>
37#include <linux/uaccess.h>
38#include <linux/io.h>
39#include <linux/dma-mapping.h>
40#include <linux/completion.h>
41#include <linux/moduleparam.h>
42#include <scsi/scsi.h>
43#include <scsi/scsi_cmnd.h>
44#include <scsi/scsi_device.h>
45#include <scsi/scsi_host.h>
46#include <linux/cciss_ioctl.h>
47#include <linux/string.h>
48#include <linux/bitmap.h>
49#include <asm/atomic.h>
50#include <linux/kthread.h>
51#include "hpsa_cmd.h"
52#include "hpsa.h"
53
54/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
55#define HPSA_DRIVER_VERSION "1.0.0"
56#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
57
58/* How long to wait (in milliseconds) for board to go into simple mode */
59#define MAX_CONFIG_WAIT 30000
60#define MAX_IOCTL_CONFIG_WAIT 1000
61
62/*define how many times we will try a command because of bus resets */
63#define MAX_CMD_RETRIES 3
64
65/* Embedded module documentation macros - see modules.h */
66MODULE_AUTHOR("Hewlett-Packard Company");
67MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
68 HPSA_DRIVER_VERSION);
69MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
70MODULE_VERSION(HPSA_DRIVER_VERSION);
71MODULE_LICENSE("GPL");
72
73static int hpsa_allow_any;
74module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
75MODULE_PARM_DESC(hpsa_allow_any,
76 "Allow hpsa driver to access unknown HP Smart Array hardware");
77
78/* define the PCI info for the cards we can control */
79static const struct pci_device_id hpsa_pci_device_id[] = {
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
90 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
91 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
92 {0,}
93};
94
95MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
96
97/* board_id = Subsystem Device ID & Vendor ID
98 * product = Marketing Name for the board
99 * access = Address of the struct of function pointers
100 */
101static struct board_type products[] = {
102 {0x3223103C, "Smart Array P800", &SA5_access},
103 {0x3234103C, "Smart Array P400", &SA5_access},
104 {0x323d103c, "Smart Array P700M", &SA5_access},
105 {0x3241103C, "Smart Array P212", &SA5_access},
106 {0x3243103C, "Smart Array P410", &SA5_access},
107 {0x3245103C, "Smart Array P410i", &SA5_access},
108 {0x3247103C, "Smart Array P411", &SA5_access},
109 {0x3249103C, "Smart Array P812", &SA5_access},
110 {0x324a103C, "Smart Array P712m", &SA5_access},
111 {0x324b103C, "Smart Array P711m", &SA5_access},
112 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
113};
114
115static int number_of_controllers;
116
117static irqreturn_t do_hpsa_intr(int irq, void *dev_id);
118static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
119static void start_io(struct ctlr_info *h);
120
121#ifdef CONFIG_COMPAT
122static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
123#endif
124
125static void cmd_free(struct ctlr_info *h, struct CommandList *c);
126static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
127static struct CommandList *cmd_alloc(struct ctlr_info *h);
128static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
129static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
130 void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
131 int cmd_type);
132
133static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
134 void (*done)(struct scsi_cmnd *));
135
136static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
137static int hpsa_slave_alloc(struct scsi_device *sdev);
138static void hpsa_slave_destroy(struct scsi_device *sdev);
139
140static ssize_t raid_level_show(struct device *dev,
141 struct device_attribute *attr, char *buf);
142static ssize_t lunid_show(struct device *dev,
143 struct device_attribute *attr, char *buf);
144static ssize_t unique_id_show(struct device *dev,
145 struct device_attribute *attr, char *buf);
146static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
147static ssize_t host_store_rescan(struct device *dev,
148 struct device_attribute *attr, const char *buf, size_t count);
149static int check_for_unit_attention(struct ctlr_info *h,
150 struct CommandList *c);
151static void check_ioctl_unit_attention(struct ctlr_info *h,
152 struct CommandList *c);
153
154static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
155static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
156static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
157static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
158
159static struct device_attribute *hpsa_sdev_attrs[] = {
160 &dev_attr_raid_level,
161 &dev_attr_lunid,
162 &dev_attr_unique_id,
163 NULL,
164};
165
166static struct device_attribute *hpsa_shost_attrs[] = {
167 &dev_attr_rescan,
168 NULL,
169};
170
171static struct scsi_host_template hpsa_driver_template = {
172 .module = THIS_MODULE,
173 .name = "hpsa",
174 .proc_name = "hpsa",
175 .queuecommand = hpsa_scsi_queue_command,
176 .can_queue = 512,
177 .this_id = -1,
178 .sg_tablesize = MAXSGENTRIES,
179 .cmd_per_lun = 512,
180 .use_clustering = ENABLE_CLUSTERING,
181 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
182 .ioctl = hpsa_ioctl,
183 .slave_alloc = hpsa_slave_alloc,
184 .slave_destroy = hpsa_slave_destroy,
185#ifdef CONFIG_COMPAT
186 .compat_ioctl = hpsa_compat_ioctl,
187#endif
188 .sdev_attrs = hpsa_sdev_attrs,
189 .shost_attrs = hpsa_shost_attrs,
190};
191
192static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
193{
194 unsigned long *priv = shost_priv(sdev->host);
195 return (struct ctlr_info *) *priv;
196}
197
198static struct task_struct *hpsa_scan_thread;
199static DEFINE_MUTEX(hpsa_scan_mutex);
200static LIST_HEAD(hpsa_scan_q);
201static int hpsa_scan_func(void *data);
202
203/**
204 * add_to_scan_list() - add controller to rescan queue
205 * @h: Pointer to the controller.
206 *
207 * Adds the controller to the rescan queue if not already on the queue.
208 *
209 * returns 1 if added to the queue, 0 if skipped (could be on the
210 * queue already, or the controller could be initializing or shutting
211 * down).
212 **/
213static int add_to_scan_list(struct ctlr_info *h)
214{
215 struct ctlr_info *test_h;
216 int found = 0;
217 int ret = 0;
218
219 if (h->busy_initializing)
220 return 0;
221
222 /*
223 * If we don't get the lock, it means the driver is unloading
224 * and there's no point in scheduling a new scan.
225 */
226 if (!mutex_trylock(&h->busy_shutting_down))
227 return 0;
228
229 mutex_lock(&hpsa_scan_mutex);
230 list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
231 if (test_h == h) {
232 found = 1;
233 break;
234 }
235 }
236 if (!found && !h->busy_scanning) {
237 INIT_COMPLETION(h->scan_wait);
238 list_add_tail(&h->scan_list, &hpsa_scan_q);
239 ret = 1;
240 }
241 mutex_unlock(&hpsa_scan_mutex);
242 mutex_unlock(&h->busy_shutting_down);
243
244 return ret;
245}
246
247/**
248 * remove_from_scan_list() - remove controller from rescan queue
249 * @h: Pointer to the controller.
250 *
251 * Removes the controller from the rescan queue if present. Blocks if
252 * the controller is currently conducting a rescan. The controller
253 * can be in one of three states:
254 * 1. Doesn't need a scan
255 * 2. On the scan list, but not scanning yet (we remove it)
256 * 3. Busy scanning (and not on the list). In this case we want to wait for
257 * the scan to complete to make sure the scanning thread for this
258 * controller is completely idle.
259 **/
260static void remove_from_scan_list(struct ctlr_info *h)
261{
262 struct ctlr_info *test_h, *tmp_h;
263
264 mutex_lock(&hpsa_scan_mutex);
265 list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
266 if (test_h == h) { /* state 2. */
267 list_del(&h->scan_list);
268 complete_all(&h->scan_wait);
269 mutex_unlock(&hpsa_scan_mutex);
270 return;
271 }
272 }
273 if (h->busy_scanning) { /* state 3. */
274 mutex_unlock(&hpsa_scan_mutex);
275 wait_for_completion(&h->scan_wait);
276 } else { /* state 1, nothing to do. */
277 mutex_unlock(&hpsa_scan_mutex);
278 }
279}
280
281/* hpsa_scan_func() - kernel thread used to rescan controllers
282 * @data: Ignored.
283 *
284 * A kernel thread used to scan for drive topology changes on
285 * controllers. The thread processes only one controller at a time
286 * using a queue. Controllers are added to the queue using
287 * add_to_scan_list() and removed from the queue either after done
288 * processing or using remove_from_scan_list().
289 *
290 * returns 0.
291 **/
292static int hpsa_scan_func(__attribute__((unused)) void *data)
293{
294 struct ctlr_info *h;
295 int host_no;
296
297 while (1) {
298 set_current_state(TASK_INTERRUPTIBLE);
299 schedule();
300 if (kthread_should_stop())
301 break;
302
303 while (1) {
304 mutex_lock(&hpsa_scan_mutex);
305 if (list_empty(&hpsa_scan_q)) {
306 mutex_unlock(&hpsa_scan_mutex);
307 break;
308 }
309 h = list_entry(hpsa_scan_q.next, struct ctlr_info,
310 scan_list);
311 list_del(&h->scan_list);
312 h->busy_scanning = 1;
313 mutex_unlock(&hpsa_scan_mutex);
314 host_no = h->scsi_host ? h->scsi_host->host_no : -1;
315 hpsa_update_scsi_devices(h, host_no);
316 complete_all(&h->scan_wait);
317 mutex_lock(&hpsa_scan_mutex);
318 h->busy_scanning = 0;
319 mutex_unlock(&hpsa_scan_mutex);
320 }
321 }
322 return 0;
323}
324
325static int check_for_unit_attention(struct ctlr_info *h,
326 struct CommandList *c)
327{
328 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
329 return 0;
330
331 switch (c->err_info->SenseInfo[12]) {
332 case STATE_CHANGED:
333 dev_warn(&h->pdev->dev, "hpsa%d: a state change "
334 "detected, command retried\n", h->ctlr);
335 break;
336 case LUN_FAILED:
337 dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
338 "detected, action required\n", h->ctlr);
339 break;
340 case REPORT_LUNS_CHANGED:
341 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
342 "changed\n", h->ctlr);
343 /*
344 * Here, we could call add_to_scan_list and wake up the scan thread,
345 * except that it's quite likely that we will get more than one
346 * REPORT_LUNS_CHANGED condition in quick succession, which means
347 * that those which occur after the first one will likely happen
348 * *during* the hpsa_scan_thread's rescan. And the rescan code is not
349 * robust enough to restart in the middle, undoing what it has already
350 * done, and it's not clear that it's even possible to do this, since
351 * part of what it does is notify the SCSI mid layer, which starts
352 * doing its own i/o to read partition tables and so on, and the
353 * driver doesn't have visibility to know what might need undoing.
354 * In any event, if possible, it is horribly complicated to get right
355 * so we just don't do it for now.
356 *
357 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
358 */
359 break;
360 case POWER_OR_RESET:
361 dev_warn(&h->pdev->dev, "hpsa%d: a power on "
362 "or device reset detected\n", h->ctlr);
363 break;
364 case UNIT_ATTENTION_CLEARED:
365 dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
366 "cleared by another initiator\n", h->ctlr);
367 break;
368 default:
369 dev_warn(&h->pdev->dev, "hpsa%d: unknown "
370 "unit attention detected\n", h->ctlr);
371 break;
372 }
373 return 1;
374}
375
376static ssize_t host_store_rescan(struct device *dev,
377 struct device_attribute *attr,
378 const char *buf, size_t count)
379{
380 struct ctlr_info *h;
381 struct Scsi_Host *shost = class_to_shost(dev);
382 unsigned long *priv = shost_priv(shost);
383 h = (struct ctlr_info *) *priv;
384 if (add_to_scan_list(h)) {
385 wake_up_process(hpsa_scan_thread);
386 wait_for_completion_interruptible(&h->scan_wait);
387 }
388 return count;
389}
390
391/* Enqueuing and dequeuing functions for cmdlists. */
392static inline void addQ(struct hlist_head *list, struct CommandList *c)
393{
394 hlist_add_head(&c->list, list);
395}
396
397static void enqueue_cmd_and_start_io(struct ctlr_info *h,
398 struct CommandList *c)
399{
400 unsigned long flags;
401 spin_lock_irqsave(&h->lock, flags);
402 addQ(&h->reqQ, c);
403 h->Qdepth++;
404 start_io(h);
405 spin_unlock_irqrestore(&h->lock, flags);
406}
407
408static inline void removeQ(struct CommandList *c)
409{
410 if (WARN_ON(hlist_unhashed(&c->list)))
411 return;
412 hlist_del_init(&c->list);
413}
414
415static inline int is_hba_lunid(unsigned char scsi3addr[])
416{
417 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
418}
419
420static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
421{
422 return (scsi3addr[3] & 0xC0) == 0x40;
423}
424
425static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
426 "UNKNOWN"
427};
428#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
429
430static ssize_t raid_level_show(struct device *dev,
431 struct device_attribute *attr, char *buf)
432{
433 ssize_t l = 0;
434 int rlevel;
435 struct ctlr_info *h;
436 struct scsi_device *sdev;
437 struct hpsa_scsi_dev_t *hdev;
438 unsigned long flags;
439
440 sdev = to_scsi_device(dev);
441 h = sdev_to_hba(sdev);
442 spin_lock_irqsave(&h->lock, flags);
443 hdev = sdev->hostdata;
444 if (!hdev) {
445 spin_unlock_irqrestore(&h->lock, flags);
446 return -ENODEV;
447 }
448
449 /* Is this even a logical drive? */
450 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
451 spin_unlock_irqrestore(&h->lock, flags);
452 l = snprintf(buf, PAGE_SIZE, "N/A\n");
453 return l;
454 }
455
456 rlevel = hdev->raid_level;
457 spin_unlock_irqrestore(&h->lock, flags);
458 if (rlevel < 0 || rlevel > RAID_UNKNOWN)
459 rlevel = RAID_UNKNOWN;
460 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
461 return l;
462}
463
464static ssize_t lunid_show(struct device *dev,
465 struct device_attribute *attr, char *buf)
466{
467 struct ctlr_info *h;
468 struct scsi_device *sdev;
469 struct hpsa_scsi_dev_t *hdev;
470 unsigned long flags;
471 unsigned char lunid[8];
472
473 sdev = to_scsi_device(dev);
474 h = sdev_to_hba(sdev);
475 spin_lock_irqsave(&h->lock, flags);
476 hdev = sdev->hostdata;
477 if (!hdev) {
478 spin_unlock_irqrestore(&h->lock, flags);
479 return -ENODEV;
480 }
481 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
482 spin_unlock_irqrestore(&h->lock, flags);
483 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
484 lunid[0], lunid[1], lunid[2], lunid[3],
485 lunid[4], lunid[5], lunid[6], lunid[7]);
486}
487
488static ssize_t unique_id_show(struct device *dev,
489 struct device_attribute *attr, char *buf)
490{
491 struct ctlr_info *h;
492 struct scsi_device *sdev;
493 struct hpsa_scsi_dev_t *hdev;
494 unsigned long flags;
495 unsigned char sn[16];
496
497 sdev = to_scsi_device(dev);
498 h = sdev_to_hba(sdev);
499 spin_lock_irqsave(&h->lock, flags);
500 hdev = sdev->hostdata;
501 if (!hdev) {
502 spin_unlock_irqrestore(&h->lock, flags);
503 return -ENODEV;
504 }
505 memcpy(sn, hdev->device_id, sizeof(sn));
506 spin_unlock_irqrestore(&h->lock, flags);
507 return snprintf(buf, 16 * 2 + 2,
508 "%02X%02X%02X%02X%02X%02X%02X%02X"
509 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
510 sn[0], sn[1], sn[2], sn[3],
511 sn[4], sn[5], sn[6], sn[7],
512 sn[8], sn[9], sn[10], sn[11],
513 sn[12], sn[13], sn[14], sn[15]);
514}
515
516static int hpsa_find_target_lun(struct ctlr_info *h,
517 unsigned char scsi3addr[], int bus, int *target, int *lun)
518{
 519	/* Finds an unused bus, target, lun for a new physical device.
 520	 * Assumes h->devlock is held.
 521	 */
522 int i, found = 0;
523 DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
524
525 memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
526
527 for (i = 0; i < h->ndevices; i++) {
528 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
529 set_bit(h->dev[i]->target, lun_taken);
530 }
531
532 for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
533 if (!test_bit(i, lun_taken)) {
534 /* *bus = 1; */
535 *target = i;
536 *lun = 0;
537 found = 1;
538 break;
539 }
540 }
541 return !found;
542}
543
544/* Add an entry into h->dev[] array. */
545static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
546 struct hpsa_scsi_dev_t *device,
547 struct hpsa_scsi_dev_t *added[], int *nadded)
548{
549 /* assumes h->devlock is held */
550 int n = h->ndevices;
551 int i;
552 unsigned char addr1[8], addr2[8];
553 struct hpsa_scsi_dev_t *sd;
554
555 if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
556 dev_err(&h->pdev->dev, "too many devices, some will be "
557 "inaccessible.\n");
558 return -1;
559 }
560
561 /* physical devices do not have lun or target assigned until now. */
562 if (device->lun != -1)
563 /* Logical device, lun is already assigned. */
564 goto lun_assigned;
565
 566	/* If this device is a non-zero lun of a multi-lun device,
 567	 * byte 4 of the 8-byte LUN addr will contain the logical
 568	 * unit number, zero otherwise.
 569	 */
570 if (device->scsi3addr[4] == 0) {
571 /* This is not a non-zero lun of a multi-lun device */
572 if (hpsa_find_target_lun(h, device->scsi3addr,
573 device->bus, &device->target, &device->lun) != 0)
574 return -1;
575 goto lun_assigned;
576 }
577
578 /* This is a non-zero lun of a multi-lun device.
579 * Search through our list and find the device which
580 * has the same 8 byte LUN address, excepting byte 4.
581 * Assign the same bus and target for this new LUN.
582 * Use the logical unit number from the firmware.
583 */
584 memcpy(addr1, device->scsi3addr, 8);
585 addr1[4] = 0;
586 for (i = 0; i < n; i++) {
587 sd = h->dev[i];
588 memcpy(addr2, sd->scsi3addr, 8);
589 addr2[4] = 0;
590 /* differ only in byte 4? */
591 if (memcmp(addr1, addr2, 8) == 0) {
592 device->bus = sd->bus;
593 device->target = sd->target;
594 device->lun = device->scsi3addr[4];
595 break;
596 }
597 }
598 if (device->lun == -1) {
599 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
600 " suspect firmware bug or unsupported hardware "
601 "configuration.\n");
602 return -1;
603 }
604
605lun_assigned:
606
607 h->dev[n] = device;
608 h->ndevices++;
609 added[*nadded] = device;
610 (*nadded)++;
611
612 /* initially, (before registering with scsi layer) we don't
613 * know our hostno and we don't want to print anything first
614 * time anyway (the scsi layer's inquiries will show that info)
615 */
616 /* if (hostno != -1) */
617 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
618 scsi_device_type(device->devtype), hostno,
619 device->bus, device->target, device->lun);
620 return 0;
621}
622
623/* Remove an entry from h->dev[] array. */
624static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
625 struct hpsa_scsi_dev_t *removed[], int *nremoved)
626{
627 /* assumes h->devlock is held */
628 int i;
629 struct hpsa_scsi_dev_t *sd;
630
631 if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA)
632 BUG();
633
634 sd = h->dev[entry];
635 removed[*nremoved] = h->dev[entry];
636 (*nremoved)++;
637
638 for (i = entry; i < h->ndevices-1; i++)
639 h->dev[i] = h->dev[i+1];
640 h->ndevices--;
641 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
642 scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
643 sd->lun);
644}
645
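/* Byte-wise equality test for two 8-byte CISS LUN addresses. */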
646#define SCSI3ADDR_EQ(a, b) ( \
647 (a)[7] == (b)[7] && \
648 (a)[6] == (b)[6] && \
649 (a)[5] == (b)[5] && \
650 (a)[4] == (b)[4] && \
651 (a)[3] == (b)[3] && \
652 (a)[2] == (b)[2] && \
653 (a)[1] == (b)[1] && \
654 (a)[0] == (b)[0])
655
656static void fixup_botched_add(struct ctlr_info *h,
657 struct hpsa_scsi_dev_t *added)
658{
659 /* called when scsi_add_device fails in order to re-adjust
660 * h->dev[] to match the mid layer's view.
661 */
662 unsigned long flags;
663 int i, j;
664
665 spin_lock_irqsave(&h->lock, flags);
666 for (i = 0; i < h->ndevices; i++) {
667 if (h->dev[i] == added) {
668 for (j = i; j < h->ndevices-1; j++)
669 h->dev[j] = h->dev[j+1];
670 h->ndevices--;
671 break;
672 }
673 }
674 spin_unlock_irqrestore(&h->lock, flags);
675 kfree(added);
676}
677
678static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
679 struct hpsa_scsi_dev_t *dev2)
680{
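	/* SCSI device type 0x0C is a storage array (RAID) controller; such
	 * entries skip the whole-struct memcmp and are compared field by
	 * field below.
	 */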
681 if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
682 (dev1->lun != -1 && dev2->lun != -1)) &&
683 dev1->devtype != 0x0C)
684 return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
685
686 /* we compare everything except lun and target as these
687 * are not yet assigned. Compare parts likely
688 * to differ first
689 */
690 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
691 sizeof(dev1->scsi3addr)) != 0)
692 return 0;
693 if (memcmp(dev1->device_id, dev2->device_id,
694 sizeof(dev1->device_id)) != 0)
695 return 0;
696 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
697 return 0;
698 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
699 return 0;
700 if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
701 return 0;
702 if (dev1->devtype != dev2->devtype)
703 return 0;
704 if (dev1->raid_level != dev2->raid_level)
705 return 0;
706 if (dev1->bus != dev2->bus)
707 return 0;
708 return 1;
709}
710
711/* Find needle in haystack. If exact match found, return DEVICE_SAME,
712 * and return needle location in *index. If scsi3addr matches, but not
713 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
714 * location in *index. If needle not found, return DEVICE_NOT_FOUND.
715 */
716static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
717 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
718 int *index)
719{
720 int i;
721#define DEVICE_NOT_FOUND 0
722#define DEVICE_CHANGED 1
723#define DEVICE_SAME 2
724 for (i = 0; i < haystack_size; i++) {
725 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
726 *index = i;
727 if (device_is_the_same(needle, haystack[i]))
728 return DEVICE_SAME;
729 else
730 return DEVICE_CHANGED;
731 }
732 }
733 *index = -1;
734 return DEVICE_NOT_FOUND;
735}
736
737static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
738 struct hpsa_scsi_dev_t *sd[], int nsds)
739{
740 /* sd contains scsi3 addresses and devtypes, and inquiry
741 * data. This function takes what's in sd to be the current
742 * reality and updates h->dev[] to reflect that reality.
743 */
744 int i, entry, device_change, changes = 0;
745 struct hpsa_scsi_dev_t *csd;
746 unsigned long flags;
747 struct hpsa_scsi_dev_t **added, **removed;
748 int nadded, nremoved;
749 struct Scsi_Host *sh = NULL;
750
751 added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
752 GFP_KERNEL);
753 removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
754 GFP_KERNEL);
755
756 if (!added || !removed) {
757 dev_warn(&h->pdev->dev, "out of memory in "
758 "adjust_hpsa_scsi_table\n");
759 goto free_and_out;
760 }
761
762 spin_lock_irqsave(&h->devlock, flags);
763
764 /* find any devices in h->dev[] that are not in
765 * sd[] and remove them from h->dev[], and for any
766 * devices which have changed, remove the old device
767 * info and add the new device info.
768 */
769 i = 0;
770 nremoved = 0;
771 nadded = 0;
772 while (i < h->ndevices) {
773 csd = h->dev[i];
774 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
775 if (device_change == DEVICE_NOT_FOUND) {
776 changes++;
777 hpsa_scsi_remove_entry(h, hostno, i,
778 removed, &nremoved);
779 continue; /* remove ^^^, hence i not incremented */
780 } else if (device_change == DEVICE_CHANGED) {
781 changes++;
782 hpsa_scsi_remove_entry(h, hostno, i,
783 removed, &nremoved);
784 (void) hpsa_scsi_add_entry(h, hostno, sd[entry],
785 added, &nadded);
786 /* add can't fail, we just removed one. */
787 sd[entry] = NULL; /* prevent it from being freed */
788 }
789 i++;
790 }
791
792 /* Now, make sure every device listed in sd[] is also
793 * listed in h->dev[], adding them if they aren't found
794 */
795
796 for (i = 0; i < nsds; i++) {
797 if (!sd[i]) /* if already added above. */
798 continue;
799 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
800 h->ndevices, &entry);
801 if (device_change == DEVICE_NOT_FOUND) {
802 changes++;
803 if (hpsa_scsi_add_entry(h, hostno, sd[i],
804 added, &nadded) != 0)
805 break;
806 sd[i] = NULL; /* prevent from being freed later. */
807 } else if (device_change == DEVICE_CHANGED) {
808 /* should never happen... */
809 changes++;
810 dev_warn(&h->pdev->dev,
811 "device unexpectedly changed.\n");
812 /* but if it does happen, we just ignore that device */
813 }
814 }
815 spin_unlock_irqrestore(&h->devlock, flags);
816
817 /* Don't notify scsi mid layer of any changes the first time through
818 * (or if there are no changes) scsi_scan_host will do it later the
819 * first time through.
820 */
821 if (hostno == -1 || !changes)
822 goto free_and_out;
823
824 sh = h->scsi_host;
825 /* Notify scsi mid layer of any removed devices */
826 for (i = 0; i < nremoved; i++) {
827 struct scsi_device *sdev =
828 scsi_device_lookup(sh, removed[i]->bus,
829 removed[i]->target, removed[i]->lun);
830 if (sdev != NULL) {
831 scsi_remove_device(sdev);
832 scsi_device_put(sdev);
833 } else {
834 /* We don't expect to get here.
835 * future cmds to this device will get selection
836 * timeout as if the device was gone.
837 */
838 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
839 " for removal.", hostno, removed[i]->bus,
840 removed[i]->target, removed[i]->lun);
841 }
842 kfree(removed[i]);
843 removed[i] = NULL;
844 }
845
846 /* Notify scsi mid layer of any added devices */
847 for (i = 0; i < nadded; i++) {
848 if (scsi_add_device(sh, added[i]->bus,
849 added[i]->target, added[i]->lun) == 0)
850 continue;
851 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
852 "device not added.\n", hostno, added[i]->bus,
853 added[i]->target, added[i]->lun);
854 /* now we have to remove it from h->dev,
855 * since it didn't get added to scsi mid layer
856 */
857 fixup_botched_add(h, added[i]);
858 }
859
860free_and_out:
861 kfree(added);
862 kfree(removed);
863 return 0;
864}
865
866/*
 867 * Lookup bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *
 868 * Assumes h->devlock is held.
869 */
870static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
871 int bus, int target, int lun)
872{
873 int i;
874 struct hpsa_scsi_dev_t *sd;
875
876 for (i = 0; i < h->ndevices; i++) {
877 sd = h->dev[i];
878 if (sd->bus == bus && sd->target == target && sd->lun == lun)
879 return sd;
880 }
881 return NULL;
882}
883
884/* link sdev->hostdata to our per-device structure. */
885static int hpsa_slave_alloc(struct scsi_device *sdev)
886{
887 struct hpsa_scsi_dev_t *sd;
888 unsigned long flags;
889 struct ctlr_info *h;
890
891 h = sdev_to_hba(sdev);
892 spin_lock_irqsave(&h->devlock, flags);
893 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
894 sdev_id(sdev), sdev->lun);
895 if (sd != NULL)
896 sdev->hostdata = sd;
897 spin_unlock_irqrestore(&h->devlock, flags);
898 return 0;
899}
900
901static void hpsa_slave_destroy(struct scsi_device *sdev)
902{
903 return; /* nothing to do. */
904}
905
906static void hpsa_scsi_setup(struct ctlr_info *h)
907{
908 h->ndevices = 0;
909 h->scsi_host = NULL;
910 spin_lock_init(&h->devlock);
911 return;
912}
913
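/* Completion path for SCSI commands: translate the controller's ErrorInfo
 * into a midlayer result code, copy back any sense data, then hand the
 * command to scsi_done() and release the command block.
 */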
914static void complete_scsi_command(struct CommandList *cp,
915 int timeout, __u32 tag)
916{
917 struct scsi_cmnd *cmd;
918 struct ctlr_info *h;
919 struct ErrorInfo *ei;
920
921 unsigned char sense_key;
922 unsigned char asc; /* additional sense code */
923 unsigned char ascq; /* additional sense code qualifier */
924
925 ei = cp->err_info;
926 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
927 h = cp->h;
928
929 scsi_dma_unmap(cmd); /* undo the DMA mappings */
930
931 cmd->result = (DID_OK << 16); /* host byte */
932 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
933 cmd->result |= (ei->ScsiStatus << 1);
934
935 /* copy the sense data whether we need to or not. */
936 memcpy(cmd->sense_buffer, ei->SenseInfo,
937 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
938 SCSI_SENSE_BUFFERSIZE :
939 ei->SenseLen);
940 scsi_set_resid(cmd, ei->ResidualCnt);
941
942 if (ei->CommandStatus == 0) {
943 cmd->scsi_done(cmd);
944 cmd_free(h, cp);
945 return;
946 }
947
948 /* an error has occurred */
949 switch (ei->CommandStatus) {
950
951 case CMD_TARGET_STATUS:
952 if (ei->ScsiStatus) {
953 /* Get sense key */
954 sense_key = 0xf & ei->SenseInfo[2];
955 /* Get additional sense code */
956 asc = ei->SenseInfo[12];
 957			/* Get additional sense code qualifier */
958 ascq = ei->SenseInfo[13];
959 }
960
961 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
962 if (check_for_unit_attention(h, cp)) {
963 cmd->result = DID_SOFT_ERROR << 16;
964 break;
965 }
966 if (sense_key == ILLEGAL_REQUEST) {
967 /*
968 * SCSI REPORT_LUNS is commonly unsupported on
969 * Smart Array. Suppress noisy complaint.
970 */
971 if (cp->Request.CDB[0] == REPORT_LUNS)
972 break;
973
 974				/* If ASC/ASCQ indicate a Logical Unit Not
 975				 * Supported condition, warn and pass the
 976				 * check condition up unchanged. */
977 if ((asc == 0x25) && (ascq == 0x0)) {
978 dev_warn(&h->pdev->dev, "cp %p "
979 "has check condition\n", cp);
980 break;
981 }
982 }
983
984 if (sense_key == NOT_READY) {
985 /* If Sense is Not Ready, Logical Unit
986 * Not ready, Manual Intervention
987 * required
988 */
989 if ((asc == 0x04) && (ascq == 0x03)) {
990 cmd->result = DID_NO_CONNECT << 16;
991 dev_warn(&h->pdev->dev, "cp %p "
992 "has check condition: unit "
993 "not ready, manual "
994 "intervention required\n", cp);
995 break;
996 }
997 }
998
999
1000 /* Must be some other type of check condition */
1001 dev_warn(&h->pdev->dev, "cp %p has check condition: "
1002 "unknown type: "
1003 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1004 "Returning result: 0x%x, "
1005 "cmd=[%02x %02x %02x %02x %02x "
1006 "%02x %02x %02x %02x %02x]\n",
1007 cp, sense_key, asc, ascq,
1008 cmd->result,
1009 cmd->cmnd[0], cmd->cmnd[1],
1010 cmd->cmnd[2], cmd->cmnd[3],
1011 cmd->cmnd[4], cmd->cmnd[5],
1012 cmd->cmnd[6], cmd->cmnd[7],
1013 cmd->cmnd[8], cmd->cmnd[9]);
1014 break;
1015 }
1016
1017
1018 /* Problem was not a check condition
1019 * Pass it up to the upper layers...
1020 */
1021 if (ei->ScsiStatus) {
1022 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1023 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1024 "Returning result: 0x%x\n",
1025 cp, ei->ScsiStatus,
1026 sense_key, asc, ascq,
1027 cmd->result);
1028 } else { /* scsi status is zero??? How??? */
1029 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
 1030				"Returning no connection.\n", cp);
1031
1032 /* Ordinarily, this case should never happen,
1033 * but there is a bug in some released firmware
1034 * revisions that allows it to happen if, for
1035 * example, a 4100 backplane loses power and
1036 * the tape drive is in it. We assume that
1037 * it's a fatal error of some kind because we
1038 * can't show that it wasn't. We will make it
1039 * look like selection timeout since that is
1040 * the most common reason for this to occur,
1041 * and it's severe enough.
1042 */
1043
1044 cmd->result = DID_NO_CONNECT << 16;
1045 }
1046 break;
1047
1048 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1049 break;
1050 case CMD_DATA_OVERRUN:
1051 dev_warn(&h->pdev->dev, "cp %p has"
1052 " completed with data overrun "
1053 "reported\n", cp);
1054 break;
1055 case CMD_INVALID: {
1056 /* print_bytes(cp, sizeof(*cp), 1, 0);
1057 print_cmd(cp); */
1058 /* We get CMD_INVALID if you address a non-existent device
1059 * instead of a selection timeout (no response). You will
1060 * see this if you yank out a drive, then try to access it.
1061 * This is kind of a shame because it means that any other
1062 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1063 * missing target. */
1064 cmd->result = DID_NO_CONNECT << 16;
1065 }
1066 break;
1067 case CMD_PROTOCOL_ERR:
1068 dev_warn(&h->pdev->dev, "cp %p has "
 1069			"protocol error\n", cp);
1070 break;
1071 case CMD_HARDWARE_ERR:
1072 cmd->result = DID_ERROR << 16;
1073 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1074 break;
1075 case CMD_CONNECTION_LOST:
1076 cmd->result = DID_ERROR << 16;
1077 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1078 break;
1079 case CMD_ABORTED:
1080 cmd->result = DID_ABORT << 16;
1081 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1082 cp, ei->ScsiStatus);
1083 break;
1084 case CMD_ABORT_FAILED:
1085 cmd->result = DID_ERROR << 16;
1086 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1087 break;
1088 case CMD_UNSOLICITED_ABORT:
1089 cmd->result = DID_ABORT << 16;
 1090		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1091 "abort\n", cp);
1092 break;
1093 case CMD_TIMEOUT:
1094 cmd->result = DID_TIME_OUT << 16;
 1095		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1096 break;
1097 default:
1098 cmd->result = DID_ERROR << 16;
1099 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1100 cp, ei->CommandStatus);
1101 }
1102 cmd->scsi_done(cmd);
1103 cmd_free(h, cp);
1104}
1105
1106static int hpsa_scsi_detect(struct ctlr_info *h)
1107{
1108 struct Scsi_Host *sh;
1109 int error;
1110
1111 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
1112 if (sh == NULL)
1113 goto fail;
1114
1115 sh->io_port = 0;
1116 sh->n_io_port = 0;
1117 sh->this_id = -1;
1118 sh->max_channel = 3;
1119 sh->max_cmd_len = MAX_COMMAND_SIZE;
1120 sh->max_lun = HPSA_MAX_LUN;
1121 sh->max_id = HPSA_MAX_LUN;
1122 h->scsi_host = sh;
1123 sh->hostdata[0] = (unsigned long) h;
1124 sh->irq = h->intr[SIMPLE_MODE_INT];
1125 sh->unique_id = sh->irq;
1126 error = scsi_add_host(sh, &h->pdev->dev);
1127 if (error)
1128 goto fail_host_put;
1129 scsi_scan_host(sh);
1130 return 0;
1131
1132 fail_host_put:
1133 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
1134 " failed for controller %d\n", h->ctlr);
1135 scsi_host_put(sh);
1136 return -1;
1137 fail:
1138 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
1139 " failed for controller %d\n", h->ctlr);
1140 return -1;
1141}
1142
1143static void hpsa_pci_unmap(struct pci_dev *pdev,
1144 struct CommandList *c, int sg_used, int data_direction)
1145{
1146 int i;
1147 union u64bit addr64;
1148
1149 for (i = 0; i < sg_used; i++) {
1150 addr64.val32.lower = c->SG[i].Addr.lower;
1151 addr64.val32.upper = c->SG[i].Addr.upper;
1152 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1153 data_direction);
1154 }
1155}
1156
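/* Describe one contiguous kernel buffer to the controller: map it for DMA
 * and fill in the command's single scatter/gather entry.
 */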
1157static void hpsa_map_one(struct pci_dev *pdev,
1158 struct CommandList *cp,
1159 unsigned char *buf,
1160 size_t buflen,
1161 int data_direction)
1162{
1163 __u64 addr64;
1164
1165 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1166 cp->Header.SGList = 0;
1167 cp->Header.SGTotal = 0;
1168 return;
1169 }
1170
1171 addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
1172 cp->SG[0].Addr.lower =
1173 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
1174 cp->SG[0].Addr.upper =
1175 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
1176 cp->SG[0].Len = buflen;
1177 cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */
1178 cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
1179}
1180
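/* Issue a command and sleep until its completion is signalled through
 * c->waiting (presumably by the interrupt handler).
 */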
1181static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1182 struct CommandList *c)
1183{
1184 DECLARE_COMPLETION_ONSTACK(wait);
1185
1186 c->waiting = &wait;
1187 enqueue_cmd_and_start_io(h, c);
1188 wait_for_completion(&wait);
1189}
1190
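/* As above, but retry up to three extra times if the command fails with a
 * unit attention, then unmap the single DMA buffer set up by hpsa_map_one().
 */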
1191static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1192 struct CommandList *c, int data_direction)
1193{
1194 int retry_count = 0;
1195
1196 do {
 1197		memset(c->err_info, 0, sizeof(*c->err_info));
1198 hpsa_scsi_do_simple_cmd_core(h, c);
1199 retry_count++;
1200 } while (check_for_unit_attention(h, c) && retry_count <= 3);
1201 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1202}
1203
1204static void hpsa_scsi_interpret_error(struct CommandList *cp)
1205{
1206 struct ErrorInfo *ei;
1207 struct device *d = &cp->h->pdev->dev;
1208
1209 ei = cp->err_info;
1210 switch (ei->CommandStatus) {
1211 case CMD_TARGET_STATUS:
1212 dev_warn(d, "cmd %p has completed with errors\n", cp);
1213 dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
1214 ei->ScsiStatus);
1215 if (ei->ScsiStatus == 0)
1216 dev_warn(d, "SCSI status is abnormally zero. "
1217 "(probably indicates selection timeout "
1218 "reported incorrectly due to a known "
1219 "firmware bug, circa July, 2001.)\n");
1220 break;
1221 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1222 dev_info(d, "UNDERRUN\n");
1223 break;
1224 case CMD_DATA_OVERRUN:
1225 dev_warn(d, "cp %p has completed with data overrun\n", cp);
1226 break;
1227 case CMD_INVALID: {
1228 /* controller unfortunately reports SCSI passthru's
1229 * to non-existent targets as invalid commands.
1230 */
1231 dev_warn(d, "cp %p is reported invalid (probably means "
1232 "target device no longer present)\n", cp);
1233 /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
1234 print_cmd(cp); */
1235 }
1236 break;
1237 case CMD_PROTOCOL_ERR:
 1238		dev_warn(d, "cp %p has protocol error\n", cp);
1239 break;
1240 case CMD_HARDWARE_ERR:
1241 /* cmd->result = DID_ERROR << 16; */
1242 dev_warn(d, "cp %p had hardware error\n", cp);
1243 break;
1244 case CMD_CONNECTION_LOST:
1245 dev_warn(d, "cp %p had connection lost\n", cp);
1246 break;
1247 case CMD_ABORTED:
1248 dev_warn(d, "cp %p was aborted\n", cp);
1249 break;
1250 case CMD_ABORT_FAILED:
1251 dev_warn(d, "cp %p reports abort failed\n", cp);
1252 break;
1253 case CMD_UNSOLICITED_ABORT:
1254 dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
1255 break;
1256 case CMD_TIMEOUT:
1257 dev_warn(d, "cp %p timed out\n", cp);
1258 break;
1259 default:
1260 dev_warn(d, "cp %p returned unknown status %x\n", cp,
1261 ei->CommandStatus);
1262 }
1263}
1264
1265static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1266 unsigned char page, unsigned char *buf,
1267 unsigned char bufsize)
1268{
1269 int rc = IO_OK;
1270 struct CommandList *c;
1271 struct ErrorInfo *ei;
1272
1273 c = cmd_special_alloc(h);
1274
1275 if (c == NULL) { /* trouble... */
1276 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1277 return -1;
1278 }
1279
1280 fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
1281 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1282 ei = c->err_info;
1283 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1284 hpsa_scsi_interpret_error(c);
1285 rc = -1;
1286 }
1287 cmd_special_free(h, c);
1288 return rc;
1289}
1290
1291static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
1292{
1293 int rc = IO_OK;
1294 struct CommandList *c;
1295 struct ErrorInfo *ei;
1296
1297 c = cmd_special_alloc(h);
1298
1299 if (c == NULL) { /* trouble... */
1300 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1301 return -1;
1302 }
1303
1304 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
1305 hpsa_scsi_do_simple_cmd_core(h, c);
1306 /* no unmap needed here because no data xfer. */
1307
1308 ei = c->err_info;
1309 if (ei->CommandStatus != 0) {
1310 hpsa_scsi_interpret_error(c);
1311 rc = -1;
1312 }
1313 cmd_special_free(h, c);
1314 return rc;
1315}
1316
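/* Read the drive's RAID level via the vendor-specific inquiry page 0xC1;
 * byte 8 of the response carries the level. Falls back to RAID_UNKNOWN on
 * any failure or out-of-range value.
 */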
1317static void hpsa_get_raid_level(struct ctlr_info *h,
1318 unsigned char *scsi3addr, unsigned char *raid_level)
1319{
1320 int rc;
1321 unsigned char *buf;
1322
1323 *raid_level = RAID_UNKNOWN;
1324 buf = kzalloc(64, GFP_KERNEL);
1325 if (!buf)
1326 return;
1327 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1328 if (rc == 0)
1329 *raid_level = buf[8];
1330 if (*raid_level > RAID_UNKNOWN)
1331 *raid_level = RAID_UNKNOWN;
1332 kfree(buf);
1333 return;
1334}
1335
1336/* Get the device id from inquiry page 0x83 */
1337static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
1338 unsigned char *device_id, int buflen)
1339{
1340 int rc;
1341 unsigned char *buf;
1342
1343 if (buflen > 16)
1344 buflen = 16;
1345 buf = kzalloc(64, GFP_KERNEL);
1346 if (!buf)
1347 return -1;
1348 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
1349 if (rc == 0)
1350 memcpy(device_id, &buf[8], buflen);
1351 kfree(buf);
1352 return rc != 0;
1353}
1354
1355static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
1356 struct ReportLUNdata *buf, int bufsize,
1357 int extended_response)
1358{
1359 int rc = IO_OK;
1360 struct CommandList *c;
1361 unsigned char scsi3addr[8];
1362 struct ErrorInfo *ei;
1363
1364 c = cmd_special_alloc(h);
1365 if (c == NULL) { /* trouble... */
1366 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1367 return -1;
1368 }
1369
1370 memset(&scsi3addr[0], 0, 8); /* address the controller */
1371
1372 fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
1373 buf, bufsize, 0, scsi3addr, TYPE_CMD);
1374 if (extended_response)
1375 c->Request.CDB[1] = extended_response;
1376 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1377 ei = c->err_info;
1378 if (ei->CommandStatus != 0 &&
1379 ei->CommandStatus != CMD_DATA_UNDERRUN) {
1380 hpsa_scsi_interpret_error(c);
1381 rc = -1;
1382 }
1383 cmd_special_free(h, c);
1384 return rc;
1385}
1386
1387static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
1388 struct ReportLUNdata *buf,
1389 int bufsize, int extended_response)
1390{
1391 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
1392}
1393
1394static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
1395 struct ReportLUNdata *buf, int bufsize)
1396{
1397 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
1398}
1399
1400static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
1401 int bus, int target, int lun)
1402{
1403 device->bus = bus;
1404 device->target = target;
1405 device->lun = lun;
1406}
1407
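/* Fill in *this_device (type, vendor, model, revision, device id, raid
 * level) from a standard INQUIRY plus VPD page 0x83.
 * Returns 0 on success, 1 on failure.
 */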
1408static int hpsa_update_device_info(struct ctlr_info *h,
1409 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
1410{
1411#define OBDR_TAPE_INQ_SIZE 49
1412 unsigned char *inq_buff = NULL;
1413
1414 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1415 if (!inq_buff)
1416 goto bail_out;
1417
1418 memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
1419 /* Do an inquiry to the device to see what it is. */
1420 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1421 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1422 /* Inquiry failed (msg printed already) */
1423 dev_err(&h->pdev->dev,
1424 "hpsa_update_device_info: inquiry failed\n");
1425 goto bail_out;
1426 }
1427
1428 /* As a side effect, record the firmware version number
1429 * if we happen to be talking to the RAID controller.
1430 */
1431 if (is_hba_lunid(scsi3addr))
1432 memcpy(h->firm_ver, &inq_buff[32], 4);
1433
1434 this_device->devtype = (inq_buff[0] & 0x1f);
1435 memcpy(this_device->scsi3addr, scsi3addr, 8);
1436 memcpy(this_device->vendor, &inq_buff[8],
1437 sizeof(this_device->vendor));
1438 memcpy(this_device->model, &inq_buff[16],
1439 sizeof(this_device->model));
1440 memcpy(this_device->revision, &inq_buff[32],
1441 sizeof(this_device->revision));
1442 memset(this_device->device_id, 0,
1443 sizeof(this_device->device_id));
1444 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
1445 sizeof(this_device->device_id));
1446
1447 if (this_device->devtype == TYPE_DISK &&
1448 is_logical_dev_addr_mode(scsi3addr))
1449 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
1450 else
1451 this_device->raid_level = RAID_UNKNOWN;
1452
1453 kfree(inq_buff);
1454 return 0;
1455
1456bail_out:
1457 kfree(inq_buff);
1458 return 1;
1459}
1460
1461static unsigned char *msa2xxx_model[] = {
1462 "MSA2012",
1463 "MSA2024",
1464 "MSA2312",
1465 "MSA2324",
1466 NULL,
1467};
1468
1469static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1470{
1471 int i;
1472
1473 for (i = 0; msa2xxx_model[i]; i++)
1474 if (strncmp(device->model, msa2xxx_model[i],
1475 strlen(msa2xxx_model[i])) == 0)
1476 return 1;
1477 return 0;
1478}
1479
1480/* Helper function to assign bus, target, lun mapping of devices.
1481 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
 1482 * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
1483 * Logical drive target and lun are assigned at this time, but
1484 * physical device lun and target assignment are deferred (assigned
1485 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
1486 */
1487static void figure_bus_target_lun(struct ctlr_info *h,
1488 __u8 *lunaddrbytes, int *bus, int *target, int *lun,
1489 struct hpsa_scsi_dev_t *device)
1490{
1491
1492 __u32 lunid;
1493
1494 if (is_logical_dev_addr_mode(lunaddrbytes)) {
1495 /* logical device */
1496 memcpy(&lunid, lunaddrbytes, sizeof(lunid));
1497 lunid = le32_to_cpu(lunid);
1498
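		/* Layout of the 32-bit logical lunid as decoded below: for
		 * MSA2xxx volumes, bits 16-29 select the target within the
		 * enclosure and the low byte is the lun; for direct-attached
		 * logical volumes the low 14 bits serve as the target.
		 */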
1499 if (is_msa2xxx(h, device)) {
1500 *bus = 1;
1501 *target = (lunid >> 16) & 0x3fff;
1502 *lun = lunid & 0x00ff;
1503 } else {
1504 *bus = 0;
1505 *lun = 0;
1506 *target = lunid & 0x3fff;
1507 }
1508 } else {
1509 /* physical device */
1510 if (is_hba_lunid(lunaddrbytes))
1511 *bus = 3;
1512 else
1513 *bus = 2;
1514 *target = -1;
1515 *lun = -1; /* we will fill these in later. */
1516 }
1517}
1518
1519/*
 1520 * If there is no lun 0 on a target, Linux won't find any devices.
1521 * For the MSA2xxx boxes, we have to manually detect the enclosure
1522 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
1523 * it for some reason. *tmpdevice is the target we're adding,
1524 * this_device is a pointer into the current element of currentsd[]
1525 * that we're building up in update_scsi_devices(), below.
1526 * lunzerobits is a bitmap that tracks which targets already have a
1527 * lun 0 assigned.
1528 * Returns 1 if an enclosure was added, 0 if not.
1529 */
1530static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
1531 struct hpsa_scsi_dev_t *tmpdevice,
1532 struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes,
1533 int bus, int target, int lun, unsigned long lunzerobits[],
1534 int *nmsa2xxx_enclosures)
1535{
1536 unsigned char scsi3addr[8];
1537
1538 if (test_bit(target, lunzerobits))
1539 return 0; /* There is already a lun 0 on this target. */
1540
1541 if (!is_logical_dev_addr_mode(lunaddrbytes))
1542 return 0; /* It's the logical targets that may lack lun 0. */
1543
1544 if (!is_msa2xxx(h, tmpdevice))
1545 return 0; /* It's only the MSA2xxx that have this problem. */
1546
1547 if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
1548 return 0;
1549
 1550	if (is_hba_lunid(lunaddrbytes))
1551 return 0; /* Don't add the RAID controller here. */
1552
1553#define MAX_MSA2XXX_ENCLOSURES 32
1554 if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
1555 dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
1556 "enclosures exceeded. Check your hardware "
1557 "configuration.");
1558 return 0;
1559 }
1560
1561 memset(scsi3addr, 0, 8);
1562 scsi3addr[3] = target;
1563 if (hpsa_update_device_info(h, scsi3addr, this_device))
1564 return 0;
1565 (*nmsa2xxx_enclosures)++;
1566 hpsa_set_bus_target_lun(this_device, bus, target, 0);
1567 set_bit(target, lunzerobits);
1568 return 1;
1569}
1570
1571/*
1572 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
1573 * logdev. The number of luns in physdev and logdev are returned in
1574 * *nphysicals and *nlogicals, respectively.
1575 * Returns 0 on success, -1 otherwise.
1576 */
1577static int hpsa_gather_lun_info(struct ctlr_info *h,
1578 int reportlunsize,
1579 struct ReportLUNdata *physdev, __u32 *nphysicals,
1580 struct ReportLUNdata *logdev, __u32 *nlogicals)
1581{
1582 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1583 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1584 return -1;
1585 }
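	/* The REPORT LUNS reply starts with a big-endian byte count of the
	 * LUN list; each entry is 8 bytes, hence the divide by 8 below.
	 */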
1586 memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals));
1587 *nphysicals = be32_to_cpu(*nphysicals) / 8;
1588#ifdef DEBUG
1589 dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals);
1590#endif
1591 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1592 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1593 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1594 *nphysicals - HPSA_MAX_PHYS_LUN);
1595 *nphysicals = HPSA_MAX_PHYS_LUN;
1596 }
1597 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1598 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1599 return -1;
1600 }
1601 memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals));
1602 *nlogicals = be32_to_cpu(*nlogicals) / 8;
1603#ifdef DEBUG
1604 dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals);
1605#endif
1606 /* Reject Logicals in excess of our max capability. */
1607 if (*nlogicals > HPSA_MAX_LUN) {
1608 dev_warn(&h->pdev->dev,
1609 "maximum logical LUNs (%d) exceeded. "
1610 "%d LUNs ignored.\n", HPSA_MAX_LUN,
1611 *nlogicals - HPSA_MAX_LUN);
1612 *nlogicals = HPSA_MAX_LUN;
1613 }
1614 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
1615 dev_warn(&h->pdev->dev,
1616 "maximum logical + physical LUNs (%d) exceeded. "
1617 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1618 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
1619 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
1620 }
1621 return 0;
1622}
1623
1624static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1625{
1626 /* the idea here is we could get notified
1627 * that some devices have changed, so we do a report
1628 * physical luns and report logical luns cmd, and adjust
1629 * our list of devices accordingly.
1630 *
1631 * The scsi3addr's of devices won't change so long as the
1632 * adapter is not reset. That means we can rescan and
1633 * tell which devices we already know about, vs. new
1634 * devices, vs. disappearing devices.
1635 */
1636 struct ReportLUNdata *physdev_list = NULL;
1637 struct ReportLUNdata *logdev_list = NULL;
1638 unsigned char *inq_buff = NULL;
1639 __u32 nphysicals = 0;
1640 __u32 nlogicals = 0;
1641 __u32 ndev_allocated = 0;
1642 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1643 int ncurrent = 0;
1644 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1645 int i, nmsa2xxx_enclosures, ndevs_to_allocate;
1646 int bus, target, lun;
1647 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
1648
1649 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
1650 GFP_KERNEL);
1651 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1652 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1653 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1654 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1655
1656 if (!currentsd || !physdev_list || !logdev_list ||
1657 !inq_buff || !tmpdevice) {
1658 dev_err(&h->pdev->dev, "out of memory\n");
1659 goto out;
1660 }
1661 memset(lunzerobits, 0, sizeof(lunzerobits));
1662
1663 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1664 logdev_list, &nlogicals))
1665 goto out;
1666
1667 /* We might see up to 32 MSA2xxx enclosures, actually 8 of them
1668 * but each of them 4 times through different paths. The plus 1
1669 * is for the RAID controller.
1670 */
1671 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
1672
1673 /* Allocate the per device structures */
1674 for (i = 0; i < ndevs_to_allocate; i++) {
1675 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1676 if (!currentsd[i]) {
1677 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1678 __FILE__, __LINE__);
1679 goto out;
1680 }
1681 ndev_allocated++;
1682 }
1683
1684 /* adjust our table of devices */
1685 nmsa2xxx_enclosures = 0;
1686 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1687 __u8 *lunaddrbytes;
1688
1689 /* Figure out where the LUN ID info is coming from */
1690 if (i < nphysicals)
1691 lunaddrbytes = &physdev_list->LUN[i][0];
1692 else
1693 if (i < nphysicals + nlogicals)
1694 lunaddrbytes =
1695 &logdev_list->LUN[i-nphysicals][0];
1696 else /* jam in the RAID controller at the end */
1697 lunaddrbytes = RAID_CTLR_LUNID;
1698
1699 /* skip masked physical devices. */
1700 if (lunaddrbytes[3] & 0xC0 && i < nphysicals)
1701 continue;
1702
1703 /* Get device type, vendor, model, device id */
1704 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
1705 continue; /* skip it if we can't talk to it. */
1706 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
1707 tmpdevice);
1708 this_device = currentsd[ncurrent];
1709
1710 /*
1711 * For the msa2xxx boxes, we have to insert a LUN 0 which
1712 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
1713 * is nonetheless an enclosure device there. We have to
 1714		 * present it, otherwise Linux won't find anything if
1715 * there is no lun 0.
1716 */
1717 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
1718 lunaddrbytes, bus, target, lun, lunzerobits,
1719 &nmsa2xxx_enclosures)) {
1720 ncurrent++;
1721 this_device = currentsd[ncurrent];
1722 }
1723
1724 *this_device = *tmpdevice;
1725 hpsa_set_bus_target_lun(this_device, bus, target, lun);
1726
1727 switch (this_device->devtype) {
1728 case TYPE_ROM: {
1729 /* We don't *really* support actual CD-ROM devices,
1730 * just "One Button Disaster Recovery" tape drive
1731 * which temporarily pretends to be a CD-ROM drive.
1732 * So we check that the device is really an OBDR tape
1733 * device by checking for "$DR-10" in bytes 43-48 of
1734 * the inquiry data.
1735 */
1736 char obdr_sig[7];
1737#define OBDR_TAPE_SIG "$DR-10"
1738 strncpy(obdr_sig, &inq_buff[43], 6);
1739 obdr_sig[6] = '\0';
1740 if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
1741 /* Not OBDR device, ignore it. */
1742 break;
1743 }
1744 ncurrent++;
1745 break;
1746 case TYPE_DISK:
1747 if (i < nphysicals)
1748 break;
1749 ncurrent++;
1750 break;
1751 case TYPE_TAPE:
1752 case TYPE_MEDIUM_CHANGER:
1753 ncurrent++;
1754 break;
1755 case TYPE_RAID:
1756 /* Only present the Smartarray HBA as a RAID controller.
1757 * If it's a RAID controller other than the HBA itself
1758 * (an external RAID controller, MSA500 or similar)
1759 * don't present it.
1760 */
1761 if (!is_hba_lunid(lunaddrbytes))
1762 break;
1763 ncurrent++;
1764 break;
1765 default:
1766 break;
1767 }
1768 if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
1769 break;
1770 }
1771 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
1772out:
1773 kfree(tmpdevice);
1774 for (i = 0; i < ndev_allocated; i++)
1775 kfree(currentsd[i]);
1776 kfree(currentsd);
1777 kfree(inq_buff);
1778 kfree(physdev_list);
1779 kfree(logdev_list);
1780 return;
1781}
1782
1783/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
1784 * dma mapping and fills in the scatter gather entries of the
1785 * hpsa command, cp.
1786 */
1787static int hpsa_scatter_gather(struct pci_dev *pdev,
1788 struct CommandList *cp,
1789 struct scsi_cmnd *cmd)
1790{
1791 unsigned int len;
1792 struct scatterlist *sg;
1793 __u64 addr64;
1794 int use_sg, i;
1795
1796 BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
1797
1798 use_sg = scsi_dma_map(cmd);
1799 if (use_sg < 0)
1800 return use_sg;
1801
1802 if (!use_sg)
1803 goto sglist_finished;
1804
1805 scsi_for_each_sg(cmd, sg, use_sg, i) {
1806 addr64 = (__u64) sg_dma_address(sg);
1807 len = sg_dma_len(sg);
1808 cp->SG[i].Addr.lower =
1809 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
1810 cp->SG[i].Addr.upper =
1811 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
1812 cp->SG[i].Len = len;
1813 cp->SG[i].Ext = 0; /* we are not chaining */
1814 }
1815
1816sglist_finished:
1817
1818 cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */
1819 cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
1820 return 0;
1821}
1822
1823
1824static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
1825 void (*done)(struct scsi_cmnd *))
1826{
1827 struct ctlr_info *h;
1828 struct hpsa_scsi_dev_t *dev;
1829 unsigned char scsi3addr[8];
1830 struct CommandList *c;
1831 unsigned long flags;
1832
1833 /* Get the ptr to our adapter structure out of cmd->host. */
1834 h = sdev_to_hba(cmd->device);
1835 dev = cmd->device->hostdata;
1836 if (!dev) {
1837 cmd->result = DID_NO_CONNECT << 16;
1838 done(cmd);
1839 return 0;
1840 }
1841 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
1842
1843 /* Need a lock as this is being allocated from the pool */
1844 spin_lock_irqsave(&h->lock, flags);
1845 c = cmd_alloc(h);
1846 spin_unlock_irqrestore(&h->lock, flags);
1847 if (c == NULL) { /* trouble... */
1848 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
1849 return SCSI_MLQUEUE_HOST_BUSY;
1850 }
1851
1852 /* Fill in the command list header */
1853
1854 cmd->scsi_done = done; /* save this for use by completion code */
1855
1856 /* save c in case we have to abort it */
1857 cmd->host_scribble = (unsigned char *) c;
1858
1859 c->cmd_type = CMD_SCSI;
1860 c->scsi_cmd = cmd;
1861 c->Header.ReplyQueue = 0; /* unused in simple mode */
1862 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
 1863	c->Header.Tag.lower = c->busaddr;  /* use the cmd block's bus address as its tag */
1864
1865 /* Fill in the request block... */
1866
1867 c->Request.Timeout = 0;
1868 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
1869 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
1870 c->Request.CDBLen = cmd->cmd_len;
1871 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
1872 c->Request.Type.Type = TYPE_CMD;
1873 c->Request.Type.Attribute = ATTR_SIMPLE;
1874 switch (cmd->sc_data_direction) {
1875 case DMA_TO_DEVICE:
1876 c->Request.Type.Direction = XFER_WRITE;
1877 break;
1878 case DMA_FROM_DEVICE:
1879 c->Request.Type.Direction = XFER_READ;
1880 break;
1881 case DMA_NONE:
1882 c->Request.Type.Direction = XFER_NONE;
1883 break;
1884 case DMA_BIDIRECTIONAL:
1885 /* This can happen if a buggy application does a scsi passthru
1886 * and sets both inlen and outlen to non-zero. ( see
1887 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
1888 */
1889
1890 c->Request.Type.Direction = XFER_RSVD;
1891 /* This is technically wrong, and hpsa controllers should
1892 * reject it with CMD_INVALID, which is the most correct
1893 * response, but non-fibre backends appear to let it
1894 * slide by, and give the same results as if this field
1895 * were set correctly. Either way is acceptable for
1896 * our purposes here.
1897 */
1898
1899 break;
1900
1901 default:
1902 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
1903 cmd->sc_data_direction);
1904 BUG();
1905 break;
1906 }
1907
1908 if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */
1909 cmd_free(h, c);
1910 return SCSI_MLQUEUE_HOST_BUSY;
1911 }
1912 enqueue_cmd_and_start_io(h, c);
1913 /* the cmd'll come back via intr handler in complete_scsi_command() */
1914 return 0;
1915}
1916
1917static void hpsa_unregister_scsi(struct ctlr_info *h)
1918{
1919 /* we are being forcibly unloaded, and may not refuse. */
1920 scsi_remove_host(h->scsi_host);
1921 scsi_host_put(h->scsi_host);
1922 h->scsi_host = NULL;
1923}
1924
1925static int hpsa_register_scsi(struct ctlr_info *h)
1926{
1927 int rc;
1928
1929 hpsa_update_scsi_devices(h, -1);
1930 rc = hpsa_scsi_detect(h);
1931 if (rc != 0)
1932 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
1933 " hpsa_scsi_detect(), rc is %d\n", rc);
1934 return rc;
1935}
1936
1937static int wait_for_device_to_become_ready(struct ctlr_info *h,
1938 unsigned char lunaddr[])
1939{
1940 int rc = 0;
1941 int count = 0;
1942 int waittime = 1; /* seconds */
1943 struct CommandList *c;
1944
1945 c = cmd_special_alloc(h);
1946 if (!c) {
1947 dev_warn(&h->pdev->dev, "out of memory in "
1948 "wait_for_device_to_become_ready.\n");
1949 return IO_ERROR;
1950 }
1951
1952 /* Send test unit ready until device ready, or give up. */
1953 while (count < HPSA_TUR_RETRY_LIMIT) {
1954
1955 /* Wait for a bit. do this first, because if we send
1956 * the TUR right away, the reset will just abort it.
1957 */
1958 msleep(1000 * waittime);
1959 count++;
1960
1961 /* Increase wait time with each try, up to a point. */
1962 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
1963 waittime = waittime * 2;
1964
1965 /* Send the Test Unit Ready */
1966 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
1967 hpsa_scsi_do_simple_cmd_core(h, c);
1968 /* no unmap needed here because no data xfer. */
1969
1970 if (c->err_info->CommandStatus == CMD_SUCCESS)
1971 break;
1972
1973 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
1974 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
1975 (c->err_info->SenseInfo[2] == NO_SENSE ||
1976 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
1977 break;
1978
1979 dev_warn(&h->pdev->dev, "waiting %d secs "
1980 "for device to become ready.\n", waittime);
1981 rc = 1; /* device not ready. */
1982 }
1983
1984 if (rc)
1985 dev_warn(&h->pdev->dev, "giving up on device.\n");
1986 else
1987 dev_warn(&h->pdev->dev, "device is ready.\n");
1988
1989 cmd_special_free(h, c);
1990 return rc;
1991}
1992
1993/* Need at least one of these error handlers to keep ../scsi/hosts.c from
1994 * complaining. Doing a host- or bus-reset can't do anything good here.
1995 */
1996static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
1997{
1998 int rc;
1999 struct ctlr_info *h;
2000 struct hpsa_scsi_dev_t *dev;
2001
2002 /* find the controller to which the command to be aborted was sent */
2003 h = sdev_to_hba(scsicmd->device);
2004 if (h == NULL) /* paranoia */
2005 return FAILED;
2006 dev_warn(&h->pdev->dev, "resetting drive\n");
2007
2008 dev = scsicmd->device->hostdata;
2009 if (!dev) {
2010 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2011 "device lookup failed.\n");
2012 return FAILED;
2013 }
2014 /* send a reset to the SCSI LUN which the command was sent to */
2015 rc = hpsa_send_reset(h, dev->scsi3addr);
2016 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2017 return SUCCESS;
2018
2019 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2020 return FAILED;
2021}
2022
2023/*
2024 * For operations that cannot sleep, a command block is allocated at init,
2025 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2026 * which ones are free or in use. Lock must be held when calling this.
2027 * cmd_free() is the complement.
2028 */
2029static struct CommandList *cmd_alloc(struct ctlr_info *h)
2030{
2031 struct CommandList *c;
2032 int i;
2033 union u64bit temp64;
2034 dma_addr_t cmd_dma_handle, err_dma_handle;
2035
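	/* Scan the allocation bitmap for a free slot; test_and_set_bit
	 * re-checks atomically in case another allocation races us to the
	 * same index.
	 */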
2036 do {
2037 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2038 if (i == h->nr_cmds)
2039 return NULL;
2040 } while (test_and_set_bit
2041 (i & (BITS_PER_LONG - 1),
2042 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
2043 c = h->cmd_pool + i;
2044 memset(c, 0, sizeof(*c));
2045 cmd_dma_handle = h->cmd_pool_dhandle
2046 + i * sizeof(*c);
2047 c->err_info = h->errinfo_pool + i;
2048 memset(c->err_info, 0, sizeof(*c->err_info));
2049 err_dma_handle = h->errinfo_pool_dhandle
2050 + i * sizeof(*c->err_info);
2051 h->nr_allocs++;
2052
2053 c->cmdindex = i;
2054
2055 INIT_HLIST_NODE(&c->list);
2056 c->busaddr = (__u32) cmd_dma_handle;
2057 temp64.val = (__u64) err_dma_handle;
2058 c->ErrDesc.Addr.lower = temp64.val32.lower;
2059 c->ErrDesc.Addr.upper = temp64.val32.upper;
2060 c->ErrDesc.Len = sizeof(*c->err_info);
2061
2062 c->h = h;
2063 return c;
2064}
2065
 2066/* For operations that are allowed to sleep (the allocations here may
 2067 * block), use this routine. No lock need be held to call
 2068 * cmd_special_alloc(). cmd_special_free() is the complement.
 2069 */
2070static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2071{
2072 struct CommandList *c;
2073 union u64bit temp64;
2074 dma_addr_t cmd_dma_handle, err_dma_handle;
2075
2076 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2077 if (c == NULL)
2078 return NULL;
2079 memset(c, 0, sizeof(*c));
2080
2081 c->cmdindex = -1;
2082
2083 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2084 &err_dma_handle);
2085
2086 if (c->err_info == NULL) {
2087 pci_free_consistent(h->pdev,
2088 sizeof(*c), c, cmd_dma_handle);
2089 return NULL;
2090 }
2091 memset(c->err_info, 0, sizeof(*c->err_info));
2092
2093 INIT_HLIST_NODE(&c->list);
2094 c->busaddr = (__u32) cmd_dma_handle;
2095 temp64.val = (__u64) err_dma_handle;
2096 c->ErrDesc.Addr.lower = temp64.val32.lower;
2097 c->ErrDesc.Addr.upper = temp64.val32.upper;
2098 c->ErrDesc.Len = sizeof(*c->err_info);
2099
2100 c->h = h;
2101 return c;
2102}
2103
2104static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2105{
2106 int i;
2107
2108 i = c - h->cmd_pool;
2109 clear_bit(i & (BITS_PER_LONG - 1),
2110 h->cmd_pool_bits + (i / BITS_PER_LONG));
2111 h->nr_frees++;
2112}
2113
2114static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2115{
2116 union u64bit temp64;
2117
2118 temp64.val32.lower = c->ErrDesc.Addr.lower;
2119 temp64.val32.upper = c->ErrDesc.Addr.upper;
2120 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2121 c->err_info, (dma_addr_t) temp64.val);
2122 pci_free_consistent(h->pdev, sizeof(*c),
2123 c, (dma_addr_t) c->busaddr);
2124}
2125
2126#ifdef CONFIG_COMPAT
2127
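/* Compat entry points funnel through the regular ioctl handler, with the
 * BKL taken around the call.
 */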
2128static int do_ioctl(struct scsi_device *dev, int cmd, void *arg)
2129{
2130 int ret;
2131
2132 lock_kernel();
2133 ret = hpsa_ioctl(dev, cmd, arg);
2134 unlock_kernel();
2135 return ret;
2136}
2137
2138static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg);
2139static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2140 int cmd, void *arg);
2141
2142static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2143{
2144 switch (cmd) {
2145 case CCISS_GETPCIINFO:
2146 case CCISS_GETINTINFO:
2147 case CCISS_SETINTINFO:
2148 case CCISS_GETNODENAME:
2149 case CCISS_SETNODENAME:
2150 case CCISS_GETHEARTBEAT:
2151 case CCISS_GETBUSTYPES:
2152 case CCISS_GETFIRMVER:
2153 case CCISS_GETDRIVVER:
2154 case CCISS_REVALIDVOLS:
2155 case CCISS_DEREGDISK:
2156 case CCISS_REGNEWDISK:
2157 case CCISS_REGNEWD:
2158 case CCISS_RESCANDISK:
2159 case CCISS_GETLUNINFO:
2160 return do_ioctl(dev, cmd, arg);
2161
2162 case CCISS_PASSTHRU32:
2163 return hpsa_ioctl32_passthru(dev, cmd, arg);
2164 case CCISS_BIG_PASSTHRU32:
2165 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2166
2167 default:
2168 return -ENOIOCTLCMD;
2169 }
2170}
2171
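/* 32-bit CCISS_PASSTHRU: rebuild the native IOCTL_Command_struct in
 * compat-allocated user space (widening the buffer pointer with
 * compat_ptr()), run the normal handler, then copy the error info back
 * into the caller's 32-bit structure.
 */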
2172static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2173{
2174 IOCTL32_Command_struct __user *arg32 =
2175 (IOCTL32_Command_struct __user *) arg;
2176 IOCTL_Command_struct arg64;
2177 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2178 int err;
2179 u32 cp;
2180
2181 err = 0;
2182 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2183 sizeof(arg64.LUN_info));
2184 err |= copy_from_user(&arg64.Request, &arg32->Request,
2185 sizeof(arg64.Request));
2186 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2187 sizeof(arg64.error_info));
2188 err |= get_user(arg64.buf_size, &arg32->buf_size);
2189 err |= get_user(cp, &arg32->buf);
2190 arg64.buf = compat_ptr(cp);
2191 err |= copy_to_user(p, &arg64, sizeof(arg64));
2192
2193 if (err)
2194 return -EFAULT;
2195
2196 err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2197 if (err)
2198 return err;
2199 err |= copy_in_user(&arg32->error_info, &p->error_info,
2200 sizeof(arg32->error_info));
2201 if (err)
2202 return -EFAULT;
2203 return err;
2204}
2205
2206static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2207 int cmd, void *arg)
2208{
2209 BIG_IOCTL32_Command_struct __user *arg32 =
2210 (BIG_IOCTL32_Command_struct __user *) arg;
2211 BIG_IOCTL_Command_struct arg64;
2212 BIG_IOCTL_Command_struct __user *p =
2213 compat_alloc_user_space(sizeof(arg64));
2214 int err;
2215 u32 cp;
2216
2217 err = 0;
2218 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2219 sizeof(arg64.LUN_info));
2220 err |= copy_from_user(&arg64.Request, &arg32->Request,
2221 sizeof(arg64.Request));
2222 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2223 sizeof(arg64.error_info));
2224 err |= get_user(arg64.buf_size, &arg32->buf_size);
2225 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2226 err |= get_user(cp, &arg32->buf);
2227 arg64.buf = compat_ptr(cp);
2228 err |= copy_to_user(p, &arg64, sizeof(arg64));
2229
2230 if (err)
2231 return -EFAULT;
2232
2233 err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2234 if (err)
2235 return err;
2236 err |= copy_in_user(&arg32->error_info, &p->error_info,
2237 sizeof(arg32->error_info));
2238 if (err)
2239 return -EFAULT;
2240 return err;
2241}
2242#endif
2243
2244static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2245{
2246 struct hpsa_pci_info pciinfo;
2247
2248 if (!argp)
2249 return -EINVAL;
2250 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2251 pciinfo.bus = h->pdev->bus->number;
2252 pciinfo.dev_fn = h->pdev->devfn;
2253 pciinfo.board_id = h->board_id;
2254 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2255 return -EFAULT;
2256 return 0;
2257}
2258
2259static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2260{
2261 DriverVer_type DriverVer;
2262 unsigned char vmaj, vmin, vsubmin;
2263 int rc;
2264
2265 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2266 &vmaj, &vmin, &vsubmin);
2267 if (rc != 3) {
2268 dev_info(&h->pdev->dev, "driver version string '%s' "
2269 "unrecognized.", HPSA_DRIVER_VERSION);
2270 vmaj = 0;
2271 vmin = 0;
2272 vsubmin = 0;
2273 }
2274 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2275 if (!argp)
2276 return -EINVAL;
2277 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2278 return -EFAULT;
2279 return 0;
2280}
2281
2282static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2283{
2284 IOCTL_Command_struct iocommand;
2285 struct CommandList *c;
2286 char *buff = NULL;
2287 union u64bit temp64;
2288
2289 if (!argp)
2290 return -EINVAL;
2291 if (!capable(CAP_SYS_RAWIO))
2292 return -EPERM;
2293 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2294 return -EFAULT;
2295 if ((iocommand.buf_size < 1) &&
2296 (iocommand.Request.Type.Direction != XFER_NONE)) {
2297 return -EINVAL;
2298 }
2299 if (iocommand.buf_size > 0) {
2300 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2301 if (buff == NULL)
 2302			return -ENOMEM;
2303 }
2304 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2305 /* Copy the data into the buffer we created */
2306 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
2307 kfree(buff);
2308 return -EFAULT;
2309 }
2310 } else
2311 memset(buff, 0, iocommand.buf_size);
2312 c = cmd_special_alloc(h);
2313 if (c == NULL) {
2314 kfree(buff);
2315 return -ENOMEM;
2316 }
2317 /* Fill in the command type */
2318 c->cmd_type = CMD_IOCTL_PEND;
2319 /* Fill in Command Header */
2320 c->Header.ReplyQueue = 0; /* unused in simple mode */
2321 if (iocommand.buf_size > 0) { /* buffer to fill */
2322 c->Header.SGList = 1;
2323 c->Header.SGTotal = 1;
2324 } else { /* no buffers to fill */
2325 c->Header.SGList = 0;
2326 c->Header.SGTotal = 0;
2327 }
2328 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2329 /* use the kernel address the cmd block for tag */
2330 c->Header.Tag.lower = c->busaddr;
2331
2332 /* Fill in Request block */
2333 memcpy(&c->Request, &iocommand.Request,
2334 sizeof(c->Request));
2335
2336 /* Fill in the scatter gather information */
2337 if (iocommand.buf_size > 0) {
2338 temp64.val = pci_map_single(h->pdev, buff,
2339 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2340 c->SG[0].Addr.lower = temp64.val32.lower;
2341 c->SG[0].Addr.upper = temp64.val32.upper;
2342 c->SG[0].Len = iocommand.buf_size;
2343 c->SG[0].Ext = 0; /* we are not chaining*/
2344 }
2345 hpsa_scsi_do_simple_cmd_core(h, c);
2346 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2347 check_ioctl_unit_attention(h, c);
2348
2349 /* Copy the error information out */
2350 memcpy(&iocommand.error_info, c->err_info,
2351 sizeof(iocommand.error_info));
2352 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2353 kfree(buff);
2354 cmd_special_free(h, c);
2355 return -EFAULT;
2356 }
2357
2358 if (iocommand.Request.Type.Direction == XFER_READ) {
2359 /* Copy the data out of the buffer we created */
2360 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2361 kfree(buff);
2362 cmd_special_free(h, c);
2363 return -EFAULT;
2364 }
2365 }
2366 kfree(buff);
2367 cmd_special_free(h, c);
2368 return 0;
2369}
2370
2371static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2372{
2373 BIG_IOCTL_Command_struct *ioc;
2374 struct CommandList *c;
2375 unsigned char **buff = NULL;
2376 int *buff_size = NULL;
2377 union u64bit temp64;
2378 BYTE sg_used = 0;
2379 int status = 0;
2380 int i;
2381 __u32 left;
2382 __u32 sz;
2383 BYTE __user *data_ptr;
2384
2385 if (!argp)
2386 return -EINVAL;
2387 if (!capable(CAP_SYS_RAWIO))
2388 return -EPERM;
2389 ioc = (BIG_IOCTL_Command_struct *)
2390 kmalloc(sizeof(*ioc), GFP_KERNEL);
2391 if (!ioc) {
2392 status = -ENOMEM;
2393 goto cleanup1;
2394 }
2395 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2396 status = -EFAULT;
2397 goto cleanup1;
2398 }
2399 if ((ioc->buf_size < 1) &&
2400 (ioc->Request.Type.Direction != XFER_NONE)) {
2401 status = -EINVAL;
2402 goto cleanup1;
2403 }
2404 /* Check kmalloc limits using all SGs */
2405 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2406 status = -EINVAL;
2407 goto cleanup1;
2408 }
2409 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
2410 status = -EINVAL;
2411 goto cleanup1;
2412 }
2413 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
2414 if (!buff) {
2415 status = -ENOMEM;
2416 goto cleanup1;
2417 }
2418 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
2419 if (!buff_size) {
2420 status = -ENOMEM;
2421 goto cleanup1;
2422 }
2423 left = ioc->buf_size;
2424 data_ptr = ioc->buf;
2425 while (left) {
2426 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
2427 buff_size[sg_used] = sz;
2428 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2429 if (buff[sg_used] == NULL) {
2430 status = -ENOMEM;
2431 goto cleanup1;
2432 }
2433 if (ioc->Request.Type.Direction == XFER_WRITE) {
2434 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2435 status = -ENOMEM;
2436 goto cleanup1;
2437 }
2438 } else
2439 memset(buff[sg_used], 0, sz);
2440 left -= sz;
2441 data_ptr += sz;
2442 sg_used++;
2443 }
2444 c = cmd_special_alloc(h);
2445 if (c == NULL) {
2446 status = -ENOMEM;
2447 goto cleanup1;
2448 }
2449 c->cmd_type = CMD_IOCTL_PEND;
2450 c->Header.ReplyQueue = 0;
2451
2452 if (ioc->buf_size > 0) {
2453 c->Header.SGList = sg_used;
2454 c->Header.SGTotal = sg_used;
2455 } else {
2456 c->Header.SGList = 0;
2457 c->Header.SGTotal = 0;
2458 }
2459 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2460 c->Header.Tag.lower = c->busaddr;
2461 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2462 if (ioc->buf_size > 0) {
2463 int i;
2464 for (i = 0; i < sg_used; i++) {
2465 temp64.val = pci_map_single(h->pdev, buff[i],
2466 buff_size[i], PCI_DMA_BIDIRECTIONAL);
2467 c->SG[i].Addr.lower = temp64.val32.lower;
2468 c->SG[i].Addr.upper = temp64.val32.upper;
2469 c->SG[i].Len = buff_size[i];
2470 /* we are not chaining */
2471 c->SG[i].Ext = 0;
2472 }
2473 }
2474 hpsa_scsi_do_simple_cmd_core(h, c);
2475 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2476 check_ioctl_unit_attention(h, c);
2477 /* Copy the error information out */
2478 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2479 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2480 cmd_special_free(h, c);
2481 status = -EFAULT;
2482 goto cleanup1;
2483 }
2484 if (ioc->Request.Type.Direction == XFER_READ) {
2485 /* Copy the data out of the buffer we created */
2486 BYTE __user *ptr = ioc->buf;
2487 for (i = 0; i < sg_used; i++) {
2488 if (copy_to_user(ptr, buff[i], buff_size[i])) {
2489 cmd_special_free(h, c);
2490 status = -EFAULT;
2491 goto cleanup1;
2492 }
2493 ptr += buff_size[i];
2494 }
2495 }
2496 cmd_special_free(h, c);
2497 status = 0;
2498cleanup1:
2499 if (buff) {
2500 for (i = 0; i < sg_used; i++)
2501 kfree(buff[i]);
2502 kfree(buff);
2503 }
2504 kfree(buff_size);
2505 kfree(ioc);
2506 return status;
2507}
2508
2509static void check_ioctl_unit_attention(struct ctlr_info *h,
2510 struct CommandList *c)
2511{
2512 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2513 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2514 (void) check_for_unit_attention(h, c);
2515}
2516/*
2517 * ioctl
2518 */
2519static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2520{
2521 struct ctlr_info *h;
2522 void __user *argp = (void __user *)arg;
2523
2524 h = sdev_to_hba(dev);
2525
2526 switch (cmd) {
2527 case CCISS_DEREGDISK:
2528 case CCISS_REGNEWDISK:
2529 case CCISS_REGNEWD:
2530 hpsa_update_scsi_devices(h, dev->host->host_no);
2531 return 0;
2532 case CCISS_GETPCIINFO:
2533 return hpsa_getpciinfo_ioctl(h, argp);
2534 case CCISS_GETDRIVVER:
2535 return hpsa_getdrivver_ioctl(h, argp);
2536 case CCISS_PASSTHRU:
2537 return hpsa_passthru_ioctl(h, argp);
2538 case CCISS_BIG_PASSTHRU:
2539 return hpsa_big_passthru_ioctl(h, argp);
2540 default:
2541 return -ENOTTY;
2542 }
2543}
2544
2545static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
2546 void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
2547 int cmd_type)
2548{
2549 int pci_dir = XFER_NONE;
2550
2551 c->cmd_type = CMD_IOCTL_PEND;
2552 c->Header.ReplyQueue = 0;
2553 if (buff != NULL && size > 0) {
2554 c->Header.SGList = 1;
2555 c->Header.SGTotal = 1;
2556 } else {
2557 c->Header.SGList = 0;
2558 c->Header.SGTotal = 0;
2559 }
2560 c->Header.Tag.lower = c->busaddr;
2561 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2562
2563 c->Request.Type.Type = cmd_type;
2564 if (cmd_type == TYPE_CMD) {
2565 switch (cmd) {
2566 case HPSA_INQUIRY:
2567 /* are we trying to read a vital product page */
2568 if (page_code != 0) {
2569 c->Request.CDB[1] = 0x01;
2570 c->Request.CDB[2] = page_code;
2571 }
2572 c->Request.CDBLen = 6;
2573 c->Request.Type.Attribute = ATTR_SIMPLE;
2574 c->Request.Type.Direction = XFER_READ;
2575 c->Request.Timeout = 0;
2576 c->Request.CDB[0] = HPSA_INQUIRY;
2577 c->Request.CDB[4] = size & 0xFF;
2578 break;
2579 case HPSA_REPORT_LOG:
2580 case HPSA_REPORT_PHYS:
2581 /* Talking to the controller, so it's a physical command:
2582 mode = 00, target = 0. Nothing to write.
2583 */
2584 c->Request.CDBLen = 12;
2585 c->Request.Type.Attribute = ATTR_SIMPLE;
2586 c->Request.Type.Direction = XFER_READ;
2587 c->Request.Timeout = 0;
2588 c->Request.CDB[0] = cmd;
2589 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2590 c->Request.CDB[7] = (size >> 16) & 0xFF;
2591 c->Request.CDB[8] = (size >> 8) & 0xFF;
2592 c->Request.CDB[9] = size & 0xFF;
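 /* Illustration: a hypothetical size of 0x1000 bytes is encoded
 * big-endian here as CDB[6..9] = 00 00 10 00.
 */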
2593 break;
2594
2595 case HPSA_READ_CAPACITY:
2596 c->Request.CDBLen = 10;
2597 c->Request.Type.Attribute = ATTR_SIMPLE;
2598 c->Request.Type.Direction = XFER_READ;
2599 c->Request.Timeout = 0;
2600 c->Request.CDB[0] = cmd;
2601 break;
2602 case HPSA_CACHE_FLUSH:
2603 c->Request.CDBLen = 12;
2604 c->Request.Type.Attribute = ATTR_SIMPLE;
2605 c->Request.Type.Direction = XFER_WRITE;
2606 c->Request.Timeout = 0;
2607 c->Request.CDB[0] = BMIC_WRITE;
2608 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2609 break;
2610 case TEST_UNIT_READY:
2611 c->Request.CDBLen = 6;
2612 c->Request.Type.Attribute = ATTR_SIMPLE;
2613 c->Request.Type.Direction = XFER_NONE;
2614 c->Request.Timeout = 0;
2615 break;
2616 default:
2617 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2618 BUG();
2619 return;
2620 }
2621 } else if (cmd_type == TYPE_MSG) {
2622 switch (cmd) {
2623
2624 case HPSA_DEVICE_RESET_MSG:
2625 c->Request.CDBLen = 16;
2626 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
2627 c->Request.Type.Attribute = ATTR_SIMPLE;
2628 c->Request.Type.Direction = XFER_NONE;
2629 c->Request.Timeout = 0; /* Don't time out */
2630 c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */
2631 c->Request.CDB[1] = 0x03; /* Reset target above */
2632 /* If bytes 4-7 are zero, it means reset the */
2633 /* LunID device */
2634 c->Request.CDB[4] = 0x00;
2635 c->Request.CDB[5] = 0x00;
2636 c->Request.CDB[6] = 0x00;
2637 c->Request.CDB[7] = 0x00;
2638 break;
2639
2640 default:
2641 dev_warn(&h->pdev->dev, "unknown message type %d\n",
2642 cmd);
2643 BUG();
2644 }
2645 } else {
2646 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2647 BUG();
2648 }
2649
2650 switch (c->Request.Type.Direction) {
2651 case XFER_READ:
2652 pci_dir = PCI_DMA_FROMDEVICE;
2653 break;
2654 case XFER_WRITE:
2655 pci_dir = PCI_DMA_TODEVICE;
2656 break;
2657 case XFER_NONE:
2658 pci_dir = PCI_DMA_NONE;
2659 break;
2660 default:
2661 pci_dir = PCI_DMA_BIDIRECTIONAL;
2662 }
2663
2664 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
2665
2666 return;
2667}
2668
2669/*
2670 * Map (physical) PCI mem into (virtual) kernel space
2671 */
2672static void __iomem *remap_pci_mem(ulong base, ulong size)
2673{
2674 ulong page_base = ((ulong) base) & PAGE_MASK;
2675 ulong page_offs = ((ulong) base) - page_base;
2676 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2677
2678 return page_remapped ? (page_remapped + page_offs) : NULL;
2679}
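/* Illustration of remap_pci_mem() above (assumes 4 KiB pages): for a
 * hypothetical base of 0xfebf0250 and size 0x250, page_base = 0xfebf0000 and
 * page_offs = 0x250, so ioremap() maps 0x4a0 bytes and the caller gets back a
 * pointer to offset 0x250 within that mapping.
 */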
2680
2681/* Takes cmds off the submission queue and sends them to the hardware,
2682 * then puts them on the queue of cmds waiting for completion.
2683 */
2684static void start_io(struct ctlr_info *h)
2685{
2686 struct CommandList *c;
2687
2688 while (!hlist_empty(&h->reqQ)) {
2689 c = hlist_entry(h->reqQ.first, struct CommandList, list);
2690 /* can't do anything if fifo is full */
2691 if ((h->access.fifo_full(h))) {
2692 dev_warn(&h->pdev->dev, "fifo full\n");
2693 break;
2694 }
2695
2696 /* Get the first entry from the Request Q */
2697 removeQ(c);
2698 h->Qdepth--;
2699
2700 /* Tell the controller to execute the command */
2701 h->access.submit_command(h, c);
2702
2703 /* Put job onto the completed Q */
2704 addQ(&h->cmpQ, c);
2705 }
2706}
2707
2708static inline unsigned long get_next_completion(struct ctlr_info *h)
2709{
2710 return h->access.command_completed(h);
2711}
2712
2713static inline int interrupt_pending(struct ctlr_info *h)
2714{
2715 return h->access.intr_pending(h);
2716}
2717
2718static inline long interrupt_not_for_us(struct ctlr_info *h)
2719{
2720 return ((h->access.intr_pending(h) == 0) ||
2721 (h->interrupts_enabled == 0));
2722}
2723
2724static inline int bad_tag(struct ctlr_info *h, __u32 tag_index,
2725 __u32 raw_tag)
2726{
2727 if (unlikely(tag_index >= h->nr_cmds)) {
2728 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
2729 return 1;
2730 }
2731 return 0;
2732}
2733
2734static inline void finish_cmd(struct CommandList *c, __u32 raw_tag)
2735{
2736 removeQ(c);
2737 if (likely(c->cmd_type == CMD_SCSI))
2738 complete_scsi_command(c, 0, raw_tag);
2739 else if (c->cmd_type == CMD_IOCTL_PEND)
2740 complete(c->waiting);
2741}
2742
2743static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
2744{
2745 struct ctlr_info *h = dev_id;
2746 struct CommandList *c;
2747 unsigned long flags;
2748 __u32 raw_tag, tag, tag_index;
2749 struct hlist_node *tmp;
2750
2751 if (interrupt_not_for_us(h))
2752 return IRQ_NONE;
2753 spin_lock_irqsave(&h->lock, flags);
2754 while (interrupt_pending(h)) {
2755 while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) {
2756 if (likely(HPSA_TAG_CONTAINS_INDEX(raw_tag))) {
2757 tag_index = HPSA_TAG_TO_INDEX(raw_tag);
2758 if (bad_tag(h, tag_index, raw_tag))
2759 return IRQ_HANDLED;
2760 c = h->cmd_pool + tag_index;
2761 finish_cmd(c, raw_tag);
2762 continue;
2763 }
2764 tag = HPSA_TAG_DISCARD_ERROR_BITS(raw_tag);
2765 c = NULL;
2766 hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
2767 if (c->busaddr == tag) {
2768 finish_cmd(c, raw_tag);
2769 break;
2770 }
2771 }
2772 }
2773 }
2774 spin_unlock_irqrestore(&h->lock, flags);
2775 return IRQ_HANDLED;
2776}
2777
2778/* Send a message CDB to the firmware. */
2779static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
2780 unsigned char type)
2781{
2782 struct Command {
2783 struct CommandListHeader CommandHeader;
2784 struct RequestBlock Request;
2785 struct ErrDescriptor ErrorDescriptor;
2786 };
2787 struct Command *cmd;
2788 static const size_t cmd_sz = sizeof(*cmd) +
2789 sizeof(cmd->ErrorDescriptor);
2790 dma_addr_t paddr64;
2791 uint32_t paddr32, tag;
2792 void __iomem *vaddr;
2793 int i, err;
2794
2795 vaddr = pci_ioremap_bar(pdev, 0);
2796 if (vaddr == NULL)
2797 return -ENOMEM;
2798
2799 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
2800 * CCISS commands, so they must be allocated from the lower 4GiB of
2801 * memory.
2802 */
2803 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2804 if (err) {
2805 iounmap(vaddr);
2806 return -ENOMEM;
2807 }
2808
2809 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
2810 if (cmd == NULL) {
2811 iounmap(vaddr);
2812 return -ENOMEM;
2813 }
2814
2815 /* This must fit, because of the 32-bit consistent DMA mask. Also,
2816 * although there's no guarantee, we assume that the address is at
2817 * least 4-byte aligned (most likely, it's page-aligned).
2818 */
2819 paddr32 = paddr64;
2820
2821 cmd->CommandHeader.ReplyQueue = 0;
2822 cmd->CommandHeader.SGList = 0;
2823 cmd->CommandHeader.SGTotal = 0;
2824 cmd->CommandHeader.Tag.lower = paddr32;
2825 cmd->CommandHeader.Tag.upper = 0;
2826 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
2827
2828 cmd->Request.CDBLen = 16;
2829 cmd->Request.Type.Type = TYPE_MSG;
2830 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
2831 cmd->Request.Type.Direction = XFER_NONE;
2832 cmd->Request.Timeout = 0; /* Don't time out */
2833 cmd->Request.CDB[0] = opcode;
2834 cmd->Request.CDB[1] = type;
2835 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
2836 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
2837 cmd->ErrorDescriptor.Addr.upper = 0;
2838 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
2839
2840 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
2841
2842 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
2843 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
2844 if (HPSA_TAG_DISCARD_ERROR_BITS(tag) == paddr32)
2845 break;
2846 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
2847 }
2848
2849 iounmap(vaddr);
2850
2851 /* we leak the DMA buffer here ... no choice since the controller could
2852 * still complete the command.
2853 */
2854 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
2855 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
2856 opcode, type);
2857 return -ETIMEDOUT;
2858 }
2859
2860 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
2861
2862 if (tag & HPSA_ERROR_BIT) {
2863 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
2864 opcode, type);
2865 return -EIO;
2866 }
2867
2868 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
2869 opcode, type);
2870 return 0;
2871}
2872
2873#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
2874#define hpsa_noop(p) hpsa_message(p, 3, 0)
2875
2876static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
2877{
2878/* the #defines are stolen from drivers/pci/msi.h. */
2879#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
2880#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
2881
2882 int pos;
2883 u16 control = 0;
2884
2885 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
2886 if (pos) {
2887 pci_read_config_word(pdev, msi_control_reg(pos), &control);
2888 if (control & PCI_MSI_FLAGS_ENABLE) {
2889 dev_info(&pdev->dev, "resetting MSI\n");
2890 pci_write_config_word(pdev, msi_control_reg(pos),
2891 control & ~PCI_MSI_FLAGS_ENABLE);
2892 }
2893 }
2894
2895 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
2896 if (pos) {
2897 pci_read_config_word(pdev, msi_control_reg(pos), &control);
2898 if (control & PCI_MSIX_FLAGS_ENABLE) {
2899 dev_info(&pdev->dev, "resetting MSI-X\n");
2900 pci_write_config_word(pdev, msi_control_reg(pos),
2901 control & ~PCI_MSIX_FLAGS_ENABLE);
2902 }
2903 }
2904
2905 return 0;
2906}
2907
2908/* This does a hard reset of the controller using PCI power management
2909 * states.
2910 */
2911static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
2912{
2913 u16 pmcsr, saved_config_space[32];
2914 int i, pos;
2915
2916 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
2917
2918 /* This is very nearly the same thing as
2919 *
2920 * pci_save_state(pci_dev);
2921 * pci_set_power_state(pci_dev, PCI_D3hot);
2922 * pci_set_power_state(pci_dev, PCI_D0);
2923 * pci_restore_state(pci_dev);
2924 *
2925 * but we can't use these nice canned kernel routines on
2926 * kexec, because they also check the MSI/MSI-X state in PCI
2927 * configuration space and do the wrong thing when it is
2928 * set/cleared. Also, the pci_save/restore_state functions
2929 * violate the ordering requirements for restoring the
2930 * configuration space from the CCISS document (see the
2931 * comment below). So we roll our own ....
2932 */
2933
2934 for (i = 0; i < 32; i++)
2935 pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
2936
2937 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
2938 if (pos == 0) {
2939 dev_err(&pdev->dev,
2940 "hpsa_reset_controller: PCI PM not supported\n");
2941 return -ENODEV;
2942 }
2943
2944 /* Quoting from the Open CISS Specification: "The Power
2945 * Management Control/Status Register (CSR) controls the power
2946 * state of the device. The normal operating state is D0,
2947 * CSR=00h. The software off state is D3, CSR=03h. To reset
2948 * the controller, place the interface device in D3 then to
2949 * D0, this causes a secondary PCI reset which will reset the
2950 * controller."
2951 */
2952
2953 /* enter the D3hot power management state */
2954 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
2955 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2956 pmcsr |= PCI_D3hot;
2957 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
2958
2959 msleep(500);
2960
2961 /* enter the D0 power management state */
2962 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2963 pmcsr |= PCI_D0;
2964 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
2965
2966 msleep(500);
2967
2968 /* Restore the PCI configuration space. The Open CISS
2969 * Specification says, "Restore the PCI Configuration
2970 * Registers, offsets 00h through 60h. It is important to
2971 * restore the command register, 16-bits at offset 04h,
2972 * last. Do not restore the configuration status register,
2973 * 16-bits at offset 06h." Note that the offset is 2*i.
2974 */
2975 for (i = 0; i < 32; i++) {
2976 if (i == 2 || i == 3)
2977 continue;
2978 pci_write_config_word(pdev, 2*i, saved_config_space[i]);
2979 }
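 /* Words 2 and 3 were skipped above: per the CISS note, the command
 * register (16 bits at offset 04h, i.e. i == 2) must be restored last, and
 * the status register (offset 06h, i == 3) is not restored at all. The
 * write below handles the command register.
 */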
2980 wmb();
2981 pci_write_config_word(pdev, 4, saved_config_space[2]);
2982
2983 return 0;
2984}
2985
2986/*
2987 * We cannot read the structure directly, for portability we must use
2988 * the io functions.
2989 * This is for debug only.
2990 */
2991#ifdef HPSA_DEBUG
2992static void print_cfg_table(struct device *dev, struct CfgTable *tb)
2993{
2994 int i;
2995 char temp_name[17];
2996
2997 dev_info(dev, "Controller Configuration information\n");
2998 dev_info(dev, "------------------------------------\n");
2999 for (i = 0; i < 4; i++)
3000 temp_name[i] = readb(&(tb->Signature[i]));
3001 temp_name[4] = '\0';
3002 dev_info(dev, " Signature = %s\n", temp_name);
3003 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3004 dev_info(dev, " Transport methods supported = 0x%x\n",
3005 readl(&(tb->TransportSupport)));
3006 dev_info(dev, " Transport methods active = 0x%x\n",
3007 readl(&(tb->TransportActive)));
3008 dev_info(dev, " Requested transport Method = 0x%x\n",
3009 readl(&(tb->HostWrite.TransportRequest)));
3010 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3011 readl(&(tb->HostWrite.CoalIntDelay)));
3012 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3013 readl(&(tb->HostWrite.CoalIntCount)));
3014 dev_info(dev, " Max outstanding commands = 0x%d\n",
3015 readl(&(tb->CmdsOutMax)));
3016 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3017 for (i = 0; i < 16; i++)
3018 temp_name[i] = readb(&(tb->ServerName[i]));
3019 temp_name[16] = '\0';
3020 dev_info(dev, " Server Name = %s\n", temp_name);
3021 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3022 readl(&(tb->HeartBeat)));
3023}
3024#endif /* HPSA_DEBUG */
3025
3026static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3027{
3028 int i, offset, mem_type, bar_type;
3029
3030 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3031 return 0;
3032 offset = 0;
3033 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3034 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3035 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3036 offset += 4;
3037 else {
3038 mem_type = pci_resource_flags(pdev, i) &
3039 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3040 switch (mem_type) {
3041 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3042 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3043 offset += 4; /* 32 bit */
3044 break;
3045 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3046 offset += 8;
3047 break;
3048 default: /* reserved in PCI 2.2 */
3049 dev_warn(&pdev->dev,
3050 "base address is invalid\n");
3051 return -1;
3052 break;
3053 }
3054 }
3055 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3056 return i + 1;
3057 }
3058 return -1;
3059}
3060
3061/* If MSI/MSI-X is supported by the kernel we will try to enable it on
3062 * controllers that are capable. If not, we use IO-APIC mode.
3063 */
3064
3065static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
3066 struct pci_dev *pdev, __u32 board_id)
3067{
3068#ifdef CONFIG_PCI_MSI
3069 int err;
3070 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
3071 {0, 2}, {0, 3}
3072 };
3073
3074 /* Some boards advertise MSI but don't really support it */
3075 if ((board_id == 0x40700E11) ||
3076 (board_id == 0x40800E11) ||
3077 (board_id == 0x40820E11) || (board_id == 0x40830E11))
3078 goto default_int_mode;
3079 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
3080 dev_info(&pdev->dev, "MSIX\n");
3081 err = pci_enable_msix(pdev, hpsa_msix_entries, 4);
3082 if (!err) {
3083 h->intr[0] = hpsa_msix_entries[0].vector;
3084 h->intr[1] = hpsa_msix_entries[1].vector;
3085 h->intr[2] = hpsa_msix_entries[2].vector;
3086 h->intr[3] = hpsa_msix_entries[3].vector;
3087 h->msix_vector = 1;
3088 return;
3089 }
3090 if (err > 0) {
3091 dev_warn(&pdev->dev, "only %d MSI-X vectors "
3092 "available\n", err);
3093 goto default_int_mode;
3094 } else {
3095 dev_warn(&pdev->dev, "MSI-X init failed %d\n",
3096 err);
3097 goto default_int_mode;
3098 }
3099 }
3100 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
3101 dev_info(&pdev->dev, "MSI\n");
3102 if (!pci_enable_msi(pdev))
3103 h->msi_vector = 1;
3104 else
3105 dev_warn(&pdev->dev, "MSI init failed\n");
3106 }
3107default_int_mode:
3108#endif /* CONFIG_PCI_MSI */
3109 /* if we get here we're going to use the default interrupt mode */
3110 h->intr[SIMPLE_MODE_INT] = pdev->irq;
3111 return;
3112}
3113
3114static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
3115{
3116 ushort subsystem_vendor_id, subsystem_device_id, command;
3117 __u32 board_id, scratchpad = 0;
3118 __u64 cfg_offset;
3119 __u32 cfg_base_addr;
3120 __u64 cfg_base_addr_index;
3121 int i, prod_index, err;
3122
3123 subsystem_vendor_id = pdev->subsystem_vendor;
3124 subsystem_device_id = pdev->subsystem_device;
3125 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3126 subsystem_vendor_id);
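 /* Illustration: a subsystem device ID of 0x3225 and the HP subsystem vendor
 * ID 0x103C combine to a board_id of 0x3225103C (the value checked in the
 * P600 workaround further down).
 */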
3127
3128 for (i = 0; i < ARRAY_SIZE(products); i++)
3129 if (board_id == products[i].board_id)
3130 break;
3131
3132 prod_index = i;
3133
3134 if (prod_index == ARRAY_SIZE(products)) {
3135 prod_index--;
3136 if (subsystem_vendor_id != PCI_VENDOR_ID_HP ||
3137 !hpsa_allow_any) {
3138 dev_warn(&pdev->dev, "unrecognized board ID:"
3139 " 0x%08lx, ignoring.\n",
3140 (unsigned long) board_id);
3141 return -ENODEV;
3142 }
3143 }
3144 /* check to see if controller has been disabled
3145 * BEFORE trying to enable it
3146 */
3147 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
3148 if (!(command & 0x02)) {
3149 dev_warn(&pdev->dev, "controller appears to be disabled\n");
3150 return -ENODEV;
3151 }
3152
3153 err = pci_enable_device(pdev);
3154 if (err) {
3155 dev_warn(&pdev->dev, "unable to enable PCI device\n");
3156 return err;
3157 }
3158
3159 err = pci_request_regions(pdev, "hpsa");
3160 if (err) {
3161 dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
3162 return err;
3163 }
3164
3165 /* If the kernel supports MSI/MSI-X we will try to enable that,
3166 * else we use the IO-APIC interrupt assigned to us by system ROM.
3167 */
3168 hpsa_interrupt_mode(h, pdev, board_id);
3169
3170 /* find the memory BAR */
3171 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3172 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
3173 break;
3174 }
3175 if (i == DEVICE_COUNT_RESOURCE) {
3176 dev_warn(&pdev->dev, "no memory BAR found\n");
3177 err = -ENODEV;
3178 goto err_out_free_res;
3179 }
3180
3181 h->paddr = pci_resource_start(pdev, i); /* addressing mode bits
3182 * already removed
3183 */
3184
3185 h->vaddr = remap_pci_mem(h->paddr, 0x250);
3186
3187 /* Wait for the board to become ready. */
3188 for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
3189 scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
3190 if (scratchpad == HPSA_FIRMWARE_READY)
3191 break;
3192 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3193 }
3194 if (scratchpad != HPSA_FIRMWARE_READY) {
3195 dev_warn(&pdev->dev, "board not ready, timed out.\n");
3196 err = -ENODEV;
3197 goto err_out_free_res;
3198 }
3199
3200 /* get the address index number */
3201 cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
3202 cfg_base_addr &= (__u32) 0x0000ffff;
3203 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3204 if (cfg_base_addr_index == -1) {
3205 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3206 err = -ENODEV;
3207 goto err_out_free_res;
3208 }
3209
3210 cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
3211 h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3212 cfg_base_addr_index) + cfg_offset,
3213 sizeof(h->cfgtable));
3214 h->board_id = board_id;
3215
3216 /* Query controller for max supported commands: */
3217 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3218
3219 h->product_name = products[prod_index].product_name;
3220 h->access = *(products[prod_index].access);
3221 /* Allow room for some ioctls */
3222 h->nr_cmds = h->max_commands - 4;
3223
3224 if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
3225 (readb(&h->cfgtable->Signature[1]) != 'I') ||
3226 (readb(&h->cfgtable->Signature[2]) != 'S') ||
3227 (readb(&h->cfgtable->Signature[3]) != 'S')) {
3228 dev_warn(&pdev->dev, "not a valid CISS config table\n");
3229 err = -ENODEV;
3230 goto err_out_free_res;
3231 }
3232#ifdef CONFIG_X86
3233 {
3234 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3235 __u32 prefetch;
3236 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3237 prefetch |= 0x100;
3238 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3239 }
3240#endif
3241
3242 /* Disable DMA prefetch for the P600.
3243 * An ASIC bug may result in a prefetch beyond
3244 * physical memory.
3245 */
3246 if (board_id == 0x3225103C) {
3247 __u32 dma_prefetch;
3248 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3249 dma_prefetch |= 0x8000;
3250 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3251 }
3252
3253 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3254 /* Update the field, and then ring the doorbell */
3255 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3256 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3257
3258 /* under certain very rare conditions, this can take a while.
3259 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3260 * as we enter this code.)
3261 */
3262 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3263 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3264 break;
3265 /* delay and try again */
3266 msleep(10);
3267 }
3268
3269#ifdef HPSA_DEBUG
3270 print_cfg_table(&pdev->dev, h->cfgtable);
3271#endif /* HPSA_DEBUG */
3272
3273 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3274 dev_warn(&pdev->dev, "unable to get board into simple mode\n");
3275 err = -ENODEV;
3276 goto err_out_free_res;
3277 }
3278 return 0;
3279
3280err_out_free_res:
3281 /*
3282 * Deliberately omit pci_disable_device(): it does something nasty to
3283 * Smart Array controllers that pci_enable_device does not undo
3284 */
3285 pci_release_regions(pdev);
3286 return err;
3287}
3288
3289static int __devinit hpsa_init_one(struct pci_dev *pdev,
3290 const struct pci_device_id *ent)
3291{
3292 int i;
3293 int dac;
3294 struct ctlr_info *h;
3295
3296 if (number_of_controllers == 0)
3297 printk(KERN_INFO DRIVER_NAME "\n");
3298 if (reset_devices) {
3299 /* Reset the controller with a PCI power-cycle */
3300 if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
3301 return -ENODEV;
3302
3303 /* Some devices (notably the HP Smart Array 5i Controller)
3304 need a little pause here */
3305 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3306
3307 /* Now try to get the controller to respond to a no-op */
3308 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
3309 if (hpsa_noop(pdev) == 0)
3310 break;
3311 else
3312 dev_warn(&pdev->dev, "no-op failed%s\n",
3313 (i < 11 ? "; re-trying" : ""));
3314 }
3315 }
3316
3317 BUILD_BUG_ON(sizeof(struct CommandList) % 8);
3318 h = kzalloc(sizeof(*h), GFP_KERNEL);
3319 if (!h)
3320 return -1;
3321
3322 h->busy_initializing = 1;
3323 INIT_HLIST_HEAD(&h->cmpQ);
3324 INIT_HLIST_HEAD(&h->reqQ);
3325 mutex_init(&h->busy_shutting_down);
3326 init_completion(&h->scan_wait);
3327 if (hpsa_pci_init(h, pdev) != 0)
3328 goto clean1;
3329
3330 sprintf(h->devname, "hpsa%d", number_of_controllers);
3331 h->ctlr = number_of_controllers;
3332 number_of_controllers++;
3333 h->pdev = pdev;
3334
3335 /* configure PCI DMA stuff */
3336 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
3337 dac = 1;
3338 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
3339 dac = 0;
3340 else {
3341 dev_err(&pdev->dev, "no suitable DMA available\n");
3342 goto clean1;
3343 }
3344
3345 /* make sure the board interrupts are off */
3346 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3347 if (request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr,
3348 IRQF_DISABLED | IRQF_SHARED, h->devname, h)) {
3349 dev_err(&pdev->dev, "unable to get irq %d for %s\n",
3350 h->intr[SIMPLE_MODE_INT], h->devname);
3351 goto clean2;
3352 }
3353
3354 dev_info(&pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3355 h->devname, pdev->device, pci_name(pdev),
3356 h->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3357
3358 h->cmd_pool_bits =
3359 kmalloc(((h->nr_cmds + BITS_PER_LONG -
3360 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3361 h->cmd_pool = pci_alloc_consistent(h->pdev,
3362 h->nr_cmds * sizeof(*h->cmd_pool),
3363 &(h->cmd_pool_dhandle));
3364 h->errinfo_pool = pci_alloc_consistent(h->pdev,
3365 h->nr_cmds * sizeof(*h->errinfo_pool),
3366 &(h->errinfo_pool_dhandle));
3367 if ((h->cmd_pool_bits == NULL)
3368 || (h->cmd_pool == NULL)
3369 || (h->errinfo_pool == NULL)) {
3370 dev_err(&pdev->dev, "out of memory");
3371 goto clean4;
3372 }
3373 spin_lock_init(&h->lock);
3374
3375 pci_set_drvdata(pdev, h);
3376 memset(h->cmd_pool_bits, 0,
3377 ((h->nr_cmds + BITS_PER_LONG -
3378 1) / BITS_PER_LONG) * sizeof(unsigned long));
3379
3380 hpsa_scsi_setup(h);
3381
3382 /* Turn the interrupts on so we can service requests */
3383 h->access.set_intr_mask(h, HPSA_INTR_ON);
3384
3385 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
3386 h->busy_initializing = 0;
3387 return 1;
3388
3389clean4:
3390 kfree(h->cmd_pool_bits);
3391 if (h->cmd_pool)
3392 pci_free_consistent(h->pdev,
3393 h->nr_cmds * sizeof(struct CommandList),
3394 h->cmd_pool, h->cmd_pool_dhandle);
3395 if (h->errinfo_pool)
3396 pci_free_consistent(h->pdev,
3397 h->nr_cmds * sizeof(struct ErrorInfo),
3398 h->errinfo_pool,
3399 h->errinfo_pool_dhandle);
3400 free_irq(h->intr[SIMPLE_MODE_INT], h);
3401clean2:
3402clean1:
3403 h->busy_initializing = 0;
3404 kfree(h);
3405 return -1;
3406}
3407
3408static void hpsa_flush_cache(struct ctlr_info *h)
3409{
3410 char *flush_buf;
3411 struct CommandList *c;
3412
3413 flush_buf = kzalloc(4, GFP_KERNEL);
3414 if (!flush_buf)
3415 return;
3416
3417 c = cmd_special_alloc(h);
3418 if (!c) {
3419 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
3420 goto out_of_memory;
3421 }
3422 fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
3423 RAID_CTLR_LUNID, TYPE_CMD);
3424 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
3425 if (c->err_info->CommandStatus != 0)
3426 dev_warn(&h->pdev->dev,
3427 "error flushing cache on controller\n");
3428 cmd_special_free(h, c);
3429out_of_memory:
3430 kfree(flush_buf);
3431}
3432
3433static void hpsa_shutdown(struct pci_dev *pdev)
3434{
3435 struct ctlr_info *h;
3436
3437 h = pci_get_drvdata(pdev);
3438 /* Turn board interrupts off and send the flush cache command:
3439 * sendcmd will turn off interrupts and send the flush
3440 * to write all data in the battery-backed cache to disk.
3441 */
3442 hpsa_flush_cache(h);
3443 h->access.set_intr_mask(h, HPSA_INTR_OFF);
3444 free_irq(h->intr[2], h);
3445#ifdef CONFIG_PCI_MSI
3446 if (h->msix_vector)
3447 pci_disable_msix(h->pdev);
3448 else if (h->msi_vector)
3449 pci_disable_msi(h->pdev);
3450#endif /* CONFIG_PCI_MSI */
3451}
3452
3453static void __devexit hpsa_remove_one(struct pci_dev *pdev)
3454{
3455 struct ctlr_info *h;
3456
3457 if (pci_get_drvdata(pdev) == NULL) {
3458 dev_err(&pdev->dev, "unable to remove device\n");
3459 return;
3460 }
3461 h = pci_get_drvdata(pdev);
3462 mutex_lock(&h->busy_shutting_down);
3463 remove_from_scan_list(h);
3464 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
3465 hpsa_shutdown(pdev);
3466 iounmap(h->vaddr);
3467 pci_free_consistent(h->pdev,
3468 h->nr_cmds * sizeof(struct CommandList),
3469 h->cmd_pool, h->cmd_pool_dhandle);
3470 pci_free_consistent(h->pdev,
3471 h->nr_cmds * sizeof(struct ErrorInfo),
3472 h->errinfo_pool, h->errinfo_pool_dhandle);
3473 kfree(h->cmd_pool_bits);
3474 /*
3475 * Deliberately omit pci_disable_device(): it does something nasty to
3476 * Smart Array controllers that pci_enable_device does not undo
3477 */
3478 pci_release_regions(pdev);
3479 pci_set_drvdata(pdev, NULL);
3480 mutex_unlock(&h->busy_shutting_down);
3481 kfree(h);
3482}
3483
3484static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
3485 __attribute__((unused)) pm_message_t state)
3486{
3487 return -ENOSYS;
3488}
3489
3490static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
3491{
3492 return -ENOSYS;
3493}
3494
3495static struct pci_driver hpsa_pci_driver = {
3496 .name = "hpsa",
3497 .probe = hpsa_init_one,
3498 .remove = __devexit_p(hpsa_remove_one),
3499 .id_table = hpsa_pci_device_id, /* id_table */
3500 .shutdown = hpsa_shutdown,
3501 .suspend = hpsa_suspend,
3502 .resume = hpsa_resume,
3503};
3504
3505/*
3506 * This is it. Register the PCI driver information for the cards we control;
3507 * the OS will call our registered routines when it finds one of our cards.
3508 */
3509static int __init hpsa_init(void)
3510{
3511 int err;
3512 /* Start the scan thread */
3513 hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
3514 if (IS_ERR(hpsa_scan_thread)) {
3515 err = PTR_ERR(hpsa_scan_thread);
3516 return -ENODEV;
3517 }
3518 err = pci_register_driver(&hpsa_pci_driver);
3519 if (err)
3520 kthread_stop(hpsa_scan_thread);
3521 return err;
3522}
3523
3524static void __exit hpsa_cleanup(void)
3525{
3526 pci_unregister_driver(&hpsa_pci_driver);
3527 kthread_stop(hpsa_scan_thread);
3528}
3529
3530module_init(hpsa_init);
3531module_exit(hpsa_cleanup);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
new file mode 100644
index 000000000000..6bd1949144b5
--- /dev/null
+++ b/drivers/scsi/hpsa.h
@@ -0,0 +1,273 @@
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 *
20 */
21#ifndef HPSA_H
22#define HPSA_H
23
24#include <scsi/scsicam.h>
25
26#define IO_OK 0
27#define IO_ERROR 1
28
29struct ctlr_info;
30
31struct access_method {
32 void (*submit_command)(struct ctlr_info *h,
33 struct CommandList *c);
34 void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
35 unsigned long (*fifo_full)(struct ctlr_info *h);
36 unsigned long (*intr_pending)(struct ctlr_info *h);
37 unsigned long (*command_completed)(struct ctlr_info *h);
38};
39
40struct hpsa_scsi_dev_t {
41 int devtype;
42 int bus, target, lun; /* as presented to the OS */
43 unsigned char scsi3addr[8]; /* as presented to the HW */
44#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
45 unsigned char device_id[16]; /* from inquiry pg. 0x83 */
46 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
47 unsigned char model[16]; /* bytes 16-31 of inquiry data */
48 unsigned char revision[4]; /* bytes 32-35 of inquiry data */
49 unsigned char raid_level; /* from inquiry page 0xC1 */
50};
51
52struct ctlr_info {
53 int ctlr;
54 char devname[8];
55 char *product_name;
56 char firm_ver[4]; /* Firmware version */
57 struct pci_dev *pdev;
58 __u32 board_id;
59 void __iomem *vaddr;
60 unsigned long paddr;
61 int nr_cmds; /* Number of commands allowed on this controller */
62 struct CfgTable __iomem *cfgtable;
63 int interrupts_enabled;
64 int major;
65 int max_commands;
66 int commands_outstanding;
67 int max_outstanding; /* Debug */
68 int usage_count; /* number of opens on all minor devices */
69# define DOORBELL_INT 0
70# define PERF_MODE_INT 1
71# define SIMPLE_MODE_INT 2
72# define MEMQ_MODE_INT 3
73 unsigned int intr[4];
74 unsigned int msix_vector;
75 unsigned int msi_vector;
76 struct access_method access;
77
78 /* queue and queue Info */
79 struct hlist_head reqQ;
80 struct hlist_head cmpQ;
81 unsigned int Qdepth;
82 unsigned int maxQsinceinit;
83 unsigned int maxSG;
84 spinlock_t lock;
85
86 /* pointers to command and error info pool */
87 struct CommandList *cmd_pool;
88 dma_addr_t cmd_pool_dhandle;
89 struct ErrorInfo *errinfo_pool;
90 dma_addr_t errinfo_pool_dhandle;
91 unsigned long *cmd_pool_bits;
92 int nr_allocs;
93 int nr_frees;
94 int busy_initializing;
95 int busy_scanning;
96 struct mutex busy_shutting_down;
97 struct list_head scan_list;
98 struct completion scan_wait;
99
100 struct Scsi_Host *scsi_host;
101 spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
102 int ndevices; /* number of used elements in .dev[] array. */
103#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
104 struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
105};
106#define HPSA_ABORT_MSG 0
107#define HPSA_DEVICE_RESET_MSG 1
108#define HPSA_BUS_RESET_MSG 2
109#define HPSA_HOST_RESET_MSG 3
110#define HPSA_MSG_SEND_RETRY_LIMIT 10
111#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000
112
113/* Maximum time in seconds driver will wait for command completions
114 * when polling before giving up.
115 */
116#define HPSA_MAX_POLL_TIME_SECS (20)
117
118/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
119 * how many times to retry TEST UNIT READY on a device
120 * while waiting for it to become ready before giving up.
121 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
122 * between sending TURs while waiting for a device
123 * to become ready.
124 */
125#define HPSA_TUR_RETRY_LIMIT (20)
126#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
127
128/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
129 * to become ready, in seconds, before giving up on it.
130 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
131 * between polling the board to see if it is ready, in
132 * milliseconds. HPSA_BOARD_READY_POLL_INTERVAL and
133 * HPSA_BOARD_READY_ITERATIONS are derived from those.
134 */
135#define HPSA_BOARD_READY_WAIT_SECS (120)
136#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
137#define HPSA_BOARD_READY_POLL_INTERVAL \
138 ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
139#define HPSA_BOARD_READY_ITERATIONS \
140 ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
141 HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
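/* Worked example with the values above: the poll interval is
 * (100 * HZ) / 1000 = HZ / 10 jiffies (100 ms), and the scratchpad register is
 * polled up to (120 * 1000) / 100 = 1200 times before the board is declared
 * not ready.
 */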
142#define HPSA_POST_RESET_PAUSE_MSECS (3000)
143#define HPSA_POST_RESET_NOOP_RETRIES (12)
144
145/* Defining the different access methods */
146/*
147 * Memory mapped FIFO interface (SMART 53xx cards)
148 */
149#define SA5_DOORBELL 0x20
150#define SA5_REQUEST_PORT_OFFSET 0x40
151#define SA5_REPLY_INTR_MASK_OFFSET 0x34
152#define SA5_REPLY_PORT_OFFSET 0x44
153#define SA5_INTR_STATUS 0x30
154#define SA5_SCRATCHPAD_OFFSET 0xB0
155
156#define SA5_CTCFG_OFFSET 0xB4
157#define SA5_CTMEM_OFFSET 0xB8
158
159#define SA5_INTR_OFF 0x08
160#define SA5B_INTR_OFF 0x04
161#define SA5_INTR_PENDING 0x08
162#define SA5B_INTR_PENDING 0x04
163#define FIFO_EMPTY 0xffffffff
164#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
165
166#define HPSA_ERROR_BIT 0x02
167#define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04)
168#define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3)
169#define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3)
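/* Worked example with the macros above: a hypothetical completed tag of 0x2c
 * has bit 2 set, so it contains a command index of 0x2c >> 3 = 5; a tag
 * without that bit is matched against c->busaddr after the low two error bits
 * are masked off (see do_hpsa_intr() in hpsa.c).
 */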
170
171#define HPSA_INTR_ON 1
172#define HPSA_INTR_OFF 0
173/*
174 Send the command to the hardware
175*/
176static void SA5_submit_command(struct ctlr_info *h,
177 struct CommandList *c)
178{
179#ifdef HPSA_DEBUG
180 printk(KERN_WARNING "hpsa: Sending %x - down to controller\n",
181 c->busaddr);
182#endif /* HPSA_DEBUG */
183 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
184 h->commands_outstanding++;
185 if (h->commands_outstanding > h->max_outstanding)
186 h->max_outstanding = h->commands_outstanding;
187}
188
189/*
190 * This card is the opposite of the other cards.
191 * 0 turns interrupts on...
192 * 0x08 turns them off...
193 */
194static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
195{
196 if (val) { /* Turn interrupts on */
197 h->interrupts_enabled = 1;
198 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
199 } else { /* Turn them off */
200 h->interrupts_enabled = 0;
201 writel(SA5_INTR_OFF,
202 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
203 }
204}
205/*
206 * Returns true if fifo is full.
207 *
208 */
209static unsigned long SA5_fifo_full(struct ctlr_info *h)
210{
211 if (h->commands_outstanding >= h->max_commands)
212 return 1;
213 else
214 return 0;
215
216}
217/*
218 * returns value read from hardware.
219 * returns FIFO_EMPTY if there is nothing to read
220 */
221static unsigned long SA5_completed(struct ctlr_info *h)
222{
223 unsigned long register_value
224 = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
225
226 if (register_value != FIFO_EMPTY)
227 h->commands_outstanding--;
228
229#ifdef HPSA_DEBUG
230 if (register_value != FIFO_EMPTY)
231 printk(KERN_INFO "hpsa: Read %lx back from board\n",
232 register_value);
233 else
234 printk(KERN_INFO "hpsa: FIFO Empty read\n");
235#endif
236
237 return register_value;
238}
239/*
240 * Returns true if an interrupt is pending.
241 */
242static unsigned long SA5_intr_pending(struct ctlr_info *h)
243{
244 unsigned long register_value =
245 readl(h->vaddr + SA5_INTR_STATUS);
246#ifdef HPSA_DEBUG
247 printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value);
248#endif /* HPSA_DEBUG */
249 if (register_value & SA5_INTR_PENDING)
250 return 1;
251 return 0 ;
252}
253
254
255static struct access_method SA5_access = {
256 SA5_submit_command,
257 SA5_intr_mask,
258 SA5_fifo_full,
259 SA5_intr_pending,
260 SA5_completed,
261};
262
263struct board_type {
264 __u32 board_id;
265 char *product_name;
266 struct access_method *access;
267};
268
269
270/* end of old hpsa_scsi.h file */
271
272#endif /* HPSA_H */
273
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
new file mode 100644
index 000000000000..12d71387ed9a
--- /dev/null
+++ b/drivers/scsi/hpsa_cmd.h
@@ -0,0 +1,326 @@
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 *
20 */
21#ifndef HPSA_CMD_H
22#define HPSA_CMD_H
23
24/* general boundary definitions */
25#define SENSEINFOBYTES 32 /* may vary between hbas */
26#define MAXSGENTRIES 31
27#define MAXREPLYQS 256
28
29/* Command Status value */
30#define CMD_SUCCESS 0x0000
31#define CMD_TARGET_STATUS 0x0001
32#define CMD_DATA_UNDERRUN 0x0002
33#define CMD_DATA_OVERRUN 0x0003
34#define CMD_INVALID 0x0004
35#define CMD_PROTOCOL_ERR 0x0005
36#define CMD_HARDWARE_ERR 0x0006
37#define CMD_CONNECTION_LOST 0x0007
38#define CMD_ABORTED 0x0008
39#define CMD_ABORT_FAILED 0x0009
40#define CMD_UNSOLICITED_ABORT 0x000A
41#define CMD_TIMEOUT 0x000B
42#define CMD_UNABORTABLE 0x000C
43
44/* Unit Attentions ASC's as defined for the MSA2012sa */
45#define POWER_OR_RESET 0x29
46#define STATE_CHANGED 0x2a
47#define UNIT_ATTENTION_CLEARED 0x2f
48#define LUN_FAILED 0x3e
49#define REPORT_LUNS_CHANGED 0x3f
50
51/* Unit Attentions ASCQ's as defined for the MSA2012sa */
52
53 /* These ASCQ's defined for ASC = POWER_OR_RESET */
54#define POWER_ON_RESET 0x00
55#define POWER_ON_REBOOT 0x01
56#define SCSI_BUS_RESET 0x02
57#define MSA_TARGET_RESET 0x03
58#define CONTROLLER_FAILOVER 0x04
59#define TRANSCEIVER_SE 0x05
60#define TRANSCEIVER_LVD 0x06
61
62 /* These ASCQ's defined for ASC = STATE_CHANGED */
63#define RESERVATION_PREEMPTED 0x03
64#define ASYM_ACCESS_CHANGED 0x06
65#define LUN_CAPACITY_CHANGED 0x09
66
67/* transfer direction */
68#define XFER_NONE 0x00
69#define XFER_WRITE 0x01
70#define XFER_READ 0x02
71#define XFER_RSVD 0x03
72
73/* task attribute */
74#define ATTR_UNTAGGED 0x00
75#define ATTR_SIMPLE 0x04
76#define ATTR_HEADOFQUEUE 0x05
77#define ATTR_ORDERED 0x06
78#define ATTR_ACA 0x07
79
80/* cdb type */
81#define TYPE_CMD 0x00
82#define TYPE_MSG 0x01
83
84/* config space register offsets */
85#define CFG_VENDORID 0x00
86#define CFG_DEVICEID 0x02
87#define CFG_I2OBAR 0x10
88#define CFG_MEM1BAR 0x14
89
90/* i2o space register offsets */
91#define I2O_IBDB_SET 0x20
92#define I2O_IBDB_CLEAR 0x70
93#define I2O_INT_STATUS 0x30
94#define I2O_INT_MASK 0x34
95#define I2O_IBPOST_Q 0x40
96#define I2O_OBPOST_Q 0x44
97#define I2O_DMA1_CFG 0x214
98
99/* Configuration Table */
100#define CFGTBL_ChangeReq 0x00000001l
101#define CFGTBL_AccCmds 0x00000001l
102
103#define CFGTBL_Trans_Simple 0x00000002l
104
105#define CFGTBL_BusType_Ultra2 0x00000001l
106#define CFGTBL_BusType_Ultra3 0x00000002l
107#define CFGTBL_BusType_Fibre1G 0x00000100l
108#define CFGTBL_BusType_Fibre2G 0x00000200l
109struct vals32 {
110 __u32 lower;
111 __u32 upper;
112};
113
114union u64bit {
115 struct vals32 val32;
116 __u64 val;
117};
118
119/* FIXME this is a per controller value (barf!) */
120#define HPSA_MAX_TARGETS_PER_CTLR 16
121#define HPSA_MAX_LUN 256
122#define HPSA_MAX_PHYS_LUN 1024
123
124/* SCSI-3 Commands */
125#pragma pack(1)
126
127#define HPSA_INQUIRY 0x12
128struct InquiryData {
129 __u8 data_byte[36];
130};
131
132#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
133#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
134struct ReportLUNdata {
135 __u8 LUNListLength[4];
136 __u32 reserved;
137 __u8 LUN[HPSA_MAX_LUN][8];
138};
139
140struct ReportExtendedLUNdata {
141 __u8 LUNListLength[4];
142 __u8 extended_response_flag;
143 __u8 reserved[3];
144 __u8 LUN[HPSA_MAX_LUN][24];
145};
146
147struct SenseSubsystem_info {
148 __u8 reserved[36];
149 __u8 portname[8];
150 __u8 reserved1[1108];
151};
152
153#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
154struct ReadCapdata {
155 __u8 total_size[4]; /* Total size in blocks */
156 __u8 block_size[4]; /* Size of blocks in bytes */
157};
158
159#if 0
160/* 12 byte commands not implemented in firmware yet. */
161#define HPSA_READ 0xa8
162#define HPSA_WRITE 0xaa
163#endif
164
165#define HPSA_READ 0x28 /* Read(10) */
166#define HPSA_WRITE 0x2a /* Write(10) */
167
168/* BMIC commands */
169#define BMIC_READ 0x26
170#define BMIC_WRITE 0x27
171#define BMIC_CACHE_FLUSH 0xc2
172#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
173
174/* Command List Structure */
175union SCSI3Addr {
176 struct {
177 __u8 Dev;
178 __u8 Bus:6;
179 __u8 Mode:2; /* b00 */
180 } PeripDev;
181 struct {
182 __u8 DevLSB;
183 __u8 DevMSB:6;
184 __u8 Mode:2; /* b01 */
185 } LogDev;
186 struct {
187 __u8 Dev:5;
188 __u8 Bus:3;
189 __u8 Targ:6;
190 __u8 Mode:2; /* b10 */
191 } LogUnit;
192};
193
194struct PhysDevAddr {
195 __u32 TargetId:24;
196 __u32 Bus:6;
197 __u32 Mode:2;
198 /* 2 level target device addr */
199 union SCSI3Addr Target[2];
200};
201
202struct LogDevAddr {
203 __u32 VolId:30;
204 __u32 Mode:2;
205 __u8 reserved[4];
206};
207
208union LUNAddr {
209 __u8 LunAddrBytes[8];
210 union SCSI3Addr SCSI3Lun[4];
211 struct PhysDevAddr PhysDev;
212 struct LogDevAddr LogDev;
213};
214
215struct CommandListHeader {
216 __u8 ReplyQueue;
217 __u8 SGList;
218 __u16 SGTotal;
219 struct vals32 Tag;
220 union LUNAddr LUN;
221};
222
223struct RequestBlock {
224 __u8 CDBLen;
225 struct {
226 __u8 Type:3;
227 __u8 Attribute:3;
228 __u8 Direction:2;
229 } Type;
230 __u16 Timeout;
231 __u8 CDB[16];
232};
233
234struct ErrDescriptor {
235 struct vals32 Addr;
236 __u32 Len;
237};
238
239struct SGDescriptor {
240 struct vals32 Addr;
241 __u32 Len;
242 __u32 Ext;
243};
244
245union MoreErrInfo {
246 struct {
247 __u8 Reserved[3];
248 __u8 Type;
249 __u32 ErrorInfo;
250 } Common_Info;
251 struct {
252 __u8 Reserved[2];
253 __u8 offense_size; /* size of offending entry */
254 __u8 offense_num; /* byte # of offense 0-base */
255 __u32 offense_value;
256 } Invalid_Cmd;
257};
258struct ErrorInfo {
259 __u8 ScsiStatus;
260 __u8 SenseLen;
261 __u16 CommandStatus;
262 __u32 ResidualCnt;
263 union MoreErrInfo MoreErrInfo;
264 __u8 SenseInfo[SENSEINFOBYTES];
265};
266/* Command types */
267#define CMD_IOCTL_PEND 0x01
268#define CMD_SCSI 0x03
269
270struct ctlr_info; /* defined in hpsa.h */
271/* The size of this structure needs to be divisible by 8
272 * on all architectures, because the controller uses 2
273 * lower bits of the address, and the driver uses 1 lower
274 * bit (3 bits total.)
275 */
276struct CommandList {
277 struct CommandListHeader Header;
278 struct RequestBlock Request;
279 struct ErrDescriptor ErrDesc;
280 struct SGDescriptor SG[MAXSGENTRIES];
281 /* information associated with the command */
282 __u32 busaddr; /* physical addr of this record */
283 struct ErrorInfo *err_info; /* pointer to the allocated mem */
284 struct ctlr_info *h;
285 int cmd_type;
286 long cmdindex;
287 struct hlist_node list;
288 struct CommandList *prev;
289 struct CommandList *next;
290 struct request *rq;
291 struct completion *waiting;
292 int retry_count;
293 void *scsi_cmd;
294};
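/* Because the structure size is a multiple of 8, each command's bus address
 * stays 8-byte aligned, which is what frees bits 0-1 for the controller and
 * bit 2 for the driver as described above; hpsa_init_one() in hpsa.c checks
 * this at build time with BUILD_BUG_ON(sizeof(struct CommandList) % 8).
 */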
295
296/* Configuration Table Structure */
297struct HostWrite {
298 __u32 TransportRequest;
299 __u32 Reserved;
300 __u32 CoalIntDelay;
301 __u32 CoalIntCount;
302};
303
304struct CfgTable {
305 __u8 Signature[4];
306 __u32 SpecValence;
307 __u32 TransportSupport;
308 __u32 TransportActive;
309 struct HostWrite HostWrite;
310 __u32 CmdsOutMax;
311 __u32 BusTypes;
312 __u32 Reserved;
313 __u8 ServerName[16];
314 __u32 HeartBeat;
315 __u32 SCSI_Prefetch;
316};
317
318struct hpsa_pci_info {
319 unsigned char bus;
320 unsigned char dev_fn;
321 unsigned short domain;
322 __u32 board_id;
323};
324
325#pragma pack()
326#endif /* HPSA_CMD_H */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 8643f5089361..9e52d16c7c39 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6521,6 +6521,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
 	int rc;
 
 	ENTER;
+	ioa_cfg->pdev->state_saved = true;
 	rc = pci_restore_state(ioa_cfg->pdev);
 
 	if (rc != PCIBIOS_SUCCESSFUL) {
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c4b58d042f6f..881d5dfe8c74 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -68,18 +68,20 @@ struct kmem_cache *scsi_pkt_cachep;
 
 /**
  * struct fc_fcp_internal - FCP layer internal data
  * @scsi_pkt_pool: Memory pool to draw FCP packets from
+ * @scsi_queue_lock: Protects the scsi_pkt_queue
  * @scsi_pkt_queue: Current FCP packets
  * @last_can_queue_ramp_down_time: ramp down time
  * @last_can_queue_ramp_up_time: ramp up time
  * @max_can_queue: max can_queue size
  */
 struct fc_fcp_internal {
 	mempool_t *scsi_pkt_pool;
+	spinlock_t scsi_queue_lock;
 	struct list_head scsi_pkt_queue;
 	unsigned long last_can_queue_ramp_down_time;
 	unsigned long last_can_queue_ramp_up_time;
 	int max_can_queue;
 };
 
 #define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
@@ -410,12 +412,14 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
 	unsigned long flags;
 
 	fp = fc_frame_alloc(lport, len);
-	if (!fp) {
-		spin_lock_irqsave(lport->host->host_lock, flags);
-		fc_fcp_can_queue_ramp_down(lport);
-		spin_unlock_irqrestore(lport->host->host_lock, flags);
-	}
-	return fp;
+	if (likely(fp))
+		return fp;
+
+	/* error case */
+	spin_lock_irqsave(lport->host->host_lock, flags);
+	fc_fcp_can_queue_ramp_down(lport);
+	spin_unlock_irqrestore(lport->host->host_lock, flags);
+	return NULL;
 }
 
 /**
@@ -990,7 +994,7 @@ static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
 	struct scsi_cmnd *sc_cmd;
 	unsigned long flags;
 
-	spin_lock_irqsave(lport->host->host_lock, flags);
+	spin_lock_irqsave(&si->scsi_queue_lock, flags);
 restart:
 	list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
 		sc_cmd = fsp->cmd;
@@ -1001,7 +1005,7 @@ restart:
 			continue;
 
 		fc_fcp_pkt_hold(fsp);
-		spin_unlock_irqrestore(lport->host->host_lock, flags);
+		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
 		if (!fc_fcp_lock_pkt(fsp)) {
 			fc_fcp_cleanup_cmd(fsp, error);
@@ -1010,14 +1014,14 @@ restart:
 		}
 
 		fc_fcp_pkt_release(fsp);
-		spin_lock_irqsave(lport->host->host_lock, flags);
+		spin_lock_irqsave(&si->scsi_queue_lock, flags);
 		/*
 		 * while we dropped the lock multiple pkts could
 		 * have been released, so we have to start over.
 		 */
 		goto restart;
 	}
-	spin_unlock_irqrestore(lport->host->host_lock, flags);
+	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 }
 
 /**
@@ -1035,11 +1039,12 @@ static void fc_fcp_abort_io(struct fc_lport *lport)
  * @fsp: The FCP packet to send
  *
  * Return: Zero for success and -1 for failure
- * Locks: Called with the host lock and irqs disabled.
+ * Locks: Called without locks held
  */
 static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
 {
 	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+	unsigned long flags;
 	int rc;
 
 	fsp->cmd->SCp.ptr = (char *)fsp;
@@ -1049,13 +1054,16 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
 	int_to_scsilun(fsp->cmd->device->lun,
 		       (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
 	memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
-	list_add_tail(&fsp->list, &si->scsi_pkt_queue);
 
-	spin_unlock_irq(lport->host->host_lock);
+	spin_lock_irqsave(&si->scsi_queue_lock, flags);
+	list_add_tail(&fsp->list, &si->scsi_pkt_queue);
+	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 	rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
-	spin_lock_irq(lport->host->host_lock);
-	if (rc)
+	if (unlikely(rc)) {
+		spin_lock_irqsave(&si->scsi_queue_lock, flags);
 		list_del(&fsp->list);
+		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+	}
 
 	return rc;
 }
@@ -1752,6 +1760,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
 	struct fcoe_dev_stats *stats;
 
 	lport = shost_priv(sc_cmd->device->host);
+	spin_unlock_irq(lport->host->host_lock);
 
 	rval = fc_remote_port_chkready(rport);
 	if (rval) {
@@ -1834,6 +1843,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 	}
 out:
+	spin_lock_irq(lport->host->host_lock);
 	return rc;
 }
 EXPORT_SYMBOL(fc_queuecommand);
@@ -1864,11 +1874,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 
 	lport = fsp->lp;
 	si = fc_get_scsi_internal(lport);
-	spin_lock_irqsave(lport->host->host_lock, flags);
-	if (!fsp->cmd) {
-		spin_unlock_irqrestore(lport->host->host_lock, flags);
+	if (!fsp->cmd)
 		return;
-	}
 
 	/*
 	 * if can_queue ramp down is done then try can_queue ramp up
@@ -1880,10 +1887,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1880 sc_cmd = fsp->cmd; 1887 sc_cmd = fsp->cmd;
1881 fsp->cmd = NULL; 1888 fsp->cmd = NULL;
1882 1889
1883 if (!sc_cmd->SCp.ptr) { 1890 if (!sc_cmd->SCp.ptr)
1884 spin_unlock_irqrestore(lport->host->host_lock, flags);
1885 return; 1891 return;
1886 }
1887 1892
1888 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; 1893 CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
1889 switch (fsp->status_code) { 1894 switch (fsp->status_code) {
@@ -1945,10 +1950,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
1945 break; 1950 break;
1946 } 1951 }
1947 1952
1953 spin_lock_irqsave(&si->scsi_queue_lock, flags);
1948 list_del(&fsp->list); 1954 list_del(&fsp->list);
1955 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1949 sc_cmd->SCp.ptr = NULL; 1956 sc_cmd->SCp.ptr = NULL;
1950 sc_cmd->scsi_done(sc_cmd); 1957 sc_cmd->scsi_done(sc_cmd);
1951 spin_unlock_irqrestore(lport->host->host_lock, flags);
1952 1958
1953 /* release ref from initial allocation in queue command */ 1959 /* release ref from initial allocation in queue command */
1954 fc_fcp_pkt_release(fsp); 1960 fc_fcp_pkt_release(fsp);
@@ -2216,6 +2222,7 @@ int fc_fcp_init(struct fc_lport *lport)
2216 lport->scsi_priv = si; 2222 lport->scsi_priv = si;
2217 si->max_can_queue = lport->host->can_queue; 2223 si->max_can_queue = lport->host->can_queue;
2218 INIT_LIST_HEAD(&si->scsi_pkt_queue); 2224 INIT_LIST_HEAD(&si->scsi_pkt_queue);
2225 spin_lock_init(&si->scsi_queue_lock);
2219 2226
2220 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep); 2227 si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
2221 if (!si->scsi_pkt_pool) { 2228 if (!si->scsi_pkt_pool) {
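
The fc_fcp.c hunks above stop piggybacking on the Scsi_Host host_lock: a dedicated scsi_queue_lock now protects only scsi_pkt_queue, and fc_queuecommand() releases the host lock while the command is prepared and sent. Below is a minimal sketch of that per-queue spinlock pattern; example_queue and example_pkt are illustrative names, not libfc symbols.

    /* Sketch of a list guarded by its own spinlock; not libfc code. */
    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct example_pkt {
            struct list_head list;
    };

    struct example_queue {
            spinlock_t       lock;          /* protects pkt_list only */
            struct list_head pkt_list;
    };

    static void example_queue_init(struct example_queue *q)
    {
            spin_lock_init(&q->lock);
            INIT_LIST_HEAD(&q->pkt_list);
    }

    static void example_queue_add(struct example_queue *q, struct example_pkt *pkt)
    {
            unsigned long flags;

            /* irqsave variant: callers may run in process or interrupt context. */
            spin_lock_irqsave(&q->lock, flags);
            list_add_tail(&pkt->list, &q->pkt_list);
            spin_unlock_irqrestore(&q->lock, flags);
    }

    static void example_queue_del(struct example_queue *q, struct example_pkt *pkt)
    {
            unsigned long flags;

            spin_lock_irqsave(&q->lock, flags);
            list_del(&pkt->list);
            spin_unlock_irqrestore(&q->lock, flags);
    }

Keeping the lock private to the packet list means FCP completions no longer contend with everything else serialized by the host lock.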
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 74338c83ad0a..0b165024a219 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -537,7 +537,9 @@ int fc_fabric_login(struct fc_lport *lport)
537 int rc = -1; 537 int rc = -1;
538 538
539 mutex_lock(&lport->lp_mutex); 539 mutex_lock(&lport->lp_mutex);
540 if (lport->state == LPORT_ST_DISABLED) { 540 if (lport->state == LPORT_ST_DISABLED ||
541 lport->state == LPORT_ST_LOGO) {
542 fc_lport_state_enter(lport, LPORT_ST_RESET);
541 fc_lport_enter_reset(lport); 543 fc_lport_enter_reset(lport);
542 rc = 0; 544 rc = 0;
543 } 545 }
@@ -967,6 +969,9 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
967 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", 969 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
968 fc_lport_state(lport)); 970 fc_lport_state(lport));
969 971
972 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
973 return;
974
970 if (lport->vport) { 975 if (lport->vport) {
971 if (lport->link_up) 976 if (lport->link_up)
972 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING); 977 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 35ca0e72df46..02300523b234 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -310,6 +310,7 @@ static void fc_rport_work(struct work_struct *work)
310 restart = 1; 310 restart = 1;
311 else 311 else
312 list_del(&rdata->peers); 312 list_del(&rdata->peers);
313 rdata->event = RPORT_EV_NONE;
313 mutex_unlock(&rdata->rp_mutex); 314 mutex_unlock(&rdata->rp_mutex);
314 mutex_unlock(&lport->disc.disc_mutex); 315 mutex_unlock(&lport->disc.disc_mutex);
315 } 316 }
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index b7689f3d05f5..c28a712fd4db 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -517,7 +517,7 @@ static void iscsi_free_task(struct iscsi_task *task)
517 if (conn->login_task == task) 517 if (conn->login_task == task)
518 return; 518 return;
519 519
520 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*)); 520 kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
521 521
522 if (sc) { 522 if (sc) {
523 task->sc = NULL; 523 task->sc = NULL;
@@ -737,7 +737,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
737 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); 737 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
738 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); 738 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
739 739
740 if (!__kfifo_get(session->cmdpool.queue, 740 if (!kfifo_out(&session->cmdpool.queue,
741 (void*)&task, sizeof(void*))) 741 (void*)&task, sizeof(void*)))
742 return NULL; 742 return NULL;
743 } 743 }
@@ -1567,7 +1567,7 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
1567{ 1567{
1568 struct iscsi_task *task; 1568 struct iscsi_task *task;
1569 1569
1570 if (!__kfifo_get(conn->session->cmdpool.queue, 1570 if (!kfifo_out(&conn->session->cmdpool.queue,
1571 (void *) &task, sizeof(void *))) 1571 (void *) &task, sizeof(void *)))
1572 return NULL; 1572 return NULL;
1573 1573
@@ -2461,12 +2461,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
2461 if (q->pool == NULL) 2461 if (q->pool == NULL)
2462 return -ENOMEM; 2462 return -ENOMEM;
2463 2463
2464 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*), 2464 kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));
2465 GFP_KERNEL, NULL);
2466 if (IS_ERR(q->queue)) {
2467 q->queue = NULL;
2468 goto enomem;
2469 }
2470 2465
2471 for (i = 0; i < max; i++) { 2466 for (i = 0; i < max; i++) {
2472 q->pool[i] = kzalloc(item_size, GFP_KERNEL); 2467 q->pool[i] = kzalloc(item_size, GFP_KERNEL);
@@ -2474,7 +2469,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
2474 q->max = i; 2469 q->max = i;
2475 goto enomem; 2470 goto enomem;
2476 } 2471 }
2477 __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*)); 2472 kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));
2478 } 2473 }
2479 2474
2480 if (items) { 2475 if (items) {
@@ -2497,7 +2492,6 @@ void iscsi_pool_free(struct iscsi_pool *q)
2497 for (i = 0; i < q->max; i++) 2492 for (i = 0; i < q->max; i++)
2498 kfree(q->pool[i]); 2493 kfree(q->pool[i]);
2499 kfree(q->pool); 2494 kfree(q->pool);
2500 kfree(q->queue);
2501} 2495}
2502EXPORT_SYMBOL_GPL(iscsi_pool_free); 2496EXPORT_SYMBOL_GPL(iscsi_pool_free);
2503 2497
@@ -2825,7 +2819,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2825 2819
2826 /* allocate login_task used for the login/text sequences */ 2820 /* allocate login_task used for the login/text sequences */
2827 spin_lock_bh(&session->lock); 2821 spin_lock_bh(&session->lock);
2828 if (!__kfifo_get(session->cmdpool.queue, 2822 if (!kfifo_out(&session->cmdpool.queue,
2829 (void*)&conn->login_task, 2823 (void*)&conn->login_task,
2830 sizeof(void*))) { 2824 sizeof(void*))) {
2831 spin_unlock_bh(&session->lock); 2825 spin_unlock_bh(&session->lock);
@@ -2845,7 +2839,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
2845 return cls_conn; 2839 return cls_conn;
2846 2840
2847login_task_data_alloc_fail: 2841login_task_data_alloc_fail:
2848 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, 2842 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
2849 sizeof(void*)); 2843 sizeof(void*));
2850login_task_alloc_fail: 2844login_task_alloc_fail:
2851 iscsi_destroy_conn(cls_conn); 2845 iscsi_destroy_conn(cls_conn);
@@ -2908,7 +2902,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2908 free_pages((unsigned long) conn->data, 2902 free_pages((unsigned long) conn->data,
2909 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); 2903 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
2910 kfree(conn->persistent_address); 2904 kfree(conn->persistent_address);
2911 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, 2905 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
2912 sizeof(void*)); 2906 sizeof(void*));
2913 if (session->leadconn == conn) 2907 if (session->leadconn == conn)
2914 session->leadconn = NULL; 2908 session->leadconn = NULL;
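
The libiscsi.c hunks are part of the kfifo rework merged around this release: the command pool's fifo is embedded in the pool and initialised in place with kfifo_init(), kfifo_in()/kfifo_out() replace __kfifo_put()/__kfifo_get(), and there is no separately allocated fifo object left to kfree(). A rough usage sketch of that interface follows; pool_example and its helpers are illustrative names, not libiscsi symbols.

    #include <linux/kfifo.h>
    #include <linux/slab.h>

    struct pool_example {
            struct kfifo     queue;         /* fifo of item pointers */
            void           **slots;         /* backing buffer for kfifo_init() */
    };

    static int pool_example_init(struct pool_example *p, unsigned int max)
    {
            /* max is assumed to be a power of two here, so the buffer size
             * (max * sizeof(void *)) matches kfifo's size expectations. */
            p->slots = kcalloc(max, sizeof(void *), GFP_KERNEL);
            if (!p->slots)
                    return -ENOMEM;

            /* In-place initialisation: nothing to IS_ERR()-check any more. */
            kfifo_init(&p->queue, (void *)p->slots, max * sizeof(void *));
            return 0;
    }

    static void pool_example_put(struct pool_example *p, void *item)
    {
            /* The pointer value itself is what gets copied into the fifo. */
            kfifo_in(&p->queue, (void *)&item, sizeof(void *));
    }

    static void *pool_example_get(struct pool_example *p)
    {
            void *item = NULL;

            if (kfifo_out(&p->queue, (void *)&item, sizeof(void *)) !=
                sizeof(void *))
                    return NULL;            /* fifo empty */
            return item;
    }

Each transfer is sizeof(void *) bytes, exactly as in the hunks above: the fifo stores pointers, not the objects they reference.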
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index ca25ee5190b0..db6856c138fc 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -445,15 +445,15 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
445 return; 445 return;
446 446
447 /* flush task's r2t queues */ 447 /* flush task's r2t queues */
448 while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) { 448 while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
449 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, 449 kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
450 sizeof(void*)); 450 sizeof(void*));
451 ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n"); 451 ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n");
452 } 452 }
453 453
454 r2t = tcp_task->r2t; 454 r2t = tcp_task->r2t;
455 if (r2t != NULL) { 455 if (r2t != NULL) {
456 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, 456 kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
457 sizeof(void*)); 457 sizeof(void*));
458 tcp_task->r2t = NULL; 458 tcp_task->r2t = NULL;
459 } 459 }
@@ -541,7 +541,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
541 return 0; 541 return 0;
542 } 542 }
543 543
544 rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); 544 rc = kfifo_out(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
545 if (!rc) { 545 if (!rc) {
546 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. " 546 iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
547 "Target has sent more R2Ts than it " 547 "Target has sent more R2Ts than it "
@@ -554,7 +554,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
554 if (r2t->data_length == 0) { 554 if (r2t->data_length == 0) {
555 iscsi_conn_printk(KERN_ERR, conn, 555 iscsi_conn_printk(KERN_ERR, conn,
556 "invalid R2T with zero data len\n"); 556 "invalid R2T with zero data len\n");
557 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, 557 kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
558 sizeof(void*)); 558 sizeof(void*));
559 return ISCSI_ERR_DATALEN; 559 return ISCSI_ERR_DATALEN;
560 } 560 }
@@ -570,7 +570,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
570 "invalid R2T with data len %u at offset %u " 570 "invalid R2T with data len %u at offset %u "
571 "and total length %d\n", r2t->data_length, 571 "and total length %d\n", r2t->data_length,
572 r2t->data_offset, scsi_out(task->sc)->length); 572 r2t->data_offset, scsi_out(task->sc)->length);
573 __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, 573 kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
574 sizeof(void*)); 574 sizeof(void*));
575 return ISCSI_ERR_DATALEN; 575 return ISCSI_ERR_DATALEN;
576 } 576 }
@@ -580,7 +580,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
580 r2t->sent = 0; 580 r2t->sent = 0;
581 581
582 tcp_task->exp_datasn = r2tsn + 1; 582 tcp_task->exp_datasn = r2tsn + 1;
583 __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*)); 583 kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
584 conn->r2t_pdus_cnt++; 584 conn->r2t_pdus_cnt++;
585 585
586 iscsi_requeue_task(task); 586 iscsi_requeue_task(task);
@@ -951,7 +951,7 @@ int iscsi_tcp_task_init(struct iscsi_task *task)
951 return conn->session->tt->init_pdu(task, 0, task->data_count); 951 return conn->session->tt->init_pdu(task, 0, task->data_count);
952 } 952 }
953 953
954 BUG_ON(__kfifo_len(tcp_task->r2tqueue)); 954 BUG_ON(kfifo_len(&tcp_task->r2tqueue));
955 tcp_task->exp_datasn = 0; 955 tcp_task->exp_datasn = 0;
956 956
957 /* Prepare PDU, optionally w/ immediate data */ 957 /* Prepare PDU, optionally w/ immediate data */
@@ -982,7 +982,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
982 if (r2t->data_length <= r2t->sent) { 982 if (r2t->data_length <= r2t->sent) {
983 ISCSI_DBG_TCP(task->conn, 983 ISCSI_DBG_TCP(task->conn,
984 " done with r2t %p\n", r2t); 984 " done with r2t %p\n", r2t);
985 __kfifo_put(tcp_task->r2tpool.queue, 985 kfifo_in(&tcp_task->r2tpool.queue,
986 (void *)&tcp_task->r2t, 986 (void *)&tcp_task->r2t,
987 sizeof(void *)); 987 sizeof(void *));
988 tcp_task->r2t = r2t = NULL; 988 tcp_task->r2t = r2t = NULL;
@@ -990,8 +990,13 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
990 } 990 }
991 991
992 if (r2t == NULL) { 992 if (r2t == NULL) {
993 __kfifo_get(tcp_task->r2tqueue, 993 if (kfifo_out(&tcp_task->r2tqueue,
994 (void *)&tcp_task->r2t, sizeof(void *)); 994 (void *)&tcp_task->r2t, sizeof(void *)) !=
995 sizeof(void *)) {
996 WARN_ONCE(1, "unexpected fifo state");
997 r2t = NULL;
998 }
999
995 r2t = tcp_task->r2t; 1000 r2t = tcp_task->r2t;
996 } 1001 }
997 spin_unlock_bh(&session->lock); 1002 spin_unlock_bh(&session->lock);
@@ -1127,9 +1132,8 @@ int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
1127 } 1132 }
1128 1133
1129 /* R2T xmit queue */ 1134 /* R2T xmit queue */
1130 tcp_task->r2tqueue = kfifo_alloc( 1135 if (kfifo_alloc(&tcp_task->r2tqueue,
1131 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL); 1136 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL)) {
1132 if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
1133 iscsi_pool_free(&tcp_task->r2tpool); 1137 iscsi_pool_free(&tcp_task->r2tpool);
1134 goto r2t_alloc_fail; 1138 goto r2t_alloc_fail;
1135 } 1139 }
@@ -1142,7 +1146,7 @@ r2t_alloc_fail:
1142 struct iscsi_task *task = session->cmds[i]; 1146 struct iscsi_task *task = session->cmds[i];
1143 struct iscsi_tcp_task *tcp_task = task->dd_data; 1147 struct iscsi_tcp_task *tcp_task = task->dd_data;
1144 1148
1145 kfifo_free(tcp_task->r2tqueue); 1149 kfifo_free(&tcp_task->r2tqueue);
1146 iscsi_pool_free(&tcp_task->r2tpool); 1150 iscsi_pool_free(&tcp_task->r2tpool);
1147 } 1151 }
1148 return -ENOMEM; 1152 return -ENOMEM;
@@ -1157,7 +1161,7 @@ void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
1157 struct iscsi_task *task = session->cmds[i]; 1161 struct iscsi_task *task = session->cmds[i];
1158 struct iscsi_tcp_task *tcp_task = task->dd_data; 1162 struct iscsi_tcp_task *tcp_task = task->dd_data;
1159 1163
1160 kfifo_free(tcp_task->r2tqueue); 1164 kfifo_free(&tcp_task->r2tqueue);
1161 iscsi_pool_free(&tcp_task->r2tpool); 1165 iscsi_pool_free(&tcp_task->r2tpool);
1162 } 1166 }
1163} 1167}
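
libiscsi_tcp.c gets the same conversion for the R2T queues: kfifo_alloc() now fills in an embedded struct kfifo and reports failure with a negative errno rather than an ERR_PTR, and teardown is kfifo_free(&fifo). A short sketch under those assumptions; example_r2t_setup/teardown are made-up helpers.

    #include <linux/kfifo.h>
    #include <linux/slab.h>

    static int example_r2t_setup(struct kfifo *fifo, int max_r2t)
    {
            /* Returns 0 on success or a negative errno; the requested size
             * is rounded up to a power of two internally. */
            return kfifo_alloc(fifo, max_r2t * 4 * sizeof(void *), GFP_KERNEL);
    }

    static void example_r2t_teardown(struct kfifo *fifo)
    {
            kfifo_free(fifo);       /* releases only the internal buffer */
    }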
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 9ad38e81e343..ab19b3b4be52 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -58,19 +58,15 @@ static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
58 goto free_pool; 58 goto free_pool;
59 59
60 spin_lock_init(&q->lock); 60 spin_lock_init(&q->lock);
61 q->queue = kfifo_init((void *) q->pool, max * sizeof(void *), 61 kfifo_init(&q->queue, (void *) q->pool, max * sizeof(void *));
62 GFP_KERNEL, &q->lock);
63 if (IS_ERR(q->queue))
64 goto free_item;
65 62
66 for (i = 0, iue = q->items; i < max; i++) { 63 for (i = 0, iue = q->items; i < max; i++) {
67 __kfifo_put(q->queue, (void *) &iue, sizeof(void *)); 64 kfifo_in(&q->queue, (void *) &iue, sizeof(void *));
68 iue->sbuf = ring[i]; 65 iue->sbuf = ring[i];
69 iue++; 66 iue++;
70 } 67 }
71 return 0; 68 return 0;
72 69
73free_item:
74 kfree(q->items); 70 kfree(q->items);
75free_pool: 71free_pool:
76 kfree(q->pool); 72 kfree(q->pool);
@@ -167,7 +163,11 @@ struct iu_entry *srp_iu_get(struct srp_target *target)
167{ 163{
168 struct iu_entry *iue = NULL; 164 struct iu_entry *iue = NULL;
169 165
170 kfifo_get(target->iu_queue.queue, (void *) &iue, sizeof(void *)); 166 if (kfifo_out_locked(&target->iu_queue.queue, (void *) &iue,
167 sizeof(void *), &target->iu_queue.lock) != sizeof(void *)) {
168 WARN_ONCE(1, "unexpected fifo state");
169 return NULL;
170 }
171 if (!iue) 171 if (!iue)
172 return iue; 172 return iue;
173 iue->target = target; 173 iue->target = target;
@@ -179,7 +179,8 @@ EXPORT_SYMBOL_GPL(srp_iu_get);
179 179
180void srp_iu_put(struct iu_entry *iue) 180void srp_iu_put(struct iu_entry *iue)
181{ 181{
182 kfifo_put(iue->target->iu_queue.queue, (void *) &iue, sizeof(void *)); 182 kfifo_in_locked(&iue->target->iu_queue.queue, (void *) &iue,
183 sizeof(void *), &iue->target->iu_queue.lock);
183} 184}
184EXPORT_SYMBOL_GPL(srp_iu_put); 185EXPORT_SYMBOL_GPL(srp_iu_put);
185 186
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 226920d15ea1..d4da6bdd0e73 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4506,9 +4506,13 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
4506 pdev = phba->pcidev; 4506 pdev = phba->pcidev;
4507 4507
4508 /* Set the device DMA mask size */ 4508 /* Set the device DMA mask size */
4509 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 4509 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
4510 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 4510 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
4511 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
4512 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
4511 return error; 4513 return error;
4514 }
4515 }
4512 4516
4513 /* Get the bus address of Bar0 and Bar2 and the number of bytes 4517 /* Get the bus address of Bar0 and Bar2 and the number of bytes
4514 * required by each mapping. 4518 * required by each mapping.
@@ -6021,9 +6025,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6021 pdev = phba->pcidev; 6025 pdev = phba->pcidev;
6022 6026
6023 /* Set the device DMA mask size */ 6027 /* Set the device DMA mask size */
6024 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) 6028 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6025 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) 6029 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
6030 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6031 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
6026 return error; 6032 return error;
6033 }
6034 }
6027 6035
6028 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the 6036 /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
6029 * number of bytes required by each mapping. They are actually 6037 * number of bytes required by each mapping. They are actually
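
The lpfc hunks set the coherent (consistent) DMA mask together with the streaming mask, falling back from 64-bit to 32-bit as a pair instead of leaving the coherent mask at its default. A hedged sketch of that fallback with the same PCI DMA calls used above; example_set_dma_masks is a made-up helper.

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static int example_set_dma_masks(struct pci_dev *pdev)
    {
            /* Prefer 64-bit for both streaming and coherent mappings ... */
            if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
                pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0)
                    return 0;

            /* ... otherwise fall back to 32-bit for both. */
            if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
                pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
                    return 0;

            return -EIO;
    }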
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 134c63ef6d38..99ff99e45bee 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -2501,7 +2501,9 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2501 instance->base_addr = pci_resource_start(instance->pdev, 0); 2501 instance->base_addr = pci_resource_start(instance->pdev, 0);
2502 } 2502 }
2503 2503
2504 if (pci_request_regions(instance->pdev, "megasas: LSI")) { 2504 if (pci_request_selected_regions(instance->pdev,
2505 pci_select_bars(instance->pdev, IORESOURCE_MEM),
2506 "megasas: LSI")) {
2505 printk(KERN_DEBUG "megasas: IO memory region busy!\n"); 2507 printk(KERN_DEBUG "megasas: IO memory region busy!\n");
2506 return -EBUSY; 2508 return -EBUSY;
2507 } 2509 }
@@ -2642,7 +2644,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
2642 iounmap(instance->reg_set); 2644 iounmap(instance->reg_set);
2643 2645
2644 fail_ioremap: 2646 fail_ioremap:
2645 pci_release_regions(instance->pdev); 2647 pci_release_selected_regions(instance->pdev,
2648 pci_select_bars(instance->pdev, IORESOURCE_MEM));
2646 2649
2647 return -EINVAL; 2650 return -EINVAL;
2648} 2651}
@@ -2662,7 +2665,8 @@ static void megasas_release_mfi(struct megasas_instance *instance)
2662 2665
2663 iounmap(instance->reg_set); 2666 iounmap(instance->reg_set);
2664 2667
2665 pci_release_regions(instance->pdev); 2668 pci_release_selected_regions(instance->pdev,
2669 pci_select_bars(instance->pdev, IORESOURCE_MEM));
2666} 2670}
2667 2671
2668/** 2672/**
@@ -2971,7 +2975,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2971 /* 2975 /*
2972 * PCI prepping: enable device set bus mastering and dma mask 2976 * PCI prepping: enable device set bus mastering and dma mask
2973 */ 2977 */
2974 rval = pci_enable_device(pdev); 2978 rval = pci_enable_device_mem(pdev);
2975 2979
2976 if (rval) { 2980 if (rval) {
2977 return rval; 2981 return rval;
@@ -3276,7 +3280,7 @@ megasas_resume(struct pci_dev *pdev)
3276 /* 3280 /*
3277 * PCI prepping: enable device set bus mastering and dma mask 3281 * PCI prepping: enable device set bus mastering and dma mask
3278 */ 3282 */
3279 rval = pci_enable_device(pdev); 3283 rval = pci_enable_device_mem(pdev);
3280 3284
3281 if (rval) { 3285 if (rval) {
3282 printk(KERN_ERR "megasas: Enable device failed\n"); 3286 printk(KERN_ERR "megasas: Enable device failed\n");
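
megaraid_sas switches to the memory-only PCI helpers so that I/O port BARs it never uses cannot block the probe: pci_enable_device_mem() enables just the memory resources, and pci_request_selected_regions() claims only the BARs reported by pci_select_bars(pdev, IORESOURCE_MEM). A rough sketch of that enable/claim/release sequence; everything except the PCI API names is illustrative.

    #include <linux/pci.h>

    static int example_claim_mem_bars(struct pci_dev *pdev)
    {
            int bars, err;

            err = pci_enable_device_mem(pdev);
            if (err)
                    return err;

            /* Bitmask of the device's memory-mapped BARs. */
            bars = pci_select_bars(pdev, IORESOURCE_MEM);

            err = pci_request_selected_regions(pdev, bars, "example: LSI");
            if (err) {
                    pci_disable_device(pdev);
                    return err;
            }
            return 0;
    }

    static void example_release_mem_bars(struct pci_dev *pdev)
    {
            pci_release_selected_regions(pdev,
                            pci_select_bars(pdev, IORESOURCE_MEM));
            pci_disable_device(pdev);
    }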
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6422e258fd52..89d02401b9ec 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3583,6 +3583,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3583 ioc->transport_cmds.status = MPT2_CMD_NOT_USED; 3583 ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
3584 mutex_init(&ioc->transport_cmds.mutex); 3584 mutex_init(&ioc->transport_cmds.mutex);
3585 3585
3586 /* scsih internal command bits */
3587 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3588 ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
3589 mutex_init(&ioc->scsih_cmds.mutex);
3590
3586 /* task management internal command bits */ 3591 /* task management internal command bits */
3587 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); 3592 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3588 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 3593 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
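
The mpt2sas hunk adds an internal command tracker for scsih-issued requests, initialised the same way as the neighbouring transport_cmds and tm_cmds blocks: allocate a reply buffer, mark the slot unused, initialise its mutex. A small illustrative sketch of that three-step pattern; the struct below is hypothetical, not the MPT2SAS type.

    #include <linux/types.h>
    #include <linux/slab.h>
    #include <linux/mutex.h>

    #define EXAMPLE_CMD_NOT_USED 0

    struct example_internal_cmd {
            void            *reply;         /* reply frame buffer */
            u16              status;        /* EXAMPLE_CMD_NOT_USED, pending, done */
            struct mutex     mutex;         /* serialises users of this slot */
    };

    static int example_internal_cmd_init(struct example_internal_cmd *cmd,
                                         size_t reply_sz)
    {
            cmd->reply = kzalloc(reply_sz, GFP_KERNEL);
            if (!cmd->reply)
                    return -ENOMEM;
            cmd->status = EXAMPLE_CMD_NOT_USED;
            mutex_init(&cmd->mutex);
            return 0;
    }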
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index c790d45876c4..cae6b2cf492f 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -657,6 +657,7 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
657 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 }, 657 { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
658 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, 658 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
659 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, 659 { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
660 { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
660 661
661 { } /* terminate list */ 662 { } /* terminate list */
662}; 663};
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 950202a70bcf..24223473f573 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -432,30 +432,23 @@ static void _osd_free_seg(struct osd_request *or __unused,
432 seg->alloc_size = 0; 432 seg->alloc_size = 0;
433} 433}
434 434
435static void _put_request(struct request *rq , bool is_async) 435static void _put_request(struct request *rq)
436{ 436{
437 if (is_async) { 437 /*
438 WARN_ON(rq->bio); 438 * If osd_finalize_request() was called but the request was not
439 __blk_put_request(rq->q, rq); 439 * executed through the block layer, then we must release BIOs.
440 } else { 440 * TODO: Keep error code in or->async_error. Need to audit all
441 /* 441 * code paths.
442 * If osd_finalize_request() was called but the request was not 442 */
443 * executed through the block layer, then we must release BIOs. 443 if (unlikely(rq->bio))
444 * TODO: Keep error code in or->async_error. Need to audit all 444 blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
445 * code paths. 445 else
446 */ 446 blk_put_request(rq);
447 if (unlikely(rq->bio))
448 blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
449 else
450 blk_put_request(rq);
451 }
452} 447}
453 448
454void osd_end_request(struct osd_request *or) 449void osd_end_request(struct osd_request *or)
455{ 450{
456 struct request *rq = or->request; 451 struct request *rq = or->request;
457 /* IMPORTANT: make sure this agrees with osd_execute_request_async */
458 bool is_async = (or->request->end_io_data == or);
459 452
460 _osd_free_seg(or, &or->set_attr); 453 _osd_free_seg(or, &or->set_attr);
461 _osd_free_seg(or, &or->enc_get_attr); 454 _osd_free_seg(or, &or->enc_get_attr);
@@ -463,20 +456,34 @@ void osd_end_request(struct osd_request *or)
463 456
464 if (rq) { 457 if (rq) {
465 if (rq->next_rq) { 458 if (rq->next_rq) {
466 _put_request(rq->next_rq, is_async); 459 _put_request(rq->next_rq);
467 rq->next_rq = NULL; 460 rq->next_rq = NULL;
468 } 461 }
469 462
470 _put_request(rq, is_async); 463 _put_request(rq);
471 } 464 }
472 _osd_request_free(or); 465 _osd_request_free(or);
473} 466}
474EXPORT_SYMBOL(osd_end_request); 467EXPORT_SYMBOL(osd_end_request);
475 468
469static void _set_error_resid(struct osd_request *or, struct request *req,
470 int error)
471{
472 or->async_error = error;
473 or->req_errors = req->errors ? : error;
474 or->sense_len = req->sense_len;
475 if (or->out.req)
476 or->out.residual = or->out.req->resid_len;
477 if (or->in.req)
478 or->in.residual = or->in.req->resid_len;
479}
480
476int osd_execute_request(struct osd_request *or) 481int osd_execute_request(struct osd_request *or)
477{ 482{
478 return or->async_error = 483 int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
479 blk_execute_rq(or->request->q, NULL, or->request, 0); 484
485 _set_error_resid(or, or->request, error);
486 return error;
480} 487}
481EXPORT_SYMBOL(osd_execute_request); 488EXPORT_SYMBOL(osd_execute_request);
482 489
@@ -484,15 +491,17 @@ static void osd_request_async_done(struct request *req, int error)
484{ 491{
485 struct osd_request *or = req->end_io_data; 492 struct osd_request *or = req->end_io_data;
486 493
487 or->async_error = error; 494 _set_error_resid(or, req, error);
488 495 if (req->next_rq) {
489 if (unlikely(error)) { 496 __blk_put_request(req->q, req->next_rq);
490 OSD_DEBUG("osd_request_async_done error recieved %d " 497 req->next_rq = NULL;
491 "errors 0x%x\n", error, req->errors);
492 if (!req->errors) /* don't miss out on this one */
493 req->errors = error;
494 } 498 }
495 499
500 __blk_put_request(req->q, req);
501 or->request = NULL;
502 or->in.req = NULL;
503 or->out.req = NULL;
504
496 if (or->async_done) 505 if (or->async_done)
497 or->async_done(or, or->async_private); 506 or->async_done(or, or->async_private);
498 else 507 else
@@ -1489,21 +1498,18 @@ int osd_req_decode_sense_full(struct osd_request *or,
1489#endif 1498#endif
1490 int ret; 1499 int ret;
1491 1500
1492 if (likely(!or->request->errors)) { 1501 if (likely(!or->req_errors))
1493 osi->out_resid = 0;
1494 osi->in_resid = 0;
1495 return 0; 1502 return 0;
1496 }
1497 1503
1498 osi = osi ? : &local_osi; 1504 osi = osi ? : &local_osi;
1499 memset(osi, 0, sizeof(*osi)); 1505 memset(osi, 0, sizeof(*osi));
1500 1506
1501 ssdb = or->request->sense; 1507 ssdb = (typeof(ssdb))or->sense;
1502 sense_len = or->request->sense_len; 1508 sense_len = or->sense_len;
1503 if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) { 1509 if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
1504 OSD_ERR("Block-layer returned error(0x%x) but " 1510 OSD_ERR("Block-layer returned error(0x%x) but "
1505 "sense_len(%u) || key(%d) is empty\n", 1511 "sense_len(%u) || key(%d) is empty\n",
1506 or->request->errors, sense_len, ssdb->sense_key); 1512 or->req_errors, sense_len, ssdb->sense_key);
1507 goto analyze; 1513 goto analyze;
1508 } 1514 }
1509 1515
@@ -1525,7 +1531,7 @@ int osd_req_decode_sense_full(struct osd_request *or,
1525 "additional_code=0x%x async_error=%d errors=0x%x\n", 1531 "additional_code=0x%x async_error=%d errors=0x%x\n",
1526 osi->key, original_sense_len, sense_len, 1532 osi->key, original_sense_len, sense_len,
1527 osi->additional_code, or->async_error, 1533 osi->additional_code, or->async_error,
1528 or->request->errors); 1534 or->req_errors);
1529 1535
1530 if (original_sense_len < sense_len) 1536 if (original_sense_len < sense_len)
1531 sense_len = original_sense_len; 1537 sense_len = original_sense_len;
@@ -1695,10 +1701,10 @@ analyze:
1695 ret = -EIO; 1701 ret = -EIO;
1696 } 1702 }
1697 1703
1698 if (or->out.req) 1704 if (!or->out.residual)
1699 osi->out_resid = or->out.req->resid_len ?: or->out.total_bytes; 1705 or->out.residual = or->out.total_bytes;
1700 if (or->in.req) 1706 if (!or->in.residual)
1701 osi->in_resid = or->in.req->resid_len ?: or->in.total_bytes; 1707 or->in.residual = or->in.total_bytes;
1702 1708
1703 return ret; 1709 return ret;
1704} 1710}
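
The osd_initiator changes stop touching the struct request after it has been completed: a helper records the error, sense length and residuals in the osd_request while the request is still valid, and the async completion path then puts the requests and clears the pointers before invoking the user's callback. A hedged sketch of that "snapshot before release" idea; the names below are illustrative, not the osd API.

    #include <linux/blkdev.h>

    struct example_osd_req {
            struct request  *req;
            int              req_errors;
            unsigned int     sense_len;
            unsigned int     residual;
    };

    /* Copy out everything needed later while the request is still alive. */
    static void example_snapshot(struct example_osd_req *o,
                                 struct request *req, int error)
    {
            o->req_errors = req->errors ? : error;
            o->sense_len  = req->sense_len;
            o->residual   = req->resid_len;
    }

    /* rq_end_io_fn: runs with the queue lock held, hence __blk_put_request(). */
    static void example_async_done(struct request *req, int error)
    {
            struct example_osd_req *o = req->end_io_data;

            example_snapshot(o, req, error);
            __blk_put_request(req->q, req); /* req must not be used after this */
            o->req = NULL;
            /* ... invoke the caller's completion callback from here ... */
    }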
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
index 22644de26399..63ad4aa0c422 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.h
+++ b/drivers/scsi/pm8001/pm8001_ctl.h
@@ -45,16 +45,6 @@
45#define HEADER_LEN 28 45#define HEADER_LEN 28
46#define SIZE_OFFSET 16 46#define SIZE_OFFSET 16
47 47
48struct pm8001_ioctl_payload {
49 u32 signature;
50 u16 major_function;
51 u16 minor_function;
52 u16 length;
53 u16 status;
54 u16 offset;
55 u16 id;
56 u8 func_specific[1];
57};
58 48
59#define FLASH_OK 0x000000 49#define FLASH_OK 0x000000
60#define FAIL_OPEN_BIOS_FILE 0x000100 50#define FAIL_OPEN_BIOS_FILE 0x000100
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index a3de306b9045..9b44c6f1b10e 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -373,10 +373,7 @@ static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
373static void __devinit 373static void __devinit
374mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit) 374mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
375{ 375{
376 u32 offset; 376 u32 value, offset, i;
377 u32 value;
378 u32 i, j;
379 u32 bit_cnt;
380 377
381#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000 378#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
382#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000 379#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
@@ -392,55 +389,35 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
392 */ 389 */
393 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) 390 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
394 return; 391 return;
395 /* set SSC bit of PHY 0 - 3 */ 392
396 for (i = 0; i < 4; i++) { 393 for (i = 0; i < 4; i++) {
397 offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i; 394 offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
398 value = pm8001_cr32(pm8001_ha, 2, offset); 395 pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
399 if (SSCbit) {
400 value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
401 value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
402 } else {
403 value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
404 value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
405 }
406 bit_cnt = 0;
407 for (j = 0; j < 31; j++)
408 if ((value >> j) & 0x00000001)
409 bit_cnt++;
410 if (bit_cnt % 2)
411 value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
412 else
413 value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
414
415 pm8001_cw32(pm8001_ha, 2, offset, value);
416 } 396 }
417
418 /* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */ 397 /* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
419 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) 398 if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
420 return; 399 return;
421
422 /* set SSC bit of PHY 4 - 7 */
423 for (i = 4; i < 8; i++) { 400 for (i = 4; i < 8; i++) {
424 offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4); 401 offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
425 value = pm8001_cr32(pm8001_ha, 2, offset); 402 pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
426 if (SSCbit) {
427 value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
428 value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
429 } else {
430 value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
431 value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
432 }
433 bit_cnt = 0;
434 for (j = 0; j < 31; j++)
435 if ((value >> j) & 0x00000001)
436 bit_cnt++;
437 if (bit_cnt % 2)
438 value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
439 else
440 value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
441
442 pm8001_cw32(pm8001_ha, 2, offset, value);
443 } 403 }
404 /*************************************************************
405 Change the SSC upspreading value to 0x0 so that upspreading is disabled.
406 Device MABC SMOD0 Controls
407 Address: (via MEMBASE-III):
408 Using shifted destination address 0x0_0000: with Offset 0xD8
409
410 31:28 R/W Reserved Do not change
411 27:24 R/W SAS_SMOD_SPRDUP 0000
412 23:20 R/W SAS_SMOD_SPRDDN 0000
413 19:0 R/W Reserved Do not change
414 Upon power-up this register will read as 0x8990c016,
415 and I would like you to change the SAS_SMOD_SPRDUP bits to 0b0000
416 so that the written value will be 0x8090c016.
417 This will ensure only down-spreading SSC is enabled on the SPC.
418 *************************************************************/
419 value = pm8001_cr32(pm8001_ha, 2, 0xd8);
420 pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
444 421
445 /*set the shifted destination address to 0x0 to avoid error operation */ 422 /*set the shifted destination address to 0x0 to avoid error operation */
446 bar4_shift(pm8001_ha, 0x0); 423 bar4_shift(pm8001_ha, 0x0);
@@ -1901,7 +1878,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
1901{ 1878{
1902 struct sas_task *t; 1879 struct sas_task *t;
1903 struct pm8001_ccb_info *ccb; 1880 struct pm8001_ccb_info *ccb;
1904 unsigned long flags; 1881 unsigned long flags = 0;
1905 u32 param; 1882 u32 param;
1906 u32 status; 1883 u32 status;
1907 u32 tag; 1884 u32 tag;
@@ -2040,7 +2017,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2040 ts->stat = SAS_QUEUE_FULL; 2017 ts->stat = SAS_QUEUE_FULL;
2041 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2018 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2042 mb();/*in order to force CPU ordering*/ 2019 mb();/*in order to force CPU ordering*/
2020 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2043 t->task_done(t); 2021 t->task_done(t);
2022 spin_lock_irqsave(&pm8001_ha->lock, flags);
2044 return; 2023 return;
2045 } 2024 }
2046 break; 2025 break;
@@ -2058,7 +2037,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2058 ts->stat = SAS_QUEUE_FULL; 2037 ts->stat = SAS_QUEUE_FULL;
2059 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2038 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2060 mb();/*ditto*/ 2039 mb();/*ditto*/
2040 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2061 t->task_done(t); 2041 t->task_done(t);
2042 spin_lock_irqsave(&pm8001_ha->lock, flags);
2062 return; 2043 return;
2063 } 2044 }
2064 break; 2045 break;
@@ -2084,7 +2065,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2084 ts->stat = SAS_QUEUE_FULL; 2065 ts->stat = SAS_QUEUE_FULL;
2085 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2066 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2086 mb();/* ditto*/ 2067 mb();/* ditto*/
2068 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2087 t->task_done(t); 2069 t->task_done(t);
2070 spin_lock_irqsave(&pm8001_ha->lock, flags);
2088 return; 2071 return;
2089 } 2072 }
2090 break; 2073 break;
@@ -2149,7 +2132,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2149 ts->stat = SAS_QUEUE_FULL; 2132 ts->stat = SAS_QUEUE_FULL;
2150 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2133 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2151 mb();/*ditto*/ 2134 mb();/*ditto*/
2135 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2152 t->task_done(t); 2136 t->task_done(t);
2137 spin_lock_irqsave(&pm8001_ha->lock, flags);
2153 return; 2138 return;
2154 } 2139 }
2155 break; 2140 break;
@@ -2171,7 +2156,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2171 ts->stat = SAS_QUEUE_FULL; 2156 ts->stat = SAS_QUEUE_FULL;
2172 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2157 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2173 mb();/*ditto*/ 2158 mb();/*ditto*/
2159 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2174 t->task_done(t); 2160 t->task_done(t);
2161 spin_lock_irqsave(&pm8001_ha->lock, flags);
2175 return; 2162 return;
2176 } 2163 }
2177 break; 2164 break;
@@ -2200,11 +2187,20 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2200 " resp 0x%x stat 0x%x but aborted by upper layer!\n", 2187 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2201 t, status, ts->resp, ts->stat)); 2188 t, status, ts->resp, ts->stat));
2202 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2189 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2203 } else { 2190 } else if (t->uldd_task) {
2204 spin_unlock_irqrestore(&t->task_state_lock, flags); 2191 spin_unlock_irqrestore(&t->task_state_lock, flags);
2205 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2192 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2206 mb();/* ditto */ 2193 mb();/* ditto */
2194 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2207 t->task_done(t); 2195 t->task_done(t);
2196 spin_lock_irqsave(&pm8001_ha->lock, flags);
2197 } else if (!t->uldd_task) {
2198 spin_unlock_irqrestore(&t->task_state_lock, flags);
2199 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2200 mb();/*ditto*/
2201 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2202 t->task_done(t);
2203 spin_lock_irqsave(&pm8001_ha->lock, flags);
2208 } 2204 }
2209} 2205}
2210 2206
@@ -2212,7 +2208,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2212static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) 2208static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2213{ 2209{
2214 struct sas_task *t; 2210 struct sas_task *t;
2215 unsigned long flags; 2211 unsigned long flags = 0;
2216 struct task_status_struct *ts; 2212 struct task_status_struct *ts;
2217 struct pm8001_ccb_info *ccb; 2213 struct pm8001_ccb_info *ccb;
2218 struct pm8001_device *pm8001_dev; 2214 struct pm8001_device *pm8001_dev;
@@ -2292,7 +2288,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2292 ts->stat = SAS_QUEUE_FULL; 2288 ts->stat = SAS_QUEUE_FULL;
2293 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2289 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2294 mb();/*ditto*/ 2290 mb();/*ditto*/
2291 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2295 t->task_done(t); 2292 t->task_done(t);
2293 spin_lock_irqsave(&pm8001_ha->lock, flags);
2296 return; 2294 return;
2297 } 2295 }
2298 break; 2296 break;
@@ -2401,11 +2399,20 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2401 " resp 0x%x stat 0x%x but aborted by upper layer!\n", 2399 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2402 t, event, ts->resp, ts->stat)); 2400 t, event, ts->resp, ts->stat));
2403 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2401 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2404 } else { 2402 } else if (t->uldd_task) {
2405 spin_unlock_irqrestore(&t->task_state_lock, flags); 2403 spin_unlock_irqrestore(&t->task_state_lock, flags);
2406 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2404 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2407 mb();/* in order to force CPU ordering */ 2405 mb();/* ditto */
2406 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2408 t->task_done(t); 2407 t->task_done(t);
2408 spin_lock_irqsave(&pm8001_ha->lock, flags);
2409 } else if (!t->uldd_task) {
2410 spin_unlock_irqrestore(&t->task_state_lock, flags);
2411 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2412 mb();/*ditto*/
2413 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
2414 t->task_done(t);
2415 spin_lock_irqsave(&pm8001_ha->lock, flags);
2409 } 2416 }
2410} 2417}
2411 2418
@@ -2876,15 +2883,20 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2876 le32_to_cpu(pPayload->lr_evt_status_phyid_portid); 2883 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
2877 u8 link_rate = 2884 u8 link_rate =
2878 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28); 2885 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
2886 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
2879 u8 phy_id = 2887 u8 phy_id =
2880 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); 2888 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
2889 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
2890 u8 portstate = (u8)(npip_portstate & 0x0000000F);
2891 struct pm8001_port *port = &pm8001_ha->port[port_id];
2881 struct sas_ha_struct *sas_ha = pm8001_ha->sas; 2892 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
2882 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 2893 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2883 unsigned long flags; 2894 unsigned long flags;
2884 u8 deviceType = pPayload->sas_identify.dev_type; 2895 u8 deviceType = pPayload->sas_identify.dev_type;
2885 2896 port->port_state = portstate;
2886 PM8001_MSG_DBG(pm8001_ha, 2897 PM8001_MSG_DBG(pm8001_ha,
2887 pm8001_printk("HW_EVENT_SAS_PHY_UP \n")); 2898 pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
2899 port_id, phy_id));
2888 2900
2889 switch (deviceType) { 2901 switch (deviceType) {
2890 case SAS_PHY_UNUSED: 2902 case SAS_PHY_UNUSED:
@@ -2895,16 +2907,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2895 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n")); 2907 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
2896 pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, 2908 pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
2897 PHY_NOTIFY_ENABLE_SPINUP); 2909 PHY_NOTIFY_ENABLE_SPINUP);
2910 port->port_attached = 1;
2898 get_lrate_mode(phy, link_rate); 2911 get_lrate_mode(phy, link_rate);
2899 break; 2912 break;
2900 case SAS_EDGE_EXPANDER_DEVICE: 2913 case SAS_EDGE_EXPANDER_DEVICE:
2901 PM8001_MSG_DBG(pm8001_ha, 2914 PM8001_MSG_DBG(pm8001_ha,
2902 pm8001_printk("expander device.\n")); 2915 pm8001_printk("expander device.\n"));
2916 port->port_attached = 1;
2903 get_lrate_mode(phy, link_rate); 2917 get_lrate_mode(phy, link_rate);
2904 break; 2918 break;
2905 case SAS_FANOUT_EXPANDER_DEVICE: 2919 case SAS_FANOUT_EXPANDER_DEVICE:
2906 PM8001_MSG_DBG(pm8001_ha, 2920 PM8001_MSG_DBG(pm8001_ha,
2907 pm8001_printk("fanout expander device.\n")); 2921 pm8001_printk("fanout expander device.\n"));
2922 port->port_attached = 1;
2908 get_lrate_mode(phy, link_rate); 2923 get_lrate_mode(phy, link_rate);
2909 break; 2924 break;
2910 default: 2925 default:
@@ -2946,11 +2961,20 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
2946 le32_to_cpu(pPayload->lr_evt_status_phyid_portid); 2961 le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
2947 u8 link_rate = 2962 u8 link_rate =
2948 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28); 2963 (u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
2964 u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
2949 u8 phy_id = 2965 u8 phy_id =
2950 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); 2966 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
2967 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
2968 u8 portstate = (u8)(npip_portstate & 0x0000000F);
2969 struct pm8001_port *port = &pm8001_ha->port[port_id];
2951 struct sas_ha_struct *sas_ha = pm8001_ha->sas; 2970 struct sas_ha_struct *sas_ha = pm8001_ha->sas;
2952 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; 2971 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
2953 unsigned long flags; 2972 unsigned long flags;
2973 PM8001_MSG_DBG(pm8001_ha,
2974 pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
2975 " phy id = %d\n", port_id, phy_id));
2976 port->port_state = portstate;
2977 port->port_attached = 1;
2954 get_lrate_mode(phy, link_rate); 2978 get_lrate_mode(phy, link_rate);
2955 phy->phy_type |= PORT_TYPE_SATA; 2979 phy->phy_type |= PORT_TYPE_SATA;
2956 phy->phy_attached = 1; 2980 phy->phy_attached = 1;
@@ -2984,7 +3008,13 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
2984 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); 3008 (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
2985 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate); 3009 u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
2986 u8 portstate = (u8)(npip_portstate & 0x0000000F); 3010 u8 portstate = (u8)(npip_portstate & 0x0000000F);
2987 3011 struct pm8001_port *port = &pm8001_ha->port[port_id];
3012 struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
3013 port->port_state = portstate;
3014 phy->phy_type = 0;
3015 phy->identify.device_type = 0;
3016 phy->phy_attached = 0;
3017 memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
2988 switch (portstate) { 3018 switch (portstate) {
2989 case PORT_VALID: 3019 case PORT_VALID:
2990 break; 3020 break;
@@ -2993,26 +3023,30 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
2993 pm8001_printk(" PortInvalid portID %d \n", port_id)); 3023 pm8001_printk(" PortInvalid portID %d \n", port_id));
2994 PM8001_MSG_DBG(pm8001_ha, 3024 PM8001_MSG_DBG(pm8001_ha,
2995 pm8001_printk(" Last phy Down and port invalid\n")); 3025 pm8001_printk(" Last phy Down and port invalid\n"));
3026 port->port_attached = 0;
2996 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, 3027 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
2997 port_id, phy_id, 0, 0); 3028 port_id, phy_id, 0, 0);
2998 break; 3029 break;
2999 case PORT_IN_RESET: 3030 case PORT_IN_RESET:
3000 PM8001_MSG_DBG(pm8001_ha, 3031 PM8001_MSG_DBG(pm8001_ha,
3001 pm8001_printk(" PortInReset portID %d \n", port_id)); 3032 pm8001_printk(" Port In Reset portID %d \n", port_id));
3002 break; 3033 break;
3003 case PORT_NOT_ESTABLISHED: 3034 case PORT_NOT_ESTABLISHED:
3004 PM8001_MSG_DBG(pm8001_ha, 3035 PM8001_MSG_DBG(pm8001_ha,
3005 pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n")); 3036 pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
3037 port->port_attached = 0;
3006 break; 3038 break;
3007 case PORT_LOSTCOMM: 3039 case PORT_LOSTCOMM:
3008 PM8001_MSG_DBG(pm8001_ha, 3040 PM8001_MSG_DBG(pm8001_ha,
3009 pm8001_printk(" phy Down and PORT_LOSTCOMM\n")); 3041 pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
3010 PM8001_MSG_DBG(pm8001_ha, 3042 PM8001_MSG_DBG(pm8001_ha,
3011 pm8001_printk(" Last phy Down and port invalid\n")); 3043 pm8001_printk(" Last phy Down and port invalid\n"));
3044 port->port_attached = 0;
3012 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, 3045 pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
3013 port_id, phy_id, 0, 0); 3046 port_id, phy_id, 0, 0);
3014 break; 3047 break;
3015 default: 3048 default:
3049 port->port_attached = 0;
3016 PM8001_MSG_DBG(pm8001_ha, 3050 PM8001_MSG_DBG(pm8001_ha,
3017 pm8001_printk(" phy Down and(default) = %x\n", 3051 pm8001_printk(" phy Down and(default) = %x\n",
3018 portstate)); 3052 portstate));
@@ -3770,7 +3804,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
3770 u32 opc = OPC_INB_SSPINIIOSTART; 3804 u32 opc = OPC_INB_SSPINIIOSTART;
3771 memset(&ssp_cmd, 0, sizeof(ssp_cmd)); 3805 memset(&ssp_cmd, 0, sizeof(ssp_cmd));
3772 memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); 3806 memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
3773 ssp_cmd.dir_m_tlr = data_dir_flags[task->data_dir] << 8 | 0x0;/*0 for 3807 ssp_cmd.dir_m_tlr =
3808 cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for
3774 SAS 1.1 compatible TLR*/ 3809 SAS 1.1 compatible TLR*/
3775 ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); 3810 ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
3776 ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); 3811 ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
@@ -3841,7 +3876,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
3841 } 3876 }
3842 } 3877 }
3843 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) 3878 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
3844 ncg_tag = cpu_to_le32(hdr_tag); 3879 ncg_tag = hdr_tag;
3845 dir = data_dir_flags[task->data_dir] << 8; 3880 dir = data_dir_flags[task->data_dir] << 8;
3846 sata_cmd.tag = cpu_to_le32(tag); 3881 sata_cmd.tag = cpu_to_le32(tag);
3847 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); 3882 sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
@@ -3986,7 +4021,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
3986 ((stp_sspsmp_sata & 0x03) * 0x10000000)); 4021 ((stp_sspsmp_sata & 0x03) * 0x10000000));
3987 payload.firstburstsize_ITNexustimeout = 4022 payload.firstburstsize_ITNexustimeout =
3988 cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); 4023 cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
3989 memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr, 4024 memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
3990 SAS_ADDR_SIZE); 4025 SAS_ADDR_SIZE);
3991 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4026 rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
3992 return rc; 4027 return rc;
@@ -4027,7 +4062,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
4027 struct inbound_queue_table *circularQ; 4062 struct inbound_queue_table *circularQ;
4028 int ret; 4063 int ret;
4029 u32 opc = OPC_INB_LOCAL_PHY_CONTROL; 4064 u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
4030 memset((u8 *)&payload, 0, sizeof(payload)); 4065 memset(&payload, 0, sizeof(payload));
4031 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4066 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4032 payload.tag = 1; 4067 payload.tag = 1;
4033 payload.phyop_phyid = 4068 payload.phyop_phyid =
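
Several pm8001_hwi.c hunks drop the HBA lock around the libsas task_done() callback: the completion handlers run with pm8001_ha->lock held, and task_done() may re-enter the LLDD and try to take that lock again, so the pattern is unlock, call back, re-lock. The sketch below illustrates that idea with hypothetical names; it takes the lock itself only to stay self-contained, whereas in the driver the completion path is already called under it.

    #include <linux/spinlock.h>

    struct example_task {
            void (*task_done)(struct example_task *t);
    };

    struct example_hba {
            spinlock_t lock;
    };

    static void example_io_completion(struct example_hba *hba,
                                      struct example_task *t)
    {
            unsigned long flags;

            spin_lock_irqsave(&hba->lock, flags);
            /* ... free the ccb/tag and update HBA state under the lock ... */

            /* Drop the HBA lock across the callback: task_done() may submit
             * new I/O and would otherwise deadlock re-acquiring hba->lock. */
            spin_unlock_irqrestore(&hba->lock, flags);
            t->task_done(t);
            spin_lock_irqsave(&hba->lock, flags);

            /* ... any remaining bookkeeping, still under the lock ... */
            spin_unlock_irqrestore(&hba->lock, flags);
    }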
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 96e4daa68b8f..833a5201eda4 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -242,8 +242,7 @@ struct reg_dev_req {
242 __le32 phyid_portid; 242 __le32 phyid_portid;
243 __le32 dtype_dlr_retry; 243 __le32 dtype_dlr_retry;
244 __le32 firstburstsize_ITNexustimeout; 244 __le32 firstburstsize_ITNexustimeout;
245 u32 sas_addr_hi; 245 u8 sas_addr[SAS_ADDR_SIZE];
246 u32 sas_addr_low;
247 __le32 upper_device_id; 246 __le32 upper_device_id;
248 u32 reserved[8]; 247 u32 reserved[8];
249} __attribute__((packed, aligned(4))); 248} __attribute__((packed, aligned(4)));
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 42ebe725d5a5..c2f1032496cb 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -200,8 +200,13 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
200{ 200{
201 int i; 201 int i;
202 spin_lock_init(&pm8001_ha->lock); 202 spin_lock_init(&pm8001_ha->lock);
203 for (i = 0; i < pm8001_ha->chip->n_phy; i++) 203 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
204 pm8001_phy_init(pm8001_ha, i); 204 pm8001_phy_init(pm8001_ha, i);
205 pm8001_ha->port[i].wide_port_phymap = 0;
206 pm8001_ha->port[i].port_attached = 0;
207 pm8001_ha->port[i].port_state = 0;
208 INIT_LIST_HEAD(&pm8001_ha->port[i].list);
209 }
205 210
206 pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL); 211 pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
207 if (!pm8001_ha->tags) 212 if (!pm8001_ha->tags)
@@ -511,19 +516,23 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
511 u8 i; 516 u8 i;
512#ifdef PM8001_READ_VPD 517#ifdef PM8001_READ_VPD
513 DECLARE_COMPLETION_ONSTACK(completion); 518 DECLARE_COMPLETION_ONSTACK(completion);
519 struct pm8001_ioctl_payload payload;
514 pm8001_ha->nvmd_completion = &completion; 520 pm8001_ha->nvmd_completion = &completion;
515 PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, 0, 0); 521 payload.minor_function = 0;
522 payload.length = 128;
523 payload.func_specific = kzalloc(128, GFP_KERNEL);
524 PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
516 wait_for_completion(&completion); 525 wait_for_completion(&completion);
517 for (i = 0; i < pm8001_ha->chip->n_phy; i++) { 526 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
518 memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr, 527 memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
519 SAS_ADDR_SIZE); 528 SAS_ADDR_SIZE);
520 PM8001_INIT_DBG(pm8001_ha, 529 PM8001_INIT_DBG(pm8001_ha,
521 pm8001_printk("phy %d sas_addr = %x \n", i, 530 pm8001_printk("phy %d sas_addr = %016llx \n", i,
522 (u64)pm8001_ha->phy[i].dev_sas_addr)); 531 pm8001_ha->phy[i].dev_sas_addr));
523 } 532 }
524#else 533#else
525 for (i = 0; i < pm8001_ha->chip->n_phy; i++) { 534 for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
526 pm8001_ha->phy[i].dev_sas_addr = 0x500e004010000004ULL; 535 pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
527 pm8001_ha->phy[i].dev_sas_addr = 536 pm8001_ha->phy[i].dev_sas_addr =
528 cpu_to_be64((u64) 537 cpu_to_be64((u64)
529 (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr)); 538 (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 1f767a0e727a..7f9c83a76390 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -329,6 +329,23 @@ int pm8001_slave_configure(struct scsi_device *sdev)
329 } 329 }
330 return 0; 330 return 0;
331} 331}
332 /* Find the local port id that's attached to this device */
333static int sas_find_local_port_id(struct domain_device *dev)
334{
335 struct domain_device *pdev = dev->parent;
336
337 /* Directly attached device */
338 if (!pdev)
339 return dev->port->id;
340 while (pdev) {
341 struct domain_device *pdev_p = pdev->parent;
342 if (!pdev_p)
343 return pdev->port->id;
344 pdev = pdev->parent;
345 }
346 return 0;
347}
348
332/** 349/**
333 * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware. 350 * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
334 * @task: the task to be execute. 351 * @task: the task to be execute.
@@ -346,11 +363,12 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
346 struct domain_device *dev = task->dev; 363 struct domain_device *dev = task->dev;
347 struct pm8001_hba_info *pm8001_ha; 364 struct pm8001_hba_info *pm8001_ha;
348 struct pm8001_device *pm8001_dev; 365 struct pm8001_device *pm8001_dev;
366 struct pm8001_port *port = NULL;
349 struct sas_task *t = task; 367 struct sas_task *t = task;
350 struct pm8001_ccb_info *ccb; 368 struct pm8001_ccb_info *ccb;
351 u32 tag = 0xdeadbeef, rc, n_elem = 0; 369 u32 tag = 0xdeadbeef, rc, n_elem = 0;
352 u32 n = num; 370 u32 n = num;
353 unsigned long flags = 0; 371 unsigned long flags = 0, flags_libsas = 0;
354 372
355 if (!dev->port) { 373 if (!dev->port) {
356 struct task_status_struct *tsm = &t->task_status; 374 struct task_status_struct *tsm = &t->task_status;
@@ -379,6 +397,35 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
379 rc = SAS_PHY_DOWN; 397 rc = SAS_PHY_DOWN;
380 goto out_done; 398 goto out_done;
381 } 399 }
400 port = &pm8001_ha->port[sas_find_local_port_id(dev)];
401 if (!port->port_attached) {
402 if (sas_protocol_ata(t->task_proto)) {
403 struct task_status_struct *ts = &t->task_status;
404 ts->resp = SAS_TASK_UNDELIVERED;
405 ts->stat = SAS_PHY_DOWN;
406
407 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
408 spin_unlock_irqrestore(dev->sata_dev.ap->lock,
409 flags_libsas);
410 t->task_done(t);
411 spin_lock_irqsave(dev->sata_dev.ap->lock,
412 flags_libsas);
413 spin_lock_irqsave(&pm8001_ha->lock, flags);
414 if (n > 1)
415 t = list_entry(t->list.next,
416 struct sas_task, list);
417 continue;
418 } else {
419 struct task_status_struct *ts = &t->task_status;
420 ts->resp = SAS_TASK_UNDELIVERED;
421 ts->stat = SAS_PHY_DOWN;
422 t->task_done(t);
423 if (n > 1)
424 t = list_entry(t->list.next,
425 struct sas_task, list);
426 continue;
427 }
428 }
382 rc = pm8001_tag_alloc(pm8001_ha, &tag); 429 rc = pm8001_tag_alloc(pm8001_ha, &tag);
383 if (rc) 430 if (rc)
384 goto err_out; 431 goto err_out;
@@ -569,11 +616,11 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
569 spin_lock_irqsave(&pm8001_ha->lock, flags); 616 spin_lock_irqsave(&pm8001_ha->lock, flags);
570 617
571 pm8001_device = pm8001_alloc_dev(pm8001_ha); 618 pm8001_device = pm8001_alloc_dev(pm8001_ha);
572 pm8001_device->sas_device = dev;
573 if (!pm8001_device) { 619 if (!pm8001_device) {
574 res = -1; 620 res = -1;
575 goto found_out; 621 goto found_out;
576 } 622 }
623 pm8001_device->sas_device = dev;
577 dev->lldd_dev = pm8001_device; 624 dev->lldd_dev = pm8001_device;
578 pm8001_device->dev_type = dev->dev_type; 625 pm8001_device->dev_type = dev->dev_type;
579 pm8001_device->dcompletion = &completion; 626 pm8001_device->dcompletion = &completion;
@@ -609,7 +656,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
609 wait_for_completion(&completion); 656 wait_for_completion(&completion);
610 if (dev->dev_type == SAS_END_DEV) 657 if (dev->dev_type == SAS_END_DEV)
611 msleep(50); 658 msleep(50);
612 pm8001_ha->flags = PM8001F_RUN_TIME ; 659 pm8001_ha->flags |= PM8001F_RUN_TIME ;
613 return 0; 660 return 0;
614found_out: 661found_out:
615 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 662 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -772,7 +819,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
772 task->task_done = pm8001_task_done; 819 task->task_done = pm8001_task_done;
773 task->timer.data = (unsigned long)task; 820 task->timer.data = (unsigned long)task;
774 task->timer.function = pm8001_tmf_timedout; 821 task->timer.function = pm8001_tmf_timedout;
775 task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; 822 task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
776 add_timer(&task->timer); 823 add_timer(&task->timer);
777 824
778 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); 825 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
@@ -897,6 +944,8 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
897 944
898 if (dev_is_sata(dev)) { 945 if (dev_is_sata(dev)) {
899 DECLARE_COMPLETION_ONSTACK(completion_setstate); 946 DECLARE_COMPLETION_ONSTACK(completion_setstate);
947 if (scsi_is_sas_phy_local(phy))
948 return 0;
900 rc = sas_phy_reset(phy, 1); 949 rc = sas_phy_reset(phy, 1);
901 msleep(2000); 950 msleep(2000);
902 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , 951 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
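sas_find_local_port_id() in the hunk above climbs the domain_device parent chain until it reaches the device attached directly to the HBA and returns that device's port id, which lets pm8001_task_exec() check port_attached on the correct local port. A minimal stand-alone sketch of the same walk; struct node and struct port below are simplified stand-ins, not the libsas types:

#include <stdio.h>
#include <stddef.h>

struct port { int id; };

struct node {
	struct node *parent;	/* NULL for a directly attached device */
	struct port *port;
};

/* Return the port id of the topmost ancestor, i.e. the HBA-local port. */
static int find_local_port_id(const struct node *dev)
{
	if (!dev->parent)			/* directly attached device */
		return dev->port->id;
	while (dev->parent) {
		if (!dev->parent->parent)	/* parent is the root: its port is local */
			return dev->parent->port->id;
		dev = dev->parent;
	}
	return 0;
}

int main(void)
{
	struct port hba_port = { .id = 3 };
	struct node expander = { .parent = NULL, .port = &hba_port };
	struct node disk = { .parent = &expander, .port = &hba_port };

	printf("local port id = %d\n", find_local_port_id(&disk));
	return 0;
}
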
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 30f2ede55a75..8e38ca8cd101 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -59,11 +59,11 @@
59 59
60#define DRV_NAME "pm8001" 60#define DRV_NAME "pm8001"
61#define DRV_VERSION "0.1.36" 61#define DRV_VERSION "0.1.36"
62#define PM8001_FAIL_LOGGING 0x01 /* libsas EH function logging */ 62#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
63#define PM8001_INIT_LOGGING 0x02 /* driver init logging */ 63#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
64#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ 64#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
65#define PM8001_IO_LOGGING 0x08 /* I/O path logging */ 65#define PM8001_IO_LOGGING 0x08 /* I/O path logging */
66#define PM8001_EH_LOGGING 0x10 /* Error message logging */ 66#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
67#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ 67#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
68#define PM8001_MSG_LOGGING 0x40 /* misc message logging */ 68#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
69#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\ 69#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\
@@ -100,6 +100,7 @@ do { \
100 100
101#define PM8001_USE_TASKLET 101#define PM8001_USE_TASKLET
102#define PM8001_USE_MSIX 102#define PM8001_USE_MSIX
103#define PM8001_READ_VPD
103 104
104 105
105#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV)) 106#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV))
@@ -111,7 +112,22 @@ extern const struct pm8001_dispatch pm8001_8001_dispatch;
111struct pm8001_hba_info; 112struct pm8001_hba_info;
112struct pm8001_ccb_info; 113struct pm8001_ccb_info;
113struct pm8001_device; 114struct pm8001_device;
114struct pm8001_tmf_task; 115/* define task management IU */
116struct pm8001_tmf_task {
117 u8 tmf;
118 u32 tag_of_task_to_be_managed;
119};
120struct pm8001_ioctl_payload {
121 u32 signature;
122 u16 major_function;
123 u16 minor_function;
124 u16 length;
125 u16 status;
126 u16 offset;
127 u16 id;
128 u8 *func_specific;
129};
130
115struct pm8001_dispatch { 131struct pm8001_dispatch {
116 char *name; 132 char *name;
117 int (*chip_init)(struct pm8001_hba_info *pm8001_ha); 133 int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
@@ -164,6 +180,10 @@ struct pm8001_chip_info {
164 180
165struct pm8001_port { 181struct pm8001_port {
166 struct asd_sas_port sas_port; 182 struct asd_sas_port sas_port;
183 u8 port_attached;
184 u8 wide_port_phymap;
185 u8 port_state;
186 struct list_head list;
167}; 187};
168 188
169struct pm8001_phy { 189struct pm8001_phy {
@@ -386,11 +406,7 @@ struct pm8001_fw_image_header {
386 __be32 startup_entry; 406 __be32 startup_entry;
387} __attribute__((packed, aligned(4))); 407} __attribute__((packed, aligned(4)));
388 408
389/* define task management IU */ 409
390struct pm8001_tmf_task {
391 u8 tmf;
392 u32 tag_of_task_to_be_managed;
393};
394/** 410/**
395 * FW Flash Update status values 411 * FW Flash Update status values
396 */ 412 */
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 34c6b896a91b..e7d2688fbeba 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters 2 * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
3 * 3 *
4 * Written By: PMC Sierra Corporation 4 * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
5 * PMC-Sierra Inc
5 * 6 *
6 * Copyright (C) 2008, 2009 PMC Sierra Inc 7 * Copyright (C) 2008, 2009 PMC Sierra Inc
7 * 8 *
@@ -79,7 +80,7 @@ DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
79/* 80/*
80 * Module parameters 81 * Module parameters
81 */ 82 */
82MODULE_AUTHOR("PMC Sierra Corporation, anil_ravindranath@pmc-sierra.com"); 83MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
83MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver"); 84MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
84MODULE_LICENSE("GPL"); 85MODULE_LICENSE("GPL");
85MODULE_VERSION(PMCRAID_DRIVER_VERSION); 86MODULE_VERSION(PMCRAID_DRIVER_VERSION);
@@ -162,10 +163,10 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
162 spin_lock_irqsave(&pinstance->resource_lock, lock_flags); 163 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
163 list_for_each_entry(temp, &pinstance->used_res_q, queue) { 164 list_for_each_entry(temp, &pinstance->used_res_q, queue) {
164 165
165 /* do not expose VSETs with order-ids >= 240 */ 166 /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
166 if (RES_IS_VSET(temp->cfg_entry)) { 167 if (RES_IS_VSET(temp->cfg_entry)) {
167 target = temp->cfg_entry.unique_flags1; 168 target = temp->cfg_entry.unique_flags1;
168 if (target >= PMCRAID_MAX_VSET_TARGETS) 169 if (target > PMCRAID_MAX_VSET_TARGETS)
169 continue; 170 continue;
170 bus = PMCRAID_VSET_BUS_ID; 171 bus = PMCRAID_VSET_BUS_ID;
171 lun = 0; 172 lun = 0;
@@ -1210,7 +1211,7 @@ static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte)
1210 int retval = 0; 1211 int retval = 0;
1211 1212
1212 if (cfgte->resource_type == RES_TYPE_VSET) 1213 if (cfgte->resource_type == RES_TYPE_VSET)
1213 retval = ((cfgte->unique_flags1 & 0xFF) < 0xFE); 1214 retval = ((cfgte->unique_flags1 & 0x80) == 0);
1214 else if (cfgte->resource_type == RES_TYPE_GSCSI) 1215 else if (cfgte->resource_type == RES_TYPE_GSCSI)
1215 retval = (RES_BUS(cfgte->resource_address) != 1216 retval = (RES_BUS(cfgte->resource_address) !=
1216 PMCRAID_VIRTUAL_ENCL_BUS_ID); 1217 PMCRAID_VIRTUAL_ENCL_BUS_ID);
@@ -1361,6 +1362,7 @@ static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
1361 * Return value: 1362 * Return value:
1362 * none 1363 * none
1363 */ 1364 */
1365
1364static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance) 1366static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1365{ 1367{
1366 struct pmcraid_config_table_entry *cfg_entry; 1368 struct pmcraid_config_table_entry *cfg_entry;
@@ -1368,9 +1370,10 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1368 struct pmcraid_cmd *cmd; 1370 struct pmcraid_cmd *cmd;
1369 struct pmcraid_cmd *cfgcmd; 1371 struct pmcraid_cmd *cfgcmd;
1370 struct pmcraid_resource_entry *res = NULL; 1372 struct pmcraid_resource_entry *res = NULL;
1371 u32 new_entry = 1;
1372 unsigned long lock_flags; 1373 unsigned long lock_flags;
1373 unsigned long host_lock_flags; 1374 unsigned long host_lock_flags;
1375 u32 new_entry = 1;
1376 u32 hidden_entry = 0;
1374 int rc; 1377 int rc;
1375 1378
1376 ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam; 1379 ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
@@ -1406,9 +1409,15 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1406 } 1409 }
1407 1410
1408 /* If this resource is not going to be added to mid-layer, just notify 1411 /* If this resource is not going to be added to mid-layer, just notify
1409 * applications and return 1412 * applications and return. If this notification is about hiding a VSET
1413 * resource, check if it was exposed already.
1410 */ 1414 */
1411 if (!pmcraid_expose_resource(cfg_entry)) 1415 if (pinstance->ccn.hcam->notification_type ==
1416 NOTIFICATION_TYPE_ENTRY_CHANGED &&
1417 cfg_entry->resource_type == RES_TYPE_VSET &&
1418 cfg_entry->unique_flags1 & 0x80) {
1419 hidden_entry = 1;
1420 } else if (!pmcraid_expose_resource(cfg_entry))
1412 goto out_notify_apps; 1421 goto out_notify_apps;
1413 1422
1414 spin_lock_irqsave(&pinstance->resource_lock, lock_flags); 1423 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
@@ -1424,6 +1433,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1424 1433
1425 if (new_entry) { 1434 if (new_entry) {
1426 1435
1436 if (hidden_entry) {
1437 spin_unlock_irqrestore(&pinstance->resource_lock,
1438 lock_flags);
1439 goto out_notify_apps;
1440 }
1441
1427 /* If there are more number of resources than what driver can 1442 /* If there are more number of resources than what driver can
1428 * manage, do not notify the applications about the CCN. Just 1443 * manage, do not notify the applications about the CCN. Just
1429 * ignore this notifications and re-register the same HCAM 1444 * ignore this notifications and re-register the same HCAM
@@ -1454,8 +1469,9 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
1454 sizeof(struct pmcraid_config_table_entry)); 1469 sizeof(struct pmcraid_config_table_entry));
1455 1470
1456 if (pinstance->ccn.hcam->notification_type == 1471 if (pinstance->ccn.hcam->notification_type ==
1457 NOTIFICATION_TYPE_ENTRY_DELETED) { 1472 NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
1458 if (res->scsi_dev) { 1473 if (res->scsi_dev) {
1474 res->cfg_entry.unique_flags1 &= 0x7F;
1459 res->change_detected = RES_CHANGE_DEL; 1475 res->change_detected = RES_CHANGE_DEL;
1460 res->cfg_entry.resource_handle = 1476 res->cfg_entry.resource_handle =
1461 PMCRAID_INVALID_RES_HANDLE; 1477 PMCRAID_INVALID_RES_HANDLE;
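In the pmcraid hunks above, bit 7 of unique_flags1 marks a VSET resource as hidden; that is why the exposed target range shrinks to 0x7F and why a hidden entry has the bit cleared with "&= 0x7F" before the resource is torn down. A small sketch of that flag handling under the same assumption; the macro and function names are illustrative, not the driver's own:

#include <stdio.h>
#include <stdint.h>

#define VSET_HIDDEN_BIT   0x80	/* bit 7: hide the VSET from the mid-layer */
#define VSET_TARGET_MASK  0x7F	/* low bits: target id, 0..127 */

static int vset_is_hidden(uint8_t unique_flags1)
{
	return (unique_flags1 & VSET_HIDDEN_BIT) != 0;
}

static int vset_target_id(uint8_t unique_flags1)
{
	return unique_flags1 & VSET_TARGET_MASK;
}

int main(void)
{
	uint8_t exposed = 0x05;		/* target 5, visible */
	uint8_t hidden  = 0x85;		/* target 5, hidden bit set */

	printf("0x%02x: target %d, hidden=%d\n",
	       exposed, vset_target_id(exposed), vset_is_hidden(exposed));
	printf("0x%02x: target %d, hidden=%d\n",
	       hidden, vset_target_id(hidden), vset_is_hidden(hidden));
	return 0;
}
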
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 2752b56cad56..92f89d50850c 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -1,6 +1,9 @@
1/* 1/*
2 * pmcraid.h -- PMC Sierra MaxRAID controller driver header file 2 * pmcraid.h -- PMC Sierra MaxRAID controller driver header file
3 * 3 *
4 * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
5 * PMC-Sierra Inc
6 *
4 * Copyright (C) 2008, 2009 PMC Sierra Inc. 7 * Copyright (C) 2008, 2009 PMC Sierra Inc.
5 * 8 *
6 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
@@ -106,7 +109,7 @@
106#define PMCRAID_VSET_LUN_ID 0x0 109#define PMCRAID_VSET_LUN_ID 0x0
107#define PMCRAID_PHYS_BUS_ID 0x0 110#define PMCRAID_PHYS_BUS_ID 0x0
108#define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8 111#define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8
109#define PMCRAID_MAX_VSET_TARGETS 240 112#define PMCRAID_MAX_VSET_TARGETS 0x7F
110#define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8 113#define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8
111 114
112#define PMCRAID_IOA_MAX_SECTORS 32767 115#define PMCRAID_IOA_MAX_SECTORS 32767
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6b9bf23c7735..384afda7dbe9 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1570,9 +1570,6 @@ typedef struct fc_port {
1570 struct fc_rport *rport, *drport; 1570 struct fc_rport *rport, *drport;
1571 u32 supported_classes; 1571 u32 supported_classes;
1572 1572
1573 unsigned long last_queue_full;
1574 unsigned long last_ramp_up;
1575
1576 uint16_t vp_idx; 1573 uint16_t vp_idx;
1577} fc_port_t; 1574} fc_port_t;
1578 1575
@@ -2265,6 +2262,7 @@ struct qla_hw_data {
2265 uint32_t port0 :1; 2262 uint32_t port0 :1;
2266 uint32_t running_gold_fw :1; 2263 uint32_t running_gold_fw :1;
2267 uint32_t cpu_affinity_enabled :1; 2264 uint32_t cpu_affinity_enabled :1;
2265 uint32_t disable_msix_handshake :1;
2268 } flags; 2266 } flags;
2269 2267
2270 /* This spinlock is used to protect "io transactions", you must 2268 /* This spinlock is used to protect "io transactions", you must
@@ -2387,6 +2385,7 @@ struct qla_hw_data {
2387#define IS_QLA81XX(ha) (IS_QLA8001(ha)) 2385#define IS_QLA81XX(ha) (IS_QLA8001(ha))
2388#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ 2386#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
2389 IS_QLA25XX(ha) || IS_QLA81XX(ha)) 2387 IS_QLA25XX(ha) || IS_QLA81XX(ha))
2388#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
2390#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \ 2389#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
2391 (ha)->flags.msix_enabled) 2390 (ha)->flags.msix_enabled)
2392#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha)) 2391#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index e21851358509..0b6801fc6389 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -72,8 +72,6 @@ extern int ql2xloginretrycount;
72extern int ql2xfdmienable; 72extern int ql2xfdmienable;
73extern int ql2xallocfwdump; 73extern int ql2xallocfwdump;
74extern int ql2xextended_error_logging; 74extern int ql2xextended_error_logging;
75extern int ql2xqfullrampup;
76extern int ql2xqfulltracking;
77extern int ql2xiidmaenable; 75extern int ql2xiidmaenable;
78extern int ql2xmaxqueues; 76extern int ql2xmaxqueues;
79extern int ql2xmultique_tag; 77extern int ql2xmultique_tag;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b74924b279ef..73a793539d45 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1442,7 +1442,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1442 icb->firmware_options_2 |= 1442 icb->firmware_options_2 |=
1443 __constant_cpu_to_le32(BIT_18); 1443 __constant_cpu_to_le32(BIT_18);
1444 1444
1445 icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22); 1445 /* Use Disable MSIX Handshake mode for capable adapters */
1446 if (IS_MSIX_NACK_CAPABLE(ha)) {
1447 icb->firmware_options_2 &=
1448 __constant_cpu_to_le32(~BIT_22);
1449 ha->flags.disable_msix_handshake = 1;
1450 qla_printk(KERN_INFO, ha,
1451 "MSIX Handshake Disable Mode turned on\n");
1452 } else {
1453 icb->firmware_options_2 |=
1454 __constant_cpu_to_le32(BIT_22);
1455 }
1446 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); 1456 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
1447 1457
1448 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0); 1458 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
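The qla24xx_config_rings() hunk above clears BIT_22 in the little-endian firmware_options_2 word to switch the MSI-X handshake off on capable (ISP81xx) adapters and sets the bit otherwise. A user-space sketch of masking a bit in a field kept in little-endian byte order; the cpu_to_le32() helper below is a portable stand-in for the kernel's, and the starting register value is invented:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define BIT_22 (1u << 22)

/* Store a CPU-order value in little-endian byte order (portable stand-in). */
static uint32_t cpu_to_le32(uint32_t v)
{
	uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
			 (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
	uint32_t le;

	memcpy(&le, b, sizeof(le));
	return le;
}

int main(void)
{
	uint32_t fw_options_2 = cpu_to_le32(0x00C00000);	/* invented value */
	int msix_nack_capable = 1;				/* e.g. an ISP81xx */

	/* Masks are converted to the same byte order before AND/OR, as in the hunk. */
	if (msix_nack_capable)
		fw_options_2 &= cpu_to_le32(~BIT_22);	/* handshake disabled */
	else
		fw_options_2 |= cpu_to_le32(BIT_22);	/* handshake enabled  */

	printf("firmware_options_2 (stored little-endian) = 0x%08x\n", fw_options_2);
	return 0;
}

Keeping the conversion on the constant rather than on the register mirrors the driver: the field stays in wire byte order the whole time and only the mask is swapped.
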
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 804987397b77..1692a883f4de 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -811,78 +811,6 @@ skip_rio:
811 qla2x00_alert_all_vps(rsp, mb); 811 qla2x00_alert_all_vps(rsp, mb);
812} 812}
813 813
814static void
815qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
816{
817 fc_port_t *fcport = data;
818 struct scsi_qla_host *vha = fcport->vha;
819 struct qla_hw_data *ha = vha->hw;
820 struct req_que *req = NULL;
821
822 if (!ql2xqfulltracking)
823 return;
824
825 req = vha->req;
826 if (!req)
827 return;
828 if (req->max_q_depth <= sdev->queue_depth)
829 return;
830
831 if (sdev->ordered_tags)
832 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
833 sdev->queue_depth + 1);
834 else
835 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
836 sdev->queue_depth + 1);
837
838 fcport->last_ramp_up = jiffies;
839
840 DEBUG2(qla_printk(KERN_INFO, ha,
841 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
842 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
843 sdev->queue_depth));
844}
845
846static void
847qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
848{
849 fc_port_t *fcport = data;
850
851 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
852 return;
853
854 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
855 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
856 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
857 sdev->queue_depth));
858}
859
860static inline void
861qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
862 srb_t *sp)
863{
864 fc_port_t *fcport;
865 struct scsi_device *sdev;
866
867 if (!ql2xqfulltracking)
868 return;
869
870 sdev = sp->cmd->device;
871 if (sdev->queue_depth >= req->max_q_depth)
872 return;
873
874 fcport = sp->fcport;
875 if (time_before(jiffies,
876 fcport->last_ramp_up + ql2xqfullrampup * HZ))
877 return;
878 if (time_before(jiffies,
879 fcport->last_queue_full + ql2xqfullrampup * HZ))
880 return;
881
882 starget_for_each_device(sdev->sdev_target, fcport,
883 qla2x00_adjust_sdev_qdepth_up);
884}
885
886/** 814/**
887 * qla2x00_process_completed_request() - Process a Fast Post response. 815 * qla2x00_process_completed_request() - Process a Fast Post response.
888 * @ha: SCSI driver HA context 816 * @ha: SCSI driver HA context
@@ -913,8 +841,6 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
913 841
914 /* Save ISP completion status */ 842 /* Save ISP completion status */
915 sp->cmd->result = DID_OK << 16; 843 sp->cmd->result = DID_OK << 16;
916
917 qla2x00_ramp_up_queue_depth(vha, req, sp);
918 qla2x00_sp_compl(ha, sp); 844 qla2x00_sp_compl(ha, sp);
919 } else { 845 } else {
920 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion" 846 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
@@ -1435,13 +1361,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1435 "scsi(%ld): QUEUE FULL status detected " 1361 "scsi(%ld): QUEUE FULL status detected "
1436 "0x%x-0x%x.\n", vha->host_no, comp_status, 1362 "0x%x-0x%x.\n", vha->host_no, comp_status,
1437 scsi_status)); 1363 scsi_status));
1438
1439 /* Adjust queue depth for all luns on the port. */
1440 if (!ql2xqfulltracking)
1441 break;
1442 fcport->last_queue_full = jiffies;
1443 starget_for_each_device(cp->device->sdev_target,
1444 fcport, qla2x00_adjust_sdev_qdepth_down);
1445 break; 1364 break;
1446 } 1365 }
1447 if (lscsi_status != SS_CHECK_CONDITION) 1366 if (lscsi_status != SS_CHECK_CONDITION)
@@ -1516,17 +1435,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1516 "scsi(%ld): QUEUE FULL status detected " 1435 "scsi(%ld): QUEUE FULL status detected "
1517 "0x%x-0x%x.\n", vha->host_no, comp_status, 1436 "0x%x-0x%x.\n", vha->host_no, comp_status,
1518 scsi_status)); 1437 scsi_status));
1519
1520 /*
1521 * Adjust queue depth for all luns on the
1522 * port.
1523 */
1524 if (!ql2xqfulltracking)
1525 break;
1526 fcport->last_queue_full = jiffies;
1527 starget_for_each_device(
1528 cp->device->sdev_target, fcport,
1529 qla2x00_adjust_sdev_qdepth_down);
1530 break; 1438 break;
1531 } 1439 }
1532 if (lscsi_status != SS_CHECK_CONDITION) 1440 if (lscsi_status != SS_CHECK_CONDITION)
@@ -2020,7 +1928,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
2020 1928
2021 vha = qla25xx_get_host(rsp); 1929 vha = qla25xx_get_host(rsp);
2022 qla24xx_process_response_queue(vha, rsp); 1930 qla24xx_process_response_queue(vha, rsp);
2023 if (!ha->mqenable) { 1931 if (!ha->flags.disable_msix_handshake) {
2024 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1932 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2025 RD_REG_DWORD_RELAXED(&reg->hccr); 1933 RD_REG_DWORD_RELAXED(&reg->hccr);
2026 } 1934 }
@@ -2034,6 +1942,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
2034{ 1942{
2035 struct qla_hw_data *ha; 1943 struct qla_hw_data *ha;
2036 struct rsp_que *rsp; 1944 struct rsp_que *rsp;
1945 struct device_reg_24xx __iomem *reg;
2037 1946
2038 rsp = (struct rsp_que *) dev_id; 1947 rsp = (struct rsp_que *) dev_id;
2039 if (!rsp) { 1948 if (!rsp) {
@@ -2043,6 +1952,14 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
2043 } 1952 }
2044 ha = rsp->hw; 1953 ha = rsp->hw;
2045 1954
1955 /* Clear the interrupt, if enabled, for this response queue */
1956 if (rsp->options & ~BIT_6) {
1957 reg = &ha->iobase->isp24;
1958 spin_lock_irq(&ha->hardware_lock);
1959 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1960 RD_REG_DWORD_RELAXED(&reg->hccr);
1961 spin_unlock_irq(&ha->hardware_lock);
1962 }
2046 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 1963 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2047 1964
2048 return IRQ_HANDLED; 1965 return IRQ_HANDLED;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index a47d34308a3a..2a4c7f4e7b69 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -696,6 +696,10 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
696 /* Use alternate PCI devfn */ 696 /* Use alternate PCI devfn */
697 if (LSB(rsp->rid)) 697 if (LSB(rsp->rid))
698 options |= BIT_5; 698 options |= BIT_5;
699 /* Enable MSIX handshake mode on for uncapable adapters */
700 if (!IS_MSIX_NACK_CAPABLE(ha))
701 options |= BIT_6;
702
699 rsp->options = options; 703 rsp->options = options;
700 rsp->id = que_id; 704 rsp->id = que_id;
701 reg = ISP_QUE_REG(ha, que_id); 705 reg = ISP_QUE_REG(ha, que_id);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 41669357b186..2f873d237325 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -78,21 +78,6 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
78MODULE_PARM_DESC(ql2xmaxqdepth, 78MODULE_PARM_DESC(ql2xmaxqdepth,
79 "Maximum queue depth to report for target devices."); 79 "Maximum queue depth to report for target devices.");
80 80
81int ql2xqfulltracking = 1;
82module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
83MODULE_PARM_DESC(ql2xqfulltracking,
84 "Controls whether the driver tracks queue full status "
85 "returns and dynamically adjusts a scsi device's queue "
86 "depth. Default is 1, perform tracking. Set to 0 to "
87 "disable dynamic tracking and adjustment of queue depth.");
88
89int ql2xqfullrampup = 120;
90module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
91MODULE_PARM_DESC(ql2xqfullrampup,
92 "Number of seconds to wait to begin to ramp-up the queue "
93 "depth for a device after a queue-full condition has been "
94 "detected. Default is 120 seconds.");
95
96int ql2xiidmaenable=1; 81int ql2xiidmaenable=1;
97module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR); 82module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
98MODULE_PARM_DESC(ql2xiidmaenable, 83MODULE_PARM_DESC(ql2xiidmaenable,
@@ -1217,13 +1202,61 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
1217 sdev->hostdata = NULL; 1202 sdev->hostdata = NULL;
1218} 1203}
1219 1204
1205static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1206{
1207 fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
1208
1209 if (!scsi_track_queue_full(sdev, qdepth))
1210 return;
1211
1212 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
1213 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
1214 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1215 sdev->queue_depth));
1216}
1217
1218static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1219{
1220 fc_port_t *fcport = sdev->hostdata;
1221 struct scsi_qla_host *vha = fcport->vha;
1222 struct qla_hw_data *ha = vha->hw;
1223 struct req_que *req = NULL;
1224
1225 req = vha->req;
1226 if (!req)
1227 return;
1228
1229 if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
1230 return;
1231
1232 if (sdev->ordered_tags)
1233 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
1234 else
1235 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1236
1237 DEBUG2(qla_printk(KERN_INFO, ha,
1238 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
1239 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1240 sdev->queue_depth));
1241}
1242
1220static int 1243static int
1221qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) 1244qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1222{ 1245{
1223 if (reason != SCSI_QDEPTH_DEFAULT) 1246 switch (reason) {
1224 return -EOPNOTSUPP; 1247 case SCSI_QDEPTH_DEFAULT:
1248 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1249 break;
1250 case SCSI_QDEPTH_QFULL:
1251 qla2x00_handle_queue_full(sdev, qdepth);
1252 break;
1253 case SCSI_QDEPTH_RAMP_UP:
1254 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
1255 break;
1256 default:
1257 return EOPNOTSUPP;
1258 }
1225 1259
1226 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1227 return sdev->queue_depth; 1260 return sdev->queue_depth;
1228} 1261}
1229 1262
@@ -2003,13 +2036,13 @@ skip_dpc:
2003 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 2036 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
2004 base_vha->host_no, ha)); 2037 base_vha->host_no, ha));
2005 2038
2006 base_vha->flags.init_done = 1;
2007 base_vha->flags.online = 1;
2008
2009 ret = scsi_add_host(host, &pdev->dev); 2039 ret = scsi_add_host(host, &pdev->dev);
2010 if (ret) 2040 if (ret)
2011 goto probe_failed; 2041 goto probe_failed;
2012 2042
2043 base_vha->flags.init_done = 1;
2044 base_vha->flags.online = 1;
2045
2013 ha->isp_ops->enable_intrs(ha); 2046 ha->isp_ops->enable_intrs(ha);
2014 2047
2015 scsi_scan_host(host); 2048 scsi_scan_host(host);
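With the queue-full tracking logic removed from qla_isr.c, the qla_os.c hunks above route every queue-depth change through one callback that dispatches on the reason code (default, queue-full, ramp-up). A stand-alone sketch of that dispatch shape; the enum, the struct and the helpers are simplified stand-ins for the SCSI mid-layer types, and the bounds checks only approximate what the driver does:

#include <stdio.h>
#include <errno.h>

enum qdepth_reason { QDEPTH_DEFAULT, QDEPTH_QFULL, QDEPTH_RAMP_UP };

struct sdev { int queue_depth; int max_depth; };

static void set_depth(struct sdev *s, int depth) { s->queue_depth = depth; }

static int change_queue_depth(struct sdev *s, int qdepth, enum qdepth_reason why)
{
	switch (why) {
	case QDEPTH_DEFAULT:
		set_depth(s, qdepth);
		break;
	case QDEPTH_QFULL:		/* back off after a QUEUE FULL status */
		if (qdepth < s->queue_depth)
			set_depth(s, qdepth);
		break;
	case QDEPTH_RAMP_UP:		/* grow again, but stay within bounds */
		if (qdepth > s->queue_depth && qdepth <= s->max_depth)
			set_depth(s, qdepth);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return s->queue_depth;		/* callback reports the resulting depth */
}

int main(void)
{
	struct sdev disk = { .queue_depth = 32, .max_depth = 64 };

	printf("ramp-up    -> depth %d\n", change_queue_depth(&disk, 33, QDEPTH_RAMP_UP));
	printf("queue-full -> depth %d\n", change_queue_depth(&disk, 16, QDEPTH_QFULL));
	return 0;
}
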
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 807e0dbc67fa..c482220f7eed 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.01-k7" 10#define QLA2XXX_VERSION "8.03.01-k8"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e495d3813948..d8927681ec88 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -859,6 +859,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
859 case 0x07: /* operation in progress */ 859 case 0x07: /* operation in progress */
860 case 0x08: /* Long write in progress */ 860 case 0x08: /* Long write in progress */
861 case 0x09: /* self test in progress */ 861 case 0x09: /* self test in progress */
862 case 0x14: /* space allocation in progress */
862 action = ACTION_DELAYED_RETRY; 863 action = ACTION_DELAYED_RETRY;
863 break; 864 break;
864 default: 865 default:
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 6531c91501be..ddfcecd5099f 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -649,11 +649,22 @@ static __init int fc_transport_init(void)
649 return error; 649 return error;
650 error = transport_class_register(&fc_vport_class); 650 error = transport_class_register(&fc_vport_class);
651 if (error) 651 if (error)
652 return error; 652 goto unreg_host_class;
653 error = transport_class_register(&fc_rport_class); 653 error = transport_class_register(&fc_rport_class);
654 if (error) 654 if (error)
655 return error; 655 goto unreg_vport_class;
656 return transport_class_register(&fc_transport_class); 656 error = transport_class_register(&fc_transport_class);
657 if (error)
658 goto unreg_rport_class;
659 return 0;
660
661unreg_rport_class:
662 transport_class_unregister(&fc_rport_class);
663unreg_vport_class:
664 transport_class_unregister(&fc_vport_class);
665unreg_host_class:
666 transport_class_unregister(&fc_host_class);
667 return error;
657} 668}
658 669
659static void __exit fc_transport_exit(void) 670static void __exit fc_transport_exit(void)
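The fc_transport_init() fix above unregisters the classes that were already registered when a later registration fails, instead of returning with them leaked. A minimal sketch of that goto-unwind initialization pattern with stub register/unregister functions; everything below is illustrative and is not the transport-class API:

#include <stdio.h>

static int register_a(void) { puts("A registered"); return 0; }
static int register_b(void) { puts("B registered"); return 0; }
static int register_c(void) { puts("C failed");     return -1; }
static void unregister_a(void) { puts("A unregistered"); }
static void unregister_b(void) { puts("B unregistered"); }

static int subsystem_init(void)
{
	int error;

	error = register_a();
	if (error)
		return error;
	error = register_b();
	if (error)
		goto unreg_a;
	error = register_c();
	if (error)
		goto unreg_b;
	return 0;

unreg_b:		/* unwind in the reverse order of setup */
	unregister_b();
unreg_a:
	unregister_a();
	return error;
}

int main(void)
{
	return subsystem_init() ? 1 : 0;
}

Each failure point jumps to a label that runs exactly the cleanups for the steps that had already succeeded, which is why the labels appear in reverse registration order.
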
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9093c7261f33..255da53e5a01 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -264,6 +264,15 @@ sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
264 return snprintf(buf, 20, "%u\n", sdkp->ATO); 264 return snprintf(buf, 20, "%u\n", sdkp->ATO);
265} 265}
266 266
267static ssize_t
268sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
269 char *buf)
270{
271 struct scsi_disk *sdkp = to_scsi_disk(dev);
272
273 return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
274}
275
267static struct device_attribute sd_disk_attrs[] = { 276static struct device_attribute sd_disk_attrs[] = {
268 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, 277 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
269 sd_store_cache_type), 278 sd_store_cache_type),
@@ -274,6 +283,7 @@ static struct device_attribute sd_disk_attrs[] = {
274 sd_store_manage_start_stop), 283 sd_store_manage_start_stop),
275 __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL), 284 __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
276 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL), 285 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
286 __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
277 __ATTR_NULL, 287 __ATTR_NULL,
278}; 288};
279 289
@@ -399,6 +409,57 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
399} 409}
400 410
401/** 411/**
412 * sd_prepare_discard - unmap blocks on thinly provisioned device
413 * @rq: Request to prepare
414 *
415 * Will issue either UNMAP or WRITE SAME(16) depending on preference
416 * indicated by target device.
417 **/
418static int sd_prepare_discard(struct request *rq)
419{
420 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
421 struct bio *bio = rq->bio;
422 sector_t sector = bio->bi_sector;
423 unsigned int num = bio_sectors(bio);
424
425 if (sdkp->device->sector_size == 4096) {
426 sector >>= 3;
427 num >>= 3;
428 }
429
430 rq->cmd_type = REQ_TYPE_BLOCK_PC;
431 rq->timeout = SD_TIMEOUT;
432
433 memset(rq->cmd, 0, rq->cmd_len);
434
435 if (sdkp->unmap) {
436 char *buf = kmap_atomic(bio_page(bio), KM_USER0);
437
438 rq->cmd[0] = UNMAP;
439 rq->cmd[8] = 24;
440 rq->cmd_len = 10;
441
442 /* Ensure that data length matches payload */
443 rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24;
444
445 put_unaligned_be16(6 + 16, &buf[0]);
446 put_unaligned_be16(16, &buf[2]);
447 put_unaligned_be64(sector, &buf[8]);
448 put_unaligned_be32(num, &buf[16]);
449
450 kunmap_atomic(buf, KM_USER0);
451 } else {
452 rq->cmd[0] = WRITE_SAME_16;
453 rq->cmd[1] = 0x8; /* UNMAP */
454 put_unaligned_be64(sector, &rq->cmd[2]);
455 put_unaligned_be32(num, &rq->cmd[10]);
456 rq->cmd_len = 16;
457 }
458
459 return BLKPREP_OK;
460}
461
462/**
402 * sd_init_command - build a scsi (read or write) command from 463 * sd_init_command - build a scsi (read or write) command from
403 * information in the request structure. 464 * information in the request structure.
404 * @SCpnt: pointer to mid-level's per scsi command structure that 465 * @SCpnt: pointer to mid-level's per scsi command structure that
@@ -418,6 +479,13 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
418 int ret, host_dif; 479 int ret, host_dif;
419 unsigned char protect; 480 unsigned char protect;
420 481
482 /*
483 * Discard request come in as REQ_TYPE_FS but we turn them into
484 * block PC requests to make life easier.
485 */
486 if (blk_discard_rq(rq))
487 ret = sd_prepare_discard(rq);
488
421 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 489 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
422 ret = scsi_setup_blk_pc_cmnd(sdp, rq); 490 ret = scsi_setup_blk_pc_cmnd(sdp, rq);
423 goto out; 491 goto out;
@@ -1432,6 +1500,19 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
1432 sd_printk(KERN_NOTICE, sdkp, 1500 sd_printk(KERN_NOTICE, sdkp,
1433 "physical block alignment offset: %u\n", alignment); 1501 "physical block alignment offset: %u\n", alignment);
1434 1502
1503 if (buffer[14] & 0x80) { /* TPE */
1504 struct request_queue *q = sdp->request_queue;
1505
1506 sdkp->thin_provisioning = 1;
1507 q->limits.discard_granularity = sdkp->hw_sector_size;
1508 q->limits.max_discard_sectors = 0xffffffff;
1509
1510 if (buffer[14] & 0x40) /* TPRZ */
1511 q->limits.discard_zeroes_data = 1;
1512
1513 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1514 }
1515
1435 sdkp->capacity = lba + 1; 1516 sdkp->capacity = lba + 1;
1436 return sector_size; 1517 return sector_size;
1437} 1518}
@@ -1863,6 +1944,7 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
1863 */ 1944 */
1864static void sd_read_block_limits(struct scsi_disk *sdkp) 1945static void sd_read_block_limits(struct scsi_disk *sdkp)
1865{ 1946{
1947 struct request_queue *q = sdkp->disk->queue;
1866 unsigned int sector_sz = sdkp->device->sector_size; 1948 unsigned int sector_sz = sdkp->device->sector_size;
1867 char *buffer; 1949 char *buffer;
1868 1950
@@ -1877,6 +1959,31 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
1877 blk_queue_io_opt(sdkp->disk->queue, 1959 blk_queue_io_opt(sdkp->disk->queue,
1878 get_unaligned_be32(&buffer[12]) * sector_sz); 1960 get_unaligned_be32(&buffer[12]) * sector_sz);
1879 1961
1962 /* Thin provisioning enabled and page length indicates TP support */
1963 if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
1964 unsigned int lba_count, desc_count, granularity;
1965
1966 lba_count = get_unaligned_be32(&buffer[20]);
1967 desc_count = get_unaligned_be32(&buffer[24]);
1968
1969 if (lba_count) {
1970 q->limits.max_discard_sectors =
1971 lba_count * sector_sz >> 9;
1972
1973 if (desc_count)
1974 sdkp->unmap = 1;
1975 }
1976
1977 granularity = get_unaligned_be32(&buffer[28]);
1978
1979 if (granularity)
1980 q->limits.discard_granularity = granularity * sector_sz;
1981
1982 if (buffer[32] & 0x80)
1983 q->limits.discard_alignment =
1984 get_unaligned_be32(&buffer[32]) & ~(1 << 31);
1985 }
1986
1880 kfree(buffer); 1987 kfree(buffer);
1881} 1988}
1882 1989
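sd_prepare_discard() above packs a 24-byte UNMAP parameter list (an 8-byte header plus one 16-byte block descriptor) in big-endian byte order, or falls back to a WRITE SAME(16) CDB with the UNMAP bit when the target prefers it. A stand-alone sketch of the UNMAP payload packing; the put_be* helpers are hand-rolled stand-ins for the kernel's put_unaligned_be16/32/64, and the LBA and block count in main() are arbitrary:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void put_be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = (uint8_t)v; }

static void put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = (uint8_t)v;
}

static void put_be64(uint64_t v, uint8_t *p)
{
	put_be32((uint32_t)(v >> 32), p);
	put_be32((uint32_t)v, p + 4);
}

/* Fill a 24-byte UNMAP parameter list describing a single extent. */
static void fill_unmap_param_list(uint8_t buf[24], uint64_t lba, uint32_t nblocks)
{
	memset(buf, 0, 24);
	put_be16(6 + 16, &buf[0]);	/* UNMAP data length             */
	put_be16(16, &buf[2]);		/* block descriptor data length  */
	put_be64(lba, &buf[8]);		/* first LBA of the extent       */
	put_be32(nblocks, &buf[16]);	/* number of logical blocks      */
}

int main(void)
{
	uint8_t buf[24];
	int i;

	fill_unmap_param_list(buf, 0x1000, 8);
	for (i = 0; i < 24; i++)
		printf("%02x%c", buf[i], (i % 8 == 7) ? '\n' : ' ');
	return 0;
}
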
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index e374804d26fb..43d3caf268ef 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -60,6 +60,8 @@ struct scsi_disk {
60 unsigned RCD : 1; /* state of disk RCD bit, unused */ 60 unsigned RCD : 1; /* state of disk RCD bit, unused */
61 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ 61 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
62 unsigned first_scan : 1; 62 unsigned first_scan : 1;
63 unsigned thin_provisioning : 1;
64 unsigned unmap : 1;
63}; 65};
64#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) 66#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
65 67
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ad59abb47722..d04ea9a6f673 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -552,13 +552,15 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
552 SRpnt->waiting = waiting; 552 SRpnt->waiting = waiting;
553 553
554 if (STp->buffer->do_dio) { 554 if (STp->buffer->do_dio) {
555 mdata->page_order = 0;
555 mdata->nr_entries = STp->buffer->sg_segs; 556 mdata->nr_entries = STp->buffer->sg_segs;
556 mdata->pages = STp->buffer->mapped_pages; 557 mdata->pages = STp->buffer->mapped_pages;
557 } else { 558 } else {
559 mdata->page_order = STp->buffer->reserved_page_order;
558 mdata->nr_entries = 560 mdata->nr_entries =
559 DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order); 561 DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
560 STp->buffer->map_data.pages = STp->buffer->reserved_pages; 562 mdata->pages = STp->buffer->reserved_pages;
561 STp->buffer->map_data.offset = 0; 563 mdata->offset = 0;
562 } 564 }
563 565
564 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd)); 566 memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
@@ -3719,7 +3721,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
3719 priority |= __GFP_ZERO; 3721 priority |= __GFP_ZERO;
3720 3722
3721 if (STbuffer->frp_segs) { 3723 if (STbuffer->frp_segs) {
3722 order = STbuffer->map_data.page_order; 3724 order = STbuffer->reserved_page_order;
3723 b_size = PAGE_SIZE << order; 3725 b_size = PAGE_SIZE << order;
3724 } else { 3726 } else {
3725 for (b_size = PAGE_SIZE, order = 0; 3727 for (b_size = PAGE_SIZE, order = 0;
@@ -3752,7 +3754,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
3752 segs++; 3754 segs++;
3753 } 3755 }
3754 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]); 3756 STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
3755 STbuffer->map_data.page_order = order; 3757 STbuffer->reserved_page_order = order;
3756 3758
3757 return 1; 3759 return 1;
3758} 3760}
@@ -3765,7 +3767,7 @@ static void clear_buffer(struct st_buffer * st_bp)
3765 3767
3766 for (i=0; i < st_bp->frp_segs; i++) 3768 for (i=0; i < st_bp->frp_segs; i++)
3767 memset(page_address(st_bp->reserved_pages[i]), 0, 3769 memset(page_address(st_bp->reserved_pages[i]), 0,
3768 PAGE_SIZE << st_bp->map_data.page_order); 3770 PAGE_SIZE << st_bp->reserved_page_order);
3769 st_bp->cleared = 1; 3771 st_bp->cleared = 1;
3770} 3772}
3771 3773
@@ -3773,7 +3775,7 @@ static void clear_buffer(struct st_buffer * st_bp)
3773/* Release the extra buffer */ 3775/* Release the extra buffer */
3774static void normalize_buffer(struct st_buffer * STbuffer) 3776static void normalize_buffer(struct st_buffer * STbuffer)
3775{ 3777{
3776 int i, order = STbuffer->map_data.page_order; 3778 int i, order = STbuffer->reserved_page_order;
3777 3779
3778 for (i = 0; i < STbuffer->frp_segs; i++) { 3780 for (i = 0; i < STbuffer->frp_segs; i++) {
3779 __free_pages(STbuffer->reserved_pages[i], order); 3781 __free_pages(STbuffer->reserved_pages[i], order);
@@ -3781,7 +3783,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
3781 } 3783 }
3782 STbuffer->frp_segs = 0; 3784 STbuffer->frp_segs = 0;
3783 STbuffer->sg_segs = 0; 3785 STbuffer->sg_segs = 0;
3784 STbuffer->map_data.page_order = 0; 3786 STbuffer->reserved_page_order = 0;
3785 STbuffer->map_data.offset = 0; 3787 STbuffer->map_data.offset = 0;
3786} 3788}
3787 3789
@@ -3791,7 +3793,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
3791static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count) 3793static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
3792{ 3794{
3793 int i, cnt, res, offset; 3795 int i, cnt, res, offset;
3794 int length = PAGE_SIZE << st_bp->map_data.page_order; 3796 int length = PAGE_SIZE << st_bp->reserved_page_order;
3795 3797
3796 for (i = 0, offset = st_bp->buffer_bytes; 3798 for (i = 0, offset = st_bp->buffer_bytes;
3797 i < st_bp->frp_segs && offset >= length; i++) 3799 i < st_bp->frp_segs && offset >= length; i++)
@@ -3823,7 +3825,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
3823static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count) 3825static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
3824{ 3826{
3825 int i, cnt, res, offset; 3827 int i, cnt, res, offset;
3826 int length = PAGE_SIZE << st_bp->map_data.page_order; 3828 int length = PAGE_SIZE << st_bp->reserved_page_order;
3827 3829
3828 for (i = 0, offset = st_bp->read_pointer; 3830 for (i = 0, offset = st_bp->read_pointer;
3829 i < st_bp->frp_segs && offset >= length; i++) 3831 i < st_bp->frp_segs && offset >= length; i++)
@@ -3856,7 +3858,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
3856{ 3858{
3857 int src_seg, dst_seg, src_offset = 0, dst_offset; 3859 int src_seg, dst_seg, src_offset = 0, dst_offset;
3858 int count, total; 3860 int count, total;
3859 int length = PAGE_SIZE << st_bp->map_data.page_order; 3861 int length = PAGE_SIZE << st_bp->reserved_page_order;
3860 3862
3861 if (offset == 0) 3863 if (offset == 0)
3862 return; 3864 return;
@@ -4578,7 +4580,6 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
4578 } 4580 }
4579 4581
4580 mdata->offset = uaddr & ~PAGE_MASK; 4582 mdata->offset = uaddr & ~PAGE_MASK;
4581 mdata->page_order = 0;
4582 STbp->mapped_pages = pages; 4583 STbp->mapped_pages = pages;
4583 4584
4584 return nr_pages; 4585 return nr_pages;
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 544dc6b1f548..f91a67c6d968 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -46,6 +46,7 @@ struct st_buffer {
46 struct st_request *last_SRpnt; 46 struct st_request *last_SRpnt;
47 struct st_cmdstatus cmdstat; 47 struct st_cmdstatus cmdstat;
48 struct page **reserved_pages; 48 struct page **reserved_pages;
49 int reserved_page_order;
49 struct page **mapped_pages; 50 struct page **mapped_pages;
50 struct rq_map_data map_data; 51 struct rq_map_data map_data;
51 unsigned char *b_data; 52 unsigned char *b_data;
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 68c7f6cfd728..37f0de9dd9ce 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -222,9 +222,9 @@ static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
222 Set SCP6MD1,0 = {01} (output) */ 222 Set SCP6MD1,0 = {01} (output) */
223 __raw_writew((data & 0x0fcf) | 0x1000, SCPCR); 223 __raw_writew((data & 0x0fcf) | 0x1000, SCPCR);
224 224
225 data = ctrl_inb(SCPDR); 225 data = __raw_readb(SCPDR);
226 /* Set /RTS2 (bit6) = 0 */ 226 /* Set /RTS2 (bit6) = 0 */
227 ctrl_outb(data & 0xbf, SCPDR); 227 __raw_writeb(data & 0xbf, SCPDR);
228 } 228 }
229} 229}
230#elif defined(CONFIG_CPU_SUBTYPE_SH7722) 230#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
@@ -897,11 +897,21 @@ static void sci_shutdown(struct uart_port *port)
897static void sci_set_termios(struct uart_port *port, struct ktermios *termios, 897static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
898 struct ktermios *old) 898 struct ktermios *old)
899{ 899{
900 unsigned int status, baud, smr_val; 900 unsigned int status, baud, smr_val, max_baud;
901 int t = -1; 901 int t = -1;
902 902
903 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); 903 /*
904 if (likely(baud)) 904 * earlyprintk comes here early on with port->uartclk set to zero.
905 * the clock framework is not up and running at this point so here
906 * we assume that 115200 is the maximum baud rate. please note that
907 * the baud rate is not programmed during earlyprintk - it is assumed
908 * that the previous boot loader has enabled required clocks and
909 * setup the baud rate generator hardware for us already.
910 */
911 max_baud = port->uartclk ? port->uartclk / 16 : 115200;
912
913 baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
914 if (likely(baud && port->uartclk))
905 t = SCBRR_VALUE(baud, port->uartclk); 915 t = SCBRR_VALUE(baud, port->uartclk);
906 916
907 do { 917 do {
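The sci_set_termios() hunk above caps the requested rate at 115200 when port->uartclk is still zero (earlyprintk runs before the clock framework is up) and skips programming the bit-rate register in that case, trusting the setup left behind by the boot loader. A small sketch of the same guard; the divisor formula used here, clk / (32 * baud) - 1, is only an assumed async-mode variant and is not the driver's SCBRR_VALUE():

#include <stdio.h>

/* Assumed async-mode bit-rate register value; real parts differ per clock mode. */
static int scbrr_value(unsigned int baud, unsigned int clk)
{
	return (int)(clk / (32 * baud)) - 1;
}

static void set_baud(unsigned int requested, unsigned int uartclk)
{
	unsigned int max_baud = uartclk ? uartclk / 16 : 115200;
	unsigned int baud = requested > max_baud ? max_baud : requested;
	int t = -1;			/* -1 means: leave the hardware alone */

	if (baud && uartclk)
		t = scbrr_value(baud, uartclk);

	printf("uartclk=%u requested=%u -> baud=%u, divisor=%d\n",
	       uartclk, requested, baud, t);
}

int main(void)
{
	set_baud(115200, 0);		/* earlyprintk: clock still unknown     */
	set_baud(115200, 33333333);	/* normal case: divisor gets programmed */
	return 0;
}
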
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index a32094eeb42b..0efcded59ae6 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -517,20 +517,20 @@ static const struct __attribute__((packed)) {
517static inline int sci_rxd_in(struct uart_port *port) 517static inline int sci_rxd_in(struct uart_port *port)
518{ 518{
519 if (port->mapbase == 0xfffffe80) 519 if (port->mapbase == 0xfffffe80)
520 return ctrl_inb(SCPDR)&0x01 ? 1 : 0; /* SCI */ 520 return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */
521 if (port->mapbase == 0xa4000150) 521 if (port->mapbase == 0xa4000150)
522 return ctrl_inb(SCPDR)&0x10 ? 1 : 0; /* SCIF */ 522 return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
523 if (port->mapbase == 0xa4000140) 523 if (port->mapbase == 0xa4000140)
524 return ctrl_inb(SCPDR)&0x04 ? 1 : 0; /* IRDA */ 524 return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
525 return 1; 525 return 1;
526} 526}
527#elif defined(CONFIG_CPU_SUBTYPE_SH7705) 527#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
528static inline int sci_rxd_in(struct uart_port *port) 528static inline int sci_rxd_in(struct uart_port *port)
529{ 529{
530 if (port->mapbase == SCIF0) 530 if (port->mapbase == SCIF0)
531 return ctrl_inb(SCPDR)&0x04 ? 1 : 0; /* IRDA */ 531 return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
532 if (port->mapbase == SCIF2) 532 if (port->mapbase == SCIF2)
533 return ctrl_inb(SCPDR)&0x10 ? 1 : 0; /* SCIF */ 533 return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
534 return 1; 534 return 1;
535} 535}
536#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) 536#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
@@ -557,68 +557,68 @@ static inline int sci_rxd_in(struct uart_port *port)
557static inline int sci_rxd_in(struct uart_port *port) 557static inline int sci_rxd_in(struct uart_port *port)
558{ 558{
559 if (port->mapbase == 0xffe00000) 559 if (port->mapbase == 0xffe00000)
560 return ctrl_inb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */ 560 return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */
561 if (port->mapbase == 0xffe80000) 561 if (port->mapbase == 0xffe80000)
562 return ctrl_inw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */ 562 return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
563 return 1; 563 return 1;
564} 564}
565#elif defined(CONFIG_CPU_SUBTYPE_SH4_202) 565#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
566static inline int sci_rxd_in(struct uart_port *port) 566static inline int sci_rxd_in(struct uart_port *port)
567{ 567{
568 if (port->mapbase == 0xffe80000) 568 if (port->mapbase == 0xffe80000)
569 return ctrl_inw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */ 569 return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
570 return 1; 570 return 1;
571} 571}
572#elif defined(CONFIG_CPU_SUBTYPE_SH7757) 572#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
573static inline int sci_rxd_in(struct uart_port *port) 573static inline int sci_rxd_in(struct uart_port *port)
574{ 574{
575 if (port->mapbase == 0xfe4b0000) 575 if (port->mapbase == 0xfe4b0000)
576 return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; 576 return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0;
577 if (port->mapbase == 0xfe4c0000) 577 if (port->mapbase == 0xfe4c0000)
578 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; 578 return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0;
579 if (port->mapbase == 0xfe4d0000) 579 if (port->mapbase == 0xfe4d0000)
580 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; 580 return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0;
581} 581}
582#elif defined(CONFIG_CPU_SUBTYPE_SH7760) 582#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
583static inline int sci_rxd_in(struct uart_port *port) 583static inline int sci_rxd_in(struct uart_port *port)
584{ 584{
585 if (port->mapbase == 0xfe600000) 585 if (port->mapbase == 0xfe600000)
586 return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ 586 return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
587 if (port->mapbase == 0xfe610000) 587 if (port->mapbase == 0xfe610000)
588 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 588 return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
589 if (port->mapbase == 0xfe620000) 589 if (port->mapbase == 0xfe620000)
590 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ 590 return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
591 return 1; 591 return 1;
592} 592}
593#elif defined(CONFIG_CPU_SUBTYPE_SH7343) 593#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
594static inline int sci_rxd_in(struct uart_port *port) 594static inline int sci_rxd_in(struct uart_port *port)
595{ 595{
596 if (port->mapbase == 0xffe00000) 596 if (port->mapbase == 0xffe00000)
597 return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ 597 return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
598 if (port->mapbase == 0xffe10000) 598 if (port->mapbase == 0xffe10000)
599 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 599 return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
600 if (port->mapbase == 0xffe20000) 600 if (port->mapbase == 0xffe20000)
601 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ 601 return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
602 if (port->mapbase == 0xffe30000) 602 if (port->mapbase == 0xffe30000)
603 return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ 603 return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
604 return 1; 604 return 1;
605} 605}
606#elif defined(CONFIG_CPU_SUBTYPE_SH7366) 606#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
607static inline int sci_rxd_in(struct uart_port *port) 607static inline int sci_rxd_in(struct uart_port *port)
608{ 608{
609 if (port->mapbase == 0xffe00000) 609 if (port->mapbase == 0xffe00000)
610 return ctrl_inb(SCPDR0) & 0x0001 ? 1 : 0; /* SCIF0 */ 610 return __raw_readb(SCPDR0) & 0x0001 ? 1 : 0; /* SCIF0 */
611 return 1; 611 return 1;
612} 612}
613#elif defined(CONFIG_CPU_SUBTYPE_SH7722) 613#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
614static inline int sci_rxd_in(struct uart_port *port) 614static inline int sci_rxd_in(struct uart_port *port)
615{ 615{
616 if (port->mapbase == 0xffe00000) 616 if (port->mapbase == 0xffe00000)
617 return ctrl_inb(PSDR) & 0x02 ? 1 : 0; /* SCIF0 */ 617 return __raw_readb(PSDR) & 0x02 ? 1 : 0; /* SCIF0 */
618 if (port->mapbase == 0xffe10000) 618 if (port->mapbase == 0xffe10000)
619 return ctrl_inb(PADR) & 0x40 ? 1 : 0; /* SCIF1 */ 619 return __raw_readb(PADR) & 0x40 ? 1 : 0; /* SCIF1 */
620 if (port->mapbase == 0xffe20000) 620 if (port->mapbase == 0xffe20000)
621 return ctrl_inb(PWDR) & 0x04 ? 1 : 0; /* SCIF2 */ 621 return __raw_readb(PWDR) & 0x04 ? 1 : 0; /* SCIF2 */
622 622
623 return 1; 623 return 1;
624} 624}
@@ -626,17 +626,17 @@ static inline int sci_rxd_in(struct uart_port *port)
626static inline int sci_rxd_in(struct uart_port *port) 626static inline int sci_rxd_in(struct uart_port *port)
627{ 627{
628 if (port->mapbase == 0xffe00000) 628 if (port->mapbase == 0xffe00000)
629 return ctrl_inb(SCSPTR0) & 0x0008 ? 1 : 0; /* SCIF0 */ 629 return __raw_readb(SCSPTR0) & 0x0008 ? 1 : 0; /* SCIF0 */
630 if (port->mapbase == 0xffe10000) 630 if (port->mapbase == 0xffe10000)
631 return ctrl_inb(SCSPTR1) & 0x0020 ? 1 : 0; /* SCIF1 */ 631 return __raw_readb(SCSPTR1) & 0x0020 ? 1 : 0; /* SCIF1 */
632 if (port->mapbase == 0xffe20000) 632 if (port->mapbase == 0xffe20000)
633 return ctrl_inb(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF2 */ 633 return __raw_readb(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF2 */
634 if (port->mapbase == 0xa4e30000) 634 if (port->mapbase == 0xa4e30000)
635 return ctrl_inb(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF3 */ 635 return __raw_readb(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF3 */
636 if (port->mapbase == 0xa4e40000) 636 if (port->mapbase == 0xa4e40000)
637 return ctrl_inb(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF4 */ 637 return __raw_readb(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF4 */
638 if (port->mapbase == 0xa4e50000) 638 if (port->mapbase == 0xa4e50000)
639 return ctrl_inb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */ 639 return __raw_readb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */
640 return 1; 640 return 1;
641} 641}
642#elif defined(CONFIG_CPU_SUBTYPE_SH7724) 642#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
@@ -645,9 +645,9 @@ static inline int sci_rxd_in(struct uart_port *port)
645static inline int sci_rxd_in(struct uart_port *port) 645static inline int sci_rxd_in(struct uart_port *port)
646{ 646{
647 if (port->type == PORT_SCIF) 647 if (port->type == PORT_SCIF)
648 return ctrl_inw((port->mapbase + SCFSR)) & SCIF_BRK ? 1 : 0; 648 return __raw_readw((port->mapbase + SCFSR)) & SCIF_BRK ? 1 : 0;
649 if (port->type == PORT_SCIFA) 649 if (port->type == PORT_SCIFA)
650 return ctrl_inw((port->mapbase + SCASSR)) & SCIF_BRK ? 1 : 0; 650 return __raw_readw((port->mapbase + SCASSR)) & SCIF_BRK ? 1 : 0;
651 return 1; 651 return 1;
652} 652}
653#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103) 653#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
@@ -665,11 +665,11 @@ static inline int sci_rxd_in(struct uart_port *port)
665static inline int sci_rxd_in(struct uart_port *port) 665static inline int sci_rxd_in(struct uart_port *port)
666{ 666{
667 if (port->mapbase == 0xffe00000) 667 if (port->mapbase == 0xffe00000)
668 return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ 668 return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
669 if (port->mapbase == 0xffe08000) 669 if (port->mapbase == 0xffe08000)
670 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 670 return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
671 if (port->mapbase == 0xffe10000) 671 if (port->mapbase == 0xffe10000)
672 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF/IRDA */ 672 return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF/IRDA */
673 673
674 return 1; 674 return 1;
675} 675}
@@ -677,20 +677,20 @@ static inline int sci_rxd_in(struct uart_port *port)
677static inline int sci_rxd_in(struct uart_port *port) 677static inline int sci_rxd_in(struct uart_port *port)
678{ 678{
679 if (port->mapbase == 0xff923000) 679 if (port->mapbase == 0xff923000)
680 return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ 680 return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
681 if (port->mapbase == 0xff924000) 681 if (port->mapbase == 0xff924000)
682 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 682 return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
683 if (port->mapbase == 0xff925000) 683 if (port->mapbase == 0xff925000)
684 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ 684 return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
685 return 1; 685 return 1;
686} 686}
687#elif defined(CONFIG_CPU_SUBTYPE_SH7780) 687#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
688static inline int sci_rxd_in(struct uart_port *port) 688static inline int sci_rxd_in(struct uart_port *port)
689{ 689{
690 if (port->mapbase == 0xffe00000) 690 if (port->mapbase == 0xffe00000)
691 return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ 691 return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
692 if (port->mapbase == 0xffe10000) 692 if (port->mapbase == 0xffe10000)
693 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 693 return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
694 return 1; 694 return 1;
695} 695}
696#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \ 696#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
@@ -698,17 +698,17 @@ static inline int sci_rxd_in(struct uart_port *port)
698static inline int sci_rxd_in(struct uart_port *port) 698static inline int sci_rxd_in(struct uart_port *port)
699{ 699{
700 if (port->mapbase == 0xffea0000) 700 if (port->mapbase == 0xffea0000)
701 return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ 701 return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
702 if (port->mapbase == 0xffeb0000) 702 if (port->mapbase == 0xffeb0000)
703 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 703 return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
704 if (port->mapbase == 0xffec0000) 704 if (port->mapbase == 0xffec0000)
705 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ 705 return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
706 if (port->mapbase == 0xffed0000) 706 if (port->mapbase == 0xffed0000)
707 return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ 707 return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
708 if (port->mapbase == 0xffee0000) 708 if (port->mapbase == 0xffee0000)
709 return ctrl_inw(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF */ 709 return __raw_readw(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF */
710 if (port->mapbase == 0xffef0000) 710 if (port->mapbase == 0xffef0000)
711 return ctrl_inw(SCSPTR5) & 0x0001 ? 1 : 0; /* SCIF */ 711 return __raw_readw(SCSPTR5) & 0x0001 ? 1 : 0; /* SCIF */
712 return 1; 712 return 1;
713} 713}
714#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \ 714#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
@@ -718,22 +718,22 @@ static inline int sci_rxd_in(struct uart_port *port)
718static inline int sci_rxd_in(struct uart_port *port) 718static inline int sci_rxd_in(struct uart_port *port)
719{ 719{
720 if (port->mapbase == 0xfffe8000) 720 if (port->mapbase == 0xfffe8000)
721 return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ 721 return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
722 if (port->mapbase == 0xfffe8800) 722 if (port->mapbase == 0xfffe8800)
723 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 723 return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
724 if (port->mapbase == 0xfffe9000) 724 if (port->mapbase == 0xfffe9000)
725 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ 725 return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
726 if (port->mapbase == 0xfffe9800) 726 if (port->mapbase == 0xfffe9800)
727 return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ 727 return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
728#if defined(CONFIG_CPU_SUBTYPE_SH7201) 728#if defined(CONFIG_CPU_SUBTYPE_SH7201)
729 if (port->mapbase == 0xfffeA000) 729 if (port->mapbase == 0xfffeA000)
730 return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ 730 return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
731 if (port->mapbase == 0xfffeA800) 731 if (port->mapbase == 0xfffeA800)
732 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 732 return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
733 if (port->mapbase == 0xfffeB000) 733 if (port->mapbase == 0xfffeB000)
734 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ 734 return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
735 if (port->mapbase == 0xfffeB800) 735 if (port->mapbase == 0xfffeB800)
736 return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ 736 return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
737#endif 737#endif
738 return 1; 738 return 1;
739} 739}
@@ -741,24 +741,24 @@ static inline int sci_rxd_in(struct uart_port *port)
741static inline int sci_rxd_in(struct uart_port *port) 741static inline int sci_rxd_in(struct uart_port *port)
742{ 742{
743 if (port->mapbase == 0xf8400000) 743 if (port->mapbase == 0xf8400000)
744 return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ 744 return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
745 if (port->mapbase == 0xf8410000) 745 if (port->mapbase == 0xf8410000)
746 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 746 return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
747 if (port->mapbase == 0xf8420000) 747 if (port->mapbase == 0xf8420000)
748 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ 748 return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
749 return 1; 749 return 1;
750} 750}
751#elif defined(CONFIG_CPU_SUBTYPE_SHX3) 751#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
752static inline int sci_rxd_in(struct uart_port *port) 752static inline int sci_rxd_in(struct uart_port *port)
753{ 753{
754 if (port->mapbase == 0xffc30000) 754 if (port->mapbase == 0xffc30000)
755 return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ 755 return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
756 if (port->mapbase == 0xffc40000) 756 if (port->mapbase == 0xffc40000)
757 return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ 757 return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
758 if (port->mapbase == 0xffc50000) 758 if (port->mapbase == 0xffc50000)
759 return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ 759 return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
760 if (port->mapbase == 0xffc60000) 760 if (port->mapbase == 0xffc60000)
761 return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ 761 return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
762 return 1; 762 return 1;
763} 763}
764#endif 764#endif
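The sh-sci.h hunks above are a mechanical conversion from the SH-specific ctrl_inb()/ctrl_inw() accessors to the generic __raw_readb()/__raw_readw() forms; the mapbase comparisons and bit tests are untouched. As a rough sketch (not taken from this patch, and with hypothetical names), the generic __raw_* accessors amount to plain volatile MMIO loads with no barriers or byte swapping, which is why they can stand in one-for-one for ctrl_in*():

	/* Sketch only: the real definitions live in the arch io headers. */
	static inline unsigned char sketch_raw_readb(unsigned long addr)
	{
		return *(volatile unsigned char *)addr;
	}

	static inline unsigned short sketch_raw_readw(unsigned long addr)
	{
		return *(volatile unsigned short *)addr;
	}
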
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 2d9d70359360..f55eb0107336 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -216,6 +216,17 @@ config SPI_S3C24XX
216 help 216 help
217 SPI driver for Samsung S3C24XX series ARM SoCs 217 SPI driver for Samsung S3C24XX series ARM SoCs
218 218
219config SPI_S3C24XX_FIQ
220 bool "S3C24XX driver with FIQ pseudo-DMA"
221 depends on SPI_S3C24XX
222 select FIQ
223 help
224 Enable FIQ support for the S3C24XX SPI driver to provide pseudo
225 DMA by using the fast-interrupt request framework. This allows
226 the driver to get DMA-like performance when there are either
227 no free DMA channels, or when doing transfers that require both
228 TX and RX data paths.
229
219config SPI_S3C24XX_GPIO 230config SPI_S3C24XX_GPIO
220 tristate "Samsung S3C24XX series SPI by GPIO" 231 tristate "Samsung S3C24XX series SPI by GPIO"
221 depends on ARCH_S3C2410 && EXPERIMENTAL 232 depends on ARCH_S3C2410 && EXPERIMENTAL
@@ -226,6 +237,13 @@ config SPI_S3C24XX_GPIO
226 the inbuilt hardware cannot provide the transfer mode, or 237 the inbuilt hardware cannot provide the transfer mode, or
227 where the board is using non hardware connected pins. 238 where the board is using non hardware connected pins.
228 239
240config SPI_S3C64XX
241 tristate "Samsung S3C64XX series type SPI"
242 depends on ARCH_S3C64XX && EXPERIMENTAL
243 select S3C64XX_DMA
244 help
245 SPI driver for Samsung S3C64XX and newer SoCs.
246
229config SPI_SH_MSIOF 247config SPI_SH_MSIOF
230 tristate "SuperH MSIOF SPI controller" 248 tristate "SuperH MSIOF SPI controller"
231 depends on SUPERH && HAVE_CLK 249 depends on SUPERH && HAVE_CLK
@@ -289,6 +307,16 @@ config SPI_NUC900
289# Add new SPI master controllers in alphabetical order above this line 307# Add new SPI master controllers in alphabetical order above this line
290# 308#
291 309
310config SPI_DESIGNWARE
311 bool "DesignWare SPI controller core support"
312 depends on SPI_MASTER
313 help
314 General driver for the SPI controller core from DesignWare
315
316config SPI_DW_PCI
317 tristate "PCI interface driver for DW SPI core"
318 depends on SPI_DESIGNWARE && PCI
319
292# 320#
293# There are lots of SPI device types, with sensors and memory 321# There are lots of SPI device types, with sensors and memory
294# being probably the most widely used ones. 322# being probably the most widely used ones.
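The new SPI_DESIGNWARE/SPI_DW_PCI split makes the DesignWare core a bus-agnostic library (dw_spi.c, added below) with PCI glue layered on top (dw_spi_pci.c). A minimal sketch of what a hypothetical memory-mapped glue could look like follows, assuming only the struct dw_spi fields and the dw_spi_add_host() entry point that this patch introduces; the device and resource handling here is illustrative, not part of the patch:

	#include <linux/init.h>
	#include <linux/slab.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/spi/dw_spi.h>

	static int __devinit dw_spi_mmio_probe(struct platform_device *pdev)
	{
		struct resource *mem;
		struct dw_spi *dws;
		int ret;

		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!mem)
			return -ENODEV;

		dws = kzalloc(sizeof(*dws), GFP_KERNEL);
		if (!dws)
			return -ENOMEM;

		/* same fields the PCI glue fills in before dw_spi_add_host() */
		dws->paddr = mem->start;
		dws->regs = ioremap_nocache(mem->start, resource_size(mem));
		if (!dws->regs) {
			kfree(dws);
			return -ENOMEM;
		}
		dws->irq = platform_get_irq(pdev, 0);
		dws->parent_dev = &pdev->dev;
		dws->bus_num = pdev->id;
		dws->num_cs = 4;		/* board specific */
		dws->max_freq = 25000000;	/* SPI input clock, board specific */

		ret = dw_spi_add_host(dws);
		if (ret) {
			iounmap(dws->regs);
			kfree(dws);
		}
		return ret;
	}
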
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index ed8c1675b52f..f3d2810ba11c 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,6 +16,8 @@ obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
16obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o 16obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o 17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o 18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
19obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o
20obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o
19obj-$(CONFIG_SPI_GPIO) += spi_gpio.o 21obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
20obj-$(CONFIG_SPI_IMX) += spi_imx.o 22obj-$(CONFIG_SPI_IMX) += spi_imx.o
21obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o 23obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
@@ -30,7 +32,8 @@ obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
30obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o 32obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
31obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o 33obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
32obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o 34obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
33obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o 35obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
36obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
34obj-$(CONFIG_SPI_TXX9) += spi_txx9.o 37obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
35obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o 38obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
36obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o 39obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
@@ -39,6 +42,11 @@ obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
39obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o 42obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
40obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o 43obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
41obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o 44obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
45
46# special build for s3c24xx spi driver with fiq support
47spi_s3c24xx_hw-y := spi_s3c24xx.o
48spi_s3c24xx_hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi_s3c24xx_fiq.o
49
42# ... add above this line ... 50# ... add above this line ...
43 51
44# SPI protocol drivers (device/link on bus) 52# SPI protocol drivers (device/link on bus)
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index f5b3fdbb1e27..d21c24eaf0a9 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -189,14 +189,14 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
189 189
190 /* use scratch buffer only when rx or tx data is unspecified */ 190 /* use scratch buffer only when rx or tx data is unspecified */
191 if (xfer->rx_buf) 191 if (xfer->rx_buf)
192 *rx_dma = xfer->rx_dma + xfer->len - len; 192 *rx_dma = xfer->rx_dma + xfer->len - *plen;
193 else { 193 else {
194 *rx_dma = as->buffer_dma; 194 *rx_dma = as->buffer_dma;
195 if (len > BUFFER_SIZE) 195 if (len > BUFFER_SIZE)
196 len = BUFFER_SIZE; 196 len = BUFFER_SIZE;
197 } 197 }
198 if (xfer->tx_buf) 198 if (xfer->tx_buf)
199 *tx_dma = xfer->tx_dma + xfer->len - len; 199 *tx_dma = xfer->tx_dma + xfer->len - *plen;
200 else { 200 else {
201 *tx_dma = as->buffer_dma; 201 *tx_dma = as->buffer_dma;
202 if (len > BUFFER_SIZE) 202 if (len > BUFFER_SIZE)
@@ -788,7 +788,7 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
788 spin_lock_init(&as->lock); 788 spin_lock_init(&as->lock);
789 INIT_LIST_HEAD(&as->queue); 789 INIT_LIST_HEAD(&as->queue);
790 as->pdev = pdev; 790 as->pdev = pdev;
791 as->regs = ioremap(regs->start, (regs->end - regs->start) + 1); 791 as->regs = ioremap(regs->start, resource_size(regs));
792 if (!as->regs) 792 if (!as->regs)
793 goto out_free_buffer; 793 goto out_free_buffer;
794 as->irq = irq; 794 as->irq = irq;
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
new file mode 100644
index 000000000000..31620fae77be
--- /dev/null
+++ b/drivers/spi/dw_spi.c
@@ -0,0 +1,944 @@
1/*
2 * dw_spi.c - DesignWare SPI core controller driver (refer to pxa2xx_spi.c)
3 *
4 * Copyright (c) 2009, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include <linux/dma-mapping.h>
21#include <linux/interrupt.h>
22#include <linux/highmem.h>
23#include <linux/delay.h>
24
25#include <linux/spi/dw_spi.h>
26#include <linux/spi/spi.h>
27
28#ifdef CONFIG_DEBUG_FS
29#include <linux/debugfs.h>
30#endif
31
32#define START_STATE ((void *)0)
33#define RUNNING_STATE ((void *)1)
34#define DONE_STATE ((void *)2)
35#define ERROR_STATE ((void *)-1)
36
37#define QUEUE_RUNNING 0
38#define QUEUE_STOPPED 1
39
40#define MRST_SPI_DEASSERT 0
41#define MRST_SPI_ASSERT 1
42
43/* Slave spi_dev related */
44struct chip_data {
45 u16 cr0;
46 u8 cs; /* chip select pin */
47 u8 n_bytes; /* current is a 1/2/4 byte op */
48 u8 tmode; /* TR/TO/RO/EEPROM */
49 u8 type; /* SPI/SSP/MicroWire */
50
51 u8 poll_mode; /* 1 means use poll mode */
52
53 u32 dma_width;
54 u32 rx_threshold;
55 u32 tx_threshold;
56 u8 enable_dma;
57 u8 bits_per_word;
58 u16 clk_div; /* baud rate divider */
59 u32 speed_hz; /* baud rate */
60 int (*write)(struct dw_spi *dws);
61 int (*read)(struct dw_spi *dws);
62 void (*cs_control)(u32 command);
63};
64
65#ifdef CONFIG_DEBUG_FS
66static int spi_show_regs_open(struct inode *inode, struct file *file)
67{
68 file->private_data = inode->i_private;
69 return 0;
70}
71
72#define SPI_REGS_BUFSIZE 1024
73static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
74 size_t count, loff_t *ppos)
75{
76 struct dw_spi *dws;
77 char *buf;
78 u32 len = 0;
79 ssize_t ret;
80
81 dws = file->private_data;
82
83 buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
84 if (!buf)
85 return 0;
86
87 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
88 "MRST SPI0 registers:\n");
89 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
90 "=================================\n");
91 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
92 "CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0));
93 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
94 "CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1));
95 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
96 "SSIENR: \t0x%08x\n", dw_readl(dws, ssienr));
97 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
98 "SER: \t\t0x%08x\n", dw_readl(dws, ser));
99 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
100 "BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr));
101 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
102 "TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr));
103 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
104 "RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr));
105 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
106 "TXFLR: \t\t0x%08x\n", dw_readl(dws, txflr));
107 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
108 "RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr));
109 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
110 "SR: \t\t0x%08x\n", dw_readl(dws, sr));
111 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
112 "IMR: \t\t0x%08x\n", dw_readl(dws, imr));
113 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
114 "ISR: \t\t0x%08x\n", dw_readl(dws, isr));
115 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
116 "DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr));
117 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
118 "DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr));
119 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
120 "DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr));
121 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
122 "=================================\n");
123
124 ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
125 kfree(buf);
126 return ret;
127}
128
129static const struct file_operations mrst_spi_regs_ops = {
130 .owner = THIS_MODULE,
131 .open = spi_show_regs_open,
132 .read = spi_show_regs,
133};
134
135static int mrst_spi_debugfs_init(struct dw_spi *dws)
136{
137 dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
138 if (!dws->debugfs)
139 return -ENOMEM;
140
141 debugfs_create_file("registers", S_IFREG | S_IRUGO,
142 dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
143 return 0;
144}
145
146static void mrst_spi_debugfs_remove(struct dw_spi *dws)
147{
148 if (dws->debugfs)
149 debugfs_remove_recursive(dws->debugfs);
150}
151
152#else
153static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
154{
155}
156
157static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
158{
159}
160#endif /* CONFIG_DEBUG_FS */
161
162static void wait_till_not_busy(struct dw_spi *dws)
163{
164 unsigned long end = jiffies + usecs_to_jiffies(1000);
165
166 while (time_before(jiffies, end)) {
167 if (!(dw_readw(dws, sr) & SR_BUSY))
168 return;
169 }
170 dev_err(&dws->master->dev,
171 "DW SPI: Status stays busy for 1000us after a read/write!\n");
172}
173
174static void flush(struct dw_spi *dws)
175{
176 while (dw_readw(dws, sr) & SR_RF_NOT_EMPT)
177 dw_readw(dws, dr);
178
179 wait_till_not_busy(dws);
180}
181
182static void null_cs_control(u32 command)
183{
184}
185
186static int null_writer(struct dw_spi *dws)
187{
188 u8 n_bytes = dws->n_bytes;
189
190 if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
191 || (dws->tx == dws->tx_end))
192 return 0;
193 dw_writew(dws, dr, 0);
194 dws->tx += n_bytes;
195
196 wait_till_not_busy(dws);
197 return 1;
198}
199
200static int null_reader(struct dw_spi *dws)
201{
202 u8 n_bytes = dws->n_bytes;
203
204 while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
205 && (dws->rx < dws->rx_end)) {
206 dw_readw(dws, dr);
207 dws->rx += n_bytes;
208 }
209 wait_till_not_busy(dws);
210 return dws->rx == dws->rx_end;
211}
212
213static int u8_writer(struct dw_spi *dws)
214{
215 if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
216 || (dws->tx == dws->tx_end))
217 return 0;
218
219 dw_writew(dws, dr, *(u8 *)(dws->tx));
220 ++dws->tx;
221
222 wait_till_not_busy(dws);
223 return 1;
224}
225
226static int u8_reader(struct dw_spi *dws)
227{
228 while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
229 && (dws->rx < dws->rx_end)) {
230 *(u8 *)(dws->rx) = dw_readw(dws, dr);
231 ++dws->rx;
232 }
233
234 wait_till_not_busy(dws);
235 return dws->rx == dws->rx_end;
236}
237
238static int u16_writer(struct dw_spi *dws)
239{
240 if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
241 || (dws->tx == dws->tx_end))
242 return 0;
243
244 dw_writew(dws, dr, *(u16 *)(dws->tx));
245 dws->tx += 2;
246
247 wait_till_not_busy(dws);
248 return 1;
249}
250
251static int u16_reader(struct dw_spi *dws)
252{
253 u16 temp;
254
255 while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
256 && (dws->rx < dws->rx_end)) {
257 temp = dw_readw(dws, dr);
258 *(u16 *)(dws->rx) = temp;
259 dws->rx += 2;
260 }
261
262 wait_till_not_busy(dws);
263 return dws->rx == dws->rx_end;
264}
265
266static void *next_transfer(struct dw_spi *dws)
267{
268 struct spi_message *msg = dws->cur_msg;
269 struct spi_transfer *trans = dws->cur_transfer;
270
271 /* Move to next transfer */
272 if (trans->transfer_list.next != &msg->transfers) {
273 dws->cur_transfer =
274 list_entry(trans->transfer_list.next,
275 struct spi_transfer,
276 transfer_list);
277 return RUNNING_STATE;
278 } else
279 return DONE_STATE;
280}
281
282/*
283 * Note: the first step is that the protocol driver prepares
284 * dma-capable memory, and this function only needs to translate
285 * the virtual addresses to physical ones
286 */
287static int map_dma_buffers(struct dw_spi *dws)
288{
289 if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited
290 || !dws->cur_chip->enable_dma)
291 return 0;
292
293 if (dws->cur_transfer->tx_dma)
294 dws->tx_dma = dws->cur_transfer->tx_dma;
295
296 if (dws->cur_transfer->rx_dma)
297 dws->rx_dma = dws->cur_transfer->rx_dma;
298
299 return 1;
300}
301
302/* Caller already set message->status; dma and pio irqs are blocked */
303static void giveback(struct dw_spi *dws)
304{
305 struct spi_transfer *last_transfer;
306 unsigned long flags;
307 struct spi_message *msg;
308
309 spin_lock_irqsave(&dws->lock, flags);
310 msg = dws->cur_msg;
311 dws->cur_msg = NULL;
312 dws->cur_transfer = NULL;
313 dws->prev_chip = dws->cur_chip;
314 dws->cur_chip = NULL;
315 dws->dma_mapped = 0;
316 queue_work(dws->workqueue, &dws->pump_messages);
317 spin_unlock_irqrestore(&dws->lock, flags);
318
319 last_transfer = list_entry(msg->transfers.prev,
320 struct spi_transfer,
321 transfer_list);
322
323 if (!last_transfer->cs_change)
324 dws->cs_control(MRST_SPI_DEASSERT);
325
326 msg->state = NULL;
327 if (msg->complete)
328 msg->complete(msg->context);
329}
330
331static void int_error_stop(struct dw_spi *dws, const char *msg)
332{
333 /* Stop and reset hw */
334 flush(dws);
335 spi_enable_chip(dws, 0);
336
337 dev_err(&dws->master->dev, "%s\n", msg);
338 dws->cur_msg->state = ERROR_STATE;
339 tasklet_schedule(&dws->pump_transfers);
340}
341
342static void transfer_complete(struct dw_spi *dws)
343{
344 /* Update the message's actual_length with the bytes moved in this transfer */
345 dws->cur_msg->actual_length += dws->len;
346
347 /* Move to next transfer */
348 dws->cur_msg->state = next_transfer(dws);
349
350 /* Handle end of message */
351 if (dws->cur_msg->state == DONE_STATE) {
352 dws->cur_msg->status = 0;
353 giveback(dws);
354 } else
355 tasklet_schedule(&dws->pump_transfers);
356}
357
358static irqreturn_t interrupt_transfer(struct dw_spi *dws)
359{
360 u16 irq_status, irq_mask = 0x3f;
361
362 irq_status = dw_readw(dws, isr) & irq_mask;
363 /* Error handling */
364 if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
365 dw_readw(dws, txoicr);
366 dw_readw(dws, rxoicr);
367 dw_readw(dws, rxuicr);
368 int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
369 return IRQ_HANDLED;
370 }
371
372 /* INT comes from tx */
373 if (dws->tx && (irq_status & SPI_INT_TXEI)) {
374 while (dws->tx < dws->tx_end)
375 dws->write(dws);
376
377 if (dws->tx == dws->tx_end) {
378 spi_mask_intr(dws, SPI_INT_TXEI);
379 transfer_complete(dws);
380 }
381 }
382
383 /* INT comes from rx */
384 if (dws->rx && (irq_status & SPI_INT_RXFI)) {
385 if (dws->read(dws))
386 transfer_complete(dws);
387 }
388 return IRQ_HANDLED;
389}
390
391static irqreturn_t dw_spi_irq(int irq, void *dev_id)
392{
393 struct dw_spi *dws = dev_id;
394
395 if (!dws->cur_msg) {
396 spi_mask_intr(dws, SPI_INT_TXEI);
397 /* Never fail */
398 return IRQ_HANDLED;
399 }
400
401 return dws->transfer_handler(dws);
402}
403
404/* Must be called inside pump_transfers() */
405static void poll_transfer(struct dw_spi *dws)
406{
407 if (dws->tx) {
408 while (dws->write(dws))
409 dws->read(dws);
410 }
411
412 dws->read(dws);
413 transfer_complete(dws);
414}
415
416static void dma_transfer(struct dw_spi *dws, int cs_change)
417{
418}
419
420static void pump_transfers(unsigned long data)
421{
422 struct dw_spi *dws = (struct dw_spi *)data;
423 struct spi_message *message = NULL;
424 struct spi_transfer *transfer = NULL;
425 struct spi_transfer *previous = NULL;
426 struct spi_device *spi = NULL;
427 struct chip_data *chip = NULL;
428 u8 bits = 0;
429 u8 imask = 0;
430 u8 cs_change = 0;
431 u16 clk_div = 0;
432 u32 speed = 0;
433 u32 cr0 = 0;
434
435 /* Get current state information */
436 message = dws->cur_msg;
437 transfer = dws->cur_transfer;
438 chip = dws->cur_chip;
439 spi = message->spi;
440
441 if (message->state == ERROR_STATE) {
442 message->status = -EIO;
443 goto early_exit;
444 }
445
446 /* Handle end of message */
447 if (message->state == DONE_STATE) {
448 message->status = 0;
449 goto early_exit;
450 }
451
452 /* Delay if requested at end of transfer*/
453 if (message->state == RUNNING_STATE) {
454 previous = list_entry(transfer->transfer_list.prev,
455 struct spi_transfer,
456 transfer_list);
457 if (previous->delay_usecs)
458 udelay(previous->delay_usecs);
459 }
460
461 dws->n_bytes = chip->n_bytes;
462 dws->dma_width = chip->dma_width;
463 dws->cs_control = chip->cs_control;
464
465 dws->rx_dma = transfer->rx_dma;
466 dws->tx_dma = transfer->tx_dma;
467 dws->tx = (void *)transfer->tx_buf;
468 dws->tx_end = dws->tx + transfer->len;
469 dws->rx = transfer->rx_buf;
470 dws->rx_end = dws->rx + transfer->len;
471 dws->write = dws->tx ? chip->write : null_writer;
472 dws->read = dws->rx ? chip->read : null_reader;
473 dws->cs_change = transfer->cs_change;
474 dws->len = dws->cur_transfer->len;
475 if (chip != dws->prev_chip)
476 cs_change = 1;
477
478 cr0 = chip->cr0;
479
480 /* Handle per transfer options for bpw and speed */
481 if (transfer->speed_hz) {
482 speed = chip->speed_hz;
483
484 if (transfer->speed_hz != speed) {
485 speed = transfer->speed_hz;
486 if (speed > dws->max_freq) {
487 printk(KERN_ERR "MRST SPI0: unsupported "
488 "freq: %dHz\n", speed);
489 message->status = -EIO;
490 goto early_exit;
491 }
492
493 /* clk_div doesn't support odd number */
494 clk_div = dws->max_freq / speed;
495 clk_div = (clk_div >> 1) << 1;
496
497 chip->speed_hz = speed;
498 chip->clk_div = clk_div;
499 }
500 }
501 if (transfer->bits_per_word) {
502 bits = transfer->bits_per_word;
503
504 switch (bits) {
505 case 8:
506 dws->n_bytes = 1;
507 dws->dma_width = 1;
508 dws->read = (dws->read != null_reader) ?
509 u8_reader : null_reader;
510 dws->write = (dws->write != null_writer) ?
511 u8_writer : null_writer;
512 break;
513 case 16:
514 dws->n_bytes = 2;
515 dws->dma_width = 2;
516 dws->read = (dws->read != null_reader) ?
517 u16_reader : null_reader;
518 dws->write = (dws->write != null_writer) ?
519 u16_writer : null_writer;
520 break;
521 default:
522 printk(KERN_ERR "MRST SPI0: unsupported bits: "
523 "%db\n", bits);
524 message->status = -EIO;
525 goto early_exit;
526 }
527
528 cr0 = (bits - 1)
529 | (chip->type << SPI_FRF_OFFSET)
530 | (spi->mode << SPI_MODE_OFFSET)
531 | (chip->tmode << SPI_TMOD_OFFSET);
532 }
533 message->state = RUNNING_STATE;
534
535 /* Check if current transfer is a DMA transaction */
536 dws->dma_mapped = map_dma_buffers(dws);
537
538 if (!dws->dma_mapped && !chip->poll_mode) {
539 if (dws->rx)
540 imask |= SPI_INT_RXFI;
541 if (dws->tx)
542 imask |= SPI_INT_TXEI;
543 dws->transfer_handler = interrupt_transfer;
544 }
545
546 /*
547 * Reprogram registers only if
548 * 1. chip select changes
549 * 2. clk_div is changed
550 * 3. control value changes
551 */
552 if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div) {
553 spi_enable_chip(dws, 0);
554
555 if (dw_readw(dws, ctrl0) != cr0)
556 dw_writew(dws, ctrl0, cr0);
557
558 /* Set the interrupt mask; for poll mode just disable all interrupts */
559 spi_mask_intr(dws, 0xff);
560 if (!chip->poll_mode)
561 spi_umask_intr(dws, imask);
562
563 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
564 spi_chip_sel(dws, spi->chip_select);
565 spi_enable_chip(dws, 1);
566
567 if (cs_change)
568 dws->prev_chip = chip;
569 }
570
571 if (dws->dma_mapped)
572 dma_transfer(dws, cs_change);
573
574 if (chip->poll_mode)
575 poll_transfer(dws);
576
577 return;
578
579early_exit:
580 giveback(dws);
581 return;
582}
583
584static void pump_messages(struct work_struct *work)
585{
586 struct dw_spi *dws =
587 container_of(work, struct dw_spi, pump_messages);
588 unsigned long flags;
589
590 /* Lock queue and check for queue work */
591 spin_lock_irqsave(&dws->lock, flags);
592 if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
593 dws->busy = 0;
594 spin_unlock_irqrestore(&dws->lock, flags);
595 return;
596 }
597
598 /* Make sure we are not already running a message */
599 if (dws->cur_msg) {
600 spin_unlock_irqrestore(&dws->lock, flags);
601 return;
602 }
603
604 /* Extract head of queue */
605 dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
606 list_del_init(&dws->cur_msg->queue);
607
608 /* Initial message state*/
609 dws->cur_msg->state = START_STATE;
610 dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
611 struct spi_transfer,
612 transfer_list);
613 dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
614
615 /* Mark as busy and launch transfers */
616 tasklet_schedule(&dws->pump_transfers);
617
618 dws->busy = 1;
619 spin_unlock_irqrestore(&dws->lock, flags);
620}
621
622/* spi_device uses this to queue its spi_message */
623static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
624{
625 struct dw_spi *dws = spi_master_get_devdata(spi->master);
626 unsigned long flags;
627
628 spin_lock_irqsave(&dws->lock, flags);
629
630 if (dws->run == QUEUE_STOPPED) {
631 spin_unlock_irqrestore(&dws->lock, flags);
632 return -ESHUTDOWN;
633 }
634
635 msg->actual_length = 0;
636 msg->status = -EINPROGRESS;
637 msg->state = START_STATE;
638
639 list_add_tail(&msg->queue, &dws->queue);
640
641 if (dws->run == QUEUE_RUNNING && !dws->busy) {
642
643 if (dws->cur_transfer || dws->cur_msg)
644 queue_work(dws->workqueue,
645 &dws->pump_messages);
646 else {
647 /* If no other data transaction in air, just go */
648 spin_unlock_irqrestore(&dws->lock, flags);
649 pump_messages(&dws->pump_messages);
650 return 0;
651 }
652 }
653
654 spin_unlock_irqrestore(&dws->lock, flags);
655 return 0;
656}
657
658/* This may be called twice for each spi dev */
659static int dw_spi_setup(struct spi_device *spi)
660{
661 struct dw_spi_chip *chip_info = NULL;
662 struct chip_data *chip;
663
664 if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
665 return -EINVAL;
666
667 /* Only alloc on first setup */
668 chip = spi_get_ctldata(spi);
669 if (!chip) {
670 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
671 if (!chip)
672 return -ENOMEM;
673
674 chip->cs_control = null_cs_control;
675 chip->enable_dma = 0;
676 }
677
678 /*
679 * Protocol drivers may change the chip settings, so...
680 * if chip_info exists, use it
681 */
682 chip_info = spi->controller_data;
683
684 /* chip_info doesn't always exist */
685 if (chip_info) {
686 if (chip_info->cs_control)
687 chip->cs_control = chip_info->cs_control;
688
689 chip->poll_mode = chip_info->poll_mode;
690 chip->type = chip_info->type;
691
692 chip->rx_threshold = 0;
693 chip->tx_threshold = 0;
694
695 chip->enable_dma = chip_info->enable_dma;
696 }
697
698 if (spi->bits_per_word <= 8) {
699 chip->n_bytes = 1;
700 chip->dma_width = 1;
701 chip->read = u8_reader;
702 chip->write = u8_writer;
703 } else if (spi->bits_per_word <= 16) {
704 chip->n_bytes = 2;
705 chip->dma_width = 2;
706 chip->read = u16_reader;
707 chip->write = u16_writer;
708 } else {
709 /* Never take the >16 bit case for the MRST SPI controller */
710 dev_err(&spi->dev, "invalid wordsize\n");
711 return -EINVAL;
712 }
713 chip->bits_per_word = spi->bits_per_word;
714
715 chip->speed_hz = spi->max_speed_hz;
716 if (chip->speed_hz)
717 chip->clk_div = 25000000 / chip->speed_hz;
718 else
719 chip->clk_div = 8; /* default value */
720
721 chip->tmode = 0; /* Tx & Rx */
722 /* Default SPI mode is SCPOL = 0, SCPH = 0 */
723 chip->cr0 = (chip->bits_per_word - 1)
724 | (chip->type << SPI_FRF_OFFSET)
725 | (spi->mode << SPI_MODE_OFFSET)
726 | (chip->tmode << SPI_TMOD_OFFSET);
727
728 spi_set_ctldata(spi, chip);
729 return 0;
730}
731
732static void dw_spi_cleanup(struct spi_device *spi)
733{
734 struct chip_data *chip = spi_get_ctldata(spi);
735 kfree(chip);
736}
737
738static int __init init_queue(struct dw_spi *dws)
739{
740 INIT_LIST_HEAD(&dws->queue);
741 spin_lock_init(&dws->lock);
742
743 dws->run = QUEUE_STOPPED;
744 dws->busy = 0;
745
746 tasklet_init(&dws->pump_transfers,
747 pump_transfers, (unsigned long)dws);
748
749 INIT_WORK(&dws->pump_messages, pump_messages);
750 dws->workqueue = create_singlethread_workqueue(
751 dev_name(dws->master->dev.parent));
752 if (dws->workqueue == NULL)
753 return -EBUSY;
754
755 return 0;
756}
757
758static int start_queue(struct dw_spi *dws)
759{
760 unsigned long flags;
761
762 spin_lock_irqsave(&dws->lock, flags);
763
764 if (dws->run == QUEUE_RUNNING || dws->busy) {
765 spin_unlock_irqrestore(&dws->lock, flags);
766 return -EBUSY;
767 }
768
769 dws->run = QUEUE_RUNNING;
770 dws->cur_msg = NULL;
771 dws->cur_transfer = NULL;
772 dws->cur_chip = NULL;
773 dws->prev_chip = NULL;
774 spin_unlock_irqrestore(&dws->lock, flags);
775
776 queue_work(dws->workqueue, &dws->pump_messages);
777
778 return 0;
779}
780
781static int stop_queue(struct dw_spi *dws)
782{
783 unsigned long flags;
784 unsigned limit = 50;
785 int status = 0;
786
787 spin_lock_irqsave(&dws->lock, flags);
788 dws->run = QUEUE_STOPPED;
789 while (!list_empty(&dws->queue) && dws->busy && limit--) {
790 spin_unlock_irqrestore(&dws->lock, flags);
791 msleep(10);
792 spin_lock_irqsave(&dws->lock, flags);
793 }
794
795 if (!list_empty(&dws->queue) || dws->busy)
796 status = -EBUSY;
797 spin_unlock_irqrestore(&dws->lock, flags);
798
799 return status;
800}
801
802static int destroy_queue(struct dw_spi *dws)
803{
804 int status;
805
806 status = stop_queue(dws);
807 if (status != 0)
808 return status;
809 destroy_workqueue(dws->workqueue);
810 return 0;
811}
812
813/* Restart the controller, disable all interrupts, clean rx fifo */
814static void spi_hw_init(struct dw_spi *dws)
815{
816 spi_enable_chip(dws, 0);
817 spi_mask_intr(dws, 0xff);
818 spi_enable_chip(dws, 1);
819 flush(dws);
820}
821
822int __devinit dw_spi_add_host(struct dw_spi *dws)
823{
824 struct spi_master *master;
825 int ret;
826
827 BUG_ON(dws == NULL);
828
829 master = spi_alloc_master(dws->parent_dev, 0);
830 if (!master) {
831 ret = -ENOMEM;
832 goto exit;
833 }
834
835 dws->master = master;
836 dws->type = SSI_MOTO_SPI;
837 dws->prev_chip = NULL;
838 dws->dma_inited = 0;
839 dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
840
841 ret = request_irq(dws->irq, dw_spi_irq, 0,
842 "dw_spi", dws);
843 if (ret < 0) {
844 dev_err(&master->dev, "cannot get IRQ\n");
845 goto err_free_master;
846 }
847
848 master->mode_bits = SPI_CPOL | SPI_CPHA;
849 master->bus_num = dws->bus_num;
850 master->num_chipselect = dws->num_cs;
851 master->cleanup = dw_spi_cleanup;
852 master->setup = dw_spi_setup;
853 master->transfer = dw_spi_transfer;
854
855 dws->dma_inited = 0;
856
857 /* Basic HW init */
858 spi_hw_init(dws);
859
860 /* Initial and start queue */
861 ret = init_queue(dws);
862 if (ret) {
863 dev_err(&master->dev, "problem initializing queue\n");
864 goto err_disable_hw;
865 }
866 ret = start_queue(dws);
867 if (ret) {
868 dev_err(&master->dev, "problem starting queue\n");
869 goto err_disable_hw;
870 }
871
872 spi_master_set_devdata(master, dws);
873 ret = spi_register_master(master);
874 if (ret) {
875 dev_err(&master->dev, "problem registering spi master\n");
876 goto err_queue_alloc;
877 }
878
879 mrst_spi_debugfs_init(dws);
880 return 0;
881
882err_queue_alloc:
883 destroy_queue(dws);
884err_disable_hw:
885 spi_enable_chip(dws, 0);
886 free_irq(dws->irq, dws);
887err_free_master:
888 spi_master_put(master);
889exit:
890 return ret;
891}
892EXPORT_SYMBOL(dw_spi_add_host);
893
894void __devexit dw_spi_remove_host(struct dw_spi *dws)
895{
896 int status = 0;
897
898 if (!dws)
899 return;
900 mrst_spi_debugfs_remove(dws);
901
902 /* Remove the queue */
903 status = destroy_queue(dws);
904 if (status != 0)
905 dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
906 "complete, message memory not freed\n");
907
908 spi_enable_chip(dws, 0);
909 /* Disable clk */
910 spi_set_clk(dws, 0);
911 free_irq(dws->irq, dws);
912
913 /* Disconnect from the SPI framework */
914 spi_unregister_master(dws->master);
915}
916
917int dw_spi_suspend_host(struct dw_spi *dws)
918{
919 int ret = 0;
920
921 ret = stop_queue(dws);
922 if (ret)
923 return ret;
924 spi_enable_chip(dws, 0);
925 spi_set_clk(dws, 0);
926 return ret;
927}
928EXPORT_SYMBOL(dw_spi_suspend_host);
929
930int dw_spi_resume_host(struct dw_spi *dws)
931{
932 int ret;
933
934 spi_hw_init(dws);
935 ret = start_queue(dws);
936 if (ret)
937 dev_err(&dws->master->dev, "failed to start queue (%d)\n", ret);
938 return ret;
939}
940EXPORT_SYMBOL(dw_spi_resume_host);
941
942MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
943MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
944MODULE_LICENSE("GPL v2");
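On the slave side, dw_spi_setup() above optionally consumes a struct dw_spi_chip passed through spi->controller_data, letting a board choose poll mode, the frame format and DMA use per device, plus a cs_control() hook. A board-code sketch follows; the dw_spi_chip field names are inferred from what dw_spi_setup() dereferences in this patch, and the slave device (an m25p80 flash at 10 MHz) is purely illustrative:

	#include <linux/init.h>
	#include <linux/spi/spi.h>
	#include <linux/spi/dw_spi.h>

	/* Field names assumed from dw_spi_setup()'s use of controller_data. */
	static struct dw_spi_chip board_flash_chip = {
		.poll_mode	= 1,		/* busy-wait instead of FIFO interrupts */
		.type		= SSI_MOTO_SPI,	/* Motorola SPI frame format */
		.enable_dma	= 0,
	};

	static struct spi_board_info board_spi_devices[] __initdata = {
		{
			.modalias	 = "m25p80",
			.max_speed_hz	 = 10000000,
			.bus_num	 = 0,
			.chip_select	 = 0,
			.controller_data = &board_flash_chip,
		},
	};

	static int __init board_spi_init(void)
	{
		return spi_register_board_info(board_spi_devices,
					       ARRAY_SIZE(board_spi_devices));
	}
	arch_initcall(board_spi_init);

Poll mode simply skips the interrupt path: pump_transfers() masks all interrupts and calls poll_transfer(), which spins on the FIFO status instead.
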
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c
new file mode 100644
index 000000000000..34ba69161734
--- /dev/null
+++ b/drivers/spi/dw_spi_pci.c
@@ -0,0 +1,169 @@
1/*
2 * dw_spi_pci.c - PCI interface driver for DW SPI Core
3 *
4 * Copyright (c) 2009, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include <linux/interrupt.h>
21#include <linux/pci.h>
22#include <linux/spi/dw_spi.h>
23#include <linux/spi/spi.h>
24
25#define DRIVER_NAME "dw_spi_pci"
26
27struct dw_spi_pci {
28 struct pci_dev *pdev;
29 struct dw_spi dws;
30};
31
32static int __devinit spi_pci_probe(struct pci_dev *pdev,
33 const struct pci_device_id *ent)
34{
35 struct dw_spi_pci *dwpci;
36 struct dw_spi *dws;
37 int pci_bar = 0;
38 int ret;
39
40 printk(KERN_INFO "DW: found PCI SPI controller (ID: %04x:%04x)\n",
41 pdev->vendor, pdev->device);
42
43 ret = pci_enable_device(pdev);
44 if (ret)
45 return ret;
46
47 dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL);
48 if (!dwpci) {
49 ret = -ENOMEM;
50 goto err_disable;
51 }
52
53 dwpci->pdev = pdev;
54 dws = &dwpci->dws;
55
56 /* Get basic io resource and map it */
57 dws->paddr = pci_resource_start(pdev, pci_bar);
58 dws->iolen = pci_resource_len(pdev, pci_bar);
59
60 ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
61 if (ret)
62 goto err_kfree;
63
64 dws->regs = ioremap_nocache((unsigned long)dws->paddr,
65 pci_resource_len(pdev, pci_bar));
66 if (!dws->regs) {
67 ret = -ENOMEM;
68 goto err_release_reg;
69 }
70
71 dws->parent_dev = &pdev->dev;
72 dws->bus_num = 0;
73 dws->num_cs = 4;
74 dws->max_freq = 25000000; /* for Moorestown */
75 dws->irq = pdev->irq;
76
77 ret = dw_spi_add_host(dws);
78 if (ret)
79 goto err_unmap;
80
81 /* PCI hook and SPI hook use the same drv data */
82 pci_set_drvdata(pdev, dwpci);
83 return 0;
84
85err_unmap:
86 iounmap(dws->regs);
87err_release_reg:
88 pci_release_region(pdev, pci_bar);
89err_kfree:
90 kfree(dwpci);
91err_disable:
92 pci_disable_device(pdev);
93 return ret;
94}
95
96static void __devexit spi_pci_remove(struct pci_dev *pdev)
97{
98 struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
99
100 pci_set_drvdata(pdev, NULL);
101 iounmap(dwpci->dws.regs);
102 pci_release_region(pdev, 0);
103 kfree(dwpci);
104 pci_disable_device(pdev);
105}
106
107#ifdef CONFIG_PM
108static int spi_suspend(struct pci_dev *pdev, pm_message_t state)
109{
110 struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
111 int ret;
112
113 ret = dw_spi_suspend_host(&dwpci->dws);
114 if (ret)
115 return ret;
116 pci_save_state(pdev);
117 pci_disable_device(pdev);
118 pci_set_power_state(pdev, pci_choose_state(pdev, state));
119 return ret;
120}
121
122static int spi_resume(struct pci_dev *pdev)
123{
124 struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
125 int ret;
126
127 pci_set_power_state(pdev, PCI_D0);
128 pci_restore_state(pdev);
129 ret = pci_enable_device(pdev);
130 if (ret)
131 return ret;
132 return dw_spi_resume_host(&dwpci->dws);
133}
134#else
135#define spi_suspend NULL
136#define spi_resume NULL
137#endif
138
139static const struct pci_device_id pci_ids[] __devinitdata = {
140 /* Intel Moorestown platform SPI controller 0 */
141 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
142 {},
143};
144
145static struct pci_driver dw_spi_driver = {
146 .name = DRIVER_NAME,
147 .id_table = pci_ids,
148 .probe = spi_pci_probe,
149 .remove = __devexit_p(spi_pci_remove),
150 .suspend = spi_suspend,
151 .resume = spi_resume,
152};
153
154static int __init mrst_spi_init(void)
155{
156 return pci_register_driver(&dw_spi_driver);
157}
158
159static void __exit mrst_spi_exit(void)
160{
161 pci_unregister_driver(&dw_spi_driver);
162}
163
164module_init(mrst_spi_init);
165module_exit(mrst_spi_exit);
166
167MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
168MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
169MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 73e24ef5a2f9..1d41058bbab2 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -1294,7 +1294,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
1294 goto out_error_get_res; 1294 goto out_error_get_res;
1295 } 1295 }
1296 1296
1297 drv_data->regs_base = ioremap(res->start, (res->end - res->start + 1)); 1297 drv_data->regs_base = ioremap(res->start, resource_size(res));
1298 if (drv_data->regs_base == NULL) { 1298 if (drv_data->regs_base == NULL) {
1299 dev_err(dev, "Cannot map IO\n"); 1299 dev_err(dev, "Cannot map IO\n");
1300 status = -ENXIO; 1300 status = -ENXIO;
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index e9390d747bfc..1fb2a6ea328c 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -1013,7 +1013,7 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
1013 1013
1014 init_completion(&mpc8xxx_spi->done); 1014 init_completion(&mpc8xxx_spi->done);
1015 1015
1016 mpc8xxx_spi->base = ioremap(mem->start, mem->end - mem->start + 1); 1016 mpc8xxx_spi->base = ioremap(mem->start, resource_size(mem));
1017 if (mpc8xxx_spi->base == NULL) { 1017 if (mpc8xxx_spi->base == NULL) {
1018 ret = -ENOMEM; 1018 ret = -ENOMEM;
1019 goto err_ioremap; 1019 goto err_ioremap;
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 276591569c8b..c010733877ae 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -1,7 +1,7 @@
1/* linux/drivers/spi/spi_s3c24xx.c 1/* linux/drivers/spi/spi_s3c24xx.c
2 * 2 *
3 * Copyright (c) 2006 Ben Dooks 3 * Copyright (c) 2006 Ben Dooks
4 * Copyright (c) 2006 Simtec Electronics 4 * Copyright 2006-2009 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk> 5 * Ben Dooks <ben@simtec.co.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -28,6 +28,11 @@
28#include <plat/regs-spi.h> 28#include <plat/regs-spi.h>
29#include <mach/spi.h> 29#include <mach/spi.h>
30 30
31#include <plat/fiq.h>
32#include <asm/fiq.h>
33
34#include "spi_s3c24xx_fiq.h"
35
31/** 36/**
32 * s3c24xx_spi_devstate - per device data 37 * s3c24xx_spi_devstate - per device data
33 * @hz: Last frequency calculated for @sppre field. 38 * @hz: Last frequency calculated for @sppre field.
@@ -42,6 +47,13 @@ struct s3c24xx_spi_devstate {
42 u8 sppre; 47 u8 sppre;
43}; 48};
44 49
50enum spi_fiq_mode {
51 FIQ_MODE_NONE = 0,
52 FIQ_MODE_TX = 1,
53 FIQ_MODE_RX = 2,
54 FIQ_MODE_TXRX = 3,
55};
56
45struct s3c24xx_spi { 57struct s3c24xx_spi {
46 /* bitbang has to be first */ 58 /* bitbang has to be first */
47 struct spi_bitbang bitbang; 59 struct spi_bitbang bitbang;
@@ -52,6 +64,11 @@ struct s3c24xx_spi {
52 int len; 64 int len;
53 int count; 65 int count;
54 66
67 struct fiq_handler fiq_handler;
68 enum spi_fiq_mode fiq_mode;
69 unsigned char fiq_inuse;
70 unsigned char fiq_claimed;
71
55 void (*set_cs)(struct s3c2410_spi_info *spi, 72 void (*set_cs)(struct s3c2410_spi_info *spi,
56 int cs, int pol); 73 int cs, int pol);
57 74
@@ -67,6 +84,7 @@ struct s3c24xx_spi {
67 struct s3c2410_spi_info *pdata; 84 struct s3c2410_spi_info *pdata;
68}; 85};
69 86
87
70#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT) 88#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
71#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP) 89#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
72 90
@@ -127,7 +145,7 @@ static int s3c24xx_spi_update_state(struct spi_device *spi,
127 } 145 }
128 146
129 if (spi->mode != cs->mode) { 147 if (spi->mode != cs->mode) {
130 u8 spcon = SPCON_DEFAULT; 148 u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK;
131 149
132 if (spi->mode & SPI_CPHA) 150 if (spi->mode & SPI_CPHA)
133 spcon |= S3C2410_SPCON_CPHA_FMTB; 151 spcon |= S3C2410_SPCON_CPHA_FMTB;
@@ -214,13 +232,196 @@ static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
214 return hw->tx ? hw->tx[count] : 0; 232 return hw->tx ? hw->tx[count] : 0;
215} 233}
216 234
235#ifdef CONFIG_SPI_S3C24XX_FIQ
236/* Support for FIQ based pseudo-DMA to improve the transfer speed.
237 *
238 * This code uses the assembly helper in spi_s3c24xx_fiq.S which is
239 * used by the FIQ core to move data between main memory and the peripheral
240 * block. Since this is code running on the processor, there is no problem
241 * with cache coherency of the buffers, so we can use any buffer we like.
242 */
243
244/**
245 * struct spi_fiq_code - FIQ code and header
246 * @length: The length of the code fragment, excluding this header.
247 * @ack_offset: The offset from @data to the word to place the IRQ ACK bit at.
248 * @data: The code itself to install as a FIQ handler.
249 */
250struct spi_fiq_code {
251 u32 length;
252 u32 ack_offset;
253 u8 data[0];
254};
255
256extern struct spi_fiq_code s3c24xx_spi_fiq_txrx;
257extern struct spi_fiq_code s3c24xx_spi_fiq_tx;
258extern struct spi_fiq_code s3c24xx_spi_fiq_rx;
259
260/**
261 * ack_bit - turn IRQ into IRQ acknowledgement bit
262 * @irq: The interrupt number
263 *
264 * Returns the bit to write to the interrupt acknowledge register.
265 */
266static inline u32 ack_bit(unsigned int irq)
267{
268 return 1 << (irq - IRQ_EINT0);
269}
270
271/**
272 * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer
273 * @hw: The hardware state.
274 *
275 * Claim the FIQ handler (only one can be active at any one time) and
276 * then setup the correct transfer code for this transfer.
277 *
278 * This call updates all the necessary state information if successful,
279 * so the caller does not need to do anything more than start the transfer
280 * as normal, since the IRQ will have been re-routed to the FIQ handler.
281*/
282void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
283{
284 struct pt_regs regs;
285 enum spi_fiq_mode mode;
286 struct spi_fiq_code *code;
287 int ret;
288
289 if (!hw->fiq_claimed) {
290 /* try and claim fiq if we haven't got it, and if not
291 * then return and simply use another transfer method */
292
293 ret = claim_fiq(&hw->fiq_handler);
294 if (ret)
295 return;
296 }
297
298 if (hw->tx && !hw->rx)
299 mode = FIQ_MODE_TX;
300 else if (hw->rx && !hw->tx)
301 mode = FIQ_MODE_RX;
302 else
303 mode = FIQ_MODE_TXRX;
304
305 regs.uregs[fiq_rspi] = (long)hw->regs;
306 regs.uregs[fiq_rrx] = (long)hw->rx;
307 regs.uregs[fiq_rtx] = (long)hw->tx + 1;
308 regs.uregs[fiq_rcount] = hw->len - 1;
309 regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ;
310
311 set_fiq_regs(&regs);
312
313 if (hw->fiq_mode != mode) {
314 u32 *ack_ptr;
315
316 hw->fiq_mode = mode;
317
318 switch (mode) {
319 case FIQ_MODE_TX:
320 code = &s3c24xx_spi_fiq_tx;
321 break;
322 case FIQ_MODE_RX:
323 code = &s3c24xx_spi_fiq_rx;
324 break;
325 case FIQ_MODE_TXRX:
326 code = &s3c24xx_spi_fiq_txrx;
327 break;
328 default:
329 code = NULL;
330 }
331
332 BUG_ON(!code);
333
334 ack_ptr = (u32 *)&code->data[code->ack_offset];
335 *ack_ptr = ack_bit(hw->irq);
336
337 set_fiq_handler(&code->data, code->length);
338 }
339
340 s3c24xx_set_fiq(hw->irq, true);
341
342 hw->fiq_mode = mode;
343 hw->fiq_inuse = 1;
344}
345
346/**
347 * s3c24xx_spi_fiqop - FIQ core code callback
348 * @pw: Data registered with the handler
349 * @release: Whether this is a release or a return.
350 *
351 * Called by the FIQ code when another module wants to use the FIQ, so
352 * return whether we are currently using this or not and then update our
353 * internal state.
354 */
355static int s3c24xx_spi_fiqop(void *pw, int release)
356{
357 struct s3c24xx_spi *hw = pw;
358 int ret = 0;
359
360 if (release) {
361 if (hw->fiq_inuse)
362 ret = -EBUSY;
363
364 /* note, we do not need to unroute the FIQ, as the FIQ
365 * vector code de-routes it to signal the end of transfer */
366
367 hw->fiq_mode = FIQ_MODE_NONE;
368 hw->fiq_claimed = 0;
369 } else {
370 hw->fiq_claimed = 1;
371 }
372
373 return ret;
374}
375
376/**
377 * s3c24xx_spi_initfiq - setup the information for the FIQ core
378 * @hw: The hardware state.
379 *
380 * Setup the fiq_handler block to pass to the FIQ core.
381 */
382static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *hw)
383{
384 hw->fiq_handler.dev_id = hw;
385 hw->fiq_handler.name = dev_name(hw->dev);
386 hw->fiq_handler.fiq_op = s3c24xx_spi_fiqop;
387}
388
389/**
390 * s3c24xx_spi_usefiq - return if we should be using FIQ.
391 * @hw: The hardware state.
392 *
393 * Return true if the platform data specifies that this channel is
394 * allowed to use the FIQ.
395 */
396static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *hw)
397{
398 return hw->pdata->use_fiq;
399}
400
401/**
402 * s3c24xx_spi_usingfiq - return if channel is using FIQ
403 * @spi: The hardware state.
404 *
405 * Return whether the channel is currently using the FIQ (separate from
406 * whether the FIQ is claimed).
407 */
408static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *spi)
409{
410 return spi->fiq_inuse;
411}
412#else
413
414static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *s) { }
415static inline void s3c24xx_spi_tryfiq(struct s3c24xx_spi *s) { }
416static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *s) { return false; }
417static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *s) { return false; }
418
419#endif /* CONFIG_SPI_S3C24XX_FIQ */
420
217static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t) 421static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
218{ 422{
219 struct s3c24xx_spi *hw = to_hw(spi); 423 struct s3c24xx_spi *hw = to_hw(spi);
220 424
221 dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
222 t->tx_buf, t->rx_buf, t->len);
223
224 hw->tx = t->tx_buf; 425 hw->tx = t->tx_buf;
225 hw->rx = t->rx_buf; 426 hw->rx = t->rx_buf;
226 hw->len = t->len; 427 hw->len = t->len;
@@ -228,11 +429,14 @@ static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
228 429
229 init_completion(&hw->done); 430 init_completion(&hw->done);
230 431
432 hw->fiq_inuse = 0;
433 if (s3c24xx_spi_usefiq(hw) && t->len >= 3)
434 s3c24xx_spi_tryfiq(hw);
435
231 /* send the first byte */ 436 /* send the first byte */
232 writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT); 437 writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT);
233 438
234 wait_for_completion(&hw->done); 439 wait_for_completion(&hw->done);
235
236 return hw->count; 440 return hw->count;
237} 441}
238 442
@@ -254,17 +458,27 @@ static irqreturn_t s3c24xx_spi_irq(int irq, void *dev)
254 goto irq_done; 458 goto irq_done;
255 } 459 }
256 460
257 hw->count++; 461 if (!s3c24xx_spi_usingfiq(hw)) {
462 hw->count++;
258 463
259 if (hw->rx) 464 if (hw->rx)
260 hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT); 465 hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT);
261 466
262 count++; 467 count++;
468
469 if (count < hw->len)
470 writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
471 else
472 complete(&hw->done);
473 } else {
474 hw->count = hw->len;
475 hw->fiq_inuse = 0;
476
477 if (hw->rx)
478 hw->rx[hw->len-1] = readb(hw->regs + S3C2410_SPRDAT);
263 479
264 if (count < hw->len)
265 writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
266 else
267 complete(&hw->done); 480 complete(&hw->done);
481 }
268 482
269 irq_done: 483 irq_done:
270 return IRQ_HANDLED; 484 return IRQ_HANDLED;
@@ -322,6 +536,10 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
322 platform_set_drvdata(pdev, hw); 536 platform_set_drvdata(pdev, hw);
323 init_completion(&hw->done); 537 init_completion(&hw->done);
324 538
539 /* initialise fiq handler */
540
541 s3c24xx_spi_initfiq(hw);
542
325 /* setup the master state. */ 543 /* setup the master state. */
326 544
327 /* the spi->mode bits understood by this driver: */ 545 /* the spi->mode bits understood by this driver: */
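Whether the FIQ path is ever tried is a per-channel board decision: s3c24xx_spi_usefiq() just returns hw->pdata->use_fiq, and even then the driver only attempts it for transfers of three bytes or more. A rough board-side sketch is below, assuming the companion platform patch (not shown here) adds the use_fiq flag to struct s3c2410_spi_info; the other values are purely illustrative:

	#include <mach/spi.h>		/* struct s3c2410_spi_info */

	/* use_fiq is assumed to be provided by the matching platform patch. */
	static struct s3c2410_spi_info board_spi0_pdata = {
		.num_cs		= 1,
		.bus_num	= 0,
		.use_fiq	= 1,	/* allow FIQ pseudo-DMA on this channel */
	};

	/* attached as platform data of the "s3c2410-spi" platform device in
	 * the board file (e.g. via the s3c_device_spi0 device); the device
	 * registration itself is unchanged by this patch. */
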
diff --git a/drivers/spi/spi_s3c24xx_fiq.S b/drivers/spi/spi_s3c24xx_fiq.S
new file mode 100644
index 000000000000..3793cae361db
--- /dev/null
+++ b/drivers/spi/spi_s3c24xx_fiq.S
@@ -0,0 +1,116 @@
1/* linux/drivers/spi/spi_s3c24xx_fiq.S
2 *
3 * Copyright 2009 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C24XX SPI - FIQ pseudo-DMA transfer code
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <linux/linkage.h>
14#include <asm/assembler.h>
15
16#include <mach/map.h>
17#include <mach/regs-irq.h>
18#include <plat/regs-spi.h>
19
20#include "spi_s3c24xx_fiq.h"
21
22 .text
23
24 @ entry to these routines is as follows, with the register names
25 @ defined in spi_s3c24xx_fiq.h so that they can be shared with the C files which
26 @ set up the calling registers.
27 @
28 @ fiq_rirq The base of the IRQ registers to find S3C2410_SRCPND
29 @ fiq_rtmp Temporary register to hold tx/rx data
30 @ fiq_rspi The base of the SPI register block
31 @ fiq_rtx The tx buffer pointer
32 @ fiq_rrx The rx buffer pointer
33 @ fiq_rcount The number of bytes to move
34
35 @ each entry starts with a word giving its length
36 @ and an offset to the IRQ acknowledgment word
37
38ENTRY(s3c24xx_spi_fiq_rx)
39s3c24xx_spi_fix_rx:
40 .word fiq_rx_end - fiq_rx_start
41 .word fiq_rx_irq_ack - fiq_rx_start
42fiq_rx_start:
43 ldr fiq_rtmp, fiq_rx_irq_ack
44 str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
45
46 ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
47 strb fiq_rtmp, [ fiq_rrx ], #1
48
49 mov fiq_rtmp, #0xff
50 strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
51
52 subs fiq_rcount, fiq_rcount, #1
53 subnes pc, lr, #4 @@ return, still have work to do
54
55 @@ set IRQ controller so that next op will trigger IRQ
56 mov fiq_rtmp, #0
57 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
58 subs pc, lr, #4
59
60fiq_rx_irq_ack:
61 .word 0
62fiq_rx_end:
63
64ENTRY(s3c24xx_spi_fiq_txrx)
65s3c24xx_spi_fiq_txrx:
66 .word fiq_txrx_end - fiq_txrx_start
67 .word fiq_txrx_irq_ack - fiq_txrx_start
68fiq_txrx_start:
69
70 ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
71 strb fiq_rtmp, [ fiq_rrx ], #1
72
73 ldr fiq_rtmp, fiq_txrx_irq_ack
74 str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
75
76 ldrb fiq_rtmp, [ fiq_rtx ], #1
77 strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
78
79 subs fiq_rcount, fiq_rcount, #1
80 subnes pc, lr, #4 @@ return, still have work to do
81
82 mov fiq_rtmp, #0
83 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
84 subs pc, lr, #4
85
86fiq_txrx_irq_ack:
87 .word 0
88
89fiq_txrx_end:
90
91ENTRY(s3c24xx_spi_fiq_tx)
92s3c24xx_spi_fix_tx:
93 .word fiq_tx_end - fiq_tx_start
94 .word fiq_tx_irq_ack - fiq_tx_start
95fiq_tx_start:
96 ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
97
98 ldr fiq_rtmp, fiq_tx_irq_ack
99 str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
100
101 ldrb fiq_rtmp, [ fiq_rtx ], #1
102 strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
103
104 subs fiq_rcount, fiq_rcount, #1
105 subnes pc, lr, #4 @@ return, still have work to do
106
107 mov fiq_rtmp, #0
108 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
109 subs pc, lr, #4
110
111fiq_tx_irq_ack:
112 .word 0
113
114fiq_tx_end:
115
116 .end
diff --git a/drivers/spi/spi_s3c24xx_fiq.h b/drivers/spi/spi_s3c24xx_fiq.h
new file mode 100644
index 000000000000..a5950bb25b51
--- /dev/null
+++ b/drivers/spi/spi_s3c24xx_fiq.h
@@ -0,0 +1,26 @@
1/* linux/drivers/spi/spi_s3c24xx_fiq.h
2 *
3 * Copyright 2009 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C24XX SPI - FIQ pseudo-DMA transfer support
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13/* We have R8 through R13 to play with */
14
15#ifdef __ASSEMBLY__
16#define __REG_NR(x) r##x
17#else
18#define __REG_NR(x) (x)
19#endif
20
21#define fiq_rspi __REG_NR(8)
22#define fiq_rtmp __REG_NR(9)
23#define fiq_rrx __REG_NR(10)
24#define fiq_rtx __REG_NR(11)
25#define fiq_rcount __REG_NR(12)
26#define fiq_rirq __REG_NR(13)
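For reference, the C side of this FIQ pseudo-DMA scheme seeds the banked registers named above and copies one of the assembly fragments into the FIQ vector through the generic ARM FIQ API (claim_fiq(), set_fiq_regs(), set_fiq_handler()). The sketch below is only an illustration of that wiring under stated assumptions: the helper and owner names, and the struct used to describe the two header words (length, ack offset) in front of each fragment, are assumptions, not a quote of s3c24xx_spi_initfiq().

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/io.h>
#include <asm/fiq.h>

#include "spi_s3c24xx_fiq.h"

/* Assumed view of one fragment: the two header words, then the code. */
struct spi_fiq_code {
	u32	length;			/* first .word above */
	u32	ack_offset;		/* second .word above */
	u8	data[0];
};

static struct fiq_handler example_fiq_owner = {
	.name	= "spi_s3c24xx",	/* owner name is illustrative */
};

static int example_setup_fiq(struct spi_fiq_code *code,
			     void __iomem *irq_regs, void __iomem *spi_regs,
			     u8 *rx, u8 *tx, int count)
{
	struct pt_regs regs;
	int ret;

	ret = claim_fiq(&example_fiq_owner);
	if (ret)
		return ret;

	/* Point the banked FIQ registers at the transfer state, using the
	 * register numbers shared through spi_s3c24xx_fiq.h. */
	memset(&regs, 0, sizeof(regs));
	regs.uregs[fiq_rirq]   = (long)irq_regs;
	regs.uregs[fiq_rspi]   = (long)spi_regs;
	regs.uregs[fiq_rrx]    = (long)rx;
	regs.uregs[fiq_rtx]    = (long)tx;
	regs.uregs[fiq_rcount] = count;
	set_fiq_regs(&regs);

	/* Install the instructions that follow the two header words. */
	set_fiq_handler(code->data, code->length);

	return 0;
}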
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
new file mode 100644
index 000000000000..88a456dba967
--- /dev/null
+++ b/drivers/spi/spi_s3c64xx.c
@@ -0,0 +1,1196 @@
1/* linux/drivers/spi/spi_s3c64xx.c
2 *
3 * Copyright (C) 2009 Samsung Electronics Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/workqueue.h>
24#include <linux/delay.h>
25#include <linux/clk.h>
26#include <linux/dma-mapping.h>
27#include <linux/platform_device.h>
28#include <linux/spi/spi.h>
29
30#include <mach/dma.h>
31#include <plat/spi.h>
32
33/* Registers and bit-fields */
34
35#define S3C64XX_SPI_CH_CFG 0x00
36#define S3C64XX_SPI_CLK_CFG 0x04
37#define S3C64XX_SPI_MODE_CFG 0x08
38#define S3C64XX_SPI_SLAVE_SEL 0x0C
39#define S3C64XX_SPI_INT_EN 0x10
40#define S3C64XX_SPI_STATUS 0x14
41#define S3C64XX_SPI_TX_DATA 0x18
42#define S3C64XX_SPI_RX_DATA 0x1C
43#define S3C64XX_SPI_PACKET_CNT 0x20
44#define S3C64XX_SPI_PENDING_CLR 0x24
45#define S3C64XX_SPI_SWAP_CFG 0x28
46#define S3C64XX_SPI_FB_CLK 0x2C
47
48#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
49#define S3C64XX_SPI_CH_SW_RST (1<<5)
50#define S3C64XX_SPI_CH_SLAVE (1<<4)
51#define S3C64XX_SPI_CPOL_L (1<<3)
52#define S3C64XX_SPI_CPHA_B (1<<2)
53#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
54#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
55
56#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
57#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
58#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
59#define S3C64XX_SPI_PSR_MASK 0xff
60
61#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
62#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
63#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
64#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
65#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
66#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
67#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
68#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
69#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
70#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
71#define S3C64XX_SPI_MODE_4BURST (1<<0)
72
73#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
74#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
75
76#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL)
77
78#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \
79 (c)->regs + S3C64XX_SPI_SLAVE_SEL)
80
81#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
82#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
83#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
84#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
85#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
86#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
87#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
88
89#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
90#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
91#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
92#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
93#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
94#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
95
96#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
97
98#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
99#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
100#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
101#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
102#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
103
104#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
105#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
106#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
107#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
108#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
109#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
110#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
111#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
112
113#define S3C64XX_SPI_FBCLK_MSK (3<<0)
114
115#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \
116 (((i)->fifo_lvl_mask + 1))) \
117 ? 1 : 0)
118
119#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \
120 (((i)->fifo_lvl_mask + 1) << 1)) \
121 ? 1 : 0)
122#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
123#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
124
125#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
126#define S3C64XX_SPI_TRAILCNT_OFF 19
127
128#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
129
130#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
131
132#define SUSPND (1<<0)
133#define SPIBUSY (1<<1)
134#define RXBUSY (1<<2)
135#define TXBUSY (1<<3)
136
137/**
138 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
139 * @clk: Pointer to the spi clock.
140 * @master: Pointer to the SPI Protocol master.
141 * @workqueue: Work queue for the SPI xfer requests.
142 * @cntrlr_info: Platform specific data for the controller this driver manages.
143 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
 144 * @work: Work item that pumps messages off @queue to the controller.
 145 * @queue: List of pending SPI xfer requests.
146 * @lock: Controller specific lock.
147 * @state: Set of FLAGS to indicate status.
148 * @rx_dmach: Controller's DMA channel for Rx.
149 * @tx_dmach: Controller's DMA channel for Tx.
150 * @sfr_start: BUS address of SPI controller regs.
151 * @regs: Pointer to ioremap'ed controller registers.
152 * @xfer_completion: To indicate completion of xfer task.
153 * @cur_mode: Stores the active configuration of the controller.
154 * @cur_bpw: Stores the active bits per word settings.
155 * @cur_speed: Stores the active xfer clock speed.
156 */
157struct s3c64xx_spi_driver_data {
158 void __iomem *regs;
159 struct clk *clk;
160 struct platform_device *pdev;
161 struct spi_master *master;
162 struct workqueue_struct *workqueue;
163 struct s3c64xx_spi_cntrlr_info *cntrlr_info;
164 struct spi_device *tgl_spi;
165 struct work_struct work;
166 struct list_head queue;
167 spinlock_t lock;
168 enum dma_ch rx_dmach;
169 enum dma_ch tx_dmach;
170 unsigned long sfr_start;
171 struct completion xfer_completion;
172 unsigned state;
173 unsigned cur_mode, cur_bpw;
174 unsigned cur_speed;
175};
176
177static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
178 .name = "samsung-spi-dma",
179};
180
181static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
182{
183 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
184 void __iomem *regs = sdd->regs;
185 unsigned long loops;
186 u32 val;
187
188 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
189
190 val = readl(regs + S3C64XX_SPI_CH_CFG);
191 val |= S3C64XX_SPI_CH_SW_RST;
192 val &= ~S3C64XX_SPI_CH_HS_EN;
193 writel(val, regs + S3C64XX_SPI_CH_CFG);
194
195 /* Flush TxFIFO*/
196 loops = msecs_to_loops(1);
197 do {
198 val = readl(regs + S3C64XX_SPI_STATUS);
199 } while (TX_FIFO_LVL(val, sci) && loops--);
200
201 /* Flush RxFIFO*/
202 loops = msecs_to_loops(1);
203 do {
204 val = readl(regs + S3C64XX_SPI_STATUS);
205 if (RX_FIFO_LVL(val, sci))
206 readl(regs + S3C64XX_SPI_RX_DATA);
207 else
208 break;
209 } while (loops--);
210
211 val = readl(regs + S3C64XX_SPI_CH_CFG);
212 val &= ~S3C64XX_SPI_CH_SW_RST;
213 writel(val, regs + S3C64XX_SPI_CH_CFG);
214
215 val = readl(regs + S3C64XX_SPI_MODE_CFG);
216 val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
217 writel(val, regs + S3C64XX_SPI_MODE_CFG);
218
219 val = readl(regs + S3C64XX_SPI_CH_CFG);
220 val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
221 writel(val, regs + S3C64XX_SPI_CH_CFG);
222}
223
224static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
225 struct spi_device *spi,
226 struct spi_transfer *xfer, int dma_mode)
227{
228 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
229 void __iomem *regs = sdd->regs;
230 u32 modecfg, chcfg;
231
232 modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
233 modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
234
235 chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
236 chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
237
238 if (dma_mode) {
239 chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
240 } else {
 241 /* Always shift data into the Rx FIFO, even if the xfer is
 242 * Tx only; this lets the PCKT_CNT value generate exactly the
 243 * number of clocks needed.
 244 */
245 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
246 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
247 | S3C64XX_SPI_PACKET_CNT_EN,
248 regs + S3C64XX_SPI_PACKET_CNT);
249 }
250
251 if (xfer->tx_buf != NULL) {
252 sdd->state |= TXBUSY;
253 chcfg |= S3C64XX_SPI_CH_TXCH_ON;
254 if (dma_mode) {
255 modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
256 s3c2410_dma_config(sdd->tx_dmach, 1);
257 s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
258 xfer->tx_dma, xfer->len);
259 s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
260 } else {
261 unsigned char *buf = (unsigned char *) xfer->tx_buf;
262 int i = 0;
263 while (i < xfer->len)
264 writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA);
265 }
266 }
267
268 if (xfer->rx_buf != NULL) {
269 sdd->state |= RXBUSY;
270
271 if (sci->high_speed && sdd->cur_speed >= 30000000UL
272 && !(sdd->cur_mode & SPI_CPHA))
273 chcfg |= S3C64XX_SPI_CH_HS_EN;
274
275 if (dma_mode) {
276 modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
277 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
278 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
279 | S3C64XX_SPI_PACKET_CNT_EN,
280 regs + S3C64XX_SPI_PACKET_CNT);
281 s3c2410_dma_config(sdd->rx_dmach, 1);
282 s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
283 xfer->rx_dma, xfer->len);
284 s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
285 }
286 }
287
288 writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
289 writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
290}
291
292static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
293 struct spi_device *spi)
294{
295 struct s3c64xx_spi_csinfo *cs;
296
297 if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
298 if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
299 /* Deselect the last toggled device */
300 cs = sdd->tgl_spi->controller_data;
301 cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
302 }
303 sdd->tgl_spi = NULL;
304 }
305
306 cs = spi->controller_data;
307 cs->set_level(spi->mode & SPI_CS_HIGH ? 1 : 0);
308}
309
310static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
311 struct spi_transfer *xfer, int dma_mode)
312{
313 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
314 void __iomem *regs = sdd->regs;
315 unsigned long val;
316 int ms;
317
318 /* millisecs to xfer 'len' bytes @ 'cur_speed' */
319 ms = xfer->len * 8 * 1000 / sdd->cur_speed;
320 ms += 5; /* some tolerance */
321
322 if (dma_mode) {
323 val = msecs_to_jiffies(ms) + 10;
324 val = wait_for_completion_timeout(&sdd->xfer_completion, val);
325 } else {
326 val = msecs_to_loops(ms);
327 do {
328 val = readl(regs + S3C64XX_SPI_STATUS);
329 } while (RX_FIFO_LVL(val, sci) < xfer->len && --val);
330 }
331
332 if (!val)
333 return -EIO;
334
335 if (dma_mode) {
336 u32 status;
337
 338 /*
 339 * DMA Tx completes as soon as the data has been written into the FIFO,
 340 * without waiting for the actual transmission on the bus to finish.
 341 * DMA Rx completes only after the DMA has read the data back out of
 342 * the FIFO, which requires the bus transmission to finish, so no extra
 343 * wait is needed if the xfer involved Rx (with or without Tx).
 344 */
345 if (xfer->rx_buf == NULL) {
346 val = msecs_to_loops(10);
347 status = readl(regs + S3C64XX_SPI_STATUS);
348 while ((TX_FIFO_LVL(status, sci)
349 || !S3C64XX_SPI_ST_TX_DONE(status, sci))
350 && --val) {
351 cpu_relax();
352 status = readl(regs + S3C64XX_SPI_STATUS);
353 }
354
355 if (!val)
356 return -EIO;
357 }
358 } else {
359 unsigned char *buf;
360 int i;
361
362 /* If it was only Tx */
363 if (xfer->rx_buf == NULL) {
364 sdd->state &= ~TXBUSY;
365 return 0;
366 }
367
368 i = 0;
369 buf = xfer->rx_buf;
370 while (i < xfer->len)
371 buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA);
372
373 sdd->state &= ~RXBUSY;
374 }
375
376 return 0;
377}
378
379static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
380 struct spi_device *spi)
381{
382 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
383
384 if (sdd->tgl_spi == spi)
385 sdd->tgl_spi = NULL;
386
387 cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
388}
389
390static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
391{
392 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
393 void __iomem *regs = sdd->regs;
394 u32 val;
395
396 /* Disable Clock */
397 val = readl(regs + S3C64XX_SPI_CLK_CFG);
398 val &= ~S3C64XX_SPI_ENCLK_ENABLE;
399 writel(val, regs + S3C64XX_SPI_CLK_CFG);
400
401 /* Set Polarity and Phase */
402 val = readl(regs + S3C64XX_SPI_CH_CFG);
403 val &= ~(S3C64XX_SPI_CH_SLAVE |
404 S3C64XX_SPI_CPOL_L |
405 S3C64XX_SPI_CPHA_B);
406
407 if (sdd->cur_mode & SPI_CPOL)
408 val |= S3C64XX_SPI_CPOL_L;
409
410 if (sdd->cur_mode & SPI_CPHA)
411 val |= S3C64XX_SPI_CPHA_B;
412
413 writel(val, regs + S3C64XX_SPI_CH_CFG);
414
415 /* Set Channel & DMA Mode */
416 val = readl(regs + S3C64XX_SPI_MODE_CFG);
417 val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
418 | S3C64XX_SPI_MODE_CH_TSZ_MASK);
419
420 switch (sdd->cur_bpw) {
421 case 32:
422 val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
423 break;
424 case 16:
425 val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
426 break;
427 default:
428 val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
429 break;
430 }
431 val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */
432
433 writel(val, regs + S3C64XX_SPI_MODE_CFG);
434
435 /* Configure Clock */
436 val = readl(regs + S3C64XX_SPI_CLK_CFG);
437 val &= ~S3C64XX_SPI_PSR_MASK;
438 val |= ((clk_get_rate(sci->src_clk) / sdd->cur_speed / 2 - 1)
439 & S3C64XX_SPI_PSR_MASK);
440 writel(val, regs + S3C64XX_SPI_CLK_CFG);
441
442 /* Enable Clock */
443 val = readl(regs + S3C64XX_SPI_CLK_CFG);
444 val |= S3C64XX_SPI_ENCLK_ENABLE;
445 writel(val, regs + S3C64XX_SPI_CLK_CFG);
446}
447
448void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
449 int size, enum s3c2410_dma_buffresult res)
450{
451 struct s3c64xx_spi_driver_data *sdd = buf_id;
452 unsigned long flags;
453
454 spin_lock_irqsave(&sdd->lock, flags);
455
456 if (res == S3C2410_RES_OK)
457 sdd->state &= ~RXBUSY;
458 else
459 dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
460
 461 /* If the Tx side is also done, the whole xfer is complete */
462 if (!(sdd->state & TXBUSY))
463 complete(&sdd->xfer_completion);
464
465 spin_unlock_irqrestore(&sdd->lock, flags);
466}
467
468void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
469 int size, enum s3c2410_dma_buffresult res)
470{
471 struct s3c64xx_spi_driver_data *sdd = buf_id;
472 unsigned long flags;
473
474 spin_lock_irqsave(&sdd->lock, flags);
475
476 if (res == S3C2410_RES_OK)
477 sdd->state &= ~TXBUSY;
478 else
479 dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size);
480
 481 /* If the Rx side is also done, the whole xfer is complete */
482 if (!(sdd->state & RXBUSY))
483 complete(&sdd->xfer_completion);
484
485 spin_unlock_irqrestore(&sdd->lock, flags);
486}
487
488#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
489
490static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
491 struct spi_message *msg)
492{
493 struct device *dev = &sdd->pdev->dev;
494 struct spi_transfer *xfer;
495
496 if (msg->is_dma_mapped)
497 return 0;
498
499 /* First mark all xfer unmapped */
500 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
501 xfer->rx_dma = XFER_DMAADDR_INVALID;
502 xfer->tx_dma = XFER_DMAADDR_INVALID;
503 }
504
505 /* Map until end or first fail */
506 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
507
508 if (xfer->tx_buf != NULL) {
509 xfer->tx_dma = dma_map_single(dev, xfer->tx_buf,
510 xfer->len, DMA_TO_DEVICE);
511 if (dma_mapping_error(dev, xfer->tx_dma)) {
512 dev_err(dev, "dma_map_single Tx failed\n");
513 xfer->tx_dma = XFER_DMAADDR_INVALID;
514 return -ENOMEM;
515 }
516 }
517
518 if (xfer->rx_buf != NULL) {
519 xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
520 xfer->len, DMA_FROM_DEVICE);
521 if (dma_mapping_error(dev, xfer->rx_dma)) {
522 dev_err(dev, "dma_map_single Rx failed\n");
523 dma_unmap_single(dev, xfer->tx_dma,
524 xfer->len, DMA_TO_DEVICE);
525 xfer->tx_dma = XFER_DMAADDR_INVALID;
526 xfer->rx_dma = XFER_DMAADDR_INVALID;
527 return -ENOMEM;
528 }
529 }
530 }
531
532 return 0;
533}
534
535static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
536 struct spi_message *msg)
537{
538 struct device *dev = &sdd->pdev->dev;
539 struct spi_transfer *xfer;
540
541 if (msg->is_dma_mapped)
542 return;
543
544 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
545
546 if (xfer->rx_buf != NULL
547 && xfer->rx_dma != XFER_DMAADDR_INVALID)
548 dma_unmap_single(dev, xfer->rx_dma,
549 xfer->len, DMA_FROM_DEVICE);
550
551 if (xfer->tx_buf != NULL
552 && xfer->tx_dma != XFER_DMAADDR_INVALID)
553 dma_unmap_single(dev, xfer->tx_dma,
554 xfer->len, DMA_TO_DEVICE);
555 }
556}
557
558static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
559 struct spi_message *msg)
560{
561 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
562 struct spi_device *spi = msg->spi;
563 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
564 struct spi_transfer *xfer;
565 int status = 0, cs_toggle = 0;
566 u32 speed;
567 u8 bpw;
568
 569 /* If the master's (controller) state differs from what the slave needs */
570 if (sdd->cur_speed != spi->max_speed_hz
571 || sdd->cur_mode != spi->mode
572 || sdd->cur_bpw != spi->bits_per_word) {
573 sdd->cur_bpw = spi->bits_per_word;
574 sdd->cur_speed = spi->max_speed_hz;
575 sdd->cur_mode = spi->mode;
576 s3c64xx_spi_config(sdd);
577 }
578
579 /* Map all the transfers if needed */
580 if (s3c64xx_spi_map_mssg(sdd, msg)) {
581 dev_err(&spi->dev,
582 "Xfer: Unable to map message buffers!\n");
583 status = -ENOMEM;
584 goto out;
585 }
586
587 /* Configure feedback delay */
588 writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
589
590 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
591
592 unsigned long flags;
593 int use_dma;
594
595 INIT_COMPLETION(sdd->xfer_completion);
596
597 /* Only BPW and Speed may change across transfers */
598 bpw = xfer->bits_per_word ? : spi->bits_per_word;
599 speed = xfer->speed_hz ? : spi->max_speed_hz;
600
601 if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
602 sdd->cur_bpw = bpw;
603 sdd->cur_speed = speed;
604 s3c64xx_spi_config(sdd);
605 }
606
607 /* Polling method for xfers not bigger than FIFO capacity */
608 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
609 use_dma = 0;
610 else
611 use_dma = 1;
612
613 spin_lock_irqsave(&sdd->lock, flags);
614
 615 /* Clear the busy flags; enable_datapath() sets only what this xfer uses */
616 sdd->state &= ~RXBUSY;
617 sdd->state &= ~TXBUSY;
618
619 enable_datapath(sdd, spi, xfer, use_dma);
620
621 /* Slave Select */
622 enable_cs(sdd, spi);
623
624 /* Start the signals */
625 S3C64XX_SPI_ACT(sdd);
626
627 spin_unlock_irqrestore(&sdd->lock, flags);
628
629 status = wait_for_xfer(sdd, xfer, use_dma);
630
 631 /* Quiesce the signals */
632 S3C64XX_SPI_DEACT(sdd);
633
634 if (status) {
 635 dev_err(&spi->dev, "I/O Error: "
 636 "rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
 637 xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
 638 (sdd->state & RXBUSY) ? 'f' : 'p',
 639 (sdd->state & TXBUSY) ? 'f' : 'p',
 640 xfer->len);
641
642 if (use_dma) {
643 if (xfer->tx_buf != NULL
644 && (sdd->state & TXBUSY))
645 s3c2410_dma_ctrl(sdd->tx_dmach,
646 S3C2410_DMAOP_FLUSH);
647 if (xfer->rx_buf != NULL
648 && (sdd->state & RXBUSY))
649 s3c2410_dma_ctrl(sdd->rx_dmach,
650 S3C2410_DMAOP_FLUSH);
651 }
652
653 goto out;
654 }
655
656 if (xfer->delay_usecs)
657 udelay(xfer->delay_usecs);
658
659 if (xfer->cs_change) {
 660 /* Hint that the next mssg is going to be
 661 for the same device */
662 if (list_is_last(&xfer->transfer_list,
663 &msg->transfers))
664 cs_toggle = 1;
665 else
666 disable_cs(sdd, spi);
667 }
668
669 msg->actual_length += xfer->len;
670
671 flush_fifo(sdd);
672 }
673
674out:
675 if (!cs_toggle || status)
676 disable_cs(sdd, spi);
677 else
678 sdd->tgl_spi = spi;
679
680 s3c64xx_spi_unmap_mssg(sdd, msg);
681
682 msg->status = status;
683
684 if (msg->complete)
685 msg->complete(msg->context);
686}
687
688static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
689{
690 if (s3c2410_dma_request(sdd->rx_dmach,
691 &s3c64xx_spi_dma_client, NULL) < 0) {
692 dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
693 return 0;
694 }
695 s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
696 s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
697 sdd->sfr_start + S3C64XX_SPI_RX_DATA);
698
699 if (s3c2410_dma_request(sdd->tx_dmach,
700 &s3c64xx_spi_dma_client, NULL) < 0) {
701 dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
702 s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
703 return 0;
704 }
705 s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
706 s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
707 sdd->sfr_start + S3C64XX_SPI_TX_DATA);
708
709 return 1;
710}
711
712static void s3c64xx_spi_work(struct work_struct *work)
713{
714 struct s3c64xx_spi_driver_data *sdd = container_of(work,
715 struct s3c64xx_spi_driver_data, work);
716 unsigned long flags;
717
718 /* Acquire DMA channels */
719 while (!acquire_dma(sdd))
720 msleep(10);
721
722 spin_lock_irqsave(&sdd->lock, flags);
723
724 while (!list_empty(&sdd->queue)
725 && !(sdd->state & SUSPND)) {
726
727 struct spi_message *msg;
728
729 msg = container_of(sdd->queue.next, struct spi_message, queue);
730
731 list_del_init(&msg->queue);
732
733 /* Set Xfer busy flag */
734 sdd->state |= SPIBUSY;
735
736 spin_unlock_irqrestore(&sdd->lock, flags);
737
738 handle_msg(sdd, msg);
739
740 spin_lock_irqsave(&sdd->lock, flags);
741
742 sdd->state &= ~SPIBUSY;
743 }
744
745 spin_unlock_irqrestore(&sdd->lock, flags);
746
747 /* Free DMA channels */
748 s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
749 s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
750}
751
752static int s3c64xx_spi_transfer(struct spi_device *spi,
753 struct spi_message *msg)
754{
755 struct s3c64xx_spi_driver_data *sdd;
756 unsigned long flags;
757
758 sdd = spi_master_get_devdata(spi->master);
759
760 spin_lock_irqsave(&sdd->lock, flags);
761
762 if (sdd->state & SUSPND) {
763 spin_unlock_irqrestore(&sdd->lock, flags);
764 return -ESHUTDOWN;
765 }
766
767 msg->status = -EINPROGRESS;
768 msg->actual_length = 0;
769
770 list_add_tail(&msg->queue, &sdd->queue);
771
772 queue_work(sdd->workqueue, &sdd->work);
773
774 spin_unlock_irqrestore(&sdd->lock, flags);
775
776 return 0;
777}
778
779/*
780 * Here we only check the validity of requested configuration
781 * and save the configuration in a local data-structure.
782 * The controller is actually configured only just before we
783 * get a message to transfer.
784 */
785static int s3c64xx_spi_setup(struct spi_device *spi)
786{
787 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
788 struct s3c64xx_spi_driver_data *sdd;
789 struct s3c64xx_spi_cntrlr_info *sci;
790 struct spi_message *msg;
791 u32 psr, speed;
792 unsigned long flags;
793 int err = 0;
794
795 if (cs == NULL || cs->set_level == NULL) {
796 dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
797 return -ENODEV;
798 }
799
800 sdd = spi_master_get_devdata(spi->master);
801 sci = sdd->cntrlr_info;
802
803 spin_lock_irqsave(&sdd->lock, flags);
804
805 list_for_each_entry(msg, &sdd->queue, queue) {
 806 /* Check if a mssg is already queued for this device */
807 if (msg->spi == spi) {
808 dev_err(&spi->dev,
809 "setup: attempt while mssg in queue!\n");
810 spin_unlock_irqrestore(&sdd->lock, flags);
811 return -EBUSY;
812 }
813 }
814
815 if (sdd->state & SUSPND) {
816 spin_unlock_irqrestore(&sdd->lock, flags);
817 dev_err(&spi->dev,
818 "setup: SPI-%d not active!\n", spi->master->bus_num);
819 return -ESHUTDOWN;
820 }
821
822 spin_unlock_irqrestore(&sdd->lock, flags);
823
824 if (spi->bits_per_word != 8
825 && spi->bits_per_word != 16
826 && spi->bits_per_word != 32) {
827 dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
828 spi->bits_per_word);
829 err = -EINVAL;
830 goto setup_exit;
831 }
832
833 /* Check if we can provide the requested rate */
834 speed = clk_get_rate(sci->src_clk) / 2 / (0 + 1); /* Max possible */
835
836 if (spi->max_speed_hz > speed)
837 spi->max_speed_hz = speed;
838
839 psr = clk_get_rate(sci->src_clk) / 2 / spi->max_speed_hz - 1;
840 psr &= S3C64XX_SPI_PSR_MASK;
841 if (psr == S3C64XX_SPI_PSR_MASK)
842 psr--;
843
844 speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
845 if (spi->max_speed_hz < speed) {
846 if (psr+1 < S3C64XX_SPI_PSR_MASK) {
847 psr++;
848 } else {
849 err = -EINVAL;
850 goto setup_exit;
851 }
852 }
853
854 speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
855 if (spi->max_speed_hz >= speed)
856 spi->max_speed_hz = speed;
857 else
858 err = -EINVAL;
859
860setup_exit:
861
862 /* setup() returns with device de-selected */
863 disable_cs(sdd, spi);
864
865 return err;
866}
867
868static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
869{
870 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
871 void __iomem *regs = sdd->regs;
872 unsigned int val;
873
874 sdd->cur_speed = 0;
875
876 S3C64XX_SPI_DEACT(sdd);
877
878 /* Disable Interrupts - we use Polling if not DMA mode */
879 writel(0, regs + S3C64XX_SPI_INT_EN);
880
881 writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
882 regs + S3C64XX_SPI_CLK_CFG);
883 writel(0, regs + S3C64XX_SPI_MODE_CFG);
884 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
885
886 /* Clear any irq pending bits */
887 writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
888 regs + S3C64XX_SPI_PENDING_CLR);
889
890 writel(0, regs + S3C64XX_SPI_SWAP_CFG);
891
892 val = readl(regs + S3C64XX_SPI_MODE_CFG);
893 val &= ~S3C64XX_SPI_MODE_4BURST;
894 val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
895 val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
896 writel(val, regs + S3C64XX_SPI_MODE_CFG);
897
898 flush_fifo(sdd);
899}
900
901static int __init s3c64xx_spi_probe(struct platform_device *pdev)
902{
903 struct resource *mem_res, *dmatx_res, *dmarx_res;
904 struct s3c64xx_spi_driver_data *sdd;
905 struct s3c64xx_spi_cntrlr_info *sci;
906 struct spi_master *master;
907 int ret;
908
909 if (pdev->id < 0) {
910 dev_err(&pdev->dev,
911 "Invalid platform device id-%d\n", pdev->id);
912 return -ENODEV;
913 }
914
915 if (pdev->dev.platform_data == NULL) {
916 dev_err(&pdev->dev, "platform_data missing!\n");
917 return -ENODEV;
918 }
919
920 /* Check for availability of necessary resource */
921
922 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
923 if (dmatx_res == NULL) {
924 dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
925 return -ENXIO;
926 }
927
928 dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
929 if (dmarx_res == NULL) {
930 dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
931 return -ENXIO;
932 }
933
934 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
935 if (mem_res == NULL) {
936 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
937 return -ENXIO;
938 }
939
940 master = spi_alloc_master(&pdev->dev,
941 sizeof(struct s3c64xx_spi_driver_data));
942 if (master == NULL) {
943 dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
944 return -ENOMEM;
945 }
946
947 sci = pdev->dev.platform_data;
948
949 platform_set_drvdata(pdev, master);
950
951 sdd = spi_master_get_devdata(master);
952 sdd->master = master;
953 sdd->cntrlr_info = sci;
954 sdd->pdev = pdev;
955 sdd->sfr_start = mem_res->start;
956 sdd->tx_dmach = dmatx_res->start;
957 sdd->rx_dmach = dmarx_res->start;
958
959 sdd->cur_bpw = 8;
960
961 master->bus_num = pdev->id;
962 master->setup = s3c64xx_spi_setup;
963 master->transfer = s3c64xx_spi_transfer;
964 master->num_chipselect = sci->num_cs;
965 master->dma_alignment = 8;
966 /* the spi->mode bits understood by this driver: */
967 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
968
969 if (request_mem_region(mem_res->start,
970 resource_size(mem_res), pdev->name) == NULL) {
971 dev_err(&pdev->dev, "Req mem region failed\n");
972 ret = -ENXIO;
973 goto err0;
974 }
975
976 sdd->regs = ioremap(mem_res->start, resource_size(mem_res));
977 if (sdd->regs == NULL) {
978 dev_err(&pdev->dev, "Unable to remap IO\n");
979 ret = -ENXIO;
980 goto err1;
981 }
982
983 if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) {
984 dev_err(&pdev->dev, "Unable to config gpio\n");
985 ret = -EBUSY;
986 goto err2;
987 }
988
989 /* Setup clocks */
990 sdd->clk = clk_get(&pdev->dev, "spi");
991 if (IS_ERR(sdd->clk)) {
992 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
993 ret = PTR_ERR(sdd->clk);
994 goto err3;
995 }
996
997 if (clk_enable(sdd->clk)) {
998 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
999 ret = -EBUSY;
1000 goto err4;
1001 }
1002
1003 if (sci->src_clk_nr == S3C64XX_SPI_SRCCLK_PCLK)
1004 sci->src_clk = sdd->clk;
1005 else
1006 sci->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
1007 if (IS_ERR(sci->src_clk)) {
1008 dev_err(&pdev->dev,
1009 "Unable to acquire clock '%s'\n", sci->src_clk_name);
1010 ret = PTR_ERR(sci->src_clk);
1011 goto err5;
1012 }
1013
1014 if (sci->src_clk != sdd->clk && clk_enable(sci->src_clk)) {
1015 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
1016 sci->src_clk_name);
1017 ret = -EBUSY;
1018 goto err6;
1019 }
1020
1021 sdd->workqueue = create_singlethread_workqueue(
1022 dev_name(master->dev.parent));
1023 if (sdd->workqueue == NULL) {
1024 dev_err(&pdev->dev, "Unable to create workqueue\n");
1025 ret = -ENOMEM;
1026 goto err7;
1027 }
1028
 1029 /* Setup Default Mode */
1030 s3c64xx_spi_hwinit(sdd, pdev->id);
1031
1032 spin_lock_init(&sdd->lock);
1033 init_completion(&sdd->xfer_completion);
1034 INIT_WORK(&sdd->work, s3c64xx_spi_work);
1035 INIT_LIST_HEAD(&sdd->queue);
1036
1037 if (spi_register_master(master)) {
1038 dev_err(&pdev->dev, "cannot register SPI master\n");
1039 ret = -EBUSY;
1040 goto err8;
1041 }
1042
 1043 dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
 1044 "with %d Slaves attached\n",
 1045 pdev->id, master->num_chipselect);
 1046 dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]"
 1047 "\tDMA=[Rx-%d, Tx-%d]\n",
 1048 mem_res->start, mem_res->end,
 1049 sdd->rx_dmach, sdd->tx_dmach);
1050
1051 return 0;
1052
1053err8:
1054 destroy_workqueue(sdd->workqueue);
1055err7:
1056 if (sci->src_clk != sdd->clk)
1057 clk_disable(sci->src_clk);
1058err6:
1059 if (sci->src_clk != sdd->clk)
1060 clk_put(sci->src_clk);
1061err5:
1062 clk_disable(sdd->clk);
1063err4:
1064 clk_put(sdd->clk);
1065err3:
1066err2:
1067 iounmap((void *) sdd->regs);
1068err1:
1069 release_mem_region(mem_res->start, resource_size(mem_res));
1070err0:
1071 platform_set_drvdata(pdev, NULL);
1072 spi_master_put(master);
1073
1074 return ret;
1075}
1076
1077static int s3c64xx_spi_remove(struct platform_device *pdev)
1078{
1079 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1080 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1081 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
1082 struct resource *mem_res;
1083 unsigned long flags;
1084
1085 spin_lock_irqsave(&sdd->lock, flags);
1086 sdd->state |= SUSPND;
1087 spin_unlock_irqrestore(&sdd->lock, flags);
1088
1089 while (sdd->state & SPIBUSY)
1090 msleep(10);
1091
1092 spi_unregister_master(master);
1093
1094 destroy_workqueue(sdd->workqueue);
1095
1096 if (sci->src_clk != sdd->clk)
1097 clk_disable(sci->src_clk);
1098
1099 if (sci->src_clk != sdd->clk)
1100 clk_put(sci->src_clk);
1101
1102 clk_disable(sdd->clk);
1103 clk_put(sdd->clk);
1104
1105 iounmap((void *) sdd->regs);
1106
1107 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1108 release_mem_region(mem_res->start, resource_size(mem_res));
1109
1110 platform_set_drvdata(pdev, NULL);
1111 spi_master_put(master);
1112
1113 return 0;
1114}
1115
1116#ifdef CONFIG_PM
1117static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1118{
1119 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1120 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1121 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
1122 struct s3c64xx_spi_csinfo *cs;
1123 unsigned long flags;
1124
1125 spin_lock_irqsave(&sdd->lock, flags);
1126 sdd->state |= SUSPND;
1127 spin_unlock_irqrestore(&sdd->lock, flags);
1128
1129 while (sdd->state & SPIBUSY)
1130 msleep(10);
1131
1132 /* Disable the clock */
1133 if (sci->src_clk != sdd->clk)
1134 clk_disable(sci->src_clk);
1135
1136 clk_disable(sdd->clk);
1137
1138 sdd->cur_speed = 0; /* Output Clock is stopped */
1139
1140 return 0;
1141}
1142
1143static int s3c64xx_spi_resume(struct platform_device *pdev)
1144{
1145 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1146 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1147 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
1148 unsigned long flags;
1149
1150 sci->cfg_gpio(pdev);
1151
1152 /* Enable the clock */
1153 if (sci->src_clk != sdd->clk)
1154 clk_enable(sci->src_clk);
1155
1156 clk_enable(sdd->clk);
1157
1158 s3c64xx_spi_hwinit(sdd, pdev->id);
1159
1160 spin_lock_irqsave(&sdd->lock, flags);
1161 sdd->state &= ~SUSPND;
1162 spin_unlock_irqrestore(&sdd->lock, flags);
1163
1164 return 0;
1165}
1166#else
1167#define s3c64xx_spi_suspend NULL
1168#define s3c64xx_spi_resume NULL
1169#endif /* CONFIG_PM */
1170
1171static struct platform_driver s3c64xx_spi_driver = {
1172 .driver = {
1173 .name = "s3c64xx-spi",
1174 .owner = THIS_MODULE,
1175 },
1176 .remove = s3c64xx_spi_remove,
1177 .suspend = s3c64xx_spi_suspend,
1178 .resume = s3c64xx_spi_resume,
1179};
1180MODULE_ALIAS("platform:s3c64xx-spi");
1181
1182static int __init s3c64xx_spi_init(void)
1183{
1184 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
1185}
1186module_init(s3c64xx_spi_init);
1187
1188static void __exit s3c64xx_spi_exit(void)
1189{
1190 platform_driver_unregister(&s3c64xx_spi_driver);
1191}
1192module_exit(s3c64xx_spi_exit);
1193
1194MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1195MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
1196MODULE_LICENSE("GPL");
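For orientation, this controller gets its per-board description through platform data (struct s3c64xx_spi_cntrlr_info) and learns about slave devices via spi_board_info, with the chip select driven through the set_level() callback that s3c64xx_spi_setup() insists on. The board-file sketch below is only a hedged illustration: it uses just the fields this file actually dereferences, and every concrete value, GPIO number, clock selector and helper name is an assumption rather than real board data.

/* Hypothetical board wiring; all values are placeholders. */
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <plat/spi.h>

#define BOARD_SPI0_CS_GPIO	3	/* placeholder GPIO number */

static void board_spi0_cs_set_level(int high)
{
	gpio_set_value(BOARD_SPI0_CS_GPIO, high);
}

static struct s3c64xx_spi_csinfo board_spi0_csinfo = {
	.set_level	= board_spi0_cs_set_level,
	.fb_delay	= 0x2,			/* feedback clock delay */
};

static int board_spi0_cfg_gpio(struct platform_device *pdev)
{
	/* pin-mux the SPI0 CLK/MISO/MOSI lines here; omitted */
	return 0;
}

static struct s3c64xx_spi_cntrlr_info board_spi0_pdata = {
	.fifo_lvl_mask	= 0x7f,			/* assumed FIFO depth mask */
	.rx_lvl_offset	= 13,			/* assumed Rx level bit offset */
	.num_cs		= 1,
	.src_clk_nr	= S3C64XX_SPI_SRCCLK_PCLK,
	.cfg_gpio	= board_spi0_cfg_gpio,
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	 = "spidev",
		.bus_num	 = 0,		/* matches the controller's pdev->id */
		.chip_select	 = 0,
		.max_speed_hz	 = 10000000,
		.mode		 = SPI_MODE_0,
		.controller_data = &board_spi0_csinfo,
	},
};

/* In the machine init code the "s3c64xx-spi.0" platform device would carry
 * dev.platform_data = &board_spi0_pdata, and the slaves are announced with
 * spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices)). */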
diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c
index 7d36720eb982..a65c12ffa733 100644
--- a/drivers/spi/spi_sh_sci.c
+++ b/drivers/spi/spi_sh_sci.c
@@ -148,7 +148,7 @@ static int sh_sci_spi_probe(struct platform_device *dev)
 		ret = -ENOENT;
 		goto err1;
 	}
-	sp->membase = ioremap(r->start, r->end - r->start + 1);
+	sp->membase = ioremap(r->start, resource_size(r));
 	if (!sp->membase) {
 		ret = -ENXIO;
 		goto err1;
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
index 19f75627c3de..dfa024b633e1 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi_txx9.c
@@ -375,12 +375,10 @@ static int __init txx9spi_probe(struct platform_device *dev)
 	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
 	if (!res)
 		goto exit_busy;
-	if (!devm_request_mem_region(&dev->dev,
-			res->start, res->end - res->start + 1,
+	if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res),
 			"spi_txx9"))
 		goto exit_busy;
-	c->membase = devm_ioremap(&dev->dev,
-			res->start, res->end - res->start + 1);
+	c->membase = devm_ioremap(&dev->dev, res->start, resource_size(res));
 	if (!c->membase)
 		goto exit_busy;
 
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 9c446e6003d5..ea1bec3c9a13 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -53,7 +53,7 @@
 #define SPIDEV_MAJOR			153	/* assigned */
 #define N_SPI_MINORS			32	/* ... up to 256 */
 
-static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG];
+static DECLARE_BITMAP(minors, N_SPI_MINORS);
 
 
 /* Bit masks for spi_device.mode management.  Note that incorrect
@@ -558,7 +558,7 @@ static struct class *spidev_class;
 
 /*-------------------------------------------------------------------------*/
 
-static int spidev_probe(struct spi_device *spi)
+static int __devinit spidev_probe(struct spi_device *spi)
 {
 	struct spidev_data	*spidev;
 	int			status;
@@ -607,7 +607,7 @@ static int spidev_probe(struct spi_device *spi)
 	return status;
 }
 
-static int spidev_remove(struct spi_device *spi)
+static int __devexit spidev_remove(struct spi_device *spi)
 {
 	struct spidev_data	*spidev = spi_get_drvdata(spi);
 
@@ -629,7 +629,7 @@ static int spidev_remove(struct spi_device *spi)
 	return 0;
 }
 
-static struct spi_driver spidev_spi = {
+static struct spi_driver spidev_spi_driver = {
 	.driver = {
 		.name =		"spidev",
 		.owner =	THIS_MODULE,
@@ -661,14 +661,14 @@ static int __init spidev_init(void)
 
 	spidev_class = class_create(THIS_MODULE, "spidev");
 	if (IS_ERR(spidev_class)) {
-		unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
+		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
 		return PTR_ERR(spidev_class);
 	}
 
-	status = spi_register_driver(&spidev_spi);
+	status = spi_register_driver(&spidev_spi_driver);
 	if (status < 0) {
 		class_destroy(spidev_class);
-		unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
+		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
 	}
 	return status;
 }
@@ -676,9 +676,9 @@ module_init(spidev_init);
 
 static void __exit spidev_exit(void)
 {
-	spi_unregister_driver(&spidev_spi);
+	spi_unregister_driver(&spidev_spi_driver);
 	class_destroy(spidev_class);
-	unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
+	unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
 }
 module_exit(spidev_exit);
 
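As an aside on the DECLARE_BITMAP() change in the first hunk above: declaring the minors as a proper bitmap lets the minor-number bookkeeping go through the generic bitmap helpers instead of a hand-sized unsigned long array. A small, hedged sketch of the usual pattern (function names are illustrative, not spidev's own):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define N_SPI_MINORS	32

static DECLARE_BITMAP(minors, N_SPI_MINORS);

/* Find and reserve a free minor number; returns -ENODEV when all are taken. */
static int example_get_minor(void)
{
	unsigned long minor = find_first_zero_bit(minors, N_SPI_MINORS);

	if (minor >= N_SPI_MINORS)
		return -ENODEV;

	set_bit(minor, minors);
	return minor;
}

static void example_put_minor(unsigned long minor)
{
	clear_bit(minor, minors);
}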
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 093f57af32d3..94eb86319ff3 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -87,8 +87,6 @@ source "drivers/staging/frontier/Kconfig"
 
 source "drivers/staging/dream/Kconfig"
 
-source "drivers/staging/dst/Kconfig"
-
 source "drivers/staging/pohmelfs/Kconfig"
 
 source "drivers/staging/b3dfg/Kconfig"
@@ -145,5 +143,7 @@ source "drivers/staging/wavelan/Kconfig"
 
 source "drivers/staging/netwave/Kconfig"
 
+source "drivers/staging/sm7xx/Kconfig"
+
 endif # !STAGING_EXCLUDE_BUILD
 endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 069864f4391e..b5e67b889f60 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_RTL8192E) += rtl8192e/
 obj-$(CONFIG_INPUT_MIMIO)	+= mimio/
 obj-$(CONFIG_TRANZPORT)	+= frontier/
 obj-$(CONFIG_DREAM)		+= dream/
-obj-$(CONFIG_DST)		+= dst/
 obj-$(CONFIG_POHMELFS)		+= pohmelfs/
 obj-$(CONFIG_B3DFG)		+= b3dfg/
 obj-$(CONFIG_IDE_PHISON)	+= phison/
@@ -53,3 +52,4 @@ obj-$(CONFIG_ARLAN) += arlan/
 obj-$(CONFIG_WAVELAN)		+= wavelan/
 obj-$(CONFIG_PCMCIA_WAVELAN)	+= wavelan/
 obj-$(CONFIG_PCMCIA_NETWAVE)	+= netwave/
+obj-$(CONFIG_FB_SM7XX)		+= sm7xx/
diff --git a/drivers/staging/batman-adv/Kconfig b/drivers/staging/batman-adv/Kconfig
index 7632f5760060..1d74dabf9511 100644
--- a/drivers/staging/batman-adv/Kconfig
+++ b/drivers/staging/batman-adv/Kconfig
@@ -4,6 +4,7 @@
 
 config BATMAN_ADV
 	tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
+	depends on PROC_FS && PACKET
 	default n
 	---help---
 
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index d724798278d6..eb617508cca4 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -363,8 +363,10 @@ void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len)
 		return;
 
 	forw_packet->packet_buff = kmalloc(packet_len, GFP_ATOMIC);
-	if (!forw_packet->packet_buff)
+	if (!forw_packet->packet_buff) {
+		kfree(forw_packet);
 		return;
+	}
 
 	forw_packet->packet_len = packet_len;
 	memcpy(forw_packet->packet_buff, packet_buff, forw_packet->packet_len);
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index ccc5cdc008c6..b559a9c2f857 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -451,7 +451,7 @@
 
 #define COMEDI_CB_EOS		1	/* end of scan */
 #define COMEDI_CB_EOA		2	/* end of acquisition */
-#define COMEDI_CB_BLOCK		4	/* DEPRECATED: convenient block size */
+#define COMEDI_CB_BLOCK		4	/* data has arrived: wakes up read() / write() */
 #define COMEDI_CB_EOBUF		8	/* DEPRECATED: end of buffer */
 #define COMEDI_CB_ERROR		16	/* card error during acquisition */
 #define COMEDI_CB_OVERFLOW	32	/* buffer overflow/underflow */
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 0d2c2eb23b23..bd397840dcba 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -849,8 +849,11 @@ static int jr3_pci_attach(struct comedi_device *dev,
 	}
 
 	devpriv->pci_enabled = 1;
-	devpriv->iobase =
-	    ioremap(pci_resource_start(card, 0), sizeof(struct jr3_t));
+	devpriv->iobase = ioremap(pci_resource_start(card, 0),
+			offsetof(struct jr3_t, channel[devpriv->n_channels]));
+	if (!devpriv->iobase)
+		return -ENOMEM;
+
 	result = alloc_subdevices(dev, devpriv->n_channels);
 	if (result < 0)
 		goto out;
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 06c020466298..9a1b559c4b0d 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v2.3"
+#define DRIVER_VERSION "v2.4"
 #define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
 #define DRIVER_DESC "Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com"
 /*
@@ -81,6 +81,8 @@ sampling rate. If you sample two channels you get 4kHz and so on.
  * 2.1: changed PWM API
  * 2.2: added firmware kernel request to fix an udev problem
  * 2.3: corrected a bug in bulk timeouts which were far too short
+ * 2.4: fixed a bug which causes the driver to hang when it ran out of data.
+ *      Thanks to Jan-Matthias Braun and Ian to spot the bug and fix it.
  *
  */
 
@@ -532,6 +534,7 @@ static void usbduxsub_ai_IsocIrq(struct urb *urb)
 		}
 	}
 	/* tell comedi that data is there */
+	s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
 	comedi_event(this_usbduxsub->comedidev, s);
 }
 
diff --git a/drivers/staging/dst/Kconfig b/drivers/staging/dst/Kconfig
deleted file mode 100644
index 448d342ac2a2..000000000000
--- a/drivers/staging/dst/Kconfig
+++ /dev/null
@@ -1,67 +0,0 @@
1config DST
2 tristate "Distributed storage"
3 depends on NET && CRYPTO && SYSFS && BLK_DEV
4 select CONNECTOR
5 ---help---
6 DST is a network block device storage, which can be used to organize
7 exported storage on the remote nodes into the local block device.
8
9 DST works on top of any network media and protocol; it is just a matter
10 of configuration utility to understand the correct addresses. The most
11 common example is TCP over IP, which allows to pass through firewalls and
12 create remote backup storage in a different datacenter. DST requires
13 single port to be enabled on the exporting node and outgoing connections
14 on the local node.
15
16 DST works with in-kernel client and server, which improves performance by
17 eliminating unneded data copies and by not depending on the version
18 of the external IO components. It requires userspace configuration utility
19 though.
20
21 DST uses transaction model, when each store has to be explicitly acked
22 from the remote node to be considered as successfully written. There
23 may be lots of in-flight transactions. When remote host does not ack
24 the transaction it will be resent predefined number of times with specified
25 timeouts between them. All those parameters are configurable. Transactions
26 are marked as failed after all resends complete unsuccessfully; having
27 long enough resend timeout and/or large number of resends allows not to
28 return error to the higher (FS usually) layer in case of short network
29 problems or remote node outages. In case of network RAID setup this means
30 that storage will not degrade until transactions are marked as failed, and
31 thus will not force checksum recalculation and data rebuild. In case of
32 connection failure DST will try to reconnect to the remote node automatically.
33 DST sends ping commands at idle time to detect if remote node is alive.
34
35 Because of transactional model it is possible to use zero-copy sending
36 without worry of data corruption (which in turn could be detected by the
37 strong checksums though).
38
39 DST may fully encrypt the data channel in case of untrusted channel and implement
40 strong checksum of the transferred data. It is possible to configure algorithms
41 and crypto keys; they should match on both sides of the network channel.
42 Crypto processing does not introduce noticeble performance overhead, since DST
43 uses configurable pool of threads to perform crypto processing.
44
45 DST utilizes memory pool model of all its transaction allocations (it is the
46 only additional allocation on the client) and server allocations (bio pools,
47 while pages are allocated from the slab).
48
49 At startup DST performs a simple negotiation with the export node to determine
50 access permissions and size of the exported storage. It can be extended if
51 new parameters should be autonegotiated.
52
53 DST carries block IO flags in the protocol, which allows to transparently implement
54 barriers and sync/flush operations. Those flags are used in the export node where
55 IO against the local storage is performed, which means that sync write will be sync
56 on the remote node too, which in turn improves data integrity and improved resistance
57 to errors and data corruption during power outages or storage damages.
58
59 Homepage: http://www.ioremap.net/projects/dst
60 Userspace configuration utility and the latest releases: http://www.ioremap.net/archive/dst/
61
62config DST_DEBUG
63 bool "DST debug"
64 depends on DST
65 ---help---
66 This option will enable HEAVY debugging of the DST.
67 Turn it on ONLY if you have to debug some really obscure problem.
diff --git a/drivers/staging/dst/Makefile b/drivers/staging/dst/Makefile
deleted file mode 100644
index 3a8b0cf9643e..000000000000
--- a/drivers/staging/dst/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1obj-$(CONFIG_DST) += nst.o
2
3nst-y := dcore.o state.o export.o thread_pool.o crypto.o trans.o
diff --git a/drivers/staging/dst/crypto.c b/drivers/staging/dst/crypto.c
deleted file mode 100644
index 351295c97a4b..000000000000
--- a/drivers/staging/dst/crypto.c
+++ /dev/null
@@ -1,733 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/bio.h>
17#include <linux/crypto.h>
18#include <linux/dst.h>
19#include <linux/kernel.h>
20#include <linux/scatterlist.h>
21#include <linux/slab.h>
22
23/*
24 * Tricky bastard, but IV can be more complex with time...
25 */
26static inline u64 dst_gen_iv(struct dst_trans *t)
27{
28 return t->gen;
29}
30
31/*
32 * Crypto machinery: hash/cipher support for the given crypto controls.
33 */
34static struct crypto_hash *dst_init_hash(struct dst_crypto_ctl *ctl, u8 *key)
35{
36 int err;
37 struct crypto_hash *hash;
38
39 hash = crypto_alloc_hash(ctl->hash_algo, 0, CRYPTO_ALG_ASYNC);
40 if (IS_ERR(hash)) {
41 err = PTR_ERR(hash);
42 dprintk("%s: failed to allocate hash '%s', err: %d.\n",
43 __func__, ctl->hash_algo, err);
44 goto err_out_exit;
45 }
46
47 ctl->crypto_attached_size = crypto_hash_digestsize(hash);
48
49 if (!ctl->hash_keysize)
50 return hash;
51
52 err = crypto_hash_setkey(hash, key, ctl->hash_keysize);
53 if (err) {
54 dprintk("%s: failed to set key for hash '%s', err: %d.\n",
55 __func__, ctl->hash_algo, err);
56 goto err_out_free;
57 }
58
59 return hash;
60
61err_out_free:
62 crypto_free_hash(hash);
63err_out_exit:
64 return ERR_PTR(err);
65}
66
67static struct crypto_ablkcipher *dst_init_cipher(struct dst_crypto_ctl *ctl,
68 u8 *key)
69{
70 int err = -EINVAL;
71 struct crypto_ablkcipher *cipher;
72
73 if (!ctl->cipher_keysize)
74 goto err_out_exit;
75
76 cipher = crypto_alloc_ablkcipher(ctl->cipher_algo, 0, 0);
77 if (IS_ERR(cipher)) {
78 err = PTR_ERR(cipher);
79 dprintk("%s: failed to allocate cipher '%s', err: %d.\n",
80 __func__, ctl->cipher_algo, err);
81 goto err_out_exit;
82 }
83
84 crypto_ablkcipher_clear_flags(cipher, ~0);
85
86 err = crypto_ablkcipher_setkey(cipher, key, ctl->cipher_keysize);
87 if (err) {
88 dprintk("%s: failed to set key for cipher '%s', err: %d.\n",
89 __func__, ctl->cipher_algo, err);
90 goto err_out_free;
91 }
92
93 return cipher;
94
95err_out_free:
96 crypto_free_ablkcipher(cipher);
97err_out_exit:
98 return ERR_PTR(err);
99}
100
101/*
102 * Crypto engine has a pool of pages to encrypt data into before sending
103 * it over the network. This pool is freed/allocated here.
104 */
105static void dst_crypto_pages_free(struct dst_crypto_engine *e)
106{
107 unsigned int i;
108
109 for (i = 0; i < e->page_num; ++i)
110 __free_page(e->pages[i]);
111 kfree(e->pages);
112}
113
114static int dst_crypto_pages_alloc(struct dst_crypto_engine *e, int num)
115{
116 int i;
117
118 e->pages = kmalloc(num * sizeof(struct page **), GFP_KERNEL);
119 if (!e->pages)
120 return -ENOMEM;
121
122 for (i = 0; i < num; ++i) {
123 e->pages[i] = alloc_page(GFP_KERNEL);
124 if (!e->pages[i])
125 goto err_out_free_pages;
126 }
127
128 e->page_num = num;
129 return 0;
130
131err_out_free_pages:
132 while (--i >= 0)
133 __free_page(e->pages[i]);
134
135 kfree(e->pages);
136 return -ENOMEM;
137}
138
139/*
140 * Initialize crypto engine for given node.
141 * Setup cipher/hash, keys, pool of threads and private data.
142 */
143static int dst_crypto_engine_init(struct dst_crypto_engine *e,
144 struct dst_node *n)
145{
146 int err;
147 struct dst_crypto_ctl *ctl = &n->crypto;
148
149 err = dst_crypto_pages_alloc(e, n->max_pages);
150 if (err)
151 goto err_out_exit;
152
153 e->size = PAGE_SIZE;
154 e->data = kmalloc(e->size, GFP_KERNEL);
155 if (!e->data) {
156 err = -ENOMEM;
157 goto err_out_free_pages;
158 }
159
160 if (ctl->hash_algo[0]) {
161 e->hash = dst_init_hash(ctl, n->hash_key);
162 if (IS_ERR(e->hash)) {
163 err = PTR_ERR(e->hash);
164 e->hash = NULL;
165 goto err_out_free;
166 }
167 }
168
169 if (ctl->cipher_algo[0]) {
170 e->cipher = dst_init_cipher(ctl, n->cipher_key);
171 if (IS_ERR(e->cipher)) {
172 err = PTR_ERR(e->cipher);
173 e->cipher = NULL;
174 goto err_out_free_hash;
175 }
176 }
177
178 return 0;
179
180err_out_free_hash:
181 crypto_free_hash(e->hash);
182err_out_free:
183 kfree(e->data);
184err_out_free_pages:
185 dst_crypto_pages_free(e);
186err_out_exit:
187 return err;
188}
189
190static void dst_crypto_engine_exit(struct dst_crypto_engine *e)
191{
192 if (e->hash)
193 crypto_free_hash(e->hash);
194 if (e->cipher)
195 crypto_free_ablkcipher(e->cipher);
196 dst_crypto_pages_free(e);
197 kfree(e->data);
198}
199
200/*
201 * Waiting for cipher processing to be completed.
202 */
203struct dst_crypto_completion {
204 struct completion complete;
205 int error;
206};
207
208static void dst_crypto_complete(struct crypto_async_request *req, int err)
209{
210 struct dst_crypto_completion *c = req->data;
211
212 if (err == -EINPROGRESS)
213 return;
214
215 dprintk("%s: req: %p, err: %d.\n", __func__, req, err);
216 c->error = err;
217 complete(&c->complete);
218}
219
220static int dst_crypto_process(struct ablkcipher_request *req,
221 struct scatterlist *sg_dst, struct scatterlist *sg_src,
222 void *iv, int enc, unsigned long timeout)
223{
224 struct dst_crypto_completion c;
225 int err;
226
227 init_completion(&c.complete);
228 c.error = -EINPROGRESS;
229
230 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
231 dst_crypto_complete, &c);
232
233 ablkcipher_request_set_crypt(req, sg_src, sg_dst, sg_src->length, iv);
234
235 if (enc)
236 err = crypto_ablkcipher_encrypt(req);
237 else
238 err = crypto_ablkcipher_decrypt(req);
239
240 switch (err) {
241 case -EINPROGRESS:
242 case -EBUSY:
243 err = wait_for_completion_interruptible_timeout(&c.complete,
244 timeout);
245 if (!err)
246 err = -ETIMEDOUT;
247 else
248 err = c.error;
249 break;
250 default:
251 break;
252 }
253
254 return err;
255}
256
257/*
258 * DST uses a generic iteration approach for data crypto processing.
259 * A single block IO request is turned into an array of scatterlists,
260 * which are submitted to the crypto processing iterator.
261 *
262 * Input and output iterator initialization differ, since in the
263 * output case we cannot encrypt the data in place and need temporary
264 * storage, which is then sent to the remote peer.
265 */
266static int dst_trans_iter_out(struct bio *bio, struct dst_crypto_engine *e,
267 int (*iterator) (struct dst_crypto_engine *e,
268 struct scatterlist *dst,
269 struct scatterlist *src))
270{
271 struct bio_vec *bv;
272 int err, i;
273
274 sg_init_table(e->src, bio->bi_vcnt);
275 sg_init_table(e->dst, bio->bi_vcnt);
276
277 bio_for_each_segment(bv, bio, i) {
278 sg_set_page(&e->src[i], bv->bv_page, bv->bv_len, bv->bv_offset);
279 sg_set_page(&e->dst[i], e->pages[i], bv->bv_len, bv->bv_offset);
280
281 err = iterator(e, &e->dst[i], &e->src[i]);
282 if (err)
283 return err;
284 }
285
286 return 0;
287}
288
289static int dst_trans_iter_in(struct bio *bio, struct dst_crypto_engine *e,
290 int (*iterator) (struct dst_crypto_engine *e,
291 struct scatterlist *dst,
292 struct scatterlist *src))
293{
294 struct bio_vec *bv;
295 int err, i;
296
297 sg_init_table(e->src, bio->bi_vcnt);
298 sg_init_table(e->dst, bio->bi_vcnt);
299
300 bio_for_each_segment(bv, bio, i) {
301 sg_set_page(&e->src[i], bv->bv_page, bv->bv_len, bv->bv_offset);
302 sg_set_page(&e->dst[i], bv->bv_page, bv->bv_len, bv->bv_offset);
303
304 err = iterator(e, &e->dst[i], &e->src[i]);
305 if (err)
306 return err;
307 }
308
309 return 0;
310}
311
312static int dst_crypt_iterator(struct dst_crypto_engine *e,
313 struct scatterlist *sg_dst, struct scatterlist *sg_src)
314{
315 struct ablkcipher_request *req = e->data;
316 u8 iv[32];
317
318 memset(iv, 0, sizeof(iv));
319
320 memcpy(iv, &e->iv, sizeof(e->iv));
321
322 return dst_crypto_process(req, sg_dst, sg_src, iv, e->enc, e->timeout);
323}
324
325static int dst_crypt(struct dst_crypto_engine *e, struct bio *bio)
326{
327 struct ablkcipher_request *req = e->data;
328
329 memset(req, 0, sizeof(struct ablkcipher_request));
330 ablkcipher_request_set_tfm(req, e->cipher);
331
332 if (e->enc)
333 return dst_trans_iter_out(bio, e, dst_crypt_iterator);
334 else
335 return dst_trans_iter_in(bio, e, dst_crypt_iterator);
336}
337
338static int dst_hash_iterator(struct dst_crypto_engine *e,
339 struct scatterlist *sg_dst, struct scatterlist *sg_src)
340{
341 return crypto_hash_update(e->data, sg_src, sg_src->length);
342}
343
344static int dst_hash(struct dst_crypto_engine *e, struct bio *bio, void *dst)
345{
346 struct hash_desc *desc = e->data;
347 int err;
348
349 desc->tfm = e->hash;
350 desc->flags = 0;
351
352 err = crypto_hash_init(desc);
353 if (err)
354 return err;
355
356 err = dst_trans_iter_in(bio, e, dst_hash_iterator);
357 if (err)
358 return err;
359
360 err = crypto_hash_final(desc, dst);
361 if (err)
362 return err;
363
364 return 0;
365}
366
367/*
368 * Initialize/clean up a crypto thread. The only things it should
369 * do are to allocate a pool of pages as temporary storage
370 * and to set up the cipher and/or hash.
371 */
372static void *dst_crypto_thread_init(void *data)
373{
374 struct dst_node *n = data;
375 struct dst_crypto_engine *e;
376 int err = -ENOMEM;
377
378 e = kzalloc(sizeof(struct dst_crypto_engine), GFP_KERNEL);
379 if (!e)
380 goto err_out_exit;
381 e->src = kcalloc(2 * n->max_pages, sizeof(struct scatterlist),
382 GFP_KERNEL);
383 if (!e->src)
384 goto err_out_free;
385
386 e->dst = e->src + n->max_pages;
387
388 err = dst_crypto_engine_init(e, n);
389 if (err)
390 goto err_out_free_all;
391
392 return e;
393
394err_out_free_all:
395 kfree(e->src);
396err_out_free:
397 kfree(e);
398err_out_exit:
399 return ERR_PTR(err);
400}
401
402static void dst_crypto_thread_cleanup(void *private)
403{
404 struct dst_crypto_engine *e = private;
405
406 dst_crypto_engine_exit(e);
407 kfree(e->src);
408 kfree(e);
409}
410
411/*
412 * Initialize the crypto engine for the given node: store keys, create a
413 * pool of threads, initialize each one.
414 *
415 * Each thread has a unique ID, but 0 and 1 are reserved for the receiving and
416 * accepting threads (on an export node), so IDs could start from 2; starting
417 * them from 10 makes it easy to see what a given thread is for.
418 */
419int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl)
420{
421 void *key = (ctl + 1);
422 int err = -ENOMEM, i;
423 char name[32];
424
425 if (ctl->hash_keysize) {
426 n->hash_key = kmalloc(ctl->hash_keysize, GFP_KERNEL);
427 if (!n->hash_key)
428 goto err_out_exit;
429 memcpy(n->hash_key, key, ctl->hash_keysize);
430 }
431
432 if (ctl->cipher_keysize) {
433 n->cipher_key = kmalloc(ctl->cipher_keysize, GFP_KERNEL);
434 if (!n->cipher_key)
435 goto err_out_free_hash;
436 memcpy(n->cipher_key, key, ctl->cipher_keysize);
437 }
438 memcpy(&n->crypto, ctl, sizeof(struct dst_crypto_ctl));
439
440 for (i = 0; i < ctl->thread_num; ++i) {
441 snprintf(name, sizeof(name), "%s-crypto-%d", n->name, i);
442 /* Unique ids... */
443 err = thread_pool_add_worker(n->pool, name, i + 10,
444 dst_crypto_thread_init, dst_crypto_thread_cleanup, n);
445 if (err)
446 goto err_out_free_threads;
447 }
448
449 return 0;
450
451err_out_free_threads:
452 while (--i >= 0)
453 thread_pool_del_worker_id(n->pool, i+10);
454
455 if (ctl->cipher_keysize)
456 kfree(n->cipher_key);
457 ctl->cipher_keysize = 0;
458err_out_free_hash:
459 if (ctl->hash_keysize)
460 kfree(n->hash_key);
461 ctl->hash_keysize = 0;
462err_out_exit:
463 return err;
464}
465
466void dst_node_crypto_exit(struct dst_node *n)
467{
468 struct dst_crypto_ctl *ctl = &n->crypto;
469
470 if (ctl->cipher_algo[0] || ctl->hash_algo[0]) {
471 kfree(n->hash_key);
472 kfree(n->cipher_key);
473 }
474}
475
476/*
477 * Thread pool setup callback. Just stores a transaction in private data.
478 */
479static int dst_trans_crypto_setup(void *crypto_engine, void *trans)
480{
481 struct dst_crypto_engine *e = crypto_engine;
482
483 e->private = trans;
484 return 0;
485}
486
487#if 0
488static void dst_dump_bio(struct bio *bio)
489{
490 u8 *p;
491 struct bio_vec *bv;
492 int i;
493
494 bio_for_each_segment(bv, bio, i) {
495 dprintk("%s: %llu/%u: size: %u, offset: %u, data: ",
496 __func__, bio->bi_sector, bio->bi_size,
497 bv->bv_len, bv->bv_offset);
498
499 p = kmap(bv->bv_page) + bv->bv_offset;
500 for (i = 0; i < bv->bv_len; ++i)
501 printk(KERN_DEBUG "%02x ", p[i]);
502 kunmap(bv->bv_page);
503 printk("\n");
504 }
505}
506#endif
507
508/*
509 * Encrypt/hash data and send it to the network.
510 */
511static int dst_crypto_process_sending(struct dst_crypto_engine *e,
512 struct bio *bio, u8 *hash)
513{
514 int err;
515
516 if (e->cipher) {
517 err = dst_crypt(e, bio);
518 if (err)
519 goto err_out_exit;
520 }
521
522 if (e->hash) {
523 err = dst_hash(e, bio, hash);
524 if (err)
525 goto err_out_exit;
526
527#ifdef CONFIG_DST_DEBUG
528 {
529 unsigned int i;
530
531 /* dst_dump_bio(bio); */
532
533 printk(KERN_DEBUG "%s: bio: %llu/%u, rw: %lu, hash: ",
534 __func__, (u64)bio->bi_sector,
535 bio->bi_size, bio_data_dir(bio));
536 for (i = 0; i < crypto_hash_digestsize(e->hash); ++i)
537 printk("%02x ", hash[i]);
538 printk("\n");
539 }
540#endif
541 }
542
543 return 0;
544
545err_out_exit:
546 return err;
547}
548
549/*
550 * Check if received data is valid. Decipher if it is.
551 */
552static int dst_crypto_process_receiving(struct dst_crypto_engine *e,
553 struct bio *bio, u8 *hash, u8 *recv_hash)
554{
555 int err;
556
557 if (e->hash) {
558 int mismatch;
559
560 err = dst_hash(e, bio, hash);
561 if (err)
562 goto err_out_exit;
563
564 mismatch = !!memcmp(recv_hash, hash,
565 crypto_hash_digestsize(e->hash));
566#ifdef CONFIG_DST_DEBUG
567 /* dst_dump_bio(bio); */
568
569 printk(KERN_DEBUG "%s: bio: %llu/%u, rw: %lu, hash mismatch: %d",
570 __func__, (u64)bio->bi_sector, bio->bi_size,
571 bio_data_dir(bio), mismatch);
572 if (mismatch) {
573 unsigned int i;
574
575 printk(", recv/calc: ");
576 for (i = 0; i < crypto_hash_digestsize(e->hash); ++i)
577 printk("%02x/%02x ", recv_hash[i], hash[i]);
578
579 }
580 printk("\n");
581#endif
582 err = -1;
583 if (mismatch)
584 goto err_out_exit;
585 }
586
587 if (e->cipher) {
588 err = dst_crypt(e, bio);
589 if (err)
590 goto err_out_exit;
591 }
592
593 return 0;
594
595err_out_exit:
596 return err;
597}
598
599/*
600 * Thread pool callback to encrypt data and send it to the network.
601 */
602static int dst_trans_crypto_action(void *crypto_engine, void *schedule_data)
603{
604 struct dst_crypto_engine *e = crypto_engine;
605 struct dst_trans *t = schedule_data;
606 struct bio *bio = t->bio;
607 int err;
608
609 dprintk("%s: t: %p, gen: %llu, cipher: %p, hash: %p.\n",
610 __func__, t, t->gen, e->cipher, e->hash);
611
612 e->enc = t->enc;
613 e->iv = dst_gen_iv(t);
614
615 if (bio_data_dir(bio) == WRITE) {
616 err = dst_crypto_process_sending(e, bio, t->cmd.hash);
617 if (err)
618 goto err_out_exit;
619
620 if (e->hash) {
621 t->cmd.csize = crypto_hash_digestsize(e->hash);
622 t->cmd.size += t->cmd.csize;
623 }
624
625 return dst_trans_send(t);
626 } else {
627 u8 *hash = e->data + e->size/2;
628
629 err = dst_crypto_process_receiving(e, bio, hash, t->cmd.hash);
630 if (err)
631 goto err_out_exit;
632
633 dst_trans_remove(t);
634 dst_trans_put(t);
635 }
636
637 return 0;
638
639err_out_exit:
640 t->error = err;
641 dst_trans_put(t);
642 return err;
643}
644
645/*
646 * Schedule crypto processing for given transaction.
647 */
648int dst_trans_crypto(struct dst_trans *t)
649{
650 struct dst_node *n = t->n;
651 int err;
652
653 err = thread_pool_schedule(n->pool,
654 dst_trans_crypto_setup, dst_trans_crypto_action,
655 t, MAX_SCHEDULE_TIMEOUT);
656 if (err)
657 goto err_out_exit;
658
659 return 0;
660
661err_out_exit:
662 dst_trans_put(t);
663 return err;
664}
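For orientation, here is a hedged sketch of the thread-pool callback contract these calls appear to rely on, inferred only from the call sites in this file (dst_crypto_thread_init()/dst_crypto_thread_cleanup(), dst_trans_crypto_setup()/dst_trans_crypto_action() and the thread_pool_* calls); the authoritative prototypes live in the driver's thread pool code, which is not part of this section.

/*
 * Editorial sketch (not from thread_pool.h): init() runs once per worker and
 * returns the worker-private pointer (here a dst_crypto_engine), cleanup()
 * tears it down; setup() and action() run per scheduled item and receive that
 * private pointer plus the scheduled data (a dst_trans or a bio).
 */
typedef void *(*pool_init_t)(void *data);                /* dst_crypto_thread_init */
typedef void (*pool_cleanup_t)(void *private);           /* dst_crypto_thread_cleanup */
typedef int (*pool_setup_t)(void *private, void *data);  /* dst_trans_crypto_setup */
typedef int (*pool_action_t)(void *private, void *data); /* dst_trans_crypto_action */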
665
666/*
667 * Crypto machinery for the export node.
668 */
669static int dst_export_crypto_setup(void *crypto_engine, void *bio)
670{
671 struct dst_crypto_engine *e = crypto_engine;
672
673 e->private = bio;
674 return 0;
675}
676
677static int dst_export_crypto_action(void *crypto_engine, void *schedule_data)
678{
679 struct dst_crypto_engine *e = crypto_engine;
680 struct bio *bio = schedule_data;
681 struct dst_export_priv *p = bio->bi_private;
682 int err;
683
684 dprintk("%s: e: %p, data: %p, bio: %llu/%u, dir: %lu.\n",
685 __func__, e, e->data, (u64)bio->bi_sector,
686 bio->bi_size, bio_data_dir(bio));
687
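	/*
	 * On the export node a READ means the completed data is about to be
	 * sent back to the client, so it is encrypted; a WRITE carries data
	 * that just arrived from the client and is verified/decrypted below.
	 */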
688 e->enc = (bio_data_dir(bio) == READ);
689 e->iv = p->cmd.id;
690
691 if (bio_data_dir(bio) == WRITE) {
692 u8 *hash = e->data + e->size/2;
693
694 err = dst_crypto_process_receiving(e, bio, hash, p->cmd.hash);
695 if (err)
696 goto err_out_exit;
697
698 generic_make_request(bio);
699 } else {
700 err = dst_crypto_process_sending(e, bio, p->cmd.hash);
701 if (err)
702 goto err_out_exit;
703
704 if (e->hash) {
705 p->cmd.csize = crypto_hash_digestsize(e->hash);
706 p->cmd.size += p->cmd.csize;
707 }
708
709 err = dst_export_send_bio(bio);
710 }
711 return 0;
712
713err_out_exit:
714 bio_put(bio);
715 return err;
716}
717
718int dst_export_crypto(struct dst_node *n, struct bio *bio)
719{
720 int err;
721
722 err = thread_pool_schedule(n->pool,
723 dst_export_crypto_setup, dst_export_crypto_action,
724 bio, MAX_SCHEDULE_TIMEOUT);
725 if (err)
726 goto err_out_exit;
727
728 return 0;
729
730err_out_exit:
731 bio_put(bio);
732 return err;
733}
diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
deleted file mode 100644
index c83ca7e3d048..000000000000
--- a/drivers/staging/dst/dcore.c
+++ /dev/null
@@ -1,968 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/blkdev.h>
19#include <linux/bio.h>
20#include <linux/buffer_head.h>
21#include <linux/connector.h>
22#include <linux/dst.h>
23#include <linux/device.h>
24#include <linux/jhash.h>
25#include <linux/idr.h>
26#include <linux/init.h>
27#include <linux/namei.h>
28#include <linux/slab.h>
29#include <linux/socket.h>
30
31#include <linux/in.h>
32#include <linux/in6.h>
33
34#include <net/sock.h>
35
36static int dst_major;
37
38static DEFINE_MUTEX(dst_hash_lock);
39static struct list_head *dst_hashtable;
40static unsigned int dst_hashtable_size = 128;
41module_param(dst_hashtable_size, uint, 0644);
42
43static char dst_name[] = "Dementianting goldfish";
44
45static DEFINE_IDR(dst_index_idr);
46static struct cb_id cn_dst_id = { CN_DST_IDX, CN_DST_VAL };
47
48/*
49 * DST sysfs tree for device called 'storage':
50 *
51 * /sys/bus/dst/devices/storage/
52 * /sys/bus/dst/devices/storage/type : 192.168.4.80:1025
53 * /sys/bus/dst/devices/storage/size : 800
54 * /sys/bus/dst/devices/storage/name : storage
55 */
56
57static int dst_dev_match(struct device *dev, struct device_driver *drv)
58{
59 return 1;
60}
61
62static struct bus_type dst_dev_bus_type = {
63 .name = "dst",
64 .match = &dst_dev_match,
65};
66
67static void dst_node_release(struct device *dev)
68{
69 struct dst_info *info = container_of(dev, struct dst_info, device);
70
71 kfree(info);
72}
73
74static struct device dst_node_dev = {
75 .bus = &dst_dev_bus_type,
76 .release = &dst_node_release
77};
78
79/*
80 * Setting size of the node after it was changed.
81 */
82static void dst_node_set_size(struct dst_node *n)
83{
84 struct block_device *bdev;
85
86 set_capacity(n->disk, n->size >> 9);
87
88 bdev = bdget_disk(n->disk, 0);
89 if (bdev) {
90 mutex_lock(&bdev->bd_inode->i_mutex);
91 i_size_write(bdev->bd_inode, n->size);
92 mutex_unlock(&bdev->bd_inode->i_mutex);
93 bdput(bdev);
94 }
95}
96
97/*
98 * Distributed storage request processing function.
99 */
100static int dst_request(struct request_queue *q, struct bio *bio)
101{
102 struct dst_node *n = q->queuedata;
103 int err = -EIO;
104
105 if (bio_empty_barrier(bio) && !blk_queue_discard(q)) {
106 /*
107 * This is a dirty^Wnice hack, but if we complete this
108 * operation with -EOPNOTSUPP as intended, XFS
109 * will get stuck and freeze the machine. This may
110 * not be a particularly XFS-specific problem, though; it is
111 * just the only FS I have worked with that sends an empty
112 * barrier at umount time.
113 *
114 * Empty barriers are not allowed anyway, see 51fd77bd9f512
115 * for example, although later it was changed to
116 * bio_rw_flagged(bio, BIO_RW_DISCARD) only, which does not
117 * work in this case.
118 */
119 /* err = -EOPNOTSUPP; */
120 err = 0;
121 goto end_io;
122 }
123
124 bio_get(bio);
125
126 return dst_process_bio(n, bio);
127
128end_io:
129 bio_endio(bio, err);
130 return err;
131}
132
133/*
134 * Open/close callbacks for appropriate block device.
135 */
136static int dst_bdev_open(struct block_device *bdev, fmode_t mode)
137{
138 struct dst_node *n = bdev->bd_disk->private_data;
139
140 dst_node_get(n);
141 return 0;
142}
143
144static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
145{
146 struct dst_node *n = disk->private_data;
147
148 dst_node_put(n);
149 return 0;
150}
151
152static struct block_device_operations dst_blk_ops = {
153 .open = dst_bdev_open,
154 .release = dst_bdev_release,
155 .owner = THIS_MODULE,
156};
157
158/*
159 * Block layer binding - disk is created when array is fully configured
160 * by userspace request.
161 */
162static int dst_node_create_disk(struct dst_node *n)
163{
164 int err = -ENOMEM;
165 u32 index = 0;
166
167 n->queue = blk_init_queue(NULL, NULL);
168 if (!n->queue)
169 goto err_out_exit;
170
171 n->queue->queuedata = n;
172 blk_queue_make_request(n->queue, dst_request);
173 blk_queue_max_phys_segments(n->queue, n->max_pages);
174 blk_queue_max_hw_segments(n->queue, n->max_pages);
175
176 err = -ENOMEM;
177 n->disk = alloc_disk(1);
178 if (!n->disk)
179 goto err_out_free_queue;
180
181 if (!(n->state->permissions & DST_PERM_WRITE)) {
182 printk(KERN_INFO "DST node %s attached read-only.\n", n->name);
183 set_disk_ro(n->disk, 1);
184 }
185
186 if (!idr_pre_get(&dst_index_idr, GFP_KERNEL))
187 goto err_out_put;
188
189 mutex_lock(&dst_hash_lock);
190 err = idr_get_new(&dst_index_idr, NULL, &index);
191 mutex_unlock(&dst_hash_lock);
192 if (err)
193 goto err_out_put;
194
195 n->disk->major = dst_major;
196 n->disk->first_minor = index;
197 n->disk->fops = &dst_blk_ops;
198 n->disk->queue = n->queue;
199 n->disk->private_data = n;
200 snprintf(n->disk->disk_name, sizeof(n->disk->disk_name),
201 "dst-%s", n->name);
202
203 return 0;
204
205err_out_put:
206 put_disk(n->disk);
207err_out_free_queue:
208 blk_cleanup_queue(n->queue);
209err_out_exit:
210 return err;
211}
212
213/*
214 * Sysfs machinery: show device's size.
215 */
216static ssize_t dst_show_size(struct device *dev,
217 struct device_attribute *attr, char *buf)
218{
219 struct dst_info *info = container_of(dev, struct dst_info, device);
220
221 return sprintf(buf, "%llu\n", info->size);
222}
223
224/*
225 * Show local exported device.
226 */
227static ssize_t dst_show_local(struct device *dev,
228 struct device_attribute *attr, char *buf)
229{
230 struct dst_info *info = container_of(dev, struct dst_info, device);
231
232 return sprintf(buf, "%s\n", info->local);
233}
234
235/*
236 * Shows the type of the node: device major/minor number for local
237 * nodes and the address (af_inet ipv4/ipv6 only) for remote nodes.
238 */
239static ssize_t dst_show_type(struct device *dev,
240 struct device_attribute *attr, char *buf)
241{
242 struct dst_info *info = container_of(dev, struct dst_info, device);
243 int family = info->net.addr.sa_family;
244
245 if (family == AF_INET) {
246 struct sockaddr_in *sin = (struct sockaddr_in *)&info->net.addr;
247 return sprintf(buf, "%u.%u.%u.%u:%d\n",
248 NIPQUAD(sin->sin_addr.s_addr), ntohs(sin->sin_port));
249 } else if (family == AF_INET6) {
250 struct sockaddr_in6 *sin = (struct sockaddr_in6 *)
251 &info->net.addr;
252 return sprintf(buf,
253 "%pi6:%d\n",
254 &sin->sin6_addr, ntohs(sin->sin6_port));
255 } else {
256 int i, sz = PAGE_SIZE - 2; /* 0 symbol and '\n' below */
257 int size, addrlen = info->net.addr.sa_data_len;
258 unsigned char *a = (unsigned char *)&info->net.addr.sa_data;
259 char *buf_orig = buf;
260
261 size = snprintf(buf, sz, "family: %d, addrlen: %u, addr: ",
262 family, addrlen);
263 sz -= size;
264 buf += size;
265
266 for (i = 0; i < addrlen; ++i) {
267 if (sz < 3)
268 break;
269
270 size = snprintf(buf, sz, "%02x ", a[i]);
271 sz -= size;
272 buf += size;
273 }
274 buf += sprintf(buf, "\n");
275
276 return buf - buf_orig;
277 }
278 return 0;
279}
280
281static struct device_attribute dst_node_attrs[] = {
282 __ATTR(size, 0444, dst_show_size, NULL),
283 __ATTR(type, 0444, dst_show_type, NULL),
284 __ATTR(local, 0444, dst_show_local, NULL),
285};
286
287static int dst_create_node_attributes(struct dst_node *n)
288{
289 int err, i;
290
291 for (i = 0; i < ARRAY_SIZE(dst_node_attrs); ++i) {
292 err = device_create_file(&n->info->device,
293 &dst_node_attrs[i]);
294 if (err)
295 goto err_out_remove_all;
296 }
297 return 0;
298
299err_out_remove_all:
300 while (--i >= 0)
301 device_remove_file(&n->info->device,
302 &dst_node_attrs[i]);
303
304 return err;
305}
306
307static void dst_remove_node_attributes(struct dst_node *n)
308{
309 int i;
310
311 for (i = 0; i < ARRAY_SIZE(dst_node_attrs); ++i)
312 device_remove_file(&n->info->device,
313 &dst_node_attrs[i]);
314}
315
316/*
317 * Sysfs initialization and cleanup.
318 * The attributes expose a number of useful parameters.
319 */
320static void dst_node_sysfs_exit(struct dst_node *n)
321{
322 if (n->info) {
323 dst_remove_node_attributes(n);
324 device_unregister(&n->info->device);
325 n->info = NULL;
326 }
327}
328
329static int dst_node_sysfs_init(struct dst_node *n)
330{
331 int err;
332
333 n->info = kzalloc(sizeof(struct dst_info), GFP_KERNEL);
334 if (!n->info)
335 return -ENOMEM;
336
337 memcpy(&n->info->device, &dst_node_dev, sizeof(struct device));
338 n->info->size = n->size;
339
340 dev_set_name(&n->info->device, "dst-%s", n->name);
341 err = device_register(&n->info->device);
342 if (err) {
343 dprintk(KERN_ERR "Failed to register node '%s', err: %d.\n",
344 n->name, err);
345 goto err_out_exit;
346 }
347
348 dst_create_node_attributes(n);
349
350 return 0;
351
352err_out_exit:
353 kfree(n->info);
354 n->info = NULL;
355 return err;
356}
357
358/*
359 * DST node hash table machinery.
360 */
361static inline unsigned int dst_hash(char *str, unsigned int size)
362{
363 return jhash(str, size, 0) % dst_hashtable_size;
364}
365
366static void dst_node_remove(struct dst_node *n)
367{
368 mutex_lock(&dst_hash_lock);
369 list_del_init(&n->node_entry);
370 mutex_unlock(&dst_hash_lock);
371}
372
373static void dst_node_add(struct dst_node *n)
374{
375 unsigned hash = dst_hash(n->name, sizeof(n->name));
376
377 mutex_lock(&dst_hash_lock);
378 list_add_tail(&n->node_entry, &dst_hashtable[hash]);
379 mutex_unlock(&dst_hash_lock);
380}
381
382/*
383 * Clean up the node when it is about to be freed.
384 * There are still users of the socket though,
385 * so connection cleanup should be protected.
386 */
387static void dst_node_cleanup(struct dst_node *n)
388{
389 struct dst_state *st = n->state;
390
391 if (!st)
392 return;
393
394 if (n->queue) {
395 blk_cleanup_queue(n->queue);
396
397 mutex_lock(&dst_hash_lock);
398 idr_remove(&dst_index_idr, n->disk->first_minor);
399 mutex_unlock(&dst_hash_lock);
400
401 put_disk(n->disk);
402 }
403
404 if (n->bdev) {
405 sync_blockdev(n->bdev);
406 close_bdev_exclusive(n->bdev, FMODE_READ|FMODE_WRITE);
407 }
408
409 dst_state_lock(st);
410 st->need_exit = 1;
411 dst_state_exit_connected(st);
412 dst_state_unlock(st);
413
414 wake_up(&st->thread_wait);
415
416 dst_state_put(st);
417 n->state = NULL;
418}
419
420/*
421 * Free security attributes attached to given node.
422 */
423static void dst_security_exit(struct dst_node *n)
424{
425 struct dst_secure *s, *tmp;
426
427 list_for_each_entry_safe(s, tmp, &n->security_list, sec_entry) {
428 list_del(&s->sec_entry);
429 kfree(s);
430 }
431}
432
433/*
434 * Free the node when there are no more users.
435 * The node actually has to be freed on behalf of a userspace process,
436 * since a number of threads are embedded in the node and thus
437 * cannot exit and free the node themselves; that is
438 * why there is a wakeup if the reference counter is not zero.
439 */
440void dst_node_put(struct dst_node *n)
441{
442 if (unlikely(!n))
443 return;
444
445 dprintk("%s: n: %p, refcnt: %d.\n",
446 __func__, n, atomic_read(&n->refcnt));
447
448 if (atomic_dec_and_test(&n->refcnt)) {
449 dst_node_remove(n);
450 n->trans_scan_timeout = 0;
451 dst_node_cleanup(n);
452 thread_pool_destroy(n->pool);
453 dst_node_sysfs_exit(n);
454 dst_node_crypto_exit(n);
455 dst_security_exit(n);
456 dst_node_trans_exit(n);
457
458 kfree(n);
459
460 dprintk("%s: freed n: %p.\n", __func__, n);
461 } else {
462 wake_up(&n->wait);
463 }
464}
465
466/*
467 * Set up the export device: look it up by name, get its size
468 * and set up the listening socket, which will accept the clients
469 * that submit IO for the given storage.
470 */
471static int dst_setup_export(struct dst_node *n, struct dst_ctl *ctl,
472 struct dst_export_ctl *le)
473{
474 int err;
475
476 snprintf(n->info->local, sizeof(n->info->local), "%s", le->device);
477
478 n->bdev = open_bdev_exclusive(le->device, FMODE_READ|FMODE_WRITE, NULL);
479 if (IS_ERR(n->bdev))
480 return PTR_ERR(n->bdev);
481
482 if (n->size != 0)
483 n->size = min_t(loff_t, n->bdev->bd_inode->i_size, n->size);
484 else
485 n->size = n->bdev->bd_inode->i_size;
486
487 n->info->size = n->size;
488 err = dst_node_init_listened(n, le);
489 if (err)
490 goto err_out_cleanup;
491
492 return 0;
493
494err_out_cleanup:
495 close_bdev_exclusive(n->bdev, FMODE_READ|FMODE_WRITE);
496 n->bdev = NULL;
497
498 return err;
499}
500
501/* Empty thread pool callbacks for the network processing threads. */
502static inline void *dst_thread_network_init(void *data)
503{
504 dprintk("%s: data: %p.\n", __func__, data);
505 return data;
506}
507
508static inline void dst_thread_network_cleanup(void *data)
509{
510 dprintk("%s: data: %p.\n", __func__, data);
511}
512
513/*
514 * Allocate DST node and initialize some of its parameters.
515 */
516static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
517 int (*start)(struct dst_node *),
518 int num)
519{
520 struct dst_node *n;
521 int err;
522
523 n = kzalloc(sizeof(struct dst_node), GFP_KERNEL);
524 if (!n)
525 return NULL;
526
527 INIT_LIST_HEAD(&n->node_entry);
528
529 INIT_LIST_HEAD(&n->security_list);
530 mutex_init(&n->security_lock);
531
532 init_waitqueue_head(&n->wait);
533
534 n->trans_scan_timeout = msecs_to_jiffies(ctl->trans_scan_timeout);
535 if (!n->trans_scan_timeout)
536 n->trans_scan_timeout = HZ;
537
538 n->trans_max_retries = ctl->trans_max_retries;
539 if (!n->trans_max_retries)
540 n->trans_max_retries = 10;
541
542 /*
543 * Pretty much arbitrary default numbers.
544 * 32 matches the maximum number of pages in a bio originating from ext3 (31).
545 */
546 n->max_pages = ctl->max_pages;
547 if (!n->max_pages)
548 n->max_pages = 32;
549
550 if (n->max_pages > 1024)
551 n->max_pages = 1024;
552
553 n->start = start;
554 n->size = ctl->size;
555
556 atomic_set(&n->refcnt, 1);
557 atomic_long_set(&n->gen, 0);
558 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
559
560 err = dst_node_sysfs_init(n);
561 if (err)
562 goto err_out_free;
563
564 n->pool = thread_pool_create(num, n->name, dst_thread_network_init,
565 dst_thread_network_cleanup, n);
566 if (IS_ERR(n->pool)) {
567 err = PTR_ERR(n->pool);
568 goto err_out_sysfs_exit;
569 }
570
571 dprintk("%s: n: %p, name: %s.\n", __func__, n, n->name);
572
573 return n;
574
575err_out_sysfs_exit:
576 dst_node_sysfs_exit(n);
577err_out_free:
578 kfree(n);
579 return NULL;
580}
581
582/*
583 * Start a node connected to the remote server:
584 * register the block device and initialize the transaction mechanism.
585 * In reverse order, though.
586 *
587 * It will autonegotiate some parameters with the remote node
588 * and update the local ones if needed.
589 *
590 * Transaction initialization should be the last thing before
591 * starting the node, since transactions must cover not only
592 * block IO, but also crypto-related data (if any), which is
593 * initialized separately.
594 */
595static int dst_start_remote(struct dst_node *n)
596{
597 int err;
598
599 err = dst_node_trans_init(n, sizeof(struct dst_trans));
600 if (err)
601 return err;
602
603 err = dst_node_create_disk(n);
604 if (err)
605 return err;
606
607 dst_node_set_size(n);
608 add_disk(n->disk);
609
610 dprintk("DST: started remote node '%s', minor: %d.\n",
611 n->name, n->disk->first_minor);
612
613 return 0;
614}
615
616/*
617 * Add a remote node and initialize the connection.
618 */
619static int dst_add_remote(struct dst_node *n, struct dst_ctl *ctl,
620 void *data, unsigned int size)
621{
622 int err;
623 struct dst_network_ctl *rctl = data;
624
625 if (n)
626 return -EEXIST;
627
628 if (size != sizeof(struct dst_network_ctl))
629 return -EINVAL;
630
631 n = dst_alloc_node(ctl, dst_start_remote, 1);
632 if (!n)
633 return -ENOMEM;
634
635 memcpy(&n->info->net, rctl, sizeof(struct dst_network_ctl));
636 err = dst_node_init_connected(n, rctl);
637 if (err)
638 goto err_out_free;
639
640 dst_node_add(n);
641
642 return 0;
643
644err_out_free:
645 dst_node_put(n);
646 return err;
647}
648
649/*
650 * Add an export node: initialize the block device and listening socket.
651 */
652static int dst_add_export(struct dst_node *n, struct dst_ctl *ctl,
653 void *data, unsigned int size)
654{
655 int err;
656 struct dst_export_ctl *le = data;
657
658 if (n)
659 return -EEXIST;
660
661 if (size != sizeof(struct dst_export_ctl))
662 return -EINVAL;
663
664 n = dst_alloc_node(ctl, dst_start_export, 2);
665 if (!n)
666 return -EINVAL;
667
668 err = dst_setup_export(n, ctl, le);
669 if (err)
670 goto err_out_free;
671
672 dst_node_add(n);
673
674 return 0;
675
676err_out_free:
677 dst_node_put(n);
678 return err;
679}
680
681static int dst_node_remove_unload(struct dst_node *n)
682{
683 printk(KERN_INFO "STOPPED name: '%s', size: %llu.\n",
684 n->name, n->size);
685
686 if (n->disk)
687 del_gendisk(n->disk);
688
689 dst_node_remove(n);
690 dst_node_sysfs_exit(n);
691
692 /*
693 * This is not a hack. Really.
694 * The node's reference counter allows fine-grained
695 * node freeing, but since all transactions (which hold the node's
696 * reference counter) are processed in a dedicated thread,
697 * the reference may hit zero in that thread,
698 * so we would not be able to exit the thread and clean up the node.
699 *
700 * So we remove the disk, so that no new activity is possible, and
701 * wait until all pending transactions are completed (either
702 * in the receiving thread or by timeout in the workqueue); at that
703 * point the reference counter will be less than or equal to 2 (taken once
704 * in dst_alloc_node() and then in the connector message parser;
705 * or, when we force module unloading and the connector message
706 * parser does not hold a reference, the reference
707 * counter will be equal to 1),
708 * and subsequent dst_node_put() calls will free the node.
709 */
710 dprintk("%s: going to sleep with %d refcnt.\n",
711 __func__, atomic_read(&n->refcnt));
712 wait_event(n->wait, atomic_read(&n->refcnt) <= 2);
713
714 dst_node_put(n);
715 return 0;
716}
717
718/*
719 * Remove node from the hash table.
720 */
721static int dst_del_node(struct dst_node *n, struct dst_ctl *ctl,
722 void *data, unsigned int size)
723{
724 if (!n)
725 return -ENODEV;
726
727 return dst_node_remove_unload(n);
728}
729
730/*
731 * Initialize crypto processing for given node.
732 */
733static int dst_crypto_init(struct dst_node *n, struct dst_ctl *ctl,
734 void *data, unsigned int size)
735{
736 struct dst_crypto_ctl *crypto = data;
737
738 if (!n)
739 return -ENODEV;
740
741 if (size != sizeof(struct dst_crypto_ctl) + crypto->hash_keysize +
742 crypto->cipher_keysize)
743 return -EINVAL;
744
745 if (n->trans_cache)
746 return -EEXIST;
747
748 return dst_node_crypto_init(n, crypto);
749}
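A hedged illustration of the payload this handler expects: the size check above and the key copy at (ctl + 1) in dst_node_crypto_init() imply that the control block is immediately followed by the raw key bytes. The structure name below is hypothetical; the real definitions live in linux/dst.h.

/* Hypothetical view of the DST_CRYPTO command payload. */
struct dst_crypto_payload {
	struct dst_crypto_ctl	ctl;	/* algorithm names, key sizes, thread count */
	u8			keys[];	/* hash_keysize + cipher_keysize key bytes */
};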
750
751/*
752 * Security attributes for given node.
753 */
754static int dst_security_init(struct dst_node *n, struct dst_ctl *ctl,
755 void *data, unsigned int size)
756{
757 struct dst_secure *s;
758
759 if (!n)
760 return -ENODEV;
761
762 if (size != sizeof(struct dst_secure_user))
763 return -EINVAL;
764
765 s = kmalloc(sizeof(struct dst_secure), GFP_KERNEL);
766 if (!s)
767 return -ENOMEM;
768
769 memcpy(&s->sec, data, size);
770
771 mutex_lock(&n->security_lock);
772 list_add_tail(&s->sec_entry, &n->security_list);
773 mutex_unlock(&n->security_lock);
774
775 return 0;
776}
777
778/*
779 * Kill'em all!
780 */
781static int dst_start_node(struct dst_node *n, struct dst_ctl *ctl,
782 void *data, unsigned int size)
783{
784 int err;
785
786 if (!n)
787 return -ENODEV;
788
789 if (n->trans_cache)
790 return 0;
791
792 err = n->start(n);
793 if (err)
794 return err;
795
796 printk(KERN_INFO "STARTED name: '%s', size: %llu.\n", n->name, n->size);
797 return 0;
798}
799
800typedef int (*dst_command_func)(struct dst_node *n, struct dst_ctl *ctl,
801 void *data, unsigned int size);
802
803/*
804 * List of userspace commands.
805 */
806static dst_command_func dst_commands[] = {
807 [DST_ADD_REMOTE] = &dst_add_remote,
808 [DST_ADD_EXPORT] = &dst_add_export,
809 [DST_DEL_NODE] = &dst_del_node,
810 [DST_CRYPTO] = &dst_crypto_init,
811 [DST_SECURITY] = &dst_security_init,
812 [DST_START] = &dst_start_node,
813};
814
815/*
816 * Configuration parser.
817 */
818static void cn_dst_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
819{
820 struct dst_ctl *ctl;
821 int err;
822 struct dst_ctl_ack ack;
823 struct dst_node *n = NULL, *tmp;
824 unsigned int hash;
825
826 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
827 err = -EPERM;
828 goto out;
829 }
830
831 if (msg->len < sizeof(struct dst_ctl)) {
832 err = -EBADMSG;
833 goto out;
834 }
835
836 ctl = (struct dst_ctl *)msg->data;
837
838 if (ctl->cmd >= DST_CMD_MAX) {
839 err = -EINVAL;
840 goto out;
841 }
842 hash = dst_hash(ctl->name, sizeof(ctl->name));
843
844 mutex_lock(&dst_hash_lock);
845 list_for_each_entry(tmp, &dst_hashtable[hash], node_entry) {
846 if (!memcmp(tmp->name, ctl->name, sizeof(tmp->name))) {
847 n = tmp;
848 dst_node_get(n);
849 break;
850 }
851 }
852 mutex_unlock(&dst_hash_lock);
853
854 err = dst_commands[ctl->cmd](n, ctl, msg->data + sizeof(struct dst_ctl),
855 msg->len - sizeof(struct dst_ctl));
856
857 dst_node_put(n);
858out:
859 memcpy(&ack.msg, msg, sizeof(struct cn_msg));
860
861 ack.msg.ack = msg->ack + 1;
862 ack.msg.len = sizeof(struct dst_ctl_ack) - sizeof(struct cn_msg);
863
864 ack.error = err;
865
866 cn_netlink_send(&ack.msg, 0, GFP_KERNEL);
867}
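As a hedged sketch of the message format this parser expects: a struct dst_ctl sits at the start of the connector payload and command-specific data follows it (for DST_ADD_REMOTE a struct dst_network_ctl, whose size is checked in dst_add_remote()). The aggregate below is illustrative only; the real definitions live in linux/dst.h and linux/connector.h.

/* Hypothetical body of a DST_ADD_REMOTE request, i.e. what cn_msg.data carries. */
struct dst_add_remote_body {
	struct dst_ctl		ctl;	/* ctl.cmd = DST_ADD_REMOTE, ctl.name selects the node */
	struct dst_network_ctl	net;	/* address of the remote node; size checked above */
};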
868
869/*
870 * Global initialization: sysfs, hash table, block device registration,
871 * connector and various caches.
872 */
873static int __init dst_sysfs_init(void)
874{
875 return bus_register(&dst_dev_bus_type);
876}
877
878static void dst_sysfs_exit(void)
879{
880 bus_unregister(&dst_dev_bus_type);
881}
882
883static int __init dst_hashtable_init(void)
884{
885 unsigned int i;
886
887 dst_hashtable = kcalloc(dst_hashtable_size, sizeof(struct list_head),
888 GFP_KERNEL);
889 if (!dst_hashtable)
890 return -ENOMEM;
891
892 for (i = 0; i < dst_hashtable_size; ++i)
893 INIT_LIST_HEAD(&dst_hashtable[i]);
894
895 return 0;
896}
897
898static void dst_hashtable_exit(void)
899{
900 unsigned int i;
901 struct dst_node *n, *tmp;
902
903 for (i = 0; i < dst_hashtable_size; ++i) {
904 list_for_each_entry_safe(n, tmp, &dst_hashtable[i], node_entry) {
905 dst_node_remove_unload(n);
906 }
907 }
908
909 kfree(dst_hashtable);
910}
911
912static int __init dst_sys_init(void)
913{
914 int err = -ENOMEM;
915
916 err = dst_hashtable_init();
917 if (err)
918 goto err_out_exit;
919
920 err = dst_export_init();
921 if (err)
922 goto err_out_hashtable_exit;
923
924 err = register_blkdev(dst_major, DST_NAME);
925 if (err < 0)
926 goto err_out_export_exit;
927 if (err)
928 dst_major = err;
929
930 err = dst_sysfs_init();
931 if (err)
932 goto err_out_unregister;
933
934 err = cn_add_callback(&cn_dst_id, "DST", cn_dst_callback);
935 if (err)
936 goto err_out_sysfs_exit;
937
938 printk(KERN_INFO "Distributed storage, '%s' release.\n", dst_name);
939
940 return 0;
941
942err_out_sysfs_exit:
943 dst_sysfs_exit();
944err_out_unregister:
945 unregister_blkdev(dst_major, DST_NAME);
946err_out_export_exit:
947 dst_export_exit();
948err_out_hashtable_exit:
949 dst_hashtable_exit();
950err_out_exit:
951 return err;
952}
953
954static void __exit dst_sys_exit(void)
955{
956 cn_del_callback(&cn_dst_id);
957 unregister_blkdev(dst_major, DST_NAME);
958 dst_hashtable_exit();
959 dst_sysfs_exit();
960 dst_export_exit();
961}
962
963module_init(dst_sys_init);
964module_exit(dst_sys_exit);
965
966MODULE_DESCRIPTION("Distributed storage");
967MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
968MODULE_LICENSE("GPL");
diff --git a/drivers/staging/dst/export.c b/drivers/staging/dst/export.c
deleted file mode 100644
index c324230e8b60..000000000000
--- a/drivers/staging/dst/export.c
+++ /dev/null
@@ -1,660 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/blkdev.h>
17#include <linux/bio.h>
18#include <linux/dst.h>
19#include <linux/in.h>
20#include <linux/in6.h>
21#include <linux/poll.h>
22#include <linux/slab.h>
23#include <linux/socket.h>
24
25#include <net/sock.h>
26
27/*
28 * Export bioset is used for server block IO requests.
29 */
30static struct bio_set *dst_bio_set;
31
32int __init dst_export_init(void)
33{
34 int err = -ENOMEM;
35
36 dst_bio_set = bioset_create(32, sizeof(struct dst_export_priv));
37 if (!dst_bio_set)
38 goto err_out_exit;
39
40 return 0;
41
42err_out_exit:
43 return err;
44}
45
46void dst_export_exit(void)
47{
48 bioset_free(dst_bio_set);
49}
50
51/*
52 * When a client connects and autonegotiates with the server node,
53 * its permissions are checked against the security attributes and sent
54 * back.
55 */
56static unsigned int dst_check_permissions(struct dst_state *main,
57 struct dst_state *st)
58{
59 struct dst_node *n = main->node;
60 struct dst_secure *sentry;
61 struct dst_secure_user *s;
62 struct saddr *sa = &st->ctl.addr;
63 unsigned int perm = 0;
64
65 mutex_lock(&n->security_lock);
66 list_for_each_entry(sentry, &n->security_list, sec_entry) {
67 s = &sentry->sec;
68
69 if (s->addr.sa_family != sa->sa_family)
70 continue;
71
72 if (s->addr.sa_data_len != sa->sa_data_len)
73 continue;
74
75 /*
76 * This '2' below is a port field. This may be very wrong to do
77 * in atalk, for example, though. If there is ever a need
78 * to extend the protocol to something else, I can create
79 * per-family helpers and use them instead of this memcmp.
80 */
81 if (memcmp(s->addr.sa_data + 2, sa->sa_data + 2,
82 sa->sa_data_len - 2))
83 continue;
84
85 perm = s->permissions;
86 }
87 mutex_unlock(&n->security_lock);
88
89 return perm;
90}
91
92/*
93 * Accept new client: allocate appropriate network state and check permissions.
94 */
95static struct dst_state *dst_accept_client(struct dst_state *st)
96{
97 unsigned int revents = 0;
98 unsigned int err_mask = POLLERR | POLLHUP | POLLRDHUP;
99 unsigned int mask = err_mask | POLLIN;
100 struct dst_node *n = st->node;
101 int err = 0;
102 struct socket *sock = NULL;
103 struct dst_state *new;
104
105 while (!err && !sock) {
106 revents = dst_state_poll(st);
107
108 if (!(revents & mask)) {
109 DEFINE_WAIT(wait);
110
111 for (;;) {
112 prepare_to_wait(&st->thread_wait,
113 &wait, TASK_INTERRUPTIBLE);
114 if (!n->trans_scan_timeout || st->need_exit)
115 break;
116
117 revents = dst_state_poll(st);
118
119 if (revents & mask)
120 break;
121
122 if (signal_pending(current))
123 break;
124
125 /*
126 * Magic HZ? Polling check above is not safe in
127 * all cases (like socket reset in BH context),
128 * so it is simpler just to postpone it to the
129 * process context instead of implementing
130 * special locking there.
131 */
132 schedule_timeout(HZ);
133 }
134 finish_wait(&st->thread_wait, &wait);
135 }
136
137 err = -ECONNRESET;
138 dst_state_lock(st);
139
140 dprintk("%s: st: %p, revents: %x [err: %d, in: %d].\n",
141 __func__, st, revents, revents & err_mask,
142 revents & POLLIN);
143
144 if (revents & err_mask) {
145 dprintk("%s: revents: %x, socket: %p, err: %d.\n",
146 __func__, revents, st->socket, err);
147 err = -ECONNRESET;
148 }
149
150 if (!n->trans_scan_timeout || st->need_exit)
151 err = -ENODEV;
152
153 if (st->socket && (revents & POLLIN))
154 err = kernel_accept(st->socket, &sock, 0);
155
156 dst_state_unlock(st);
157 }
158
159 if (err)
160 goto err_out_exit;
161
162 new = dst_state_alloc(st->node);
163 if (IS_ERR(new)) {
164 err = -ENOMEM;
165 goto err_out_release;
166 }
167 new->socket = sock;
168
169 new->ctl.addr.sa_data_len = sizeof(struct sockaddr);
170 err = kernel_getpeername(sock, (struct sockaddr *)&new->ctl.addr,
171 (int *)&new->ctl.addr.sa_data_len);
172 if (err)
173 goto err_out_put;
174
175 new->permissions = dst_check_permissions(st, new);
176 if (new->permissions == 0) {
177 err = -EPERM;
178 dst_dump_addr(sock, (struct sockaddr *)&new->ctl.addr,
179 "Client is not allowed to connect");
180 goto err_out_put;
181 }
182
183 err = dst_poll_init(new);
184 if (err)
185 goto err_out_put;
186
187 dst_dump_addr(sock, (struct sockaddr *)&new->ctl.addr,
188 "Connected client");
189
190 return new;
191
192err_out_put:
193 dst_state_put(new);
194err_out_release:
195 sock_release(sock);
196err_out_exit:
197 return ERR_PTR(err);
198}
199
200/*
201 * Each server block request eventually finishes.
202 * Usually that happens in the hard irq context of the appropriate controller,
203 * so to play well in all cases we just queue the BIO into the queue
204 * and wake up the processing thread, which picks up the completed request and
205 * sends it (encrypting if needed) back to the client (if it was a read
206 * request), or sends back a reply that the write completed successfully.
207 */
208static int dst_export_process_request_queue(struct dst_state *st)
209{
210 unsigned long flags;
211 struct dst_export_priv *p = NULL;
212 struct bio *bio;
213 int err = 0;
214
215 while (!list_empty(&st->request_list)) {
216 spin_lock_irqsave(&st->request_lock, flags);
217 if (!list_empty(&st->request_list)) {
218 p = list_first_entry(&st->request_list,
219 struct dst_export_priv, request_entry);
220 list_del(&p->request_entry);
221 }
222 spin_unlock_irqrestore(&st->request_lock, flags);
223
224 if (!p)
225 break;
226
227 bio = p->bio;
228
229 if (dst_need_crypto(st->node) && (bio_data_dir(bio) == READ))
230 err = dst_export_crypto(st->node, bio);
231 else
232 err = dst_export_send_bio(bio);
233
234 if (err)
235 break;
236 }
237
238 return err;
239}
240
241/*
242 * Clean up the export state.
243 * It has to wait until all requests are finished,
244 * and then free them all.
245 */
246static void dst_state_cleanup_export(struct dst_state *st)
247{
248 struct dst_export_priv *p;
249 unsigned long flags;
250
251 /*
252 * This loop waits for all pending bios to be completed and freed.
253 */
254 while (atomic_read(&st->refcnt) > 1) {
255 dprintk("%s: st: %p, refcnt: %d, list_empty: %d.\n",
256 __func__, st, atomic_read(&st->refcnt),
257 list_empty(&st->request_list));
258 wait_event_timeout(st->thread_wait,
259 (atomic_read(&st->refcnt) == 1) ||
260 !list_empty(&st->request_list),
261 HZ/2);
262
263 while (!list_empty(&st->request_list)) {
264 p = NULL;
265 spin_lock_irqsave(&st->request_lock, flags);
266 if (!list_empty(&st->request_list)) {
267 p = list_first_entry(&st->request_list,
268 struct dst_export_priv, request_entry);
269 list_del(&p->request_entry);
270 }
271 spin_unlock_irqrestore(&st->request_lock, flags);
272
273 if (p)
274 bio_put(p->bio);
275
276 dprintk("%s: st: %p, refcnt: %d, list_empty: %d, p: "
277 "%p.\n", __func__, st, atomic_read(&st->refcnt),
278 list_empty(&st->request_list), p);
279 }
280 }
281
282 dst_state_put(st);
283}
284
285/*
286 * Client accepting thread.
287 * It not only accepts new connections, but also schedules the receiving thread
288 * and performs the request completion described above.
289 */
290static int dst_accept(void *init_data, void *schedule_data)
291{
292 struct dst_state *main_st = schedule_data;
293 struct dst_node *n = init_data;
294 struct dst_state *st;
295 int err;
296
297 while (n->trans_scan_timeout && !main_st->need_exit) {
298 dprintk("%s: main_st: %p, n: %p.\n", __func__, main_st, n);
299 st = dst_accept_client(main_st);
300 if (IS_ERR(st))
301 continue;
302
303 err = dst_state_schedule_receiver(st);
304 if (!err) {
305 while (n->trans_scan_timeout) {
306 err = wait_event_interruptible_timeout(st->thread_wait,
307 !list_empty(&st->request_list) ||
308 !n->trans_scan_timeout ||
309 st->need_exit,
310 HZ);
311
312 if (!n->trans_scan_timeout || st->need_exit)
313 break;
314
315 if (list_empty(&st->request_list))
316 continue;
317
318 err = dst_export_process_request_queue(st);
319 if (err)
320 break;
321 }
322
323 st->need_exit = 1;
324 wake_up(&st->thread_wait);
325 }
326
327 dst_state_cleanup_export(st);
328 }
329
330 dprintk("%s: freeing listening socket st: %p.\n", __func__, main_st);
331
332 dst_state_lock(main_st);
333 dst_poll_exit(main_st);
334 dst_state_socket_release(main_st);
335 dst_state_unlock(main_st);
336 dst_state_put(main_st);
337 dprintk("%s: freed listening socket st: %p.\n", __func__, main_st);
338
339 return 0;
340}
341
342int dst_start_export(struct dst_node *n)
343{
344 if (list_empty(&n->security_list)) {
345 printk(KERN_ERR "You are trying to export node '%s' "
346 "without security attributes.\nNo clients will "
347 "be allowed to connect. Exiting.\n", n->name);
348 return -EINVAL;
349 }
350 return dst_node_trans_init(n, sizeof(struct dst_export_priv));
351}
352
353/*
354 * Initialize listening state and schedule accepting thread.
355 */
356int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le)
357{
358 struct dst_state *st;
359 int err = -ENOMEM;
360 struct dst_network_ctl *ctl = &le->ctl;
361
362 memcpy(&n->info->net, ctl, sizeof(struct dst_network_ctl));
363
364 st = dst_state_alloc(n);
365 if (IS_ERR(st)) {
366 err = PTR_ERR(st);
367 goto err_out_exit;
368 }
369 memcpy(&st->ctl, ctl, sizeof(struct dst_network_ctl));
370
371 err = dst_state_socket_create(st);
372 if (err)
373 goto err_out_put;
374
375 st->socket->sk->sk_reuse = 1;
376
377 err = kernel_bind(st->socket, (struct sockaddr *)&ctl->addr,
378 ctl->addr.sa_data_len);
379 if (err)
380 goto err_out_socket_release;
381
382 err = kernel_listen(st->socket, 1024);
383 if (err)
384 goto err_out_socket_release;
385 n->state = st;
386
387 err = dst_poll_init(st);
388 if (err)
389 goto err_out_socket_release;
390
391 dst_state_get(st);
392
393 err = thread_pool_schedule(n->pool, dst_thread_setup,
394 dst_accept, st, MAX_SCHEDULE_TIMEOUT);
395 if (err)
396 goto err_out_poll_exit;
397
398 return 0;
399
400err_out_poll_exit:
401 dst_poll_exit(st);
402err_out_socket_release:
403 dst_state_socket_release(st);
404err_out_put:
405 dst_state_put(st);
406err_out_exit:
407 n->state = NULL;
408 return err;
409}
410
411/*
412 * Free the bio and related private data.
413 * Also drop a reference on the corresponding state,
414 * which waits until there are no more block IOs in flight.
415 */
416static void dst_bio_destructor(struct bio *bio)
417{
418 struct bio_vec *bv;
419 struct dst_export_priv *priv = bio->bi_private;
420 int i;
421
422 bio_for_each_segment(bv, bio, i) {
423 if (!bv->bv_page)
424 break;
425
426 __free_page(bv->bv_page);
427 }
428
429 if (priv)
430 dst_state_put(priv->state);
431 bio_free(bio, dst_bio_set);
432}
433
434/*
435 * Block IO completion. Queue request to be sent back to
436 * the client (or just confirmation).
437 */
438static void dst_bio_end_io(struct bio *bio, int err)
439{
440 struct dst_export_priv *p = bio->bi_private;
441 struct dst_state *st = p->state;
442 unsigned long flags;
443
444 spin_lock_irqsave(&st->request_lock, flags);
445 list_add_tail(&p->request_entry, &st->request_list);
446 spin_unlock_irqrestore(&st->request_lock, flags);
447
448 wake_up(&st->thread_wait);
449}
450
451/*
452 * Allocate read request for the server.
453 */
454static int dst_export_read_request(struct bio *bio, unsigned int total_size)
455{
456 unsigned int size;
457 struct page *page;
458 int err;
459
460 while (total_size) {
461 err = -ENOMEM;
462 page = alloc_page(GFP_KERNEL);
463 if (!page)
464 goto err_out_exit;
465
466 size = min_t(unsigned int, PAGE_SIZE, total_size);
467
468 err = bio_add_page(bio, page, size, 0);
469 dprintk("%s: bio: %llu/%u, size: %u, err: %d.\n",
470 __func__, (u64)bio->bi_sector, bio->bi_size,
471 size, err);
472 if (err <= 0)
473 goto err_out_free_page;
474
475 total_size -= size;
476 }
477
478 return 0;
479
480err_out_free_page:
481 __free_page(page);
482err_out_exit:
483 return err;
484}
485
486/*
487 * Allocate write request for the server.
488 * It must not only allocate pages, but also read the data from the network.
489 */
490static int dst_export_write_request(struct dst_state *st,
491 struct bio *bio, unsigned int total_size)
492{
493 unsigned int size;
494 struct page *page;
495 void *data;
496 int err;
497
498 while (total_size) {
499 err = -ENOMEM;
500 page = alloc_page(GFP_KERNEL);
501 if (!page)
502 goto err_out_exit;
503
504 data = kmap(page);
505 if (!data)
506 goto err_out_free_page;
507
508 size = min_t(unsigned int, PAGE_SIZE, total_size);
509
510 err = dst_data_recv(st, data, size);
511 if (err)
512 goto err_out_unmap_page;
513
514 err = bio_add_page(bio, page, size, 0);
515 if (err <= 0)
516 goto err_out_unmap_page;
517
518 kunmap(page);
519
520 total_size -= size;
521 }
522
523 return 0;
524
525err_out_unmap_page:
526 kunmap(page);
527err_out_free_page:
528 __free_page(page);
529err_out_exit:
530 return err;
531}
532
533/*
534 * Groovy, we've gotten an IO request from the client.
535 * Allocate BIO from the bioset, private data from the mempool
536 * and lots of pages for IO.
537 */
538int dst_process_io(struct dst_state *st)
539{
540 struct dst_node *n = st->node;
541 struct dst_cmd *cmd = st->data;
542 struct bio *bio;
543 struct dst_export_priv *priv;
544 int err = -ENOMEM;
545
546 if (unlikely(!n->bdev)) {
547 err = -EINVAL;
548 goto err_out_exit;
549 }
550
551 bio = bio_alloc_bioset(GFP_KERNEL,
552 PAGE_ALIGN(cmd->size) >> PAGE_SHIFT,
553 dst_bio_set);
554 if (!bio)
555 goto err_out_exit;
556
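	/*
	 * The bioset was created with front_pad == sizeof(struct dst_export_priv)
	 * (see dst_export_init()), so the private area lives immediately in
	 * front of the bio within the same allocation.
	 */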
557 priv = (struct dst_export_priv *)(((void *)bio) -
558 sizeof (struct dst_export_priv));
559
560 priv->state = dst_state_get(st);
561 priv->bio = bio;
562
563 bio->bi_private = priv;
564 bio->bi_end_io = dst_bio_end_io;
565 bio->bi_destructor = dst_bio_destructor;
566 bio->bi_bdev = n->bdev;
567
568 /*
569 * Server side is only interested in two low bits:
570 * uptodate (set by itself actually) and rw block
571 */
572 bio->bi_flags |= cmd->flags & 3;
573
574 bio->bi_rw = cmd->rw;
575 bio->bi_size = 0;
576 bio->bi_sector = cmd->sector;
577
578 dst_bio_to_cmd(bio, &priv->cmd, DST_IO_RESPONSE, cmd->id);
579
580 priv->cmd.flags = 0;
581 priv->cmd.size = cmd->size;
582
583 if (bio_data_dir(bio) == WRITE) {
584 err = dst_recv_cdata(st, priv->cmd.hash);
585 if (err)
586 goto err_out_free;
587
588 err = dst_export_write_request(st, bio, cmd->size);
589 if (err)
590 goto err_out_free;
591
592 if (dst_need_crypto(n))
593 return dst_export_crypto(n, bio);
594 } else {
595 err = dst_export_read_request(bio, cmd->size);
596 if (err)
597 goto err_out_free;
598 }
599
600 dprintk("%s: bio: %llu/%u, rw: %lu, dir: %lu, flags: %lx, phys: %d.\n",
601 __func__, (u64)bio->bi_sector, bio->bi_size,
602 bio->bi_rw, bio_data_dir(bio),
603 bio->bi_flags, bio->bi_phys_segments);
604
605 generic_make_request(bio);
606
607 return 0;
608
609err_out_free:
610 bio_put(bio);
611err_out_exit:
612 return err;
613}
614
615/*
616 * Ok, block IO is ready, let's send it back to the client...
617 */
618int dst_export_send_bio(struct bio *bio)
619{
620 struct dst_export_priv *p = bio->bi_private;
621 struct dst_state *st = p->state;
622 struct dst_cmd *cmd = &p->cmd;
623 int err;
624
625 dprintk("%s: id: %llu, bio: %llu/%u, csize: %u, flags: %lu, rw: %lu.\n",
626 __func__, cmd->id, (u64)bio->bi_sector, bio->bi_size,
627 cmd->csize, bio->bi_flags, bio->bi_rw);
628
629 dst_convert_cmd(cmd);
630
631 dst_state_lock(st);
632 if (!st->socket) {
633 err = -ECONNRESET;
634 goto err_out_unlock;
635 }
636
637 if (bio_data_dir(bio) == WRITE) {
638 /* ... or just confirmation that writing has completed. */
639 cmd->size = cmd->csize = 0;
640 err = dst_data_send_header(st->socket, cmd,
641 sizeof(struct dst_cmd), 0);
642 if (err)
643 goto err_out_unlock;
644 } else {
645 err = dst_send_bio(st, cmd, bio);
646 if (err)
647 goto err_out_unlock;
648 }
649
650 dst_state_unlock(st);
651
652 bio_put(bio);
653 return 0;
654
655err_out_unlock:
656 dst_state_unlock(st);
657
658 bio_put(bio);
659 return err;
660}
diff --git a/drivers/staging/dst/state.c b/drivers/staging/dst/state.c
deleted file mode 100644
index 02a05e6c48c3..000000000000
--- a/drivers/staging/dst/state.c
+++ /dev/null
@@ -1,844 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/buffer_head.h>
17#include <linux/blkdev.h>
18#include <linux/bio.h>
19#include <linux/connector.h>
20#include <linux/dst.h>
21#include <linux/device.h>
22#include <linux/in.h>
23#include <linux/in6.h>
24#include <linux/socket.h>
25#include <linux/slab.h>
26
27#include <net/sock.h>
28
29/*
30 * Polling machinery.
31 */
32
33struct dst_poll_helper {
34 poll_table pt;
35 struct dst_state *st;
36};
37
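/*
 * dst_poll_init() runs the socket's ->poll() with a custom poll_table whose
 * callback, dst_queue_func(), registers dst_queue_wake() on the socket's
 * wait queue head.  Any socket event then wakes st->thread_wait instead of
 * a polling task, and dst_poll_exit() unhooks the entry again.
 */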
38static int dst_queue_wake(wait_queue_t *wait, unsigned mode,
39 int sync, void *key)
40{
41 struct dst_state *st = container_of(wait, struct dst_state, wait);
42
43 wake_up(&st->thread_wait);
44 return 1;
45}
46
47static void dst_queue_func(struct file *file, wait_queue_head_t *whead,
48 poll_table *pt)
49{
50 struct dst_state *st = container_of(pt, struct dst_poll_helper, pt)->st;
51
52 st->whead = whead;
53 init_waitqueue_func_entry(&st->wait, dst_queue_wake);
54 add_wait_queue(whead, &st->wait);
55}
56
57void dst_poll_exit(struct dst_state *st)
58{
59 if (st->whead) {
60 remove_wait_queue(st->whead, &st->wait);
61 st->whead = NULL;
62 }
63}
64
65int dst_poll_init(struct dst_state *st)
66{
67 struct dst_poll_helper ph;
68
69 ph.st = st;
70 init_poll_funcptr(&ph.pt, &dst_queue_func);
71
72 st->socket->ops->poll(NULL, st->socket, &ph.pt);
73 return 0;
74}
75
76/*
77 * Header receiving function - may block.
78 */
79static int dst_data_recv_header(struct socket *sock,
80 void *data, unsigned int size, int block)
81{
82 struct msghdr msg;
83 struct kvec iov;
84 int err;
85
86 iov.iov_base = data;
87 iov.iov_len = size;
88
89 msg.msg_iov = (struct iovec *)&iov;
90 msg.msg_iovlen = 1;
91 msg.msg_name = NULL;
92 msg.msg_namelen = 0;
93 msg.msg_control = NULL;
94 msg.msg_controllen = 0;
95 msg.msg_flags = (block) ? MSG_WAITALL : MSG_DONTWAIT;
96
97 err = kernel_recvmsg(sock, &msg, &iov, 1, iov.iov_len,
98 msg.msg_flags);
99 if (err != size)
100 return -1;
101
102 return 0;
103}
104
105/*
106 * Header sending function - may block.
107 */
108int dst_data_send_header(struct socket *sock,
109 void *data, unsigned int size, int more)
110{
111 struct msghdr msg;
112 struct kvec iov;
113 int err;
114
115 iov.iov_base = data;
116 iov.iov_len = size;
117
118 msg.msg_iov = (struct iovec *)&iov;
119 msg.msg_iovlen = 1;
120 msg.msg_name = NULL;
121 msg.msg_namelen = 0;
122 msg.msg_control = NULL;
123 msg.msg_controllen = 0;
124 msg.msg_flags = MSG_WAITALL | (more ? MSG_MORE : 0);
125
126 err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
127 if (err != size) {
128 dprintk("%s: size: %u, more: %d, err: %d.\n",
129 __func__, size, more, err);
130 return -1;
131 }
132
133 return 0;
134}
135
136/*
137 * Block autoconfiguration: request size of the storage and permissions.
138 */
139static int dst_request_remote_config(struct dst_state *st)
140{
141 struct dst_node *n = st->node;
142 int err = -EINVAL;
143 struct dst_cmd *cmd = st->data;
144
145 memset(cmd, 0, sizeof(struct dst_cmd));
146 cmd->cmd = DST_CFG;
147
148 dst_convert_cmd(cmd);
149
150 err = dst_data_send_header(st->socket, cmd, sizeof(struct dst_cmd), 0);
151 if (err)
152 goto out;
153
154 err = dst_data_recv_header(st->socket, cmd, sizeof(struct dst_cmd), 1);
155 if (err)
156 goto out;
157
158 dst_convert_cmd(cmd);
159
160 if (cmd->cmd != DST_CFG) {
161 err = -EINVAL;
162 dprintk("%s: checking result: cmd: %d, size reported: %llu.\n",
163 __func__, cmd->cmd, cmd->sector);
164 goto out;
165 }
166
167 if (n->size != 0)
168 n->size = min_t(loff_t, n->size, cmd->sector);
169 else
170 n->size = cmd->sector;
171
172 n->info->size = n->size;
173 st->permissions = cmd->rw;
174
175out:
176 dprintk("%s: n: %p, err: %d, size: %llu, permission: %x.\n",
177 __func__, n, err, n->size, st->permissions);
178 return err;
179}
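For context, a hedged sketch of what the export side presumably puts into its DST_CFG reply, inferred only from how the client parses it above (the size travels in cmd->sector, the permission bits in cmd->rw); the real reply is built by export-node code outside this section, and the fragment below assumes cmd, n and st are in scope as they are there.

	/* Hypothetical construction of the DST_CFG reply on the export node. */
	memset(cmd, 0, sizeof(struct dst_cmd));
	cmd->cmd = DST_CFG;
	cmd->sector = n->size;		/* exported size in bytes */
	cmd->rw = st->permissions;	/* DST_PERM_* bits granted to this client */
	dst_convert_cmd(cmd);
	err = dst_data_send_header(st->socket, cmd, sizeof(struct dst_cmd), 0);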
180
181/*
182 * Socket machinery.
183 */
184
185#define DST_DEFAULT_TIMEO 20000
186
187int dst_state_socket_create(struct dst_state *st)
188{
189 int err;
190 struct socket *sock;
191 struct dst_network_ctl *ctl = &st->ctl;
192
193 err = sock_create(ctl->addr.sa_family, ctl->type, ctl->proto, &sock);
194 if (err < 0)
195 return err;
196
197 sock->sk->sk_sndtimeo = sock->sk->sk_rcvtimeo =
198 msecs_to_jiffies(DST_DEFAULT_TIMEO);
199 sock->sk->sk_allocation = GFP_NOIO;
200
201 st->socket = st->read_socket = sock;
202 return 0;
203}
204
205void dst_state_socket_release(struct dst_state *st)
206{
207 dprintk("%s: st: %p, socket: %p, n: %p.\n",
208 __func__, st, st->socket, st->node);
209 if (st->socket) {
210 sock_release(st->socket);
211 st->socket = NULL;
212 st->read_socket = NULL;
213 }
214}
215
216void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str)
217{
218 if (sk->ops->family == AF_INET) {
219 struct sockaddr_in *sin = (struct sockaddr_in *)sa;
220 printk(KERN_INFO "%s %u.%u.%u.%u:%d.\n", str,
221 NIPQUAD(sin->sin_addr.s_addr), ntohs(sin->sin_port));
222 } else if (sk->ops->family == AF_INET6) {
223 struct sockaddr_in6 *sin = (struct sockaddr_in6 *)sa;
224 printk(KERN_INFO "%s %pi6:%d",
225 str, &sin->sin6_addr, ntohs(sin->sin6_port));
226 }
227}
228
229void dst_state_exit_connected(struct dst_state *st)
230{
231 if (st->socket) {
232 dst_poll_exit(st);
233 st->socket->ops->shutdown(st->socket, 2);
234
235 dst_dump_addr(st->socket, (struct sockaddr *)&st->ctl.addr,
236 "Disconnected peer");
237 dst_state_socket_release(st);
238 }
239}
240
241static int dst_state_init_connected(struct dst_state *st)
242{
243 int err;
244 struct dst_network_ctl *ctl = &st->ctl;
245
246 err = dst_state_socket_create(st);
247 if (err)
248 goto err_out_exit;
249
250 err = kernel_connect(st->socket, (struct sockaddr *)&st->ctl.addr,
251 st->ctl.addr.sa_data_len, 0);
252 if (err)
253 goto err_out_release;
254
255 err = dst_poll_init(st);
256 if (err)
257 goto err_out_release;
258
259 dst_dump_addr(st->socket, (struct sockaddr *)&ctl->addr,
260 "Connected to peer");
261
262 return 0;
263
264err_out_release:
265 dst_state_socket_release(st);
266err_out_exit:
267 return err;
268}
269
270/*
271 * State reset is used to reconnect to the remote peer.
272 * May fail, but who cares, we will try again later.
273 */
274static inline void dst_state_reset_nolock(struct dst_state *st)
275{
276 dst_state_exit_connected(st);
277 dst_state_init_connected(st);
278}
279
280static inline void dst_state_reset(struct dst_state *st)
281{
282 dst_state_lock(st);
283 dst_state_reset_nolock(st);
284 dst_state_unlock(st);
285}
286
287/*
288 * Basic network sending/receiving functions.
 289 * Blocking mode is used.
290 */
291static int dst_data_recv_raw(struct dst_state *st, void *buf, u64 size)
292{
293 struct msghdr msg;
294 struct kvec iov;
295 int err;
296
297 BUG_ON(!size);
298
299 iov.iov_base = buf;
300 iov.iov_len = size;
301
302 msg.msg_iov = (struct iovec *)&iov;
303 msg.msg_iovlen = 1;
304 msg.msg_name = NULL;
305 msg.msg_namelen = 0;
306 msg.msg_control = NULL;
307 msg.msg_controllen = 0;
308 msg.msg_flags = MSG_DONTWAIT;
309
310 err = kernel_recvmsg(st->socket, &msg, &iov, 1, iov.iov_len,
311 msg.msg_flags);
312 if (err <= 0) {
313 dprintk("%s: failed to recv data: size: %llu, err: %d.\n",
314 __func__, size, err);
315 if (err == 0)
316 err = -ECONNRESET;
317
318 dst_state_exit_connected(st);
319 }
320
321 return err;
322}
323
324/*
 325 * Ping command used to detect failed nodes early.
326 */
327static int dst_send_ping(struct dst_state *st)
328{
329 struct dst_cmd *cmd = st->data;
330 int err = -ECONNRESET;
331
332 dst_state_lock(st);
333 if (st->socket) {
334 memset(cmd, 0, sizeof(struct dst_cmd));
335
336 cmd->cmd = __cpu_to_be32(DST_PING);
337
338 err = dst_data_send_header(st->socket, cmd,
339 sizeof(struct dst_cmd), 0);
340 }
341 dprintk("%s: st: %p, socket: %p, err: %d.\n", __func__,
342 st, st->socket, err);
343 dst_state_unlock(st);
344
345 return err;
346}
347
348/*
 349 * Receiving function, which should either return an error or read the
 350 * whole block request. If there has been no traffic for one second,
 351 * send a ping, since the remote node may have died.
352 */
353int dst_data_recv(struct dst_state *st, void *data, unsigned int size)
354{
355 unsigned int revents = 0;
356 unsigned int err_mask = POLLERR | POLLHUP | POLLRDHUP;
357 unsigned int mask = err_mask | POLLIN;
358 struct dst_node *n = st->node;
359 int err = 0;
360
361 while (size && !err) {
362 revents = dst_state_poll(st);
363
364 if (!(revents & mask)) {
365 DEFINE_WAIT(wait);
366
367 for (;;) {
368 prepare_to_wait(&st->thread_wait, &wait,
369 TASK_INTERRUPTIBLE);
370 if (!n->trans_scan_timeout || st->need_exit)
371 break;
372
373 revents = dst_state_poll(st);
374
375 if (revents & mask)
376 break;
377
378 if (signal_pending(current))
379 break;
380
381 if (!schedule_timeout(HZ)) {
382 err = dst_send_ping(st);
383 if (err)
384 return err;
385 }
386
387 continue;
388 }
389 finish_wait(&st->thread_wait, &wait);
390 }
391
392 err = -ECONNRESET;
393 dst_state_lock(st);
394
395 if (st->socket && (st->read_socket == st->socket) &&
396 (revents & POLLIN)) {
397 err = dst_data_recv_raw(st, data, size);
398 if (err > 0) {
399 data += err;
400 size -= err;
401 err = 0;
402 }
403 }
404
405 if (revents & err_mask || !st->socket) {
406 dprintk("%s: revents: %x, socket: %p, size: %u, "
407 "err: %d.\n", __func__, revents,
408 st->socket, size, err);
409 err = -ECONNRESET;
410 }
411
412 dst_state_unlock(st);
413
414 if (!n->trans_scan_timeout)
415 err = -ENODEV;
416 }
417
418 return err;
419}
420
421/*
422 * Send block autoconf reply.
423 */
424static int dst_process_cfg(struct dst_state *st)
425{
426 struct dst_node *n = st->node;
427 struct dst_cmd *cmd = st->data;
428 int err;
429
430 cmd->sector = n->size;
431 cmd->rw = st->permissions;
432
433 dst_convert_cmd(cmd);
434
435 dst_state_lock(st);
436 err = dst_data_send_header(st->socket, cmd, sizeof(struct dst_cmd), 0);
437 dst_state_unlock(st);
438
439 return err;
440}
441
442/*
443 * Receive block IO from the network.
444 */
445static int dst_recv_bio(struct dst_state *st, struct bio *bio,
446 unsigned int total_size)
447{
448 struct bio_vec *bv;
449 int i, err;
450 void *data;
451 unsigned int sz;
452
453 bio_for_each_segment(bv, bio, i) {
454 sz = min(total_size, bv->bv_len);
455
456 dprintk("%s: bio: %llu/%u, total: %u, len: %u, sz: %u, "
457 "off: %u.\n", __func__, (u64)bio->bi_sector,
458 bio->bi_size, total_size, bv->bv_len, sz,
459 bv->bv_offset);
460
461 data = kmap(bv->bv_page) + bv->bv_offset;
462 err = dst_data_recv(st, data, sz);
463 kunmap(bv->bv_page);
464
465 bv->bv_len = sz;
466
467 if (err)
468 return err;
469
470 total_size -= sz;
471 if (total_size == 0)
472 break;
473 }
474
475 return 0;
476}
477
478/*
479 * Our block IO has just completed and arrived: get it.
480 */
481static int dst_process_io_response(struct dst_state *st)
482{
483 struct dst_node *n = st->node;
484 struct dst_cmd *cmd = st->data;
485 struct dst_trans *t;
486 int err = 0;
487 struct bio *bio;
488
489 mutex_lock(&n->trans_lock);
490 t = dst_trans_search(n, cmd->id);
491 mutex_unlock(&n->trans_lock);
492
493 if (!t)
494 goto err_out_exit;
495
496 bio = t->bio;
497
498 dprintk("%s: bio: %llu/%u, cmd_size: %u, csize: %u, dir: %lu.\n",
499 __func__, (u64)bio->bi_sector, bio->bi_size, cmd->size,
500 cmd->csize, bio_data_dir(bio));
501
502 if (bio_data_dir(bio) == READ) {
503 if (bio->bi_size != cmd->size - cmd->csize)
504 goto err_out_exit;
505
506 if (dst_need_crypto(n)) {
507 err = dst_recv_cdata(st, t->cmd.hash);
508 if (err)
509 goto err_out_exit;
510 }
511
512 err = dst_recv_bio(st, t->bio, bio->bi_size);
513 if (err)
514 goto err_out_exit;
515
516 if (dst_need_crypto(n))
517 return dst_trans_crypto(t);
518 } else {
519 err = -EBADMSG;
520 if (cmd->size || cmd->csize)
521 goto err_out_exit;
522 }
523
524 dst_trans_remove(t);
525 dst_trans_put(t);
526
527 return 0;
528
529err_out_exit:
530 return err;
531}
532
533/*
534 * Receive crypto data.
535 */
536int dst_recv_cdata(struct dst_state *st, void *cdata)
537{
538 struct dst_cmd *cmd = st->data;
539 struct dst_node *n = st->node;
540 struct dst_crypto_ctl *c = &n->crypto;
541 int err;
542
543 if (cmd->csize != c->crypto_attached_size) {
544 dprintk("%s: cmd: cmd: %u, sector: %llu, size: %u, "
545 "csize: %u != digest size %u.\n",
546 __func__, cmd->cmd, cmd->sector, cmd->size,
547 cmd->csize, c->crypto_attached_size);
548 err = -EINVAL;
549 goto err_out_exit;
550 }
551
552 err = dst_data_recv(st, cdata, cmd->csize);
553 if (err)
554 goto err_out_exit;
555
556 cmd->size -= cmd->csize;
557 return 0;
558
559err_out_exit:
560 return err;
561}
562
563/*
564 * Receive the command and start its processing.
565 */
566static int dst_recv_processing(struct dst_state *st)
567{
568 int err = -EINTR;
569 struct dst_cmd *cmd = st->data;
570
571 /*
 572 * If the socket is reset after this statement, then
 573 * dst_data_recv() will simply fail and the loop will
 574 * start again, so this can be done without any locks.
 575 *
 576 * st->read_socket is needed to prevent the state machine from
 577 * breaking between this read and the subsequent ones performed
 578 * in protocol-specific functions during a connection reset.
 579 * After a reset we have to read the next command and must not
 580 * expect data for the old command to magically appear on the
 581 * new connection.
582 */
583 st->read_socket = st->socket;
584 err = dst_data_recv(st, cmd, sizeof(struct dst_cmd));
585 if (err)
586 goto out_exit;
587
588 dst_convert_cmd(cmd);
589
590 dprintk("%s: cmd: %u, size: %u, csize: %u, id: %llu, "
591 "sector: %llu, flags: %llx, rw: %llx.\n",
592 __func__, cmd->cmd, cmd->size,
593 cmd->csize, cmd->id, cmd->sector,
594 cmd->flags, cmd->rw);
595
596 /*
597 * This should catch protocol breakage and random garbage
598 * instead of commands.
599 */
600 if (unlikely(cmd->csize > st->size - sizeof(struct dst_cmd))) {
601 err = -EBADMSG;
602 goto out_exit;
603 }
604
605 err = -EPROTO;
606 switch (cmd->cmd) {
607 case DST_IO_RESPONSE:
608 err = dst_process_io_response(st);
609 break;
610 case DST_IO:
611 err = dst_process_io(st);
612 break;
613 case DST_CFG:
614 err = dst_process_cfg(st);
615 break;
616 case DST_PING:
617 err = 0;
618 break;
619 default:
620 break;
621 }
622
623out_exit:
624 return err;
625}
626
627/*
 628 * Receiving thread. For a client node we should try to reconnect;
 629 * for an accepted client we just drop the state and expect it to reconnect.
630 */
631static int dst_recv(void *init_data, void *schedule_data)
632{
633 struct dst_state *st = schedule_data;
634 struct dst_node *n = init_data;
635 int err = 0;
636
637 dprintk("%s: start st: %p, n: %p, scan: %lu, need_exit: %d.\n",
638 __func__, st, n, n->trans_scan_timeout, st->need_exit);
639
640 while (n->trans_scan_timeout && !st->need_exit) {
641 err = dst_recv_processing(st);
642 if (err < 0) {
643 if (!st->ctl.type)
644 break;
645
646 if (!n->trans_scan_timeout || st->need_exit)
647 break;
648
649 dst_state_reset(st);
650 msleep(1000);
651 }
652 }
653
654 st->need_exit = 1;
655 wake_up(&st->thread_wait);
656
657 dprintk("%s: freeing receiving socket st: %p.\n", __func__, st);
658 dst_state_lock(st);
659 dst_state_exit_connected(st);
660 dst_state_unlock(st);
661 dst_state_put(st);
662
663 dprintk("%s: freed receiving socket st: %p.\n", __func__, st);
664
665 return err;
666}
667
668/*
 669 * The network state dies here and is born again a couple of lines below.
 670 * This object is the main network state processing engine:
 671 * sending, receiving, reconnections - all network-related
 672 * tasks are handled on behalf of the state.
673 */
674static void dst_state_free(struct dst_state *st)
675{
676 dprintk("%s: st: %p.\n", __func__, st);
677 if (st->cleanup)
678 st->cleanup(st);
679 kfree(st->data);
680 kfree(st);
681}
682
683struct dst_state *dst_state_alloc(struct dst_node *n)
684{
685 struct dst_state *st;
686 int err = -ENOMEM;
687
688 st = kzalloc(sizeof(struct dst_state), GFP_KERNEL);
689 if (!st)
690 goto err_out_exit;
691
692 st->node = n;
693 st->need_exit = 0;
694
695 st->size = PAGE_SIZE;
696 st->data = kmalloc(st->size, GFP_KERNEL);
697 if (!st->data)
698 goto err_out_free;
699
700 spin_lock_init(&st->request_lock);
701 INIT_LIST_HEAD(&st->request_list);
702
703 mutex_init(&st->state_lock);
704 init_waitqueue_head(&st->thread_wait);
705
706 /*
 707 * One reference for the processing thread, another for the node itself.
708 */
709 atomic_set(&st->refcnt, 2);
710
711 dprintk("%s: st: %p, n: %p.\n", __func__, st, st->node);
712
713 return st;
714
715err_out_free:
716 kfree(st);
717err_out_exit:
718 return ERR_PTR(err);
719}
720
721int dst_state_schedule_receiver(struct dst_state *st)
722{
723 return thread_pool_schedule_private(st->node->pool, dst_thread_setup,
724 dst_recv, st, MAX_SCHEDULE_TIMEOUT, st->node);
725}
726
727/*
728 * Initialize client's connection to the remote peer: allocate state,
729 * connect and perform block IO autoconfiguration.
730 */
731int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r)
732{
733 struct dst_state *st;
734 int err = -ENOMEM;
735
736 st = dst_state_alloc(n);
737 if (IS_ERR(st)) {
738 err = PTR_ERR(st);
739 goto err_out_exit;
740 }
741 memcpy(&st->ctl, r, sizeof(struct dst_network_ctl));
742
743 err = dst_state_init_connected(st);
744 if (err)
745 goto err_out_free_data;
746
747 err = dst_request_remote_config(st);
748 if (err)
749 goto err_out_exit_connected;
750 n->state = st;
751
752 err = dst_state_schedule_receiver(st);
753 if (err)
754 goto err_out_exit_connected;
755
756 return 0;
757
758err_out_exit_connected:
759 dst_state_exit_connected(st);
760err_out_free_data:
761 dst_state_free(st);
762err_out_exit:
763 n->state = NULL;
764 return err;
765}
766
767void dst_state_put(struct dst_state *st)
768{
769 dprintk("%s: st: %p, refcnt: %d.\n",
770 __func__, st, atomic_read(&st->refcnt));
771 if (atomic_dec_and_test(&st->refcnt))
772 dst_state_free(st);
773}
774
775/*
 776 * Send block IO pages to the network one by one using zero-copy ->sendpage().
777 */
778int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio)
779{
780 struct bio_vec *bv;
781 struct dst_crypto_ctl *c = &st->node->crypto;
782 int err, i = 0;
783 int flags = MSG_WAITALL;
784
785 err = dst_data_send_header(st->socket, cmd,
786 sizeof(struct dst_cmd) + c->crypto_attached_size, bio->bi_vcnt);
787 if (err)
788 goto err_out_exit;
789
790 bio_for_each_segment(bv, bio, i) {
791 if (i < bio->bi_vcnt - 1)
792 flags |= MSG_MORE;
793
794 err = kernel_sendpage(st->socket, bv->bv_page, bv->bv_offset,
795 bv->bv_len, flags);
796 if (err <= 0)
797 goto err_out_exit;
798 }
799
800 return 0;
801
802err_out_exit:
803 dprintk("%s: %d/%d, flags: %x, err: %d.\n",
804 __func__, i, bio->bi_vcnt, flags, err);
805 return err;
806}
807
808/*
809 * Send transaction to the remote peer.
810 */
811int dst_trans_send(struct dst_trans *t)
812{
813 int err;
814 struct dst_state *st = t->n->state;
815 struct bio *bio = t->bio;
816
817 dst_convert_cmd(&t->cmd);
818
819 dst_state_lock(st);
820 if (!st->socket) {
821 err = dst_state_init_connected(st);
822 if (err)
823 goto err_out_unlock;
824 }
825
826 if (bio_data_dir(bio) == WRITE) {
827 err = dst_send_bio(st, &t->cmd, t->bio);
828 } else {
829 err = dst_data_send_header(st->socket, &t->cmd,
830 sizeof(struct dst_cmd), 0);
831 }
832 if (err)
833 goto err_out_reset;
834
835 dst_state_unlock(st);
836 return 0;
837
838err_out_reset:
839 dst_state_reset_nolock(st);
840err_out_unlock:
841 dst_state_unlock(st);
842
843 return err;
844}
diff --git a/drivers/staging/dst/thread_pool.c b/drivers/staging/dst/thread_pool.c
deleted file mode 100644
index 29a82b2602f3..000000000000
--- a/drivers/staging/dst/thread_pool.c
+++ /dev/null
@@ -1,348 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/kernel.h>
17#include <linux/dst.h>
18#include <linux/kthread.h>
19#include <linux/slab.h>
20
21/*
 22 * The thread pool abstraction allows work to be scheduled and performed
 23 * on behalf of a kernel thread. One does not operate on threads directly;
 24 * instead, the user provides setup and cleanup callbacks for the pool itself,
 25 * and action and cleanup callbacks for each submitted work item.
 26 *
 27 * Each worker has private data initialized at creation time and data
 28 * provided by the user at scheduling time.
 29 *
 30 * While an action is being performed, the thread cannot be used by other
 31 * users; they will sleep until a free thread picks up their work.
32 */
33struct thread_pool_worker {
34 struct list_head worker_entry;
35
36 struct task_struct *thread;
37
38 struct thread_pool *pool;
39
40 int error;
41 int has_data;
42 int need_exit;
43 unsigned int id;
44
45 wait_queue_head_t wait;
46
47 void *private;
48 void *schedule_data;
49
50 int (*action)(void *private, void *schedule_data);
51 void (*cleanup)(void *private);
52};
53
54static void thread_pool_exit_worker(struct thread_pool_worker *w)
55{
56 kthread_stop(w->thread);
57
58 w->cleanup(w->private);
59 kfree(w);
60}
61
62/*
63 * Called to mark thread as ready and allow users to schedule new work.
64 */
65static void thread_pool_worker_make_ready(struct thread_pool_worker *w)
66{
67 struct thread_pool *p = w->pool;
68
69 mutex_lock(&p->thread_lock);
70
71 if (!w->need_exit) {
72 list_move_tail(&w->worker_entry, &p->ready_list);
73 w->has_data = 0;
74 mutex_unlock(&p->thread_lock);
75
76 wake_up(&p->wait);
77 } else {
78 p->thread_num--;
79 list_del(&w->worker_entry);
80 mutex_unlock(&p->thread_lock);
81
82 thread_pool_exit_worker(w);
83 }
84}
85
86/*
87 * Thread action loop: waits until there is new work.
88 */
89static int thread_pool_worker_func(void *data)
90{
91 struct thread_pool_worker *w = data;
92
93 while (!kthread_should_stop()) {
94 wait_event_interruptible(w->wait,
95 kthread_should_stop() || w->has_data);
96
97 if (kthread_should_stop())
98 break;
99
100 if (!w->has_data)
101 continue;
102
103 w->action(w->private, w->schedule_data);
104 thread_pool_worker_make_ready(w);
105 }
106
107 return 0;
108}
109
110/*
111 * Remove single worker without specifying which one.
112 */
113void thread_pool_del_worker(struct thread_pool *p)
114{
115 struct thread_pool_worker *w = NULL;
116
117 while (!w && p->thread_num) {
118 wait_event(p->wait, !list_empty(&p->ready_list) ||
119 !p->thread_num);
120
121 dprintk("%s: locking list_empty: %d, thread_num: %d.\n",
122 __func__, list_empty(&p->ready_list),
123 p->thread_num);
124
125 mutex_lock(&p->thread_lock);
126 if (!list_empty(&p->ready_list)) {
127 w = list_first_entry(&p->ready_list,
128 struct thread_pool_worker,
129 worker_entry);
130
131 dprintk("%s: deleting w: %p, thread_num: %d, "
132 "list: %p [%p.%p].\n", __func__,
133 w, p->thread_num, &p->ready_list,
134 p->ready_list.prev, p->ready_list.next);
135
136 p->thread_num--;
137 list_del(&w->worker_entry);
138 }
139 mutex_unlock(&p->thread_lock);
140 }
141
142 if (w)
143 thread_pool_exit_worker(w);
144 dprintk("%s: deleted w: %p, thread_num: %d.\n",
145 __func__, w, p->thread_num);
146}
147
148/*
149 * Remove a worker with given ID.
150 */
151void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id)
152{
153 struct thread_pool_worker *w;
154 int found = 0;
155
156 mutex_lock(&p->thread_lock);
157 list_for_each_entry(w, &p->ready_list, worker_entry) {
158 if (w->id == id) {
159 found = 1;
160 p->thread_num--;
161 list_del(&w->worker_entry);
162 break;
163 }
164 }
165
166 if (!found) {
167 list_for_each_entry(w, &p->active_list, worker_entry) {
168 if (w->id == id) {
169 w->need_exit = 1;
170 break;
171 }
172 }
173 }
174 mutex_unlock(&p->thread_lock);
175
176 if (found)
177 thread_pool_exit_worker(w);
178}
179
180/*
181 * Add new worker thread with given parameters.
182 * If initialization callback fails, return error.
183 */
184int thread_pool_add_worker(struct thread_pool *p,
185 char *name,
186 unsigned int id,
187 void *(*init)(void *private),
188 void (*cleanup)(void *private),
189 void *private)
190{
191 struct thread_pool_worker *w;
192 int err = -ENOMEM;
193
194 w = kzalloc(sizeof(struct thread_pool_worker), GFP_KERNEL);
195 if (!w)
196 goto err_out_exit;
197
198 w->pool = p;
199 init_waitqueue_head(&w->wait);
200 w->cleanup = cleanup;
201 w->id = id;
202
203 w->thread = kthread_run(thread_pool_worker_func, w, "%s", name);
204 if (IS_ERR(w->thread)) {
205 err = PTR_ERR(w->thread);
206 goto err_out_free;
207 }
208
209 w->private = init(private);
210 if (IS_ERR(w->private)) {
211 err = PTR_ERR(w->private);
212 goto err_out_stop_thread;
213 }
214
215 mutex_lock(&p->thread_lock);
216 list_add_tail(&w->worker_entry, &p->ready_list);
217 p->thread_num++;
218 mutex_unlock(&p->thread_lock);
219
220 return 0;
221
222err_out_stop_thread:
223 kthread_stop(w->thread);
224err_out_free:
225 kfree(w);
226err_out_exit:
227 return err;
228}
229
230/*
231 * Destroy the whole pool.
232 */
233void thread_pool_destroy(struct thread_pool *p)
234{
235 while (p->thread_num) {
236 dprintk("%s: num: %d.\n", __func__, p->thread_num);
237 thread_pool_del_worker(p);
238 }
239
240 kfree(p);
241}
242
243/*
 244 * Create a pool with the given number of threads.
 245 * They will have sequential IDs starting from zero.
246 */
247struct thread_pool *thread_pool_create(int num, char *name,
248 void *(*init)(void *private),
249 void (*cleanup)(void *private),
250 void *private)
251{
252 struct thread_pool_worker *w, *tmp;
253 struct thread_pool *p;
254 int err = -ENOMEM;
255 int i;
256
257 p = kzalloc(sizeof(struct thread_pool), GFP_KERNEL);
258 if (!p)
259 goto err_out_exit;
260
261 init_waitqueue_head(&p->wait);
262 mutex_init(&p->thread_lock);
263 INIT_LIST_HEAD(&p->ready_list);
264 INIT_LIST_HEAD(&p->active_list);
265 p->thread_num = 0;
266
267 for (i = 0; i < num; ++i) {
268 err = thread_pool_add_worker(p, name, i, init,
269 cleanup, private);
270 if (err)
271 goto err_out_free_all;
272 }
273
274 return p;
275
276err_out_free_all:
277 list_for_each_entry_safe(w, tmp, &p->ready_list, worker_entry) {
278 list_del(&w->worker_entry);
279 thread_pool_exit_worker(w);
280 }
281 kfree(p);
282err_out_exit:
283 return ERR_PTR(err);
284}
285
286/*
 287 * Schedule execution of the action on a given thread:
 288 * the provided ID pointer has to match the previously stored
 289 * private data.
290 */
291int thread_pool_schedule_private(struct thread_pool *p,
292 int (*setup)(void *private, void *data),
293 int (*action)(void *private, void *data),
294 void *data, long timeout, void *id)
295{
296 struct thread_pool_worker *w, *tmp, *worker = NULL;
297 int err = 0;
298
299 while (!worker && !err) {
300 timeout = wait_event_interruptible_timeout(p->wait,
301 !list_empty(&p->ready_list),
302 timeout);
303
304 if (!timeout) {
305 err = -ETIMEDOUT;
306 break;
307 }
308
309 worker = NULL;
310 mutex_lock(&p->thread_lock);
311 list_for_each_entry_safe(w, tmp, &p->ready_list, worker_entry) {
312 if (id && id != w->private)
313 continue;
314
315 worker = w;
316
317 list_move_tail(&w->worker_entry, &p->active_list);
318
319 err = setup(w->private, data);
320 if (!err) {
321 w->schedule_data = data;
322 w->action = action;
323 w->has_data = 1;
324 wake_up(&w->wait);
325 } else {
326 list_move_tail(&w->worker_entry,
327 &p->ready_list);
328 }
329
330 break;
331 }
332 mutex_unlock(&p->thread_lock);
333 }
334
335 return err;
336}
337
338/*
339 * Schedule execution on arbitrary thread from the pool.
340 */
341int thread_pool_schedule(struct thread_pool *p,
342 int (*setup)(void *private, void *data),
343 int (*action)(void *private, void *data),
344 void *data, long timeout)
345{
346 return thread_pool_schedule_private(p, setup,
347 action, data, timeout, NULL);
348}
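The pool is driven through a small API: create a pool, schedule work on it, and destroy it when done. A hedged usage sketch follows (all example_* names are illustrative only and assume the dst headers included at the top of this file):

/*
 * Hedged usage sketch, not part of the original file: create a small pool,
 * run one action on an arbitrary worker, and tear the pool down.
 */
static void *example_worker_init(void *private)
{
	/* Per-worker private data; just reuse the creation argument. */
	return private;
}

static void example_worker_cleanup(void *private)
{
	/* Nothing was allocated in example_worker_init(). */
}

static int example_setup(void *private, void *data)
{
	/* Accept every work item. */
	return 0;
}

static int example_action(void *private, void *data)
{
	pr_info("thread pool example: running with data %p\n", data);
	return 0;
}

static int example_use_pool(void)
{
	struct thread_pool *p;
	int err;

	p = thread_pool_create(2, "example", example_worker_init,
			       example_worker_cleanup, NULL);
	if (IS_ERR(p))
		return PTR_ERR(p);

	err = thread_pool_schedule(p, example_setup, example_action,
				   NULL, MAX_SCHEDULE_TIMEOUT);

	thread_pool_destroy(p);
	return err;
}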
diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
deleted file mode 100644
index 1c36a6bc31d5..000000000000
--- a/drivers/staging/dst/trans.c
+++ /dev/null
@@ -1,337 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/bio.h>
17#include <linux/dst.h>
18#include <linux/slab.h>
19#include <linux/mempool.h>
20
21/*
22 * Transaction memory pool size.
23 */
24static int dst_mempool_num = 32;
25module_param(dst_mempool_num, int, 0644);
26
27/*
28 * Transaction tree management.
29 */
30static inline int dst_trans_cmp(dst_gen_t gen, dst_gen_t new)
31{
32 if (gen < new)
33 return 1;
34 if (gen > new)
35 return -1;
36 return 0;
37}
38
39struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen)
40{
41 struct rb_root *root = &node->trans_root;
42 struct rb_node *n = root->rb_node;
43 struct dst_trans *t, *ret = NULL;
44 int cmp;
45
46 while (n) {
47 t = rb_entry(n, struct dst_trans, trans_entry);
48
49 cmp = dst_trans_cmp(t->gen, gen);
50 if (cmp < 0)
51 n = n->rb_left;
52 else if (cmp > 0)
53 n = n->rb_right;
54 else {
55 ret = t;
56 break;
57 }
58 }
59
60 dprintk("%s: %s transaction: id: %llu.\n", __func__,
61 (ret) ? "found" : "not found", gen);
62
63 return ret;
64}
65
66static int dst_trans_insert(struct dst_trans *new)
67{
68 struct rb_root *root = &new->n->trans_root;
69 struct rb_node **n = &root->rb_node, *parent = NULL;
70 struct dst_trans *ret = NULL, *t;
71 int cmp;
72
73 while (*n) {
74 parent = *n;
75
76 t = rb_entry(parent, struct dst_trans, trans_entry);
77
78 cmp = dst_trans_cmp(t->gen, new->gen);
79 if (cmp < 0)
80 n = &parent->rb_left;
81 else if (cmp > 0)
82 n = &parent->rb_right;
83 else {
84 ret = t;
85 break;
86 }
87 }
88
89 new->send_time = jiffies;
90 if (ret) {
91 printk(KERN_DEBUG "%s: exist: old: gen: %llu, bio: %llu/%u, "
92 "send_time: %lu, new: gen: %llu, bio: %llu/%u, "
93 "send_time: %lu.\n", __func__,
94 ret->gen, (u64)ret->bio->bi_sector,
95 ret->bio->bi_size, ret->send_time,
96 new->gen, (u64)new->bio->bi_sector,
97 new->bio->bi_size, new->send_time);
98 return -EEXIST;
99 }
100
101 rb_link_node(&new->trans_entry, parent, n);
102 rb_insert_color(&new->trans_entry, root);
103
104 dprintk("%s: inserted: gen: %llu, bio: %llu/%u, send_time: %lu.\n",
105 __func__, new->gen, (u64)new->bio->bi_sector,
106 new->bio->bi_size, new->send_time);
107
108 return 0;
109}
110
111int dst_trans_remove_nolock(struct dst_trans *t)
112{
113 struct dst_node *n = t->n;
114
115 if (t->trans_entry.rb_parent_color) {
116 rb_erase(&t->trans_entry, &n->trans_root);
117 t->trans_entry.rb_parent_color = 0;
118 }
119 return 0;
120}
121
122int dst_trans_remove(struct dst_trans *t)
123{
124 int ret;
125 struct dst_node *n = t->n;
126
127 mutex_lock(&n->trans_lock);
128 ret = dst_trans_remove_nolock(t);
129 mutex_unlock(&n->trans_lock);
130
131 return ret;
132}
133
134/*
 135 * When the transaction is completed and there are no more users,
 136 * we complete the appropriate block IO request with the given error status.
137 */
138void dst_trans_put(struct dst_trans *t)
139{
140 if (atomic_dec_and_test(&t->refcnt)) {
141 struct bio *bio = t->bio;
142
143 dprintk("%s: completed t: %p, gen: %llu, bio: %p.\n",
144 __func__, t, t->gen, bio);
145
146 bio_endio(bio, t->error);
147 bio_put(bio);
148
149 dst_node_put(t->n);
150 mempool_free(t, t->n->trans_pool);
151 }
152}
153
154/*
 155 * Process the given block IO request: allocate a transaction, insert it into the tree,
 156 * and either send it directly or schedule crypto processing first.
157 */
158int dst_process_bio(struct dst_node *n, struct bio *bio)
159{
160 struct dst_trans *t;
161 int err = -ENOMEM;
162
163 t = mempool_alloc(n->trans_pool, GFP_NOFS);
164 if (!t)
165 goto err_out_exit;
166
167 t->n = dst_node_get(n);
168 t->bio = bio;
169 t->error = 0;
170 t->retries = 0;
171 atomic_set(&t->refcnt, 1);
172 t->gen = atomic_long_inc_return(&n->gen);
173
174 t->enc = bio_data_dir(bio);
175 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
176
177 mutex_lock(&n->trans_lock);
178 err = dst_trans_insert(t);
179 mutex_unlock(&n->trans_lock);
180 if (err)
181 goto err_out_free;
182
183 dprintk("%s: gen: %llu, bio: %llu/%u, dir/enc: %d, need_crypto: %d.\n",
184 __func__, t->gen, (u64)bio->bi_sector,
185 bio->bi_size, t->enc, dst_need_crypto(n));
186
187 if (dst_need_crypto(n) && t->enc)
188 dst_trans_crypto(t);
189 else
190 dst_trans_send(t);
191
192 return 0;
193
194err_out_free:
195 dst_node_put(n);
196 mempool_free(t, n->trans_pool);
197err_out_exit:
198 bio_endio(bio, err);
199 bio_put(bio);
200 return err;
201}
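Both completion paths of dst_process_bio() drop a bio reference (dst_trans_put() and the error path each call bio_put()), so the block-layer glue that submits bios, which lives elsewhere in the driver, is expected to take its own reference first. A hedged sketch of such a caller, with illustrative names and the usual block-layer headers assumed:

/*
 * Hedged sketch, not part of this file: a make_request-style caller that
 * hands a bio to dst_process_bio().  The function name and queuedata
 * layout are assumptions for illustration.
 */
static int example_make_request(struct request_queue *q, struct bio *bio)
{
	struct dst_node *n = q->queuedata;

	/* dst_process_bio() owns and eventually puts this reference. */
	bio_get(bio);
	return dst_process_bio(n, bio);
}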
202
203/*
 204 * Scan for timed-out/stale transactions.
 205 * Each transaction is resent multiple times before being completed with an error.
206 */
207static void dst_trans_scan(struct work_struct *work)
208{
209 struct dst_node *n = container_of(work, struct dst_node,
210 trans_work.work);
211 struct rb_node *rb_node;
212 struct dst_trans *t;
213 unsigned long timeout = n->trans_scan_timeout;
214 int num = 10 * n->trans_max_retries;
215
216 mutex_lock(&n->trans_lock);
217
218 for (rb_node = rb_first(&n->trans_root); rb_node; ) {
219 t = rb_entry(rb_node, struct dst_trans, trans_entry);
220
221 if (timeout && time_after(t->send_time + timeout, jiffies)
222 && t->retries == 0)
223 break;
224#if 0
225 dprintk("%s: t: %p, gen: %llu, n: %s, retries: %u, max: %u.\n",
226 __func__, t, t->gen, n->name,
227 t->retries, n->trans_max_retries);
228#endif
229 if (--num == 0)
230 break;
231
232 dst_trans_get(t);
233
234 rb_node = rb_next(rb_node);
235
236 if (timeout && (++t->retries < n->trans_max_retries)) {
237 dst_trans_send(t);
238 } else {
239 t->error = -ETIMEDOUT;
240 dst_trans_remove_nolock(t);
241 dst_trans_put(t);
242 }
243
244 dst_trans_put(t);
245 }
246
247 mutex_unlock(&n->trans_lock);
248
249 /*
 250 * If no timeout is specified then the system is in the middle of
 251 * exiting, so there is no need to reschedule the scan.
252 */
253 if (timeout) {
254 if (!num)
255 timeout = HZ;
256 schedule_delayed_work(&n->trans_work, timeout);
257 }
258}
259
260/*
261 * Flush all transactions and mark them as timed out.
262 * Destroy transaction pools.
263 */
264void dst_node_trans_exit(struct dst_node *n)
265{
266 struct dst_trans *t;
267 struct rb_node *rb_node;
268
269 if (!n->trans_cache)
270 return;
271
272 dprintk("%s: n: %p, cancelling the work.\n", __func__, n);
273 cancel_delayed_work_sync(&n->trans_work);
274 flush_scheduled_work();
275 dprintk("%s: n: %p, work has been cancelled.\n", __func__, n);
276
277 for (rb_node = rb_first(&n->trans_root); rb_node; ) {
278 t = rb_entry(rb_node, struct dst_trans, trans_entry);
279
280 dprintk("%s: t: %p, gen: %llu, n: %s.\n",
281 __func__, t, t->gen, n->name);
282
283 rb_node = rb_next(rb_node);
284
285 t->error = -ETIMEDOUT;
286 dst_trans_remove_nolock(t);
287 dst_trans_put(t);
288 }
289
290 mempool_destroy(n->trans_pool);
291 kmem_cache_destroy(n->trans_cache);
292}
293
294/*
 295 * Initialize transaction storage for the given node.
 296 * A transaction stores not only control information,
 297 * but also the network command and crypto data (if needed)
 298 * to reduce the number of allocations. Thus the transaction size
 299 * differs from node to node.
300 */
301int dst_node_trans_init(struct dst_node *n, unsigned int size)
302{
303 /*
 304 * We need this, since a node with a given name can be dropped from the
 305 * hash table but still be alive, so a subsequent creation of a node
 306 * with the same name may collide with the existing cache name.
307 */
308
309 snprintf(n->cache_name, sizeof(n->cache_name), "%s-%p", n->name, n);
310
311 n->trans_cache = kmem_cache_create(n->cache_name,
312 size + n->crypto.crypto_attached_size,
313 0, 0, NULL);
314 if (!n->trans_cache)
315 goto err_out_exit;
316
317 n->trans_pool = mempool_create_slab_pool(dst_mempool_num,
318 n->trans_cache);
319 if (!n->trans_pool)
320 goto err_out_cache_destroy;
321
322 mutex_init(&n->trans_lock);
323 n->trans_root = RB_ROOT;
324
325 INIT_DELAYED_WORK(&n->trans_work, dst_trans_scan);
326 schedule_delayed_work(&n->trans_work, n->trans_scan_timeout);
327
328 dprintk("%s: n: %p, size: %u, crypto: %u.\n",
329 __func__, n, size, n->crypto.crypto_attached_size);
330
331 return 0;
332
333err_out_cache_destroy:
334 kmem_cache_destroy(n->trans_cache);
335err_out_exit:
336 return -ENOMEM;
337}
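The node-creation path elsewhere in the driver is expected to pair these two helpers around the node lifetime. A hedged sketch (the size argument is an assumption for illustration; the cache itself additionally reserves crypto_attached_size bytes, as shown above):

/*
 * Hedged sketch, not part of this file: pair dst_node_trans_init() and
 * dst_node_trans_exit() around node setup and teardown.
 */
static int example_node_trans_setup(struct dst_node *n)
{
	int err;

	err = dst_node_trans_init(n, sizeof(struct dst_trans));
	if (err)
		return err;

	/* ... node is used ... */

	dst_node_trans_exit(n);
	return 0;
}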
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
index f0b86f02cd80..fd677f008365 100644
--- a/drivers/staging/iio/ring_sw.h
+++ b/drivers/staging/iio/ring_sw.h
@@ -29,7 +29,6 @@
29 * driver requests - some may support multiple options */ 29 * driver requests - some may support multiple options */
30 30
31 31
32#include <linux/autoconf.h>
33#include "iio.h" 32#include "iio.h"
34#include "ring_generic.h" 33#include "ring_generic.h"
35 34
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 536e2382de54..638ad6b35891 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,7 +1,8 @@
1config OCTEON_ETHERNET 1config OCTEON_ETHERNET
2 tristate "Cavium Networks Octeon Ethernet support" 2 tristate "Cavium Networks Octeon Ethernet support"
3 depends on CPU_CAVIUM_OCTEON 3 depends on CPU_CAVIUM_OCTEON
4 select MII 4 select PHYLIB
5 select MDIO_OCTEON
5 help 6 help
6 This driver supports the builtin ethernet ports on Cavium 7 This driver supports the builtin ethernet ports on Cavium
7 Networks' products in the Octeon family. This driver supports the 8 Networks' products in the Octeon family. This driver supports the
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 31a58e508924..05a5cc0f43ed 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -26,7 +26,8 @@
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/ethtool.h> 28#include <linux/ethtool.h>
29#include <linux/mii.h> 29#include <linux/phy.h>
30
30#include <net/dst.h> 31#include <net/dst.h>
31 32
32#include <asm/octeon/octeon.h> 33#include <asm/octeon/octeon.h>
@@ -34,86 +35,12 @@
34#include "ethernet-defines.h" 35#include "ethernet-defines.h"
35#include "octeon-ethernet.h" 36#include "octeon-ethernet.h"
36#include "ethernet-mdio.h" 37#include "ethernet-mdio.h"
38#include "ethernet-util.h"
37 39
38#include "cvmx-helper-board.h" 40#include "cvmx-helper-board.h"
39 41
40#include "cvmx-smix-defs.h" 42#include "cvmx-smix-defs.h"
41 43
42DECLARE_MUTEX(mdio_sem);
43
44/**
45 * Perform an MII read. Called by the generic MII routines
46 *
47 * @dev: Device to perform read for
48 * @phy_id: The MII phy id
49 * @location: Register location to read
50 * Returns Result from the read or zero on failure
51 */
52static int cvm_oct_mdio_read(struct net_device *dev, int phy_id, int location)
53{
54 union cvmx_smix_cmd smi_cmd;
55 union cvmx_smix_rd_dat smi_rd;
56
57 smi_cmd.u64 = 0;
58 smi_cmd.s.phy_op = 1;
59 smi_cmd.s.phy_adr = phy_id;
60 smi_cmd.s.reg_adr = location;
61 cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);
62
63 do {
64 if (!in_interrupt())
65 yield();
66 smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(0));
67 } while (smi_rd.s.pending);
68
69 if (smi_rd.s.val)
70 return smi_rd.s.dat;
71 else
72 return 0;
73}
74
75static int cvm_oct_mdio_dummy_read(struct net_device *dev, int phy_id,
76 int location)
77{
78 return 0xffff;
79}
80
81/**
82 * Perform an MII write. Called by the generic MII routines
83 *
84 * @dev: Device to perform write for
85 * @phy_id: The MII phy id
86 * @location: Register location to write
87 * @val: Value to write
88 */
89static void cvm_oct_mdio_write(struct net_device *dev, int phy_id, int location,
90 int val)
91{
92 union cvmx_smix_cmd smi_cmd;
93 union cvmx_smix_wr_dat smi_wr;
94
95 smi_wr.u64 = 0;
96 smi_wr.s.dat = val;
97 cvmx_write_csr(CVMX_SMIX_WR_DAT(0), smi_wr.u64);
98
99 smi_cmd.u64 = 0;
100 smi_cmd.s.phy_op = 0;
101 smi_cmd.s.phy_adr = phy_id;
102 smi_cmd.s.reg_adr = location;
103 cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);
104
105 do {
106 if (!in_interrupt())
107 yield();
108 smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(0));
109 } while (smi_wr.s.pending);
110}
111
112static void cvm_oct_mdio_dummy_write(struct net_device *dev, int phy_id,
113 int location, int val)
114{
115}
116
117static void cvm_oct_get_drvinfo(struct net_device *dev, 44static void cvm_oct_get_drvinfo(struct net_device *dev,
118 struct ethtool_drvinfo *info) 45 struct ethtool_drvinfo *info)
119{ 46{
@@ -125,49 +52,37 @@ static void cvm_oct_get_drvinfo(struct net_device *dev,
125static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 52static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
126{ 53{
127 struct octeon_ethernet *priv = netdev_priv(dev); 54 struct octeon_ethernet *priv = netdev_priv(dev);
128 int ret;
129 55
130 down(&mdio_sem); 56 if (priv->phydev)
131 ret = mii_ethtool_gset(&priv->mii_info, cmd); 57 return phy_ethtool_gset(priv->phydev, cmd);
132 up(&mdio_sem);
133 58
134 return ret; 59 return -EINVAL;
135} 60}
136 61
137static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 62static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
138{ 63{
139 struct octeon_ethernet *priv = netdev_priv(dev); 64 struct octeon_ethernet *priv = netdev_priv(dev);
140 int ret;
141 65
142 down(&mdio_sem); 66 if (!capable(CAP_NET_ADMIN))
143 ret = mii_ethtool_sset(&priv->mii_info, cmd); 67 return -EPERM;
144 up(&mdio_sem); 68
69 if (priv->phydev)
70 return phy_ethtool_sset(priv->phydev, cmd);
145 71
146 return ret; 72 return -EINVAL;
147} 73}
148 74
149static int cvm_oct_nway_reset(struct net_device *dev) 75static int cvm_oct_nway_reset(struct net_device *dev)
150{ 76{
151 struct octeon_ethernet *priv = netdev_priv(dev); 77 struct octeon_ethernet *priv = netdev_priv(dev);
152 int ret;
153 78
154 down(&mdio_sem); 79 if (!capable(CAP_NET_ADMIN))
155 ret = mii_nway_restart(&priv->mii_info); 80 return -EPERM;
156 up(&mdio_sem);
157 81
158 return ret; 82 if (priv->phydev)
159} 83 return phy_start_aneg(priv->phydev);
160 84
161static u32 cvm_oct_get_link(struct net_device *dev) 85 return -EINVAL;
162{
163 struct octeon_ethernet *priv = netdev_priv(dev);
164 u32 ret;
165
166 down(&mdio_sem);
167 ret = mii_link_ok(&priv->mii_info);
168 up(&mdio_sem);
169
170 return ret;
171} 86}
172 87
173const struct ethtool_ops cvm_oct_ethtool_ops = { 88const struct ethtool_ops cvm_oct_ethtool_ops = {
@@ -175,7 +90,7 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
175 .get_settings = cvm_oct_get_settings, 90 .get_settings = cvm_oct_get_settings,
176 .set_settings = cvm_oct_set_settings, 91 .set_settings = cvm_oct_set_settings,
177 .nway_reset = cvm_oct_nway_reset, 92 .nway_reset = cvm_oct_nway_reset,
178 .get_link = cvm_oct_get_link, 93 .get_link = ethtool_op_get_link,
179 .get_sg = ethtool_op_get_sg, 94 .get_sg = ethtool_op_get_sg,
180 .get_tx_csum = ethtool_op_get_tx_csum, 95 .get_tx_csum = ethtool_op_get_tx_csum,
181}; 96};
@@ -191,41 +106,78 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
191int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 106int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
192{ 107{
193 struct octeon_ethernet *priv = netdev_priv(dev); 108 struct octeon_ethernet *priv = netdev_priv(dev);
194 struct mii_ioctl_data *data = if_mii(rq);
195 unsigned int duplex_chg;
196 int ret;
197 109
198 down(&mdio_sem); 110 if (!netif_running(dev))
199 ret = generic_mii_ioctl(&priv->mii_info, data, cmd, &duplex_chg); 111 return -EINVAL;
200 up(&mdio_sem); 112
113 if (!priv->phydev)
114 return -EINVAL;
115
116 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
117}
201 118
202 return ret; 119static void cvm_oct_adjust_link(struct net_device *dev)
120{
121 struct octeon_ethernet *priv = netdev_priv(dev);
122 cvmx_helper_link_info_t link_info;
123
124 if (priv->last_link != priv->phydev->link) {
125 priv->last_link = priv->phydev->link;
126 link_info.u64 = 0;
127 link_info.s.link_up = priv->last_link ? 1 : 0;
128 link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
129 link_info.s.speed = priv->phydev->speed;
130 cvmx_helper_link_set( priv->port, link_info);
131 if (priv->last_link) {
132 netif_carrier_on(dev);
133 if (priv->queue != -1)
134 DEBUGPRINT("%s: %u Mbps %s duplex, "
135 "port %2d, queue %2d\n",
136 dev->name, priv->phydev->speed,
137 priv->phydev->duplex ?
138 "Full" : "Half",
139 priv->port, priv->queue);
140 else
141 DEBUGPRINT("%s: %u Mbps %s duplex, "
142 "port %2d, POW\n",
143 dev->name, priv->phydev->speed,
144 priv->phydev->duplex ?
145 "Full" : "Half",
146 priv->port);
147 } else {
148 netif_carrier_off(dev);
149 DEBUGPRINT("%s: Link down\n", dev->name);
150 }
151 }
203} 152}
204 153
154
205/** 155/**
206 * Setup the MDIO device structures 156 * Setup the PHY
207 * 157 *
208 * @dev: Device to setup 158 * @dev: Device to setup
209 * 159 *
210 * Returns Zero on success, negative on failure 160 * Returns Zero on success, negative on failure
211 */ 161 */
212int cvm_oct_mdio_setup_device(struct net_device *dev) 162int cvm_oct_phy_setup_device(struct net_device *dev)
213{ 163{
214 struct octeon_ethernet *priv = netdev_priv(dev); 164 struct octeon_ethernet *priv = netdev_priv(dev);
215 int phy_id = cvmx_helper_board_get_mii_address(priv->port); 165
216 if (phy_id != -1) { 166 int phy_addr = cvmx_helper_board_get_mii_address(priv->port);
217 priv->mii_info.dev = dev; 167 if (phy_addr != -1) {
218 priv->mii_info.phy_id = phy_id; 168 char phy_id[20];
219 priv->mii_info.phy_id_mask = 0xff; 169
220 priv->mii_info.supports_gmii = 1; 170 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", phy_addr);
221 priv->mii_info.reg_num_mask = 0x1f; 171
222 priv->mii_info.mdio_read = cvm_oct_mdio_read; 172 priv->phydev = phy_connect(dev, phy_id, cvm_oct_adjust_link, 0,
223 priv->mii_info.mdio_write = cvm_oct_mdio_write; 173 PHY_INTERFACE_MODE_GMII);
224 } else { 174
225 /* Supply dummy MDIO routines so the kernel won't crash 175 if (IS_ERR(priv->phydev)) {
226 if the user tries to read them */ 176 priv->phydev = NULL;
227 priv->mii_info.mdio_read = cvm_oct_mdio_dummy_read; 177 return -1;
228 priv->mii_info.mdio_write = cvm_oct_mdio_dummy_write; 178 }
179 priv->last_link = 0;
180 phy_start_aneg(priv->phydev);
229 } 181 }
230 return 0; 182 return 0;
231} 183}
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index b3328aeec2df..55d0614a7cd9 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -43,4 +43,4 @@
43 43
44extern const struct ethtool_ops cvm_oct_ethtool_ops; 44extern const struct ethtool_ops cvm_oct_ethtool_ops;
45int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 45int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
46int cvm_oct_mdio_setup_device(struct net_device *dev); 46int cvm_oct_phy_setup_device(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-proc.c b/drivers/staging/octeon/ethernet-proc.c
index 8fa88fc419b7..16308d484d3b 100644
--- a/drivers/staging/octeon/ethernet-proc.c
+++ b/drivers/staging/octeon/ethernet-proc.c
@@ -25,7 +25,6 @@
25 * Contact Cavium Networks for more information 25 * Contact Cavium Networks for more information
26**********************************************************************/ 26**********************************************************************/
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/mii.h>
29#include <linux/seq_file.h> 28#include <linux/seq_file.h>
30#include <linux/proc_fs.h> 29#include <linux/proc_fs.h>
31#include <net/dst.h> 30#include <net/dst.h>
@@ -38,112 +37,6 @@
38#include "cvmx-helper.h" 37#include "cvmx-helper.h"
39#include "cvmx-pip.h" 38#include "cvmx-pip.h"
40 39
41static unsigned long long cvm_oct_stats_read_switch(struct net_device *dev,
42 int phy_id, int offset)
43{
44 struct octeon_ethernet *priv = netdev_priv(dev);
45
46 priv->mii_info.mdio_write(dev, phy_id, 0x1d, 0xcc00 | offset);
47 return ((uint64_t) priv->mii_info.
48 mdio_read(dev, phy_id,
49 0x1e) << 16) | (uint64_t) priv->mii_info.
50 mdio_read(dev, phy_id, 0x1f);
51}
52
53static int cvm_oct_stats_switch_show(struct seq_file *m, void *v)
54{
55 static const int ports[] = { 0, 1, 2, 3, 9, -1 };
56 struct net_device *dev = cvm_oct_device[0];
57 int index = 0;
58
59 while (ports[index] != -1) {
60
61 /* Latch port */
62 struct octeon_ethernet *priv = netdev_priv(dev);
63
64 priv->mii_info.mdio_write(dev, 0x1b, 0x1d,
65 0xdc00 | ports[index]);
66 seq_printf(m, "\nSwitch Port %d\n", ports[index]);
67 seq_printf(m, "InGoodOctets: %12llu\t"
68 "OutOctets: %12llu\t"
69 "64 Octets: %12llu\n",
70 cvm_oct_stats_read_switch(dev, 0x1b,
71 0x00) |
72 (cvm_oct_stats_read_switch(dev, 0x1b, 0x01) << 32),
73 cvm_oct_stats_read_switch(dev, 0x1b,
74 0x0E) |
75 (cvm_oct_stats_read_switch(dev, 0x1b, 0x0F) << 32),
76 cvm_oct_stats_read_switch(dev, 0x1b, 0x08));
77
78 seq_printf(m, "InBadOctets: %12llu\t"
79 "OutUnicast: %12llu\t"
80 "65-127 Octets: %12llu\n",
81 cvm_oct_stats_read_switch(dev, 0x1b, 0x02),
82 cvm_oct_stats_read_switch(dev, 0x1b, 0x10),
83 cvm_oct_stats_read_switch(dev, 0x1b, 0x09));
84
85 seq_printf(m, "InUnicast: %12llu\t"
86 "OutBroadcasts: %12llu\t"
87 "128-255 Octets: %12llu\n",
88 cvm_oct_stats_read_switch(dev, 0x1b, 0x04),
89 cvm_oct_stats_read_switch(dev, 0x1b, 0x13),
90 cvm_oct_stats_read_switch(dev, 0x1b, 0x0A));
91
92 seq_printf(m, "InBroadcasts: %12llu\t"
93 "OutMulticasts: %12llu\t"
94 "256-511 Octets: %12llu\n",
95 cvm_oct_stats_read_switch(dev, 0x1b, 0x06),
96 cvm_oct_stats_read_switch(dev, 0x1b, 0x12),
97 cvm_oct_stats_read_switch(dev, 0x1b, 0x0B));
98
99 seq_printf(m, "InMulticasts: %12llu\t"
100 "OutPause: %12llu\t"
101 "512-1023 Octets:%12llu\n",
102 cvm_oct_stats_read_switch(dev, 0x1b, 0x07),
103 cvm_oct_stats_read_switch(dev, 0x1b, 0x15),
104 cvm_oct_stats_read_switch(dev, 0x1b, 0x0C));
105
106 seq_printf(m, "InPause: %12llu\t"
107 "Excessive: %12llu\t"
108 "1024-Max Octets:%12llu\n",
109 cvm_oct_stats_read_switch(dev, 0x1b, 0x16),
110 cvm_oct_stats_read_switch(dev, 0x1b, 0x11),
111 cvm_oct_stats_read_switch(dev, 0x1b, 0x0D));
112
113 seq_printf(m, "InUndersize: %12llu\t"
114 "Collisions: %12llu\n",
115 cvm_oct_stats_read_switch(dev, 0x1b, 0x18),
116 cvm_oct_stats_read_switch(dev, 0x1b, 0x1E));
117
118 seq_printf(m, "InFragments: %12llu\t"
119 "Deferred: %12llu\n",
120 cvm_oct_stats_read_switch(dev, 0x1b, 0x19),
121 cvm_oct_stats_read_switch(dev, 0x1b, 0x05));
122
123 seq_printf(m, "InOversize: %12llu\t"
124 "Single: %12llu\n",
125 cvm_oct_stats_read_switch(dev, 0x1b, 0x1A),
126 cvm_oct_stats_read_switch(dev, 0x1b, 0x14));
127
128 seq_printf(m, "InJabber: %12llu\t"
129 "Multiple: %12llu\n",
130 cvm_oct_stats_read_switch(dev, 0x1b, 0x1B),
131 cvm_oct_stats_read_switch(dev, 0x1b, 0x17));
132
133 seq_printf(m, "In RxErr: %12llu\t"
134 "OutFCSErr: %12llu\n",
135 cvm_oct_stats_read_switch(dev, 0x1b, 0x1C),
136 cvm_oct_stats_read_switch(dev, 0x1b, 0x03));
137
138 seq_printf(m, "InFCSErr: %12llu\t"
139 "Late: %12llu\n",
140 cvm_oct_stats_read_switch(dev, 0x1b, 0x1D),
141 cvm_oct_stats_read_switch(dev, 0x1b, 0x1F));
142 index++;
143 }
144 return 0;
145}
146
147/** 40/**
148 * User is reading /proc/octeon_ethernet_stats 41 * User is reading /proc/octeon_ethernet_stats
149 * 42 *
@@ -215,11 +108,6 @@ static int cvm_oct_stats_show(struct seq_file *m, void *v)
215 } 108 }
216 } 109 }
217 110
218 if (cvm_oct_device[0]) {
219 priv = netdev_priv(cvm_oct_device[0]);
220 if (priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
221 cvm_oct_stats_switch_show(m, v);
222 }
223 return 0; 111 return 0;
224} 112}
225 113
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index fbaa465d2fac..3820f1ec11d1 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -147,32 +147,36 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
147 cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), 147 cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
148 gmxx_rxx_int_reg.u64); 148 gmxx_rxx_int_reg.u64);
149 } 149 }
150 150 if (priv->phydev == NULL) {
151 link_info = cvmx_helper_link_autoconf(priv->port); 151 link_info = cvmx_helper_link_autoconf(priv->port);
152 priv->link_info = link_info.u64; 152 priv->link_info = link_info.u64;
153 }
153 spin_unlock_irqrestore(&global_register_lock, flags); 154 spin_unlock_irqrestore(&global_register_lock, flags);
154 155
155 /* Tell Linux */ 156 if (priv->phydev == NULL) {
156 if (link_info.s.link_up) { 157 /* Tell core. */
157 158 if (link_info.s.link_up) {
158 if (!netif_carrier_ok(dev)) 159 if (!netif_carrier_ok(dev))
159 netif_carrier_on(dev); 160 netif_carrier_on(dev);
160 if (priv->queue != -1) 161 if (priv->queue != -1)
161 DEBUGPRINT 162 DEBUGPRINT("%s: %u Mbps %s duplex, "
162 ("%s: %u Mbps %s duplex, port %2d, queue %2d\n", 163 "port %2d, queue %2d\n",
163 dev->name, link_info.s.speed, 164 dev->name, link_info.s.speed,
164 (link_info.s.full_duplex) ? "Full" : "Half", 165 (link_info.s.full_duplex) ?
165 priv->port, priv->queue); 166 "Full" : "Half",
166 else 167 priv->port, priv->queue);
167 DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n", 168 else
168 dev->name, link_info.s.speed, 169 DEBUGPRINT("%s: %u Mbps %s duplex, "
169 (link_info.s.full_duplex) ? "Full" : "Half", 170 "port %2d, POW\n",
170 priv->port); 171 dev->name, link_info.s.speed,
171 } else { 172 (link_info.s.full_duplex) ?
172 173 "Full" : "Half",
173 if (netif_carrier_ok(dev)) 174 priv->port);
174 netif_carrier_off(dev); 175 } else {
175 DEBUGPRINT("%s: Link down\n", dev->name); 176 if (netif_carrier_ok(dev))
177 netif_carrier_off(dev);
178 DEBUGPRINT("%s: Link down\n", dev->name);
179 }
176 } 180 }
177} 181}
178 182
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 2b54996bd85d..6061d01eca2d 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -113,7 +113,7 @@ int cvm_oct_sgmii_init(struct net_device *dev)
113 struct octeon_ethernet *priv = netdev_priv(dev); 113 struct octeon_ethernet *priv = netdev_priv(dev);
114 cvm_oct_common_init(dev); 114 cvm_oct_common_init(dev);
115 dev->netdev_ops->ndo_stop(dev); 115 dev->netdev_ops->ndo_stop(dev);
116 if (!octeon_is_simulation()) 116 if (!octeon_is_simulation() && priv->phydev == NULL)
117 priv->poll = cvm_oct_sgmii_poll; 117 priv->poll = cvm_oct_sgmii_poll;
118 118
119 /* FIXME: Need autoneg logic */ 119 /* FIXME: Need autoneg logic */
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index 0c2e7cc40f35..ee3dc41b2c53 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -112,7 +112,7 @@ int cvm_oct_xaui_init(struct net_device *dev)
112 struct octeon_ethernet *priv = netdev_priv(dev); 112 struct octeon_ethernet *priv = netdev_priv(dev);
113 cvm_oct_common_init(dev); 113 cvm_oct_common_init(dev);
114 dev->netdev_ops->ndo_stop(dev); 114 dev->netdev_ops->ndo_stop(dev);
115 if (!octeon_is_simulation()) 115 if (!octeon_is_simulation() && priv->phydev == NULL)
116 priv->poll = cvm_oct_xaui_poll; 116 priv->poll = cvm_oct_xaui_poll;
117 117
118 return 0; 118 return 0;
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 492c5029992d..4cfd4b136b32 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -30,7 +30,7 @@
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/mii.h> 33#include <linux/phy.h>
34 34
35#include <net/dst.h> 35#include <net/dst.h>
36 36
@@ -132,8 +132,6 @@ static struct timer_list cvm_oct_poll_timer;
132 */ 132 */
133struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS]; 133struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
134 134
135extern struct semaphore mdio_sem;
136
137/** 135/**
138 * Periodic timer tick for slow management operations 136 * Periodic timer tick for slow management operations
139 * 137 *
@@ -160,13 +158,8 @@ static void cvm_do_timer(unsigned long arg)
160 goto out; 158 goto out;
161 159
162 priv = netdev_priv(cvm_oct_device[port]); 160 priv = netdev_priv(cvm_oct_device[port]);
163 if (priv->poll) { 161 if (priv->poll)
164 /* skip polling if we don't get the lock */ 162 priv->poll(cvm_oct_device[port]);
165 if (!down_trylock(&mdio_sem)) {
166 priv->poll(cvm_oct_device[port]);
167 up(&mdio_sem);
168 }
169 }
170 163
171 queues_per_port = cvmx_pko_get_num_queues(port); 164 queues_per_port = cvmx_pko_get_num_queues(port);
172 /* Drain any pending packets in the free list */ 165 /* Drain any pending packets in the free list */
@@ -524,7 +517,7 @@ int cvm_oct_common_init(struct net_device *dev)
524 dev->features |= NETIF_F_LLTX; 517 dev->features |= NETIF_F_LLTX;
525 SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops); 518 SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
526 519
527 cvm_oct_mdio_setup_device(dev); 520 cvm_oct_phy_setup_device(dev);
528 dev->netdev_ops->ndo_set_mac_address(dev, &sa); 521 dev->netdev_ops->ndo_set_mac_address(dev, &sa);
529 dev->netdev_ops->ndo_change_mtu(dev, dev->mtu); 522 dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
530 523
@@ -540,7 +533,10 @@ int cvm_oct_common_init(struct net_device *dev)
540 533
541void cvm_oct_common_uninit(struct net_device *dev) 534void cvm_oct_common_uninit(struct net_device *dev)
542{ 535{
543 /* Currently nothing to do */ 536 struct octeon_ethernet *priv = netdev_priv(dev);
537
538 if (priv->phydev)
539 phy_disconnect(priv->phydev);
544} 540}
545 541
546static const struct net_device_ops cvm_oct_npi_netdev_ops = { 542static const struct net_device_ops cvm_oct_npi_netdev_ops = {
@@ -627,6 +623,8 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
627#endif 623#endif
628}; 624};
629 625
626extern void octeon_mdiobus_force_mod_depencency(void);
627
630/** 628/**
631 * Module/ driver initialization. Creates the linux network 629 * Module/ driver initialization. Creates the linux network
632 * devices. 630 * devices.
@@ -640,6 +638,7 @@ static int __init cvm_oct_init_module(void)
640 int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE; 638 int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
641 int qos; 639 int qos;
642 640
641 octeon_mdiobus_force_mod_depencency();
643 pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION); 642 pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
644 643
645 if (OCTEON_IS_MODEL(OCTEON_CN52XX)) 644 if (OCTEON_IS_MODEL(OCTEON_CN52XX))
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 3aef9878fc0a..402a15b9bb0e 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -50,9 +50,9 @@ struct octeon_ethernet {
50 /* List of outstanding tx buffers per queue */ 50 /* List of outstanding tx buffers per queue */
51 struct sk_buff_head tx_free_list[16]; 51 struct sk_buff_head tx_free_list[16];
52 /* Device statistics */ 52 /* Device statistics */
53 struct net_device_stats stats 53 struct net_device_stats stats;
54; /* Generic MII info structure */ 54 struct phy_device *phydev;
55 struct mii_if_info mii_info; 55 unsigned int last_link;
56 /* Last negotiated link state */ 56 /* Last negotiated link state */
57 uint64_t link_info; 57 uint64_t link_info;
58 /* Called periodically to check link status */ 58 /* Called periodically to check link status */
diff --git a/drivers/staging/panel/Kconfig b/drivers/staging/panel/Kconfig
index 3abe7c9d558d..3defa0133f2e 100644
--- a/drivers/staging/panel/Kconfig
+++ b/drivers/staging/panel/Kconfig
@@ -47,7 +47,7 @@ config PANEL_PROFILE
47config PANEL_KEYPAD 47config PANEL_KEYPAD
48 depends on PANEL && PANEL_PROFILE="0" 48 depends on PANEL && PANEL_PROFILE="0"
49 int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)" 49 int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
50 range 0 4 50 range 0 3
51 default 0 51 default 0
52 ---help--- 52 ---help---
53 This enables and configures a keypad connected to the parallel port. 53 This enables and configures a keypad connected to the parallel port.
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 4ce399b6d237..95c93e82ccec 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -55,7 +55,7 @@
55#include <linux/list.h> 55#include <linux/list.h>
56#include <linux/notifier.h> 56#include <linux/notifier.h>
57#include <linux/reboot.h> 57#include <linux/reboot.h>
58#include <linux/utsrelease.h> 58#include <generated/utsrelease.h>
59 59
60#include <linux/io.h> 60#include <linux/io.h>
61#include <asm/uaccess.h> 61#include <asm/uaccess.h>
@@ -378,7 +378,7 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
378 378
379#ifdef CONFIG_PANEL_LCD_CHARSET 379#ifdef CONFIG_PANEL_LCD_CHARSET
380#undef DEFAULT_LCD_CHARSET 380#undef DEFAULT_LCD_CHARSET
381#define DEFAULT_LCD_CHARSET 381#define DEFAULT_LCD_CHARSET CONFIG_PANEL_LCD_CHARSET
382#endif 382#endif
383 383
384#endif /* DEFAULT_PROFILE == 0 */ 384#endif /* DEFAULT_PROFILE == 0 */
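The panel.c hunk above fixes a value-less macro: the old #define DEFAULT_LCD_CHARSET expanded to nothing, so the value chosen via CONFIG_PANEL_LCD_CHARSET never reached the code (or broke the build, depending on the use site). A small illustrative fragment of the corrected form; the use site shown is hypothetical, only the #define comes from the patch:

    /* old form: "#define DEFAULT_LCD_CHARSET" expanded to nothing,
     * so "int lcd_charset = DEFAULT_LCD_CHARSET;" became "= ;".
     * Fixed form forwards the configured value: */
    #define DEFAULT_LCD_CHARSET  CONFIG_PANEL_LCD_CHARSET

    static int lcd_charset_example = DEFAULT_LCD_CHARSET;  /* hypothetical use */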
diff --git a/drivers/staging/pohmelfs/dir.c b/drivers/staging/pohmelfs/dir.c
index 6c5b261e9f06..aacd25bfb0cb 100644
--- a/drivers/staging/pohmelfs/dir.c
+++ b/drivers/staging/pohmelfs/dir.c
@@ -722,8 +722,6 @@ static int pohmelfs_remove_entry(struct inode *dir, struct dentry *dentry)
722 if (inode->i_nlink) 722 if (inode->i_nlink)
723 inode_dec_link_count(inode); 723 inode_dec_link_count(inode);
724 } 724 }
725 dprintk("%s: inode: %p, lock: %ld, unhashed: %d.\n",
726 __func__, pi, inode->i_state & I_LOCK, hlist_unhashed(&inode->i_hash));
727 725
728 return err; 726 return err;
729} 727}
diff --git a/drivers/staging/ramzswap/TODO b/drivers/staging/ramzswap/TODO
index bac40d6cb9f1..8d64e28fac0e 100644
--- a/drivers/staging/ramzswap/TODO
+++ b/drivers/staging/ramzswap/TODO
@@ -1,6 +1,5 @@
1TODO: 1TODO:
2 - Add support for swap notifiers 2 - Add support for swap notifiers
3 - Remove CONFIG_ARM hack
4 3
5Please send patches to Greg Kroah-Hartman <greg@kroah.com> and 4Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
6Nitin Gupta <ngupta@vflare.org> 5Nitin Gupta <ngupta@vflare.org>
diff --git a/drivers/staging/ramzswap/ramzswap_drv.c b/drivers/staging/ramzswap/ramzswap_drv.c
index b839f05efbce..989fac5b01b3 100644
--- a/drivers/staging/ramzswap/ramzswap_drv.c
+++ b/drivers/staging/ramzswap/ramzswap_drv.c
@@ -222,28 +222,6 @@ out:
222 return ret; 222 return ret;
223} 223}
224 224
225static void ramzswap_flush_dcache_page(struct page *page)
226{
227#ifdef CONFIG_ARM
228 int flag = 0;
229 /*
230 * Ugly hack to get flush_dcache_page() work on ARM.
231 * page_mapping(page) == NULL after clearing this swap cache flag.
232 * Without clearing this flag, flush_dcache_page() will simply set
233 * "PG_dcache_dirty" bit and return.
234 */
235 if (PageSwapCache(page)) {
236 flag = 1;
237 ClearPageSwapCache(page);
238 }
239#endif
240 flush_dcache_page(page);
241#ifdef CONFIG_ARM
242 if (flag)
243 SetPageSwapCache(page);
244#endif
245}
246
247void ramzswap_ioctl_get_stats(struct ramzswap *rzs, 225void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
248 struct ramzswap_ioctl_stats *s) 226 struct ramzswap_ioctl_stats *s)
249{ 227{
@@ -655,7 +633,7 @@ static int handle_zero_page(struct bio *bio)
655 memset(user_mem, 0, PAGE_SIZE); 633 memset(user_mem, 0, PAGE_SIZE);
656 kunmap_atomic(user_mem, KM_USER0); 634 kunmap_atomic(user_mem, KM_USER0);
657 635
658 ramzswap_flush_dcache_page(page); 636 flush_dcache_page(page);
659 637
660 set_bit(BIO_UPTODATE, &bio->bi_flags); 638 set_bit(BIO_UPTODATE, &bio->bi_flags);
661 bio_endio(bio, 0); 639 bio_endio(bio, 0);
@@ -679,7 +657,7 @@ static int handle_uncompressed_page(struct ramzswap *rzs, struct bio *bio)
679 kunmap_atomic(user_mem, KM_USER0); 657 kunmap_atomic(user_mem, KM_USER0);
680 kunmap_atomic(cmem, KM_USER1); 658 kunmap_atomic(cmem, KM_USER1);
681 659
682 ramzswap_flush_dcache_page(page); 660 flush_dcache_page(page);
683 661
684 set_bit(BIO_UPTODATE, &bio->bi_flags); 662 set_bit(BIO_UPTODATE, &bio->bi_flags);
685 bio_endio(bio, 0); 663 bio_endio(bio, 0);
@@ -779,7 +757,7 @@ static int ramzswap_read(struct ramzswap *rzs, struct bio *bio)
779 goto out; 757 goto out;
780 } 758 }
781 759
782 ramzswap_flush_dcache_page(page); 760 flush_dcache_page(page);
783 761
784 set_bit(BIO_UPTODATE, &bio->bi_flags); 762 set_bit(BIO_UPTODATE, &bio->bi_flags);
785 bio_endio(bio, 0); 763 bio_endio(bio, 0);
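With the ARM dcache work-around removed above, every read-completion path in ramzswap calls the architecture's flush_dcache_page() directly. A minimal sketch of the resulting pattern (illustrative, not the driver's exact code; example_complete_read is a made-up name):

    #include <linux/bio.h>
    #include <linux/highmem.h>      /* pulls in asm/cacheflush.h */

    static void example_complete_read(struct bio *bio, struct page *page)
    {
            /* make the CPU D-cache coherent with the freshly written page */
            flush_dcache_page(page);
            set_bit(BIO_UPTODATE, &bio->bi_flags);
            bio_endio(bio, 0);
    }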
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 3222c22152fb..0d490c164db6 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -1318,13 +1318,13 @@ extern int ieee80211_encrypt_fragment(
1318 struct sk_buff *frag, 1318 struct sk_buff *frag,
1319 int hdr_len); 1319 int hdr_len);
1320 1320
1321extern int ieee80211_xmit(struct sk_buff *skb, 1321extern int ieee80211_rtl_xmit(struct sk_buff *skb,
1322 struct net_device *dev); 1322 struct net_device *dev);
1323extern void ieee80211_txb_free(struct ieee80211_txb *); 1323extern void ieee80211_txb_free(struct ieee80211_txb *);
1324 1324
1325 1325
1326/* ieee80211_rx.c */ 1326/* ieee80211_rx.c */
1327extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, 1327extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
1328 struct ieee80211_rx_stats *rx_stats); 1328 struct ieee80211_rx_stats *rx_stats);
1329extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, 1329extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
1330 struct ieee80211_hdr_4addr *header, 1330 struct ieee80211_hdr_4addr *header,
@@ -1376,8 +1376,8 @@ extern void ieee80211_stop_protocol(struct ieee80211_device *ieee);
1376extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee); 1376extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
1377extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee); 1377extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
1378extern void ieee80211_reset_queue(struct ieee80211_device *ieee); 1378extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
1379extern void ieee80211_wake_queue(struct ieee80211_device *ieee); 1379extern void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee);
1380extern void ieee80211_stop_queue(struct ieee80211_device *ieee); 1380extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
1381extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee); 1381extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
1382extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee); 1382extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
1383extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee); 1383extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
@@ -1385,7 +1385,7 @@ extern int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct
1385extern void notify_wx_assoc_event(struct ieee80211_device *ieee); 1385extern void notify_wx_assoc_event(struct ieee80211_device *ieee);
1386extern void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success); 1386extern void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
1387extern void SendDisassociation(struct ieee80211_device *ieee,u8* asSta,u8 asRsn); 1387extern void SendDisassociation(struct ieee80211_device *ieee,u8* asSta,u8 asRsn);
1388extern void ieee80211_start_scan(struct ieee80211_device *ieee); 1388extern void ieee80211_rtl_start_scan(struct ieee80211_device *ieee);
1389 1389
1390//Add for RF power on power off by lizhaoming 080512 1390//Add for RF power on power off by lizhaoming 080512
1391extern void SendDisassociation(struct ieee80211_device *ieee, 1391extern void SendDisassociation(struct ieee80211_device *ieee,
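The header hunk above, and all the rtl8187se/rtl8192* renames in the hunks that follow, serve one purpose: these staging drivers carry private copies of the old ieee80211 stack, and their global symbols (ieee80211_rx, ieee80211_xmit, ieee80211_wake_queue, ...) collide with identically named exports elsewhere in the tree, so each copy gets an _rtl infix. A two-line illustration of the clash being avoided (declarations only):

    /* exported with this name by another wireless stack in the same kernel */
    int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);

    /* the staging copy after this patch: unique name, same role */
    int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev);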
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
index f882dd8cf9b5..9128c181bc7d 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
@@ -469,7 +469,7 @@ drop:
469/* All received frames are sent to this function. @skb contains the frame in 469/* All received frames are sent to this function. @skb contains the frame in
470 * IEEE 802.11 format, i.e., in the format it was sent over air. 470 * IEEE 802.11 format, i.e., in the format it was sent over air.
471 * This function is called only as a tasklet (software IRQ). */ 471 * This function is called only as a tasklet (software IRQ). */
472int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, 472int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
473 struct ieee80211_rx_stats *rx_stats) 473 struct ieee80211_rx_stats *rx_stats)
474{ 474{
475 struct net_device *dev = ieee->dev; 475 struct net_device *dev = ieee->dev;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index 1fe19c39d702..c7c645af0ebb 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -689,7 +689,7 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee)
689} 689}
690 690
691/* called with ieee->lock held */ 691/* called with ieee->lock held */
692void ieee80211_start_scan(struct ieee80211_device *ieee) 692void ieee80211_rtl_start_scan(struct ieee80211_device *ieee)
693{ 693{
694 if(IS_DOT11D_ENABLE(ieee) ) 694 if(IS_DOT11D_ENABLE(ieee) )
695 { 695 {
@@ -1196,7 +1196,7 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee)
1196 } 1196 }
1197} 1197}
1198 1198
1199void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen) 1199void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
1200{ 1200{
1201 u8 *c; 1201 u8 *c;
1202 struct sk_buff *skb; 1202 struct sk_buff *skb;
@@ -1898,7 +1898,7 @@ associate_complete:
1898 1898
1899 ieee80211_associate_step2(ieee); 1899 ieee80211_associate_step2(ieee);
1900 }else{ 1900 }else{
1901 ieee80211_auth_challenge(ieee, challenge, chlen); 1901 ieee80211_rtl_auth_challenge(ieee, challenge, chlen);
1902 } 1902 }
1903 }else{ 1903 }else{
1904 ieee->softmac_stats.rx_auth_rs_err++; 1904 ieee->softmac_stats.rx_auth_rs_err++;
@@ -2047,7 +2047,7 @@ void ieee80211_reset_queue(struct ieee80211_device *ieee)
2047 2047
2048} 2048}
2049 2049
2050void ieee80211_wake_queue(struct ieee80211_device *ieee) 2050void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
2051{ 2051{
2052 2052
2053 unsigned long flags; 2053 unsigned long flags;
@@ -2089,7 +2089,7 @@ exit :
2089} 2089}
2090 2090
2091 2091
2092void ieee80211_stop_queue(struct ieee80211_device *ieee) 2092void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee)
2093{ 2093{
2094 //unsigned long flags; 2094 //unsigned long flags;
2095 //spin_lock_irqsave(&ieee->lock,flags); 2095 //spin_lock_irqsave(&ieee->lock,flags);
@@ -2301,7 +2301,7 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
2301//#else 2301//#else
2302 if (ieee->state == IEEE80211_NOLINK){ 2302 if (ieee->state == IEEE80211_NOLINK){
2303 ieee->actscanning = true; 2303 ieee->actscanning = true;
2304 ieee80211_start_scan(ieee); 2304 ieee80211_rtl_start_scan(ieee);
2305 } 2305 }
2306//#endif 2306//#endif
2307 spin_unlock_irqrestore(&ieee->lock, flags); 2307 spin_unlock_irqrestore(&ieee->lock, flags);
@@ -2357,7 +2357,7 @@ void ieee80211_associate_retry_wq(struct work_struct *work)
2357 if(ieee->state == IEEE80211_NOLINK){ 2357 if(ieee->state == IEEE80211_NOLINK){
2358 ieee->beinretry = false; 2358 ieee->beinretry = false;
2359 ieee->actscanning = true; 2359 ieee->actscanning = true;
2360 ieee80211_start_scan(ieee); 2360 ieee80211_rtl_start_scan(ieee);
2361 } 2361 }
2362 //YJ,add,080828, notify os here 2362 //YJ,add,080828, notify os here
2363 if(ieee->state == IEEE80211_NOLINK) 2363 if(ieee->state == IEEE80211_NOLINK)
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
index dde1f2e0cf32..69bd02164b0c 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
@@ -304,7 +304,7 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
304} 304}
305 305
306/* SKBs are added to the ieee->tx_queue. */ 306/* SKBs are added to the ieee->tx_queue. */
307int ieee80211_xmit(struct sk_buff *skb, 307int ieee80211_rtl_xmit(struct sk_buff *skb,
308 struct net_device *dev) 308 struct net_device *dev)
309{ 309{
310 struct ieee80211_device *ieee = netdev_priv(dev); 310 struct ieee80211_device *ieee = netdev_priv(dev);
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 57c62b0a402f..e0f13efdb15a 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -1811,7 +1811,7 @@ void rtl8180_rx(struct net_device *dev)
1811 if(priv->rx_skb->len > 4) 1811 if(priv->rx_skb->len > 4)
1812 skb_trim(priv->rx_skb,priv->rx_skb->len-4); 1812 skb_trim(priv->rx_skb,priv->rx_skb->len-4);
1813#ifndef RX_DONT_PASS_UL 1813#ifndef RX_DONT_PASS_UL
1814 if(!ieee80211_rx(priv->ieee80211, 1814 if(!ieee80211_rtl_rx(priv->ieee80211,
1815 priv->rx_skb, &stats)){ 1815 priv->rx_skb, &stats)){
1816#endif // RX_DONT_PASS_UL 1816#endif // RX_DONT_PASS_UL
1817 1817
@@ -1917,11 +1917,11 @@ rate)
1917 if (!check_nic_enought_desc(dev, priority)){ 1917 if (!check_nic_enought_desc(dev, priority)){
1918 DMESGW("Error: no descriptor left by previous TX (avail %d) ", 1918 DMESGW("Error: no descriptor left by previous TX (avail %d) ",
1919 get_curr_tx_free_desc(dev, priority)); 1919 get_curr_tx_free_desc(dev, priority));
1920 ieee80211_stop_queue(priv->ieee80211); 1920 ieee80211_rtl_stop_queue(priv->ieee80211);
1921 } 1921 }
1922 rtl8180_tx(dev, skb->data, skb->len, priority, morefrag,0,rate); 1922 rtl8180_tx(dev, skb->data, skb->len, priority, morefrag,0,rate);
1923 if (!check_nic_enought_desc(dev, priority)) 1923 if (!check_nic_enought_desc(dev, priority))
1924 ieee80211_stop_queue(priv->ieee80211); 1924 ieee80211_rtl_stop_queue(priv->ieee80211);
1925 1925
1926 spin_unlock_irqrestore(&priv->tx_lock,flags); 1926 spin_unlock_irqrestore(&priv->tx_lock,flags);
1927} 1927}
@@ -3680,7 +3680,7 @@ static const struct net_device_ops rtl8180_netdev_ops = {
3680 .ndo_set_mac_address = r8180_set_mac_adr, 3680 .ndo_set_mac_address = r8180_set_mac_adr,
3681 .ndo_validate_addr = eth_validate_addr, 3681 .ndo_validate_addr = eth_validate_addr,
3682 .ndo_change_mtu = eth_change_mtu, 3682 .ndo_change_mtu = eth_change_mtu,
3683 .ndo_start_xmit = ieee80211_xmit, 3683 .ndo_start_xmit = ieee80211_rtl_xmit,
3684}; 3684};
3685 3685
3686static int __devinit rtl8180_pci_probe(struct pci_dev *pdev, 3686static int __devinit rtl8180_pci_probe(struct pci_dev *pdev,
@@ -3900,7 +3900,7 @@ void rtl8180_try_wake_queue(struct net_device *dev, int pri)
3900 spin_unlock_irqrestore(&priv->tx_lock,flags); 3900 spin_unlock_irqrestore(&priv->tx_lock,flags);
3901 3901
3902 if(enough_desc) 3902 if(enough_desc)
3903 ieee80211_wake_queue(priv->ieee80211); 3903 ieee80211_rtl_wake_queue(priv->ieee80211);
3904} 3904}
3905 3905
3906void rtl8180_tx_isr(struct net_device *dev, int pri,short error) 3906void rtl8180_tx_isr(struct net_device *dev, int pri,short error)
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
index 536cb6e8e796..124cde356cbc 100644
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ b/drivers/staging/rtl8187se/r8180_wx.c
@@ -377,7 +377,7 @@ static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
377 // queue_work(priv->ieee80211->wq, &priv->ieee80211->wx_sync_scan_wq); 377 // queue_work(priv->ieee80211->wq, &priv->ieee80211->wx_sync_scan_wq);
378 //printk("start scan============================>\n"); 378 //printk("start scan============================>\n");
379 ieee80211_softmac_ips_scan_syncro(priv->ieee80211); 379 ieee80211_softmac_ips_scan_syncro(priv->ieee80211);
380//ieee80211_start_scan(priv->ieee80211); 380//ieee80211_rtl_start_scan(priv->ieee80211);
381 /* intentionally forget to up sem */ 381 /* intentionally forget to up sem */
382// up(&priv->ieee80211->wx_sem); 382// up(&priv->ieee80211->wx_sem);
383 ret = 0; 383 ret = 0;
diff --git a/drivers/staging/rtl8192e/ieee80211.h b/drivers/staging/rtl8192e/ieee80211.h
index 97137ddefff4..3ba9e9e90bda 100644
--- a/drivers/staging/rtl8192e/ieee80211.h
+++ b/drivers/staging/rtl8192e/ieee80211.h
@@ -303,8 +303,8 @@ enum _ReasonCode{
303#define ieee80211_rx_mgt ieee80211_rx_mgt_rsl 303#define ieee80211_rx_mgt ieee80211_rx_mgt_rsl
304 304
305#define ieee80211_get_beacon ieee80211_get_beacon_rsl 305#define ieee80211_get_beacon ieee80211_get_beacon_rsl
306#define ieee80211_wake_queue ieee80211_wake_queue_rsl 306#define ieee80211_rtl_wake_queue ieee80211_rtl_wake_queue_rsl
307#define ieee80211_stop_queue ieee80211_stop_queue_rsl 307#define ieee80211_rtl_stop_queue ieee80211_rtl_stop_queue_rsl
308#define ieee80211_reset_queue ieee80211_reset_queue_rsl 308#define ieee80211_reset_queue ieee80211_reset_queue_rsl
309#define ieee80211_softmac_stop_protocol ieee80211_softmac_stop_protocol_rsl 309#define ieee80211_softmac_stop_protocol ieee80211_softmac_stop_protocol_rsl
310#define ieee80211_softmac_start_protocol ieee80211_softmac_start_protocol_rsl 310#define ieee80211_softmac_start_protocol ieee80211_softmac_start_protocol_rsl
@@ -2435,13 +2435,13 @@ extern int ieee80211_encrypt_fragment(
2435 struct sk_buff *frag, 2435 struct sk_buff *frag,
2436 int hdr_len); 2436 int hdr_len);
2437 2437
2438extern int ieee80211_xmit(struct sk_buff *skb, 2438extern int ieee80211_rtl_xmit(struct sk_buff *skb,
2439 struct net_device *dev); 2439 struct net_device *dev);
2440extern void ieee80211_txb_free(struct ieee80211_txb *); 2440extern void ieee80211_txb_free(struct ieee80211_txb *);
2441 2441
2442 2442
2443/* ieee80211_rx.c */ 2443/* ieee80211_rx.c */
2444extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, 2444extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
2445 struct ieee80211_rx_stats *rx_stats); 2445 struct ieee80211_rx_stats *rx_stats);
2446extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, 2446extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
2447 struct ieee80211_hdr_4addr *header, 2447 struct ieee80211_hdr_4addr *header,
@@ -2502,8 +2502,8 @@ extern void ieee80211_stop_protocol(struct ieee80211_device *ieee);
2502extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee); 2502extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
2503extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee); 2503extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
2504extern void ieee80211_reset_queue(struct ieee80211_device *ieee); 2504extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
2505extern void ieee80211_wake_queue(struct ieee80211_device *ieee); 2505extern void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee);
2506extern void ieee80211_stop_queue(struct ieee80211_device *ieee); 2506extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
2507extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee); 2507extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
2508extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee); 2508extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
2509extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee); 2509extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
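rtl8192e layers a second level of namespacing on top of the rename: its headers remap the _rtl names onto _rsl-suffixed linker symbols, so the shared source keeps one spelling while each driver links against its own copy. Sketch of the effect (the #define is from the hunk above; example_wake is a made-up caller):

    #define ieee80211_rtl_wake_queue ieee80211_rtl_wake_queue_rsl

    static void example_wake(struct ieee80211_device *ieee)
    {
            /* resolves to the driver-private ieee80211_rtl_wake_queue_rsl() at link time */
            ieee80211_rtl_wake_queue(ieee);
    }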
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211.h b/drivers/staging/rtl8192e/ieee80211/ieee80211.h
index 83c8452de378..aa76390487bb 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211.h
@@ -333,8 +333,8 @@ enum _ReasonCode{
333#define ieee80211_rx_mgt ieee80211_rx_mgt_rsl 333#define ieee80211_rx_mgt ieee80211_rx_mgt_rsl
334 334
335#define ieee80211_get_beacon ieee80211_get_beacon_rsl 335#define ieee80211_get_beacon ieee80211_get_beacon_rsl
336#define ieee80211_wake_queue ieee80211_wake_queue_rsl 336#define ieee80211_rtl_wake_queue ieee80211_rtl_wake_queue_rsl
337#define ieee80211_stop_queue ieee80211_stop_queue_rsl 337#define ieee80211_rtl_stop_queue ieee80211_rtl_stop_queue_rsl
338#define ieee80211_reset_queue ieee80211_reset_queue_rsl 338#define ieee80211_reset_queue ieee80211_reset_queue_rsl
339#define ieee80211_softmac_stop_protocol ieee80211_softmac_stop_protocol_rsl 339#define ieee80211_softmac_stop_protocol ieee80211_softmac_stop_protocol_rsl
340#define ieee80211_softmac_start_protocol ieee80211_softmac_start_protocol_rsl 340#define ieee80211_softmac_start_protocol ieee80211_softmac_start_protocol_rsl
@@ -2546,13 +2546,13 @@ extern int ieee80211_encrypt_fragment(
2546 struct sk_buff *frag, 2546 struct sk_buff *frag,
2547 int hdr_len); 2547 int hdr_len);
2548 2548
2549extern int ieee80211_xmit(struct sk_buff *skb, 2549extern int ieee80211_rtl_xmit(struct sk_buff *skb,
2550 struct net_device *dev); 2550 struct net_device *dev);
2551extern void ieee80211_txb_free(struct ieee80211_txb *); 2551extern void ieee80211_txb_free(struct ieee80211_txb *);
2552 2552
2553 2553
2554/* ieee80211_rx.c */ 2554/* ieee80211_rx.c */
2555extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, 2555extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
2556 struct ieee80211_rx_stats *rx_stats); 2556 struct ieee80211_rx_stats *rx_stats);
2557extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, 2557extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
2558 struct ieee80211_hdr_4addr *header, 2558 struct ieee80211_hdr_4addr *header,
@@ -2613,8 +2613,8 @@ extern void ieee80211_stop_protocol(struct ieee80211_device *ieee);
2613extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee); 2613extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
2614extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee); 2614extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
2615extern void ieee80211_reset_queue(struct ieee80211_device *ieee); 2615extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
2616extern void ieee80211_wake_queue(struct ieee80211_device *ieee); 2616extern void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee);
2617extern void ieee80211_stop_queue(struct ieee80211_device *ieee); 2617extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
2618extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee); 2618extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
2619extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee); 2619extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
2620extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee); 2620extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
index 2644155737a8..f43a7db5c78b 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
@@ -119,7 +119,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
119 ieee = (struct ieee80211_device *)dev->priv; 119 ieee = (struct ieee80211_device *)dev->priv;
120#endif 120#endif
121#if 0 121#if 0
122 dev->hard_start_xmit = ieee80211_xmit; 122 dev->hard_start_xmit = ieee80211_rtl_xmit;
123#endif 123#endif
124 124
125 memset(ieee, 0, sizeof(struct ieee80211_device)+sizeof_priv); 125 memset(ieee, 0, sizeof(struct ieee80211_device)+sizeof_priv);
@@ -333,7 +333,7 @@ extern void ieee80211_crypto_ccmp_exit(void);
333extern int ieee80211_crypto_wep_init(void); 333extern int ieee80211_crypto_wep_init(void);
334extern void ieee80211_crypto_wep_exit(void); 334extern void ieee80211_crypto_wep_exit(void);
335 335
336int __init ieee80211_init(void) 336int __init ieee80211_rtl_init(void)
337{ 337{
338 struct proc_dir_entry *e; 338 struct proc_dir_entry *e;
339 int retval; 339 int retval;
@@ -389,7 +389,7 @@ int __init ieee80211_init(void)
389 return 0; 389 return 0;
390} 390}
391 391
392void __exit ieee80211_exit(void) 392void __exit ieee80211_rtl_exit(void)
393{ 393{
394 if (ieee80211_proc) { 394 if (ieee80211_proc) {
395 remove_proc_entry("debug_level", ieee80211_proc); 395 remove_proc_entry("debug_level", ieee80211_proc);
@@ -412,8 +412,8 @@ module_param(debug, int, 0444);
412MODULE_PARM_DESC(debug, "debug output mask"); 412MODULE_PARM_DESC(debug, "debug output mask");
413 413
414 414
415//module_exit(ieee80211_exit); 415//module_exit(ieee80211_rtl_exit);
416//module_init(ieee80211_init); 416//module_init(ieee80211_rtl_init);
417#endif 417#endif
418#endif 418#endif
419 419
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
index 5dc478b86375..06d91715143c 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
@@ -923,7 +923,7 @@ u8 parse_subframe(struct sk_buff *skb,
923/* All received frames are sent to this function. @skb contains the frame in 923/* All received frames are sent to this function. @skb contains the frame in
924 * IEEE 802.11 format, i.e., in the format it was sent over air. 924 * IEEE 802.11 format, i.e., in the format it was sent over air.
925 * This function is called only as a tasklet (software IRQ). */ 925 * This function is called only as a tasklet (software IRQ). */
926int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, 926int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
927 struct ieee80211_rx_stats *rx_stats) 927 struct ieee80211_rx_stats *rx_stats)
928{ 928{
929 struct net_device *dev = ieee->dev; 929 struct net_device *dev = ieee->dev;
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
index 593d22825184..6d1ddec39f0e 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
@@ -684,7 +684,7 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee)
684} 684}
685 685
686/* called with ieee->lock held */ 686/* called with ieee->lock held */
687void ieee80211_start_scan(struct ieee80211_device *ieee) 687void ieee80211_rtl_start_scan(struct ieee80211_device *ieee)
688{ 688{
689#ifdef ENABLE_DOT11D 689#ifdef ENABLE_DOT11D
690 if(IS_DOT11D_ENABLE(ieee) ) 690 if(IS_DOT11D_ENABLE(ieee) )
@@ -1430,7 +1430,7 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee)
1430 } 1430 }
1431} 1431}
1432 1432
1433void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen) 1433void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
1434{ 1434{
1435 u8 *c; 1435 u8 *c;
1436 struct sk_buff *skb; 1436 struct sk_buff *skb;
@@ -2262,7 +2262,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
2262 2262
2263 ieee80211_associate_step2(ieee); 2263 ieee80211_associate_step2(ieee);
2264 }else{ 2264 }else{
2265 ieee80211_auth_challenge(ieee, challenge, chlen); 2265 ieee80211_rtl_auth_challenge(ieee, challenge, chlen);
2266 } 2266 }
2267 }else{ 2267 }else{
2268 ieee->softmac_stats.rx_auth_rs_err++; 2268 ieee->softmac_stats.rx_auth_rs_err++;
@@ -2376,7 +2376,7 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *
2376 * to check it any more. 2376 * to check it any more.
2377 * */ 2377 * */
2378 //printk("error:no descriptor left@queue_index %d\n", queue_index); 2378 //printk("error:no descriptor left@queue_index %d\n", queue_index);
2379 //ieee80211_stop_queue(ieee); 2379 //ieee80211_rtl_stop_queue(ieee);
2380#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE 2380#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
2381 skb_queue_tail(&ieee->skb_drv_aggQ[queue_index], txb->fragments[i]); 2381 skb_queue_tail(&ieee->skb_drv_aggQ[queue_index], txb->fragments[i]);
2382#else 2382#else
@@ -2440,7 +2440,7 @@ void ieee80211_reset_queue(struct ieee80211_device *ieee)
2440 2440
2441} 2441}
2442 2442
2443void ieee80211_wake_queue(struct ieee80211_device *ieee) 2443void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
2444{ 2444{
2445 2445
2446 unsigned long flags; 2446 unsigned long flags;
@@ -2481,7 +2481,7 @@ exit :
2481} 2481}
2482 2482
2483 2483
2484void ieee80211_stop_queue(struct ieee80211_device *ieee) 2484void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee)
2485{ 2485{
2486 //unsigned long flags; 2486 //unsigned long flags;
2487 //spin_lock_irqsave(&ieee->lock,flags); 2487 //spin_lock_irqsave(&ieee->lock,flags);
@@ -2706,7 +2706,7 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
2706 2706
2707 if (ieee->state == IEEE80211_NOLINK){ 2707 if (ieee->state == IEEE80211_NOLINK){
2708 ieee->actscanning = true; 2708 ieee->actscanning = true;
2709 ieee80211_start_scan(ieee); 2709 ieee80211_rtl_start_scan(ieee);
2710 } 2710 }
2711 spin_unlock_irqrestore(&ieee->lock, flags); 2711 spin_unlock_irqrestore(&ieee->lock, flags);
2712} 2712}
@@ -2775,7 +2775,7 @@ void ieee80211_associate_retry_wq(struct ieee80211_device *ieee)
2775 { 2775 {
2776 ieee->is_roaming= false; 2776 ieee->is_roaming= false;
2777 ieee->actscanning = true; 2777 ieee->actscanning = true;
2778 ieee80211_start_scan(ieee); 2778 ieee80211_rtl_start_scan(ieee);
2779 } 2779 }
2780 spin_unlock_irqrestore(&ieee->lock, flags); 2780 spin_unlock_irqrestore(&ieee->lock, flags);
2781 2781
@@ -3497,8 +3497,8 @@ void notify_wx_assoc_event(struct ieee80211_device *ieee)
3497 3497
3498#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) 3498#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
3499//EXPORT_SYMBOL(ieee80211_get_beacon); 3499//EXPORT_SYMBOL(ieee80211_get_beacon);
3500//EXPORT_SYMBOL(ieee80211_wake_queue); 3500//EXPORT_SYMBOL(ieee80211_rtl_wake_queue);
3501//EXPORT_SYMBOL(ieee80211_stop_queue); 3501//EXPORT_SYMBOL(ieee80211_rtl_stop_queue);
3502//EXPORT_SYMBOL(ieee80211_reset_queue); 3502//EXPORT_SYMBOL(ieee80211_reset_queue);
3503//EXPORT_SYMBOL(ieee80211_softmac_stop_protocol); 3503//EXPORT_SYMBOL(ieee80211_softmac_stop_protocol);
3504//EXPORT_SYMBOL(ieee80211_softmac_start_protocol); 3504//EXPORT_SYMBOL(ieee80211_softmac_start_protocol);
@@ -3518,8 +3518,8 @@ void notify_wx_assoc_event(struct ieee80211_device *ieee)
3518//EXPORT_SYMBOL(ieee80211_start_scan_syncro); 3518//EXPORT_SYMBOL(ieee80211_start_scan_syncro);
3519#else 3519#else
3520EXPORT_SYMBOL_NOVERS(ieee80211_get_beacon); 3520EXPORT_SYMBOL_NOVERS(ieee80211_get_beacon);
3521EXPORT_SYMBOL_NOVERS(ieee80211_wake_queue); 3521EXPORT_SYMBOL_NOVERS(ieee80211_rtl_wake_queue);
3522EXPORT_SYMBOL_NOVERS(ieee80211_stop_queue); 3522EXPORT_SYMBOL_NOVERS(ieee80211_rtl_stop_queue);
3523EXPORT_SYMBOL_NOVERS(ieee80211_reset_queue); 3523EXPORT_SYMBOL_NOVERS(ieee80211_reset_queue);
3524EXPORT_SYMBOL_NOVERS(ieee80211_softmac_stop_protocol); 3524EXPORT_SYMBOL_NOVERS(ieee80211_softmac_stop_protocol);
3525EXPORT_SYMBOL_NOVERS(ieee80211_softmac_start_protocol); 3525EXPORT_SYMBOL_NOVERS(ieee80211_softmac_start_protocol);
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
index 103b33c093f5..798fb4154c25 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
@@ -604,7 +604,7 @@ void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u
604 } 604 }
605} 605}
606 606
607int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) 607int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
608{ 608{
609#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)) 609#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
610 struct ieee80211_device *ieee = netdev_priv(dev); 610 struct ieee80211_device *ieee = netdev_priv(dev);
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
index 4e34a1f4c66b..3441b72dd8fa 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c
@@ -976,7 +976,7 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
976 { 976 {
977 if (len != ie[1]+2) 977 if (len != ie[1]+2)
978 { 978 {
979 printk("len:%d, ie:%d\n", len, ie[1]); 979 printk("len:%zu, ie:%d\n", len, ie[1]);
980 return -EINVAL; 980 return -EINVAL;
981 } 981 }
982 buf = kmalloc(len, GFP_KERNEL); 982 buf = kmalloc(len, GFP_KERNEL);
diff --git a/drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c
index 98b3bb6b6d69..e41e8a0c739c 100644
--- a/drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/ieee80211/rtl819x_BAProc.c
@@ -382,7 +382,7 @@ int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb)
382 382
383 if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9) 383 if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
384 { 384 {
385 IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BAREQ(%d / %d)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9)); 385 IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BAREQ(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
386 return -1; 386 return -1;
387 } 387 }
388 388
@@ -481,7 +481,7 @@ int ieee80211_rx_ADDBARsp( struct ieee80211_device* ieee, struct sk_buff *skb)
481 481
482 if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9) 482 if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
483 { 483 {
484 IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BARSP(%d / %d)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9)); 484 IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BARSP(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
485 return -1; 485 return -1;
486 } 486 }
487 rsp = ( struct ieee80211_hdr_3addr*)skb->data; 487 rsp = ( struct ieee80211_hdr_3addr*)skb->data;
@@ -611,7 +611,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb)
611 611
612 if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 6) 612 if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 6)
613 { 613 {
614 IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in DELBA(%d / %d)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 6)); 614 IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in DELBA(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 6));
615 return -1; 615 return -1;
616 } 616 }
617 617
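The three BAProc hunks above are plain format-string fixes: sizeof() has type size_t, whose printk conversion is %zu; %d both trips -Wformat and is wrong on 64-bit targets where size_t is 8 bytes wide. Minimal example of the corrected usage:

    printk(KERN_DEBUG "minimum BAREQ length is %zu bytes\n",
           sizeof(struct ieee80211_hdr_3addr) + 9);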
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index ff8fe7e32a92..0ca5d8b4f746 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -5795,7 +5795,7 @@ static void rtl8192_rx(struct net_device *dev)
5795 stats.fragoffset = 0; 5795 stats.fragoffset = 0;
5796 stats.ntotalfrag = 1; 5796 stats.ntotalfrag = 1;
5797 5797
5798 if(!ieee80211_rx(priv->ieee80211, skb, &stats)){ 5798 if(!ieee80211_rtl_rx(priv->ieee80211, skb, &stats)){
5799 dev_kfree_skb_any(skb); 5799 dev_kfree_skb_any(skb);
5800 } else { 5800 } else {
5801 priv->stats.rxok++; 5801 priv->stats.rxok++;
@@ -5837,7 +5837,7 @@ static const struct net_device_ops rtl8192_netdev_ops = {
5837 .ndo_do_ioctl = rtl8192_ioctl, 5837 .ndo_do_ioctl = rtl8192_ioctl,
5838 .ndo_set_multicast_list = r8192_set_multicast, 5838 .ndo_set_multicast_list = r8192_set_multicast,
5839 .ndo_set_mac_address = r8192_set_mac_adr, 5839 .ndo_set_mac_address = r8192_set_mac_adr,
5840 .ndo_start_xmit = ieee80211_xmit, 5840 .ndo_start_xmit = ieee80211_rtl_xmit,
5841}; 5841};
5842 5842
5843/**************************************************************************** 5843/****************************************************************************
@@ -6121,14 +6121,14 @@ static void __devexit rtl8192_pci_disconnect(struct pci_dev *pdev)
6121 RT_TRACE(COMP_DOWN, "wlan driver removed\n"); 6121 RT_TRACE(COMP_DOWN, "wlan driver removed\n");
6122} 6122}
6123 6123
6124extern int ieee80211_init(void); 6124extern int ieee80211_rtl_init(void);
6125extern void ieee80211_exit(void); 6125extern void ieee80211_rtl_exit(void);
6126 6126
6127static int __init rtl8192_pci_module_init(void) 6127static int __init rtl8192_pci_module_init(void)
6128{ 6128{
6129 int retval; 6129 int retval;
6130 6130
6131 retval = ieee80211_init(); 6131 retval = ieee80211_rtl_init();
6132 if (retval) 6132 if (retval)
6133 return retval; 6133 return retval;
6134 6134
@@ -6153,7 +6153,7 @@ static void __exit rtl8192_pci_module_exit(void)
6153 6153
6154 RT_TRACE(COMP_DOWN, "Exiting"); 6154 RT_TRACE(COMP_DOWN, "Exiting");
6155 rtl8192_proc_module_remove(); 6155 rtl8192_proc_module_remove();
6156 ieee80211_exit(); 6156 ieee80211_rtl_exit();
6157} 6157}
6158 6158
6159//warning message WB 6159//warning message WB
@@ -6313,7 +6313,7 @@ void rtl8192_try_wake_queue(struct net_device *dev, int pri)
6313 spin_unlock_irqrestore(&priv->tx_lock,flags); 6313 spin_unlock_irqrestore(&priv->tx_lock,flags);
6314 6314
6315 if(enough_desc) 6315 if(enough_desc)
6316 ieee80211_wake_queue(priv->ieee80211); 6316 ieee80211_rtl_wake_queue(priv->ieee80211);
6317#endif 6317#endif
6318} 6318}
6319 6319
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211.h b/drivers/staging/rtl8192su/ieee80211/ieee80211.h
index f22d024b1c39..9a4c858b0666 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211.h
@@ -1721,13 +1721,13 @@ extern int ieee80211_encrypt_fragment(
1721 struct sk_buff *frag, 1721 struct sk_buff *frag,
1722 int hdr_len); 1722 int hdr_len);
1723 1723
1724extern int rtl8192_ieee80211_xmit(struct sk_buff *skb, 1724extern int rtl8192_ieee80211_rtl_xmit(struct sk_buff *skb,
1725 struct net_device *dev); 1725 struct net_device *dev);
1726extern void ieee80211_txb_free(struct ieee80211_txb *); 1726extern void ieee80211_txb_free(struct ieee80211_txb *);
1727 1727
1728 1728
1729/* ieee80211_rx.c */ 1729/* ieee80211_rx.c */
1730extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, 1730extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
1731 struct ieee80211_rx_stats *rx_stats); 1731 struct ieee80211_rx_stats *rx_stats);
1732extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, 1732extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
1733 struct ieee80211_hdr_4addr *header, 1733 struct ieee80211_hdr_4addr *header,
@@ -1783,8 +1783,8 @@ extern void ieee80211_stop_protocol(struct ieee80211_device *ieee);
1783extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee); 1783extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
1784extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee); 1784extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
1785extern void ieee80211_reset_queue(struct ieee80211_device *ieee); 1785extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
1786extern void ieee80211_wake_queue(struct ieee80211_device *ieee); 1786extern void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee);
1787extern void ieee80211_stop_queue(struct ieee80211_device *ieee); 1787extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
1788extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee); 1788extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
1789extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee); 1789extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
1790extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee); 1790extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
index ac223cef1d33..fecfa120ff48 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
@@ -208,7 +208,7 @@ static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
208 * 208 *
209 * Responsible for handling management control frames 209 * Responsible for handling management control frames
210 * 210 *
211 * Called by ieee80211_rx */ 211 * Called by ieee80211_rtl_rx */
212static inline int 212static inline int
213ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb, 213ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
214 struct ieee80211_rx_stats *rx_stats, u16 type, 214 struct ieee80211_rx_stats *rx_stats, u16 type,
@@ -289,7 +289,7 @@ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
289 return 0; 289 return 0;
290} 290}
291 291
292/* Called only as a tasklet (software IRQ), by ieee80211_rx */ 292/* Called only as a tasklet (software IRQ), by ieee80211_rtl_rx */
293static inline int 293static inline int
294ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb, 294ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb,
295 struct ieee80211_crypt_data *crypt) 295 struct ieee80211_crypt_data *crypt)
@@ -858,7 +858,7 @@ u8 parse_subframe(struct sk_buff *skb,
858/* All received frames are sent to this function. @skb contains the frame in 858/* All received frames are sent to this function. @skb contains the frame in
859 * IEEE 802.11 format, i.e., in the format it was sent over air. 859 * IEEE 802.11 format, i.e., in the format it was sent over air.
860 * This function is called only as a tasklet (software IRQ). */ 860 * This function is called only as a tasklet (software IRQ). */
861int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, 861int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
862 struct ieee80211_rx_stats *rx_stats) 862 struct ieee80211_rx_stats *rx_stats)
863{ 863{
864 struct net_device *dev = ieee->dev; 864 struct net_device *dev = ieee->dev;
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
index 203c0a5cc8c1..95d4f84dcf3f 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
@@ -610,7 +610,7 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee)
610} 610}
611 611
612/* called with ieee->lock held */ 612/* called with ieee->lock held */
613void ieee80211_start_scan(struct ieee80211_device *ieee) 613void ieee80211_rtl_start_scan(struct ieee80211_device *ieee)
614{ 614{
615 if(IS_DOT11D_ENABLE(ieee) ) 615 if(IS_DOT11D_ENABLE(ieee) )
616 { 616 {
@@ -1281,7 +1281,7 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee)
1281 } 1281 }
1282} 1282}
1283 1283
1284void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen) 1284void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen)
1285{ 1285{
1286 u8 *c; 1286 u8 *c;
1287 struct sk_buff *skb; 1287 struct sk_buff *skb;
@@ -2054,7 +2054,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
2054 2054
2055 ieee80211_associate_step2(ieee); 2055 ieee80211_associate_step2(ieee);
2056 }else{ 2056 }else{
2057 ieee80211_auth_challenge(ieee, challenge, chlen); 2057 ieee80211_rtl_auth_challenge(ieee, challenge, chlen);
2058 } 2058 }
2059 }else{ 2059 }else{
2060 ieee->softmac_stats.rx_auth_rs_err++; 2060 ieee->softmac_stats.rx_auth_rs_err++;
@@ -2162,7 +2162,7 @@ void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *
2162 * to check it any more. 2162 * to check it any more.
2163 * */ 2163 * */
2164 //printk("error:no descriptor left@queue_index %d, %d, %d\n", queue_index, skb_queue_len(&ieee->skb_waitQ[queue_index]), ieee->check_nic_enough_desc(ieee->dev,queue_index)); 2164 //printk("error:no descriptor left@queue_index %d, %d, %d\n", queue_index, skb_queue_len(&ieee->skb_waitQ[queue_index]), ieee->check_nic_enough_desc(ieee->dev,queue_index));
2165 //ieee80211_stop_queue(ieee); 2165 //ieee80211_rtl_stop_queue(ieee);
2166 skb_queue_tail(&ieee->skb_waitQ[queue_index], txb->fragments[i]); 2166 skb_queue_tail(&ieee->skb_waitQ[queue_index], txb->fragments[i]);
2167 }else{ 2167 }else{
2168 ieee->softmac_data_hard_start_xmit( 2168 ieee->softmac_data_hard_start_xmit(
@@ -2222,7 +2222,7 @@ void ieee80211_reset_queue(struct ieee80211_device *ieee)
2222 2222
2223} 2223}
2224 2224
2225void ieee80211_wake_queue(struct ieee80211_device *ieee) 2225void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
2226{ 2226{
2227 2227
2228 unsigned long flags; 2228 unsigned long flags;
@@ -2263,7 +2263,7 @@ exit :
2263} 2263}
2264 2264
2265 2265
2266void ieee80211_stop_queue(struct ieee80211_device *ieee) 2266void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee)
2267{ 2267{
2268 //unsigned long flags; 2268 //unsigned long flags;
2269 //spin_lock_irqsave(&ieee->lock,flags); 2269 //spin_lock_irqsave(&ieee->lock,flags);
@@ -2479,7 +2479,7 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
2479 2479
2480 if (ieee->state == IEEE80211_NOLINK){ 2480 if (ieee->state == IEEE80211_NOLINK){
2481 ieee->actscanning = true; 2481 ieee->actscanning = true;
2482 ieee80211_start_scan(ieee); 2482 ieee80211_rtl_start_scan(ieee);
2483 } 2483 }
2484 spin_unlock_irqrestore(&ieee->lock, flags); 2484 spin_unlock_irqrestore(&ieee->lock, flags);
2485} 2485}
@@ -2552,7 +2552,7 @@ void ieee80211_associate_retry_wq(struct work_struct *work)
2552 if(ieee->state == IEEE80211_NOLINK) 2552 if(ieee->state == IEEE80211_NOLINK)
2553 { 2553 {
2554 ieee->actscanning = true; 2554 ieee->actscanning = true;
2555 ieee80211_start_scan(ieee); 2555 ieee80211_rtl_start_scan(ieee);
2556 } 2556 }
2557 spin_unlock_irqrestore(&ieee->lock, flags); 2557 spin_unlock_irqrestore(&ieee->lock, flags);
2558 2558
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c
index 60621d6b2a6b..4d54e1e62d22 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_tx.c
@@ -604,7 +604,7 @@ void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u
604 } 604 }
605} 605}
606 606
607int rtl8192_ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) 607int rtl8192_ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
608{ 608{
609 struct ieee80211_device *ieee = netdev_priv(dev); 609 struct ieee80211_device *ieee = netdev_priv(dev);
610 struct ieee80211_txb *txb = NULL; 610 struct ieee80211_txb *txb = NULL;
diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c
index 66274d7666ff..ccb9d5b8cd44 100644
--- a/drivers/staging/rtl8192su/r8192U_core.c
+++ b/drivers/staging/rtl8192su/r8192U_core.c
@@ -126,6 +126,8 @@ static struct usb_device_id rtl8192_usb_id_tbl[] = {
126 {USB_DEVICE(0x2001, 0x3301)}, 126 {USB_DEVICE(0x2001, 0x3301)},
127 /* Zinwell */ 127 /* Zinwell */
128 {USB_DEVICE(0x5a57, 0x0290)}, 128 {USB_DEVICE(0x5a57, 0x0290)},
129 /* Guillemot */
130 {USB_DEVICE(0x06f8, 0xe031)},
129 //92SU 131 //92SU
130 {USB_DEVICE(0x0bda, 0x8172)}, 132 {USB_DEVICE(0x0bda, 0x8172)},
131 {} 133 {}
@@ -1501,7 +1503,7 @@ static void rtl8192_rx_isr(struct urb *urb)
1501 urb->context = skb; 1503 urb->context = skb;
1502 skb_queue_tail(&priv->rx_queue, skb); 1504 skb_queue_tail(&priv->rx_queue, skb);
1503 err = usb_submit_urb(urb, GFP_ATOMIC); 1505 err = usb_submit_urb(urb, GFP_ATOMIC);
1504 if(err && err != EPERM) 1506 if(err && err != -EPERM)
1505 printk("can not submit rxurb, err is %x,URB status is %x\n",err,urb->status); 1507 printk("can not submit rxurb, err is %x,URB status is %x\n",err,urb->status);
1506} 1508}
1507 1509
@@ -7155,7 +7157,7 @@ void rtl8192SU_rx_nomal(struct sk_buff* skb)
7155 unicast_packet = true; 7157 unicast_packet = true;
7156 } 7158 }
7157 7159
7158 if(!ieee80211_rx(priv->ieee80211,skb, &stats)) { 7160 if(!ieee80211_rtl_rx(priv->ieee80211,skb, &stats)) {
7159 dev_kfree_skb_any(skb); 7161 dev_kfree_skb_any(skb);
7160 } else { 7162 } else {
7161 // priv->stats.rxoktotal++; //YJ,test,090108 7163 // priv->stats.rxoktotal++; //YJ,test,090108
@@ -7426,7 +7428,7 @@ static const struct net_device_ops rtl8192_netdev_ops = {
7426 .ndo_set_mac_address = r8192_set_mac_adr, 7428 .ndo_set_mac_address = r8192_set_mac_adr,
7427 .ndo_validate_addr = eth_validate_addr, 7429 .ndo_validate_addr = eth_validate_addr,
7428 .ndo_change_mtu = eth_change_mtu, 7430 .ndo_change_mtu = eth_change_mtu,
7429 .ndo_start_xmit = rtl8192_ieee80211_xmit, 7431 .ndo_start_xmit = rtl8192_ieee80211_rtl_xmit,
7430}; 7432};
7431 7433
7432static int __devinit rtl8192_usb_probe(struct usb_interface *intf, 7434static int __devinit rtl8192_usb_probe(struct usb_interface *intf,
@@ -7619,7 +7621,7 @@ void rtl8192_try_wake_queue(struct net_device *dev, int pri)
7619 spin_unlock_irqrestore(&priv->tx_lock,flags); 7621 spin_unlock_irqrestore(&priv->tx_lock,flags);
7620 7622
7621 if(enough_desc) 7623 if(enough_desc)
7622 ieee80211_wake_queue(priv->ieee80211); 7624 ieee80211_rtl_wake_queue(priv->ieee80211);
7623} 7625}
7624 7626
7625void EnableHWSecurityConfig8192(struct net_device *dev) 7627void EnableHWSecurityConfig8192(struct net_device *dev)
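The rx ISR hunk near the top of this file is the usual errno fix: in-kernel APIs such as usb_submit_urb() report failure as a negative errno, so the "URB was killed" case must be compared against -EPERM, not EPERM. A small sketch of the corrected pattern (example_resubmit is a made-up wrapper):

    #include <linux/usb.h>

    static void example_resubmit(struct urb *urb)
    {
            int err = usb_submit_urb(urb, GFP_ATOMIC);

            /* -EPERM means the URB was deliberately killed; anything else is a real error */
            if (err && err != -EPERM)
                    printk(KERN_WARNING "cannot resubmit rx urb, err %d\n", err);
    }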
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index d397f1d68eb7..5f12d62658c9 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -845,7 +845,7 @@ int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
845 { 845 {
846 if (len != ie[1]+2) 846 if (len != ie[1]+2)
847 { 847 {
848 printk("len:%d, ie:%d\n", len, ie[1]); 848 printk("len:%zu, ie:%d\n", len, ie[1]);
849 return -EINVAL; 849 return -EINVAL;
850 } 850 }
851 buf = kmalloc(len, GFP_KERNEL); 851 buf = kmalloc(len, GFP_KERNEL);
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 26af43bb8390..512a57aebde3 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -340,7 +340,7 @@ int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb)
340 340
341 if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9) 341 if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
342 { 342 {
343 IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BAREQ(%d / %d)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9)); 343 IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BAREQ(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
344 return -1; 344 return -1;
345 } 345 }
346 346
@@ -439,7 +439,7 @@ int ieee80211_rx_ADDBARsp( struct ieee80211_device* ieee, struct sk_buff *skb)
439 439
440 if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9) 440 if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 9)
441 { 441 {
442 IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BARSP(%d / %d)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9)); 442 IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BARSP(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
443 return -1; 443 return -1;
444 } 444 }
445 rsp = ( struct ieee80211_hdr_3addr*)skb->data; 445 rsp = ( struct ieee80211_hdr_3addr*)skb->data;
@@ -569,7 +569,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb)
569 569
570 if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 6) 570 if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 6)
571 { 571 {
572 IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in DELBA(%d / %d)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 6)); 572 IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in DELBA(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 6));
573 return -1; 573 return -1;
574 } 574 }
575 575
diff --git a/drivers/staging/sm7xx/Kconfig b/drivers/staging/sm7xx/Kconfig
new file mode 100644
index 000000000000..204dbfc3c38b
--- /dev/null
+++ b/drivers/staging/sm7xx/Kconfig
@@ -0,0 +1,15 @@
1config FB_SM7XX
2 tristate "Silicon Motion SM7XX Frame Buffer Support"
3 depends on FB
4 select FB_CFB_FILLRECT
5 select FB_CFB_COPYAREA
6 select FB_CFB_IMAGEBLIT
7 help
8 Frame Buffer driver for the Silicon Motion SM7XX serial graphic card.
9
10config FB_SM7XX_ACCEL
11 bool "Siliconmotion Acceleration functions (EXPERIMENTAL)"
12 depends on FB_SM7XX && EXPERIMENTAL
13 help
14 This will compile the Trident frame buffer device with
15 acceleration functions.
diff --git a/drivers/staging/sm7xx/Makefile b/drivers/staging/sm7xx/Makefile
new file mode 100644
index 000000000000..f43cb9106305
--- /dev/null
+++ b/drivers/staging/sm7xx/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_FB_SM7XX) += sm7xx.o
2
3sm7xx-y := smtcfb.o
diff --git a/drivers/staging/sm7xx/TODO b/drivers/staging/sm7xx/TODO
new file mode 100644
index 000000000000..1f61f5e11cf5
--- /dev/null
+++ b/drivers/staging/sm7xx/TODO
@@ -0,0 +1,10 @@
1TODO:
2- Dual head support
3- use kernel coding style
4- checkpatch.pl clean
5- refine the code and remove unused code
6- use kernel framebuffer mode setting instead of hard code
7- move it to drivers/video/sm7xx/ or make it be drivers/video/sm7xxfb.c
8
9Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and
10Teddy Wang <teddy.wang@siliconmotion.com.cn>.
diff --git a/drivers/staging/sm7xx/smtc2d.c b/drivers/staging/sm7xx/smtc2d.c
new file mode 100644
index 000000000000..133b86c6a678
--- /dev/null
+++ b/drivers/staging/sm7xx/smtc2d.c
@@ -0,0 +1,979 @@
1/*
2 * Silicon Motion SM7XX 2D drawing engine functions.
3 *
4 * Copyright (C) 2006 Silicon Motion Technology Corp.
5 * Author: Boyod boyod.yang@siliconmotion.com.cn
6 *
7 * Copyright (C) 2009 Lemote, Inc.
8 * Author: Wu Zhangjin, wuzj@lemote.com
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive for
12 * more details.
13 *
14 * Version 0.10.26192.21.01
15 * - Add PowerPC support
16 * - Add 2D support for Lynx -
17 * Verified on 2.6.19.2
18 * Boyod.yang <boyod.yang@siliconmotion.com.cn>
19 */
20
21unsigned char smtc_de_busy;
22
23void SMTC_write2Dreg(unsigned long nOffset, unsigned long nData)
24{
25 writel(nData, smtc_2DBaseAddress + nOffset);
26}
27
28unsigned long SMTC_read2Dreg(unsigned long nOffset)
29{
30 return readl(smtc_2DBaseAddress + nOffset);
31}
32
33void SMTC_write2Ddataport(unsigned long nOffset, unsigned long nData)
34{
35 writel(nData, smtc_2Ddataport + nOffset);
36}
37
38/**********************************************************************
39 *
40 * deInit
41 *
42 * Purpose
43 * Drawing engine initialization.
44 *
45 **********************************************************************/
46
47void deInit(unsigned int nModeWidth, unsigned int nModeHeight,
48 unsigned int bpp)
49{
50 /* Get current power configuration. */
51 unsigned char clock;
52 clock = smtc_seqr(0x21);
53
54 /* initialize global 'mutex lock' variable */
55 smtc_de_busy = 0;
56
57 /* Enable 2D Drawing Engine */
58 smtc_seqw(0x21, clock & 0xF8);
59
60 SMTC_write2Dreg(DE_CLIP_TL,
61 FIELD_VALUE(0, DE_CLIP_TL, TOP, 0) |
62 FIELD_SET(0, DE_CLIP_TL, STATUS, DISABLE) |
63 FIELD_SET(0, DE_CLIP_TL, INHIBIT, OUTSIDE) |
64 FIELD_VALUE(0, DE_CLIP_TL, LEFT, 0));
65
66 if (bpp >= 24) {
67 SMTC_write2Dreg(DE_PITCH,
68 FIELD_VALUE(0, DE_PITCH, DESTINATION,
69 nModeWidth * 3) | FIELD_VALUE(0,
70 DE_PITCH,
71 SOURCE,
72 nModeWidth
73 * 3));
74 } else {
75 SMTC_write2Dreg(DE_PITCH,
76 FIELD_VALUE(0, DE_PITCH, DESTINATION,
77 nModeWidth) | FIELD_VALUE(0,
78 DE_PITCH,
79 SOURCE,
80 nModeWidth));
81 }
82
83 SMTC_write2Dreg(DE_WINDOW_WIDTH,
84 FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
85 nModeWidth) | FIELD_VALUE(0,
86 DE_WINDOW_WIDTH,
87 SOURCE,
88 nModeWidth));
89
90 switch (bpp) {
91 case 8:
92 SMTC_write2Dreg(DE_STRETCH_FORMAT,
93 FIELD_SET(0, DE_STRETCH_FORMAT, PATTERN_XY,
94 NORMAL) | FIELD_VALUE(0,
95 DE_STRETCH_FORMAT,
96 PATTERN_Y,
97 0) |
98 FIELD_VALUE(0, DE_STRETCH_FORMAT, PATTERN_X,
99 0) | FIELD_SET(0, DE_STRETCH_FORMAT,
100 PIXEL_FORMAT,
101 8) | FIELD_SET(0,
102 DE_STRETCH_FORMAT,
103 ADDRESSING,
104 XY) |
105 FIELD_VALUE(0, DE_STRETCH_FORMAT,
106 SOURCE_HEIGHT, 3));
107 break;
108 case 24:
109 SMTC_write2Dreg(DE_STRETCH_FORMAT,
110 FIELD_SET(0, DE_STRETCH_FORMAT, PATTERN_XY,
111 NORMAL) | FIELD_VALUE(0,
112 DE_STRETCH_FORMAT,
113 PATTERN_Y,
114 0) |
115 FIELD_VALUE(0, DE_STRETCH_FORMAT, PATTERN_X,
116 0) | FIELD_SET(0, DE_STRETCH_FORMAT,
117 PIXEL_FORMAT,
118 24) | FIELD_SET(0,
119 DE_STRETCH_FORMAT,
120 ADDRESSING,
121 XY) |
122 FIELD_VALUE(0, DE_STRETCH_FORMAT,
123 SOURCE_HEIGHT, 3));
124 break;
125 case 16:
126 default:
127 SMTC_write2Dreg(DE_STRETCH_FORMAT,
128 FIELD_SET(0, DE_STRETCH_FORMAT, PATTERN_XY,
129 NORMAL) | FIELD_VALUE(0,
130 DE_STRETCH_FORMAT,
131 PATTERN_Y,
132 0) |
133 FIELD_VALUE(0, DE_STRETCH_FORMAT, PATTERN_X,
134 0) | FIELD_SET(0, DE_STRETCH_FORMAT,
135 PIXEL_FORMAT,
136 16) | FIELD_SET(0,
137 DE_STRETCH_FORMAT,
138 ADDRESSING,
139 XY) |
140 FIELD_VALUE(0, DE_STRETCH_FORMAT,
141 SOURCE_HEIGHT, 3));
142 break;
143 }
144
145 SMTC_write2Dreg(DE_MASKS,
146 FIELD_VALUE(0, DE_MASKS, BYTE_MASK, 0xFFFF) |
147 FIELD_VALUE(0, DE_MASKS, BIT_MASK, 0xFFFF));
148 SMTC_write2Dreg(DE_COLOR_COMPARE_MASK,
149 FIELD_VALUE(0, DE_COLOR_COMPARE_MASK, MASKS, \
150 0xFFFFFF));
151 SMTC_write2Dreg(DE_COLOR_COMPARE,
152 FIELD_VALUE(0, DE_COLOR_COMPARE, COLOR, 0xFFFFFF));
153}
154
155void deVerticalLine(unsigned long dst_base,
156 unsigned long dst_pitch,
157 unsigned long nX,
158 unsigned long nY,
159 unsigned long dst_height, unsigned long nColor)
160{
161 deWaitForNotBusy();
162
163 SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE,
164 FIELD_VALUE(0, DE_WINDOW_DESTINATION_BASE, ADDRESS,
165 dst_base));
166
167 SMTC_write2Dreg(DE_PITCH,
168 FIELD_VALUE(0, DE_PITCH, DESTINATION, dst_pitch) |
169 FIELD_VALUE(0, DE_PITCH, SOURCE, dst_pitch));
170
171 SMTC_write2Dreg(DE_WINDOW_WIDTH,
172 FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
173 dst_pitch) | FIELD_VALUE(0, DE_WINDOW_WIDTH,
174 SOURCE,
175 dst_pitch));
176
177 SMTC_write2Dreg(DE_FOREGROUND,
178 FIELD_VALUE(0, DE_FOREGROUND, COLOR, nColor));
179
180 SMTC_write2Dreg(DE_DESTINATION,
181 FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
182 FIELD_VALUE(0, DE_DESTINATION, X, nX) |
183 FIELD_VALUE(0, DE_DESTINATION, Y, nY));
184
185 SMTC_write2Dreg(DE_DIMENSION,
186 FIELD_VALUE(0, DE_DIMENSION, X, 1) |
187 FIELD_VALUE(0, DE_DIMENSION, Y_ET, dst_height));
188
189 SMTC_write2Dreg(DE_CONTROL,
190 FIELD_SET(0, DE_CONTROL, STATUS, START) |
191 FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT) |
192 FIELD_SET(0, DE_CONTROL, MAJOR, Y) |
193 FIELD_SET(0, DE_CONTROL, STEP_X, NEGATIVE) |
194 FIELD_SET(0, DE_CONTROL, STEP_Y, POSITIVE) |
195 FIELD_SET(0, DE_CONTROL, LAST_PIXEL, OFF) |
196 FIELD_SET(0, DE_CONTROL, COMMAND, SHORT_STROKE) |
197 FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
198 FIELD_VALUE(0, DE_CONTROL, ROP, 0x0C));
199
200 smtc_de_busy = 1;
201}
202
203void deHorizontalLine(unsigned long dst_base,
204 unsigned long dst_pitch,
205 unsigned long nX,
206 unsigned long nY,
207 unsigned long dst_width, unsigned long nColor)
208{
209 deWaitForNotBusy();
210
211 SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE,
212 FIELD_VALUE(0, DE_WINDOW_DESTINATION_BASE, ADDRESS,
213 dst_base));
214
215 SMTC_write2Dreg(DE_PITCH,
216 FIELD_VALUE(0, DE_PITCH, DESTINATION, dst_pitch) |
217 FIELD_VALUE(0, DE_PITCH, SOURCE, dst_pitch));
218
219 SMTC_write2Dreg(DE_WINDOW_WIDTH,
220 FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
221 dst_pitch) | FIELD_VALUE(0, DE_WINDOW_WIDTH,
222 SOURCE,
223 dst_pitch));
224 SMTC_write2Dreg(DE_FOREGROUND,
225 FIELD_VALUE(0, DE_FOREGROUND, COLOR, nColor));
226 SMTC_write2Dreg(DE_DESTINATION,
227 FIELD_SET(0, DE_DESTINATION, WRAP,
228 DISABLE) | FIELD_VALUE(0, DE_DESTINATION, X,
229 nX) | FIELD_VALUE(0,
230 DE_DESTINATION,
231 Y,
232 nY));
233 SMTC_write2Dreg(DE_DIMENSION,
234 FIELD_VALUE(0, DE_DIMENSION, X,
235 dst_width) | FIELD_VALUE(0, DE_DIMENSION,
236 Y_ET, 1));
237 SMTC_write2Dreg(DE_CONTROL,
238 FIELD_SET(0, DE_CONTROL, STATUS, START) | FIELD_SET(0,
239 DE_CONTROL,
240 DIRECTION,
241 RIGHT_TO_LEFT)
242 | FIELD_SET(0, DE_CONTROL, MAJOR, X) | FIELD_SET(0,
243 DE_CONTROL,
244 STEP_X,
245 POSITIVE)
246 | FIELD_SET(0, DE_CONTROL, STEP_Y,
247 NEGATIVE) | FIELD_SET(0, DE_CONTROL,
248 LAST_PIXEL,
249 OFF) | FIELD_SET(0,
250 DE_CONTROL,
251 COMMAND,
252 SHORT_STROKE)
253 | FIELD_SET(0, DE_CONTROL, ROP_SELECT,
254 ROP2) | FIELD_VALUE(0, DE_CONTROL, ROP,
255 0x0C));
256
257 smtc_de_busy = 1;
258}
259
260void deLine(unsigned long dst_base,
261 unsigned long dst_pitch,
262 unsigned long nX1,
263 unsigned long nY1,
264 unsigned long nX2, unsigned long nY2, unsigned long nColor)
265{
266 unsigned long nCommand =
267 FIELD_SET(0, DE_CONTROL, STATUS, START) |
268 FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT) |
269 FIELD_SET(0, DE_CONTROL, MAJOR, X) |
270 FIELD_SET(0, DE_CONTROL, STEP_X, POSITIVE) |
271 FIELD_SET(0, DE_CONTROL, STEP_Y, POSITIVE) |
272 FIELD_SET(0, DE_CONTROL, LAST_PIXEL, OFF) |
273 FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
274 FIELD_VALUE(0, DE_CONTROL, ROP, 0x0C);
275 unsigned long DeltaX;
276 unsigned long DeltaY;
277
278 /* Calculate delta X */
279 if (nX1 <= nX2)
280 DeltaX = nX2 - nX1;
281 else {
282 DeltaX = nX1 - nX2;
283 nCommand = FIELD_SET(nCommand, DE_CONTROL, STEP_X, NEGATIVE);
284 }
285
286 /* Calculate delta Y */
287 if (nY1 <= nY2)
288 DeltaY = nY2 - nY1;
289 else {
290 DeltaY = nY1 - nY2;
291 nCommand = FIELD_SET(nCommand, DE_CONTROL, STEP_Y, NEGATIVE);
292 }
293
294 /* Determine the major axis */
295 if (DeltaX < DeltaY)
296 nCommand = FIELD_SET(nCommand, DE_CONTROL, MAJOR, Y);
297
298 /* Vertical line? */
299 if (nX1 == nX2)
300 deVerticalLine(dst_base, dst_pitch, nX1, nY1, DeltaY, nColor);
301
302 /* Horizontal line? */
303 else if (nY1 == nY2)
304 deHorizontalLine(dst_base, dst_pitch, nX1, nY1, \
305 DeltaX, nColor);
306
307 /* Diagonal line? */
308 else if (DeltaX == DeltaY) {
309 deWaitForNotBusy();
310
311 SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE,
312 FIELD_VALUE(0, DE_WINDOW_DESTINATION_BASE,
313 ADDRESS, dst_base));
314
315 SMTC_write2Dreg(DE_PITCH,
316 FIELD_VALUE(0, DE_PITCH, DESTINATION,
317 dst_pitch) | FIELD_VALUE(0,
318 DE_PITCH,
319 SOURCE,
320 dst_pitch));
321
322 SMTC_write2Dreg(DE_WINDOW_WIDTH,
323 FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
324 dst_pitch) | FIELD_VALUE(0,
325 DE_WINDOW_WIDTH,
326 SOURCE,
327 dst_pitch));
328
329 SMTC_write2Dreg(DE_FOREGROUND,
330 FIELD_VALUE(0, DE_FOREGROUND, COLOR, nColor));
331
332 SMTC_write2Dreg(DE_DESTINATION,
333 FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
334 FIELD_VALUE(0, DE_DESTINATION, X, 1) |
335 FIELD_VALUE(0, DE_DESTINATION, Y, nY1));
336
337 SMTC_write2Dreg(DE_DIMENSION,
338 FIELD_VALUE(0, DE_DIMENSION, X, 1) |
339 FIELD_VALUE(0, DE_DIMENSION, Y_ET, DeltaX));
340
341 SMTC_write2Dreg(DE_CONTROL,
342 FIELD_SET(nCommand, DE_CONTROL, COMMAND,
343 SHORT_STROKE));
344 }
345
346 /* Generic line */
347 else {
348 unsigned int k1, k2, et, w;
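		/*
		 * k1 and k2 are the per-step error increments and et the
		 * initial error term of a Bresenham-style walk along the
		 * major axis; w is the number of pixels to draw.  They are
		 * fed to the LINE_DRAW command through DE_SOURCE (X_K1,
		 * Y_K2) and DE_DIMENSION (X, Y_ET) below.
		 */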
349 if (DeltaX < DeltaY) {
350 k1 = 2 * DeltaX;
351 et = k1 - DeltaY;
352 k2 = et - DeltaY;
353 w = DeltaY + 1;
354 } else {
355 k1 = 2 * DeltaY;
356 et = k1 - DeltaX;
357 k2 = et - DeltaX;
358 w = DeltaX + 1;
359 }
360
361 deWaitForNotBusy();
362
363 SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE,
364 FIELD_VALUE(0, DE_WINDOW_DESTINATION_BASE,
365 ADDRESS, dst_base));
366
367 SMTC_write2Dreg(DE_PITCH,
368 FIELD_VALUE(0, DE_PITCH, DESTINATION,
369 dst_pitch) | FIELD_VALUE(0,
370 DE_PITCH,
371 SOURCE,
372 dst_pitch));
373
374 SMTC_write2Dreg(DE_WINDOW_WIDTH,
375 FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
376 dst_pitch) | FIELD_VALUE(0,
377 DE_WINDOW_WIDTH,
378 SOURCE,
379 dst_pitch));
380
381 SMTC_write2Dreg(DE_FOREGROUND,
382 FIELD_VALUE(0, DE_FOREGROUND, COLOR, nColor));
383
384 SMTC_write2Dreg(DE_SOURCE,
385 FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
386 FIELD_VALUE(0, DE_SOURCE, X_K1, k1) |
387 FIELD_VALUE(0, DE_SOURCE, Y_K2, k2));
388
389 SMTC_write2Dreg(DE_DESTINATION,
390 FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
391 FIELD_VALUE(0, DE_DESTINATION, X, nX1) |
392 FIELD_VALUE(0, DE_DESTINATION, Y, nY1));
393
394 SMTC_write2Dreg(DE_DIMENSION,
395 FIELD_VALUE(0, DE_DIMENSION, X, w) |
396 FIELD_VALUE(0, DE_DIMENSION, Y_ET, et));
397
398 SMTC_write2Dreg(DE_CONTROL,
399 FIELD_SET(nCommand, DE_CONTROL, COMMAND,
400 LINE_DRAW));
401 }
402
403 smtc_de_busy = 1;
404}
405
406void deFillRect(unsigned long dst_base,
407 unsigned long dst_pitch,
408 unsigned long dst_X,
409 unsigned long dst_Y,
410 unsigned long dst_width,
411 unsigned long dst_height, unsigned long nColor)
412{
413 deWaitForNotBusy();
414
415 SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE,
416 FIELD_VALUE(0, DE_WINDOW_DESTINATION_BASE, ADDRESS,
417 dst_base));
418
419 if (dst_pitch) {
420 SMTC_write2Dreg(DE_PITCH,
421 FIELD_VALUE(0, DE_PITCH, DESTINATION,
422 dst_pitch) | FIELD_VALUE(0,
423 DE_PITCH,
424 SOURCE,
425 dst_pitch));
426
427 SMTC_write2Dreg(DE_WINDOW_WIDTH,
428 FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
429 dst_pitch) | FIELD_VALUE(0,
430 DE_WINDOW_WIDTH,
431 SOURCE,
432 dst_pitch));
433 }
434
435 SMTC_write2Dreg(DE_FOREGROUND,
436 FIELD_VALUE(0, DE_FOREGROUND, COLOR, nColor));
437
438 SMTC_write2Dreg(DE_DESTINATION,
439 FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
440 FIELD_VALUE(0, DE_DESTINATION, X, dst_X) |
441 FIELD_VALUE(0, DE_DESTINATION, Y, dst_Y));
442
443 SMTC_write2Dreg(DE_DIMENSION,
444 FIELD_VALUE(0, DE_DIMENSION, X, dst_width) |
445 FIELD_VALUE(0, DE_DIMENSION, Y_ET, dst_height));
446
447 SMTC_write2Dreg(DE_CONTROL,
448 FIELD_SET(0, DE_CONTROL, STATUS, START) |
449 FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT) |
450 FIELD_SET(0, DE_CONTROL, LAST_PIXEL, OFF) |
451 FIELD_SET(0, DE_CONTROL, COMMAND, RECTANGLE_FILL) |
452 FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
453 FIELD_VALUE(0, DE_CONTROL, ROP, 0x0C));
454
455 smtc_de_busy = 1;
456}
457
458/**********************************************************************
459 *
460 * deRotatePattern
461 *
462 * Purpose
463 * Rotate the given pattern if necessary
464 *
465 * Parameters
466 * [in]
467 * pattern_src_addr, pattern_BPP, pattern_stride - address, color
468 * depth and stride of the source pattern
469 * patternX - X position (0-7) of pattern origin
470 * patternY - Y position (0-7) of pattern origin
471 *
472 * [out]
473 * pattern_dstaddr - Pointer to pre-allocated buffer containing
474 * rotated pattern
475 *
476 **********************************************************************/
477void deRotatePattern(unsigned char *pattern_dstaddr,
478 unsigned long pattern_src_addr,
479 unsigned long pattern_BPP,
480 unsigned long pattern_stride, int patternX, int patternY)
481{
482 unsigned int i;
483 unsigned long pattern[PATTERN_WIDTH * PATTERN_HEIGHT];
484 unsigned int x, y;
485 unsigned char *pjPatByte;
486
487 if (pattern_dstaddr != NULL) {
488 deWaitForNotBusy();
489
490 if (patternX || patternY) {
491 /* Rotate pattern */
492 pjPatByte = (unsigned char *)pattern;
493
494 switch (pattern_BPP) {
495 case 8:
496 {
497 for (y = 0; y < 8; y++) {
498 unsigned char *pjBuffer =
499 pattern_dstaddr +
500 ((patternY + y) & 7) * 8;
501 for (x = 0; x < 8; x++) {
502 pjBuffer[(patternX +
503 x) & 7] =
504 pjPatByte[x];
505 }
506 pjPatByte += pattern_stride;
507 }
508 break;
509 }
510
511 case 16:
512 {
513 for (y = 0; y < 8; y++) {
514 unsigned short *pjBuffer =
515 (unsigned short *)
516 pattern_dstaddr +
517 ((patternY + y) & 7) * 8;
518 for (x = 0; x < 8; x++) {
519 pjBuffer[(patternX +
520 x) & 7] =
521 ((unsigned short *)
522 pjPatByte)[x];
523 }
524 pjPatByte += pattern_stride;
525 }
526 break;
527 }
528
529 case 32:
530 {
531 for (y = 0; y < 8; y++) {
532 unsigned long *pjBuffer =
533 (unsigned long *)
534 pattern_dstaddr +
535 ((patternY + y) & 7) * 8;
536 for (x = 0; x < 8; x++) {
537 pjBuffer[(patternX +
538 x) & 7] =
539 ((unsigned long *)
540 pjPatByte)[x];
541 }
542 pjPatByte += pattern_stride;
543 }
544 break;
545 }
546 }
547 } else {
548 /* Don't rotate, just copy the pattern into pattern_dstaddr */
549 for (i = 0; i < (pattern_BPP * 2); i++) {
550 ((unsigned long *)pattern_dstaddr)[i] =
551 pattern[i];
552 }
553 }
554
555 }
556}
557
558/**********************************************************************
559 *
560 * deCopy
561 *
562 * Purpose
563 * Copy a rectangular area of the source surface to a destination surface
564 *
565 * Remarks
566 * Source bitmap must have the same color depth (BPP) as the destination
567 * bitmap.
568 *
569**********************************************************************/
570void deCopy(unsigned long dst_base,
571 unsigned long dst_pitch,
572 unsigned long dst_BPP,
573 unsigned long dst_X,
574 unsigned long dst_Y,
575 unsigned long dst_width,
576 unsigned long dst_height,
577 unsigned long src_base,
578 unsigned long src_pitch,
579 unsigned long src_X,
580 unsigned long src_Y, pTransparent pTransp, unsigned char nROP2)
581{
582 unsigned long nDirection = 0;
583 unsigned long nTransparent = 0;
584 /* Direction of ROP2 operation:
585 * 1 = Left to Right,
586 * (-1) = Right to Left
587 */
588 unsigned long opSign = 1;
589 /* xWidth is in pixels */
590 unsigned long xWidth = 192 / (dst_BPP / 8);
591 unsigned long de_ctrl = 0;
592
593 deWaitForNotBusy();
594
595 SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE,
596 FIELD_VALUE(0, DE_WINDOW_DESTINATION_BASE, ADDRESS,
597 dst_base));
598
599 SMTC_write2Dreg(DE_WINDOW_SOURCE_BASE,
600 FIELD_VALUE(0, DE_WINDOW_SOURCE_BASE, ADDRESS,
601 src_base));
602
603 if (dst_pitch && src_pitch) {
604 SMTC_write2Dreg(DE_PITCH,
605 FIELD_VALUE(0, DE_PITCH, DESTINATION,
606 dst_pitch) | FIELD_VALUE(0,
607 DE_PITCH,
608 SOURCE,
609 src_pitch));
610
611 SMTC_write2Dreg(DE_WINDOW_WIDTH,
612 FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
613 dst_pitch) | FIELD_VALUE(0,
614 DE_WINDOW_WIDTH,
615 SOURCE,
616 src_pitch));
617 }
618
619 /* Set transparent bits if necessary */
620 if (pTransp != NULL) {
621 nTransparent =
622 pTransp->match | pTransp->select | pTransp->control;
623
624 /* Set color compare register */
625 SMTC_write2Dreg(DE_COLOR_COMPARE,
626 FIELD_VALUE(0, DE_COLOR_COMPARE, COLOR,
627 pTransp->color));
628 }
629
630 /* Determine direction of operation */
631 if (src_Y < dst_Y) {
632 /* +----------+
633 |S |
634 | +----------+
635 | | | |
636 | | | |
637 +---|------+ |
638 | D |
639 +----------+ */
640
641 nDirection = BOTTOM_TO_TOP;
642 } else if (src_Y > dst_Y) {
643 /* +----------+
644 |D |
645 | +----------+
646 | | | |
647 | | | |
648 +---|------+ |
649 | S |
650 +----------+ */
651
652 nDirection = TOP_TO_BOTTOM;
653 } else {
654 /* src_Y == dst_Y */
655
656 if (src_X <= dst_X) {
657 /* +------+---+------+
658 |S | | D|
659 | | | |
660 | | | |
661 | | | |
662 +------+---+------+ */
663
664 nDirection = RIGHT_TO_LEFT;
665 } else {
666 /* src_X > dst_X */
667
668 /* +------+---+------+
669 |D | | S|
670 | | | |
671 | | | |
672 | | | |
673 +------+---+------+ */
674
675 nDirection = LEFT_TO_RIGHT;
676 }
677 }
678
679 if ((nDirection == BOTTOM_TO_TOP) || (nDirection == RIGHT_TO_LEFT)) {
680 src_X += dst_width - 1;
681 src_Y += dst_height - 1;
682 dst_X += dst_width - 1;
683 dst_Y += dst_height - 1;
684 opSign = (-1);
685 }
686
687 if (dst_BPP >= 24) {
688 src_X *= 3;
689 src_Y *= 3;
690 dst_X *= 3;
691 dst_Y *= 3;
692 dst_width *= 3;
693 if ((nDirection == BOTTOM_TO_TOP)
694 || (nDirection == RIGHT_TO_LEFT)) {
695 src_X += 2;
696 dst_X += 2;
697 }
698 }
699
700 /* Workaround for 192 byte hw bug */
701 if ((nROP2 != 0x0C) && ((dst_width * (dst_BPP / 8)) >= 192)) {
702 /*
703 * Perform the ROP2 operation in chunks of (xWidth *
704 * dst_height)
705 */
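		/*
		 * xWidth is 192 / bytes-per-pixel: 96 pixels at 16 bpp,
		 * 48 pixels at 32 bpp, so each pass below covers at most
		 * 192 bytes per scanline.
		 */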
706 while (1) {
707 deWaitForNotBusy();
708
709 SMTC_write2Dreg(DE_SOURCE,
710 FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
711 FIELD_VALUE(0, DE_SOURCE, X_K1, src_X) |
712 FIELD_VALUE(0, DE_SOURCE, Y_K2, src_Y));
713
714 SMTC_write2Dreg(DE_DESTINATION,
715 FIELD_SET(0, DE_DESTINATION, WRAP,
716 DISABLE) | FIELD_VALUE(0,
717 DE_DESTINATION,
718 X,
719 dst_X)
720 | FIELD_VALUE(0, DE_DESTINATION, Y,
721 dst_Y));
722
723 SMTC_write2Dreg(DE_DIMENSION,
724 FIELD_VALUE(0, DE_DIMENSION, X,
725 xWidth) | FIELD_VALUE(0,
726 DE_DIMENSION,
727 Y_ET,
728 dst_height));
729
730 de_ctrl =
731 FIELD_VALUE(0, DE_CONTROL, ROP,
732 nROP2) | nTransparent | FIELD_SET(0,
733 DE_CONTROL,
734 ROP_SELECT,
735 ROP2)
736 | FIELD_SET(0, DE_CONTROL, COMMAND,
737 BITBLT) | ((nDirection ==
738 1) ? FIELD_SET(0,
739 DE_CONTROL,
740 DIRECTION,
741 RIGHT_TO_LEFT)
742 : FIELD_SET(0, DE_CONTROL,
743 DIRECTION,
744 LEFT_TO_RIGHT)) |
745 FIELD_SET(0, DE_CONTROL, STATUS, START);
746
747 SMTC_write2Dreg(DE_CONTROL, de_ctrl);
748
749 src_X += (opSign * xWidth);
750 dst_X += (opSign * xWidth);
751 dst_width -= xWidth;
752
753 if (dst_width <= 0) {
754 /* ROP2 operation is complete */
755 break;
756 }
757
758 if (xWidth > dst_width)
759 xWidth = dst_width;
760 }
761 } else {
762 deWaitForNotBusy();
763 SMTC_write2Dreg(DE_SOURCE,
764 FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
765 FIELD_VALUE(0, DE_SOURCE, X_K1, src_X) |
766 FIELD_VALUE(0, DE_SOURCE, Y_K2, src_Y));
767
768 SMTC_write2Dreg(DE_DESTINATION,
769 FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
770 FIELD_VALUE(0, DE_DESTINATION, X, dst_X) |
771 FIELD_VALUE(0, DE_DESTINATION, Y, dst_Y));
772
773 SMTC_write2Dreg(DE_DIMENSION,
774 FIELD_VALUE(0, DE_DIMENSION, X, dst_width) |
775 FIELD_VALUE(0, DE_DIMENSION, Y_ET, dst_height));
776
777 de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, nROP2) |
778 nTransparent |
779 FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
780 FIELD_SET(0, DE_CONTROL, COMMAND, BITBLT) |
781 ((nDirection == 1) ? FIELD_SET(0, DE_CONTROL, DIRECTION,
782 RIGHT_TO_LEFT)
783 : FIELD_SET(0, DE_CONTROL, DIRECTION,
784 LEFT_TO_RIGHT)) | FIELD_SET(0, DE_CONTROL,
785 STATUS, START);
786 SMTC_write2Dreg(DE_CONTROL, de_ctrl);
787 }
788
789 smtc_de_busy = 1;
790}
791
792/*
793 * This function sets the pixel format that will apply to the 2D Engine.
794 */
795void deSetPixelFormat(unsigned long bpp)
796{
797 unsigned long de_format;
798
799 de_format = SMTC_read2Dreg(DE_STRETCH_FORMAT);
800
801 switch (bpp) {
802 case 8:
803 de_format =
804 FIELD_SET(de_format, DE_STRETCH_FORMAT, PIXEL_FORMAT, 8);
805 break;
806 default:
807 case 16:
808 de_format =
809 FIELD_SET(de_format, DE_STRETCH_FORMAT, PIXEL_FORMAT, 16);
810 break;
811 case 32:
812 de_format =
813 FIELD_SET(de_format, DE_STRETCH_FORMAT, PIXEL_FORMAT, 32);
814 break;
815 }
816
817 SMTC_write2Dreg(DE_STRETCH_FORMAT, de_format);
818}
819
820/*
821 * System memory to Video memory monochrome expansion.
822 *
823 * The source is a monochrome image in system memory. This function expands
824 * the monochrome data to a color image in video memory.
825 */
826
827long deSystemMem2VideoMemMonoBlt(const char *pSrcbuf,
828 long srcDelta,
829 unsigned long startBit,
830 unsigned long dBase,
831 unsigned long dPitch,
832 unsigned long bpp,
833 unsigned long dx, unsigned long dy,
834 unsigned long width, unsigned long height,
835 unsigned long fColor,
836 unsigned long bColor,
837 unsigned long rop2) {
838 unsigned long bytePerPixel;
839 unsigned long ulBytesPerScan;
840 unsigned long ul4BytesPerScan;
841 unsigned long ulBytesRemain;
842 unsigned long de_ctrl = 0;
843 unsigned char ajRemain[4];
844 long i, j;
845
846 bytePerPixel = bpp / 8;
847
848 /* Just make sure the start bit is within legal range */
849 startBit &= 7;
850
851 ulBytesPerScan = (width + startBit + 7) / 8;
852 ul4BytesPerScan = ulBytesPerScan & ~3;
853 ulBytesRemain = ulBytesPerScan & 3;
854
855 if (smtc_de_busy)
856 deWaitForNotBusy();
857
858 /*
859 * 2D Source Base. Use 0 for HOST Blt.
860 */
861
862 SMTC_write2Dreg(DE_WINDOW_SOURCE_BASE, 0);
863
864 /*
865 * 2D Destination Base.
866 *
867 * It is an address offset (128 bit aligned) from the beginning of
868 * frame buffer.
869 */
870
871 SMTC_write2Dreg(DE_WINDOW_DESTINATION_BASE, dBase);
872
873 if (dPitch) {
874
875 /*
876 * Program pitch (distance between the 1st points of two
877 * adjacent lines).
878 *
879 * Note that input pitch is BYTE value, but the 2D Pitch
880 * register uses pixel values, so a byte-to-pixel conversion is needed.
881 */
882
883 SMTC_write2Dreg(DE_PITCH,
884 FIELD_VALUE(0, DE_PITCH, DESTINATION,
885 dPitch /
886 bytePerPixel) | FIELD_VALUE(0,
887 DE_PITCH,
888 SOURCE,
889 dPitch /
890 bytePerPixel));
891
892 /* Screen Window width in Pixels.
893 *
894 * 2D engine uses this value to calculate the linear address in
895 * frame buffer for a given point.
896 */
897
898 SMTC_write2Dreg(DE_WINDOW_WIDTH,
899 FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION,
900 (dPitch /
901 bytePerPixel)) | FIELD_VALUE(0,
902 DE_WINDOW_WIDTH,
903 SOURCE,
904 (dPitch
905 /
906 bytePerPixel)));
907 }
908 /* Note: For 2D Source in Host Write, only X_K1 field is needed, and
909 * Y_K2 field is not used. For mono bitmap, use startBit for X_K1.
910 */
911
912 SMTC_write2Dreg(DE_SOURCE,
913 FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
914 FIELD_VALUE(0, DE_SOURCE, X_K1, startBit) |
915 FIELD_VALUE(0, DE_SOURCE, Y_K2, 0));
916
917 SMTC_write2Dreg(DE_DESTINATION,
918 FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
919 FIELD_VALUE(0, DE_DESTINATION, X, dx) |
920 FIELD_VALUE(0, DE_DESTINATION, Y, dy));
921
922 SMTC_write2Dreg(DE_DIMENSION,
923 FIELD_VALUE(0, DE_DIMENSION, X, width) |
924 FIELD_VALUE(0, DE_DIMENSION, Y_ET, height));
925
926 SMTC_write2Dreg(DE_FOREGROUND, fColor);
927 SMTC_write2Dreg(DE_BACKGROUND, bColor);
928
929 /* Set the pixel format of the destination */
930 if (bpp)
931 deSetPixelFormat(bpp);
932
933 de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, rop2) |
934 FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
935 FIELD_SET(0, DE_CONTROL, COMMAND, HOST_WRITE) |
936 FIELD_SET(0, DE_CONTROL, HOST, MONO) |
937 FIELD_SET(0, DE_CONTROL, STATUS, START);
938
939 SMTC_write2Dreg(DE_CONTROL, de_ctrl | deGetTransparency());
940
941 /* Write MONO data (line by line) to 2D Engine data port */
942 for (i = 0; i < height; i++) {
943 /* For each line, send the data in chunks of 4 bytes */
944 for (j = 0; j < (ul4BytesPerScan / 4); j++)
945 SMTC_write2Ddataport(0,
946 *(unsigned long *)(pSrcbuf +
947 (j * 4)));
948
949 if (ulBytesRemain) {
950 memcpy(ajRemain, pSrcbuf + ul4BytesPerScan,
951 ulBytesRemain);
952 SMTC_write2Ddataport(0, *(unsigned long *)ajRemain);
953 }
954
955 pSrcbuf += srcDelta;
956 }
957 smtc_de_busy = 1;
958
959 return 0;
960}
961
962/*
963 * This function gets the transparency status from DE_CONTROL register.
964 * It returns a double word with the transparent fields properly set,
965 * while other fields are 0.
966 */
967unsigned long deGetTransparency(void)
968{
969 unsigned long de_ctrl;
970
971 de_ctrl = SMTC_read2Dreg(DE_CONTROL);
972
973 de_ctrl &=
974 FIELD_MASK(DE_CONTROL_TRANSPARENCY_MATCH) |
975 FIELD_MASK(DE_CONTROL_TRANSPARENCY_SELECT) |
976 FIELD_MASK(DE_CONTROL_TRANSPARENCY);
977
978 return de_ctrl;
979}
diff --git a/drivers/staging/sm7xx/smtc2d.h b/drivers/staging/sm7xx/smtc2d.h
new file mode 100644
index 000000000000..38d0c335322b
--- /dev/null
+++ b/drivers/staging/sm7xx/smtc2d.h
@@ -0,0 +1,530 @@
1/*
2 * Silicon Motion SM712 2D drawing engine functions.
3 *
4 * Copyright (C) 2006 Silicon Motion Technology Corp.
5 * Author: Ge Wang, gewang@siliconmotion.com
6 *
7 * Copyright (C) 2009 Lemote, Inc.
8 * Author: Wu Zhangjin, wuzj@lemote.com
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive for
12 * more details.
13 */
14
15#ifndef NULL
16#define NULL 0
17#endif
18
19/* Internal macros */
20
21#define _F_START(f) (0 ? f)
22#define _F_END(f) (1 ? f)
23#define _F_SIZE(f) (1 + _F_END(f) - _F_START(f))
24#define _F_MASK(f) (((1ULL << _F_SIZE(f)) - 1) << _F_START(f))
25#define _F_NORMALIZE(v, f) (((v) & _F_MASK(f)) >> _F_START(f))
26#define _F_DENORMALIZE(v, f) (((v) << _F_START(f)) & _F_MASK(f))
27
28/* Global macros */
29
30#define FIELD_GET(x, reg, field) \
31( \
32 _F_NORMALIZE((x), reg ## _ ## field) \
33)
34
35#define FIELD_SET(x, reg, field, value) \
36( \
37 (x & ~_F_MASK(reg ## _ ## field)) \
38 | _F_DENORMALIZE(reg ## _ ## field ## _ ## value, reg ## _ ## field) \
39)
40
41#define FIELD_VALUE(x, reg, field, value) \
42( \
43 (x & ~_F_MASK(reg ## _ ## field)) \
44 | _F_DENORMALIZE(value, reg ## _ ## field) \
45)
46
47#define FIELD_CLEAR(reg, field) \
48( \
49 ~_F_MASK(reg ## _ ## field) \
50)
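/*
 * Register fields in this header are written as "MSB : LSB" (for example
 * DE_SOURCE_X_K1 is "29 : 16").  _F_START()/_F_END() split such a field
 * apart with the ternary operator: (0 ? 29 : 16) evaluates to 16 (the
 * LSB) and (1 ? 29 : 16) to 29 (the MSB).  Worked examples, using the
 * DE_SOURCE and DE_CONTROL definitions further down in this file:
 *
 *   FIELD_VALUE(0, DE_SOURCE, X_K1, 5)      == (5 << 16) & 0x3fff0000
 *                                            == 0x00050000
 *   FIELD_SET(0, DE_CONTROL, STATUS, START) == 1 << 31 == 0x80000000
 */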
51
52/* Field Macros */
53
54#define FIELD_START(field) (0 ? field)
55#define FIELD_END(field) (1 ? field)
56#define FIELD_SIZE(field) \
57 (1 + FIELD_END(field) - FIELD_START(field))
58
59#define FIELD_MASK(field) \
60 (((1 << (FIELD_SIZE(field)-1)) \
61 | ((1 << (FIELD_SIZE(field)-1)) - 1)) \
62 << FIELD_START(field))
63
64#define FIELD_NORMALIZE(reg, field) \
65 (((reg) & FIELD_MASK(field)) >> FIELD_START(field))
66
67#define FIELD_DENORMALIZE(field, value) \
68 (((value) << FIELD_START(field)) & FIELD_MASK(field))
69
70#define FIELD_INIT(reg, field, value) \
71 FIELD_DENORMALIZE(reg ## _ ## field, \
72 reg ## _ ## field ## _ ## value)
73
74#define FIELD_INIT_VAL(reg, field, value) \
75 (FIELD_DENORMALIZE(reg ## _ ## field, value))
76
77#define FIELD_VAL_SET(x, r, f, v) ({ \
78 x = (x & ~FIELD_MASK(r ## _ ## f)) \
79 | FIELD_DENORMALIZE(r ## _ ## f, r ## _ ## f ## _ ## v); \
80})
81
82#define RGB(r, g, b) ((unsigned long)(((r) << 16) | ((g) << 8) | (b)))
83
84/* Transparent info definition */
85typedef struct {
86 unsigned long match; /* Matching pixel is OPAQUE/TRANSPARENT */
87 unsigned long select; /* Transparency controlled by SRC/DST */
88 unsigned long control; /* ENABLE/DISABLE transparency */
89 unsigned long color; /* Transparent color */
90} Transparent, *pTransparent;
91
92#define PIXEL_DEPTH_1_BP 0 /* 1 bit per pixel */
93#define PIXEL_DEPTH_8_BPP 1 /* 8 bits per pixel */
94#define PIXEL_DEPTH_16_BPP 2 /* 16 bits per pixel */
95#define PIXEL_DEPTH_32_BPP 3 /* 32 bits per pixel */
96#define PIXEL_DEPTH_YUV422 8 /* 16 bits per pixel YUV422 */
97#define PIXEL_DEPTH_YUV420 9 /* 16 bits per pixel YUV420 */
98
99#define PATTERN_WIDTH 8
100#define PATTERN_HEIGHT 8
101
102#define TOP_TO_BOTTOM 0
103#define BOTTOM_TO_TOP 1
104#define RIGHT_TO_LEFT BOTTOM_TO_TOP
105#define LEFT_TO_RIGHT TOP_TO_BOTTOM
106
107/* Constants used in Transparent structure */
108#define MATCH_OPAQUE 0x00000000
109#define MATCH_TRANSPARENT 0x00000400
110#define SOURCE 0x00000000
111#define DESTINATION 0x00000200
112
113/* 2D registers. */
114
115#define DE_SOURCE 0x000000
116#define DE_SOURCE_WRAP 31 : 31
117#define DE_SOURCE_WRAP_DISABLE 0
118#define DE_SOURCE_WRAP_ENABLE 1
119#define DE_SOURCE_X_K1 29 : 16
120#define DE_SOURCE_Y_K2 15 : 0
121
122#define DE_DESTINATION 0x000004
123#define DE_DESTINATION_WRAP 31 : 31
124#define DE_DESTINATION_WRAP_DISABLE 0
125#define DE_DESTINATION_WRAP_ENABLE 1
126#define DE_DESTINATION_X 28 : 16
127#define DE_DESTINATION_Y 15 : 0
128
129#define DE_DIMENSION 0x000008
130#define DE_DIMENSION_X 28 : 16
131#define DE_DIMENSION_Y_ET 15 : 0
132
133#define DE_CONTROL 0x00000C
134#define DE_CONTROL_STATUS 31 : 31
135#define DE_CONTROL_STATUS_STOP 0
136#define DE_CONTROL_STATUS_START 1
137#define DE_CONTROL_PATTERN 30 : 30
138#define DE_CONTROL_PATTERN_MONO 0
139#define DE_CONTROL_PATTERN_COLOR 1
140#define DE_CONTROL_UPDATE_DESTINATION_X 29 : 29
141#define DE_CONTROL_UPDATE_DESTINATION_X_DISABLE 0
142#define DE_CONTROL_UPDATE_DESTINATION_X_ENABLE 1
143#define DE_CONTROL_QUICK_START 28 : 28
144#define DE_CONTROL_QUICK_START_DISABLE 0
145#define DE_CONTROL_QUICK_START_ENABLE 1
146#define DE_CONTROL_DIRECTION 27 : 27
147#define DE_CONTROL_DIRECTION_LEFT_TO_RIGHT 0
148#define DE_CONTROL_DIRECTION_RIGHT_TO_LEFT 1
149#define DE_CONTROL_MAJOR 26 : 26
150#define DE_CONTROL_MAJOR_X 0
151#define DE_CONTROL_MAJOR_Y 1
152#define DE_CONTROL_STEP_X 25 : 25
153#define DE_CONTROL_STEP_X_POSITIVE 1
154#define DE_CONTROL_STEP_X_NEGATIVE 0
155#define DE_CONTROL_STEP_Y 24 : 24
156#define DE_CONTROL_STEP_Y_POSITIVE 1
157#define DE_CONTROL_STEP_Y_NEGATIVE 0
158#define DE_CONTROL_STRETCH 23 : 23
159#define DE_CONTROL_STRETCH_DISABLE 0
160#define DE_CONTROL_STRETCH_ENABLE 1
161#define DE_CONTROL_HOST 22 : 22
162#define DE_CONTROL_HOST_COLOR 0
163#define DE_CONTROL_HOST_MONO 1
164#define DE_CONTROL_LAST_PIXEL 21 : 21
165#define DE_CONTROL_LAST_PIXEL_OFF 0
166#define DE_CONTROL_LAST_PIXEL_ON 1
167#define DE_CONTROL_COMMAND 20 : 16
168#define DE_CONTROL_COMMAND_BITBLT 0
169#define DE_CONTROL_COMMAND_RECTANGLE_FILL 1
170#define DE_CONTROL_COMMAND_DE_TILE 2
171#define DE_CONTROL_COMMAND_TRAPEZOID_FILL 3
172#define DE_CONTROL_COMMAND_ALPHA_BLEND 4
173#define DE_CONTROL_COMMAND_RLE_STRIP 5
174#define DE_CONTROL_COMMAND_SHORT_STROKE 6
175#define DE_CONTROL_COMMAND_LINE_DRAW 7
176#define DE_CONTROL_COMMAND_HOST_WRITE 8
177#define DE_CONTROL_COMMAND_HOST_READ 9
178#define DE_CONTROL_COMMAND_HOST_WRITE_BOTTOM_UP 10
179#define DE_CONTROL_COMMAND_ROTATE 11
180#define DE_CONTROL_COMMAND_FONT 12
181#define DE_CONTROL_COMMAND_TEXTURE_LOAD 15
182#define DE_CONTROL_ROP_SELECT 15 : 15
183#define DE_CONTROL_ROP_SELECT_ROP3 0
184#define DE_CONTROL_ROP_SELECT_ROP2 1
185#define DE_CONTROL_ROP2_SOURCE 14 : 14
186#define DE_CONTROL_ROP2_SOURCE_BITMAP 0
187#define DE_CONTROL_ROP2_SOURCE_PATTERN 1
188#define DE_CONTROL_MONO_DATA 13 : 12
189#define DE_CONTROL_MONO_DATA_NOT_PACKED 0
190#define DE_CONTROL_MONO_DATA_8_PACKED 1
191#define DE_CONTROL_MONO_DATA_16_PACKED 2
192#define DE_CONTROL_MONO_DATA_32_PACKED 3
193#define DE_CONTROL_REPEAT_ROTATE 11 : 11
194#define DE_CONTROL_REPEAT_ROTATE_DISABLE 0
195#define DE_CONTROL_REPEAT_ROTATE_ENABLE 1
196#define DE_CONTROL_TRANSPARENCY_MATCH 10 : 10
197#define DE_CONTROL_TRANSPARENCY_MATCH_OPAQUE 0
198#define DE_CONTROL_TRANSPARENCY_MATCH_TRANSPARENT 1
199#define DE_CONTROL_TRANSPARENCY_SELECT 9 : 9
200#define DE_CONTROL_TRANSPARENCY_SELECT_SOURCE 0
201#define DE_CONTROL_TRANSPARENCY_SELECT_DESTINATION 1
202#define DE_CONTROL_TRANSPARENCY 8 : 8
203#define DE_CONTROL_TRANSPARENCY_DISABLE 0
204#define DE_CONTROL_TRANSPARENCY_ENABLE 1
205#define DE_CONTROL_ROP 7 : 0
206
207/* Pseudo fields. */
208
209#define DE_CONTROL_SHORT_STROKE_DIR 27 : 24
210#define DE_CONTROL_SHORT_STROKE_DIR_225 0
211#define DE_CONTROL_SHORT_STROKE_DIR_135 1
212#define DE_CONTROL_SHORT_STROKE_DIR_315 2
213#define DE_CONTROL_SHORT_STROKE_DIR_45 3
214#define DE_CONTROL_SHORT_STROKE_DIR_270 4
215#define DE_CONTROL_SHORT_STROKE_DIR_90 5
216#define DE_CONTROL_SHORT_STROKE_DIR_180 8
217#define DE_CONTROL_SHORT_STROKE_DIR_0 10
218#define DE_CONTROL_ROTATION 25 : 24
219#define DE_CONTROL_ROTATION_0 0
220#define DE_CONTROL_ROTATION_270 1
221#define DE_CONTROL_ROTATION_90 2
222#define DE_CONTROL_ROTATION_180 3
223
224#define DE_PITCH 0x000010
225#define DE_PITCH_DESTINATION 28 : 16
226#define DE_PITCH_SOURCE 12 : 0
227
228#define DE_FOREGROUND 0x000014
229#define DE_FOREGROUND_COLOR 31 : 0
230
231#define DE_BACKGROUND 0x000018
232#define DE_BACKGROUND_COLOR 31 : 0
233
234#define DE_STRETCH_FORMAT 0x00001C
235#define DE_STRETCH_FORMAT_PATTERN_XY 30 : 30
236#define DE_STRETCH_FORMAT_PATTERN_XY_NORMAL 0
237#define DE_STRETCH_FORMAT_PATTERN_XY_OVERWRITE 1
238#define DE_STRETCH_FORMAT_PATTERN_Y 29 : 27
239#define DE_STRETCH_FORMAT_PATTERN_X 25 : 23
240#define DE_STRETCH_FORMAT_PIXEL_FORMAT 21 : 20
241#define DE_STRETCH_FORMAT_PIXEL_FORMAT_8 0
242#define DE_STRETCH_FORMAT_PIXEL_FORMAT_16 1
243#define DE_STRETCH_FORMAT_PIXEL_FORMAT_24 3
244#define DE_STRETCH_FORMAT_PIXEL_FORMAT_32 2
245#define DE_STRETCH_FORMAT_ADDRESSING 19 : 16
246#define DE_STRETCH_FORMAT_ADDRESSING_XY 0
247#define DE_STRETCH_FORMAT_ADDRESSING_LINEAR 15
248#define DE_STRETCH_FORMAT_SOURCE_HEIGHT 11 : 0
249
250#define DE_COLOR_COMPARE 0x000020
251#define DE_COLOR_COMPARE_COLOR 23 : 0
252
253#define DE_COLOR_COMPARE_MASK 0x000024
254#define DE_COLOR_COMPARE_MASK_MASKS 23 : 0
255
256#define DE_MASKS 0x000028
257#define DE_MASKS_BYTE_MASK 31 : 16
258#define DE_MASKS_BIT_MASK 15 : 0
259
260#define DE_CLIP_TL 0x00002C
261#define DE_CLIP_TL_TOP 31 : 16
262#define DE_CLIP_TL_STATUS 13 : 13
263#define DE_CLIP_TL_STATUS_DISABLE 0
264#define DE_CLIP_TL_STATUS_ENABLE 1
265#define DE_CLIP_TL_INHIBIT 12 : 12
266#define DE_CLIP_TL_INHIBIT_OUTSIDE 0
267#define DE_CLIP_TL_INHIBIT_INSIDE 1
268#define DE_CLIP_TL_LEFT 11 : 0
269
270#define DE_CLIP_BR 0x000030
271#define DE_CLIP_BR_BOTTOM 31 : 16
272#define DE_CLIP_BR_RIGHT 12 : 0
273
274#define DE_MONO_PATTERN_LOW 0x000034
275#define DE_MONO_PATTERN_LOW_PATTERN 31 : 0
276
277#define DE_MONO_PATTERN_HIGH 0x000038
278#define DE_MONO_PATTERN_HIGH_PATTERN 31 : 0
279
280#define DE_WINDOW_WIDTH 0x00003C
281#define DE_WINDOW_WIDTH_DESTINATION 28 : 16
282#define DE_WINDOW_WIDTH_SOURCE 12 : 0
283
284#define DE_WINDOW_SOURCE_BASE 0x000040
285#define DE_WINDOW_SOURCE_BASE_EXT 27 : 27
286#define DE_WINDOW_SOURCE_BASE_EXT_LOCAL 0
287#define DE_WINDOW_SOURCE_BASE_EXT_EXTERNAL 1
288#define DE_WINDOW_SOURCE_BASE_CS 26 : 26
289#define DE_WINDOW_SOURCE_BASE_CS_0 0
290#define DE_WINDOW_SOURCE_BASE_CS_1 1
291#define DE_WINDOW_SOURCE_BASE_ADDRESS 25 : 0
292
293#define DE_WINDOW_DESTINATION_BASE 0x000044
294#define DE_WINDOW_DESTINATION_BASE_EXT 27 : 27
295#define DE_WINDOW_DESTINATION_BASE_EXT_LOCAL 0
296#define DE_WINDOW_DESTINATION_BASE_EXT_EXTERNAL 1
297#define DE_WINDOW_DESTINATION_BASE_CS 26 : 26
298#define DE_WINDOW_DESTINATION_BASE_CS_0 0
299#define DE_WINDOW_DESTINATION_BASE_CS_1 1
300#define DE_WINDOW_DESTINATION_BASE_ADDRESS 25 : 0
301
302#define DE_ALPHA 0x000048
303#define DE_ALPHA_VALUE 7 : 0
304
305#define DE_WRAP 0x00004C
306#define DE_WRAP_X 31 : 16
307#define DE_WRAP_Y 15 : 0
308
309#define DE_STATUS 0x000050
310#define DE_STATUS_CSC 1 : 1
311#define DE_STATUS_CSC_CLEAR 0
312#define DE_STATUS_CSC_NOT_ACTIVE 0
313#define DE_STATUS_CSC_ACTIVE 1
314#define DE_STATUS_2D 0 : 0
315#define DE_STATUS_2D_CLEAR 0
316#define DE_STATUS_2D_NOT_ACTIVE 0
317#define DE_STATUS_2D_ACTIVE 1
318
319/* Color Space Conversion registers. */
320
321#define CSC_Y_SOURCE_BASE 0x0000C8
322#define CSC_Y_SOURCE_BASE_EXT 27 : 27
323#define CSC_Y_SOURCE_BASE_EXT_LOCAL 0
324#define CSC_Y_SOURCE_BASE_EXT_EXTERNAL 1
325#define CSC_Y_SOURCE_BASE_CS 26 : 26
326#define CSC_Y_SOURCE_BASE_CS_0 0
327#define CSC_Y_SOURCE_BASE_CS_1 1
328#define CSC_Y_SOURCE_BASE_ADDRESS 25 : 0
329
330#define CSC_CONSTANTS 0x0000CC
331#define CSC_CONSTANTS_Y 31 : 24
332#define CSC_CONSTANTS_R 23 : 16
333#define CSC_CONSTANTS_G 15 : 8
334#define CSC_CONSTANTS_B 7 : 0
335
336#define CSC_Y_SOURCE_X 0x0000D0
337#define CSC_Y_SOURCE_X_INTEGER 26 : 16
338#define CSC_Y_SOURCE_X_FRACTION 15 : 3
339
340#define CSC_Y_SOURCE_Y 0x0000D4
341#define CSC_Y_SOURCE_Y_INTEGER 27 : 16
342#define CSC_Y_SOURCE_Y_FRACTION 15 : 3
343
344#define CSC_U_SOURCE_BASE 0x0000D8
345#define CSC_U_SOURCE_BASE_EXT 27 : 27
346#define CSC_U_SOURCE_BASE_EXT_LOCAL 0
347#define CSC_U_SOURCE_BASE_EXT_EXTERNAL 1
348#define CSC_U_SOURCE_BASE_CS 26 : 26
349#define CSC_U_SOURCE_BASE_CS_0 0
350#define CSC_U_SOURCE_BASE_CS_1 1
351#define CSC_U_SOURCE_BASE_ADDRESS 25 : 0
352
353#define CSC_V_SOURCE_BASE 0x0000DC
354#define CSC_V_SOURCE_BASE_EXT 27 : 27
355#define CSC_V_SOURCE_BASE_EXT_LOCAL 0
356#define CSC_V_SOURCE_BASE_EXT_EXTERNAL 1
357#define CSC_V_SOURCE_BASE_CS 26 : 26
358#define CSC_V_SOURCE_BASE_CS_0 0
359#define CSC_V_SOURCE_BASE_CS_1 1
360#define CSC_V_SOURCE_BASE_ADDRESS 25 : 0
361
362#define CSC_SOURCE_DIMENSION 0x0000E0
363#define CSC_SOURCE_DIMENSION_X 31 : 16
364#define CSC_SOURCE_DIMENSION_Y 15 : 0
365
366#define CSC_SOURCE_PITCH 0x0000E4
367#define CSC_SOURCE_PITCH_Y 31 : 16
368#define CSC_SOURCE_PITCH_UV 15 : 0
369
370#define CSC_DESTINATION 0x0000E8
371#define CSC_DESTINATION_WRAP 31 : 31
372#define CSC_DESTINATION_WRAP_DISABLE 0
373#define CSC_DESTINATION_WRAP_ENABLE 1
374#define CSC_DESTINATION_X 27 : 16
375#define CSC_DESTINATION_Y 11 : 0
376
377#define CSC_DESTINATION_DIMENSION 0x0000EC
378#define CSC_DESTINATION_DIMENSION_X 31 : 16
379#define CSC_DESTINATION_DIMENSION_Y 15 : 0
380
381#define CSC_DESTINATION_PITCH 0x0000F0
382#define CSC_DESTINATION_PITCH_X 31 : 16
383#define CSC_DESTINATION_PITCH_Y 15 : 0
384
385#define CSC_SCALE_FACTOR 0x0000F4
386#define CSC_SCALE_FACTOR_HORIZONTAL 31 : 16
387#define CSC_SCALE_FACTOR_VERTICAL 15 : 0
388
389#define CSC_DESTINATION_BASE 0x0000F8
390#define CSC_DESTINATION_BASE_EXT 27 : 27
391#define CSC_DESTINATION_BASE_EXT_LOCAL 0
392#define CSC_DESTINATION_BASE_EXT_EXTERNAL 1
393#define CSC_DESTINATION_BASE_CS 26 : 26
394#define CSC_DESTINATION_BASE_CS_0 0
395#define CSC_DESTINATION_BASE_CS_1 1
396#define CSC_DESTINATION_BASE_ADDRESS 25 : 0
397
398#define CSC_CONTROL 0x0000FC
399#define CSC_CONTROL_STATUS 31 : 31
400#define CSC_CONTROL_STATUS_STOP 0
401#define CSC_CONTROL_STATUS_START 1
402#define CSC_CONTROL_SOURCE_FORMAT 30 : 28
403#define CSC_CONTROL_SOURCE_FORMAT_YUV422 0
404#define CSC_CONTROL_SOURCE_FORMAT_YUV420I 1
405#define CSC_CONTROL_SOURCE_FORMAT_YUV420 2
406#define CSC_CONTROL_SOURCE_FORMAT_YVU9 3
407#define CSC_CONTROL_SOURCE_FORMAT_IYU1 4
408#define CSC_CONTROL_SOURCE_FORMAT_IYU2 5
409#define CSC_CONTROL_SOURCE_FORMAT_RGB565 6
410#define CSC_CONTROL_SOURCE_FORMAT_RGB8888 7
411#define CSC_CONTROL_DESTINATION_FORMAT 27 : 26
412#define CSC_CONTROL_DESTINATION_FORMAT_RGB565 0
413#define CSC_CONTROL_DESTINATION_FORMAT_RGB8888 1
414#define CSC_CONTROL_HORIZONTAL_FILTER 25 : 25
415#define CSC_CONTROL_HORIZONTAL_FILTER_DISABLE 0
416#define CSC_CONTROL_HORIZONTAL_FILTER_ENABLE 1
417#define CSC_CONTROL_VERTICAL_FILTER 24 : 24
418#define CSC_CONTROL_VERTICAL_FILTER_DISABLE 0
419#define CSC_CONTROL_VERTICAL_FILTER_ENABLE 1
420#define CSC_CONTROL_BYTE_ORDER 23 : 23
421#define CSC_CONTROL_BYTE_ORDER_YUYV 0
422#define CSC_CONTROL_BYTE_ORDER_UYVY 1
423
424#define DE_DATA_PORT_501 0x110000
425#define DE_DATA_PORT_712 0x400000
426#define DE_DATA_PORT_722 0x6000
427
428/* point to virtual Memory Map IO starting address */
429extern char *smtc_RegBaseAddress;
430/* point to virtual video memory starting address */
431extern char *smtc_VRAMBaseAddress;
432extern unsigned char smtc_de_busy;
433
434extern unsigned long memRead32(unsigned long nOffset);
435extern void memWrite32(unsigned long nOffset, unsigned long nData);
436extern unsigned long SMTC_read2Dreg(unsigned long nOffset);
437
438/* 2D functions */
439extern void deInit(unsigned int nModeWidth, unsigned int nModeHeight,
440 unsigned int bpp);
441
442extern void deWaitForNotBusy(void);
443
444extern void deVerticalLine(unsigned long dst_base,
445 unsigned long dst_pitch,
446 unsigned long nX,
447 unsigned long nY,
448 unsigned long dst_height,
449 unsigned long nColor);
450
451extern void deHorizontalLine(unsigned long dst_base,
452 unsigned long dst_pitch,
453 unsigned long nX,
454 unsigned long nY,
455 unsigned long dst_width,
456 unsigned long nColor);
457
458extern void deLine(unsigned long dst_base,
459 unsigned long dst_pitch,
460 unsigned long nX1,
461 unsigned long nY1,
462 unsigned long nX2,
463 unsigned long nY2,
464 unsigned long nColor);
465
466extern void deFillRect(unsigned long dst_base,
467 unsigned long dst_pitch,
468 unsigned long dst_X,
469 unsigned long dst_Y,
470 unsigned long dst_width,
471 unsigned long dst_height,
472 unsigned long nColor);
473
474extern void deRotatePattern(unsigned char *pattern_dstaddr,
475 unsigned long pattern_src_addr,
476 unsigned long pattern_BPP,
477 unsigned long pattern_stride,
478 int patternX,
479 int patternY);
480
481extern void deCopy(unsigned long dst_base,
482 unsigned long dst_pitch,
483 unsigned long dst_BPP,
484 unsigned long dst_X,
485 unsigned long dst_Y,
486 unsigned long dst_width,
487 unsigned long dst_height,
488 unsigned long src_base,
489 unsigned long src_pitch,
490 unsigned long src_X,
491 unsigned long src_Y,
492 pTransparent pTransp,
493 unsigned char nROP2);
494
495/*
496 * System memory to Video memory monochrome expansion.
497 *
498 * The source is a monochrome image in system memory. This function expands
499 * the monochrome data to a color image in video memory.
500 *
501 * @pSrcbuf: pointer to start of source buffer in system memory
502 * @srcDelta: Pitch value (in bytes) of the source buffer, +ive means top
503 * down and -ive means bottom up
504 * @startBit: Mono data can start at any bit in a byte, this value should
505 * be 0 to 7
506 * @dBase: Address of destination : offset in frame buffer
507 * @dPitch: Pitch value of destination surface in BYTE
508 * @bpp: Color depth of destination surface
509 * @dx, dy: Starting coordinate of destination surface
510 * @width, height: width and height of the rectangle in pixels
511 * @fColor,bColor: Foreground, Background color (corresponding to a 1, 0 in
512 * the monochrome data)
513 * @rop2: ROP value
514 */
515
516extern long deSystemMem2VideoMemMonoBlt(
517 const char *pSrcbuf,
518 long srcDelta,
519 unsigned long startBit,
520 unsigned long dBase,
521 unsigned long dPitch,
522 unsigned long bpp,
523 unsigned long dx, unsigned long dy,
524 unsigned long width, unsigned long height,
525 unsigned long fColor,
526 unsigned long bColor,
527 unsigned long rop2);
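/*
 * Illustrative call (hypothetical values): expanding an 8x16 glyph stored
 * one byte per scanline into a 16 bpp frame buffer would look roughly like
 *
 *   deSystemMem2VideoMemMonoBlt(glyph, 1, 0, dBase, dPitch, 16,
 *                               x, y, 8, 16, fg, bg, 0x0C);
 *
 * with 0x0C being the ROP2 value used by the other drawing helpers in
 * this driver.
 */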
528
529extern unsigned long deGetTransparency(void);
530extern void deSetPixelFormat(unsigned long bpp);
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
new file mode 100644
index 000000000000..161dbc9c1397
--- /dev/null
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -0,0 +1,1253 @@
1/*
2 * Silicon Motion SM7XX frame buffer device
3 *
4 * Copyright (C) 2006 Silicon Motion Technology Corp.
5 * Authors: Ge Wang, gewang@siliconmotion.com
6 * Boyod boyod.yang@siliconmotion.com.cn
7 *
8 * Copyright (C) 2009 Lemote, Inc.
9 * Author: Wu Zhangjin, wuzj@lemote.com
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file COPYING in the main directory of this archive for
13 * more details.
14 *
15 * Version 0.10.26192.21.01
16 * - Add PowerPC/Big endian support
17 * - Add 2D support for Lynx
18 * - Verified on 2.6.19.2 Boyod.yang <boyod.yang@siliconmotion.com.cn>
19 *
20 * Version 0.09.2621.00.01
21 * - Only supports Linux kernel version 2.6.21.
22 * Boyod.yang <boyod.yang@siliconmotion.com.cn>
23 *
24 * Version 0.09
25 * - Only supports Linux kernel version 2.6.12.
26 * Boyod.yang <boyod.yang@siliconmotion.com.cn>
27 */
28
29#ifndef __KERNEL__
30#define __KERNEL__
31#endif
32
33#include <linux/io.h>
34#include <linux/fb.h>
35#include <linux/pci.h>
36#include <linux/init.h>
37#include <linux/uaccess.h>
38#include <linux/console.h>
39#include <linux/screen_info.h>
40
41#ifdef CONFIG_PM
42#include <linux/pm.h>
43#endif
44
45struct screen_info smtc_screen_info;
46
47#include "smtcfb.h"
48#include "smtc2d.h"
49
50#ifdef DEBUG
51#define smdbg(format, arg...) printk(KERN_DEBUG format , ## arg)
52#else
53#define smdbg(format, arg...)
54#endif
55
56/*
57 * Private structure
58 */
59struct smtcfb_info {
60 /*
61 * The following is a pointer to be passed into the
62 * functions below. The modules outside the main
63 * voyager.c driver have no knowledge as to what
64 * is within this structure.
65 */
66 struct fb_info fb;
67 struct display_switch *dispsw;
68 struct pci_dev *dev;
69 signed int currcon;
70
71 struct {
72 u8 red, green, blue;
73 } palette[NR_RGB];
74
75 u_int palette_size;
76};
77
78struct par_info {
79 /*
80 * Hardware
81 */
82 u16 chipID;
83 unsigned char __iomem *m_pMMIO;
84 char __iomem *m_pLFB;
85 char *m_pDPR;
86 char *m_pVPR;
87 char *m_pCPR;
88
89 u_int width;
90 u_int height;
91 u_int hz;
92 u_long BaseAddressInVRAM;
93 u8 chipRevID;
94};
95
96struct vesa_mode_table {
97 char mode_index[6];
98 u16 lfb_width;
99 u16 lfb_height;
100 u16 lfb_depth;
101};
102
103static struct vesa_mode_table vesa_mode[] = {
104 {"0x301", 640, 480, 8},
105 {"0x303", 800, 600, 8},
106 {"0x305", 1024, 768, 8},
107 {"0x307", 1280, 1024, 8},
108
109 {"0x311", 640, 480, 16},
110 {"0x314", 800, 600, 16},
111 {"0x317", 1024, 768, 16},
112 {"0x31A", 1280, 1024, 16},
113
114 {"0x312", 640, 480, 24},
115 {"0x315", 800, 600, 24},
116 {"0x318", 1024, 768, 24},
117 {"0x31B", 1280, 1024, 24},
118};
119
120char __iomem *smtc_RegBaseAddress; /* Memory Map IO starting address */
121char __iomem *smtc_VRAMBaseAddress; /* video memory starting address */
122
123char *smtc_2DBaseAddress; /* 2D engine starting address */
124char *smtc_2Ddataport; /* 2D data port offset */
125short smtc_2Dacceleration;
126
127static u32 colreg[17];
128static struct par_info hw; /* hardware information */
129
130u16 smtc_ChipIDs[] = {
131 0x710,
132 0x712,
133 0x720
134};
135
136#define numSMTCchipIDs (sizeof(smtc_ChipIDs) / sizeof(u16))
137
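/*
 * Spin until the 2D engine status bits in SR16 read back as idle (0x10),
 * giving up after ~16M register reads so a wedged engine cannot stall the
 * caller forever.
 */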
138void deWaitForNotBusy(void)
139{
140 unsigned long i = 0x1000000;
141 while (i--) {
142 if ((smtc_seqr(0x16) & 0x18) == 0x10)
143 break;
144 }
145 smtc_de_busy = 0;
146}
147
148static void sm712_set_timing(struct smtcfb_info *sfb,
149 struct par_info *ppar_info)
150{
151 int i = 0, j = 0;
152 u32 m_nScreenStride;
153
154 smdbg("\nppar_info->width = %d ppar_info->height = %d"
155 "sfb->fb.var.bits_per_pixel = %d ppar_info->hz = %d\n",
156 ppar_info->width, ppar_info->height,
157 sfb->fb.var.bits_per_pixel, ppar_info->hz);
158
159 for (j = 0; j < numVGAModes; j++) {
160 if (VGAMode[j].mmSizeX == ppar_info->width &&
161 VGAMode[j].mmSizeY == ppar_info->height &&
162 VGAMode[j].bpp == sfb->fb.var.bits_per_pixel &&
163 VGAMode[j].hz == ppar_info->hz) {
164
165 smdbg("\nVGAMode[j].mmSizeX = %d VGAMode[j].mmSizeY ="
166 "%d VGAMode[j].bpp = %d"
167 "VGAMode[j].hz=%d\n",
168 VGAMode[j].mmSizeX, VGAMode[j].mmSizeY,
169 VGAMode[j].bpp, VGAMode[j].hz);
170
171 smdbg("VGAMode index=%d\n", j);
172
173 smtc_mmiowb(0x0, 0x3c6);
174
175 smtc_seqw(0, 0x1);
176
177 smtc_mmiowb(VGAMode[j].Init_MISC, 0x3c2);
178
179 /* init SEQ register SR00 - SR04 */
180 for (i = 0; i < SIZE_SR00_SR04; i++)
181 smtc_seqw(i, VGAMode[j].Init_SR00_SR04[i]);
182
183 /* init SEQ register SR10 - SR24 */
184 for (i = 0; i < SIZE_SR10_SR24; i++)
185 smtc_seqw(i + 0x10,
186 VGAMode[j].Init_SR10_SR24[i]);
187
188 /* init SEQ register SR30 - SR75 */
189 for (i = 0; i < SIZE_SR30_SR75; i++)
190 if (((i + 0x30) != 0x62) \
191 && ((i + 0x30) != 0x6a) \
192 && ((i + 0x30) != 0x6b))
193 smtc_seqw(i + 0x30,
194 VGAMode[j].Init_SR30_SR75[i]);
195
196 /* init SEQ register SR80 - SR93 */
197 for (i = 0; i < SIZE_SR80_SR93; i++)
198 smtc_seqw(i + 0x80,
199 VGAMode[j].Init_SR80_SR93[i]);
200
201 /* init SEQ register SRA0 - SRAF */
202 for (i = 0; i < SIZE_SRA0_SRAF; i++)
203 smtc_seqw(i + 0xa0,
204 VGAMode[j].Init_SRA0_SRAF[i]);
205
206 /* init Graphic register GR00 - GR08 */
207 for (i = 0; i < SIZE_GR00_GR08; i++)
208 smtc_grphw(i, VGAMode[j].Init_GR00_GR08[i]);
209
210 /* init Attribute register AR00 - AR14 */
211 for (i = 0; i < SIZE_AR00_AR14; i++)
212 smtc_attrw(i, VGAMode[j].Init_AR00_AR14[i]);
213
214 /* init CRTC register CR00 - CR18 */
215 for (i = 0; i < SIZE_CR00_CR18; i++)
216 smtc_crtcw(i, VGAMode[j].Init_CR00_CR18[i]);
217
218 /* init CRTC register CR30 - CR4D */
219 for (i = 0; i < SIZE_CR30_CR4D; i++)
220 smtc_crtcw(i + 0x30,
221 VGAMode[j].Init_CR30_CR4D[i]);
222
223 /* init CRTC register CR90 - CRA7 */
224 for (i = 0; i < SIZE_CR90_CRA7; i++)
225 smtc_crtcw(i + 0x90,
226 VGAMode[j].Init_CR90_CRA7[i]);
227 }
228 }
229 smtc_mmiowb(0x67, 0x3c2);
230
231 /* set VPR registers */
232 writel(0x0, ppar_info->m_pVPR + 0x0C);
233 writel(0x0, ppar_info->m_pVPR + 0x40);
234
235 /* set data width */
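	/*
	 * m_nScreenStride is (visible pixels * bits per pixel) / 64, i.e.
	 * the line length in 64-bit units; the write to m_pVPR + 0x10
	 * below loads stride + 2 into the upper 16 bits and stride into
	 * the lower 16 bits.
	 */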
236 m_nScreenStride =
237 (ppar_info->width * sfb->fb.var.bits_per_pixel) / 64;
238 switch (sfb->fb.var.bits_per_pixel) {
239 case 8:
240 writel(0x0, ppar_info->m_pVPR + 0x0);
241 break;
242 case 16:
243 writel(0x00020000, ppar_info->m_pVPR + 0x0);
244 break;
245 case 24:
246 writel(0x00040000, ppar_info->m_pVPR + 0x0);
247 break;
248 case 32:
249 writel(0x00030000, ppar_info->m_pVPR + 0x0);
250 break;
251 }
252 writel((u32) (((m_nScreenStride + 2) << 16) | m_nScreenStride),
253 ppar_info->m_pVPR + 0x10);
254
255}
256
257static void sm712_setpalette(int regno, unsigned red, unsigned green,
258 unsigned blue, struct fb_info *info)
259{
260 struct par_info *cur_par = (struct par_info *)info->par;
261
262 if (cur_par->BaseAddressInVRAM)
263 /*
264 * second display palette for dual head. Enable CRT RAM, 6-bit
265 * RAM
266 */
267 smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x20);
268 else
269 /* primary display palette. Enable LCD RAM only, 6-bit RAM */
270 smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x10);
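	/*
	 * fb colour components are 16-bit values, while the palette RAM
	 * selected above is 6 bits wide, hence the ">> 10" below.
	 */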
271 smtc_mmiowb(regno, dac_reg);
272 smtc_mmiowb(red >> 10, dac_val);
273 smtc_mmiowb(green >> 10, dac_val);
274 smtc_mmiowb(blue >> 10, dac_val);
275}
276
277static void smtc_set_timing(struct smtcfb_info *sfb, struct par_info
278 *ppar_info)
279{
280 switch (ppar_info->chipID) {
281 case 0x710:
282 case 0x712:
283 case 0x720:
284 sm712_set_timing(sfb, ppar_info);
285 break;
286 }
287}
288
289static struct fb_var_screeninfo smtcfb_var = {
290 .xres = 1024,
291 .yres = 600,
292 .xres_virtual = 1024,
293 .yres_virtual = 600,
294 .bits_per_pixel = 16,
295 .red = {16, 8, 0},
296 .green = {8, 8, 0},
297 .blue = {0, 8, 0},
298 .activate = FB_ACTIVATE_NOW,
299 .height = -1,
300 .width = -1,
301 .vmode = FB_VMODE_NONINTERLACED,
302};
303
304static struct fb_fix_screeninfo smtcfb_fix = {
305 .id = "sm712fb",
306 .type = FB_TYPE_PACKED_PIXELS,
307 .visual = FB_VISUAL_TRUECOLOR,
308 .line_length = 800 * 3,
309 .accel = FB_ACCEL_SMI_LYNX,
310};
311
312/* chan_to_field
313 *
314 * convert a colour value into a field position
315 *
316 * from pxafb.c
317 */
318
319static inline unsigned int chan_to_field(unsigned int chan,
320 struct fb_bitfield *bf)
321{
322 chan &= 0xffff;
323 chan >>= 16 - bf->length;
324 return chan << bf->offset;
325}
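/*
 * Example: for a red field with offset 11 and length 5 (RGB565 layout),
 * chan_to_field(0xffff, &red) keeps the top 5 bits of the channel and
 * shifts them into place: 0xffff >> (16 - 5) = 0x1f, 0x1f << 11 = 0xf800.
 */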
326
327static int smtcfb_blank(int blank_mode, struct fb_info *info)
328{
329 /* clear DPMS setting */
330 switch (blank_mode) {
331 case FB_BLANK_UNBLANK:
332 /* Screen On: HSync: On, VSync : On */
333 smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
334 smtc_seqw(0x6a, 0x16);
335 smtc_seqw(0x6b, 0x02);
336 smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
337 smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
338 smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
339 smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
340 smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
341 break;
342 case FB_BLANK_NORMAL:
343 /* Screen Off: HSync: On, VSync : On Soft blank */
344 smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
345 smtc_seqw(0x6a, 0x16);
346 smtc_seqw(0x6b, 0x02);
347 smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
348 smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
349 smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
350 smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
351 break;
352 case FB_BLANK_VSYNC_SUSPEND:
353 /* Screen Off: HSync: On, VSync: Off */
354 smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
355 smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
356 smtc_seqw(0x6a, 0x0c);
357 smtc_seqw(0x6b, 0x02);
358 smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
359 smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
360 smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
361 smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
362 smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
363 smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
364 break;
365 case FB_BLANK_HSYNC_SUSPEND:
366 /* Screen Off: HSync: Off, VSync: On */
367 smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
368 smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
369 smtc_seqw(0x6a, 0x0c);
370 smtc_seqw(0x6b, 0x02);
371 smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
372 smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
373 smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
374 smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
375 smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
376 smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
377 break;
378 case FB_BLANK_POWERDOWN:
379 /* Screen Off: HSync: Off, VSync: Off */
380 smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
381 smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
382 smtc_seqw(0x6a, 0x0c);
383 smtc_seqw(0x6b, 0x02);
384 smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
385 smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
386 smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
387 smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
388 smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
389 smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
390 break;
391 default:
392 return -EINVAL;
393 }
394
395 return 0;
396}
397
398static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
399 unsigned blue, unsigned trans, struct fb_info *info)
400{
401 struct smtcfb_info *sfb = (struct smtcfb_info *)info;
402 u32 val;
403
404 if (regno > 255)
405 return 1;
406
407 switch (sfb->fb.fix.visual) {
408 case FB_VISUAL_DIRECTCOLOR:
409 case FB_VISUAL_TRUECOLOR:
410 /*
411 * 16/32-bit true colour: use the pseudo-palette for the 16 base colors
412 */
413 if (regno < 16) {
414 if (sfb->fb.var.bits_per_pixel == 16) {
415 u32 *pal = sfb->fb.pseudo_palette;
416 val = chan_to_field(red, &sfb->fb.var.red);
417 val |= chan_to_field(green, \
418 &sfb->fb.var.green);
419 val |= chan_to_field(blue, &sfb->fb.var.blue);
420#ifdef __BIG_ENDIAN
421 pal[regno] =
422 ((red & 0xf800) >> 8) |
423 ((green & 0xe000) >> 13) |
424 ((green & 0x1c00) << 3) |
425 ((blue & 0xf800) >> 3);
426#else
427 pal[regno] = val;
428#endif
429 } else {
430 u32 *pal = sfb->fb.pseudo_palette;
431 val = chan_to_field(red, &sfb->fb.var.red);
432 val |= chan_to_field(green, \
433 &sfb->fb.var.green);
434 val |= chan_to_field(blue, &sfb->fb.var.blue);
435#ifdef __BIG_ENDIAN
436 val =
437 ((val & 0xff00ff00) >> 8) |
438 ((val & 0x00ff00ff) << 8);
439#endif
440 pal[regno] = val;
441 }
442 }
443 break;
444
445 case FB_VISUAL_PSEUDOCOLOR:
446 /* color depth 8 bit */
447 sm712_setpalette(regno, red, green, blue, info);
448 break;
449
450 default:
451 return 1; /* unknown type */
452 }
453
454 return 0;
455
456}
457
458#ifdef __BIG_ENDIAN
459static ssize_t smtcfb_read(struct fb_info *info, char __user * buf, size_t
460 count, loff_t *ppos)
461{
462 unsigned long p = *ppos;
463
464 u32 *buffer, *dst;
465 u32 __iomem *src;
466 int c, i, cnt = 0, err = 0;
467 unsigned long total_size;
468
469 if (!info || !info->screen_base)
470 return -ENODEV;
471
472 if (info->state != FBINFO_STATE_RUNNING)
473 return -EPERM;
474
475 total_size = info->screen_size;
476
477 if (total_size == 0)
478 total_size = info->fix.smem_len;
479
480 if (p >= total_size)
481 return 0;
482
483 if (count >= total_size)
484 count = total_size;
485
486 if (count + p > total_size)
487 count = total_size - p;
488
489 buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
490 if (!buffer)
491 return -ENOMEM;
492
493 src = (u32 __iomem *) (info->screen_base + p);
494
495 if (info->fbops->fb_sync)
496 info->fbops->fb_sync(info);
497
498 while (count) {
499 c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
500 dst = buffer;
501 for (i = c >> 2; i--;) {
502 *dst = fb_readl(src++);
503 *dst =
504 ((*dst & 0xff00ff00) >> 8) |
505 ((*dst & 0x00ff00ff) << 8);
506 dst++;
507 }
508 if (c & 3) {
509 u8 *dst8 = (u8 *) dst;
510 u8 __iomem *src8 = (u8 __iomem *) src;
511
512 for (i = c & 3; i--;) {
513 if (i & 1) {
514 *dst8++ = fb_readb(++src8);
515 } else {
516 *dst8++ = fb_readb(--src8);
517 src8 += 2;
518 }
519 }
520 src = (u32 __iomem *) src8;
521 }
522
523 if (copy_to_user(buf, buffer, c)) {
524 err = -EFAULT;
525 break;
526 }
527 *ppos += c;
528 buf += c;
529 cnt += c;
530 count -= c;
531 }
532
533 kfree(buffer);
534
535 return (err) ? err : cnt;
536}
537
538static ssize_t
539smtcfb_write(struct fb_info *info, const char __user *buf, size_t count,
540 loff_t *ppos)
541{
542 unsigned long p = *ppos;
543
544 u32 *buffer, *src;
545 u32 __iomem *dst;
546 int c, i, cnt = 0, err = 0;
547 unsigned long total_size;
548
549 if (!info || !info->screen_base)
550 return -ENODEV;
551
552 if (info->state != FBINFO_STATE_RUNNING)
553 return -EPERM;
554
555 total_size = info->screen_size;
556
557 if (total_size == 0)
558 total_size = info->fix.smem_len;
559
560 if (p > total_size)
561 return -EFBIG;
562
563 if (count > total_size) {
564 err = -EFBIG;
565 count = total_size;
566 }
567
568 if (count + p > total_size) {
569 if (!err)
570 err = -ENOSPC;
571
572 count = total_size - p;
573 }
574
575 buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
576 if (!buffer)
577 return -ENOMEM;
578
579 dst = (u32 __iomem *) (info->screen_base + p);
580
581 if (info->fbops->fb_sync)
582 info->fbops->fb_sync(info);
583
584 while (count) {
585 c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
586 src = buffer;
587
588 if (copy_from_user(src, buf, c)) {
589 err = -EFAULT;
590 break;
591 }
592
593 for (i = c >> 2; i--;) {
594 fb_writel(((*src & 0xff00ff00) >> 8) |
595 ((*src & 0x00ff00ff) << 8), dst++);
596 src++;
597 }
598 if (c & 3) {
599 u8 *src8 = (u8 *) src;
600 u8 __iomem *dst8 = (u8 __iomem *) dst;
601
602 for (i = c & 3; i--;) {
603 if (i & 1) {
604 fb_writeb(*src8++, ++dst8);
605 } else {
606 fb_writeb(*src8++, --dst8);
607 dst8 += 2;
608 }
609 }
610 dst = (u32 __iomem *) dst8;
611 }
612
613 *ppos += c;
614 buf += c;
615 cnt += c;
616 count -= c;
617 }
618
619 kfree(buffer);
620
621 return (cnt) ? cnt : err;
622}
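/*
 * The swizzling loops in smtcfb_read()/smtcfb_write() above convert between
 * the little-endian framebuffer layout and the big-endian CPU view by
 * swapping the two bytes inside each 16-bit halfword of every 32-bit word.
 * A minimal sketch of that operation as a helper (hypothetical, not part of
 * the driver) would be:
 *
 *	static inline u32 smtc_swab16x2(u32 v)
 *	{
 *		return ((v & 0xff00ff00) >> 8) | ((v & 0x00ff00ff) << 8);
 *	}
 *
 * e.g. 0xAABBCCDD becomes 0xBBAADDCC.
 */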
623#endif /* __BIG_ENDIAN */
624
625#include "smtc2d.c"
626
627void smtcfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
628{
629 struct par_info *p = (struct par_info *)info->par;
630
631 if (smtc_2Dacceleration) {
632 if (!area->width || !area->height)
633 return;
634
635 deCopy(p->BaseAddressInVRAM, 0, info->var.bits_per_pixel,
636 area->dx, area->dy, area->width, area->height,
637 p->BaseAddressInVRAM, 0, area->sx, area->sy, 0, 0xC);
638
639 } else
640 cfb_copyarea(info, area);
641}
642
643void smtcfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
644{
645 struct par_info *p = (struct par_info *)info->par;
646
647 if (smtc_2Dacceleration) {
648 if (!rect->width || !rect->height)
649 return;
650 if (info->var.bits_per_pixel >= 24)
651 deFillRect(p->BaseAddressInVRAM, 0, rect->dx * 3,
652 rect->dy * 3, rect->width * 3, rect->height,
653 rect->color);
654 else
655 deFillRect(p->BaseAddressInVRAM, 0, rect->dx, rect->dy,
656 rect->width, rect->height, rect->color);
657 } else
658 cfb_fillrect(info, rect);
659}
660
661void smtcfb_imageblit(struct fb_info *info, const struct fb_image *image)
662{
663 struct par_info *p = (struct par_info *)info->par;
664 u32 bg_col = 0, fg_col = 0;
665
666 if ((smtc_2Dacceleration) && (image->depth == 1)) {
667 if (smtc_de_busy)
668 deWaitForNotBusy();
669
670 switch (info->var.bits_per_pixel) {
671 case 8:
672 bg_col = image->bg_color;
673 fg_col = image->fg_color;
674 break;
675 case 16:
676 bg_col =
677 ((u32 *) (info->pseudo_palette))[image->bg_color];
678 fg_col =
679 ((u32 *) (info->pseudo_palette))[image->fg_color];
680 break;
681 case 32:
682 bg_col =
683 ((u32 *) (info->pseudo_palette))[image->bg_color];
684 fg_col =
685 ((u32 *) (info->pseudo_palette))[image->fg_color];
686 break;
687 }
688
689 deSystemMem2VideoMemMonoBlt(
690 image->data,
691 image->width / 8,
692 0,
693 p->BaseAddressInVRAM,
694 0,
695 0,
696 image->dx, image->dy,
697 image->width, image->height,
698 fg_col, bg_col,
699 0x0C);
700
701 } else
702 cfb_imageblit(info, image);
703}
704
705static struct fb_ops smtcfb_ops = {
706 .owner = THIS_MODULE,
707 .fb_setcolreg = smtc_setcolreg,
708 .fb_blank = smtcfb_blank,
709 .fb_fillrect = smtcfb_fillrect,
710 .fb_imageblit = smtcfb_imageblit,
711 .fb_copyarea = smtcfb_copyarea,
712#ifdef __BIG_ENDIAN
713 .fb_read = smtcfb_read,
714 .fb_write = smtcfb_write,
715#endif
716
717};
718
719void smtcfb_setmode(struct smtcfb_info *sfb)
720{
721 switch (sfb->fb.var.bits_per_pixel) {
722 case 32:
723 sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
724 sfb->fb.fix.line_length = sfb->fb.var.xres * 4;
725 sfb->fb.var.red.length = 8;
726 sfb->fb.var.green.length = 8;
727 sfb->fb.var.blue.length = 8;
728 sfb->fb.var.red.offset = 16;
729 sfb->fb.var.green.offset = 8;
730 sfb->fb.var.blue.offset = 0;
731
732 break;
733 case 8:
734 sfb->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
735 sfb->fb.fix.line_length = sfb->fb.var.xres;
736 sfb->fb.var.red.offset = 5;
737 sfb->fb.var.red.length = 3;
738 sfb->fb.var.green.offset = 2;
739 sfb->fb.var.green.length = 3;
740 sfb->fb.var.blue.offset = 0;
741 sfb->fb.var.blue.length = 2;
742 break;
743 case 24:
744 sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
745 sfb->fb.fix.line_length = sfb->fb.var.xres * 3;
746 sfb->fb.var.red.length = 8;
747 sfb->fb.var.green.length = 8;
748 sfb->fb.var.blue.length = 8;
749
750 sfb->fb.var.red.offset = 16;
751 sfb->fb.var.green.offset = 8;
752 sfb->fb.var.blue.offset = 0;
753
754 break;
755 case 16:
756 default:
757 sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
758 sfb->fb.fix.line_length = sfb->fb.var.xres * 2;
759
760 sfb->fb.var.red.length = 5;
761 sfb->fb.var.green.length = 6;
762 sfb->fb.var.blue.length = 5;
763
764 sfb->fb.var.red.offset = 11;
765 sfb->fb.var.green.offset = 5;
766 sfb->fb.var.blue.offset = 0;
767
768 break;
769 }
770
771 hw.width = sfb->fb.var.xres;
772 hw.height = sfb->fb.var.yres;
773 hw.hz = 60;
774 smtc_set_timing(sfb, &hw);
775 if (smtc_2Dacceleration) {
776		printk(KERN_INFO "2D acceleration enabled!\n");
777 /* Init smtc drawing engine */
778 deInit(sfb->fb.var.xres, sfb->fb.var.yres,
779 sfb->fb.var.bits_per_pixel);
780 }
781}
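/*
 * For reference, the bitfield layout programmed above for the default 16 bpp
 * case is standard RGB565.  A minimal sketch of how one pixel would be packed
 * under that layout (illustrative only, not used by the driver):
 *
 *	static inline u16 rgb565_pack(u8 r, u8 g, u8 b)
 *	{
 *		return ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);
 *	}
 */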
782
783/*
784 * Alloc struct smtcfb_info and assign the default value
785 */
786static struct smtcfb_info *smtc_alloc_fb_info(struct pci_dev *dev,
787 char *name)
788{
789 struct smtcfb_info *sfb;
790
791 sfb = kmalloc(sizeof(struct smtcfb_info), GFP_KERNEL);
792
793 if (!sfb)
794 return NULL;
795
796 memset(sfb, 0, sizeof(struct smtcfb_info));
797
798 sfb->currcon = -1;
799 sfb->dev = dev;
800
801 /*** Init sfb->fb with default value ***/
802 sfb->fb.flags = FBINFO_FLAG_DEFAULT;
803 sfb->fb.fbops = &smtcfb_ops;
804 sfb->fb.var = smtcfb_var;
805 sfb->fb.fix = smtcfb_fix;
806
807 strcpy(sfb->fb.fix.id, name);
808
809 sfb->fb.fix.type = FB_TYPE_PACKED_PIXELS;
810 sfb->fb.fix.type_aux = 0;
811 sfb->fb.fix.xpanstep = 0;
812 sfb->fb.fix.ypanstep = 0;
813 sfb->fb.fix.ywrapstep = 0;
814 sfb->fb.fix.accel = FB_ACCEL_SMI_LYNX;
815
816 sfb->fb.var.nonstd = 0;
817 sfb->fb.var.activate = FB_ACTIVATE_NOW;
818 sfb->fb.var.height = -1;
819 sfb->fb.var.width = -1;
820 /* text mode acceleration */
821 sfb->fb.var.accel_flags = FB_ACCELF_TEXT;
822 sfb->fb.var.vmode = FB_VMODE_NONINTERLACED;
823 sfb->fb.par = &hw;
824 sfb->fb.pseudo_palette = colreg;
825
826 return sfb;
827}
828
829/*
830 *	Unmap the memory mapped IO registers
831 */
832
833static void smtc_unmap_mmio(struct smtcfb_info *sfb)
834{
835 if (sfb && smtc_RegBaseAddress)
836 smtc_RegBaseAddress = NULL;
837}
838
839/*
840 * Map in the screen memory
841 */
842
843static int smtc_map_smem(struct smtcfb_info *sfb,
844 struct pci_dev *dev, u_long smem_len)
845{
846 if (sfb->fb.var.bits_per_pixel == 32) {
847#ifdef __BIG_ENDIAN
848 sfb->fb.fix.smem_start = pci_resource_start(dev, 0)
849 + 0x800000;
850#else
851 sfb->fb.fix.smem_start = pci_resource_start(dev, 0);
852#endif
853 } else {
854 sfb->fb.fix.smem_start = pci_resource_start(dev, 0);
855 }
856
857 sfb->fb.fix.smem_len = smem_len;
858
859 sfb->fb.screen_base = smtc_VRAMBaseAddress;
860
861 if (!sfb->fb.screen_base) {
862 printk(KERN_INFO "%s: unable to map screen memory\n",
863 sfb->fb.fix.id);
864 return -ENOMEM;
865 }
866
867 return 0;
868}
869
870/*
871 *	Unmap the screen memory
872 *
873 */
874static void smtc_unmap_smem(struct smtcfb_info *sfb)
875{
876 if (sfb && sfb->fb.screen_base) {
877 iounmap(sfb->fb.screen_base);
878 sfb->fb.screen_base = NULL;
879 }
880}
881
882/*
883 *	We need to wake up the LynxEM+, and make sure it's in linear memory mode.
884 */
885static inline void sm7xx_init_hw(void)
886{
887 outb_p(0x18, 0x3c4);
888 outb_p(0x11, 0x3c5);
889}
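/*
 * 0x3c4/0x3c5 are the legacy VGA sequencer index/data ports, so the two
 * outb_p() calls above write 0x11 into sequencer register 0x18
 * (interpretation based on the standard VGA register layout; the bit
 * meanings of that register are chip specific).
 */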
890
891static void smtc_free_fb_info(struct smtcfb_info *sfb)
892{
893 if (sfb) {
894 fb_alloc_cmap(&sfb->fb.cmap, 0, 0);
895 kfree(sfb);
896 }
897}
898
899/*
900 * sm712vga_setup - process command line options, get vga parameter
901 * @options: string of options
902 *	Returns zero on success, non-zero otherwise.
903 *
904 */
905static int __init __maybe_unused sm712vga_setup(char *options)
906{
907 int index;
908
909 if (!options || !*options) {
910 smdbg("\n No vga parameter\n");
911 return -EINVAL;
912 }
913
914 smtc_screen_info.lfb_width = 0;
915 smtc_screen_info.lfb_height = 0;
916 smtc_screen_info.lfb_depth = 0;
917
918 smdbg("\nsm712vga_setup = %s\n", options);
919
920 for (index = 0;
921 index < (sizeof(vesa_mode) / sizeof(struct vesa_mode_table));
922 index++) {
923 if (strstr(options, vesa_mode[index].mode_index)) {
924 smtc_screen_info.lfb_width = vesa_mode[index].lfb_width;
925 smtc_screen_info.lfb_height =
926 vesa_mode[index].lfb_height;
927 smtc_screen_info.lfb_depth = vesa_mode[index].lfb_depth;
928 return 0;
929 }
930 }
931
932 return -1;
933}
934__setup("vga=", sm712vga_setup);
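/*
 * Usage note: the mode is picked by a substring match against the vesa_mode
 * table, so a kernel command line such as
 *
 *	vga=<mode_index string from the vesa_mode table>
 *
 * fills in smtc_screen_info with that entry's width, height and depth before
 * the PCI probe runs.  (The concrete mode_index strings are defined in the
 * vesa_mode table elsewhere in the driver and are not repeated here.)
 */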
935
936/* Jason (08/13/2009)
937 * Original init function changed to a probe method used by the PCI driver;
938 * the chip-detection process is now handled by the PCI core (pci_drv)
939 */
940static int __init smtcfb_pci_probe(struct pci_dev *pdev,
941 const struct pci_device_id *ent)
942{
943 struct smtcfb_info *sfb;
944 u_long smem_size = 0x00800000; /* default 8MB */
945 char name[16];
946 int err;
947 unsigned long pFramebufferPhysical;
948
949 printk(KERN_INFO
950 "Silicon Motion display driver " SMTC_LINUX_FB_VERSION "\n");
951
952 err = pci_enable_device(pdev); /* enable SMTC chip */
953
954 if (err)
955 return err;
956 err = -ENOMEM;
957
958 hw.chipID = ent->device;
959 sprintf(name, "sm%Xfb", hw.chipID);
960
961 sfb = smtc_alloc_fb_info(pdev, name);
962
963 if (!sfb)
964 goto failed;
965 /* Jason (08/13/2009)
966 * Store fb_info to be further used when suspending and resuming
967 */
968 pci_set_drvdata(pdev, sfb);
969
970 sm7xx_init_hw();
971
972	/* get mode parameters from smtc_screen_info */
973 if (smtc_screen_info.lfb_width != 0) {
974 sfb->fb.var.xres = smtc_screen_info.lfb_width;
975 sfb->fb.var.yres = smtc_screen_info.lfb_height;
976 sfb->fb.var.bits_per_pixel = smtc_screen_info.lfb_depth;
977 } else {
978 /* default resolution 1024x600 16bit mode */
979 sfb->fb.var.xres = SCREEN_X_RES;
980 sfb->fb.var.yres = SCREEN_Y_RES;
981 sfb->fb.var.bits_per_pixel = SCREEN_BPP;
982 }
983
984#ifdef __BIG_ENDIAN
985 if (sfb->fb.var.bits_per_pixel == 24)
986 sfb->fb.var.bits_per_pixel = (smtc_screen_info.lfb_depth = 32);
987#endif
988 /* Map address and memory detection */
989 pFramebufferPhysical = pci_resource_start(pdev, 0);
990 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw.chipRevID);
991
992 switch (hw.chipID) {
993 case 0x710:
994 case 0x712:
995 sfb->fb.fix.mmio_start = pFramebufferPhysical + 0x00400000;
996 sfb->fb.fix.mmio_len = 0x00400000;
997 smem_size = SM712_VIDEOMEMORYSIZE;
998#ifdef __BIG_ENDIAN
999 hw.m_pLFB = (smtc_VRAMBaseAddress =
1000 ioremap(pFramebufferPhysical, 0x00c00000));
1001#else
1002 hw.m_pLFB = (smtc_VRAMBaseAddress =
1003 ioremap(pFramebufferPhysical, 0x00800000));
1004#endif
1005 hw.m_pMMIO = (smtc_RegBaseAddress =
1006 smtc_VRAMBaseAddress + 0x00700000);
1007 smtc_2DBaseAddress = (hw.m_pDPR =
1008 smtc_VRAMBaseAddress + 0x00408000);
1009 smtc_2Ddataport = smtc_VRAMBaseAddress + DE_DATA_PORT_712;
1010 hw.m_pVPR = hw.m_pLFB + 0x0040c000;
1011#ifdef __BIG_ENDIAN
1012 if (sfb->fb.var.bits_per_pixel == 32) {
1013 smtc_VRAMBaseAddress += 0x800000;
1014 hw.m_pLFB += 0x800000;
1015 printk(KERN_INFO
1016 "\nsmtc_VRAMBaseAddress=%p hw.m_pLFB=%p\n",
1017 smtc_VRAMBaseAddress, hw.m_pLFB);
1018 }
1019#endif
1020 if (!smtc_RegBaseAddress) {
1021 printk(KERN_INFO
1022 "%s: unable to map memory mapped IO\n",
1023 sfb->fb.fix.id);
1024 return -ENOMEM;
1025 }
1026
1027 /* set MCLK = 14.31818 * (0x16 / 0x2) */
1028 smtc_seqw(0x6a, 0x16);
1029 smtc_seqw(0x6b, 0x02);
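		/*
		 * i.e. assuming a 14.31818 MHz reference clock with SR6A as
		 * the multiplier and SR6B as the divider (as the comment
		 * above implies): 0x16 = 22, 0x02 = 2, so
		 * MCLK = 14.31818 MHz * 22 / 2 = 157.5 MHz.
		 */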
1030 smtc_seqw(0x62, 0x3e);
1031 /* enable PCI burst */
1032 smtc_seqw(0x17, 0x20);
1033 /* enable word swap */
1034#ifdef __BIG_ENDIAN
1035 if (sfb->fb.var.bits_per_pixel == 32)
1036 smtc_seqw(0x17, 0x30);
1037#endif
1038#ifdef CONFIG_FB_SM7XX_ACCEL
1039 smtc_2Dacceleration = 1;
1040#endif
1041 break;
1042 case 0x720:
1043 sfb->fb.fix.mmio_start = pFramebufferPhysical;
1044 sfb->fb.fix.mmio_len = 0x00200000;
1045 smem_size = SM722_VIDEOMEMORYSIZE;
1046 smtc_2DBaseAddress = (hw.m_pDPR =
1047 ioremap(pFramebufferPhysical, 0x00a00000));
1048 hw.m_pLFB = (smtc_VRAMBaseAddress =
1049 smtc_2DBaseAddress + 0x00200000);
1050 hw.m_pMMIO = (smtc_RegBaseAddress =
1051 smtc_2DBaseAddress + 0x000c0000);
1052 smtc_2Ddataport = smtc_2DBaseAddress + DE_DATA_PORT_722;
1053 hw.m_pVPR = smtc_2DBaseAddress + 0x800;
1054
1055 smtc_seqw(0x62, 0xff);
1056 smtc_seqw(0x6a, 0x0d);
1057 smtc_seqw(0x6b, 0x02);
1058 smtc_2Dacceleration = 0;
1059 break;
1060 default:
1061 printk(KERN_INFO
1062 "No valid Silicon Motion display chip was detected!\n");
1063
1064 smtc_free_fb_info(sfb);
1065 return err;
1066 }
1067
1068	/* 15 bpp is not supported; treat it as 16 bpp */
1069 if (15 == sfb->fb.var.bits_per_pixel)
1070 sfb->fb.var.bits_per_pixel = 16;
1071
1072 sfb->fb.var.xres_virtual = sfb->fb.var.xres;
1073 sfb->fb.var.yres_virtual = sfb->fb.var.yres;
1074 err = smtc_map_smem(sfb, pdev, smem_size);
1075 if (err)
1076 goto failed;
1077
1078 smtcfb_setmode(sfb);
1079	/* Primary display starting from position 0 */
1080 hw.BaseAddressInVRAM = 0;
1081 sfb->fb.par = &hw;
1082
1083 err = register_framebuffer(&sfb->fb);
1084 if (err < 0)
1085 goto failed;
1086
1087	printk(KERN_INFO "Silicon Motion SM%X Rev%X primary display mode "
1088 "%dx%d-%d Init Complete.\n", hw.chipID, hw.chipRevID,
1089 sfb->fb.var.xres, sfb->fb.var.yres,
1090 sfb->fb.var.bits_per_pixel);
1091
1092 return 0;
1093
1094 failed:
1095 printk(KERN_INFO "Silicon Motion, Inc. primary display init fail\n");
1096
1097 smtc_unmap_smem(sfb);
1098 smtc_unmap_mmio(sfb);
1099 smtc_free_fb_info(sfb);
1100
1101 return err;
1102}
1103
1104
1105/* Jason (08/11/2009) essential structs for the PCI driver wrapper */
1106static struct pci_device_id smtcfb_pci_table[] = {
1107 {0x126f, 0x710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1108 {0x126f, 0x712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1109 {0x126f, 0x720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1110 {0,}
1111};
1112
1113
1114/* Jason (08/14/2009)
1115 * do some clean up when the driver module is removed
1116 */
1117static void __devexit smtcfb_pci_remove(struct pci_dev *pdev)
1118{
1119 struct smtcfb_info *sfb;
1120
1121 sfb = pci_get_drvdata(pdev);
1122 pci_set_drvdata(pdev, NULL);
1123 smtc_unmap_smem(sfb);
1124 smtc_unmap_mmio(sfb);
1125 unregister_framebuffer(&sfb->fb);
1126 smtc_free_fb_info(sfb);
1127}
1128
1129/* Jason (08/14/2009)
1130 * suspend function, called when the suspend event is triggered
1131 */
1132static int __maybe_unused smtcfb_suspend(struct pci_dev *pdev, pm_message_t msg)
1133{
1134 struct smtcfb_info *sfb;
1135 int retv;
1136
1137 sfb = pci_get_drvdata(pdev);
1138
1139	/* set the hardware to sleep mode, using the external clock and self
1140	 * memory refresh, so that the internal PLLs can be turned off later on
1141 */
1142 smtc_seqw(0x20, (smtc_seqr(0x20) | 0xc0));
1143 smtc_seqw(0x69, (smtc_seqr(0x69) & 0xf7));
1144
1145 switch (msg.event) {
1146 case PM_EVENT_FREEZE:
1147 case PM_EVENT_PRETHAW:
1148 pdev->dev.power.power_state = msg;
1149 return 0;
1150 }
1151
1152	/* on a full suspend, notify the fb layer and put the PCI device to sleep */
1153 if (msg.event == PM_EVENT_SUSPEND) {
1154 acquire_console_sem();
1155 fb_set_suspend(&sfb->fb, 1);
1156 release_console_sem();
1157 retv = pci_save_state(pdev);
1158 pci_disable_device(pdev);
1159 retv = pci_choose_state(pdev, msg);
1160 retv = pci_set_power_state(pdev, retv);
1161 }
1162
1163 pdev->dev.power.power_state = msg;
1164
1165	/* additionally turn off all function blocks, including the internal PLLs */
1166 smtc_seqw(0x21, 0xff);
1167
1168 return 0;
1169}
1170
1171static int __maybe_unused smtcfb_resume(struct pci_dev *pdev)
1172{
1173 struct smtcfb_info *sfb;
1174 int retv;
1175
1176 sfb = pci_get_drvdata(pdev);
1177
1178	/* when resuming, restore the PCI state and re-enable the device */
1179 if (pdev->dev.power.power_state.event != PM_EVENT_FREEZE) {
1180 retv = pci_set_power_state(pdev, PCI_D0);
1181 retv = pci_restore_state(pdev);
1182 if (pci_enable_device(pdev))
1183 return -1;
1184 pci_set_master(pdev);
1185 }
1186
1187 /* reinit hardware */
1188 sm7xx_init_hw();
1189 switch (hw.chipID) {
1190 case 0x710:
1191 case 0x712:
1192 /* set MCLK = 14.31818 * (0x16 / 0x2) */
1193 smtc_seqw(0x6a, 0x16);
1194 smtc_seqw(0x6b, 0x02);
1195 smtc_seqw(0x62, 0x3e);
1196 /* enable PCI burst */
1197 smtc_seqw(0x17, 0x20);
1198#ifdef __BIG_ENDIAN
1199 if (sfb->fb.var.bits_per_pixel == 32)
1200 smtc_seqw(0x17, 0x30);
1201#endif
1202 break;
1203 case 0x720:
1204 smtc_seqw(0x62, 0xff);
1205 smtc_seqw(0x6a, 0x0d);
1206 smtc_seqw(0x6b, 0x02);
1207 break;
1208 }
1209
1210 smtc_seqw(0x34, (smtc_seqr(0x34) | 0xc0));
1211 smtc_seqw(0x33, ((smtc_seqr(0x33) | 0x08) & 0xfb));
1212
1213 smtcfb_setmode(sfb);
1214
1215 acquire_console_sem();
1216 fb_set_suspend(&sfb->fb, 0);
1217 release_console_sem();
1218
1219 return 0;
1220}
1221
1222/* Jason (08/13/2009)
1223 * pci_driver struct used to wrap the original driver
1224 * so that it can be registered with the kernel and
1225 * the proper methods are called when suspending and resuming
1226 */
1227static struct pci_driver smtcfb_driver = {
1228 .name = "smtcfb",
1229 .id_table = smtcfb_pci_table,
1230 .probe = smtcfb_pci_probe,
1231 .remove = __devexit_p(smtcfb_pci_remove),
1232#ifdef CONFIG_PM
1233 .suspend = smtcfb_suspend,
1234 .resume = smtcfb_resume,
1235#endif
1236};
1237
1238static int __init smtcfb_init(void)
1239{
1240 return pci_register_driver(&smtcfb_driver);
1241}
1242
1243static void __exit smtcfb_exit(void)
1244{
1245 pci_unregister_driver(&smtcfb_driver);
1246}
1247
1248module_init(smtcfb_init);
1249module_exit(smtcfb_exit);
1250
1251MODULE_AUTHOR("Siliconmotion ");
1252MODULE_DESCRIPTION("Framebuffer driver for SMI Graphic Cards");
1253MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sm7xx/smtcfb.h b/drivers/staging/sm7xx/smtcfb.h
new file mode 100644
index 000000000000..7f2c34138215
--- /dev/null
+++ b/drivers/staging/sm7xx/smtcfb.h
@@ -0,0 +1,793 @@
1/*
2 * Silicon Motion SM712 frame buffer device
3 *
4 * Copyright (C) 2006 Silicon Motion Technology Corp.
5 * Authors: Ge Wang, gewang@siliconmotion.com
6 * Boyod boyod.yang@siliconmotion.com.cn
7 *
8 * Copyright (C) 2009 Lemote, Inc.
9 * Author: Wu Zhangjin, wuzj@lemote.com
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file COPYING in the main directory of this archive for
13 * more details.
14 */
15
16#define SMTC_LINUX_FB_VERSION "version 0.11.2619.21.01 July 27, 2008"
17
18#define NR_PALETTE 256
19#define NR_RGB 2
20
21#define FB_ACCEL_SMI_LYNX 88
22
23#ifdef __BIG_ENDIAN
24#define PC_VGA 0
25#else
26#define PC_VGA 1
27#endif
28
29#define SCREEN_X_RES 1024
30#define SCREEN_Y_RES 600
31#define SCREEN_BPP 16
32
33#ifndef FIELD_OFFSET
34#define FIELD_OFFSET(type, field) \
35	((unsigned long) &(((type *)0)->field))
36#endif
37
38/*Assume SM712 graphics chip has 4MB VRAM */
39#define SM712_VIDEOMEMORYSIZE 0x00400000
40/*Assume SM722 graphics chip has 8MB VRAM */
41#define SM722_VIDEOMEMORYSIZE 0x00800000
42
43#define dac_reg (0x3c8)
44#define dac_val (0x3c9)
45
46extern char *smtc_RegBaseAddress;
47#define smtc_mmiowb(dat, reg) writeb(dat, smtc_RegBaseAddress + reg)
48#define smtc_mmioww(dat, reg) writew(dat, smtc_RegBaseAddress + reg)
49#define smtc_mmiowl(dat, reg) writel(dat, smtc_RegBaseAddress + reg)
50
51#define smtc_mmiorb(reg) readb(smtc_RegBaseAddress + reg)
52#define smtc_mmiorw(reg) readw(smtc_RegBaseAddress + reg)
53#define smtc_mmiorl(reg) readl(smtc_RegBaseAddress + reg)
54
55#define SIZE_SR00_SR04 (0x04 - 0x00 + 1)
56#define SIZE_SR10_SR24 (0x24 - 0x10 + 1)
57#define SIZE_SR30_SR75 (0x75 - 0x30 + 1)
58#define SIZE_SR80_SR93 (0x93 - 0x80 + 1)
59#define SIZE_SRA0_SRAF (0xAF - 0xA0 + 1)
60#define SIZE_GR00_GR08 (0x08 - 0x00 + 1)
61#define SIZE_AR00_AR14 (0x14 - 0x00 + 1)
62#define SIZE_CR00_CR18 (0x18 - 0x00 + 1)
63#define SIZE_CR30_CR4D (0x4D - 0x30 + 1)
64#define SIZE_CR90_CRA7 (0xA7 - 0x90 + 1)
65#define SIZE_VPR (0x6C + 1)
66#define SIZE_DPR (0x44 + 1)
67
68static inline void smtc_crtcw(int reg, int val)
69{
70 smtc_mmiowb(reg, 0x3d4);
71 smtc_mmiowb(val, 0x3d5);
72}
73
74static inline unsigned int smtc_crtcr(int reg)
75{
76 smtc_mmiowb(reg, 0x3d4);
77 return smtc_mmiorb(0x3d5);
78}
79
80static inline void smtc_grphw(int reg, int val)
81{
82 smtc_mmiowb(reg, 0x3ce);
83 smtc_mmiowb(val, 0x3cf);
84}
85
86static inline unsigned int smtc_grphr(int reg)
87{
88 smtc_mmiowb(reg, 0x3ce);
89 return smtc_mmiorb(0x3cf);
90}
91
92static inline void smtc_attrw(int reg, int val)
93{
94 smtc_mmiorb(0x3da);
95 smtc_mmiowb(reg, 0x3c0);
96 smtc_mmiorb(0x3c1);
97 smtc_mmiowb(val, 0x3c0);
98}
99
100static inline void smtc_seqw(int reg, int val)
101{
102 smtc_mmiowb(reg, 0x3c4);
103 smtc_mmiowb(val, 0x3c5);
104}
105
106static inline unsigned int smtc_seqr(int reg)
107{
108 smtc_mmiowb(reg, 0x3c4);
109 return smtc_mmiorb(0x3c5);
110}
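/*
 * These index/data accessors are typically used for read-modify-write
 * updates of the extended registers, e.g. as in the driver's suspend path:
 *
 *	smtc_seqw(0x20, smtc_seqr(0x20) | 0xc0);
 */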
111
112/* The next structure holds all information relevant for a specific video mode.
113 */
114
115struct ModeInit {
116 int mmSizeX;
117 int mmSizeY;
118 int bpp;
119 int hz;
120 unsigned char Init_MISC;
121 unsigned char Init_SR00_SR04[SIZE_SR00_SR04];
122 unsigned char Init_SR10_SR24[SIZE_SR10_SR24];
123 unsigned char Init_SR30_SR75[SIZE_SR30_SR75];
124 unsigned char Init_SR80_SR93[SIZE_SR80_SR93];
125 unsigned char Init_SRA0_SRAF[SIZE_SRA0_SRAF];
126 unsigned char Init_GR00_GR08[SIZE_GR00_GR08];
127 unsigned char Init_AR00_AR14[SIZE_AR00_AR14];
128 unsigned char Init_CR00_CR18[SIZE_CR00_CR18];
129 unsigned char Init_CR30_CR4D[SIZE_CR30_CR4D];
130 unsigned char Init_CR90_CRA7[SIZE_CR90_CRA7];
131};
132
133/**********************************************************************
134 SM712 Mode table.
135 **********************************************************************/
136struct ModeInit VGAMode[] = {
137 {
138 /* mode#0: 640 x 480 16Bpp 60Hz */
139 640, 480, 16, 60,
140 /* Init_MISC */
141 0xE3,
142 { /* Init_SR0_SR4 */
143 0x03, 0x01, 0x0F, 0x00, 0x0E,
144 },
145 { /* Init_SR10_SR24 */
146 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
147 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
148 0xC4, 0x30, 0x02, 0x01, 0x01,
149 },
150 { /* Init_SR30_SR75 */
151 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
152 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
153 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
154 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
155 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
156 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
157 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
158 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
159 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
160 },
161 { /* Init_SR80_SR93 */
162 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
163 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
164 0x00, 0x00, 0x00, 0x00,
165 },
166 { /* Init_SRA0_SRAF */
167 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
168 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
169 },
170 { /* Init_GR00_GR08 */
171 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
172 0xFF,
173 },
174 { /* Init_AR00_AR14 */
175 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
176 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
177 0x41, 0x00, 0x0F, 0x00, 0x00,
178 },
179 { /* Init_CR00_CR18 */
180 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
181 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
182 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
183 0xFF,
184 },
185 { /* Init_CR30_CR4D */
186 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
187 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
188 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
189 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
190 },
191 { /* Init_CR90_CRA7 */
192 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
193 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
194 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
195 },
196 },
197 {
198 /* mode#1: 640 x 480 24Bpp 60Hz */
199 640, 480, 24, 60,
200 /* Init_MISC */
201 0xE3,
202 { /* Init_SR0_SR4 */
203 0x03, 0x01, 0x0F, 0x00, 0x0E,
204 },
205 { /* Init_SR10_SR24 */
206 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
207 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
208 0xC4, 0x30, 0x02, 0x01, 0x01,
209 },
210 { /* Init_SR30_SR75 */
211 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
212 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
213 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
214 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
215 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
216 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
217 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
218 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
219 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
220 },
221 { /* Init_SR80_SR93 */
222 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
223 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
224 0x00, 0x00, 0x00, 0x00,
225 },
226 { /* Init_SRA0_SRAF */
227 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
228 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
229 },
230 { /* Init_GR00_GR08 */
231 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
232 0xFF,
233 },
234 { /* Init_AR00_AR14 */
235 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
236 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
237 0x41, 0x00, 0x0F, 0x00, 0x00,
238 },
239 { /* Init_CR00_CR18 */
240 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
241 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
242 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
243 0xFF,
244 },
245 { /* Init_CR30_CR4D */
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
247 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
248 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
249 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
250 },
251 { /* Init_CR90_CRA7 */
252 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
253 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
254 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
255 },
256 },
257 {
258 /* mode#0: 640 x 480 32Bpp 60Hz */
259 640, 480, 32, 60,
260 /* Init_MISC */
261 0xE3,
262 { /* Init_SR0_SR4 */
263 0x03, 0x01, 0x0F, 0x00, 0x0E,
264 },
265 { /* Init_SR10_SR24 */
266 0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
267 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
268 0xC4, 0x30, 0x02, 0x01, 0x01,
269 },
270 { /* Init_SR30_SR75 */
271 0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
272 0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
273 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
274 0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
275 0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
276 0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
277 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
278 0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
279 0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
280 },
281 { /* Init_SR80_SR93 */
282 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
283 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
284 0x00, 0x00, 0x00, 0x00,
285 },
286 { /* Init_SRA0_SRAF */
287 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
288 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
289 },
290 { /* Init_GR00_GR08 */
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
292 0xFF,
293 },
294 { /* Init_AR00_AR14 */
295 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
296 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
297 0x41, 0x00, 0x0F, 0x00, 0x00,
298 },
299 { /* Init_CR00_CR18 */
300 0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
301 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
302 0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
303 0xFF,
304 },
305 { /* Init_CR30_CR4D */
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
307 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
308 0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
309 0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
310 },
311 { /* Init_CR90_CRA7 */
312 0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
313 0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
314 0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
315 },
316 },
317
318 { /* mode#2: 800 x 600 16Bpp 60Hz */
319 800, 600, 16, 60,
320 /* Init_MISC */
321 0x2B,
322 { /* Init_SR0_SR4 */
323 0x03, 0x01, 0x0F, 0x03, 0x0E,
324 },
325 { /* Init_SR10_SR24 */
326 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
327 0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
328 0xC4, 0x30, 0x02, 0x01, 0x01,
329 },
330 { /* Init_SR30_SR75 */
331 0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
332 0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
333 0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
334 0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
335 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
336 0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
337 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
338 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
339 0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
340 },
341 { /* Init_SR80_SR93 */
342 0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
343 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
344 0x00, 0x00, 0x00, 0x00,
345 },
346 { /* Init_SRA0_SRAF */
347 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
348 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
349 },
350 { /* Init_GR00_GR08 */
351 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
352 0xFF,
353 },
354 { /* Init_AR00_AR14 */
355 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
356 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
357 0x41, 0x00, 0x0F, 0x00, 0x00,
358 },
359 { /* Init_CR00_CR18 */
360 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
361 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
362 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
363 0xFF,
364 },
365 { /* Init_CR30_CR4D */
366 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
367 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
368 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
369 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
370 },
371 { /* Init_CR90_CRA7 */
372 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
373 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
374 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
375 },
376 },
377 { /* mode#3: 800 x 600 24Bpp 60Hz */
378 800, 600, 24, 60,
379 0x2B,
380 { /* Init_SR0_SR4 */
381 0x03, 0x01, 0x0F, 0x03, 0x0E,
382 },
383 { /* Init_SR10_SR24 */
384 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
385 0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
386 0xC4, 0x30, 0x02, 0x01, 0x01,
387 },
388 { /* Init_SR30_SR75 */
389 0x36, 0x03, 0x20, 0x09, 0xC0, 0x36, 0x36, 0x36,
390 0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x03, 0xFF,
391 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
392 0x20, 0x0C, 0x44, 0x20, 0x00, 0x36, 0x36, 0x36,
393 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
394 0x04, 0x55, 0x59, 0x36, 0x36, 0x00, 0x00, 0x36,
395 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
396 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
397 0x02, 0x45, 0x30, 0x30, 0x40, 0x20,
398 },
399 { /* Init_SR80_SR93 */
400 0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x36,
401 0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x36, 0x36,
402 0x00, 0x00, 0x00, 0x00,
403 },
404 { /* Init_SRA0_SRAF */
405 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
406 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
407 },
408 { /* Init_GR00_GR08 */
409 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
410 0xFF,
411 },
412 { /* Init_AR00_AR14 */
413 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
414 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
415 0x41, 0x00, 0x0F, 0x00, 0x00,
416 },
417 { /* Init_CR00_CR18 */
418 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
419 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
420 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
421 0xFF,
422 },
423 { /* Init_CR30_CR4D */
424 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
425 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
426 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
427 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
428 },
429 { /* Init_CR90_CRA7 */
430 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
431 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
432 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
433 },
434 },
435 { /* mode#7: 800 x 600 32Bpp 60Hz */
436 800, 600, 32, 60,
437 /* Init_MISC */
438 0x2B,
439 { /* Init_SR0_SR4 */
440 0x03, 0x01, 0x0F, 0x03, 0x0E,
441 },
442 { /* Init_SR10_SR24 */
443 0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
444 0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
445 0xC4, 0x30, 0x02, 0x01, 0x01,
446 },
447 { /* Init_SR30_SR75 */
448 0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
449 0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
450 0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
451 0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
452 0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
453 0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
454 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
455 0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
456 0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
457 },
458 { /* Init_SR80_SR93 */
459 0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
460 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
461 0x00, 0x00, 0x00, 0x00,
462 },
463 { /* Init_SRA0_SRAF */
464 0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
465 0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
466 },
467 { /* Init_GR00_GR08 */
468 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
469 0xFF,
470 },
471 { /* Init_AR00_AR14 */
472 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
473 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
474 0x41, 0x00, 0x0F, 0x00, 0x00,
475 },
476 { /* Init_CR00_CR18 */
477 0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
478 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
479 0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
480 0xFF,
481 },
482 { /* Init_CR30_CR4D */
483 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
484 0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
485 0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
486 0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
487 },
488 { /* Init_CR90_CRA7 */
489 0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
490 0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
491 0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
492 },
493 },
494	/* We use the 1024x768 timing table to drive the 1024x600 panel on Lemote machines */
495 { /* mode#4: 1024 x 600 16Bpp 60Hz */
496 1024, 600, 16, 60,
497 /* Init_MISC */
498 0xEB,
499 { /* Init_SR0_SR4 */
500 0x03, 0x01, 0x0F, 0x00, 0x0E,
501 },
502 { /* Init_SR10_SR24 */
503 0xC8, 0x40, 0x14, 0x60, 0x00, 0x0A, 0x17, 0x20,
504 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
505 0xC4, 0x30, 0x02, 0x00, 0x01,
506 },
507 { /* Init_SR30_SR75 */
508 0x22, 0x03, 0x24, 0x09, 0xC0, 0x22, 0x22, 0x22,
509 0x22, 0x22, 0x22, 0x22, 0x00, 0x00, 0x03, 0xFF,
510 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
511 0x20, 0x0C, 0x44, 0x20, 0x00, 0x22, 0x22, 0x22,
512 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
513 0x00, 0x60, 0x59, 0x22, 0x22, 0x00, 0x00, 0x22,
514 0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
515 0x50, 0x03, 0x16, 0x02, 0x0D, 0x82, 0x09, 0x02,
516 0x04, 0x45, 0x3F, 0x30, 0x40, 0x20,
517 },
518 { /* Init_SR80_SR93 */
519 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
520 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
521 0x00, 0x00, 0x00, 0x00,
522 },
523 { /* Init_SRA0_SRAF */
524 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
525 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
526 },
527 { /* Init_GR00_GR08 */
528 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
529 0xFF,
530 },
531 { /* Init_AR00_AR14 */
532 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
533 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
534 0x41, 0x00, 0x0F, 0x00, 0x00,
535 },
536 { /* Init_CR00_CR18 */
537 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
538 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
539 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
540 0xFF,
541 },
542 { /* Init_CR30_CR4D */
543 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
544 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
545 0xA3, 0x7F, 0x00, 0x82, 0x0b, 0x6f, 0x57, 0x00,
546 0x5c, 0x0f, 0xE0, 0xe0, 0x7F, 0x57,
547 },
548 { /* Init_CR90_CRA7 */
549 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
550 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
551 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
552 },
553 },
554 { /* mode#5: 1024 x 768 24Bpp 60Hz */
555 1024, 768, 24, 60,
556 /* Init_MISC */
557 0xEB,
558 { /* Init_SR0_SR4 */
559 0x03, 0x01, 0x0F, 0x03, 0x0E,
560 },
561 { /* Init_SR10_SR24 */
562 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
563 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
564 0xC4, 0x30, 0x02, 0x01, 0x01,
565 },
566 { /* Init_SR30_SR75 */
567 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
568 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
569 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
570 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
571 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
572 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
573 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
574 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
575 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
576 },
577 { /* Init_SR80_SR93 */
578 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
579 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
580 0x00, 0x00, 0x00, 0x00,
581 },
582 { /* Init_SRA0_SRAF */
583 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
584 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
585 },
586 { /* Init_GR00_GR08 */
587 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
588 0xFF,
589 },
590 { /* Init_AR00_AR14 */
591 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
592 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
593 0x41, 0x00, 0x0F, 0x00, 0x00,
594 },
595 { /* Init_CR00_CR18 */
596 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
597 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
598 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
599 0xFF,
600 },
601 { /* Init_CR30_CR4D */
602 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
603 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
604 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
605 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
606 },
607 { /* Init_CR90_CRA7 */
608 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
609 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
610 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
611 },
612 },
613 { /* mode#4: 1024 x 768 32Bpp 60Hz */
614 1024, 768, 32, 60,
615 /* Init_MISC */
616 0xEB,
617 { /* Init_SR0_SR4 */
618 0x03, 0x01, 0x0F, 0x03, 0x0E,
619 },
620 { /* Init_SR10_SR24 */
621 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
622 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
623 0xC4, 0x32, 0x02, 0x01, 0x01,
624 },
625 { /* Init_SR30_SR75 */
626 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
627 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
628 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
629 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
630 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
631 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
632 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
633 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
634 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
635 },
636 { /* Init_SR80_SR93 */
637 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
638 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
639 0x00, 0x00, 0x00, 0x00,
640 },
641 { /* Init_SRA0_SRAF */
642 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
643 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
644 },
645 { /* Init_GR00_GR08 */
646 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
647 0xFF,
648 },
649 { /* Init_AR00_AR14 */
650 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
651 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
652 0x41, 0x00, 0x0F, 0x00, 0x00,
653 },
654 { /* Init_CR00_CR18 */
655 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
656 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
657 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
658 0xFF,
659 },
660 { /* Init_CR30_CR4D */
661 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
662 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
663 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
664 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
665 },
666 { /* Init_CR90_CRA7 */
667 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
668 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
669 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
670 },
671 },
672 { /* mode#6: 320 x 240 16Bpp 60Hz */
673 320, 240, 16, 60,
674 /* Init_MISC */
675 0xEB,
676 { /* Init_SR0_SR4 */
677 0x03, 0x01, 0x0F, 0x03, 0x0E,
678 },
679 { /* Init_SR10_SR24 */
680 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
681 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
682 0xC4, 0x32, 0x02, 0x01, 0x01,
683 },
684 { /* Init_SR30_SR75 */
685 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
686 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
687 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
688 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
689 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
690 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
691 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
692 0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
693 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
694 },
695 { /* Init_SR80_SR93 */
696 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
697 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
698 0x00, 0x00, 0x00, 0x00,
699 },
700 { /* Init_SRA0_SRAF */
701 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
702 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
703 },
704 { /* Init_GR00_GR08 */
705 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
706 0xFF,
707 },
708 { /* Init_AR00_AR14 */
709 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
710 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
711 0x41, 0x00, 0x0F, 0x00, 0x00,
712 },
713 { /* Init_CR00_CR18 */
714 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
715 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
716 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
717 0xFF,
718 },
719 { /* Init_CR30_CR4D */
720 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
721 0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
722 0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
723 0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
724 },
725 { /* Init_CR90_CRA7 */
726 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
727 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
728 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
729 },
730 },
731
732 { /* mode#8: 320 x 240 32Bpp 60Hz */
733 320, 240, 32, 60,
734 /* Init_MISC */
735 0xEB,
736 { /* Init_SR0_SR4 */
737 0x03, 0x01, 0x0F, 0x03, 0x0E,
738 },
739 { /* Init_SR10_SR24 */
740 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
741 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
742 0xC4, 0x32, 0x02, 0x01, 0x01,
743 },
744 { /* Init_SR30_SR75 */
745 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
746 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
747 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
748 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
749 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
750 0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
751 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
752 0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
753 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
754 },
755 { /* Init_SR80_SR93 */
756 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
757 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
758 0x00, 0x00, 0x00, 0x00,
759 },
760 { /* Init_SRA0_SRAF */
761 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
762 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
763 },
764 { /* Init_GR00_GR08 */
765 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
766 0xFF,
767 },
768 { /* Init_AR00_AR14 */
769 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
770 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
771 0x41, 0x00, 0x0F, 0x00, 0x00,
772 },
773 { /* Init_CR00_CR18 */
774 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
775 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
776 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
777 0xFF,
778 },
779 { /* Init_CR30_CR4D */
780 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
781 0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
782 0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
783 0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
784 },
785 { /* Init_CR90_CRA7 */
786 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
787 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
788 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
789 },
790 },
791};
792
793#define numVGAModes (sizeof(VGAMode) / sizeof(struct ModeInit))
diff --git a/drivers/staging/vt6655/Kconfig b/drivers/staging/vt6655/Kconfig
index 825bbc4fc3fa..061e730df2d0 100644
--- a/drivers/staging/vt6655/Kconfig
+++ b/drivers/staging/vt6655/Kconfig
@@ -1,6 +1,6 @@
1config VT6655 1config VT6655
2 tristate "VIA Technologies VT6655 support" 2 tristate "VIA Technologies VT6655 support"
3 depends on PCI 3 depends on PCI && WLAN
4 select WIRELESS_EXT 4 select WIRELESS_EXT
5 select WEXT_PRIV 5 select WEXT_PRIV
6 ---help--- 6 ---help---
diff --git a/drivers/staging/vt6656/Kconfig b/drivers/staging/vt6656/Kconfig
index 87bcd269310c..1055b526c532 100644
--- a/drivers/staging/vt6656/Kconfig
+++ b/drivers/staging/vt6656/Kconfig
@@ -1,6 +1,6 @@
1config VT6656 1config VT6656
2 tristate "VIA Technologies VT6656 support" 2 tristate "VIA Technologies VT6656 support"
3 depends on USB 3 depends on USB && WLAN
4 select WIRELESS_EXT 4 select WIRELESS_EXT
5 select WEXT_PRIV 5 select WEXT_PRIV
6 ---help--- 6 ---help---
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 7d76a7f92a33..aaa70ed57710 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -439,7 +439,7 @@ void free_chunks(imgchunk_t *fchunk, unsigned int *nfchunks)
439 } 439 }
440 } 440 }
441 *nfchunks = 0; 441 *nfchunks = 0;
442 memset(fchunk, 0, sizeof(fchunk)); 442 memset(fchunk, 0, sizeof(*fchunk));
443 443
444} 444}
445 445
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 473aa1a20de9..be3c9b80bc9f 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -44,5 +44,3 @@ obj-y += early/
44 44
45obj-$(CONFIG_USB_ATM) += atm/ 45obj-$(CONFIG_USB_ATM) += atm/
46obj-$(CONFIG_USB_SPEEDTOUCH) += atm/ 46obj-$(CONFIG_USB_SPEEDTOUCH) += atm/
47
48obj-$(CONFIG_USB_ULPI) += otg/
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 6dac3b802d41..0495fa651225 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1597,7 +1597,9 @@ rescan:
1597} 1597}
1598 1598
1599/** 1599/**
1600 * Check whether a new bandwidth setting exceeds the bus bandwidth. 1600 * usb_hcd_alloc_bandwidth - check whether a new bandwidth setting exceeds
1601 * the bus bandwidth
1602 * @udev: target &usb_device
1601 * @new_config: new configuration to install 1603 * @new_config: new configuration to install
1602 * @cur_alt: the current alternate interface setting 1604 * @cur_alt: the current alternate interface setting
1603 * @new_alt: alternate interface setting that is being installed 1605 * @new_alt: alternate interface setting that is being installed
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 06af970e1064..0cec6caf6e9b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1658,12 +1658,12 @@ static inline void announce_device(struct usb_device *udev) { }
1658#endif 1658#endif
1659 1659
1660/** 1660/**
1661 * usb_configure_device_otg - FIXME (usbcore-internal) 1661 * usb_enumerate_device_otg - FIXME (usbcore-internal)
1662 * @udev: newly addressed device (in ADDRESS state) 1662 * @udev: newly addressed device (in ADDRESS state)
1663 * 1663 *
1664 * Do configuration for On-The-Go devices 1664 * Finish enumeration for On-The-Go devices
1665 */ 1665 */
1666static int usb_configure_device_otg(struct usb_device *udev) 1666static int usb_enumerate_device_otg(struct usb_device *udev)
1667{ 1667{
1668 int err = 0; 1668 int err = 0;
1669 1669
@@ -1734,7 +1734,7 @@ fail:
1734 1734
1735 1735
1736/** 1736/**
1737 * usb_configure_device - Detect and probe device intfs/otg (usbcore-internal) 1737 * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
1738 * @udev: newly addressed device (in ADDRESS state) 1738 * @udev: newly addressed device (in ADDRESS state)
1739 * 1739 *
1740 * This is only called by usb_new_device() and usb_authorize_device() 1740 * This is only called by usb_new_device() and usb_authorize_device()
@@ -1745,7 +1745,7 @@ fail:
1745 * the string descriptors, as they will be errored out by the device 1745 * the string descriptors, as they will be errored out by the device
1746 * until it has been authorized. 1746 * until it has been authorized.
1747 */ 1747 */
1748static int usb_configure_device(struct usb_device *udev) 1748static int usb_enumerate_device(struct usb_device *udev)
1749{ 1749{
1750 int err; 1750 int err;
1751 1751
@@ -1769,7 +1769,7 @@ static int usb_configure_device(struct usb_device *udev)
1769 udev->descriptor.iManufacturer); 1769 udev->descriptor.iManufacturer);
1770 udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber); 1770 udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
1771 } 1771 }
1772 err = usb_configure_device_otg(udev); 1772 err = usb_enumerate_device_otg(udev);
1773fail: 1773fail:
1774 return err; 1774 return err;
1775} 1775}
@@ -1779,8 +1779,8 @@ fail:
1779 * usb_new_device - perform initial device setup (usbcore-internal) 1779 * usb_new_device - perform initial device setup (usbcore-internal)
1780 * @udev: newly addressed device (in ADDRESS state) 1780 * @udev: newly addressed device (in ADDRESS state)
1781 * 1781 *
1782 * This is called with devices which have been enumerated, but not yet 1782 * This is called with devices which have been detected but not fully
1783 * configured. The device descriptor is available, but not descriptors 1783 * enumerated. The device descriptor is available, but not descriptors
1784 * for any device configuration. The caller must have locked either 1784 * for any device configuration. The caller must have locked either
1785 * the parent hub (if udev is a normal device) or else the 1785 * the parent hub (if udev is a normal device) or else the
1786 * usb_bus_list_lock (if udev is a root hub). The parent's pointer to 1786 * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
@@ -1803,8 +1803,8 @@ int usb_new_device(struct usb_device *udev)
1803 if (udev->parent) 1803 if (udev->parent)
1804 usb_autoresume_device(udev->parent); 1804 usb_autoresume_device(udev->parent);
1805 1805
1806 usb_detect_quirks(udev); /* Determine quirks */ 1806 usb_detect_quirks(udev);
1807 err = usb_configure_device(udev); /* detect & probe dev/intfs */ 1807 err = usb_enumerate_device(udev); /* Read descriptors */
1808 if (err < 0) 1808 if (err < 0)
1809 goto fail; 1809 goto fail;
1810 dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n", 1810 dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
@@ -1849,21 +1849,23 @@ fail:
1849 */ 1849 */
1850int usb_deauthorize_device(struct usb_device *usb_dev) 1850int usb_deauthorize_device(struct usb_device *usb_dev)
1851{ 1851{
1852 unsigned cnt;
1853 usb_lock_device(usb_dev); 1852 usb_lock_device(usb_dev);
1854 if (usb_dev->authorized == 0) 1853 if (usb_dev->authorized == 0)
1855 goto out_unauthorized; 1854 goto out_unauthorized;
1855
1856 usb_dev->authorized = 0; 1856 usb_dev->authorized = 0;
1857 usb_set_configuration(usb_dev, -1); 1857 usb_set_configuration(usb_dev, -1);
1858
1859 kfree(usb_dev->product);
1858 usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL); 1860 usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL);
1861 kfree(usb_dev->manufacturer);
1859 usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL); 1862 usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL);
1863 kfree(usb_dev->serial);
1860 usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL); 1864 usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL);
1861 kfree(usb_dev->config); 1865
1862 usb_dev->config = NULL; 1866 usb_destroy_configuration(usb_dev);
1863 for (cnt = 0; cnt < usb_dev->descriptor.bNumConfigurations; cnt++)
1864 kfree(usb_dev->rawdescriptors[cnt]);
1865 usb_dev->descriptor.bNumConfigurations = 0; 1867 usb_dev->descriptor.bNumConfigurations = 0;
1866 kfree(usb_dev->rawdescriptors); 1868
1867out_unauthorized: 1869out_unauthorized:
1868 usb_unlock_device(usb_dev); 1870 usb_unlock_device(usb_dev);
1869 return 0; 1871 return 0;
@@ -1873,15 +1875,11 @@ out_unauthorized:
1873int usb_authorize_device(struct usb_device *usb_dev) 1875int usb_authorize_device(struct usb_device *usb_dev)
1874{ 1876{
1875 int result = 0, c; 1877 int result = 0, c;
1878
1876 usb_lock_device(usb_dev); 1879 usb_lock_device(usb_dev);
1877 if (usb_dev->authorized == 1) 1880 if (usb_dev->authorized == 1)
1878 goto out_authorized; 1881 goto out_authorized;
1879 kfree(usb_dev->product); 1882
1880 usb_dev->product = NULL;
1881 kfree(usb_dev->manufacturer);
1882 usb_dev->manufacturer = NULL;
1883 kfree(usb_dev->serial);
1884 usb_dev->serial = NULL;
1885 result = usb_autoresume_device(usb_dev); 1883 result = usb_autoresume_device(usb_dev);
1886 if (result < 0) { 1884 if (result < 0) {
1887 dev_err(&usb_dev->dev, 1885 dev_err(&usb_dev->dev,
@@ -1894,10 +1892,18 @@ int usb_authorize_device(struct usb_device *usb_dev)
1894 "authorization: %d\n", result); 1892 "authorization: %d\n", result);
1895 goto error_device_descriptor; 1893 goto error_device_descriptor;
1896 } 1894 }
1895
1896 kfree(usb_dev->product);
1897 usb_dev->product = NULL;
1898 kfree(usb_dev->manufacturer);
1899 usb_dev->manufacturer = NULL;
1900 kfree(usb_dev->serial);
1901 usb_dev->serial = NULL;
1902
1897 usb_dev->authorized = 1; 1903 usb_dev->authorized = 1;
1898 result = usb_configure_device(usb_dev); 1904 result = usb_enumerate_device(usb_dev);
1899 if (result < 0) 1905 if (result < 0)
1900 goto error_configure; 1906 goto error_enumerate;
1901 /* Choose and set the configuration. This registers the interfaces 1907 /* Choose and set the configuration. This registers the interfaces
1902 * with the driver core and lets interface drivers bind to them. 1908 * with the driver core and lets interface drivers bind to them.
1903 */ 1909 */
@@ -1912,8 +1918,10 @@ int usb_authorize_device(struct usb_device *usb_dev)
1912 } 1918 }
1913 } 1919 }
1914 dev_info(&usb_dev->dev, "authorized to connect\n"); 1920 dev_info(&usb_dev->dev, "authorized to connect\n");
1915error_configure: 1921
1922error_enumerate:
1916error_device_descriptor: 1923error_device_descriptor:
1924 usb_autosuspend_device(usb_dev);
1917error_autoresume: 1925error_autoresume:
1918out_authorized: 1926out_authorized:
1919 usb_unlock_device(usb_dev); // complements locktree 1927 usb_unlock_device(usb_dev); // complements locktree
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 15477008b631..485edf937f25 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -82,9 +82,13 @@ static ssize_t show_##name(struct device *dev, \
82 struct device_attribute *attr, char *buf) \ 82 struct device_attribute *attr, char *buf) \
83{ \ 83{ \
84 struct usb_device *udev; \ 84 struct usb_device *udev; \
85 int retval; \
85 \ 86 \
86 udev = to_usb_device(dev); \ 87 udev = to_usb_device(dev); \
87 return sprintf(buf, "%s\n", udev->name); \ 88 usb_lock_device(udev); \
89 retval = sprintf(buf, "%s\n", udev->name); \
90 usb_unlock_device(udev); \
91 return retval; \
88} \ 92} \
89static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); 93static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
90 94
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 2fb42043b305..0daff0d968ba 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -66,9 +66,9 @@ MODULE_PARM_DESC(autosuspend, "default autosuspend delay");
66/** 66/**
67 * usb_find_alt_setting() - Given a configuration, find the alternate setting 67 * usb_find_alt_setting() - Given a configuration, find the alternate setting
68 * for the given interface. 68 * for the given interface.
69 * @config - the configuration to search (not necessarily the current config). 69 * @config: the configuration to search (not necessarily the current config).
70 * @iface_num - interface number to search in 70 * @iface_num: interface number to search in
71 * @alt_num - alternate interface setting number to search for. 71 * @alt_num: alternate interface setting number to search for.
72 * 72 *
73 * Search the configuration's interface cache for the given alt setting. 73 * Search the configuration's interface cache for the given alt setting.
74 */ 74 */
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 1206a26ef893..2958a1271b20 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -613,7 +613,7 @@ err:
613} 613}
614EXPORT_SYMBOL_GPL(dbgp_external_startup); 614EXPORT_SYMBOL_GPL(dbgp_external_startup);
615 615
616static int __init ehci_reset_port(int port) 616static int ehci_reset_port(int port)
617{ 617{
618 u32 portsc; 618 u32 portsc;
619 u32 delay_time, delay; 619 u32 delay_time, delay;
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
index 58f220323847..a62af7b59094 100644
--- a/drivers/usb/gadget/audio.c
+++ b/drivers/usb/gadget/audio.c
@@ -158,6 +158,7 @@ fail:
158 158
159static int __exit audio_unbind(struct usb_composite_dev *cdev) 159static int __exit audio_unbind(struct usb_composite_dev *cdev)
160{ 160{
161 gaudio_cleanup();
161 return 0; 162 return 0;
162} 163}
163 164
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index c43c89ffa2c8..df77f6131c73 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -56,13 +56,16 @@ static struct usb_interface_descriptor ac_interface_desc __initdata = {
56DECLARE_UAC_AC_HEADER_DESCRIPTOR(2); 56DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
57 57
58#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES) 58#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
59/* 1 input terminal, 1 output terminal and 1 feature unit */
60#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \
61 + UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0))
59/* B.3.2 Class-Specific AC Interface Descriptor */ 62/* B.3.2 Class-Specific AC Interface Descriptor */
60static struct uac_ac_header_descriptor_2 ac_header_desc = { 63static struct uac_ac_header_descriptor_2 ac_header_desc = {
61 .bLength = UAC_DT_AC_HEADER_LENGTH, 64 .bLength = UAC_DT_AC_HEADER_LENGTH,
62 .bDescriptorType = USB_DT_CS_INTERFACE, 65 .bDescriptorType = USB_DT_CS_INTERFACE,
63 .bDescriptorSubtype = UAC_HEADER, 66 .bDescriptorSubtype = UAC_HEADER,
64 .bcdADC = __constant_cpu_to_le16(0x0100), 67 .bcdADC = __constant_cpu_to_le16(0x0100),
65 .wTotalLength = __constant_cpu_to_le16(UAC_DT_AC_HEADER_LENGTH), 68 .wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
66 .bInCollection = F_AUDIO_NUM_INTERFACES, 69 .bInCollection = F_AUDIO_NUM_INTERFACES,
67 .baInterfaceNr = { 70 .baInterfaceNr = {
68 [0] = F_AUDIO_AC_INTERFACE, 71 [0] = F_AUDIO_AC_INTERFACE,
@@ -252,12 +255,12 @@ static struct f_audio_buf *f_audio_buffer_alloc(int buf_size)
252 255
253 copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC); 256 copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
254 if (!copy_buf) 257 if (!copy_buf)
255 return (struct f_audio_buf *)-ENOMEM; 258 return ERR_PTR(-ENOMEM);
256 259
257 copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC); 260 copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
258 if (!copy_buf->buf) { 261 if (!copy_buf->buf) {
259 kfree(copy_buf); 262 kfree(copy_buf);
260 return (struct f_audio_buf *)-ENOMEM; 263 return ERR_PTR(-ENOMEM);
261 } 264 }
262 265
263 return copy_buf; 266 return copy_buf;
@@ -332,7 +335,7 @@ static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
332 list_add_tail(&copy_buf->list, &audio->play_queue); 335 list_add_tail(&copy_buf->list, &audio->play_queue);
333 schedule_work(&audio->playback_work); 336 schedule_work(&audio->playback_work);
334 copy_buf = f_audio_buffer_alloc(audio_buf_size); 337 copy_buf = f_audio_buffer_alloc(audio_buf_size);
335 if (copy_buf < 0) 338 if (IS_ERR(copy_buf))
336 return -ENOMEM; 339 return -ENOMEM;
337 } 340 }
338 341
@@ -576,6 +579,8 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
576 usb_ep_enable(out_ep, audio->out_desc); 579 usb_ep_enable(out_ep, audio->out_desc);
577 out_ep->driver_data = audio; 580 out_ep->driver_data = audio;
578 audio->copy_buf = f_audio_buffer_alloc(audio_buf_size); 581 audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
582 if (IS_ERR(audio->copy_buf))
583 return -ENOMEM;
579 584
580 /* 585 /*
581 * allocate a bunch of read buffers 586 * allocate a bunch of read buffers
@@ -787,7 +792,7 @@ int __init audio_bind_config(struct usb_configuration *c)
787 return status; 792 return status;
788 793
789add_fail: 794add_fail:
790 gaudio_cleanup(&audio->card); 795 gaudio_cleanup();
791setup_fail: 796setup_fail:
792 kfree(audio); 797 kfree(audio);
793 return status; 798 return status;
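
The f_audio.c change replaces the `(struct f_audio_buf *)-ENOMEM` cast, which callers could not reliably tell apart from a valid pointer, with the standard ERR_PTR()/IS_ERR()/PTR_ERR() encoding. A minimal self-contained sketch of that idiom (the demo_buf type and helpers are illustrative, not from the driver):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_buf {
        u8 *data;
};

static struct demo_buf *demo_buf_alloc(size_t size)
{
        struct demo_buf *b = kzalloc(sizeof(*b), GFP_ATOMIC);

        if (!b)
                return ERR_PTR(-ENOMEM);        /* encode -ENOMEM in the pointer */

        b->data = kzalloc(size, GFP_ATOMIC);
        if (!b->data) {
                kfree(b);
                return ERR_PTR(-ENOMEM);
        }
        return b;
}

static int demo_buf_user(size_t size)
{
        struct demo_buf *b = demo_buf_alloc(size);

        if (IS_ERR(b))
                return PTR_ERR(b);              /* decode the errno back out */
        /* ... use b ... */
        kfree(b->data);
        kfree(b);
        return 0;
}
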
diff --git a/drivers/usb/gadget/u_audio.c b/drivers/usb/gadget/u_audio.c
index 8252595d619d..35e0930f5bbb 100644
--- a/drivers/usb/gadget/u_audio.c
+++ b/drivers/usb/gadget/u_audio.c
@@ -288,6 +288,7 @@ static int gaudio_close_snd_dev(struct gaudio *gau)
288 return 0; 288 return 0;
289} 289}
290 290
291static struct gaudio *the_card;
291/** 292/**
 292 * gaudio_setup - set up the ALSA interface and prepare for USB transfer 293 * gaudio_setup - set up the ALSA interface and prepare for USB transfer
293 * 294 *
@@ -303,6 +304,9 @@ int __init gaudio_setup(struct gaudio *card)
303 if (ret) 304 if (ret)
304 ERROR(card, "we need at least one control device\n"); 305 ERROR(card, "we need at least one control device\n");
305 306
307 if (!the_card)
308 the_card = card;
309
306 return ret; 310 return ret;
307 311
308} 312}
@@ -312,9 +316,11 @@ int __init gaudio_setup(struct gaudio *card)
312 * 316 *
313 * This is called to free all resources allocated by @gaudio_setup(). 317 * This is called to free all resources allocated by @gaudio_setup().
314 */ 318 */
315void gaudio_cleanup(struct gaudio *card) 319void gaudio_cleanup(void)
316{ 320{
317 if (card) 321 if (the_card) {
318 gaudio_close_snd_dev(card); 322 gaudio_close_snd_dev(the_card);
323 the_card = NULL;
324 }
319} 325}
320 326
diff --git a/drivers/usb/gadget/u_audio.h b/drivers/usb/gadget/u_audio.h
index cc8d159c648a..08ffce3298e6 100644
--- a/drivers/usb/gadget/u_audio.h
+++ b/drivers/usb/gadget/u_audio.h
@@ -51,6 +51,6 @@ struct gaudio {
51}; 51};
52 52
53int gaudio_setup(struct gaudio *card); 53int gaudio_setup(struct gaudio *card);
54void gaudio_cleanup(struct gaudio *card); 54void gaudio_cleanup(void);
55 55
56#endif /* __U_AUDIO_H */ 56#endif /* __U_AUDIO_H */
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 12f1ad2fd0e8..74d07f4e8b7d 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -37,7 +37,7 @@
37#include <linux/platform_device.h> 37#include <linux/platform_device.h>
38#include <linux/clk.h> 38#include <linux/clk.h>
39#include <linux/gpio.h> 39#include <linux/gpio.h>
40#include <mach/usb.h> 40#include <plat/usb.h>
41 41
42/* 42/*
43 * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES 43 * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index 00a29855d0c4..ff43747a614f 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -37,7 +37,7 @@ static void recycle_frame(struct fhci_usb *usb, struct packet *pkt)
37 pkt->info = 0; 37 pkt->info = 0;
38 pkt->priv_data = NULL; 38 pkt->priv_data = NULL;
39 39
40 cq_put(usb->ep0->empty_frame_Q, pkt); 40 cq_put(&usb->ep0->empty_frame_Q, pkt);
41} 41}
42 42
43/* confirm submitted packet */ 43/* confirm submitted packet */
@@ -57,7 +57,7 @@ void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt)
57 if ((td->data + td->actual_len) && trans_len) 57 if ((td->data + td->actual_len) && trans_len)
58 memcpy(td->data + td->actual_len, pkt->data, 58 memcpy(td->data + td->actual_len, pkt->data,
59 trans_len); 59 trans_len);
60 cq_put(usb->ep0->dummy_packets_Q, pkt->data); 60 cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
61 } 61 }
62 62
63 recycle_frame(usb, pkt); 63 recycle_frame(usb, pkt);
@@ -213,7 +213,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
213 } 213 }
214 214
215 /* update frame object fields before transmitting */ 215 /* update frame object fields before transmitting */
216 pkt = cq_get(usb->ep0->empty_frame_Q); 216 pkt = cq_get(&usb->ep0->empty_frame_Q);
217 if (!pkt) { 217 if (!pkt) {
218 fhci_dbg(usb->fhci, "there is no empty frame\n"); 218 fhci_dbg(usb->fhci, "there is no empty frame\n");
219 return -1; 219 return -1;
@@ -222,7 +222,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
222 222
223 pkt->info = 0; 223 pkt->info = 0;
224 if (data == NULL) { 224 if (data == NULL) {
225 data = cq_get(usb->ep0->dummy_packets_Q); 225 data = cq_get(&usb->ep0->dummy_packets_Q);
226 BUG_ON(!data); 226 BUG_ON(!data);
227 pkt->info = PKT_DUMMY_PACKET; 227 pkt->info = PKT_DUMMY_PACKET;
228 } 228 }
@@ -246,7 +246,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
246 list_del_init(&td->frame_lh); 246 list_del_init(&td->frame_lh);
247 td->status = USB_TD_OK; 247 td->status = USB_TD_OK;
248 if (pkt->info & PKT_DUMMY_PACKET) 248 if (pkt->info & PKT_DUMMY_PACKET)
249 cq_put(usb->ep0->dummy_packets_Q, pkt->data); 249 cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
250 recycle_frame(usb, pkt); 250 recycle_frame(usb, pkt);
251 usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD); 251 usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD);
252 fhci_err(usb->fhci, "host transaction failed\n"); 252 fhci_err(usb->fhci, "host transaction failed\n");
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index b40332290319..d224ab467a40 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -106,33 +106,33 @@ void fhci_ep0_free(struct fhci_usb *usb)
106 cpm_muram_free(cpm_muram_offset(ep->td_base)); 106 cpm_muram_free(cpm_muram_offset(ep->td_base));
107 107
108 if (ep->conf_frame_Q) { 108 if (ep->conf_frame_Q) {
109 size = cq_howmany(ep->conf_frame_Q); 109 size = cq_howmany(&ep->conf_frame_Q);
110 for (; size; size--) { 110 for (; size; size--) {
111 struct packet *pkt = cq_get(ep->conf_frame_Q); 111 struct packet *pkt = cq_get(&ep->conf_frame_Q);
112 112
113 kfree(pkt); 113 kfree(pkt);
114 } 114 }
115 cq_delete(ep->conf_frame_Q); 115 cq_delete(&ep->conf_frame_Q);
116 } 116 }
117 117
118 if (ep->empty_frame_Q) { 118 if (ep->empty_frame_Q) {
119 size = cq_howmany(ep->empty_frame_Q); 119 size = cq_howmany(&ep->empty_frame_Q);
120 for (; size; size--) { 120 for (; size; size--) {
121 struct packet *pkt = cq_get(ep->empty_frame_Q); 121 struct packet *pkt = cq_get(&ep->empty_frame_Q);
122 122
123 kfree(pkt); 123 kfree(pkt);
124 } 124 }
125 cq_delete(ep->empty_frame_Q); 125 cq_delete(&ep->empty_frame_Q);
126 } 126 }
127 127
128 if (ep->dummy_packets_Q) { 128 if (ep->dummy_packets_Q) {
129 size = cq_howmany(ep->dummy_packets_Q); 129 size = cq_howmany(&ep->dummy_packets_Q);
130 for (; size; size--) { 130 for (; size; size--) {
131 u8 *buff = cq_get(ep->dummy_packets_Q); 131 u8 *buff = cq_get(&ep->dummy_packets_Q);
132 132
133 kfree(buff); 133 kfree(buff);
134 } 134 }
135 cq_delete(ep->dummy_packets_Q); 135 cq_delete(&ep->dummy_packets_Q);
136 } 136 }
137 137
138 kfree(ep); 138 kfree(ep);
@@ -175,10 +175,9 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
175 ep->td_base = cpm_muram_addr(ep_offset); 175 ep->td_base = cpm_muram_addr(ep_offset);
176 176
177 /* zero all queue pointers */ 177 /* zero all queue pointers */
178 ep->conf_frame_Q = cq_new(ring_len + 2); 178 if (cq_new(&ep->conf_frame_Q, ring_len + 2) ||
179 ep->empty_frame_Q = cq_new(ring_len + 2); 179 cq_new(&ep->empty_frame_Q, ring_len + 2) ||
180 ep->dummy_packets_Q = cq_new(ring_len + 2); 180 cq_new(&ep->dummy_packets_Q, ring_len + 2)) {
181 if (!ep->conf_frame_Q || !ep->empty_frame_Q || !ep->dummy_packets_Q) {
182 err_for = "frame_queues"; 181 err_for = "frame_queues";
183 goto err; 182 goto err;
184 } 183 }
@@ -199,8 +198,8 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
199 err_for = "buffer"; 198 err_for = "buffer";
200 goto err; 199 goto err;
201 } 200 }
202 cq_put(ep->empty_frame_Q, pkt); 201 cq_put(&ep->empty_frame_Q, pkt);
203 cq_put(ep->dummy_packets_Q, buff); 202 cq_put(&ep->dummy_packets_Q, buff);
204 } 203 }
205 204
206 /* we put the endpoint parameter RAM right behind the TD ring */ 205 /* we put the endpoint parameter RAM right behind the TD ring */
@@ -319,7 +318,7 @@ static void fhci_td_transaction_confirm(struct fhci_usb *usb)
319 if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W)) 318 if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W))
320 continue; 319 continue;
321 320
322 pkt = cq_get(ep->conf_frame_Q); 321 pkt = cq_get(&ep->conf_frame_Q);
323 if (!pkt) 322 if (!pkt)
324 fhci_err(usb->fhci, "no frame to confirm\n"); 323 fhci_err(usb->fhci, "no frame to confirm\n");
325 324
@@ -460,9 +459,9 @@ u32 fhci_host_transaction(struct fhci_usb *usb,
460 out_be16(&td->length, pkt->len); 459 out_be16(&td->length, pkt->len);
461 460
462 /* put the frame to the confirmation queue */ 461 /* put the frame to the confirmation queue */
463 cq_put(ep->conf_frame_Q, pkt); 462 cq_put(&ep->conf_frame_Q, pkt);
464 463
465 if (cq_howmany(ep->conf_frame_Q) == 1) 464 if (cq_howmany(&ep->conf_frame_Q) == 1)
466 out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO); 465 out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO);
467 466
468 return 0; 467 return 0;
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 7116284ed21a..72dae1c5ab38 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -423,9 +423,9 @@ struct endpoint {
423 struct usb_td __iomem *td_base; /* first TD in the ring */ 423 struct usb_td __iomem *td_base; /* first TD in the ring */
424 struct usb_td __iomem *conf_td; /* next TD for confirm after transac */ 424 struct usb_td __iomem *conf_td; /* next TD for confirm after transac */
425 struct usb_td __iomem *empty_td;/* next TD for new transaction req. */ 425 struct usb_td __iomem *empty_td;/* next TD for new transaction req. */
426 struct kfifo *empty_frame_Q; /* Empty frames list to use */ 426 struct kfifo empty_frame_Q; /* Empty frames list to use */
427 struct kfifo *conf_frame_Q; /* frames passed to TDs,waiting for tx */ 427 struct kfifo conf_frame_Q; /* frames passed to TDs,waiting for tx */
 428 struct kfifo *dummy_packets_Q;/* dummy packets for the CRC overrun */ 428 struct kfifo dummy_packets_Q;/* dummy packets for the CRC overrun */
429 429
430 bool already_pushed_dummy_bd; 430 bool already_pushed_dummy_bd;
431}; 431};
@@ -493,9 +493,9 @@ static inline struct usb_hcd *fhci_to_hcd(struct fhci_hcd *fhci)
493} 493}
494 494
495/* fifo of pointers */ 495/* fifo of pointers */
496static inline struct kfifo *cq_new(int size) 496static inline int cq_new(struct kfifo *fifo, int size)
497{ 497{
498 return kfifo_alloc(size * sizeof(void *), GFP_KERNEL, NULL); 498 return kfifo_alloc(fifo, size * sizeof(void *), GFP_KERNEL);
499} 499}
500 500
501static inline void cq_delete(struct kfifo *kfifo) 501static inline void cq_delete(struct kfifo *kfifo)
@@ -505,19 +505,19 @@ static inline void cq_delete(struct kfifo *kfifo)
505 505
506static inline unsigned int cq_howmany(struct kfifo *kfifo) 506static inline unsigned int cq_howmany(struct kfifo *kfifo)
507{ 507{
508 return __kfifo_len(kfifo) / sizeof(void *); 508 return kfifo_len(kfifo) / sizeof(void *);
509} 509}
510 510
511static inline int cq_put(struct kfifo *kfifo, void *p) 511static inline int cq_put(struct kfifo *kfifo, void *p)
512{ 512{
513 return __kfifo_put(kfifo, (void *)&p, sizeof(p)); 513 return kfifo_in(kfifo, (void *)&p, sizeof(p));
514} 514}
515 515
516static inline void *cq_get(struct kfifo *kfifo) 516static inline void *cq_get(struct kfifo *kfifo)
517{ 517{
518 void *p = NULL; 518 void *p = NULL;
519 519
520 __kfifo_get(kfifo, (void *)&p, sizeof(p)); 520 kfifo_out(kfifo, (void *)&p, sizeof(p));
521 return p; 521 return p;
522} 522}
523 523
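
The fhci hunks above follow the kfifo API rework that landed for 2.6.33: the fifo is now embedded in its owning structure instead of being returned from kfifo_alloc(), the spinlock argument is gone, and __kfifo_put()/__kfifo_get()/__kfifo_len() become kfifo_in()/kfifo_out()/kfifo_len(). A minimal pointer-FIFO sketch on that API, mirroring the cq_* helpers (names are illustrative; this assumes the 2.6.33-era three-argument kfifo_alloc()):

#include <linux/kfifo.h>
#include <linux/slab.h>

struct pkt_queue {
        struct kfifo fifo;              /* embedded, not a pointer */
};

static int pkt_queue_init(struct pkt_queue *q, int entries)
{
        /* returns 0 on success, negative errno on failure */
        return kfifo_alloc(&q->fifo, entries * sizeof(void *), GFP_KERNEL);
}

static int pkt_queue_put(struct pkt_queue *q, void *p)
{
        /* store the pointer value itself in the fifo */
        return kfifo_in(&q->fifo, (void *)&p, sizeof(p));
}

static void *pkt_queue_get(struct pkt_queue *q)
{
        void *p = NULL;

        kfifo_out(&q->fifo, (void *)&p, sizeof(p));
        return p;
}

static void pkt_queue_free(struct pkt_queue *q)
{
        kfifo_free(&q->fifo);
}
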
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 1d8e39a557d9..1eb9e4162cc6 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -60,6 +60,7 @@
60static struct usb_device_id appledisplay_table [] = { 60static struct usb_device_id appledisplay_table [] = {
61 { APPLEDISPLAY_DEVICE(0x9218) }, 61 { APPLEDISPLAY_DEVICE(0x9218) },
62 { APPLEDISPLAY_DEVICE(0x9219) }, 62 { APPLEDISPLAY_DEVICE(0x9219) },
63 { APPLEDISPLAY_DEVICE(0x921c) },
63 { APPLEDISPLAY_DEVICE(0x921d) }, 64 { APPLEDISPLAY_DEVICE(0x921d) },
64 65
65 /* Terminating entry */ 66 /* Terminating entry */
@@ -72,8 +73,8 @@ struct appledisplay {
72 struct usb_device *udev; /* usb device */ 73 struct usb_device *udev; /* usb device */
73 struct urb *urb; /* usb request block */ 74 struct urb *urb; /* usb request block */
74 struct backlight_device *bd; /* backlight device */ 75 struct backlight_device *bd; /* backlight device */
75 char *urbdata; /* interrupt URB data buffer */ 76 u8 *urbdata; /* interrupt URB data buffer */
76 char *msgdata; /* control message data buffer */ 77 u8 *msgdata; /* control message data buffer */
77 78
78 struct delayed_work work; 79 struct delayed_work work;
79 int button_pressed; 80 int button_pressed;
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index 602ee05ba9ff..59860b328534 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -167,7 +167,7 @@ static int emi62_load_firmware (struct usb_device *dev)
167 err("%s - error loading firmware: error = %d", __func__, err); 167 err("%s - error loading firmware: error = %d", __func__, err);
168 goto wraperr; 168 goto wraperr;
169 } 169 }
170 } while (i > 0); 170 } while (rec);
171 171
172 /* Assert reset (stop the CPU in the EMI) */ 172 /* Assert reset (stop the CPU in the EMI) */
173 err = emi62_set_reset(dev,1); 173 err = emi62_set_reset(dev,1);
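
The emi62 fix changes the loop condition from the leftover byte counter `i` to the record pointer itself, so the firmware download continues exactly until the last record has been sent. Assuming the image is in the kernel's ihex format, which is what the `rec` variable suggests, the record walk looks roughly like this sketch (the function name is illustrative):

#include <linux/firmware.h>
#include <linux/ihex.h>

/* illustrative: send every record of an ihex firmware image */
static int load_all_records(const struct firmware *fw)
{
        const struct ihex_binrec *rec = (const struct ihex_binrec *)fw->data;

        while (rec) {
                /* ... write be16_to_cpu(rec->len) bytes of rec->data
                 *     to be32_to_cpu(rec->addr) on the device ... */
                rec = ihex_next_binrec(rec);    /* NULL after the last record */
        }
        return 0;
}
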
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index fe4934d9602c..ad26e6569665 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -29,6 +29,8 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
29{ 29{
30 void __iomem *fifo = hw_ep->fifo; 30 void __iomem *fifo = hw_ep->fifo;
31 void __iomem *epio = hw_ep->regs; 31 void __iomem *epio = hw_ep->regs;
32 u8 epnum = hw_ep->epnum;
33 u16 dma_reg = 0;
32 34
33 prefetch((u8 *)src); 35 prefetch((u8 *)src);
34 36
@@ -39,67 +41,113 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
39 41
40 dump_fifo_data(src, len); 42 dump_fifo_data(src, len);
41 43
42 if (unlikely((unsigned long)src & 0x01)) 44 if (!ANOMALY_05000380 && epnum != 0) {
43 outsw_8((unsigned long)fifo, src, 45 flush_dcache_range((unsigned int)src,
44 len & 0x01 ? (len >> 1) + 1 : len >> 1); 46 (unsigned int)(src + len));
45 else 47
46 outsw((unsigned long)fifo, src, 48 /* Setup DMA address register */
47 len & 0x01 ? (len >> 1) + 1 : len >> 1); 49 dma_reg = (u16) ((u32) src & 0xFFFF);
48} 50 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
51 SSYNC();
52
53 dma_reg = (u16) (((u32) src >> 16) & 0xFFFF);
54 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
55 SSYNC();
56
57 /* Setup DMA count register */
58 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len);
59 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0);
60 SSYNC();
61
62 /* Enable the DMA */
63 dma_reg = (epnum << 4) | DMA_ENA | INT_ENA | DIRECTION;
64 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
65 SSYNC();
66
 67 /* Wait for completion */
68 while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
69 cpu_relax();
70
71 /* acknowledge dma interrupt */
72 bfin_write_USB_DMA_INTERRUPT(1 << epnum);
73 SSYNC();
74
75 /* Reset DMA */
76 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0);
77 SSYNC();
78 } else {
79 SSYNC();
80
81 if (unlikely((unsigned long)src & 0x01))
82 outsw_8((unsigned long)fifo, src,
83 len & 0x01 ? (len >> 1) + 1 : len >> 1);
84 else
85 outsw((unsigned long)fifo, src,
86 len & 0x01 ? (len >> 1) + 1 : len >> 1);
49 87
88 }
89}
50/* 90/*
51 * Unload an endpoint's FIFO 91 * Unload an endpoint's FIFO
52 */ 92 */
53void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) 93void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
54{ 94{
55 void __iomem *fifo = hw_ep->fifo; 95 void __iomem *fifo = hw_ep->fifo;
56
57#ifdef CONFIG_BF52x
58 u8 epnum = hw_ep->epnum; 96 u8 epnum = hw_ep->epnum;
59 u16 dma_reg = 0; 97 u16 dma_reg = 0;
60 98
61 invalidate_dcache_range((unsigned int)dst, 99 if (ANOMALY_05000467 && epnum != 0) {
62 (unsigned int)(dst + len));
63 100
64 /* Setup DMA address register */ 101 invalidate_dcache_range((unsigned int)dst,
65 dma_reg = (u16) ((u32) dst & 0xFFFF); 102 (unsigned int)(dst + len));
66 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
67 SSYNC();
68 103
69 dma_reg = (u16) (((u32) dst >> 16) & 0xFFFF); 104 /* Setup DMA address register */
70 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg); 105 dma_reg = (u16) ((u32) dst & 0xFFFF);
71 SSYNC(); 106 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
107 SSYNC();
72 108
73 /* Setup DMA count register */ 109 dma_reg = (u16) (((u32) dst >> 16) & 0xFFFF);
74 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len); 110 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
75 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0); 111 SSYNC();
76 SSYNC();
77 112
78 /* Enable the DMA */ 113 /* Setup DMA count register */
79 dma_reg = (epnum << 4) | DMA_ENA | INT_ENA; 114 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len);
80 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg); 115 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0);
81 SSYNC(); 116 SSYNC();
82 117
83 /* Wait for compelete */ 118 /* Enable the DMA */
84 while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum))) 119 dma_reg = (epnum << 4) | DMA_ENA | INT_ENA;
85 cpu_relax(); 120 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
121 SSYNC();
86 122
 87 /* acknowledge dma interrupt */ 123 /* Wait for completion */
88 bfin_write_USB_DMA_INTERRUPT(1 << epnum); 124 while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
89 SSYNC(); 125 cpu_relax();
90 126
91 /* Reset DMA */ 127 /* acknowledge dma interrupt */
92 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0); 128 bfin_write_USB_DMA_INTERRUPT(1 << epnum);
93 SSYNC(); 129 SSYNC();
94#else
95 if (unlikely((unsigned long)dst & 0x01))
96 insw_8((unsigned long)fifo, dst,
97 len & 0x01 ? (len >> 1) + 1 : len >> 1);
98 else
99 insw((unsigned long)fifo, dst,
100 len & 0x01 ? (len >> 1) + 1 : len >> 1);
101#endif
102 130
131 /* Reset DMA */
132 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0);
133 SSYNC();
134 } else {
135 SSYNC();
 136 /* Read the last byte of a packet with odd size from address fifo + 4
 137 * to trigger a 1-byte access to the EP0 FIFO.
138 */
139 if (len == 1)
140 *dst = (u8)inw((unsigned long)fifo + 4);
141 else {
142 if (unlikely((unsigned long)dst & 0x01))
143 insw_8((unsigned long)fifo, dst, len >> 1);
144 else
145 insw((unsigned long)fifo, dst, len >> 1);
146
147 if (len & 0x01)
148 *(dst + len - 1) = (u8)inw((unsigned long)fifo + 4);
149 }
150 }
103 DBG(4, "%cX ep%d fifo %p count %d buf %p\n", 151 DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
104 'R', hw_ep->epnum, fifo, len, dst); 152 'R', hw_ep->epnum, fifo, len, dst);
105 153
diff --git a/drivers/usb/musb/blackfin.h b/drivers/usb/musb/blackfin.h
index 10b7d7584f4b..bd9352a2ef2a 100644
--- a/drivers/usb/musb/blackfin.h
+++ b/drivers/usb/musb/blackfin.h
@@ -69,7 +69,6 @@ static void dump_fifo_data(u8 *buf, u16 len)
69#define dump_fifo_data(buf, len) do {} while (0) 69#define dump_fifo_data(buf, len) do {} while (0)
70#endif 70#endif
71 71
72#ifdef CONFIG_BF52x
73 72
74#define USB_DMA_BASE USB_DMA_INTERRUPT 73#define USB_DMA_BASE USB_DMA_INTERRUPT
75#define USB_DMAx_CTRL 0x04 74#define USB_DMAx_CTRL 0x04
@@ -79,7 +78,6 @@ static void dump_fifo_data(u8 *buf, u16 len)
79#define USB_DMAx_COUNT_HIGH 0x14 78#define USB_DMAx_COUNT_HIGH 0x14
80 79
81#define USB_DMA_REG(ep, reg) (USB_DMA_BASE + 0x20 * ep + reg) 80#define USB_DMA_REG(ep, reg) (USB_DMA_BASE + 0x20 * ep + reg)
82#endif
83 81
84/* Almost 1 second */ 82/* Almost 1 second */
85#define TIMER_DELAY (1 * HZ) 83#define TIMER_DELAY (1 * HZ)
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index ef2332a9941d..a44a450c860d 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1154,8 +1154,11 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
1154 struct musb_hw_ep *hw_ep = NULL; 1154 struct musb_hw_ep *hw_ep = NULL;
1155 u32 rx, tx; 1155 u32 rx, tx;
1156 int i, index; 1156 int i, index;
1157 unsigned long flags;
1157 1158
1158 cppi = container_of(musb->dma_controller, struct cppi, controller); 1159 cppi = container_of(musb->dma_controller, struct cppi, controller);
1160 if (cppi->irq)
1161 spin_lock_irqsave(&musb->lock, flags);
1159 1162
1160 tibase = musb->ctrl_base; 1163 tibase = musb->ctrl_base;
1161 1164
@@ -1285,6 +1288,9 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
1285 /* write to CPPI EOI register to re-enable interrupts */ 1288 /* write to CPPI EOI register to re-enable interrupts */
1286 musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0); 1289 musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
1287 1290
1291 if (cppi->irq)
1292 spin_unlock_irqrestore(&musb->lock, flags);
1293
1288 return IRQ_HANDLED; 1294 return IRQ_HANDLED;
1289} 1295}
1290 1296
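
The cppi_interrupt() change takes musb->lock only when the CPPI engine is wired to its own interrupt line (cppi->irq is set); when the handler is invoked from the main MUSB interrupt path, the caller already holds the lock and taking it again would deadlock. A generic sketch of that conditional-locking shape (demo_dev and its fields are illustrative stand-ins for the musb/cppi pair):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_dev {
        spinlock_t lock;        /* normally held by the parent IRQ handler */
        int own_irq;            /* nonzero when we have a dedicated IRQ line */
};

static irqreturn_t demo_dma_irq(int irq, void *dev_id)
{
        struct demo_dev *dev = dev_id;
        unsigned long flags = 0;

        /* Lock only when entered directly from our own IRQ line; when the
         * parent interrupt handler calls us, it already holds dev->lock. */
        if (dev->own_irq)
                spin_lock_irqsave(&dev->lock, flags);

        /* ... service DMA completions ... */

        if (dev->own_irq)
                spin_unlock_irqrestore(&dev->lock, flags);

        return IRQ_HANDLED;
}
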
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index e16ff605c458..66913811af5e 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -42,7 +42,7 @@
42#include "musb_core.h" 42#include "musb_core.h"
43 43
44#ifdef CONFIG_MACH_DAVINCI_EVM 44#ifdef CONFIG_MACH_DAVINCI_EVM
45#define GPIO_nVBUS_DRV 144 45#define GPIO_nVBUS_DRV 160
46#endif 46#endif
47 47
48#include "davinci.h" 48#include "davinci.h"
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index bfe08f4975a3..5eb9318cff77 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1319,7 +1319,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1319#endif 1319#endif
1320 u8 reg; 1320 u8 reg;
1321 char *type; 1321 char *type;
1322 char aInfo[78], aRevision[32], aDate[12]; 1322 char aInfo[90], aRevision[32], aDate[12];
1323 void __iomem *mbase = musb->mregs; 1323 void __iomem *mbase = musb->mregs;
1324 int status = 0; 1324 int status = 0;
1325 int i; 1325 int i;
@@ -1521,6 +1521,14 @@ irqreturn_t musb_interrupt(struct musb *musb)
1521 (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral", 1521 (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral",
1522 musb->int_usb, musb->int_tx, musb->int_rx); 1522 musb->int_usb, musb->int_tx, musb->int_rx);
1523 1523
1524#ifdef CONFIG_USB_GADGET_MUSB_HDRC
1525 if (is_otg_enabled(musb) || is_peripheral_enabled(musb))
1526 if (!musb->gadget_driver) {
1527 DBG(5, "No gadget driver loaded\n");
1528 return IRQ_HANDLED;
1529 }
1530#endif
1531
1524 /* the core can interrupt us for multiple reasons; docs have 1532 /* the core can interrupt us for multiple reasons; docs have
1525 * a generic interrupt flowchart to follow 1533 * a generic interrupt flowchart to follow
1526 */ 1534 */
@@ -2139,7 +2147,7 @@ static int __init musb_probe(struct platform_device *pdev)
2139 return musb_init_controller(dev, irq, base); 2147 return musb_init_controller(dev, irq, base);
2140} 2148}
2141 2149
2142static int __devexit musb_remove(struct platform_device *pdev) 2150static int __exit musb_remove(struct platform_device *pdev)
2143{ 2151{
2144 struct musb *musb = dev_to_musb(&pdev->dev); 2152 struct musb *musb = dev_to_musb(&pdev->dev);
2145 void __iomem *ctrl_base = musb->ctrl_base; 2153 void __iomem *ctrl_base = musb->ctrl_base;
@@ -2231,7 +2239,7 @@ static struct platform_driver musb_driver = {
2231 .owner = THIS_MODULE, 2239 .owner = THIS_MODULE,
2232 .pm = MUSB_DEV_PM_OPS, 2240 .pm = MUSB_DEV_PM_OPS,
2233 }, 2241 },
2234 .remove = __devexit_p(musb_remove), 2242 .remove = __exit_p(musb_remove),
2235 .shutdown = musb_shutdown, 2243 .shutdown = musb_shutdown,
2236}; 2244};
2237 2245
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index c49b9ba025ab..cbcf14a236e6 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -309,7 +309,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
309 size_t request_size; 309 size_t request_size;
310 310
311 /* setup DMA, then program endpoint CSR */ 311 /* setup DMA, then program endpoint CSR */
312 request_size = min(request->length, 312 request_size = min_t(size_t, request->length,
313 musb_ep->dma->max_len); 313 musb_ep->dma->max_len);
314 if (request_size < musb_ep->packet_sz) 314 if (request_size < musb_ep->packet_sz)
315 musb_ep->dma->desired_mode = 0; 315 musb_ep->dma->desired_mode = 0;
@@ -319,7 +319,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
319 use_dma = use_dma && c->channel_program( 319 use_dma = use_dma && c->channel_program(
320 musb_ep->dma, musb_ep->packet_sz, 320 musb_ep->dma, musb_ep->packet_sz,
321 musb_ep->dma->desired_mode, 321 musb_ep->dma->desired_mode,
322 request->dma, request_size); 322 request->dma + request->actual, request_size);
323 if (use_dma) { 323 if (use_dma) {
324 if (musb_ep->dma->desired_mode == 0) { 324 if (musb_ep->dma->desired_mode == 0) {
325 /* 325 /*
@@ -515,12 +515,12 @@ void musb_g_tx(struct musb *musb, u8 epnum)
515 if (csr & MUSB_TXCSR_FIFONOTEMPTY) 515 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
516 return; 516 return;
517 517
518 if (!musb_ep->desc) { 518 request = musb_ep->desc ? next_request(musb_ep) : NULL;
519 if (!request) {
519 DBG(4, "%s idle now\n", 520 DBG(4, "%s idle now\n",
520 musb_ep->end_point.name); 521 musb_ep->end_point.name);
521 return; 522 return;
522 } else 523 }
523 request = next_request(musb_ep);
524 } 524 }
525 525
526 txstate(musb, to_musb_request(request)); 526 txstate(musb, to_musb_request(request));
@@ -746,6 +746,8 @@ void musb_g_rx(struct musb *musb, u8 epnum)
746 musb_ep_select(mbase, epnum); 746 musb_ep_select(mbase, epnum);
747 747
748 request = next_request(musb_ep); 748 request = next_request(musb_ep);
749 if (!request)
750 return;
749 751
750 csr = musb_readw(epio, MUSB_RXCSR); 752 csr = musb_readw(epio, MUSB_RXCSR);
751 dma = is_dma_capable() ? musb_ep->dma : NULL; 753 dma = is_dma_capable() ? musb_ep->dma : NULL;
@@ -1731,6 +1733,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1731 spin_lock_irqsave(&musb->lock, flags); 1733 spin_lock_irqsave(&musb->lock, flags);
1732 1734
1733 otg_set_peripheral(musb->xceiv, &musb->g); 1735 otg_set_peripheral(musb->xceiv, &musb->g);
1736 musb->xceiv->state = OTG_STATE_B_IDLE;
1734 musb->is_active = 1; 1737 musb->is_active = 1;
1735 1738
1736 /* FIXME this ignores the softconnect flag. Drivers are 1739 /* FIXME this ignores the softconnect flag. Drivers are
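
The txstate() change switches to min_t() because request->length and the DMA channel's max_len are not the same integer type, and the kernel's min() macro insists on identical operand types at compile time. A tiny illustration of the difference (the helper is illustrative):

#include <linux/kernel.h>
#include <linux/types.h>

/* clamp a size_t request against a u32 hardware limit */
static size_t clamp_dma_len(size_t requested, u32 hw_max)
{
        /* min(requested, hw_max) would trip the kernel's strict type check
         * because the operand types differ; min_t() casts both operands to
         * the named type before comparing. */
        return min_t(size_t, requested, hw_max);
}
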
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 8fba3f11e473..53d06451f820 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -664,7 +664,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
664 musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; 664 musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
665 break; 665 break;
666 default: 666 default:
667 ERR("SetupEnd came in a wrong ep0stage %s", 667 ERR("SetupEnd came in a wrong ep0stage %s\n",
668 decode_ep0stage(musb->ep0_state)); 668 decode_ep0stage(musb->ep0_state));
669 } 669 }
670 csr = musb_readw(regs, MUSB_CSR0); 670 csr = musb_readw(regs, MUSB_CSR0);
@@ -787,12 +787,18 @@ setup:
787 handled = service_zero_data_request( 787 handled = service_zero_data_request(
788 musb, &setup); 788 musb, &setup);
789 789
790 /*
791 * We're expecting no data in any case, so
792 * always set the DATAEND bit -- doing this
793 * here helps avoid SetupEnd interrupt coming
794 * in the idle stage when we're stalling...
795 */
796 musb->ackpend |= MUSB_CSR0_P_DATAEND;
797
790 /* status stage might be immediate */ 798 /* status stage might be immediate */
791 if (handled > 0) { 799 if (handled > 0)
792 musb->ackpend |= MUSB_CSR0_P_DATAEND;
793 musb->ep0_state = 800 musb->ep0_state =
794 MUSB_EP0_STAGE_STATUSIN; 801 MUSB_EP0_STAGE_STATUSIN;
795 }
796 break; 802 break;
797 803
798 /* sequence #1 (IN to host), includes GET_STATUS 804 /* sequence #1 (IN to host), includes GET_STATUS
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index d54460a88173..78a209709260 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -843,7 +843,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
843 843
844static struct platform_device *otg_dev; 844static struct platform_device *otg_dev;
845 845
846static int otg_init(struct isp1301 *isp) 846static int isp1301_otg_init(struct isp1301 *isp)
847{ 847{
848 u32 l; 848 u32 l;
849 849
@@ -1275,7 +1275,7 @@ static int __exit isp1301_remove(struct i2c_client *i2c)
1275static int isp1301_otg_enable(struct isp1301 *isp) 1275static int isp1301_otg_enable(struct isp1301 *isp)
1276{ 1276{
1277 power_up(isp); 1277 power_up(isp);
1278 otg_init(isp); 1278 isp1301_otg_init(isp);
1279 1279
1280 /* NOTE: since we don't change this, this provides 1280 /* NOTE: since we don't change this, this provides
1281 * a few more interrupts than are strictly needed. 1281 * a few more interrupts than are strictly needed.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index f99498fca99a..216f187582ab 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -44,6 +44,7 @@
44#include <linux/serial.h> 44#include <linux/serial.h>
45#include <linux/usb/serial.h> 45#include <linux/usb/serial.h>
46#include "ftdi_sio.h" 46#include "ftdi_sio.h"
47#include "ftdi_sio_ids.h"
47 48
48/* 49/*
49 * Version Information 50 * Version Information
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 4586a24fafb0..b0e0d64f822e 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -1,7 +1,10 @@
1/* 1/*
2 * Definitions for the FTDI USB Single Port Serial Converter - 2 * Driver definitions for the FTDI USB Single Port Serial Converter -
3 * known as FTDI_SIO (Serial Input/Output application of the chipset) 3 * known as FTDI_SIO (Serial Input/Output application of the chipset)
4 * 4 *
5 * For USB vendor/product IDs (VID/PID), please see ftdi_sio_ids.h
6 *
7 *
5 * The example I have is known as the USC-1000 which is available from 8 * The example I have is known as the USC-1000 which is available from
 6 * http://www.dse.co.nz - cat no XH4214. It looks similar to this: 9 * http://www.dse.co.nz - cat no XH4214. It looks similar to this:
 7 * http://www.dansdata.com/usbser.htm, but I can't be sure. There are other 10 * http://www.dansdata.com/usbser.htm, but I can't be sure. There are other
@@ -17,880 +20,7 @@
17 * Bill Ryder - bryder@sgi.com formerly of Silicon Graphics, Inc.- wrote the 20 * Bill Ryder - bryder@sgi.com formerly of Silicon Graphics, Inc.- wrote the
18 * FTDI_SIO implementation. 21 * FTDI_SIO implementation.
19 * 22 *
20 * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
21 * from Rudolf Gugler
22 *
23 */
24
25#define FTDI_VID 0x0403 /* Vendor Id */
26#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
27#define FTDI_8U232AM_PID 0x6001 /* Similar device to SIO above */
28#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
29#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
30#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
31#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
32#define FTDI_RELAIS_PID 0xFA10 /* Relais device from Rudolf Gugler */
33#define FTDI_NF_RIC_VID 0x0DCD /* Vendor Id */
34#define FTDI_NF_RIC_PID 0x0001 /* Product Id */
35#define FTDI_USBX_707_PID 0xF857 /* ADSTech IR Blaster USBX-707 */
36
37/* Larsen and Brusgaard AltiTrack/USBtrack */
38#define LARSENBRUSGAARD_VID 0x0FD8
39#define LB_ALTITRACK_PID 0x0001
40
41/* www.canusb.com Lawicel CANUSB device */
42#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
43
44/* AlphaMicro Components AMC-232USB01 device */
45#define FTDI_AMC232_PID 0xFF00 /* Product Id */
46
47/* www.candapter.com Ewert Energy Systems CANdapter device */
48#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
49
50/* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
51/* the VID is the standard ftdi vid (FTDI_VID) */
52#define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */
53#define FTDI_SCS_DEVICE_1_PID 0xD011 /* SCS Tracker / DSP TNC */
54#define FTDI_SCS_DEVICE_2_PID 0xD012
55#define FTDI_SCS_DEVICE_3_PID 0xD013
56#define FTDI_SCS_DEVICE_4_PID 0xD014
57#define FTDI_SCS_DEVICE_5_PID 0xD015
58#define FTDI_SCS_DEVICE_6_PID 0xD016
59#define FTDI_SCS_DEVICE_7_PID 0xD017
60
61/* ACT Solutions HomePro ZWave interface (http://www.act-solutions.com/HomePro.htm) */
62#define FTDI_ACTZWAVE_PID 0xF2D0
63
64
65/* www.starting-point-systems.com µChameleon device */
66#define FTDI_MICRO_CHAMELEON_PID 0xCAA0 /* Product Id */
67
68/* www.irtrans.de device */
69#define FTDI_IRTRANS_PID 0xFC60 /* Product Id */
70
71
72/* www.thoughttechnology.com/ TT-USB provide with procomp use ftdi_sio */
73#define FTDI_TTUSB_PID 0xFF20 /* Product Id */
74
75/* iPlus device */
76#define FTDI_IPLUS_PID 0xD070 /* Product Id */
77#define FTDI_IPLUS2_PID 0xD071 /* Product Id */
78
79/* DMX4ALL DMX Interfaces */
80#define FTDI_DMX4ALL 0xC850
81
82/* OpenDCC (www.opendcc.de) product id */
83#define FTDI_OPENDCC_PID 0xBFD8
84#define FTDI_OPENDCC_SNIFFER_PID 0xBFD9
85#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
86#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
87
88/* Sprog II (Andrew Crosland's SprogII DCC interface) */
89#define FTDI_SPROG_II 0xF0C8
90
91/* www.crystalfontz.com devices - thanx for providing free devices for evaluation ! */
92/* they use the ftdi chipset for the USB interface and the vendor id is the same */
93#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */
94#define FTDI_XF_634_PID 0xFC09 /* 634: 20x4 Character Display */
95#define FTDI_XF_547_PID 0xFC0A /* 547: Two line Display */
96#define FTDI_XF_633_PID 0xFC0B /* 633: 16x2 Character Display with Keys */
97#define FTDI_XF_631_PID 0xFC0C /* 631: 20x2 Character Display */
98#define FTDI_XF_635_PID 0xFC0D /* 635: 20x4 Character Display */
99#define FTDI_XF_640_PID 0xFC0E /* 640: Two line Display */
100#define FTDI_XF_642_PID 0xFC0F /* 642: Two line Display */
101
102/* Video Networks Limited / Homechoice in the UK use an ftdi-based device for their 1Mb */
103/* broadband internet service. The following PID is exhibited by the usb device supplied */
104/* (the VID is the standard ftdi vid (FTDI_VID) */
105#define FTDI_VNHCPCUSB_D_PID 0xfe38 /* Product Id */
106
107/*
108 * PCDJ use ftdi based dj-controllers. The following PID is for their DAC-2 device
109 * http://www.pcdjhardware.com/DAC2.asp (PID sent by Wouter Paesen)
110 * (the VID is the standard ftdi vid (FTDI_VID) */
111#define FTDI_PCDJ_DAC2_PID 0xFA88
112
113/*
114 * The following are the values for the Matrix Orbital LCD displays,
115 * which are the FT232BM ( similar to the 8U232AM )
116 */
117#define FTDI_MTXORB_0_PID 0xFA00 /* Matrix Orbital Product Id */
118#define FTDI_MTXORB_1_PID 0xFA01 /* Matrix Orbital Product Id */
119#define FTDI_MTXORB_2_PID 0xFA02 /* Matrix Orbital Product Id */
120#define FTDI_MTXORB_3_PID 0xFA03 /* Matrix Orbital Product Id */
121#define FTDI_MTXORB_4_PID 0xFA04 /* Matrix Orbital Product Id */
122#define FTDI_MTXORB_5_PID 0xFA05 /* Matrix Orbital Product Id */
123#define FTDI_MTXORB_6_PID 0xFA06 /* Matrix Orbital Product Id */
124
125/* OOCDlink by Joern Kaipf <joernk@web.de>
126 * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
127#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
128
129/*
130 * The following are the values for the Matrix Orbital FTDI Range
131 * Anything in this range will use an FT232RL.
132 */
133#define MTXORB_VID 0x1B3D
134#define MTXORB_FTDI_RANGE_0100_PID 0x0100
135#define MTXORB_FTDI_RANGE_0101_PID 0x0101
136#define MTXORB_FTDI_RANGE_0102_PID 0x0102
137#define MTXORB_FTDI_RANGE_0103_PID 0x0103
138#define MTXORB_FTDI_RANGE_0104_PID 0x0104
139#define MTXORB_FTDI_RANGE_0105_PID 0x0105
140#define MTXORB_FTDI_RANGE_0106_PID 0x0106
141#define MTXORB_FTDI_RANGE_0107_PID 0x0107
142#define MTXORB_FTDI_RANGE_0108_PID 0x0108
143#define MTXORB_FTDI_RANGE_0109_PID 0x0109
144#define MTXORB_FTDI_RANGE_010A_PID 0x010A
145#define MTXORB_FTDI_RANGE_010B_PID 0x010B
146#define MTXORB_FTDI_RANGE_010C_PID 0x010C
147#define MTXORB_FTDI_RANGE_010D_PID 0x010D
148#define MTXORB_FTDI_RANGE_010E_PID 0x010E
149#define MTXORB_FTDI_RANGE_010F_PID 0x010F
150#define MTXORB_FTDI_RANGE_0110_PID 0x0110
151#define MTXORB_FTDI_RANGE_0111_PID 0x0111
152#define MTXORB_FTDI_RANGE_0112_PID 0x0112
153#define MTXORB_FTDI_RANGE_0113_PID 0x0113
154#define MTXORB_FTDI_RANGE_0114_PID 0x0114
155#define MTXORB_FTDI_RANGE_0115_PID 0x0115
156#define MTXORB_FTDI_RANGE_0116_PID 0x0116
157#define MTXORB_FTDI_RANGE_0117_PID 0x0117
158#define MTXORB_FTDI_RANGE_0118_PID 0x0118
159#define MTXORB_FTDI_RANGE_0119_PID 0x0119
160#define MTXORB_FTDI_RANGE_011A_PID 0x011A
161#define MTXORB_FTDI_RANGE_011B_PID 0x011B
162#define MTXORB_FTDI_RANGE_011C_PID 0x011C
163#define MTXORB_FTDI_RANGE_011D_PID 0x011D
164#define MTXORB_FTDI_RANGE_011E_PID 0x011E
165#define MTXORB_FTDI_RANGE_011F_PID 0x011F
166#define MTXORB_FTDI_RANGE_0120_PID 0x0120
167#define MTXORB_FTDI_RANGE_0121_PID 0x0121
168#define MTXORB_FTDI_RANGE_0122_PID 0x0122
169#define MTXORB_FTDI_RANGE_0123_PID 0x0123
170#define MTXORB_FTDI_RANGE_0124_PID 0x0124
171#define MTXORB_FTDI_RANGE_0125_PID 0x0125
172#define MTXORB_FTDI_RANGE_0126_PID 0x0126
173#define MTXORB_FTDI_RANGE_0127_PID 0x0127
174#define MTXORB_FTDI_RANGE_0128_PID 0x0128
175#define MTXORB_FTDI_RANGE_0129_PID 0x0129
176#define MTXORB_FTDI_RANGE_012A_PID 0x012A
177#define MTXORB_FTDI_RANGE_012B_PID 0x012B
178#define MTXORB_FTDI_RANGE_012C_PID 0x012C
179#define MTXORB_FTDI_RANGE_012D_PID 0x012D
180#define MTXORB_FTDI_RANGE_012E_PID 0x012E
181#define MTXORB_FTDI_RANGE_012F_PID 0x012F
182#define MTXORB_FTDI_RANGE_0130_PID 0x0130
183#define MTXORB_FTDI_RANGE_0131_PID 0x0131
184#define MTXORB_FTDI_RANGE_0132_PID 0x0132
185#define MTXORB_FTDI_RANGE_0133_PID 0x0133
186#define MTXORB_FTDI_RANGE_0134_PID 0x0134
187#define MTXORB_FTDI_RANGE_0135_PID 0x0135
188#define MTXORB_FTDI_RANGE_0136_PID 0x0136
189#define MTXORB_FTDI_RANGE_0137_PID 0x0137
190#define MTXORB_FTDI_RANGE_0138_PID 0x0138
191#define MTXORB_FTDI_RANGE_0139_PID 0x0139
192#define MTXORB_FTDI_RANGE_013A_PID 0x013A
193#define MTXORB_FTDI_RANGE_013B_PID 0x013B
194#define MTXORB_FTDI_RANGE_013C_PID 0x013C
195#define MTXORB_FTDI_RANGE_013D_PID 0x013D
196#define MTXORB_FTDI_RANGE_013E_PID 0x013E
197#define MTXORB_FTDI_RANGE_013F_PID 0x013F
198#define MTXORB_FTDI_RANGE_0140_PID 0x0140
199#define MTXORB_FTDI_RANGE_0141_PID 0x0141
200#define MTXORB_FTDI_RANGE_0142_PID 0x0142
201#define MTXORB_FTDI_RANGE_0143_PID 0x0143
202#define MTXORB_FTDI_RANGE_0144_PID 0x0144
203#define MTXORB_FTDI_RANGE_0145_PID 0x0145
204#define MTXORB_FTDI_RANGE_0146_PID 0x0146
205#define MTXORB_FTDI_RANGE_0147_PID 0x0147
206#define MTXORB_FTDI_RANGE_0148_PID 0x0148
207#define MTXORB_FTDI_RANGE_0149_PID 0x0149
208#define MTXORB_FTDI_RANGE_014A_PID 0x014A
209#define MTXORB_FTDI_RANGE_014B_PID 0x014B
210#define MTXORB_FTDI_RANGE_014C_PID 0x014C
211#define MTXORB_FTDI_RANGE_014D_PID 0x014D
212#define MTXORB_FTDI_RANGE_014E_PID 0x014E
213#define MTXORB_FTDI_RANGE_014F_PID 0x014F
214#define MTXORB_FTDI_RANGE_0150_PID 0x0150
215#define MTXORB_FTDI_RANGE_0151_PID 0x0151
216#define MTXORB_FTDI_RANGE_0152_PID 0x0152
217#define MTXORB_FTDI_RANGE_0153_PID 0x0153
218#define MTXORB_FTDI_RANGE_0154_PID 0x0154
219#define MTXORB_FTDI_RANGE_0155_PID 0x0155
220#define MTXORB_FTDI_RANGE_0156_PID 0x0156
221#define MTXORB_FTDI_RANGE_0157_PID 0x0157
222#define MTXORB_FTDI_RANGE_0158_PID 0x0158
223#define MTXORB_FTDI_RANGE_0159_PID 0x0159
224#define MTXORB_FTDI_RANGE_015A_PID 0x015A
225#define MTXORB_FTDI_RANGE_015B_PID 0x015B
226#define MTXORB_FTDI_RANGE_015C_PID 0x015C
227#define MTXORB_FTDI_RANGE_015D_PID 0x015D
228#define MTXORB_FTDI_RANGE_015E_PID 0x015E
229#define MTXORB_FTDI_RANGE_015F_PID 0x015F
230#define MTXORB_FTDI_RANGE_0160_PID 0x0160
231#define MTXORB_FTDI_RANGE_0161_PID 0x0161
232#define MTXORB_FTDI_RANGE_0162_PID 0x0162
233#define MTXORB_FTDI_RANGE_0163_PID 0x0163
234#define MTXORB_FTDI_RANGE_0164_PID 0x0164
235#define MTXORB_FTDI_RANGE_0165_PID 0x0165
236#define MTXORB_FTDI_RANGE_0166_PID 0x0166
237#define MTXORB_FTDI_RANGE_0167_PID 0x0167
238#define MTXORB_FTDI_RANGE_0168_PID 0x0168
239#define MTXORB_FTDI_RANGE_0169_PID 0x0169
240#define MTXORB_FTDI_RANGE_016A_PID 0x016A
241#define MTXORB_FTDI_RANGE_016B_PID 0x016B
242#define MTXORB_FTDI_RANGE_016C_PID 0x016C
243#define MTXORB_FTDI_RANGE_016D_PID 0x016D
244#define MTXORB_FTDI_RANGE_016E_PID 0x016E
245#define MTXORB_FTDI_RANGE_016F_PID 0x016F
246#define MTXORB_FTDI_RANGE_0170_PID 0x0170
247#define MTXORB_FTDI_RANGE_0171_PID 0x0171
248#define MTXORB_FTDI_RANGE_0172_PID 0x0172
249#define MTXORB_FTDI_RANGE_0173_PID 0x0173
250#define MTXORB_FTDI_RANGE_0174_PID 0x0174
251#define MTXORB_FTDI_RANGE_0175_PID 0x0175
252#define MTXORB_FTDI_RANGE_0176_PID 0x0176
253#define MTXORB_FTDI_RANGE_0177_PID 0x0177
254#define MTXORB_FTDI_RANGE_0178_PID 0x0178
255#define MTXORB_FTDI_RANGE_0179_PID 0x0179
256#define MTXORB_FTDI_RANGE_017A_PID 0x017A
257#define MTXORB_FTDI_RANGE_017B_PID 0x017B
258#define MTXORB_FTDI_RANGE_017C_PID 0x017C
259#define MTXORB_FTDI_RANGE_017D_PID 0x017D
260#define MTXORB_FTDI_RANGE_017E_PID 0x017E
261#define MTXORB_FTDI_RANGE_017F_PID 0x017F
262#define MTXORB_FTDI_RANGE_0180_PID 0x0180
263#define MTXORB_FTDI_RANGE_0181_PID 0x0181
264#define MTXORB_FTDI_RANGE_0182_PID 0x0182
265#define MTXORB_FTDI_RANGE_0183_PID 0x0183
266#define MTXORB_FTDI_RANGE_0184_PID 0x0184
267#define MTXORB_FTDI_RANGE_0185_PID 0x0185
268#define MTXORB_FTDI_RANGE_0186_PID 0x0186
269#define MTXORB_FTDI_RANGE_0187_PID 0x0187
270#define MTXORB_FTDI_RANGE_0188_PID 0x0188
271#define MTXORB_FTDI_RANGE_0189_PID 0x0189
272#define MTXORB_FTDI_RANGE_018A_PID 0x018A
273#define MTXORB_FTDI_RANGE_018B_PID 0x018B
274#define MTXORB_FTDI_RANGE_018C_PID 0x018C
275#define MTXORB_FTDI_RANGE_018D_PID 0x018D
276#define MTXORB_FTDI_RANGE_018E_PID 0x018E
277#define MTXORB_FTDI_RANGE_018F_PID 0x018F
278#define MTXORB_FTDI_RANGE_0190_PID 0x0190
279#define MTXORB_FTDI_RANGE_0191_PID 0x0191
280#define MTXORB_FTDI_RANGE_0192_PID 0x0192
281#define MTXORB_FTDI_RANGE_0193_PID 0x0193
282#define MTXORB_FTDI_RANGE_0194_PID 0x0194
283#define MTXORB_FTDI_RANGE_0195_PID 0x0195
284#define MTXORB_FTDI_RANGE_0196_PID 0x0196
285#define MTXORB_FTDI_RANGE_0197_PID 0x0197
286#define MTXORB_FTDI_RANGE_0198_PID 0x0198
287#define MTXORB_FTDI_RANGE_0199_PID 0x0199
288#define MTXORB_FTDI_RANGE_019A_PID 0x019A
289#define MTXORB_FTDI_RANGE_019B_PID 0x019B
290#define MTXORB_FTDI_RANGE_019C_PID 0x019C
291#define MTXORB_FTDI_RANGE_019D_PID 0x019D
292#define MTXORB_FTDI_RANGE_019E_PID 0x019E
293#define MTXORB_FTDI_RANGE_019F_PID 0x019F
294#define MTXORB_FTDI_RANGE_01A0_PID 0x01A0
295#define MTXORB_FTDI_RANGE_01A1_PID 0x01A1
296#define MTXORB_FTDI_RANGE_01A2_PID 0x01A2
297#define MTXORB_FTDI_RANGE_01A3_PID 0x01A3
298#define MTXORB_FTDI_RANGE_01A4_PID 0x01A4
299#define MTXORB_FTDI_RANGE_01A5_PID 0x01A5
300#define MTXORB_FTDI_RANGE_01A6_PID 0x01A6
301#define MTXORB_FTDI_RANGE_01A7_PID 0x01A7
302#define MTXORB_FTDI_RANGE_01A8_PID 0x01A8
303#define MTXORB_FTDI_RANGE_01A9_PID 0x01A9
304#define MTXORB_FTDI_RANGE_01AA_PID 0x01AA
305#define MTXORB_FTDI_RANGE_01AB_PID 0x01AB
306#define MTXORB_FTDI_RANGE_01AC_PID 0x01AC
307#define MTXORB_FTDI_RANGE_01AD_PID 0x01AD
308#define MTXORB_FTDI_RANGE_01AE_PID 0x01AE
309#define MTXORB_FTDI_RANGE_01AF_PID 0x01AF
310#define MTXORB_FTDI_RANGE_01B0_PID 0x01B0
311#define MTXORB_FTDI_RANGE_01B1_PID 0x01B1
312#define MTXORB_FTDI_RANGE_01B2_PID 0x01B2
313#define MTXORB_FTDI_RANGE_01B3_PID 0x01B3
314#define MTXORB_FTDI_RANGE_01B4_PID 0x01B4
315#define MTXORB_FTDI_RANGE_01B5_PID 0x01B5
316#define MTXORB_FTDI_RANGE_01B6_PID 0x01B6
317#define MTXORB_FTDI_RANGE_01B7_PID 0x01B7
318#define MTXORB_FTDI_RANGE_01B8_PID 0x01B8
319#define MTXORB_FTDI_RANGE_01B9_PID 0x01B9
320#define MTXORB_FTDI_RANGE_01BA_PID 0x01BA
321#define MTXORB_FTDI_RANGE_01BB_PID 0x01BB
322#define MTXORB_FTDI_RANGE_01BC_PID 0x01BC
323#define MTXORB_FTDI_RANGE_01BD_PID 0x01BD
324#define MTXORB_FTDI_RANGE_01BE_PID 0x01BE
325#define MTXORB_FTDI_RANGE_01BF_PID 0x01BF
326#define MTXORB_FTDI_RANGE_01C0_PID 0x01C0
327#define MTXORB_FTDI_RANGE_01C1_PID 0x01C1
328#define MTXORB_FTDI_RANGE_01C2_PID 0x01C2
329#define MTXORB_FTDI_RANGE_01C3_PID 0x01C3
330#define MTXORB_FTDI_RANGE_01C4_PID 0x01C4
331#define MTXORB_FTDI_RANGE_01C5_PID 0x01C5
332#define MTXORB_FTDI_RANGE_01C6_PID 0x01C6
333#define MTXORB_FTDI_RANGE_01C7_PID 0x01C7
334#define MTXORB_FTDI_RANGE_01C8_PID 0x01C8
335#define MTXORB_FTDI_RANGE_01C9_PID 0x01C9
336#define MTXORB_FTDI_RANGE_01CA_PID 0x01CA
337#define MTXORB_FTDI_RANGE_01CB_PID 0x01CB
338#define MTXORB_FTDI_RANGE_01CC_PID 0x01CC
339#define MTXORB_FTDI_RANGE_01CD_PID 0x01CD
340#define MTXORB_FTDI_RANGE_01CE_PID 0x01CE
341#define MTXORB_FTDI_RANGE_01CF_PID 0x01CF
342#define MTXORB_FTDI_RANGE_01D0_PID 0x01D0
343#define MTXORB_FTDI_RANGE_01D1_PID 0x01D1
344#define MTXORB_FTDI_RANGE_01D2_PID 0x01D2
345#define MTXORB_FTDI_RANGE_01D3_PID 0x01D3
346#define MTXORB_FTDI_RANGE_01D4_PID 0x01D4
347#define MTXORB_FTDI_RANGE_01D5_PID 0x01D5
348#define MTXORB_FTDI_RANGE_01D6_PID 0x01D6
349#define MTXORB_FTDI_RANGE_01D7_PID 0x01D7
350#define MTXORB_FTDI_RANGE_01D8_PID 0x01D8
351#define MTXORB_FTDI_RANGE_01D9_PID 0x01D9
352#define MTXORB_FTDI_RANGE_01DA_PID 0x01DA
353#define MTXORB_FTDI_RANGE_01DB_PID 0x01DB
354#define MTXORB_FTDI_RANGE_01DC_PID 0x01DC
355#define MTXORB_FTDI_RANGE_01DD_PID 0x01DD
356#define MTXORB_FTDI_RANGE_01DE_PID 0x01DE
357#define MTXORB_FTDI_RANGE_01DF_PID 0x01DF
358#define MTXORB_FTDI_RANGE_01E0_PID 0x01E0
359#define MTXORB_FTDI_RANGE_01E1_PID 0x01E1
360#define MTXORB_FTDI_RANGE_01E2_PID 0x01E2
361#define MTXORB_FTDI_RANGE_01E3_PID 0x01E3
362#define MTXORB_FTDI_RANGE_01E4_PID 0x01E4
363#define MTXORB_FTDI_RANGE_01E5_PID 0x01E5
364#define MTXORB_FTDI_RANGE_01E6_PID 0x01E6
365#define MTXORB_FTDI_RANGE_01E7_PID 0x01E7
366#define MTXORB_FTDI_RANGE_01E8_PID 0x01E8
367#define MTXORB_FTDI_RANGE_01E9_PID 0x01E9
368#define MTXORB_FTDI_RANGE_01EA_PID 0x01EA
369#define MTXORB_FTDI_RANGE_01EB_PID 0x01EB
370#define MTXORB_FTDI_RANGE_01EC_PID 0x01EC
371#define MTXORB_FTDI_RANGE_01ED_PID 0x01ED
372#define MTXORB_FTDI_RANGE_01EE_PID 0x01EE
373#define MTXORB_FTDI_RANGE_01EF_PID 0x01EF
374#define MTXORB_FTDI_RANGE_01F0_PID 0x01F0
375#define MTXORB_FTDI_RANGE_01F1_PID 0x01F1
376#define MTXORB_FTDI_RANGE_01F2_PID 0x01F2
377#define MTXORB_FTDI_RANGE_01F3_PID 0x01F3
378#define MTXORB_FTDI_RANGE_01F4_PID 0x01F4
379#define MTXORB_FTDI_RANGE_01F5_PID 0x01F5
380#define MTXORB_FTDI_RANGE_01F6_PID 0x01F6
381#define MTXORB_FTDI_RANGE_01F7_PID 0x01F7
382#define MTXORB_FTDI_RANGE_01F8_PID 0x01F8
383#define MTXORB_FTDI_RANGE_01F9_PID 0x01F9
384#define MTXORB_FTDI_RANGE_01FA_PID 0x01FA
385#define MTXORB_FTDI_RANGE_01FB_PID 0x01FB
386#define MTXORB_FTDI_RANGE_01FC_PID 0x01FC
387#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD
388#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE
389#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF
390
391
392
393/* Interbiometrics USB I/O Board */
394/* Developed for Interbiometrics by Rudolf Gugler */
395#define INTERBIOMETRICS_VID 0x1209
396#define INTERBIOMETRICS_IOBOARD_PID 0x1002
397#define INTERBIOMETRICS_MINI_IOBOARD_PID 0x1006
398
399/*
400 * The following are the values for the Perle Systems
401 * UltraPort USB serial converters
402 */
403#define FTDI_PERLE_ULTRAPORT_PID 0xF0C0 /* Perle UltraPort Product Id */
404
405/*
406 * The following are the values for the Sealevel SeaLINK+ adapters.
407 * (Original list sent by Tuan Hoang. Ian Abbott renamed the macros and
408 * removed some PIDs that don't seem to match any existing products.)
409 */
410#define SEALEVEL_VID 0x0c52 /* Sealevel Vendor ID */
411#define SEALEVEL_2101_PID 0x2101 /* SeaLINK+232 (2101/2105) */
412#define SEALEVEL_2102_PID 0x2102 /* SeaLINK+485 (2102) */
413#define SEALEVEL_2103_PID 0x2103 /* SeaLINK+232I (2103) */
414#define SEALEVEL_2104_PID 0x2104 /* SeaLINK+485I (2104) */
415#define SEALEVEL_2106_PID 0x9020 /* SeaLINK+422 (2106) */
416#define SEALEVEL_2201_1_PID 0x2211 /* SeaPORT+2/232 (2201) Port 1 */
417#define SEALEVEL_2201_2_PID 0x2221 /* SeaPORT+2/232 (2201) Port 2 */
418#define SEALEVEL_2202_1_PID 0x2212 /* SeaPORT+2/485 (2202) Port 1 */
419#define SEALEVEL_2202_2_PID 0x2222 /* SeaPORT+2/485 (2202) Port 2 */
420#define SEALEVEL_2203_1_PID 0x2213 /* SeaPORT+2 (2203) Port 1 */
421#define SEALEVEL_2203_2_PID 0x2223 /* SeaPORT+2 (2203) Port 2 */
422#define SEALEVEL_2401_1_PID 0x2411 /* SeaPORT+4/232 (2401) Port 1 */
423#define SEALEVEL_2401_2_PID 0x2421 /* SeaPORT+4/232 (2401) Port 2 */
424#define SEALEVEL_2401_3_PID 0x2431 /* SeaPORT+4/232 (2401) Port 3 */
425#define SEALEVEL_2401_4_PID 0x2441 /* SeaPORT+4/232 (2401) Port 4 */
426#define SEALEVEL_2402_1_PID 0x2412 /* SeaPORT+4/485 (2402) Port 1 */
427#define SEALEVEL_2402_2_PID 0x2422 /* SeaPORT+4/485 (2402) Port 2 */
428#define SEALEVEL_2402_3_PID 0x2432 /* SeaPORT+4/485 (2402) Port 3 */
429#define SEALEVEL_2402_4_PID 0x2442 /* SeaPORT+4/485 (2402) Port 4 */
430#define SEALEVEL_2403_1_PID 0x2413 /* SeaPORT+4 (2403) Port 1 */
431#define SEALEVEL_2403_2_PID 0x2423 /* SeaPORT+4 (2403) Port 2 */
432#define SEALEVEL_2403_3_PID 0x2433 /* SeaPORT+4 (2403) Port 3 */
433#define SEALEVEL_2403_4_PID 0x2443 /* SeaPORT+4 (2403) Port 4 */
434#define SEALEVEL_2801_1_PID 0X2811 /* SeaLINK+8/232 (2801) Port 1 */
435#define SEALEVEL_2801_2_PID 0X2821 /* SeaLINK+8/232 (2801) Port 2 */
436#define SEALEVEL_2801_3_PID 0X2831 /* SeaLINK+8/232 (2801) Port 3 */
437#define SEALEVEL_2801_4_PID 0X2841 /* SeaLINK+8/232 (2801) Port 4 */
438#define SEALEVEL_2801_5_PID 0X2851 /* SeaLINK+8/232 (2801) Port 5 */
439#define SEALEVEL_2801_6_PID 0X2861 /* SeaLINK+8/232 (2801) Port 6 */
440#define SEALEVEL_2801_7_PID 0X2871 /* SeaLINK+8/232 (2801) Port 7 */
441#define SEALEVEL_2801_8_PID 0X2881 /* SeaLINK+8/232 (2801) Port 8 */
442#define SEALEVEL_2802_1_PID 0X2812 /* SeaLINK+8/485 (2802) Port 1 */
443#define SEALEVEL_2802_2_PID 0X2822 /* SeaLINK+8/485 (2802) Port 2 */
444#define SEALEVEL_2802_3_PID 0X2832 /* SeaLINK+8/485 (2802) Port 3 */
445#define SEALEVEL_2802_4_PID 0X2842 /* SeaLINK+8/485 (2802) Port 4 */
446#define SEALEVEL_2802_5_PID 0X2852 /* SeaLINK+8/485 (2802) Port 5 */
447#define SEALEVEL_2802_6_PID 0X2862 /* SeaLINK+8/485 (2802) Port 6 */
448#define SEALEVEL_2802_7_PID 0X2872 /* SeaLINK+8/485 (2802) Port 7 */
449#define SEALEVEL_2802_8_PID 0X2882 /* SeaLINK+8/485 (2802) Port 8 */
450#define SEALEVEL_2803_1_PID 0X2813 /* SeaLINK+8 (2803) Port 1 */
451#define SEALEVEL_2803_2_PID 0X2823 /* SeaLINK+8 (2803) Port 2 */
452#define SEALEVEL_2803_3_PID 0X2833 /* SeaLINK+8 (2803) Port 3 */
453#define SEALEVEL_2803_4_PID 0X2843 /* SeaLINK+8 (2803) Port 4 */
454#define SEALEVEL_2803_5_PID 0X2853 /* SeaLINK+8 (2803) Port 5 */
455#define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */
456#define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */
457#define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */
458
459/*
460 * The following are the values for two KOBIL chipcard terminals.
461 */
462#define KOBIL_VID 0x0d46 /* KOBIL Vendor ID */
463#define KOBIL_CONV_B1_PID 0x2020 /* KOBIL Konverter for B1 */
464#define KOBIL_CONV_KAAN_PID 0x2021 /* KOBIL_Konverter for KAAN */
465
466/*
467 * Icom ID-1 digital transceiver
468 */
469
470#define ICOM_ID1_VID 0x0C26
471#define ICOM_ID1_PID 0x0004
472
473/*
474 * ASK.fr devices
475 */
476#define FTDI_ASK_RDR400_PID 0xC991 /* ASK RDR 400 series card reader */
477
478/*
479 * FTDI USB UART chips used in construction projects from the
480 * Elektor Electronics magazine (http://elektor-electronics.co.uk)
481 */
482#define ELEKTOR_VID 0x0C7D
483#define ELEKTOR_FT323R_PID 0x0005 /* RFID-Reader, issue 09-2006 */
484
485/*
486 * DSS-20 Sync Station for Sony Ericsson P800
487 */
488#define FTDI_DSS20_PID 0xFC82
489
490/*
491 * Home Electronics (www.home-electro.com) USB gadgets
492 */
493#define FTDI_HE_TIRA1_PID 0xFA78 /* Tira-1 IR transceiver */
494
495/* USB-UIRT - An infrared receiver and transmitter using the 8U232AM chip */
496/* http://home.earthlink.net/~jrhees/USBUIRT/index.htm */
497#define FTDI_USB_UIRT_PID 0xF850 /* Product Id */
498
499/* TNC-X USB-to-packet-radio adapter, versions prior to 3.0 (DLP module) */
500
501#define FTDI_TNC_X_PID 0xEBE0
502
503/*
504 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
505 * All of these devices use FTDI's vendor ID (0x0403).
506 *
507 * The previously included PID for the UO 100 module was incorrect.
508 * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
509 *
510 * Armin Laeuger originally sent the PID for the UM 100 module.
511 */
512#define FTDI_R2000KU_TRUE_RNG 0xFB80 /* R2000KU TRUE RNG */
513#define FTDI_ELV_UR100_PID 0xFB58 /* USB-RS232-Umsetzer (UR 100) */
514#define FTDI_ELV_UM100_PID 0xFB5A /* USB-Modul UM 100 */
515#define FTDI_ELV_UO100_PID 0xFB5B /* USB-Modul UO 100 */
516#define FTDI_ELV_ALC8500_PID 0xF06E /* ALC 8500 Expert */
517/* Additional ELV PIDs that default to using the FTDI D2XX drivers on
518 * MS Windows, rather than the FTDI Virtual Com Port drivers.
519 * Maybe these will be easier to use with the libftdi/libusb user-space
520 * drivers, or possibly the Comedi drivers in some cases. */
521#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */
522#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */
523#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperartur-Feuchte Messgeraet (TFM 100) */
524#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkurh (UDF 77) */
525#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */
526#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */
527#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */
528#define FTDI_ELV_USI2_PID 0xF06A /* USB-Schrittmotoren-Interface (USI 2) */
529#define FTDI_ELV_T1100_PID 0xF06B /* Thermometer (T 1100) */
530#define FTDI_ELV_PCD200_PID 0xF06C /* PC-Datenlogger (PCD 200) */
531#define FTDI_ELV_ULA200_PID 0xF06D /* USB-LCD-Ansteuerung (ULA 200) */
532#define FTDI_ELV_FHZ1000PC_PID 0xF06F /* FHZ 1000 PC */
533#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
534#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
535#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
536#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */
537#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */
538#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
539#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
540#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
541#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
542#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Energy monitor EM 1010 PC */
543#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
544
545/*
546 * Definitions for ID TECH (www.idt-net.com) devices
547 */
548#define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */
549#define IDTECH_IDT1221U_PID 0x0300 /* IDT1221U USB to RS-232 adapter */
550
551/*
552 * Definitions for Omnidirectional Control Technology, Inc. devices
553 */
554#define OCT_VID 0x0B39 /* OCT vendor ID */
555/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
556/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
557/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
558#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
559
560/* an infrared receiver for user access control with IR tags */
561#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
562
563/*
564 * Definitions for Artemis astronomical USB based cameras
565 * Check it at http://www.artemisccd.co.uk/
566 */
567#define FTDI_ARTEMIS_PID 0xDF28 /* All Artemis Cameras */
568
569/*
570 * Definitions for ATIK Instruments astronomical USB based cameras
571 * Check it at http://www.atik-instruments.com/
572 */
573#define FTDI_ATIK_ATK16_PID 0xDF30 /* ATIK ATK-16 Grayscale Camera */
574#define FTDI_ATIK_ATK16C_PID 0xDF32 /* ATIK ATK-16C Colour Camera */
575#define FTDI_ATIK_ATK16HR_PID 0xDF31 /* ATIK ATK-16HR Grayscale Camera */
576#define FTDI_ATIK_ATK16HRC_PID 0xDF33 /* ATIK ATK-16HRC Colour Camera */
577#define FTDI_ATIK_ATK16IC_PID 0xDF35 /* ATIK ATK-16IC Grayscale Camera */
578
579/*
580 * Protego product ids
581 */
582#define PROTEGO_SPECIAL_1 0xFC70 /* special/unknown device */
583#define PROTEGO_R2X0 0xFC71 /* R200-USB TRNG unit (R210, R220, and R230) */
584#define PROTEGO_SPECIAL_3 0xFC72 /* special/unknown device */
585#define PROTEGO_SPECIAL_4 0xFC73 /* special/unknown device */
586
587/*
588 * Gude Analog- und Digitalsysteme GmbH
589 */
590#define FTDI_GUDEADS_E808_PID 0xE808
591#define FTDI_GUDEADS_E809_PID 0xE809
592#define FTDI_GUDEADS_E80A_PID 0xE80A
593#define FTDI_GUDEADS_E80B_PID 0xE80B
594#define FTDI_GUDEADS_E80C_PID 0xE80C
595#define FTDI_GUDEADS_E80D_PID 0xE80D
596#define FTDI_GUDEADS_E80E_PID 0xE80E
597#define FTDI_GUDEADS_E80F_PID 0xE80F
598#define FTDI_GUDEADS_E888_PID 0xE888 /* Expert ISDN Control USB */
599#define FTDI_GUDEADS_E889_PID 0xE889 /* USB RS-232 OptoBridge */
600#define FTDI_GUDEADS_E88A_PID 0xE88A
601#define FTDI_GUDEADS_E88B_PID 0xE88B
602#define FTDI_GUDEADS_E88C_PID 0xE88C
603#define FTDI_GUDEADS_E88D_PID 0xE88D
604#define FTDI_GUDEADS_E88E_PID 0xE88E
605#define FTDI_GUDEADS_E88F_PID 0xE88F
606
607/*
608 * Linx Technologies product ids
609 */
610#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
611#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
612#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
613#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
614#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
615
616/* CCS Inc. ICDU/ICDU40 product ID - the FT232BM is used in an in-circuit-debugger */
617/* unit for PIC16's/PIC18's */
618#define FTDI_CCSICDU20_0_PID 0xF9D0
619#define FTDI_CCSICDU40_1_PID 0xF9D1
620#define FTDI_CCSMACHX_2_PID 0xF9D2
621#define FTDI_CCSLOAD_N_GO_3_PID 0xF9D3
622#define FTDI_CCSICDU64_4_PID 0xF9D4
623#define FTDI_CCSPRIME8_5_PID 0xF9D5
624
625/* Inside Accesso contactless reader (http://www.insidefr.com) */
626#define INSIDE_ACCESSO 0xFAD0
627
628/*
629 * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
630 */
631#define INTREPID_VID 0x093C
632#define INTREPID_VALUECAN_PID 0x0601
633#define INTREPID_NEOVI_PID 0x0701
634
635/*
636 * Falcom Wireless Communications GmbH
637 */
638#define FALCOM_VID 0x0F94 /* Vendor Id */
639#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */
640#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */
641
642/*
643 * SUUNTO product ids
644 */
645#define FTDI_SUUNTO_SPORTS_PID 0xF680 /* Suunto Sports instrument */
646
647/*
648 * Oceanic product ids
649 */
650#define FTDI_OCEANIC_PID 0xF460 /* Oceanic dive instrument */
651
652/*
653 * TTi (Thurlby Thandar Instruments)
654 */
655#define TTI_VID 0x103E /* Vendor Id */
656#define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */
657
658/*
659 * Definitions for B&B Electronics products.
660 */
661#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
662#define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */
663#define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */
664#define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */
665#define BANDB_USOPTL4_PID 0xAC11
666#define BANDB_USPTL4_PID 0xAC12
667#define BANDB_USO9ML2DR_2_PID 0xAC16
668#define BANDB_USO9ML2DR_PID 0xAC17
669#define BANDB_USOPTL4DR2_PID 0xAC18 /* USOPTL4R-2 2-port Isolated RS-232 Converter */
670#define BANDB_USOPTL4DR_PID 0xAC19
671#define BANDB_485USB9F_2W_PID 0xAC25
672#define BANDB_485USB9F_4W_PID 0xAC26
673#define BANDB_232USB9M_PID 0xAC27
674#define BANDB_485USBTB_2W_PID 0xAC33
675#define BANDB_485USBTB_4W_PID 0xAC34
676#define BANDB_TTL5USB9M_PID 0xAC49
677#define BANDB_TTL3USB9M_PID 0xAC50
678#define BANDB_ZZ_PROG1_USB_PID 0xBA02
679
680/*
681 * RM Michaelides CANview USB (http://www.rmcan.com)
682 * CAN fieldbus interface adapter, added by port GmbH www.port.de)
683 * Ian Abbott changed the macro names for consistency.
684 */
685#define FTDI_RM_CANVIEW_PID 0xfd60 /* Product Id */
686
687/*
688 * EVER Eco Pro UPS (http://www.ever.com.pl/)
689 */
690
691#define EVER_ECO_PRO_CDS 0xe520 /* RS-232 converter */
692
693/*
694 * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485,
695 * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices
696 * and I'm not entirely sure which are used by which.
697 */
698#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
699#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1
700
701/*
702 * Mobility Electronics products.
703 */
704#define MOBILITY_VID 0x1342
705#define MOBILITY_USB_SERIAL_PID 0x0202 /* EasiDock USB 200 serial */
706
707/*
708 * microHAM product IDs (http://www.microham.com).
709 * Submitted by Justin Burket (KL1RL) <zorton@jtan.com>
710 * and Mike Studer (K6EEP) <k6eep@hamsoftware.org>.
711 * Ian Abbott <abbotti@mev.co.uk> added a few more from the driver INF file.
712 */
713#define FTDI_MHAM_KW_PID 0xEEE8 /* USB-KW interface */
714#define FTDI_MHAM_YS_PID 0xEEE9 /* USB-YS interface */
715#define FTDI_MHAM_Y6_PID 0xEEEA /* USB-Y6 interface */
716#define FTDI_MHAM_Y8_PID 0xEEEB /* USB-Y8 interface */
717#define FTDI_MHAM_IC_PID 0xEEEC /* USB-IC interface */
718#define FTDI_MHAM_DB9_PID 0xEEED /* USB-DB9 interface */
719#define FTDI_MHAM_RS232_PID 0xEEEE /* USB-RS232 interface */
720#define FTDI_MHAM_Y9_PID 0xEEEF /* USB-Y9 interface */
721
722/*
723 * Active Robots product ids.
724 */
725#define FTDI_ACTIVE_ROBOTS_PID 0xE548 /* USB comms board */
726
727/*
728 * Xsens Technologies BV products (http://www.xsens.com).
729 */
730#define XSENS_CONVERTER_0_PID 0xD388
731#define XSENS_CONVERTER_1_PID 0xD389
732#define XSENS_CONVERTER_2_PID 0xD38A
733#define XSENS_CONVERTER_3_PID 0xD38B
734#define XSENS_CONVERTER_4_PID 0xD38C
735#define XSENS_CONVERTER_5_PID 0xD38D
736#define XSENS_CONVERTER_6_PID 0xD38E
737#define XSENS_CONVERTER_7_PID 0xD38F
738
739/*
740 * Teratronik product ids.
741 * Submitted by O. Wölfelschneider.
742 */
743#define FTDI_TERATRONIK_VCP_PID 0xEC88 /* Teratronik device (preferring VCP driver on windows) */
744#define FTDI_TERATRONIK_D2XX_PID 0xEC89 /* Teratronik device (preferring D2XX driver on windows) */
745
746/*
747 * Evolution Robotics products (http://www.evolution.com/).
748 * Submitted by Shawn M. Lavelle.
749 */
750#define EVOLUTION_VID 0xDEEE /* Vendor ID */
751#define EVOLUTION_ER1_PID 0x0300 /* ER1 Control Module */
752#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
753#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/
754#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */
755
756/* Pyramid Computer GmbH */
757#define FTDI_PYRAMID_PID 0xE6C8 /* Pyramid Appliance Display */
758
759/*
760 * NDI (www.ndigital.com) product ids
761 */
762#define FTDI_NDI_HUC_PID 0xDA70 /* NDI Host USB Converter */
763#define FTDI_NDI_SPECTRA_SCU_PID 0xDA71 /* NDI Spectra SCU */
764#define FTDI_NDI_FUTURE_2_PID 0xDA72 /* NDI future device #2 */
765#define FTDI_NDI_FUTURE_3_PID 0xDA73 /* NDI future device #3 */
766#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
767
768/*
769 * Posiflex inc retail equipment (http://www.posiflex.com.tw)
770 */
771#define POSIFLEX_VID 0x0d3a /* Vendor ID */
772#define POSIFLEX_PP7000_PID 0x0300 /* PP-7000II thermal printer */
773
774/*
775 * Westrex International devices submitted by Cory Lee
776 */
777#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */
778#define FTDI_WESTREX_MODEL_8900F_PID 0xDC01 /* Model 8900F */
779
780/*
781 * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
782 */
783#define FTDI_RRCIRKITS_LOCOBUFFER_PID 0xc7d0 /* LocoBuffer USB */
784
785/*
786 * Eclo (http://www.eclo.pt/) product IDs.
787 * PID 0xEA90 submitted by Martin Grill.
788 */
789#define FTDI_ECLO_COM_1WIRE_PID 0xEA90 /* COM to 1-Wire USB adaptor */
790
791/*
792 * Papouch products (http://www.papouch.com/)
793 * Submitted by Folkert van Heusden
794 */
795
796#define PAPOUCH_VID 0x5050 /* Vendor ID */
797#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
798#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
799
800/*
801 * ACG Identification Technologies GmbH products (http://www.acg.de/).
802 * Submitted by anton -at- goto10 -dot- org.
803 */ 23 */
804#define FTDI_ACG_HFDUAL_PID 0xDD20 /* HF Dual ISO Reader (RFID) */
805
806/*
807 * Yost Engineering, Inc. products (www.yostengineering.com).
808 * PID 0xE050 submitted by Aaron Prose.
809 */
810#define FTDI_YEI_SERVOCENTER31_PID 0xE050 /* YEI ServoCenter3.1 USB */
811
812/*
813 * ThorLabs USB motor drivers
814 */
815#define FTDI_THORLABS_PID 0xfaf0 /* ThorLabs USB motor drivers */
816
817/*
818 * Testo products (http://www.testo.com/)
819 * Submitted by Colin Leroy
820 */
821#define TESTO_VID 0x128D
822#define TESTO_USB_INTERFACE_PID 0x0001
823
824/*
825 * Gamma Scout (http://gamma-scout.com/). Submitted by rsc@runtux.com.
826 */
827#define FTDI_GAMMA_SCOUT_PID 0xD678 /* Gamma Scout online */
828
829/*
830 * Tactrix OpenPort (ECU) devices.
831 * OpenPort 1.3M submitted by Donour Sizemore.
832 * OpenPort 1.3S and 1.3U submitted by Ian Abbott.
833 */
834#define FTDI_TACTRIX_OPENPORT_13M_PID 0xCC48 /* OpenPort 1.3 Mitsubishi */
835#define FTDI_TACTRIX_OPENPORT_13S_PID 0xCC49 /* OpenPort 1.3 Subaru */
836#define FTDI_TACTRIX_OPENPORT_13U_PID 0xCC4A /* OpenPort 1.3 Universal */
837
838/*
839 * Telldus Technologies
840 */
841#define TELLDUS_VID 0x1781 /* Vendor ID */
842#define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */
843
844/*
845 * IBS elektronik product ids
846 * Submitted by Thomas Schleusener
847 */
848#define FTDI_IBS_US485_PID 0xff38 /* IBS US485 (USB<-->RS422/485 interface) */
849#define FTDI_IBS_PICPRO_PID 0xff39 /* IBS PIC-Programmer */
850#define FTDI_IBS_PCMCIA_PID 0xff3a /* IBS Card reader for PCMCIA SRAM-cards */
851#define FTDI_IBS_PK1_PID 0xff3b /* IBS PK1 - Particle counter */
852#define FTDI_IBS_RS232MON_PID 0xff3c /* IBS RS232 - Monitor */
853#define FTDI_IBS_APP70_PID 0xff3d /* APP 70 (dust monitoring system) */
854#define FTDI_IBS_PEDO_PID 0xff3e /* IBS PEDO-Modem (RF modem 868.35 MHz) */
855#define FTDI_IBS_PROD_PID 0xff3f /* future device */
856
857/*
858 * MaxStream devices www.maxstream.net
859 */
860#define FTDI_MAXSTREAM_PID 0xEE18 /* Xbee PKG-U Module */
861
862/* Olimex */
863#define OLIMEX_VID 0x15BA
864#define OLIMEX_ARM_USB_OCD_PID 0x0003
865
866/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
867/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
868#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
869#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
870
871/* www.elsterelectricity.com Elster Unicom III Optical Probe */
872#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */
873
874/*
875 * The Mobility Lab (TML)
876 * Submitted by Pierre Castella
877 */
878#define TML_VID 0x1B91 /* Vendor ID */
879#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */
880
881/* Propox devices */
882#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
883
884/* Rig Expert Ukraine devices */
885#define FTDI_REU_TINY_PID 0xED22 /* RigExpert Tiny */
886
887/* Domintell products http://www.domintell.com */
888#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */
889#define FTDI_DOMINTELL_DUSB_PID 0xEF51 /* DUSB01 module */
890
891/* Alti-2 products http://www.alti-2.com */
892#define ALTI2_VID 0x1BC9
893#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
894 24
895/* Commands */ 25/* Commands */
896#define FTDI_SIO_RESET 0 /* Reset the port */ 26#define FTDI_SIO_RESET 0 /* Reset the port */
@@ -910,86 +40,6 @@
910#define INTERFACE_C 3 40#define INTERFACE_C 3
911#define INTERFACE_D 4 41#define INTERFACE_D 4
912 42
913/*
914 * FIC / OpenMoko, Inc. http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3
915 * Submitted by Harald Welte <laforge@openmoko.org>
916 */
917#define FIC_VID 0x1457
918#define FIC_NEO1973_DEBUG_PID 0x5118
919
920/*
921 * RATOC REX-USB60F
922 */
923#define RATOC_VENDOR_ID 0x0584
924#define RATOC_PRODUCT_ID_USB60F 0xb020
925
926/*
927 * DIEBOLD BCS SE923
928 */
929#define DIEBOLD_BCS_SE923_PID 0xfb99
930
931/*
932 * Atmel STK541
933 */
934#define ATMEL_VID 0x03eb /* Vendor ID */
935#define STK541_PID 0x2109 /* Zigbee Controller */
936
937/*
938 * Dresden Elektronic Sensor Terminal Board
939 */
940#define DE_VID 0x1cf1 /* Vendor ID */
941#define STB_PID 0x0001 /* Sensor Terminal Board */
942#define WHT_PID 0x0004 /* Wireless Handheld Terminal */
943
944/*
945 * Blackfin gnICE JTAG
946 * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
947 */
948#define ADI_VID 0x0456
949#define ADI_GNICE_PID 0xF000
950#define ADI_GNICEPLUS_PID 0xF001
951
952/*
953 * JETI SPECTROMETER SPECBOS 1201
954 * http://www.jeti.com/products/sys/scb/scb1201.php
955 */
956#define JETI_VID 0x0c6c
957#define JETI_SPC1201_PID 0x04b2
958
959/*
960 * Marvell SheevaPlug
961 */
962#define MARVELL_VID 0x9e88
963#define MARVELL_SHEEVAPLUG_PID 0x9e8f
964
965#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */
966
967/*
968 * GN Otometrics (http://www.otometrics.com)
969 * Submitted by Ville Sundberg.
970 */
971#define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */
972#define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */
973
974/*
975 * Bayer Ascensia Contour blood glucose meter USB-converter cable.
976 * http://winglucofacts.com/cables/
977 */
978#define BAYER_VID 0x1A79
979#define BAYER_CONTOUR_CABLE_PID 0x6001
980
981/*
982 * Marvell OpenRD Base, Client
983 * http://www.open-rd.org
984 * OpenRD Base, Client use VID 0x0403
985 */
986#define MARVELL_OPENRD_PID 0x9e90
987
988/*
989 * Hameg HO820 and HO870 interface (using VID 0x0403)
990 */
991#define HAMEG_HO820_PID 0xed74
992#define HAMEG_HO870_PID 0xed71
993 43
994/* 44/*
995 * BmRequestType: 1100 0000b 45 * BmRequestType: 1100 0000b
@@ -1504,4 +554,3 @@ typedef enum {
1504 * B2..7 Length of message - (not including Byte 0) 554 * B2..7 Length of message - (not including Byte 0)
1505 * 555 *
1506 */ 556 */
1507
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
new file mode 100644
index 000000000000..da92b4952ffb
--- /dev/null
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -0,0 +1,986 @@
1/*
2 * vendor/product IDs (VID/PID) of devices using FTDI USB serial converters.
3 * Please keep numerically sorted within individual areas, thanks!
4 *
5 * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
6 * from Rudolf Gugler
7 *
8 */
9
10
11/**********************************/
12/***** devices using FTDI VID *****/
13/**********************************/
14
15
16#define FTDI_VID 0x0403 /* Vendor Id */
17
18
19/*** "original" FTDI device PIDs ***/
20
21#define FTDI_8U232AM_PID 0x6001 /* Similar device to SIO above */
22#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
23#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
24#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
25#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
26#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
27
28
29/*** third-party PIDs (using FTDI_VID) ***/
30
31/*
32 * Marvell OpenRD Base, Client
33 * http://www.open-rd.org
34 * OpenRD Base, Client use VID 0x0403
35 */
36#define MARVELL_OPENRD_PID 0x9e90
37
38/* www.candapter.com Ewert Energy Systems CANdapter device */
39#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
40
41/* OOCDlink by Joern Kaipf <joernk@web.de>
42 * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
43#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
44
45/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
46/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
47#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
48#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
49
50#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */
51
52/* OpenDCC (www.opendcc.de) product id */
53#define FTDI_OPENDCC_PID 0xBFD8
54#define FTDI_OPENDCC_SNIFFER_PID 0xBFD9
55#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
56#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
57
58/*
59 * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
60 */
61#define FTDI_RRCIRKITS_LOCOBUFFER_PID 0xc7d0 /* LocoBuffer USB */
62
63/* DMX4ALL DMX Interfaces */
64#define FTDI_DMX4ALL 0xC850
65
66/*
67 * ASK.fr devices
68 */
69#define FTDI_ASK_RDR400_PID 0xC991 /* ASK RDR 400 series card reader */
70
71/* www.starting-point-systems.com µChameleon device */
72#define FTDI_MICRO_CHAMELEON_PID 0xCAA0 /* Product Id */
73
74/*
75 * Tactrix OpenPort (ECU) devices.
76 * OpenPort 1.3M submitted by Donour Sizemore.
77 * OpenPort 1.3S and 1.3U submitted by Ian Abbott.
78 */
79#define FTDI_TACTRIX_OPENPORT_13M_PID 0xCC48 /* OpenPort 1.3 Mitsubishi */
80#define FTDI_TACTRIX_OPENPORT_13S_PID 0xCC49 /* OpenPort 1.3 Subaru */
81#define FTDI_TACTRIX_OPENPORT_13U_PID 0xCC4A /* OpenPort 1.3 Universal */
82
83/* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
84/* the VID is the standard ftdi vid (FTDI_VID) */
85#define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */
86#define FTDI_SCS_DEVICE_1_PID 0xD011 /* SCS Tracker / DSP TNC */
87#define FTDI_SCS_DEVICE_2_PID 0xD012
88#define FTDI_SCS_DEVICE_3_PID 0xD013
89#define FTDI_SCS_DEVICE_4_PID 0xD014
90#define FTDI_SCS_DEVICE_5_PID 0xD015
91#define FTDI_SCS_DEVICE_6_PID 0xD016
92#define FTDI_SCS_DEVICE_7_PID 0xD017
93
94/* iPlus device */
95#define FTDI_IPLUS_PID 0xD070 /* Product Id */
96#define FTDI_IPLUS2_PID 0xD071 /* Product Id */
97
98/*
99 * Gamma Scout (http://gamma-scout.com/). Submitted by rsc@runtux.com.
100 */
101#define FTDI_GAMMA_SCOUT_PID 0xD678 /* Gamma Scout online */
102
103/* Propox devices */
104#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
105
106/*
107 * Xsens Technologies BV products (http://www.xsens.com).
108 */
109#define XSENS_CONVERTER_0_PID 0xD388
110#define XSENS_CONVERTER_1_PID 0xD389
111#define XSENS_CONVERTER_2_PID 0xD38A
112#define XSENS_CONVERTER_3_PID 0xD38B
113#define XSENS_CONVERTER_4_PID 0xD38C
114#define XSENS_CONVERTER_5_PID 0xD38D
115#define XSENS_CONVERTER_6_PID 0xD38E
116#define XSENS_CONVERTER_7_PID 0xD38F
117
118/*
119 * NDI (www.ndigital.com) product ids
120 */
121#define FTDI_NDI_HUC_PID 0xDA70 /* NDI Host USB Converter */
122#define FTDI_NDI_SPECTRA_SCU_PID 0xDA71 /* NDI Spectra SCU */
123#define FTDI_NDI_FUTURE_2_PID 0xDA72 /* NDI future device #2 */
124#define FTDI_NDI_FUTURE_3_PID 0xDA73 /* NDI future device #3 */
125#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
126
127/*
128 * Westrex International devices submitted by Cory Lee
129 */
130#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */
131#define FTDI_WESTREX_MODEL_8900F_PID 0xDC01 /* Model 8900F */
132
133/*
134 * ACG Identification Technologies GmbH products (http://www.acg.de/).
135 * Submitted by anton -at- goto10 -dot- org.
136 */
137#define FTDI_ACG_HFDUAL_PID 0xDD20 /* HF Dual ISO Reader (RFID) */
138
139/*
140 * Definitions for Artemis astronomical USB based cameras
141 * Check it at http://www.artemisccd.co.uk/
142 */
143#define FTDI_ARTEMIS_PID 0xDF28 /* All Artemis Cameras */
144
145/*
146 * Definitions for ATIK Instruments astronomical USB based cameras
147 * Check it at http://www.atik-instruments.com/
148 */
149#define FTDI_ATIK_ATK16_PID 0xDF30 /* ATIK ATK-16 Grayscale Camera */
150#define FTDI_ATIK_ATK16C_PID 0xDF32 /* ATIK ATK-16C Colour Camera */
151#define FTDI_ATIK_ATK16HR_PID 0xDF31 /* ATIK ATK-16HR Grayscale Camera */
152#define FTDI_ATIK_ATK16HRC_PID 0xDF33 /* ATIK ATK-16HRC Colour Camera */
153#define FTDI_ATIK_ATK16IC_PID 0xDF35 /* ATIK ATK-16IC Grayscale Camera */
154
155/*
156 * Yost Engineering, Inc. products (www.yostengineering.com).
157 * PID 0xE050 submitted by Aaron Prose.
158 */
159#define FTDI_YEI_SERVOCENTER31_PID 0xE050 /* YEI ServoCenter3.1 USB */
160
161/*
162 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
163 * All of these devices use FTDI's vendor ID (0x0403).
164 *
165 * The previously included PID for the UO 100 module was incorrect.
166 * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
167 *
168 * Armin Laeuger originally sent the PID for the UM 100 module.
169 */
170#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
171#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
172#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
173#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Energy monitor EM 1010 PC */
174#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
175#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
176#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
177#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */
178#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */
179#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
180#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
181#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */
182#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */
183#define FTDI_ELV_USI2_PID 0xF06A /* USB-Schrittmotoren-Interface (USI 2) */
184#define FTDI_ELV_T1100_PID 0xF06B /* Thermometer (T 1100) */
185#define FTDI_ELV_PCD200_PID 0xF06C /* PC-Datenlogger (PCD 200) */
186#define FTDI_ELV_ULA200_PID 0xF06D /* USB-LCD-Ansteuerung (ULA 200) */
187#define FTDI_ELV_ALC8500_PID 0xF06E /* ALC 8500 Expert */
188#define FTDI_ELV_FHZ1000PC_PID 0xF06F /* FHZ 1000 PC */
189#define FTDI_ELV_UR100_PID 0xFB58 /* USB-RS232-Umsetzer (UR 100) */
190#define FTDI_ELV_UM100_PID 0xFB5A /* USB-Modul UM 100 */
191#define FTDI_ELV_UO100_PID 0xFB5B /* USB-Modul UO 100 */
192/* Additional ELV PIDs that default to using the FTDI D2XX drivers on
193 * MS Windows, rather than the FTDI Virtual Com Port drivers.
194 * Maybe these will be easier to use with the libftdi/libusb user-space
195 * drivers, or possibly the Comedi drivers in some cases. */
196#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */
197#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */
198#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperatur-Feuchte Messgeraet (TFM 100) */
199#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkuhr (UDF 77) */
200#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */
201
202/*
203 * EVER Eco Pro UPS (http://www.ever.com.pl/)
204 */
205
206#define EVER_ECO_PRO_CDS 0xe520 /* RS-232 converter */
207
208/*
209 * Active Robots product ids.
210 */
211#define FTDI_ACTIVE_ROBOTS_PID 0xE548 /* USB comms board */
212
213/* Pyramid Computer GmbH */
214#define FTDI_PYRAMID_PID 0xE6C8 /* Pyramid Appliance Display */
215
216/* www.elsterelectricity.com Elster Unicom III Optical Probe */
217#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */
218
219/*
220 * Gude Analog- und Digitalsysteme GmbH
221 */
222#define FTDI_GUDEADS_E808_PID 0xE808
223#define FTDI_GUDEADS_E809_PID 0xE809
224#define FTDI_GUDEADS_E80A_PID 0xE80A
225#define FTDI_GUDEADS_E80B_PID 0xE80B
226#define FTDI_GUDEADS_E80C_PID 0xE80C
227#define FTDI_GUDEADS_E80D_PID 0xE80D
228#define FTDI_GUDEADS_E80E_PID 0xE80E
229#define FTDI_GUDEADS_E80F_PID 0xE80F
230#define FTDI_GUDEADS_E888_PID 0xE888 /* Expert ISDN Control USB */
231#define FTDI_GUDEADS_E889_PID 0xE889 /* USB RS-232 OptoBridge */
232#define FTDI_GUDEADS_E88A_PID 0xE88A
233#define FTDI_GUDEADS_E88B_PID 0xE88B
234#define FTDI_GUDEADS_E88C_PID 0xE88C
235#define FTDI_GUDEADS_E88D_PID 0xE88D
236#define FTDI_GUDEADS_E88E_PID 0xE88E
237#define FTDI_GUDEADS_E88F_PID 0xE88F
238
239/*
240 * Eclo (http://www.eclo.pt/) product IDs.
241 * PID 0xEA90 submitted by Martin Grill.
242 */
243#define FTDI_ECLO_COM_1WIRE_PID 0xEA90 /* COM to 1-Wire USB adaptor */
244
245/* TNC-X USB-to-packet-radio adapter, versions prior to 3.0 (DLP module) */
246#define FTDI_TNC_X_PID 0xEBE0
247
248/*
249 * Teratronik product ids.
250 * Submitted by O. Wölfelschneider.
251 */
252#define FTDI_TERATRONIK_VCP_PID 0xEC88 /* Teratronik device (preferring VCP driver on windows) */
253#define FTDI_TERATRONIK_D2XX_PID 0xEC89 /* Teratronik device (preferring D2XX driver on windows) */
254
255/* Rig Expert Ukraine devices */
256#define FTDI_REU_TINY_PID 0xED22 /* RigExpert Tiny */
257
258/*
259 * Hameg HO820 and HO870 interface (using VID 0x0403)
260 */
261#define HAMEG_HO820_PID 0xed74
262#define HAMEG_HO870_PID 0xed71
263
264/*
265 * MaxStream devices www.maxstream.net
266 */
267#define FTDI_MAXSTREAM_PID 0xEE18 /* Xbee PKG-U Module */
268
269/*
270 * microHAM product IDs (http://www.microham.com).
271 * Submitted by Justin Burket (KL1RL) <zorton@jtan.com>
272 * and Mike Studer (K6EEP) <k6eep@hamsoftware.org>.
273 * Ian Abbott <abbotti@mev.co.uk> added a few more from the driver INF file.
274 */
275#define FTDI_MHAM_KW_PID 0xEEE8 /* USB-KW interface */
276#define FTDI_MHAM_YS_PID 0xEEE9 /* USB-YS interface */
277#define FTDI_MHAM_Y6_PID 0xEEEA /* USB-Y6 interface */
278#define FTDI_MHAM_Y8_PID 0xEEEB /* USB-Y8 interface */
279#define FTDI_MHAM_IC_PID 0xEEEC /* USB-IC interface */
280#define FTDI_MHAM_DB9_PID 0xEEED /* USB-DB9 interface */
281#define FTDI_MHAM_RS232_PID 0xEEEE /* USB-RS232 interface */
282#define FTDI_MHAM_Y9_PID 0xEEEF /* USB-Y9 interface */
283
284/* Domintell products http://www.domintell.com */
285#define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */
286#define FTDI_DOMINTELL_DUSB_PID 0xEF51 /* DUSB01 module */
287
288/*
289 * The following are the values for the Perle Systems
290 * UltraPort USB serial converters
291 */
292#define FTDI_PERLE_ULTRAPORT_PID 0xF0C0 /* Perle UltraPort Product Id */
293
294/* Sprog II (Andrew Crosland's SprogII DCC interface) */
295#define FTDI_SPROG_II 0xF0C8
296
297/* an infrared receiver for user access control with IR tags */
298#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
299
300/* ACT Solutions HomePro ZWave interface
301 (http://www.act-solutions.com/HomePro.htm) */
302#define FTDI_ACTZWAVE_PID 0xF2D0
303
304/*
305 * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485,
306 * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices
307 * and I'm not entirely sure which are used by which.
308 */
309#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
310#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1
311
312/*
313 * Linx Technologies product ids
314 */
315#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
316#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
317#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
318#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
319#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
320
321/*
322 * Oceanic product ids
323 */
324#define FTDI_OCEANIC_PID 0xF460 /* Oceanic dive instrument */
325
326/*
327 * SUUNTO product ids
328 */
329#define FTDI_SUUNTO_SPORTS_PID 0xF680 /* Suunto Sports instrument */
330
331/* USB-UIRT - An infrared receiver and transmitter using the 8U232AM chip */
332/* http://home.earthlink.net/~jrhees/USBUIRT/index.htm */
333#define FTDI_USB_UIRT_PID 0xF850 /* Product Id */
334
335/* CCS Inc. ICDU/ICDU40 product ID -
336 * the FT232BM is used in an in-circuit-debugger unit for PIC16's/PIC18's */
337#define FTDI_CCSICDU20_0_PID 0xF9D0
338#define FTDI_CCSICDU40_1_PID 0xF9D1
339#define FTDI_CCSMACHX_2_PID 0xF9D2
340#define FTDI_CCSLOAD_N_GO_3_PID 0xF9D3
341#define FTDI_CCSICDU64_4_PID 0xF9D4
342#define FTDI_CCSPRIME8_5_PID 0xF9D5
343
344/*
345 * The following are the values for the Matrix Orbital LCD displays,
346 * which are the FT232BM ( similar to the 8U232AM )
347 */
348#define FTDI_MTXORB_0_PID 0xFA00 /* Matrix Orbital Product Id */
349#define FTDI_MTXORB_1_PID 0xFA01 /* Matrix Orbital Product Id */
350#define FTDI_MTXORB_2_PID 0xFA02 /* Matrix Orbital Product Id */
351#define FTDI_MTXORB_3_PID 0xFA03 /* Matrix Orbital Product Id */
352#define FTDI_MTXORB_4_PID 0xFA04 /* Matrix Orbital Product Id */
353#define FTDI_MTXORB_5_PID 0xFA05 /* Matrix Orbital Product Id */
354#define FTDI_MTXORB_6_PID 0xFA06 /* Matrix Orbital Product Id */
355
356/*
357 * Home Electronics (www.home-electro.com) USB gadgets
358 */
359#define FTDI_HE_TIRA1_PID 0xFA78 /* Tira-1 IR transceiver */
360
361/* Inside Accesso contactless reader (http://www.insidefr.com) */
362#define INSIDE_ACCESSO 0xFAD0
363
364/*
365 * ThorLabs USB motor drivers
366 */
367#define FTDI_THORLABS_PID 0xfaf0 /* ThorLabs USB motor drivers */
368
369/*
370 * Protego product ids
371 */
372#define PROTEGO_SPECIAL_1 0xFC70 /* special/unknown device */
373#define PROTEGO_R2X0 0xFC71 /* R200-USB TRNG unit (R210, R220, and R230) */
374#define PROTEGO_SPECIAL_3 0xFC72 /* special/unknown device */
375#define PROTEGO_SPECIAL_4 0xFC73 /* special/unknown device */
376
377/*
378 * DSS-20 Sync Station for Sony Ericsson P800
379 */
380#define FTDI_DSS20_PID 0xFC82
381
382/* www.irtrans.de device */
383#define FTDI_IRTRANS_PID 0xFC60 /* Product Id */
384
385/*
386 * RM Michaelides CANview USB (http://www.rmcan.com) (FTDI_VID)
387 * CAN fieldbus interface adapter, added by port GmbH www.port.de)
388 * Ian Abbott changed the macro names for consistency.
389 */
390#define FTDI_RM_CANVIEW_PID 0xfd60 /* Product Id */
391/* www.thoughttechnology.com/ TT-USB provided with ProComp uses ftdi_sio */
392#define FTDI_TTUSB_PID 0xFF20 /* Product Id */
393
394#define FTDI_USBX_707_PID 0xF857 /* ADSTech IR Blaster USBX-707 (FTDI_VID) */
395
396#define FTDI_RELAIS_PID 0xFA10 /* Relais device from Rudolf Gugler */
397
398/*
399 * PCDJ use ftdi based dj-controllers. The following PID is
400 * for their DAC-2 device http://www.pcdjhardware.com/DAC2.asp
401 * (the VID is the standard ftdi vid (FTDI_VID), PID sent by Wouter Paesen)
402 */
403#define FTDI_PCDJ_DAC2_PID 0xFA88
404
405#define FTDI_R2000KU_TRUE_RNG 0xFB80 /* R2000KU TRUE RNG (FTDI_VID) */
406
407/*
408 * DIEBOLD BCS SE923 (FTDI_VID)
409 */
410#define DIEBOLD_BCS_SE923_PID 0xfb99
411
412/* www.crystalfontz.com devices
413 * - thanx for providing free devices for evaluation !
414 * they use the ftdi chipset for the USB interface
415 * and the vendor id is the same
416 */
417#define FTDI_XF_632_PID 0xFC08 /* 632: 16x2 Character Display */
418#define FTDI_XF_634_PID 0xFC09 /* 634: 20x4 Character Display */
419#define FTDI_XF_547_PID 0xFC0A /* 547: Two line Display */
420#define FTDI_XF_633_PID 0xFC0B /* 633: 16x2 Character Display with Keys */
421#define FTDI_XF_631_PID 0xFC0C /* 631: 20x2 Character Display */
422#define FTDI_XF_635_PID 0xFC0D /* 635: 20x4 Character Display */
423#define FTDI_XF_640_PID 0xFC0E /* 640: Two line Display */
424#define FTDI_XF_642_PID 0xFC0F /* 642: Two line Display */
425
426/*
427 * Video Networks Limited / Homechoice in the UK use an ftdi-based device
428 * for their 1Mb broadband internet service. The following PID is exhibited
429 * by the usb device supplied (the VID is the standard ftdi vid (FTDI_VID)
430 */
431#define FTDI_VNHCPCUSB_D_PID 0xfe38 /* Product Id */
432
433/* AlphaMicro Components AMC-232USB01 device (FTDI_VID) */
434#define FTDI_AMC232_PID 0xFF00 /* Product Id */
435
436/*
437 * IBS elektronik product ids (FTDI_VID)
438 * Submitted by Thomas Schleusener
439 */
440#define FTDI_IBS_US485_PID 0xff38 /* IBS US485 (USB<-->RS422/485 interface) */
441#define FTDI_IBS_PICPRO_PID 0xff39 /* IBS PIC-Programmer */
442#define FTDI_IBS_PCMCIA_PID 0xff3a /* IBS Card reader for PCMCIA SRAM-cards */
443#define FTDI_IBS_PK1_PID 0xff3b /* IBS PK1 - Particle counter */
444#define FTDI_IBS_RS232MON_PID 0xff3c /* IBS RS232 - Monitor */
445#define FTDI_IBS_APP70_PID 0xff3d /* APP 70 (dust monitoring system) */
446#define FTDI_IBS_PEDO_PID 0xff3e /* IBS PEDO-Modem (RF modem 868.35 MHz) */
447#define FTDI_IBS_PROD_PID 0xff3f /* future device */
448/* www.canusb.com Lawicel CANUSB device (FTDI_VID) */
449#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
450
451
452
453/********************************/
454/** third-party VID/PID combos **/
455/********************************/
456
457
458
459/*
460 * Atmel STK541
461 */
462#define ATMEL_VID 0x03eb /* Vendor ID */
463#define STK541_PID 0x2109 /* Zigbee Controller */
464
465/*
466 * Blackfin gnICE JTAG
467 * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
468 */
469#define ADI_VID 0x0456
470#define ADI_GNICE_PID 0xF000
471#define ADI_GNICEPLUS_PID 0xF001
472
473/*
474 * RATOC REX-USB60F
475 */
476#define RATOC_VENDOR_ID 0x0584
477#define RATOC_PRODUCT_ID_USB60F 0xb020
478
479/*
480 * Definitions for B&B Electronics products.
481 */
482#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
483#define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */
484#define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */
485#define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */
486#define BANDB_USOPTL4_PID 0xAC11
487#define BANDB_USPTL4_PID 0xAC12
488#define BANDB_USO9ML2DR_2_PID 0xAC16
489#define BANDB_USO9ML2DR_PID 0xAC17
490#define BANDB_USOPTL4DR2_PID 0xAC18 /* USOPTL4R-2 2-port Isolated RS-232 Converter */
491#define BANDB_USOPTL4DR_PID 0xAC19
492#define BANDB_485USB9F_2W_PID 0xAC25
493#define BANDB_485USB9F_4W_PID 0xAC26
494#define BANDB_232USB9M_PID 0xAC27
495#define BANDB_485USBTB_2W_PID 0xAC33
496#define BANDB_485USBTB_4W_PID 0xAC34
497#define BANDB_TTL5USB9M_PID 0xAC49
498#define BANDB_TTL3USB9M_PID 0xAC50
499#define BANDB_ZZ_PROG1_USB_PID 0xBA02
500
501/*
502 * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
503 */
504#define INTREPID_VID 0x093C
505#define INTREPID_VALUECAN_PID 0x0601
506#define INTREPID_NEOVI_PID 0x0701
507
508/*
509 * Definitions for ID TECH (www.idt-net.com) devices
510 */
511#define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */
512#define IDTECH_IDT1221U_PID 0x0300 /* IDT1221U USB to RS-232 adapter */
513
514/*
515 * Definitions for Omnidirectional Control Technology, Inc. devices
516 */
517#define OCT_VID 0x0B39 /* OCT vendor ID */
518/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
519/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
520/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
521#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
522
523/*
524 * Icom ID-1 digital transceiver
525 */
526
527#define ICOM_ID1_VID 0x0C26
528#define ICOM_ID1_PID 0x0004
529
530/*
531 * GN Otometrics (http://www.otometrics.com)
532 * Submitted by Ville Sundberg.
533 */
534#define GN_OTOMETRICS_VID 0x0c33 /* Vendor ID */
535#define AURICAL_USB_PID 0x0010 /* Aurical USB Audiometer */
536
537/*
538 * The following are the values for the Sealevel SeaLINK+ adapters.
539 * (Original list sent by Tuan Hoang. Ian Abbott renamed the macros and
540 * removed some PIDs that don't seem to match any existing products.)
541 */
542#define SEALEVEL_VID 0x0c52 /* Sealevel Vendor ID */
543#define SEALEVEL_2101_PID 0x2101 /* SeaLINK+232 (2101/2105) */
544#define SEALEVEL_2102_PID 0x2102 /* SeaLINK+485 (2102) */
545#define SEALEVEL_2103_PID 0x2103 /* SeaLINK+232I (2103) */
546#define SEALEVEL_2104_PID 0x2104 /* SeaLINK+485I (2104) */
547#define SEALEVEL_2106_PID 0x9020 /* SeaLINK+422 (2106) */
548#define SEALEVEL_2201_1_PID 0x2211 /* SeaPORT+2/232 (2201) Port 1 */
549#define SEALEVEL_2201_2_PID 0x2221 /* SeaPORT+2/232 (2201) Port 2 */
550#define SEALEVEL_2202_1_PID 0x2212 /* SeaPORT+2/485 (2202) Port 1 */
551#define SEALEVEL_2202_2_PID 0x2222 /* SeaPORT+2/485 (2202) Port 2 */
552#define SEALEVEL_2203_1_PID 0x2213 /* SeaPORT+2 (2203) Port 1 */
553#define SEALEVEL_2203_2_PID 0x2223 /* SeaPORT+2 (2203) Port 2 */
554#define SEALEVEL_2401_1_PID 0x2411 /* SeaPORT+4/232 (2401) Port 1 */
555#define SEALEVEL_2401_2_PID 0x2421 /* SeaPORT+4/232 (2401) Port 2 */
556#define SEALEVEL_2401_3_PID 0x2431 /* SeaPORT+4/232 (2401) Port 3 */
557#define SEALEVEL_2401_4_PID 0x2441 /* SeaPORT+4/232 (2401) Port 4 */
558#define SEALEVEL_2402_1_PID 0x2412 /* SeaPORT+4/485 (2402) Port 1 */
559#define SEALEVEL_2402_2_PID 0x2422 /* SeaPORT+4/485 (2402) Port 2 */
560#define SEALEVEL_2402_3_PID 0x2432 /* SeaPORT+4/485 (2402) Port 3 */
561#define SEALEVEL_2402_4_PID 0x2442 /* SeaPORT+4/485 (2402) Port 4 */
562#define SEALEVEL_2403_1_PID 0x2413 /* SeaPORT+4 (2403) Port 1 */
563#define SEALEVEL_2403_2_PID 0x2423 /* SeaPORT+4 (2403) Port 2 */
564#define SEALEVEL_2403_3_PID 0x2433 /* SeaPORT+4 (2403) Port 3 */
565#define SEALEVEL_2403_4_PID 0x2443 /* SeaPORT+4 (2403) Port 4 */
566#define SEALEVEL_2801_1_PID 0X2811 /* SeaLINK+8/232 (2801) Port 1 */
567#define SEALEVEL_2801_2_PID 0X2821 /* SeaLINK+8/232 (2801) Port 2 */
568#define SEALEVEL_2801_3_PID 0X2831 /* SeaLINK+8/232 (2801) Port 3 */
569#define SEALEVEL_2801_4_PID 0X2841 /* SeaLINK+8/232 (2801) Port 4 */
570#define SEALEVEL_2801_5_PID 0X2851 /* SeaLINK+8/232 (2801) Port 5 */
571#define SEALEVEL_2801_6_PID 0X2861 /* SeaLINK+8/232 (2801) Port 6 */
572#define SEALEVEL_2801_7_PID 0X2871 /* SeaLINK+8/232 (2801) Port 7 */
573#define SEALEVEL_2801_8_PID 0X2881 /* SeaLINK+8/232 (2801) Port 8 */
574#define SEALEVEL_2802_1_PID 0X2812 /* SeaLINK+8/485 (2802) Port 1 */
575#define SEALEVEL_2802_2_PID 0X2822 /* SeaLINK+8/485 (2802) Port 2 */
576#define SEALEVEL_2802_3_PID 0X2832 /* SeaLINK+8/485 (2802) Port 3 */
577#define SEALEVEL_2802_4_PID 0X2842 /* SeaLINK+8/485 (2802) Port 4 */
578#define SEALEVEL_2802_5_PID 0X2852 /* SeaLINK+8/485 (2802) Port 5 */
579#define SEALEVEL_2802_6_PID 0X2862 /* SeaLINK+8/485 (2802) Port 6 */
580#define SEALEVEL_2802_7_PID 0X2872 /* SeaLINK+8/485 (2802) Port 7 */
581#define SEALEVEL_2802_8_PID 0X2882 /* SeaLINK+8/485 (2802) Port 8 */
582#define SEALEVEL_2803_1_PID 0X2813 /* SeaLINK+8 (2803) Port 1 */
583#define SEALEVEL_2803_2_PID 0X2823 /* SeaLINK+8 (2803) Port 2 */
584#define SEALEVEL_2803_3_PID 0X2833 /* SeaLINK+8 (2803) Port 3 */
585#define SEALEVEL_2803_4_PID 0X2843 /* SeaLINK+8 (2803) Port 4 */
586#define SEALEVEL_2803_5_PID 0X2853 /* SeaLINK+8 (2803) Port 5 */
587#define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */
588#define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */
589#define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */
590
591/*
592 * JETI SPECTROMETER SPECBOS 1201
593 * http://www.jeti.com/products/sys/scb/scb1201.php
594 */
595#define JETI_VID 0x0c6c
596#define JETI_SPC1201_PID 0x04b2
597
598/*
599 * FTDI USB UART chips used in construction projects from the
600 * Elektor Electronics magazine (http://elektor-electronics.co.uk)
601 */
602#define ELEKTOR_VID 0x0C7D
603#define ELEKTOR_FT323R_PID 0x0005 /* RFID-Reader, issue 09-2006 */
604
605/*
606 * Posiflex inc retail equipment (http://www.posiflex.com.tw)
607 */
608#define POSIFLEX_VID 0x0d3a /* Vendor ID */
609#define POSIFLEX_PP7000_PID 0x0300 /* PP-7000II thermal printer */
610
611/*
612 * The following are the values for two KOBIL chipcard terminals.
613 */
614#define KOBIL_VID 0x0d46 /* KOBIL Vendor ID */
615#define KOBIL_CONV_B1_PID 0x2020 /* KOBIL Konverter for B1 */
616#define KOBIL_CONV_KAAN_PID 0x2021 /* KOBIL_Konverter for KAAN */
617
618#define FTDI_NF_RIC_VID 0x0DCD /* Vendor Id */
619#define FTDI_NF_RIC_PID 0x0001 /* Product Id */
620
621/*
622 * Falcom Wireless Communications GmbH
623 */
624#define FALCOM_VID 0x0F94 /* Vendor Id */
625#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */
626#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */
627
628/* Larsen and Brusgaard AltiTrack/USBtrack */
629#define LARSENBRUSGAARD_VID 0x0FD8
630#define LB_ALTITRACK_PID 0x0001
631
632/*
633 * TTi (Thurlby Thandar Instruments)
634 */
635#define TTI_VID 0x103E /* Vendor Id */
636#define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */
637
638/* Interbiometrics USB I/O Board */
639/* Developed for Interbiometrics by Rudolf Gugler */
640#define INTERBIOMETRICS_VID 0x1209
641#define INTERBIOMETRICS_IOBOARD_PID 0x1002
642#define INTERBIOMETRICS_MINI_IOBOARD_PID 0x1006
643
644/*
645 * Testo products (http://www.testo.com/)
646 * Submitted by Colin Leroy
647 */
648#define TESTO_VID 0x128D
649#define TESTO_USB_INTERFACE_PID 0x0001
650
651/*
652 * Mobility Electronics products.
653 */
654#define MOBILITY_VID 0x1342
655#define MOBILITY_USB_SERIAL_PID 0x0202 /* EasiDock USB 200 serial */
656
657/*
658 * FIC / OpenMoko, Inc. http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3
659 * Submitted by Harald Welte <laforge@openmoko.org>
660 */
661#define FIC_VID 0x1457
662#define FIC_NEO1973_DEBUG_PID 0x5118
663
664/* Olimex */
665#define OLIMEX_VID 0x15BA
666#define OLIMEX_ARM_USB_OCD_PID 0x0003
667
668/*
669 * Telldus Technologies
670 */
671#define TELLDUS_VID 0x1781 /* Vendor ID */
672#define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */
673
674/*
675 * Bayer Ascensia Contour blood glucose meter USB-converter cable.
676 * http://winglucofacts.com/cables/
677 */
678#define BAYER_VID 0x1A79
679#define BAYER_CONTOUR_CABLE_PID 0x6001
680
681/*
682 * The following are the values for the Matrix Orbital FTDI Range
683 * Anything in this range will use an FT232RL.
684 */
685#define MTXORB_VID 0x1B3D
686#define MTXORB_FTDI_RANGE_0100_PID 0x0100
687#define MTXORB_FTDI_RANGE_0101_PID 0x0101
688#define MTXORB_FTDI_RANGE_0102_PID 0x0102
689#define MTXORB_FTDI_RANGE_0103_PID 0x0103
690#define MTXORB_FTDI_RANGE_0104_PID 0x0104
691#define MTXORB_FTDI_RANGE_0105_PID 0x0105
692#define MTXORB_FTDI_RANGE_0106_PID 0x0106
693#define MTXORB_FTDI_RANGE_0107_PID 0x0107
694#define MTXORB_FTDI_RANGE_0108_PID 0x0108
695#define MTXORB_FTDI_RANGE_0109_PID 0x0109
696#define MTXORB_FTDI_RANGE_010A_PID 0x010A
697#define MTXORB_FTDI_RANGE_010B_PID 0x010B
698#define MTXORB_FTDI_RANGE_010C_PID 0x010C
699#define MTXORB_FTDI_RANGE_010D_PID 0x010D
700#define MTXORB_FTDI_RANGE_010E_PID 0x010E
701#define MTXORB_FTDI_RANGE_010F_PID 0x010F
702#define MTXORB_FTDI_RANGE_0110_PID 0x0110
703#define MTXORB_FTDI_RANGE_0111_PID 0x0111
704#define MTXORB_FTDI_RANGE_0112_PID 0x0112
705#define MTXORB_FTDI_RANGE_0113_PID 0x0113
706#define MTXORB_FTDI_RANGE_0114_PID 0x0114
707#define MTXORB_FTDI_RANGE_0115_PID 0x0115
708#define MTXORB_FTDI_RANGE_0116_PID 0x0116
709#define MTXORB_FTDI_RANGE_0117_PID 0x0117
710#define MTXORB_FTDI_RANGE_0118_PID 0x0118
711#define MTXORB_FTDI_RANGE_0119_PID 0x0119
712#define MTXORB_FTDI_RANGE_011A_PID 0x011A
713#define MTXORB_FTDI_RANGE_011B_PID 0x011B
714#define MTXORB_FTDI_RANGE_011C_PID 0x011C
715#define MTXORB_FTDI_RANGE_011D_PID 0x011D
716#define MTXORB_FTDI_RANGE_011E_PID 0x011E
717#define MTXORB_FTDI_RANGE_011F_PID 0x011F
718#define MTXORB_FTDI_RANGE_0120_PID 0x0120
719#define MTXORB_FTDI_RANGE_0121_PID 0x0121
720#define MTXORB_FTDI_RANGE_0122_PID 0x0122
721#define MTXORB_FTDI_RANGE_0123_PID 0x0123
722#define MTXORB_FTDI_RANGE_0124_PID 0x0124
723#define MTXORB_FTDI_RANGE_0125_PID 0x0125
724#define MTXORB_FTDI_RANGE_0126_PID 0x0126
725#define MTXORB_FTDI_RANGE_0127_PID 0x0127
726#define MTXORB_FTDI_RANGE_0128_PID 0x0128
727#define MTXORB_FTDI_RANGE_0129_PID 0x0129
728#define MTXORB_FTDI_RANGE_012A_PID 0x012A
729#define MTXORB_FTDI_RANGE_012B_PID 0x012B
730#define MTXORB_FTDI_RANGE_012C_PID 0x012C
731#define MTXORB_FTDI_RANGE_012D_PID 0x012D
732#define MTXORB_FTDI_RANGE_012E_PID 0x012E
733#define MTXORB_FTDI_RANGE_012F_PID 0x012F
734#define MTXORB_FTDI_RANGE_0130_PID 0x0130
735#define MTXORB_FTDI_RANGE_0131_PID 0x0131
736#define MTXORB_FTDI_RANGE_0132_PID 0x0132
737#define MTXORB_FTDI_RANGE_0133_PID 0x0133
738#define MTXORB_FTDI_RANGE_0134_PID 0x0134
739#define MTXORB_FTDI_RANGE_0135_PID 0x0135
740#define MTXORB_FTDI_RANGE_0136_PID 0x0136
741#define MTXORB_FTDI_RANGE_0137_PID 0x0137
742#define MTXORB_FTDI_RANGE_0138_PID 0x0138
743#define MTXORB_FTDI_RANGE_0139_PID 0x0139
744#define MTXORB_FTDI_RANGE_013A_PID 0x013A
745#define MTXORB_FTDI_RANGE_013B_PID 0x013B
746#define MTXORB_FTDI_RANGE_013C_PID 0x013C
747#define MTXORB_FTDI_RANGE_013D_PID 0x013D
748#define MTXORB_FTDI_RANGE_013E_PID 0x013E
749#define MTXORB_FTDI_RANGE_013F_PID 0x013F
750#define MTXORB_FTDI_RANGE_0140_PID 0x0140
751#define MTXORB_FTDI_RANGE_0141_PID 0x0141
752#define MTXORB_FTDI_RANGE_0142_PID 0x0142
753#define MTXORB_FTDI_RANGE_0143_PID 0x0143
754#define MTXORB_FTDI_RANGE_0144_PID 0x0144
755#define MTXORB_FTDI_RANGE_0145_PID 0x0145
756#define MTXORB_FTDI_RANGE_0146_PID 0x0146
757#define MTXORB_FTDI_RANGE_0147_PID 0x0147
758#define MTXORB_FTDI_RANGE_0148_PID 0x0148
759#define MTXORB_FTDI_RANGE_0149_PID 0x0149
760#define MTXORB_FTDI_RANGE_014A_PID 0x014A
761#define MTXORB_FTDI_RANGE_014B_PID 0x014B
762#define MTXORB_FTDI_RANGE_014C_PID 0x014C
763#define MTXORB_FTDI_RANGE_014D_PID 0x014D
764#define MTXORB_FTDI_RANGE_014E_PID 0x014E
765#define MTXORB_FTDI_RANGE_014F_PID 0x014F
766#define MTXORB_FTDI_RANGE_0150_PID 0x0150
767#define MTXORB_FTDI_RANGE_0151_PID 0x0151
768#define MTXORB_FTDI_RANGE_0152_PID 0x0152
769#define MTXORB_FTDI_RANGE_0153_PID 0x0153
770#define MTXORB_FTDI_RANGE_0154_PID 0x0154
771#define MTXORB_FTDI_RANGE_0155_PID 0x0155
772#define MTXORB_FTDI_RANGE_0156_PID 0x0156
773#define MTXORB_FTDI_RANGE_0157_PID 0x0157
774#define MTXORB_FTDI_RANGE_0158_PID 0x0158
775#define MTXORB_FTDI_RANGE_0159_PID 0x0159
776#define MTXORB_FTDI_RANGE_015A_PID 0x015A
777#define MTXORB_FTDI_RANGE_015B_PID 0x015B
778#define MTXORB_FTDI_RANGE_015C_PID 0x015C
779#define MTXORB_FTDI_RANGE_015D_PID 0x015D
780#define MTXORB_FTDI_RANGE_015E_PID 0x015E
781#define MTXORB_FTDI_RANGE_015F_PID 0x015F
782#define MTXORB_FTDI_RANGE_0160_PID 0x0160
783#define MTXORB_FTDI_RANGE_0161_PID 0x0161
784#define MTXORB_FTDI_RANGE_0162_PID 0x0162
785#define MTXORB_FTDI_RANGE_0163_PID 0x0163
786#define MTXORB_FTDI_RANGE_0164_PID 0x0164
787#define MTXORB_FTDI_RANGE_0165_PID 0x0165
788#define MTXORB_FTDI_RANGE_0166_PID 0x0166
789#define MTXORB_FTDI_RANGE_0167_PID 0x0167
790#define MTXORB_FTDI_RANGE_0168_PID 0x0168
791#define MTXORB_FTDI_RANGE_0169_PID 0x0169
792#define MTXORB_FTDI_RANGE_016A_PID 0x016A
793#define MTXORB_FTDI_RANGE_016B_PID 0x016B
794#define MTXORB_FTDI_RANGE_016C_PID 0x016C
795#define MTXORB_FTDI_RANGE_016D_PID 0x016D
796#define MTXORB_FTDI_RANGE_016E_PID 0x016E
797#define MTXORB_FTDI_RANGE_016F_PID 0x016F
798#define MTXORB_FTDI_RANGE_0170_PID 0x0170
799#define MTXORB_FTDI_RANGE_0171_PID 0x0171
800#define MTXORB_FTDI_RANGE_0172_PID 0x0172
801#define MTXORB_FTDI_RANGE_0173_PID 0x0173
802#define MTXORB_FTDI_RANGE_0174_PID 0x0174
803#define MTXORB_FTDI_RANGE_0175_PID 0x0175
804#define MTXORB_FTDI_RANGE_0176_PID 0x0176
805#define MTXORB_FTDI_RANGE_0177_PID 0x0177
806#define MTXORB_FTDI_RANGE_0178_PID 0x0178
807#define MTXORB_FTDI_RANGE_0179_PID 0x0179
808#define MTXORB_FTDI_RANGE_017A_PID 0x017A
809#define MTXORB_FTDI_RANGE_017B_PID 0x017B
810#define MTXORB_FTDI_RANGE_017C_PID 0x017C
811#define MTXORB_FTDI_RANGE_017D_PID 0x017D
812#define MTXORB_FTDI_RANGE_017E_PID 0x017E
813#define MTXORB_FTDI_RANGE_017F_PID 0x017F
814#define MTXORB_FTDI_RANGE_0180_PID 0x0180
815#define MTXORB_FTDI_RANGE_0181_PID 0x0181
816#define MTXORB_FTDI_RANGE_0182_PID 0x0182
817#define MTXORB_FTDI_RANGE_0183_PID 0x0183
818#define MTXORB_FTDI_RANGE_0184_PID 0x0184
819#define MTXORB_FTDI_RANGE_0185_PID 0x0185
820#define MTXORB_FTDI_RANGE_0186_PID 0x0186
821#define MTXORB_FTDI_RANGE_0187_PID 0x0187
822#define MTXORB_FTDI_RANGE_0188_PID 0x0188
823#define MTXORB_FTDI_RANGE_0189_PID 0x0189
824#define MTXORB_FTDI_RANGE_018A_PID 0x018A
825#define MTXORB_FTDI_RANGE_018B_PID 0x018B
826#define MTXORB_FTDI_RANGE_018C_PID 0x018C
827#define MTXORB_FTDI_RANGE_018D_PID 0x018D
828#define MTXORB_FTDI_RANGE_018E_PID 0x018E
829#define MTXORB_FTDI_RANGE_018F_PID 0x018F
830#define MTXORB_FTDI_RANGE_0190_PID 0x0190
831#define MTXORB_FTDI_RANGE_0191_PID 0x0191
832#define MTXORB_FTDI_RANGE_0192_PID 0x0192
833#define MTXORB_FTDI_RANGE_0193_PID 0x0193
834#define MTXORB_FTDI_RANGE_0194_PID 0x0194
835#define MTXORB_FTDI_RANGE_0195_PID 0x0195
836#define MTXORB_FTDI_RANGE_0196_PID 0x0196
837#define MTXORB_FTDI_RANGE_0197_PID 0x0197
838#define MTXORB_FTDI_RANGE_0198_PID 0x0198
839#define MTXORB_FTDI_RANGE_0199_PID 0x0199
840#define MTXORB_FTDI_RANGE_019A_PID 0x019A
841#define MTXORB_FTDI_RANGE_019B_PID 0x019B
842#define MTXORB_FTDI_RANGE_019C_PID 0x019C
843#define MTXORB_FTDI_RANGE_019D_PID 0x019D
844#define MTXORB_FTDI_RANGE_019E_PID 0x019E
845#define MTXORB_FTDI_RANGE_019F_PID 0x019F
846#define MTXORB_FTDI_RANGE_01A0_PID 0x01A0
847#define MTXORB_FTDI_RANGE_01A1_PID 0x01A1
848#define MTXORB_FTDI_RANGE_01A2_PID 0x01A2
849#define MTXORB_FTDI_RANGE_01A3_PID 0x01A3
850#define MTXORB_FTDI_RANGE_01A4_PID 0x01A4
851#define MTXORB_FTDI_RANGE_01A5_PID 0x01A5
852#define MTXORB_FTDI_RANGE_01A6_PID 0x01A6
853#define MTXORB_FTDI_RANGE_01A7_PID 0x01A7
854#define MTXORB_FTDI_RANGE_01A8_PID 0x01A8
855#define MTXORB_FTDI_RANGE_01A9_PID 0x01A9
856#define MTXORB_FTDI_RANGE_01AA_PID 0x01AA
857#define MTXORB_FTDI_RANGE_01AB_PID 0x01AB
858#define MTXORB_FTDI_RANGE_01AC_PID 0x01AC
859#define MTXORB_FTDI_RANGE_01AD_PID 0x01AD
860#define MTXORB_FTDI_RANGE_01AE_PID 0x01AE
861#define MTXORB_FTDI_RANGE_01AF_PID 0x01AF
862#define MTXORB_FTDI_RANGE_01B0_PID 0x01B0
863#define MTXORB_FTDI_RANGE_01B1_PID 0x01B1
864#define MTXORB_FTDI_RANGE_01B2_PID 0x01B2
865#define MTXORB_FTDI_RANGE_01B3_PID 0x01B3
866#define MTXORB_FTDI_RANGE_01B4_PID 0x01B4
867#define MTXORB_FTDI_RANGE_01B5_PID 0x01B5
868#define MTXORB_FTDI_RANGE_01B6_PID 0x01B6
869#define MTXORB_FTDI_RANGE_01B7_PID 0x01B7
870#define MTXORB_FTDI_RANGE_01B8_PID 0x01B8
871#define MTXORB_FTDI_RANGE_01B9_PID 0x01B9
872#define MTXORB_FTDI_RANGE_01BA_PID 0x01BA
873#define MTXORB_FTDI_RANGE_01BB_PID 0x01BB
874#define MTXORB_FTDI_RANGE_01BC_PID 0x01BC
875#define MTXORB_FTDI_RANGE_01BD_PID 0x01BD
876#define MTXORB_FTDI_RANGE_01BE_PID 0x01BE
877#define MTXORB_FTDI_RANGE_01BF_PID 0x01BF
878#define MTXORB_FTDI_RANGE_01C0_PID 0x01C0
879#define MTXORB_FTDI_RANGE_01C1_PID 0x01C1
880#define MTXORB_FTDI_RANGE_01C2_PID 0x01C2
881#define MTXORB_FTDI_RANGE_01C3_PID 0x01C3
882#define MTXORB_FTDI_RANGE_01C4_PID 0x01C4
883#define MTXORB_FTDI_RANGE_01C5_PID 0x01C5
884#define MTXORB_FTDI_RANGE_01C6_PID 0x01C6
885#define MTXORB_FTDI_RANGE_01C7_PID 0x01C7
886#define MTXORB_FTDI_RANGE_01C8_PID 0x01C8
887#define MTXORB_FTDI_RANGE_01C9_PID 0x01C9
888#define MTXORB_FTDI_RANGE_01CA_PID 0x01CA
889#define MTXORB_FTDI_RANGE_01CB_PID 0x01CB
890#define MTXORB_FTDI_RANGE_01CC_PID 0x01CC
891#define MTXORB_FTDI_RANGE_01CD_PID 0x01CD
892#define MTXORB_FTDI_RANGE_01CE_PID 0x01CE
893#define MTXORB_FTDI_RANGE_01CF_PID 0x01CF
894#define MTXORB_FTDI_RANGE_01D0_PID 0x01D0
895#define MTXORB_FTDI_RANGE_01D1_PID 0x01D1
896#define MTXORB_FTDI_RANGE_01D2_PID 0x01D2
897#define MTXORB_FTDI_RANGE_01D3_PID 0x01D3
898#define MTXORB_FTDI_RANGE_01D4_PID 0x01D4
899#define MTXORB_FTDI_RANGE_01D5_PID 0x01D5
900#define MTXORB_FTDI_RANGE_01D6_PID 0x01D6
901#define MTXORB_FTDI_RANGE_01D7_PID 0x01D7
902#define MTXORB_FTDI_RANGE_01D8_PID 0x01D8
903#define MTXORB_FTDI_RANGE_01D9_PID 0x01D9
904#define MTXORB_FTDI_RANGE_01DA_PID 0x01DA
905#define MTXORB_FTDI_RANGE_01DB_PID 0x01DB
906#define MTXORB_FTDI_RANGE_01DC_PID 0x01DC
907#define MTXORB_FTDI_RANGE_01DD_PID 0x01DD
908#define MTXORB_FTDI_RANGE_01DE_PID 0x01DE
909#define MTXORB_FTDI_RANGE_01DF_PID 0x01DF
910#define MTXORB_FTDI_RANGE_01E0_PID 0x01E0
911#define MTXORB_FTDI_RANGE_01E1_PID 0x01E1
912#define MTXORB_FTDI_RANGE_01E2_PID 0x01E2
913#define MTXORB_FTDI_RANGE_01E3_PID 0x01E3
914#define MTXORB_FTDI_RANGE_01E4_PID 0x01E4
915#define MTXORB_FTDI_RANGE_01E5_PID 0x01E5
916#define MTXORB_FTDI_RANGE_01E6_PID 0x01E6
917#define MTXORB_FTDI_RANGE_01E7_PID 0x01E7
918#define MTXORB_FTDI_RANGE_01E8_PID 0x01E8
919#define MTXORB_FTDI_RANGE_01E9_PID 0x01E9
920#define MTXORB_FTDI_RANGE_01EA_PID 0x01EA
921#define MTXORB_FTDI_RANGE_01EB_PID 0x01EB
922#define MTXORB_FTDI_RANGE_01EC_PID 0x01EC
923#define MTXORB_FTDI_RANGE_01ED_PID 0x01ED
924#define MTXORB_FTDI_RANGE_01EE_PID 0x01EE
925#define MTXORB_FTDI_RANGE_01EF_PID 0x01EF
926#define MTXORB_FTDI_RANGE_01F0_PID 0x01F0
927#define MTXORB_FTDI_RANGE_01F1_PID 0x01F1
928#define MTXORB_FTDI_RANGE_01F2_PID 0x01F2
929#define MTXORB_FTDI_RANGE_01F3_PID 0x01F3
930#define MTXORB_FTDI_RANGE_01F4_PID 0x01F4
931#define MTXORB_FTDI_RANGE_01F5_PID 0x01F5
932#define MTXORB_FTDI_RANGE_01F6_PID 0x01F6
933#define MTXORB_FTDI_RANGE_01F7_PID 0x01F7
934#define MTXORB_FTDI_RANGE_01F8_PID 0x01F8
935#define MTXORB_FTDI_RANGE_01F9_PID 0x01F9
936#define MTXORB_FTDI_RANGE_01FA_PID 0x01FA
937#define MTXORB_FTDI_RANGE_01FB_PID 0x01FB
938#define MTXORB_FTDI_RANGE_01FC_PID 0x01FC
939#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD
940#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE
941#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF
942
943
944
945/*
946 * The Mobility Lab (TML)
947 * Submitted by Pierre Castella
948 */
949#define TML_VID 0x1B91 /* Vendor ID */
950#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */
951
952/* Alti-2 products http://www.alti-2.com */
953#define ALTI2_VID 0x1BC9
954#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
955
956/*
957 * Dresden Elektronic Sensor Terminal Board
958 */
959#define DE_VID 0x1cf1 /* Vendor ID */
960#define STB_PID 0x0001 /* Sensor Terminal Board */
961#define WHT_PID 0x0004 /* Wireless Handheld Terminal */
962
963/*
964 * Papouch products (http://www.papouch.com/)
965 * Submitted by Folkert van Heusden
966 */
967
968#define PAPOUCH_VID 0x5050 /* Vendor ID */
969#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
970#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
971
972/*
973 * Marvell SheevaPlug
974 */
975#define MARVELL_VID 0x9e88
976#define MARVELL_SHEEVAPLUG_PID 0x9e8f
977
978/*
979 * Evolution Robotics products (http://www.evolution.com/).
980 * Submitted by Shawn M. Lavelle.
981 */
982#define EVOLUTION_VID 0xDEEE /* Vendor ID */
983#define EVOLUTION_ER1_PID 0x0300 /* ER1 Control Module */
984#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
985#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/
986#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */
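
The new ftdi_sio_ids.h only collects VID/PID macros; device matching still happens through the usb_device_id table in ftdi_sio.c. Below is a minimal sketch of how such a definition is typically consumed; the table name and the two chosen entries are illustrative and not quoted from this commit.

/* Illustrative excerpt only: the real table in drivers/usb/serial/ftdi_sio.c
 * lists every PID defined in ftdi_sio_ids.h and is far longer. */
#include <linux/usb.h>

#include "ftdi_sio_ids.h"	/* FTDI_VID, FTDI_8U232AM_PID, BANDB_*, ... */

static struct usb_device_id id_table_sketch[] = {
	{ USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) },	/* FTDI's own VID + PID */
	{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },	/* third-party VID/PID combo */
	{ }						/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table_sketch);
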
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index bbe005cefcfb..f1ea3a33b6e6 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -276,7 +276,7 @@ static int usb_serial_generic_write_start(struct usb_serial_port *port)
         if (port->write_urb_busy)
                 start_io = false;
         else {
-                start_io = (__kfifo_len(port->write_fifo) != 0);
+                start_io = (kfifo_len(&port->write_fifo) != 0);
                 port->write_urb_busy = start_io;
         }
         spin_unlock_irqrestore(&port->lock, flags);
@@ -285,7 +285,7 @@ static int usb_serial_generic_write_start(struct usb_serial_port *port)
         return 0;
 
         data = port->write_urb->transfer_buffer;
-        count = kfifo_get(port->write_fifo, data, port->bulk_out_size);
+        count = kfifo_out_locked(&port->write_fifo, data, port->bulk_out_size, &port->lock);
         usb_serial_debug_data(debug, &port->dev, __func__, count, data);
 
         /* set up our urb */
@@ -345,7 +345,7 @@ int usb_serial_generic_write(struct tty_struct *tty,
                 return usb_serial_multi_urb_write(tty, port,
                                                   buf, count);
 
-        count = kfifo_put(port->write_fifo, buf, count);
+        count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
         result = usb_serial_generic_write_start(port);
 
         if (result >= 0)
@@ -370,7 +370,7 @@ int usb_serial_generic_write_room(struct tty_struct *tty)
                         (serial->type->max_in_flight_urbs -
                          port->urbs_in_flight);
         } else if (serial->num_bulk_out)
-                room = port->write_fifo->size - __kfifo_len(port->write_fifo);
+                room = kfifo_avail(&port->write_fifo);
         spin_unlock_irqrestore(&port->lock, flags);
 
         dbg("%s - returns %d", __func__, room);
@@ -391,7 +391,7 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
                 chars = port->tx_bytes_flight;
                 spin_unlock_irqrestore(&port->lock, flags);
         } else if (serial->num_bulk_out)
-                chars = kfifo_len(port->write_fifo);
+                chars = kfifo_len(&port->write_fifo);
 
         dbg("%s - returns %d", __func__, chars);
         return chars;
@@ -507,7 +507,7 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
         if (status) {
                 dbg("%s - nonzero multi-urb write bulk status "
                         "received: %d", __func__, status);
-                kfifo_reset(port->write_fifo);
+                kfifo_reset_out(&port->write_fifo);
         } else
                 usb_serial_generic_write_start(port);
         }
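
For reference, a minimal sketch (not part of the patch) of the reworked kfifo interface that the generic.c hunks above migrate to: the fifo is now embedded by value, kfifo_alloc() returns an errno instead of an ERR_PTR, and the *_locked helpers take the caller's spinlock. All demo_* names are placeholders.

/*
 * Sketch only: exercises the kfifo calls used by the new usb-serial code.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/spinlock.h>

static struct kfifo demo_fifo;          /* embedded struct, no pointer/IS_ERR */
static DEFINE_SPINLOCK(demo_lock);      /* lock handed to the *_locked helpers */

static int __init kfifo_demo_init(void)
{
        unsigned char out[8];
        unsigned int copied;

        if (kfifo_alloc(&demo_fifo, 64, GFP_KERNEL))    /* 0 on success */
                return -ENOMEM;

        copied = kfifo_in_locked(&demo_fifo, "hello", 5, &demo_lock);
        pr_info("queued %u bytes, %u used, %u free\n",
                copied, kfifo_len(&demo_fifo), kfifo_avail(&demo_fifo));

        copied = kfifo_out_locked(&demo_fifo, out, sizeof(out), &demo_lock);
        pr_info("dequeued %u bytes\n", copied);

        kfifo_reset_out(&demo_fifo);    /* drop any unread output data */
        return 0;
}

static void __exit kfifo_demo_exit(void)
{
        kfifo_free(&demo_fifo);
}

module_init(kfifo_demo_init);
module_exit(kfifo_demo_exit);
MODULE_LICENSE("GPL");
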
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 485fa9c5b107..2cfe2451ed97 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -127,8 +127,9 @@
 #define BANDB_DEVICE_ID_US9ML2_4        0xAC30
 #define BANDB_DEVICE_ID_USPTL4_2        0xAC31
 #define BANDB_DEVICE_ID_USPTL4_4        0xAC32
 #define BANDB_DEVICE_ID_USOPTL4_2       0xAC42
 #define BANDB_DEVICE_ID_USOPTL4_4       0xAC44
+#define BANDB_DEVICE_ID_USOPTL2_4       0xAC24
 
 /* This driver also supports
  * ATEN UC2324 device using Moschip MCS7840
@@ -191,6 +192,7 @@ static struct usb_device_id moschip_port_id_table[] = {
         {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
         {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
         {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
         {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
         {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
         {}                      /* terminating entry */
@@ -207,6 +209,7 @@ static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
         {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
         {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
         {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
         {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
         {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
         {}                      /* terminating entry */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 9a2b903492ec..6e94a6711f08 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -340,6 +340,10 @@ static int option_resume(struct usb_serial *serial);
 #define FOUR_G_SYSTEMS_VENDOR_ID        0x1c9e
 #define FOUR_G_SYSTEMS_PRODUCT_W14      0x9603
 
+/* Haier products */
+#define HAIER_VENDOR_ID                 0x201e
+#define HAIER_PRODUCT_CE100             0x2009
+
 static struct usb_device_id option_ids[] = {
         { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
         { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -641,6 +645,7 @@ static struct usb_device_id option_ids[] = {
         { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
         { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
         { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
+        { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
         { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
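
The mos7840.c and option.c hunks above follow the usual pattern for teaching a USB serial driver about one more device: define the VID/PID, add a USB_DEVICE() entry, and keep the table exported through MODULE_DEVICE_TABLE(). A generic sketch with placeholder IDs (not a real device):

#include <linux/module.h>
#include <linux/usb.h>

#define DEMO_VENDOR_ID   0x1234          /* placeholder VID */
#define DEMO_PRODUCT_ID  0x5678          /* placeholder PID */

static const struct usb_device_id demo_id_table[] = {
        { USB_DEVICE(DEMO_VENDOR_ID, DEMO_PRODUCT_ID) },
        { }     /* terminating entry */
};
/* Lets udev/modprobe load the driver automatically when the device appears. */
MODULE_DEVICE_TABLE(usb, demo_id_table);
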
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 4543f359be75..33c85f7084f8 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -595,8 +595,7 @@ static void port_release(struct device *dev)
         usb_free_urb(port->write_urb);
         usb_free_urb(port->interrupt_in_urb);
         usb_free_urb(port->interrupt_out_urb);
-        if (!IS_ERR(port->write_fifo) && port->write_fifo)
-                kfifo_free(port->write_fifo);
+        kfifo_free(&port->write_fifo);
         kfree(port->bulk_in_buffer);
         kfree(port->bulk_out_buffer);
         kfree(port->interrupt_in_buffer);
@@ -939,9 +938,7 @@ int usb_serial_probe(struct usb_interface *interface,
                 dev_err(&interface->dev, "No free urbs available\n");
                 goto probe_error;
         }
-        port->write_fifo = kfifo_alloc(PAGE_SIZE, GFP_KERNEL,
-                                        &port->lock);
-        if (IS_ERR(port->write_fifo))
+        if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
                 goto probe_error;
         buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
         port->bulk_out_size = buffer_size;
diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c
index 2051c9dc813b..b7687c55fe16 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/atafb.c
@@ -2245,9 +2245,6 @@ static int ext_setcolreg(unsigned int regno, unsigned int red,
         if (regno > 255)
                 return 1;
 
-        if (regno > 255)
-                return 1;
-
         switch (external_card_type) {
         case IS_VGA:
                 OUTB(0x3c8, regno);
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 4c10edecfb66..86d95c228adb 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -85,7 +85,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
         return error ? data->current_brightness : reg_val;
 }
 
-static struct backlight_ops adp5520_bl_ops = {
+static const struct backlight_ops adp5520_bl_ops = {
         .update_status  = adp5520_bl_update_status,
         .get_brightness = adp5520_bl_get_brightness,
 };
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
index 2c3bdfc620b7..d769b0bab21a 100644
--- a/drivers/video/backlight/adx_bl.c
+++ b/drivers/video/backlight/adx_bl.c
@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
         return 1;
 }
 
-static struct backlight_ops adx_backlight_ops = {
+static const struct backlight_ops adx_backlight_ops = {
         .options = 0,
         .update_status = adx_backlight_update_status,
         .get_brightness = adx_backlight_get_brightness,
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index 2cf7ba52f67c..f625ffc69ad3 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
         return pwm_channel_enable(&pwmbl->pwmc);
 }
 
-static struct backlight_ops atmel_pwm_bl_ops = {
+static const struct backlight_ops atmel_pwm_bl_ops = {
         .get_brightness = atmel_pwm_bl_get_intensity,
         .update_status  = atmel_pwm_bl_set_intensity,
 };
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 6615ac7fa60a..18829cf68b1b 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
  * ERR_PTR() or a pointer to the newly allocated device.
  */
 struct backlight_device *backlight_device_register(const char *name,
-        struct device *parent, void *devdata, struct backlight_ops *ops)
+        struct device *parent, void *devdata, const struct backlight_ops *ops)
 {
         struct backlight_device *new_bd;
         int rc;
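
Under the constified prototype above, a driver's ops table can live in read-only data. A minimal sketch of the registration pattern, using placeholder demo_bl_* names and the four-argument backlight_device_register() shown in this tree:

#include <linux/backlight.h>
#include <linux/module.h>

static int demo_bl_update_status(struct backlight_device *bl)
{
        /* push bl->props.brightness to the hardware here */
        return 0;
}

static int demo_bl_get_brightness(struct backlight_device *bl)
{
        return bl->props.brightness;
}

static const struct backlight_ops demo_bl_ops = {
        .options        = BL_CORE_SUSPENDRESUME,
        .update_status  = demo_bl_update_status,
        .get_brightness = demo_bl_get_brightness,
};

/* In probe():
 *      bl = backlight_device_register("demo-bl", &pdev->dev, priv, &demo_bl_ops);
 *      if (IS_ERR(bl))
 *              return PTR_ERR(bl);
 */
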
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 96774949cd30..b4bcf8043797 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
 }
 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
 
-static struct backlight_ops corgi_bl_ops = {
+static const struct backlight_ops corgi_bl_ops = {
         .get_brightness = corgi_bl_get_intensity,
         .update_status  = corgi_bl_update_status,
 };
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index b9fe62b475c6..da86db4374a0 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
         return intensity;
 }
 
-static struct backlight_ops cr_backlight_ops = {
+static const struct backlight_ops cr_backlight_ops = {
         .get_brightness = cr_backlight_get_intensity,
         .update_status = cr_backlight_set_intensity,
 };
@@ -201,7 +201,7 @@ static int cr_backlight_probe(struct platform_device *pdev)
         if (IS_ERR(ldp)) {
                 backlight_device_unregister(bdp);
                 pci_dev_put(lpc_dev);
-                return PTR_ERR(bdp);
+                return PTR_ERR(ldp);
         }
 
         pci_read_config_dword(lpc_dev, CRVML_REG_GPIOBAR,
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index f2d76dae1eb3..74cdc640173d 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -95,7 +95,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
         return data->current_brightness;
 }
 
-static struct backlight_ops da903x_backlight_ops = {
+static const struct backlight_ops da903x_backlight_ops = {
         .update_status  = da903x_backlight_update_status,
         .get_brightness = da903x_backlight_get_brightness,
 };
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 6d27f62fdcd0..e6d348e63596 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
 }
 EXPORT_SYMBOL(corgibl_limit_intensity);
 
-static struct backlight_ops genericbl_ops = {
+static const struct backlight_ops genericbl_ops = {
         .options = BL_CORE_SUSPENDRESUME,
         .get_brightness = genericbl_get_intensity,
         .update_status = genericbl_send_intensity,
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 7fb4eefff80d..f7cc528d5be7 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
         return current_intensity;
 }
 
-static struct backlight_ops hp680bl_ops = {
+static const struct backlight_ops hp680bl_ops = {
         .get_brightness = hp680bl_get_intensity,
         .update_status  = hp680bl_set_intensity,
 };
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 7aed2565c1bd..db9071fc5665 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -93,7 +93,7 @@ out:
         return ret;
 }
 
-static struct backlight_ops jornada_bl_ops = {
+static const struct backlight_ops jornada_bl_ops = {
         .get_brightness = jornada_bl_get_brightness,
         .update_status = jornada_bl_update_status,
         .options = BL_CORE_SUSPENDRESUME,
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index a38fda1742dd..939e7b830cf3 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
         return kb3886bl_intensity;
 }
 
-static struct backlight_ops kb3886bl_ops = {
+static const struct backlight_ops kb3886bl_ops = {
         .get_brightness = kb3886bl_get_intensity,
         .update_status  = kb3886bl_send_intensity,
 };
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 6b488b8a7eee..00a9591b0003 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
         return current_intensity;
 }
 
-static struct backlight_ops locomobl_data = {
+static const struct backlight_ops locomobl_data = {
         .get_brightness = locomolcd_get_intensity,
         .update_status  = locomolcd_set_intensity,
 };
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 9edb8d7c295f..2e78b0784bdc 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -33,7 +33,7 @@ struct dmi_match_data {
         unsigned long iostart;
         unsigned long iolen;
         /* Backlight operations structure. */
-        struct backlight_ops backlight_ops;
+        const struct backlight_ops backlight_ops;
 };
 
 /* Module parameters. */
@@ -220,6 +220,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
         },
         {
                 .callback       = mbp_dmi_match,
+                .ident          = "MacBookPro 5,3",
+                .matches        = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3"),
+                },
+                .driver_data    = (void *)&nvidia_chipset_data,
+        },
+        {
+                .callback       = mbp_dmi_match,
+                .ident          = "MacBookPro 5,4",
+                .matches        = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4"),
+                },
+                .driver_data    = (void *)&nvidia_chipset_data,
+        },
+        {
+                .callback       = mbp_dmi_match,
                 .ident          = "MacBookPro 5,5",
                 .matches        = {
                         DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 8693e5fcd2eb..409ca9643528 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
         return bl->current_intensity;
 }
 
-static struct backlight_ops omapbl_ops = {
+static const struct backlight_ops omapbl_ops = {
         .get_brightness = omapbl_get_intensity,
         .update_status  = omapbl_update_status,
 };
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 9edaf24fd82d..075786e05034 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
         return intensity - HW_LEVEL_MIN;
 }
 
-static struct backlight_ops progearbl_ops = {
+static const struct backlight_ops progearbl_ops = {
         .get_brightness = progearbl_get_intensity,
         .update_status = progearbl_set_intensity,
 };
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 887166267443..9d2ec2a1cce8 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -22,8 +22,10 @@
 
 struct pwm_bl_data {
         struct pwm_device       *pwm;
+        struct device           *dev;
         unsigned int            period;
-        int                     (*notify)(int brightness);
+        int                     (*notify)(struct device *,
+                                          int brightness);
 };
 
 static int pwm_backlight_update_status(struct backlight_device *bl)
@@ -39,7 +41,7 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
                 brightness = 0;
 
         if (pb->notify)
-                brightness = pb->notify(brightness);
+                brightness = pb->notify(pb->dev, brightness);
 
         if (brightness == 0) {
                 pwm_config(pb->pwm, 0, pb->period);
@@ -56,7 +58,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
         return bl->props.brightness;
 }
 
-static struct backlight_ops pwm_backlight_ops = {
+static const struct backlight_ops pwm_backlight_ops = {
         .update_status  = pwm_backlight_update_status,
         .get_brightness = pwm_backlight_get_brightness,
 };
@@ -88,6 +90,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
 
         pb->period = data->pwm_period_ns;
         pb->notify = data->notify;
+        pb->dev = &pdev->dev;
 
         pb->pwm = pwm_request(data->pwm_id, "backlight");
         if (IS_ERR(pb->pwm)) {
@@ -146,7 +149,7 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
         struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
 
         if (pb->notify)
-                pb->notify(0);
+                pb->notify(pb->dev, 0);
         pwm_config(pb->pwm, 0, pb->period);
         pwm_disable(pb->pwm);
         return 0;
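
With the pwm_bl.c changes above, the board-supplied notify hook now receives the device pointer that probe() stores in pb->dev. A sketch of a board file using the new two-argument callback; the values and demo_* names are illustrative only:

#include <linux/device.h>
#include <linux/pwm_backlight.h>

static int demo_bl_notify(struct device *dev, int brightness)
{
        /* e.g. clamp or veto the requested level for this board */
        dev_dbg(dev, "backlight request: %d\n", brightness);
        return brightness;
}

static struct platform_pwm_backlight_data demo_bl_data = {
        .pwm_id         = 0,
        .max_brightness = 255,
        .dft_brightness = 128,
        .pwm_period_ns  = 78770,
        .notify         = demo_bl_notify,       /* new two-argument signature */
};
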
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 43edbada12d1..e14ce4d469f5 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
         return props->brightness;
 }
 
-static struct backlight_ops bl_ops = {
+static const struct backlight_ops bl_ops = {
         .get_brightness         = tosa_bl_get_brightness,
         .update_status          = tosa_bl_update_status,
 };
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 467bdb7efb23..e32add37a203 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
         return data->current_brightness;
 }
 
-static struct backlight_ops wm831x_backlight_ops = {
+static const struct backlight_ops wm831x_backlight_ops = {
         .options = BL_CORE_SUSPENDRESUME,
         .update_status = wm831x_backlight_update_status,
         .get_brightness = wm831x_backlight_get_brightness,
diff --git a/drivers/video/omap/lcd_ldp.c b/drivers/video/omap/lcd_ldp.c
index 5bb7f6f14601..0f5952cae85e 100644
--- a/drivers/video/omap/lcd_ldp.c
+++ b/drivers/video/omap/lcd_ldp.c
@@ -24,7 +24,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
 
 #include <mach/gpio.h>
 #include <plat/mux.h>
@@ -59,7 +59,7 @@
 #define TWL4030_VPLL2_DEV_GRP           0x33
 #define TWL4030_VPLL2_DEDICATED         0x36
 
-#define t2_out(c, r, v) twl4030_i2c_write_u8(c, r, v)
+#define t2_out(c, r, v) twl_i2c_write_u8(c, r, v)
 
 
 static int ldp_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/omap/lcd_omap2evm.c b/drivers/video/omap/lcd_omap2evm.c
index 006c2fe7360e..7e7a65c08452 100644
--- a/drivers/video/omap/lcd_omap2evm.c
+++ b/drivers/video/omap/lcd_omap2evm.c
@@ -24,7 +24,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
 
 #include <plat/mux.h>
 #include <asm/mach-types.h>
@@ -61,9 +61,9 @@ static int omap2evm_panel_init(struct lcd_panel *panel,
         gpio_direction_output(LCD_PANEL_LR, 1);
         gpio_direction_output(LCD_PANEL_UD, 1);
 
-        twl4030_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
-        twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
-        twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
+        twl_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
+        twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
+        twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
         bklight_level = 100;
 
         return 0;
@@ -101,7 +101,7 @@ static int omap2evm_bklight_setlevel(struct lcd_panel *panel,
         u8 c;
         if ((level >= 0) && (level <= 100)) {
                 c = (125 * (100 - level)) / 100 + 2;
-                twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
+                twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
                 bklight_level = level;
         }
         return 0;
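
The panel hunks above only rename the helper; the level-to-register mapping is unchanged. A sketch of the same mapping through the renamed twl_i2c_write_u8(), where the PWMA register offset is a placeholder rather than the driver's real TWL_PWMA_PWMAOFF value:

#include <linux/kernel.h>
#include <linux/i2c/twl.h>

#define DEMO_TWL_PWMA_PWMAOFF   0x01    /* placeholder register offset */

static int demo_set_backlight(int level)
{
        u8 c;

        if (level < 0 || level > 100)
                return -EINVAL;

        /* Same mapping as omap2evm_bklight_setlevel() above. */
        c = (125 * (100 - level)) / 100 + 2;
        return twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, DEMO_TWL_PWMA_PWMAOFF);
}
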
diff --git a/drivers/video/omap/lcd_omap3beagle.c b/drivers/video/omap/lcd_omap3beagle.c
index fc503d8f3c24..ca75cc2a87a5 100644
--- a/drivers/video/omap/lcd_omap3beagle.c
+++ b/drivers/video/omap/lcd_omap3beagle.c
@@ -23,7 +23,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
 
 #include <plat/mux.h>
 #include <plat/mux.h>
diff --git a/drivers/video/omap/lcd_omap3evm.c b/drivers/video/omap/lcd_omap3evm.c
index ae2edc4081a8..06840da0b094 100644
--- a/drivers/video/omap/lcd_omap3evm.c
+++ b/drivers/video/omap/lcd_omap3evm.c
@@ -23,7 +23,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
 
 #include <plat/mux.h>
 #include <asm/mach-types.h>
@@ -63,9 +63,9 @@ static int omap3evm_panel_init(struct lcd_panel *panel,
         gpio_direction_output(LCD_PANEL_LR, 1);
         gpio_direction_output(LCD_PANEL_UD, 1);
 
-        twl4030_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
-        twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
-        twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
+        twl_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
+        twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
+        twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
         bklight_level = 100;
 
         return 0;
@@ -102,7 +102,7 @@ static int omap3evm_bklight_setlevel(struct lcd_panel *panel,
         u8 c;
         if ((level >= 0) && (level <= 100)) {
                 c = (125 * (100 - level)) / 100 + 2;
-                twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
+                twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
                 bklight_level = level;
         }
         return 0;
diff --git a/drivers/video/omap/lcd_overo.c b/drivers/video/omap/lcd_overo.c
index 56ee192e9ee2..564933ffac6e 100644
--- a/drivers/video/omap/lcd_overo.c
+++ b/drivers/video/omap/lcd_overo.c
@@ -21,7 +21,7 @@
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
 
 #include <mach/gpio.h>
 #include <plat/mux.h>
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 10d8c4b4baeb..d8df17a7d5fc 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -680,7 +680,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
                 if (!viafb_gamma_table)
                         return -ENOMEM;
                 if (copy_from_user(viafb_gamma_table, argp,
-                                sizeof(viafb_gamma_table))) {
+                                256 * sizeof(u32))) {
                         kfree(viafb_gamma_table);
                         return -EFAULT;
                 }
@@ -694,7 +694,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
                         return -ENOMEM;
                 viafb_get_gamma_table(viafb_gamma_table);
                 if (copy_to_user(argp, viafb_gamma_table,
-                                sizeof(viafb_gamma_table))) {
+                                256 * sizeof(u32))) {
                         kfree(viafb_gamma_table);
                         return -EFAULT;
                 }
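
The two viafb hunks above fix a sizeof-on-a-pointer bug: sizeof(viafb_gamma_table) is the size of the pointer, not of the 256-entry table it points to. A sketch of the pitfall and the fix, with placeholder demo_* names:

#include <linux/slab.h>
#include <linux/uaccess.h>

static int demo_copy_gamma(u32 __user *argp)
{
        u32 *table = kmalloc(256 * sizeof(u32), GFP_KERNEL);

        if (!table)
                return -ENOMEM;

        /* WRONG: sizeof(table) is 4 or 8, so almost nothing is copied. */
        /* if (copy_from_user(table, argp, sizeof(table))) ... */

        /* RIGHT: state the buffer size explicitly. */
        if (copy_from_user(table, argp, 256 * sizeof(u32))) {
                kfree(table);
                return -EFAULT;
        }

        kfree(table);
        return 0;
}
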
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index d958b76430a2..088f32f29a6e 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -368,7 +368,7 @@ config ALIM7101_WDT
 
 config GEODE_WDT
         tristate "AMD Geode CS5535/CS5536 Watchdog"
-        depends on MGEODE_LX
+        depends on CS5535_MFGPT
         help
           This driver enables a watchdog capability built into the
           CS5535/CS5536 companion chips for the AMD Geode GX and LX
@@ -815,16 +815,6 @@ config PNX833X_WDT
           timer has expired and no process has written to /dev/watchdog during
           that time.
 
-config WDT_RM9K_GPI
-        tristate "RM9000/GPI hardware watchdog"
-        depends on CPU_RM9000
-        help
-          Watchdog implementation using the GPI hardware found on
-          PMC-Sierra RM9xxx CPUs.
-
-          To compile this driver as a module, choose M here: the
-          module will be called rm9k_wdt.
-
 config SIBYTE_WDOG
         tristate "Sibyte SoC hardware watchdog"
         depends on CPU_SB1
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 89c045dc468e..475c61100069 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -109,7 +109,6 @@ obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
 obj-$(CONFIG_INDYDOG) += indydog.o
 obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
 obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o
-obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o
 obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
 obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
 obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 9acf0015a1e7..38252ff828ca 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -1,6 +1,7 @@
-/* Watchdog timer for the Geode GX/LX with the CS5535/CS5536 companion chip
+/* Watchdog timer for machines with the CS5535/CS5536 companion chip
  *
  * Copyright (C) 2006-2007, Advanced Micro Devices, Inc.
+ * Copyright (C) 2009  Andres Salomon <dilinger@collabora.co.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -19,7 +20,7 @@
 #include <linux/reboot.h>
 #include <linux/uaccess.h>
 
-#include <asm/geode.h>
+#include <linux/cs5535.h>
 
 #define GEODEWDT_HZ 500
 #define GEODEWDT_SCALE 6
@@ -46,25 +47,25 @@ MODULE_PARM_DESC(nowayout,
 
 static struct platform_device *geodewdt_platform_device;
 static unsigned long wdt_flags;
-static int wdt_timer;
+static struct cs5535_mfgpt_timer *wdt_timer;
 static int safe_close;
 
 static void geodewdt_ping(void)
 {
         /* Stop the counter */
-        geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
+        cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
 
         /* Reset the counter */
-        geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
+        cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
 
         /* Enable the counter */
-        geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
+        cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
 }
 
 static void geodewdt_disable(void)
 {
-        geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
-        geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
+        cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
+        cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
 }
 
 static int geodewdt_set_heartbeat(int val)
@@ -72,10 +73,10 @@ static int geodewdt_set_heartbeat(int val)
         if (val < 1 || val > GEODEWDT_MAX_SECONDS)
                 return -EINVAL;
 
-        geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
-        geode_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, val * GEODEWDT_HZ);
-        geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
-        geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
+        cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
+        cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, val * GEODEWDT_HZ);
+        cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
+        cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
 
         timeout = val;
         return 0;
@@ -215,28 +216,25 @@ static struct miscdevice geodewdt_miscdev = {
 
 static int __devinit geodewdt_probe(struct platform_device *dev)
 {
-        int ret, timer;
-
-        timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
+        int ret;
 
-        if (timer == -1) {
+        wdt_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
+        if (!wdt_timer) {
                 printk(KERN_ERR "geodewdt: No timers were available\n");
                 return -ENODEV;
         }
 
-        wdt_timer = timer;
-
         /* Set up the timer */
 
-        geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP,
+        cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP,
                           GEODEWDT_SCALE | (3 << 8));
 
         /* Set up comparator 2 to reset when the event fires */
-        geode_mfgpt_toggle_event(wdt_timer, MFGPT_CMP2, MFGPT_EVENT_RESET, 1);
+        cs5535_mfgpt_toggle_event(wdt_timer, MFGPT_CMP2, MFGPT_EVENT_RESET, 1);
 
         /* Set up the initial timeout */
 
-        geode_mfgpt_write(wdt_timer, MFGPT_REG_CMP2,
+        cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2,
                           timeout * GEODEWDT_HZ);
 
         ret = misc_register(&geodewdt_miscdev);
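
A sketch (not from the patch) of the cs5535_mfgpt timer lifecycle that the converted geodewdt code relies on: allocation now returns a timer pointer or NULL, and the same write helpers program the SETUP and COUNTER registers. demo_* names are placeholders and error handling is trimmed:

#include <linux/cs5535.h>
#include <linux/module.h>

static struct cs5535_mfgpt_timer *demo_timer;

static int __init demo_mfgpt_init(void)
{
        demo_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
        if (!demo_timer)
                return -ENODEV;         /* NULL now signals failure, not -1 */

        /* Program and start the counter, as geodewdt_ping() does above. */
        cs5535_mfgpt_write(demo_timer, MFGPT_REG_SETUP, 0);
        cs5535_mfgpt_write(demo_timer, MFGPT_REG_COUNTER, 0);
        cs5535_mfgpt_write(demo_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
        return 0;
}

static void __exit demo_mfgpt_exit(void)
{
        cs5535_mfgpt_free_timer(demo_timer);
}

module_init(demo_mfgpt_init);
module_exit(demo_mfgpt_exit);
MODULE_LICENSE("GPL");
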
diff --git a/drivers/watchdog/rm9k_wdt.c b/drivers/watchdog/rm9k_wdt.c
deleted file mode 100644
index bb66958b9433..000000000000
--- a/drivers/watchdog/rm9k_wdt.c
+++ /dev/null
@@ -1,419 +0,0 @@
1/*
2 * Watchdog implementation for GPI h/w found on PMC-Sierra RM9xxx
3 * chips.
4 *
5 * Copyright (C) 2004 by Basler Vision Technologies AG
6 * Author: Thomas Koeller <thomas.koeller@baslerweb.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/platform_device.h>
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <linux/interrupt.h>
27#include <linux/fs.h>
28#include <linux/reboot.h>
29#include <linux/notifier.h>
30#include <linux/miscdevice.h>
31#include <linux/watchdog.h>
32#include <linux/io.h>
33#include <linux/uaccess.h>
34#include <asm/atomic.h>
35#include <asm/processor.h>
36#include <asm/system.h>
37#include <asm/rm9k-ocd.h>
38
39#include <rm9k_wdt.h>
40
41
42#define CLOCK 125000000
43#define MAX_TIMEOUT_SECONDS 32
44#define CPCCR 0x0080
45#define CPGIG1SR 0x0044
46#define CPGIG1ER 0x0054
47
48
49/* Function prototypes */
50static irqreturn_t wdt_gpi_irqhdl(int, void *);
51static void wdt_gpi_start(void);
52static void wdt_gpi_stop(void);
53static void wdt_gpi_set_timeout(unsigned int);
54static int wdt_gpi_open(struct inode *, struct file *);
55static int wdt_gpi_release(struct inode *, struct file *);
56static ssize_t wdt_gpi_write(struct file *, const char __user *, size_t,
57 loff_t *);
58static long wdt_gpi_ioctl(struct file *, unsigned int, unsigned long);
59static int wdt_gpi_notify(struct notifier_block *, unsigned long, void *);
60static const struct resource *wdt_gpi_get_resource(struct platform_device *,
61 const char *, unsigned int);
62static int __init wdt_gpi_probe(struct platform_device *);
63static int __exit wdt_gpi_remove(struct platform_device *);
64
65
66static const char wdt_gpi_name[] = "wdt_gpi";
67static atomic_t opencnt;
68static int expect_close;
69static int locked;
70
71
72/* These are set from device resources */
73static void __iomem *wd_regs;
74static unsigned int wd_irq, wd_ctr;
75
76
77/* Module arguments */
78static int timeout = MAX_TIMEOUT_SECONDS;
79module_param(timeout, int, 0444);
80MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds");
81
82static unsigned long resetaddr = 0xbffdc200;
83module_param(resetaddr, ulong, 0444);
84MODULE_PARM_DESC(resetaddr, "Address to write to to force a reset");
85
86static unsigned long flagaddr = 0xbffdc104;
87module_param(flagaddr, ulong, 0444);
88MODULE_PARM_DESC(flagaddr, "Address to write to boot flags to");
89
90static int powercycle;
91module_param(powercycle, bool, 0444);
92MODULE_PARM_DESC(powercycle, "Cycle power if watchdog expires");
93
94static int nowayout = WATCHDOG_NOWAYOUT;
95module_param(nowayout, bool, 0444);
96MODULE_PARM_DESC(nowayout, "Watchdog cannot be disabled once started");
97
98
99/* Kernel interfaces */
100static const struct file_operations fops = {
101 .owner = THIS_MODULE,
102 .open = wdt_gpi_open,
103 .release = wdt_gpi_release,
104 .write = wdt_gpi_write,
105 .unlocked_ioctl = wdt_gpi_ioctl,
106};
107
108static struct miscdevice miscdev = {
109 .minor = WATCHDOG_MINOR,
110 .name = wdt_gpi_name,
111 .fops = &fops,
112};
113
114static struct notifier_block wdt_gpi_shutdown = {
115 .notifier_call = wdt_gpi_notify,
116};
117
118
119/* Interrupt handler */
120static irqreturn_t wdt_gpi_irqhdl(int irq, void *ctxt)
121{
122 if (!unlikely(__raw_readl(wd_regs + 0x0008) & 0x1))
123 return IRQ_NONE;
124 __raw_writel(0x1, wd_regs + 0x0008);
125
126
127 printk(KERN_CRIT "%s: watchdog expired - resetting system\n",
128 wdt_gpi_name);
129
130 *(volatile char *) flagaddr |= 0x01;
131 *(volatile char *) resetaddr = powercycle ? 0x01 : 0x2;
132 iob();
133 while (1)
134 cpu_relax();
135}
136
137
138/* Watchdog functions */
139static void wdt_gpi_start(void)
140{
141 u32 reg;
142
143 lock_titan_regs();
144 reg = titan_readl(CPGIG1ER);
145 titan_writel(reg | (0x100 << wd_ctr), CPGIG1ER);
146 iob();
147 unlock_titan_regs();
148}
149
150static void wdt_gpi_stop(void)
151{
152 u32 reg;
153
154 lock_titan_regs();
155 reg = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4));
156 titan_writel(reg, CPCCR);
157 reg = titan_readl(CPGIG1ER);
158 titan_writel(reg & ~(0x100 << wd_ctr), CPGIG1ER);
159 iob();
160 unlock_titan_regs();
161}
162
163static void wdt_gpi_set_timeout(unsigned int to)
164{
165 u32 reg;
166 const u32 wdval = (to * CLOCK) & ~0x0000000f;
167
168 lock_titan_regs();
169 reg = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4));
170 titan_writel(reg, CPCCR);
171 wmb();
172 __raw_writel(wdval, wd_regs + 0x0000);
173 wmb();
174 titan_writel(reg | (0x2 << (wd_ctr * 4)), CPCCR);
175 wmb();
176 titan_writel(reg | (0x5 << (wd_ctr * 4)), CPCCR);
177 iob();
178 unlock_titan_regs();
179}
180
181
182/* /dev/watchdog operations */
183static int wdt_gpi_open(struct inode *inode, struct file *file)
184{
185 int res;
186
187 if (unlikely(atomic_dec_if_positive(&opencnt) < 0))
188 return -EBUSY;
189
190 expect_close = 0;
191 if (locked) {
192 module_put(THIS_MODULE);
193 free_irq(wd_irq, &miscdev);
194 locked = 0;
195 }
196
197 res = request_irq(wd_irq, wdt_gpi_irqhdl, IRQF_SHARED | IRQF_DISABLED,
198 wdt_gpi_name, &miscdev);
199 if (unlikely(res))
200 return res;
201
202 wdt_gpi_set_timeout(timeout);
203 wdt_gpi_start();
204
205 printk(KERN_INFO "%s: watchdog started, timeout = %u seconds\n",
206 wdt_gpi_name, timeout);
207 return nonseekable_open(inode, file);
208}
209
210static int wdt_gpi_release(struct inode *inode, struct file *file)
211{
212 if (nowayout) {
213 printk(KERN_INFO "%s: no way out - watchdog left running\n",
214 wdt_gpi_name);
215 __module_get(THIS_MODULE);
216 locked = 1;
217 } else {
218 if (expect_close) {
219 wdt_gpi_stop();
220 free_irq(wd_irq, &miscdev);
221 printk(KERN_INFO "%s: watchdog stopped\n",
222 wdt_gpi_name);
223 } else {
224 printk(KERN_CRIT "%s: unexpected close() -"
225 " watchdog left running\n",
226 wdt_gpi_name);
227 wdt_gpi_set_timeout(timeout);
228 __module_get(THIS_MODULE);
229 locked = 1;
230 }
231 }
232
233 atomic_inc(&opencnt);
234 return 0;
235}
236
237static ssize_t wdt_gpi_write(struct file *f, const char __user *d, size_t s,
238 loff_t *o)
239{
240 char val;
241
242 wdt_gpi_set_timeout(timeout);
243 expect_close = (s > 0) && !get_user(val, d) && (val == 'V');
244 return s ? 1 : 0;
245}
246
247static long wdt_gpi_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
248{
249 long res = -ENOTTY;
250 const long size = _IOC_SIZE(cmd);
251 int stat;
252 void __user *argp = (void __user *)arg;
253 static struct watchdog_info wdinfo = {
254 .identity = "RM9xxx/GPI watchdog",
255 .firmware_version = 0,
256 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING
257 };
258
259 if (unlikely(_IOC_TYPE(cmd) != WATCHDOG_IOCTL_BASE))
260 return -ENOTTY;
261
262 if ((_IOC_DIR(cmd) & _IOC_READ)
263 && !access_ok(VERIFY_WRITE, arg, size))
264 return -EFAULT;
265
266 if ((_IOC_DIR(cmd) & _IOC_WRITE)
267 && !access_ok(VERIFY_READ, arg, size))
268 return -EFAULT;
269
270 expect_close = 0;
271
272 switch (cmd) {
273 case WDIOC_GETSUPPORT:
274 wdinfo.options = nowayout ?
275 WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING :
276 WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
277 WDIOF_MAGICCLOSE;
278 res = __copy_to_user(argp, &wdinfo, size) ? -EFAULT : size;
279 break;
280
281 case WDIOC_GETSTATUS:
282 break;
283
284 case WDIOC_GETBOOTSTATUS:
285 stat = (*(volatile char *) flagaddr & 0x01)
286 ? WDIOF_CARDRESET : 0;
287 res = __copy_to_user(argp, &stat, size) ?
288 -EFAULT : size;
289 break;
290
291 case WDIOC_SETOPTIONS:
292 break;
293
294 case WDIOC_KEEPALIVE:
295 wdt_gpi_set_timeout(timeout);
296 res = size;
297 break;
298
299 case WDIOC_SETTIMEOUT:
300 {
301 int val;
302 if (unlikely(__copy_from_user(&val, argp, size))) {
303 res = -EFAULT;
304 break;
305 }
306
307 if (val > MAX_TIMEOUT_SECONDS)
308 val = MAX_TIMEOUT_SECONDS;
309 timeout = val;
310 wdt_gpi_set_timeout(val);
311 res = size;
312 printk(KERN_INFO "%s: timeout set to %u seconds\n",
313 wdt_gpi_name, timeout);
314 }
315 break;
316
317 case WDIOC_GETTIMEOUT:
318 res = __copy_to_user(argp, &timeout, size) ?
319 -EFAULT : size;
320 break;
321 }
322
323 return res;
324}
325
326
327/* Shutdown notifier */
328static int wdt_gpi_notify(struct notifier_block *this, unsigned long code,
329 void *unused)
330{
331 if (code == SYS_DOWN || code == SYS_HALT)
332 wdt_gpi_stop();
333
334 return NOTIFY_DONE;
335}
336
337
338/* Init & exit procedures */
339static const struct resource *wdt_gpi_get_resource(struct platform_device *pdv,
340 const char *name, unsigned int type)
341{
342 char buf[80];
343 if (snprintf(buf, sizeof(buf), "%s_0", name) >= sizeof(buf))
344 return NULL;
345 return platform_get_resource_byname(pdv, type, buf);
346}
347
348/* No hotplugging on the platform bus - use __devinit */
349static int __devinit wdt_gpi_probe(struct platform_device *pdv)
350{
351 int res;
352 const struct resource
353 * const rr = wdt_gpi_get_resource(pdv, WDT_RESOURCE_REGS,
354 IORESOURCE_MEM),
355 * const ri = wdt_gpi_get_resource(pdv, WDT_RESOURCE_IRQ,
356 IORESOURCE_IRQ),
357 * const rc = wdt_gpi_get_resource(pdv, WDT_RESOURCE_COUNTER,
358 0);
359
360 if (unlikely(!rr || !ri || !rc))
361 return -ENXIO;
362
363 wd_regs = ioremap_nocache(rr->start, rr->end + 1 - rr->start);
364 if (unlikely(!wd_regs))
365 return -ENOMEM;
366 wd_irq = ri->start;
367 wd_ctr = rc->start;
368 res = misc_register(&miscdev);
369 if (res)
370 iounmap(wd_regs);
371 else
372 register_reboot_notifier(&wdt_gpi_shutdown);
373 return res;
374}
375
376static int __devexit wdt_gpi_remove(struct platform_device *dev)
377{
378 int res;
379
380 unregister_reboot_notifier(&wdt_gpi_shutdown);
381 res = misc_deregister(&miscdev);
382 iounmap(wd_regs);
383 wd_regs = NULL;
384 return res;
385}
386
387
388/* Device driver init & exit */
389static struct platform_driver wgt_gpi_driver = {
390 .driver = {
391 .name = wdt_gpi_name,
392 .owner = THIS_MODULE,
393 },
394 .probe = wdt_gpi_probe,
395 .remove = __devexit_p(wdt_gpi_remove),
396};
397
398static int __init wdt_gpi_init_module(void)
399{
400 atomic_set(&opencnt, 1);
401 if (timeout > MAX_TIMEOUT_SECONDS)
402 timeout = MAX_TIMEOUT_SECONDS;
403 return platform_driver_register(&wdt_gpi_driver);
404}
405
406static void __exit wdt_gpi_cleanup_module(void)
407{
408 platform_driver_unregister(&wdt_gpi_driver);
409}
410
411module_init(wdt_gpi_init_module);
412module_exit(wdt_gpi_cleanup_module);
413
414MODULE_AUTHOR("Thomas Koeller <thomas.koeller@baslerweb.com>");
415MODULE_DESCRIPTION("Basler eXcite watchdog driver for gpi devices");
416MODULE_VERSION("0.1");
417MODULE_LICENSE("GPL");
418MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
419