Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/bus.c | 9
-rw-r--r-- drivers/ata/pata_macio.c | 2
-rw-r--r-- drivers/ata/pata_mpc52xx.c | 14
-rw-r--r-- drivers/ata/pata_of_platform.c | 9
-rw-r--r-- drivers/ata/sata_fsl.c | 11
-rw-r--r-- drivers/atm/fore200e.c | 23
-rw-r--r-- drivers/auxdisplay/cfag12864bfb.c | 8
-rw-r--r-- drivers/base/node.c | 3
-rw-r--r-- drivers/base/topology.c | 2
-rw-r--r-- drivers/block/swim3.c | 2
-rw-r--r-- drivers/block/xsysace.c | 13
-rw-r--r-- drivers/cdrom/viocd.c | 2
-rw-r--r-- drivers/char/Kconfig | 7
-rw-r--r-- drivers/char/Makefile | 1
-rw-r--r-- drivers/char/agp/amd64-agp.c | 28
-rw-r--r-- drivers/char/amiserial.c | 61
-rw-r--r-- drivers/char/apm-emulation.c | 8
-rw-r--r-- drivers/char/applicom.c | 22
-rw-r--r-- drivers/char/ds1620.c | 16
-rw-r--r-- drivers/char/dtlk.c | 15
-rw-r--r-- drivers/char/generic_nvram.c | 17
-rw-r--r-- drivers/char/genrtc.c | 16
-rw-r--r-- drivers/char/hangcheck-timer.c | 20
-rw-r--r-- drivers/char/hpet.c | 14
-rw-r--r-- drivers/char/hvsi.c | 6
-rw-r--r-- drivers/char/hw_random/n2-drv.c | 9
-rw-r--r-- drivers/char/hw_random/nomadik-rng.c | 17
-rw-r--r-- drivers/char/hw_random/pasemi-rng.c | 9
-rw-r--r-- drivers/char/ipmi/ipmi_devintf.c | 26
-rw-r--r-- drivers/char/ipmi/ipmi_msghandler.c | 15
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 479
-rw-r--r-- drivers/char/ipmi/ipmi_watchdog.c | 17
-rw-r--r-- drivers/char/misc.c | 1
-rw-r--r-- drivers/char/nvram.c | 10
-rw-r--r-- drivers/char/nwflash.c | 7
-rw-r--r-- drivers/char/ppdev.c | 4
-rw-r--r-- drivers/char/ramoops.c | 162
-rw-r--r-- drivers/char/raw.c | 42
-rw-r--r-- drivers/char/viotape.c | 2
-rw-r--r-- drivers/char/vt.c | 10
-rw-r--r-- drivers/char/xilinx_hwicap/xilinx_hwicap.c | 11
-rw-r--r-- drivers/cpuidle/governors/menu.c | 60
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.c | 7
-rw-r--r-- drivers/crypto/talitos.c | 9
-rw-r--r-- drivers/dma/fsldma.c | 17
-rw-r--r-- drivers/dma/ppc4xx/adma.c | 2
-rw-r--r-- drivers/dma/shdma.c | 5
-rw-r--r-- drivers/dma/timb_dma.c | 2
-rw-r--r-- drivers/edac/i5000_edac.c | 20
-rw-r--r-- drivers/edac/i5400_edac.c | 20
-rw-r--r-- drivers/edac/i82443bxgx_edac.c | 22
-rw-r--r-- drivers/edac/mpc85xx_edac.c | 30
-rw-r--r-- drivers/edac/ppc4xx_edac.c | 10
-rw-r--r-- drivers/firewire/core-card.c | 22
-rw-r--r-- drivers/firewire/core-cdev.c | 8
-rw-r--r-- drivers/firewire/core-transaction.c | 96
-rw-r--r-- drivers/firewire/core.h | 6
-rw-r--r-- drivers/firewire/ohci.c | 188
-rw-r--r-- drivers/firewire/ohci.h | 10
-rw-r--r-- drivers/gpio/Kconfig | 11
-rw-r--r-- drivers/gpio/cs5535-gpio.c | 2
-rw-r--r-- drivers/gpio/gpiolib.c | 49
-rw-r--r-- drivers/gpio/it8761e_gpio.c | 5
-rw-r--r-- drivers/gpio/langwell_gpio.c | 83
-rw-r--r-- drivers/gpio/max732x.c | 368
-rw-r--r-- drivers/gpio/pca953x.c | 4
-rw-r--r-- drivers/gpio/pl061.c | 2
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nv40_graph.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_agp.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 13
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 2
-rw-r--r-- drivers/hid/Kconfig | 8
-rw-r--r-- drivers/hid/Makefile | 1
-rw-r--r-- drivers/hid/hid-core.c | 1
-rw-r--r-- drivers/hid/hid-debug.c | 2
-rw-r--r-- drivers/hid/hid-gyration.c | 1
-rw-r--r-- drivers/hid/hid-ids.h | 1
-rw-r--r-- drivers/hid/hid-roccat-kone.c | 73
-rw-r--r-- drivers/hid/hid-roccat-kone.h | 9
-rw-r--r-- drivers/hid/hid-roccat.c | 428
-rw-r--r-- drivers/hid/hid-roccat.h | 31
-rw-r--r-- drivers/hwmon/Kconfig | 9
-rw-r--r-- drivers/hwmon/Makefile | 1
-rw-r--r-- drivers/hwmon/ads7871.c | 253
-rw-r--r-- drivers/hwmon/coretemp.c | 93
-rw-r--r-- drivers/hwmon/fschmd.c | 9
-rw-r--r-- drivers/hwmon/lis3lv02d.c | 245
-rw-r--r-- drivers/hwmon/lis3lv02d.h | 11
-rw-r--r-- drivers/hwmon/ultra45_env.c | 7
-rw-r--r-- drivers/hwmon/w83793.c | 10
-rw-r--r-- drivers/i2c/busses/i2c-cpm.c | 30
-rw-r--r-- drivers/i2c/busses/i2c-ibm_iic.c | 11
-rw-r--r-- drivers/i2c/busses/i2c-mpc.c | 25
-rw-r--r-- drivers/i2c/i2c-core.c | 3
-rw-r--r-- drivers/ide/cmd640.c | 6
-rw-r--r-- drivers/ide/gayle.c | 147
-rw-r--r-- drivers/ide/ide_platform.c | 1
-rw-r--r-- drivers/ide/pdc202xx_old.c | 5
-rw-r--r-- drivers/ide/pmac.c | 10
-rw-r--r-- drivers/ieee1394/dv1394.c | 11
-rw-r--r-- drivers/ieee1394/raw1394.c | 3
-rw-r--r-- drivers/ieee1394/video1394.c | 5
-rw-r--r-- drivers/infiniband/Kconfig | 1
-rw-r--r-- drivers/infiniband/Makefile | 1
-rw-r--r-- drivers/infiniband/core/core_priv.h | 4
-rw-r--r-- drivers/infiniband/core/device.c | 6
-rw-r--r-- drivers/infiniband/core/mad.c | 4
-rw-r--r-- drivers/infiniband/core/sysfs.c | 21
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_provider.c | 2
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_provider.c | 2
-rw-r--r-- drivers/infiniband/hw/cxgb4/cq.c | 6
-rw-r--r-- drivers/infiniband/hw/cxgb4/device.c | 50
-rw-r--r-- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1
-rw-r--r-- drivers/infiniband/hw/cxgb4/mem.c | 11
-rw-r--r-- drivers/infiniband/hw/cxgb4/provider.c | 4
-rw-r--r-- drivers/infiniband/hw/cxgb4/qp.c | 33
-rw-r--r-- drivers/infiniband/hw/cxgb4/t4.h | 76
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_irq.c | 2
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_main.c | 20
-rw-r--r-- drivers/infiniband/hw/ipath/Kconfig | 8
-rw-r--r-- drivers/infiniband/hw/ipath/Makefile | 6
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_driver.c | 28
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_iba6120.c | 1862
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_iba7220.c | 2631
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_kernel.h | 2
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_verbs.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 2
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_provider.c | 2
-rw-r--r-- drivers/infiniband/hw/nes/nes_hw.c | 12
-rw-r--r-- drivers/infiniband/hw/nes/nes_nic.c | 72
-rw-r--r-- drivers/infiniband/hw/nes/nes_verbs.c | 2
-rw-r--r-- drivers/infiniband/hw/qib/Kconfig | 7
-rw-r--r-- drivers/infiniband/hw/qib/Makefile | 15
-rw-r--r-- drivers/infiniband/hw/qib/qib.h | 1439
-rw-r--r-- drivers/infiniband/hw/qib/qib_6120_regs.h | 977
-rw-r--r-- drivers/infiniband/hw/qib/qib_7220.h | 156
-rw-r--r-- drivers/infiniband/hw/qib/qib_7220_regs.h | 1496
-rw-r--r-- drivers/infiniband/hw/qib/qib_7322_regs.h | 3163
-rw-r--r-- drivers/infiniband/hw/qib/qib_common.h | 758
-rw-r--r-- drivers/infiniband/hw/qib/qib_cq.c | 484
-rw-r--r-- drivers/infiniband/hw/qib/qib_diag.c | 894
-rw-r--r-- drivers/infiniband/hw/qib/qib_dma.c | 182
-rw-r--r-- drivers/infiniband/hw/qib/qib_driver.c | 665
-rw-r--r-- drivers/infiniband/hw/qib/qib_eeprom.c | 451
-rw-r--r-- drivers/infiniband/hw/qib/qib_file_ops.c | 2317
-rw-r--r-- drivers/infiniband/hw/qib/qib_fs.c | 613
-rw-r--r-- drivers/infiniband/hw/qib/qib_iba6120.c | 3588
-rw-r--r-- drivers/infiniband/hw/qib/qib_iba7220.c | 4618
-rw-r--r-- drivers/infiniband/hw/qib/qib_iba7322.c | 8058
-rw-r--r-- drivers/infiniband/hw/qib/qib_init.c | 1580
-rw-r--r-- drivers/infiniband/hw/qib/qib_intr.c | 236
-rw-r--r-- drivers/infiniband/hw/qib/qib_keys.c | 328
-rw-r--r-- drivers/infiniband/hw/qib/qib_mad.c | 2173
-rw-r--r-- drivers/infiniband/hw/qib/qib_mad.h | 373
-rw-r--r-- drivers/infiniband/hw/qib/qib_mmap.c | 174
-rw-r--r-- drivers/infiniband/hw/qib/qib_mr.c | 503
-rw-r--r-- drivers/infiniband/hw/qib/qib_pcie.c | 738
-rw-r--r-- drivers/infiniband/hw/qib/qib_pio_copy.c | 64
-rw-r--r-- drivers/infiniband/hw/qib/qib_qp.c | 1255
-rw-r--r-- drivers/infiniband/hw/qib/qib_qsfp.c | 564
-rw-r--r-- drivers/infiniband/hw/qib/qib_qsfp.h | 184
-rw-r--r-- drivers/infiniband/hw/qib/qib_rc.c | 2288
-rw-r--r-- drivers/infiniband/hw/qib/qib_ruc.c | 817
-rw-r--r-- drivers/infiniband/hw/qib/qib_sd7220.c (renamed from drivers/infiniband/hw/ipath/ipath_sd7220.c) | 859
-rw-r--r-- drivers/infiniband/hw/qib/qib_sd7220_img.c (renamed from drivers/infiniband/hw/ipath/ipath_sd7220_img.c) | 19
-rw-r--r-- drivers/infiniband/hw/qib/qib_sdma.c | 973
-rw-r--r-- drivers/infiniband/hw/qib/qib_srq.c | 375
-rw-r--r-- drivers/infiniband/hw/qib/qib_sysfs.c | 691
-rw-r--r-- drivers/infiniband/hw/qib/qib_twsi.c | 498
-rw-r--r-- drivers/infiniband/hw/qib/qib_tx.c | 557
-rw-r--r-- drivers/infiniband/hw/qib/qib_uc.c | 555
-rw-r--r-- drivers/infiniband/hw/qib/qib_ud.c | 607
-rw-r--r-- drivers/infiniband/hw/qib/qib_user_pages.c | 157
-rw-r--r-- drivers/infiniband/hw/qib/qib_user_sdma.c | 897
-rw-r--r-- drivers/infiniband/hw/qib/qib_user_sdma.h | 52
-rw-r--r-- drivers/infiniband/hw/qib/qib_verbs.c | 2248
-rw-r--r-- drivers/infiniband/hw/qib/qib_verbs.h | 1100
-rw-r--r-- drivers/infiniband/hw/qib/qib_verbs_mcast.c | 368
-rw-r--r-- drivers/infiniband/hw/qib/qib_wc_ppc64.c (renamed from drivers/infiniband/hw/ipath/ipath_7220.h) | 49
-rw-r--r-- drivers/infiniband/hw/qib/qib_wc_x86_64.c | 171
-rw-r--r-- drivers/input/joydev.c | 10
-rw-r--r-- drivers/input/keyboard/amikbd.c | 97
-rw-r--r-- drivers/input/misc/Kconfig | 10
-rw-r--r-- drivers/input/misc/Makefile | 1
-rw-r--r-- drivers/input/misc/hp_sdc_rtc.c | 34
-rw-r--r-- drivers/input/misc/max8925_onkey.c | 148
-rw-r--r-- drivers/input/misc/sparcspkr.c | 14
-rw-r--r-- drivers/input/misc/twl4030-vibra.c | 2
-rw-r--r-- drivers/input/misc/uinput.c | 4
-rw-r--r-- drivers/input/mouse/amimouse.c | 98
-rw-r--r-- drivers/input/serio/i8042-sparcio.h | 9
-rw-r--r-- drivers/input/serio/xilinx_ps2.c | 15
-rw-r--r-- drivers/input/touchscreen/ads7846.c | 4
-rw-r--r-- drivers/input/touchscreen/s3c2410_ts.c | 2
-rw-r--r-- drivers/input/touchscreen/usbtouchscreen.c | 7
-rw-r--r-- drivers/isdn/capi/capi.c | 17
-rw-r--r-- drivers/isdn/capi/kcapi.c | 6
-rw-r--r-- drivers/isdn/gigaset/capi.c | 41
-rw-r--r-- drivers/isdn/i4l/isdn_common.c | 18
-rw-r--r-- drivers/isdn/mISDN/timerdev.c | 12
-rw-r--r-- drivers/leds/leds-gpio.c | 4
-rw-r--r-- drivers/macintosh/macio_asic.c | 28
-rw-r--r-- drivers/macintosh/macio_sysfs.c | 6
-rw-r--r-- drivers/macintosh/mediabay.c | 2
-rw-r--r-- drivers/macintosh/nvram.c | 2
-rw-r--r-- drivers/macintosh/rack-meter.c | 4
-rw-r--r-- drivers/macintosh/smu.c | 7
-rw-r--r-- drivers/macintosh/therm_pm72.c | 9
-rw-r--r-- drivers/macintosh/therm_windtunnel.c | 7
-rw-r--r-- drivers/macintosh/via-pmu.c | 17
-rw-r--r-- drivers/md/raid5.c | 2
-rw-r--r-- drivers/media/dvb/dvb-core/dmxdev.c | 31
-rw-r--r-- drivers/media/dvb/dvb-core/dvb_ca_en50221.c | 17
-rw-r--r-- drivers/media/dvb/dvb-core/dvb_frontend.c | 30
-rw-r--r-- drivers/media/dvb/dvb-core/dvb_net.c | 15
-rw-r--r-- drivers/media/dvb/dvb-core/dvbdev.c | 17
-rw-r--r-- drivers/media/dvb/dvb-core/dvbdev.h | 11
-rw-r--r-- drivers/media/dvb/firewire/firedtv-ci.c | 5
-rw-r--r-- drivers/media/dvb/ttpci/av7110.c | 4
-rw-r--r-- drivers/media/dvb/ttpci/av7110_av.c | 8
-rw-r--r-- drivers/media/dvb/ttpci/av7110_ca.c | 5
-rw-r--r-- drivers/message/fusion/mptscsih.c | 6
-rw-r--r-- drivers/message/i2o/i2o_config.c | 11
-rw-r--r-- drivers/mfd/88pm860x-core.c | 36
-rw-r--r-- drivers/mfd/Kconfig | 6
-rw-r--r-- drivers/mfd/sh_mobile_sdhi.c | 28
-rw-r--r-- drivers/misc/Kconfig | 32
-rw-r--r-- drivers/misc/Makefile | 2
-rw-r--r-- drivers/misc/ad525x_dpot-i2c.c | 134
-rw-r--r-- drivers/misc/ad525x_dpot-spi.c | 172
-rw-r--r-- drivers/misc/ad525x_dpot.c | 1016
-rw-r--r-- drivers/misc/ad525x_dpot.h | 202
-rw-r--r-- drivers/misc/lkdtm.c | 20
-rw-r--r-- drivers/mmc/core/core.c | 3
-rw-r--r-- drivers/mmc/core/sd_ops.c | 2
-rw-r--r-- drivers/mmc/core/sdio.c | 6
-rw-r--r-- drivers/mmc/core/sdio_io.c | 30
-rw-r--r-- drivers/mmc/host/Kconfig | 20
-rw-r--r-- drivers/mmc/host/Makefile | 2
-rw-r--r-- drivers/mmc/host/at91_mci.c | 2
-rw-r--r-- drivers/mmc/host/atmel-mci.c | 64
-rw-r--r-- drivers/mmc/host/au1xmmc.c | 2
-rw-r--r-- drivers/mmc/host/bfin_sdh.c | 2
-rw-r--r-- drivers/mmc/host/cb710-mmc.c | 2
-rw-r--r-- drivers/mmc/host/davinci_mmc.c | 111
-rw-r--r-- drivers/mmc/host/imxmmc.c | 2
-rw-r--r-- drivers/mmc/host/mmci.c | 2
-rw-r--r-- drivers/mmc/host/msm_sdcc.c | 2
-rw-r--r-- drivers/mmc/host/mvsdio.c | 2
-rw-r--r-- drivers/mmc/host/mxcmmc.c | 116
-rw-r--r-- drivers/mmc/host/of_mmc_spi.c | 4
-rw-r--r-- drivers/mmc/host/omap.c | 64
-rw-r--r-- drivers/mmc/host/omap_hsmmc.c | 279
-rw-r--r-- drivers/mmc/host/pxamci.c | 2
-rw-r--r-- drivers/mmc/host/s3cmci.c | 3
-rw-r--r-- drivers/mmc/host/sdhci-of-core.c | 11
-rw-r--r-- drivers/mmc/host/sdhci-of-esdhc.c | 12
-rw-r--r-- drivers/mmc/host/sdhci-of-hlwd.c | 12
-rw-r--r-- drivers/mmc/host/sdhci-pci.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-pltfm.c | 26
-rw-r--r-- drivers/mmc/host/sdhci-s3c.c | 10
-rw-r--r-- drivers/mmc/host/sdhci-spear.c | 298
-rw-r--r-- drivers/mmc/host/sdhci.c | 25
-rw-r--r-- drivers/mmc/host/sdhci.h | 42
-rw-r--r-- drivers/mmc/host/sdricoh_cs.c | 2
-rw-r--r-- drivers/mmc/host/sh_mmcif.c | 965
-rw-r--r-- drivers/mmc/host/tifm_sd.c | 2
-rw-r--r-- drivers/mmc/host/tmio_mmc.c | 369
-rw-r--r-- drivers/mmc/host/tmio_mmc.h | 13
-rw-r--r-- drivers/mmc/host/via-sdmmc.c | 2
-rw-r--r-- drivers/mmc/host/wbsd.c | 2
-rw-r--r-- drivers/mtd/maps/physmap_of.c | 13
-rw-r--r-- drivers/mtd/maps/sun_uflash.c | 9
-rw-r--r-- drivers/mtd/mtdchar.c | 19
-rw-r--r-- drivers/mtd/nand/fsl_elbc_nand.c | 11
-rw-r--r-- drivers/mtd/nand/fsl_upm.c | 7
-rw-r--r-- drivers/mtd/nand/ndfc.c | 15
-rw-r--r-- drivers/mtd/nand/pasemi_nand.c | 9
-rw-r--r-- drivers/mtd/nand/socrates_nand.c | 7
-rw-r--r-- drivers/net/benet/be.h | 2
-rw-r--r-- drivers/net/benet/be_cmds.c | 19
-rw-r--r-- drivers/net/benet/be_main.c | 11
-rw-r--r-- drivers/net/bfin_mac.c | 2
-rw-r--r-- drivers/net/can/mscan/mpc5xxx_can.c | 8
-rw-r--r-- drivers/net/can/sja1000/sja1000.c | 2
-rw-r--r-- drivers/net/can/sja1000/sja1000_of_platform.c | 12
-rw-r--r-- drivers/net/ehea/ehea_main.c | 21
-rw-r--r-- drivers/net/enic/enic_main.c | 29
-rw-r--r-- drivers/net/ethoc.c | 34
-rw-r--r-- drivers/net/fec.c | 22
-rw-r--r-- drivers/net/fec.h | 2
-rw-r--r-- drivers/net/fec_mpc52xx.c | 20
-rw-r--r-- drivers/net/fec_mpc52xx_phy.c | 11
-rw-r--r-- drivers/net/fs_enet/fs_enet-main.c | 15
-rw-r--r-- drivers/net/fs_enet/mac-fcc.c | 8
-rw-r--r-- drivers/net/fs_enet/mac-fec.c | 4
-rw-r--r-- drivers/net/fs_enet/mac-scc.c | 6
-rw-r--r-- drivers/net/fs_enet/mii-bitbang.c | 7
-rw-r--r-- drivers/net/fs_enet/mii-fec.c | 13
-rw-r--r-- drivers/net/fsl_pq_mdio.c | 9
-rw-r--r-- drivers/net/gianfar.c | 16
-rw-r--r-- drivers/net/greth.c | 3
-rw-r--r-- drivers/net/ibm_newemac/core.c | 21
-rw-r--r-- drivers/net/ibm_newemac/debug.c | 9
-rw-r--r-- drivers/net/ibm_newemac/debug.h | 4
-rw-r--r-- drivers/net/ibm_newemac/mal.c | 36
-rw-r--r-- drivers/net/ibm_newemac/rgmii.c | 20
-rw-r--r-- drivers/net/ibm_newemac/tah.c | 15
-rw-r--r-- drivers/net/ibm_newemac/zmii.c | 17
-rw-r--r-- drivers/net/irda/bfin_sir.c | 8
-rw-r--r-- drivers/net/ixgbe/ixgbe.h | 3
-rw-r--r-- drivers/net/ixgbe/ixgbe_82598.c | 1
-rw-r--r-- drivers/net/ixgbe/ixgbe_82599.c | 1
-rw-r--r-- drivers/net/ixgbe/ixgbe_main.c | 69
-rw-r--r-- drivers/net/ixgbe/ixgbe_phy.c | 30
-rw-r--r-- drivers/net/ixgbe/ixgbe_phy.h | 3
-rw-r--r-- drivers/net/ixgbe/ixgbe_type.h | 4
-rw-r--r-- drivers/net/ll_temac_main.c | 12
-rw-r--r-- drivers/net/macvlan.c | 9
-rw-r--r-- drivers/net/mlx4/icm.c | 36
-rw-r--r-- drivers/net/myri_sbus.c | 9
-rw-r--r-- drivers/net/niu.c | 17
-rw-r--r-- drivers/net/phy/mdio-gpio.c | 13
-rw-r--r-- drivers/net/ppp_generic.c | 4
-rw-r--r-- drivers/net/pppoe.c | 1
-rw-r--r-- drivers/net/sh_eth.c | 3
-rw-r--r-- drivers/net/sunbmac.c | 13
-rw-r--r-- drivers/net/sunhme.c | 15
-rw-r--r-- drivers/net/sunlance.c | 13
-rw-r--r-- drivers/net/sunqe.c | 13
-rw-r--r-- drivers/net/tun.c | 3
-rw-r--r-- drivers/net/ucc_geth.c | 9
-rw-r--r-- drivers/net/usb/asix.c | 4
-rw-r--r-- drivers/net/usb/hso.c | 3
-rw-r--r-- drivers/net/wimax/i2400m/rx.c | 4
-rw-r--r-- drivers/net/wireless/airo.c | 15
-rw-r--r-- drivers/net/wireless/ath/ath5k/base.c | 7
-rw-r--r-- drivers/net/wireless/ath/ath9k/beacon.c | 75
-rw-r--r-- drivers/net/wireless/ath/ath9k/hif_usb.c | 10
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 28
-rw-r--r-- drivers/net/wireless/ath/ath9k/pci.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/recv.c | 17
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-agn-ict.c | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-scan.c | 21
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-sta.c | 2
-rw-r--r-- drivers/net/wireless/rndis_wlan.c | 16
-rw-r--r-- drivers/net/wireless/rt2x00/rt2400pci.c | 9
-rw-r--r-- drivers/net/wireless/rt2x00/rt2500pci.c | 9
-rw-r--r-- drivers/net/wireless/rt2x00/rt2800usb.c | 2
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00pci.c | 2
-rw-r--r-- drivers/net/wireless/rt2x00/rt61pci.c | 7
-rw-r--r-- drivers/net/wireless/rt2x00/rt73usb.c | 7
-rw-r--r-- drivers/net/wireless/wl12xx/wl1271_rx.c | 2
-rw-r--r-- drivers/net/xilinx_emaclite.c | 17
-rw-r--r-- drivers/of/device.c | 25
-rw-r--r-- drivers/of/fdt.c | 15
-rw-r--r-- drivers/of/of_i2c.c | 4
-rw-r--r-- drivers/of/of_mdio.c | 6
-rw-r--r-- drivers/of/of_spi.c | 2
-rw-r--r-- drivers/of/platform.c | 13
-rw-r--r-- drivers/parport/parport_amiga.c | 64
-rw-r--r-- drivers/parport/parport_sunbpp.c | 7
-rw-r--r-- drivers/pcmcia/electra_cf.c | 9
-rw-r--r-- drivers/pcmcia/m8xx_pcmcia.c | 7
-rw-r--r-- drivers/pcmcia/pcmcia_ioctl.c | 17
-rw-r--r-- drivers/power/Kconfig | 15
-rw-r--r-- drivers/power/Makefile | 2
-rw-r--r-- drivers/power/ds2760_battery.c | 64
-rw-r--r-- drivers/power/ds2782_battery.c | 194
-rw-r--r-- drivers/power/pda_power.c | 10
-rw-r--r-- drivers/power/power_supply.h | 7
-rw-r--r-- drivers/power/power_supply_core.c | 48
-rw-r--r-- drivers/power/power_supply_sysfs.c | 147
-rw-r--r-- drivers/power/test_power.c | 163
-rw-r--r-- drivers/power/tosa_battery.c | 4
-rw-r--r-- drivers/power/wm831x_power.c | 33
-rw-r--r-- drivers/power/wm97xx_battery.c | 3
-rw-r--r-- drivers/power/z2_battery.c | 328
-rw-r--r-- drivers/rapidio/Kconfig | 24
-rw-r--r-- drivers/rapidio/Makefile | 4
-rw-r--r-- drivers/rapidio/rio-scan.c | 424
-rw-r--r-- drivers/rapidio/rio.c | 431
-rw-r--r-- drivers/rapidio/rio.h | 44
-rw-r--r-- drivers/rapidio/switches/Kconfig | 28
-rw-r--r-- drivers/rapidio/switches/Makefile | 9
-rw-r--r-- drivers/rapidio/switches/idtcps.c | 137
-rw-r--r-- drivers/rapidio/switches/tsi500.c | 20
-rw-r--r-- drivers/rapidio/switches/tsi568.c | 146
-rw-r--r-- drivers/rapidio/switches/tsi57x.c | 315
-rw-r--r-- drivers/regulator/88pm8607.c | 533
-rw-r--r-- drivers/regulator/ab3100.c | 10
-rw-r--r-- drivers/regulator/bq24022.c | 1
-rw-r--r-- drivers/regulator/core.c | 83
-rw-r--r-- drivers/regulator/mc13783-regulator.c | 6
-rw-r--r-- drivers/regulator/twl-regulator.c | 138
-rw-r--r-- drivers/rtc/Kconfig | 9
-rw-r--r-- drivers/rtc/Makefile | 1
-rw-r--r-- drivers/rtc/rtc-ab8500.c | 363
-rw-r--r-- drivers/rtc/rtc-cmos.c | 5
-rw-r--r-- drivers/rtc/rtc-ds1302.c | 85
-rw-r--r-- drivers/rtc/rtc-isl1208.c | 45
-rw-r--r-- drivers/rtc/rtc-m41t80.c | 22
-rw-r--r-- drivers/rtc/rtc-mxc.c | 25
-rw-r--r-- drivers/rtc/rtc-s3c.c | 107
-rw-r--r-- drivers/rtc/rtc-wm831x.c | 16
-rw-r--r-- drivers/sbus/char/bbc_envctrl.c | 4
-rw-r--r-- drivers/sbus/char/bbc_i2c.c | 11
-rw-r--r-- drivers/sbus/char/display7seg.c | 9
-rw-r--r-- drivers/sbus/char/envctrl.c | 9
-rw-r--r-- drivers/sbus/char/flash.c | 11
-rw-r--r-- drivers/sbus/char/openprom.c | 44
-rw-r--r-- drivers/sbus/char/uctrl.c | 9
-rw-r--r-- drivers/scsi/3w-9xxx.c | 10
-rw-r--r-- drivers/scsi/3w-sas.c | 8
-rw-r--r-- drivers/scsi/3w-xxxx.c | 11
-rw-r--r-- drivers/scsi/a2091.c | 245
-rw-r--r-- drivers/scsi/a2091.h | 4
-rw-r--r-- drivers/scsi/a3000.c | 256
-rw-r--r-- drivers/scsi/a3000.h | 4
-rw-r--r-- drivers/scsi/a4000t.c | 101
-rw-r--r-- drivers/scsi/aacraid/linit.c | 11
-rw-r--r-- drivers/scsi/dpt_i2o.c | 20
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 2
-rw-r--r-- drivers/scsi/gdth.c | 20
-rw-r--r-- drivers/scsi/gvp11.c | 541
-rw-r--r-- drivers/scsi/gvp11.h | 11
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 2
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r-- drivers/scsi/megaraid.c | 20
-rw-r--r-- drivers/scsi/megaraid.h | 3
-rw-r--r-- drivers/scsi/megaraid/megaraid_mm.c | 22
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_base.c | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_config.c | 2
-rw-r--r-- drivers/scsi/mvme147.c | 33
-rw-r--r-- drivers/scsi/osst.c | 23
-rw-r--r-- drivers/scsi/qlogicpti.c | 17
-rw-r--r-- drivers/scsi/scsi_scan.c | 26
-rw-r--r-- drivers/scsi/sg.c | 17
-rw-r--r-- drivers/scsi/st.c | 1
-rw-r--r-- drivers/scsi/sun_esp.c | 23
-rw-r--r-- drivers/serial/68328serial.c | 2
-rw-r--r-- drivers/serial/apbuart.c | 10
-rw-r--r-- drivers/serial/cpm_uart/cpm_uart_core.c | 9
-rw-r--r-- drivers/serial/mpc52xx_uart.c | 84
-rw-r--r-- drivers/serial/nwpserial.c | 2
-rw-r--r-- drivers/serial/of_serial.c | 12
-rw-r--r-- drivers/serial/pmac_zilog.c | 2
-rw-r--r-- drivers/serial/sh-sci.c | 5
-rw-r--r-- drivers/serial/sunhv.c | 9
-rw-r--r-- drivers/serial/sunsab.c | 13
-rw-r--r-- drivers/serial/sunsu.c | 13
-rw-r--r-- drivers/serial/sunzilog.c | 15
-rw-r--r-- drivers/serial/uartlite.c | 11
-rw-r--r-- drivers/serial/ucc_uart.c | 10
-rw-r--r-- drivers/spi/Kconfig | 17
-rw-r--r-- drivers/spi/Makefile | 2
-rw-r--r-- drivers/spi/amba-pl022.c | 250
-rw-r--r-- drivers/spi/davinci_spi.c | 12
-rw-r--r-- drivers/spi/ep93xx_spi.c | 938
-rw-r--r-- drivers/spi/mpc512x_psc_spi.c | 576
-rw-r--r-- drivers/spi/mpc52xx_psc_spi.c | 15
-rw-r--r-- drivers/spi/mpc52xx_spi.c | 22
-rw-r--r-- drivers/spi/omap2_mcspi.c | 153
-rw-r--r-- drivers/spi/spi_bitbang_txrx.h | 93
-rw-r--r-- drivers/spi/spi_butterfly.c | 3
-rw-r--r-- drivers/spi/spi_gpio.c | 3
-rw-r--r-- drivers/spi/spi_lm70llp.c | 3
-rw-r--r-- drivers/spi/spi_mpc8xxx.c | 125
-rw-r--r-- drivers/spi/spi_ppc4xx.c | 2
-rw-r--r-- drivers/spi/spi_s3c24xx_gpio.c | 3
-rw-r--r-- drivers/spi/spi_sh_sci.c | 3
-rw-r--r-- drivers/spi/xilinx_spi_of.c | 10
-rw-r--r-- drivers/staging/go7007/saa7134-go7007.c | 8
-rw-r--r-- drivers/staging/rt2860/common/rtmp_init.c | 15
-rw-r--r-- drivers/staging/rt2860/rtmp.h | 2
-rw-r--r-- drivers/telephony/ixj.c | 15
-rw-r--r-- drivers/usb/atm/speedtch.c | 5
-rw-r--r-- drivers/usb/gadget/fsl_qe_udc.c | 7
-rw-r--r-- drivers/usb/host/ehci-mxc.c | 4
-rw-r--r-- drivers/usb/host/ehci-ppc-of.c | 11
-rw-r--r-- drivers/usb/host/ehci-xilinx-of.c | 9
-rw-r--r-- drivers/usb/host/fhci-hcd.c | 11
-rw-r--r-- drivers/usb/host/isp1760-if.c | 9
-rw-r--r-- drivers/usb/host/ohci-ppc-of.c | 15
-rw-r--r-- drivers/usb/mon/mon_bin.c | 23
-rw-r--r-- drivers/usb/mon/mon_stat.c | 3
-rw-r--r-- drivers/vhost/vhost.c | 2
-rw-r--r-- drivers/video/arcfb.c | 8
-rw-r--r-- drivers/video/aty/atyfb_base.c | 4
-rw-r--r-- drivers/video/bf54x-lq043fb.c | 7
-rw-r--r-- drivers/video/bfin-lq035q1-fb.c | 252
-rw-r--r-- drivers/video/bfin-t350mcqb-fb.c | 7
-rw-r--r-- drivers/video/bw2.c | 7
-rw-r--r-- drivers/video/cg14.c | 7
-rw-r--r-- drivers/video/cg3.c | 7
-rw-r--r-- drivers/video/cg6.c | 9
-rw-r--r-- drivers/video/da8xx-fb.c | 301
-rw-r--r-- drivers/video/fb_defio.c | 40
-rw-r--r-- drivers/video/ffb.c | 9
-rw-r--r-- drivers/video/fsl-diu-fb.c | 10
-rw-r--r-- drivers/video/hgafb.c | 10
-rw-r--r-- drivers/video/hitfb.c | 8
-rw-r--r-- drivers/video/intelfb/intelfb.h | 4
-rw-r--r-- drivers/video/leo.c | 7
-rw-r--r-- drivers/video/mb862xx/mb862xxfb.c | 8
-rw-r--r-- drivers/video/nuc900fb.c | 2
-rw-r--r-- drivers/video/p9100.c | 7
-rw-r--r-- drivers/video/platinumfb.c | 9
-rw-r--r-- drivers/video/s3c2410fb.c | 10
-rw-r--r-- drivers/video/s3fb.c | 101
-rw-r--r-- drivers/video/sgivwfb.c | 10
-rw-r--r-- drivers/video/sis/sis_main.c | 2
-rw-r--r-- drivers/video/sunxvr1000.c | 9
-rw-r--r-- drivers/video/tcx.c | 7
-rw-r--r-- drivers/video/vfb.c | 4
-rw-r--r-- drivers/video/vga16fb.c | 10
-rw-r--r-- drivers/video/via/viafbdev.c | 11
-rw-r--r-- drivers/video/w100fb.c | 10
-rw-r--r-- drivers/video/xilinxfb.c | 23
-rw-r--r-- drivers/watchdog/Kconfig | 26
-rw-r--r-- drivers/watchdog/Makefile | 1
-rw-r--r-- drivers/watchdog/bfin_wdt.c | 19
-rw-r--r-- drivers/watchdog/booke_wdt.c | 6
-rw-r--r-- drivers/watchdog/cpwd.c | 9
-rw-r--r-- drivers/watchdog/eurotechwdt.c | 1
-rw-r--r-- drivers/watchdog/gef_wdt.c | 8
-rw-r--r-- drivers/watchdog/iTCO_vendor_support.c | 11
-rw-r--r-- drivers/watchdog/iTCO_wdt.c | 29
-rw-r--r-- drivers/watchdog/imx2_wdt.c | 358
-rw-r--r-- drivers/watchdog/mpc8xxx_wdt.c | 10
-rw-r--r-- drivers/watchdog/pc87413_wdt.c | 9
-rw-r--r-- drivers/watchdog/pnx833x_wdt.c | 11
-rw-r--r-- drivers/watchdog/riowd.c | 7
-rw-r--r-- drivers/watchdog/s3c2410_wdt.c | 9
-rw-r--r-- drivers/watchdog/shwdt.c | 2
-rw-r--r-- drivers/watchdog/twl4030_wdt.c | 2
-rw-r--r-- drivers/watchdog/wdt.c | 2
-rw-r--r-- drivers/watchdog/wdt977.c | 2
-rw-r--r-- drivers/xen/manage.c | 14
544 files changed, 65980 insertions, 9694 deletions
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 9042a8579668..c1d23cd71652 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -401,11 +401,6 @@ static void acpi_print_osc_error(acpi_handle handle,
 	printk("\n");
 }
 
-static u8 hex_val(unsigned char c)
-{
-	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
-}
-
 static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
 {
 	int i;
@@ -422,8 +417,8 @@ static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
 		return AE_BAD_PARAMETER;
 	}
 	for (i = 0; i < 16; i++) {
-		uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4;
-		uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]);
+		uuid[i] = hex_to_bin(str[opc_map_to_uuid[i]]) << 4;
+		uuid[i] |= hex_to_bin(str[opc_map_to_uuid[i] + 1]);
 	}
 	return AE_OK;
 }
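The change above swaps a local hex_val() helper for the shared hex_to_bin() from <linux/kernel.h>, which returns the value of one hex digit or -1 for a non-hex character. A minimal sketch of the same nibble-pair decode, with parse_byte() being a hypothetical stand-in for the loop body, not code from this patch:

	/* Sketch only: combine two hex digits into one byte, as the loop
	 * above does; hex_to_bin() returns -1 on invalid input, which the
	 * caller is assumed to have validated already. */
	static u8 parse_byte(const char *s)
	{
		return (hex_to_bin(s[0]) << 4) | hex_to_bin(s[1]);
	}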
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 25df50f51c04..b5b48e703cb7 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1140,7 +1140,7 @@ static int __devinit pata_macio_attach(struct macio_dev *mdev,
1140 "Failed to allocate private memory\n"); 1140 "Failed to allocate private memory\n");
1141 return -ENOMEM; 1141 return -ENOMEM;
1142 } 1142 }
1143 priv->node = of_node_get(mdev->ofdev.node); 1143 priv->node = of_node_get(mdev->ofdev.dev.of_node);
1144 priv->mdev = mdev; 1144 priv->mdev = mdev;
1145 priv->dev = &mdev->ofdev.dev; 1145 priv->dev = &mdev->ofdev.dev;
1146 1146
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 96b11b604ae0..36afe2c1c747 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -694,7 +694,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
 	struct bcom_task *dmatsk = NULL;
 
 	/* Get ipb frequency */
-	ipb_freq = mpc5xxx_get_bus_frequency(op->node);
+	ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
 	if (!ipb_freq) {
 		dev_err(&op->dev, "could not determine IPB bus frequency\n");
 		return -ENODEV;
@@ -702,7 +702,7 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
 
 	/* Get device base address from device tree, request the region
 	 * and ioremap it. */
-	rv = of_address_to_resource(op->node, 0, &res_mem);
+	rv = of_address_to_resource(op->dev.of_node, 0, &res_mem);
 	if (rv) {
 		dev_err(&op->dev, "could not determine device base address\n");
 		return rv;
@@ -735,14 +735,14 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
 	 * The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and
 	 * UDMA modes 0, 1 and 2.
 	 */
-	prop = of_get_property(op->node, "mwdma-mode", &proplen);
+	prop = of_get_property(op->dev.of_node, "mwdma-mode", &proplen);
 	if ((prop) && (proplen >= 4))
 		mwdma_mask = ATA_MWDMA2 & ((1 << (*prop + 1)) - 1);
-	prop = of_get_property(op->node, "udma-mode", &proplen);
+	prop = of_get_property(op->dev.of_node, "udma-mode", &proplen);
 	if ((prop) && (proplen >= 4))
 		udma_mask = ATA_UDMA2 & ((1 << (*prop + 1)) - 1);
 
-	ata_irq = irq_of_parse_and_map(op->node, 0);
+	ata_irq = irq_of_parse_and_map(op->dev.of_node, 0);
 	if (ata_irq == NO_IRQ) {
 		dev_err(&op->dev, "error mapping irq\n");
 		return -EINVAL;
@@ -884,9 +884,6 @@ static struct of_device_id mpc52xx_ata_of_match[] = {
 
 
 static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
-	.owner		= THIS_MODULE,
-	.name		= DRV_NAME,
-	.match_table	= mpc52xx_ata_of_match,
 	.probe		= mpc52xx_ata_probe,
 	.remove		= mpc52xx_ata_remove,
 #ifdef CONFIG_PM
@@ -896,6 +893,7 @@ static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
 	.driver = {
 		.name = DRV_NAME,
 		.owner = THIS_MODULE,
+		.of_match_table = mpc52xx_ata_of_match,
 	},
 };
 
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 1f18ad9e4fe1..5a1b82c08be9 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -18,7 +18,7 @@ static int __devinit pata_of_platform_probe(struct of_device *ofdev,
 			     const struct of_device_id *match)
 {
 	int ret;
-	struct device_node *dn = ofdev->node;
+	struct device_node *dn = ofdev->dev.of_node;
 	struct resource io_res;
 	struct resource ctl_res;
 	struct resource irq_res;
@@ -91,8 +91,11 @@ static struct of_device_id pata_of_platform_match[] = {
 MODULE_DEVICE_TABLE(of, pata_of_platform_match);
 
 static struct of_platform_driver pata_of_platform_driver = {
-	.name		= "pata_of_platform",
-	.match_table	= pata_of_platform_match,
+	.driver = {
+		.name = "pata_of_platform",
+		.owner = THIS_MODULE,
+		.of_match_table = pata_of_platform_match,
+	},
 	.probe		= pata_of_platform_probe,
 	.remove		= __devexit_p(pata_of_platform_remove),
 };
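Most of the OF changes in this pull are one mechanical conversion, repeated driver by driver: the device node moves from op->node to op->dev.of_node, and the name, owner and match table move from the top of struct of_platform_driver into its embedded struct device_driver. A schematic before/after, with foo_driver, foo_match and foo_probe as hypothetical placeholder names:

	/* Before: bus-specific fields at the top level. */
	static struct of_platform_driver foo_driver = {
		.name		= "foo",
		.match_table	= foo_match,
		.probe		= foo_probe,
	};

	/* After: generic fields live in the embedded device_driver. */
	static struct of_platform_driver foo_driver = {
		.driver = {
			.name		= "foo",
			.owner		= THIS_MODULE,
			.of_match_table	= foo_match,
		},
		.probe	= foo_probe,
	};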
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index a69192b38b43..61c89b54ea23 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1313,7 +1313,7 @@ static int sata_fsl_probe(struct of_device *ofdev,
 	dev_printk(KERN_INFO, &ofdev->dev,
 		   "Sata FSL Platform/CSB Driver init\n");
 
-	hcr_base = of_iomap(ofdev->node, 0);
+	hcr_base = of_iomap(ofdev->dev.of_node, 0);
 	if (!hcr_base)
 		goto error_exit_with_cleanup;
 
@@ -1332,7 +1332,7 @@ static int sata_fsl_probe(struct of_device *ofdev,
 	host_priv->ssr_base = ssr_base;
 	host_priv->csr_base = csr_base;
 
-	irq = irq_of_parse_and_map(ofdev->node, 0);
+	irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
 	if (irq < 0) {
 		dev_printk(KERN_ERR, &ofdev->dev, "invalid irq from platform\n");
 		goto error_exit_with_cleanup;
@@ -1427,8 +1427,11 @@ static struct of_device_id fsl_sata_match[] = {
 MODULE_DEVICE_TABLE(of, fsl_sata_match);
 
 static struct of_platform_driver fsl_sata_driver = {
-	.name		= "fsl-sata",
-	.match_table	= fsl_sata_match,
+	.driver = {
+		.name = "fsl-sata",
+		.owner = THIS_MODULE,
+		.of_match_table = fsl_sata_match,
+	},
 	.probe		= sata_fsl_probe,
 	.remove		= sata_fsl_remove,
 #ifdef CONFIG_PM
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f7d6ebaa0418..da8f176c051e 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -789,7 +789,7 @@ static int __init fore200e_sba_map(struct fore200e *fore200e)
 	fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
 
 	/* get the supported DVMA burst sizes */
-	bursts = of_getintprop_default(op->node->parent, "burst-sizes", 0x00);
+	bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
 
 	if (sbus_can_dma_64bit())
 		sbus_set_sbus64(&op->dev, bursts);
@@ -820,18 +820,20 @@ static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_
 	const u8 *prop;
 	int len;
 
-	prop = of_get_property(op->node, "madaddrlo2", &len);
+	prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
 	if (!prop)
 		return -ENODEV;
 	memcpy(&prom->mac_addr[4], prop, 4);
 
-	prop = of_get_property(op->node, "madaddrhi4", &len);
+	prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
 	if (!prop)
 		return -ENODEV;
 	memcpy(&prom->mac_addr[2], prop, 4);
 
-	prom->serial_number = of_getintprop_default(op->node, "serialnumber", 0);
-	prom->hw_revision = of_getintprop_default(op->node, "promversion", 0);
+	prom->serial_number = of_getintprop_default(op->dev.of_node,
+						    "serialnumber", 0);
+	prom->hw_revision = of_getintprop_default(op->dev.of_node,
+						  "promversion", 0);
 
 	return 0;
 }
@@ -841,10 +843,10 @@ static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
 	struct of_device *op = fore200e->bus_dev;
 	const struct linux_prom_registers *regs;
 
-	regs = of_get_property(op->node, "reg", NULL);
+	regs = of_get_property(op->dev.of_node, "reg", NULL);
 
 	return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n",
-		       (regs ? regs->which_io : 0), op->node->name);
+		       (regs ? regs->which_io : 0), op->dev.of_node->name);
 }
 #endif /* CONFIG_SBUS */
 
@@ -2693,8 +2695,11 @@ static const struct of_device_id fore200e_sba_match[] = {
 MODULE_DEVICE_TABLE(of, fore200e_sba_match);
 
 static struct of_platform_driver fore200e_sba_driver = {
-	.name		= "fore_200e",
-	.match_table	= fore200e_sba_match,
+	.driver = {
+		.name = "fore_200e",
+		.owner = THIS_MODULE,
+		.of_match_table = fore200e_sba_match,
+	},
 	.probe		= fore200e_sba_probe,
 	.remove		= __devexit_p(fore200e_sba_remove),
 };
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 3fecfb446d90..5ad3bad2b0a5 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -37,7 +37,7 @@
 
 #define CFAG12864BFB_NAME "cfag12864bfb"
 
-static struct fb_fix_screeninfo cfag12864bfb_fix __initdata = {
+static struct fb_fix_screeninfo cfag12864bfb_fix __devinitdata = {
 	.id = "cfag12864b",
 	.type = FB_TYPE_PACKED_PIXELS,
 	.visual = FB_VISUAL_MONO10,
@@ -48,7 +48,7 @@ static struct fb_fix_screeninfo cfag12864bfb_fix __initdata = {
 	.accel = FB_ACCEL_NONE,
 };
 
-static struct fb_var_screeninfo cfag12864bfb_var __initdata = {
+static struct fb_var_screeninfo cfag12864bfb_var __devinitdata = {
 	.xres = CFAG12864B_WIDTH,
 	.yres = CFAG12864B_HEIGHT,
 	.xres_virtual = CFAG12864B_WIDTH,
@@ -114,7 +114,7 @@ none:
 	return ret;
 }
 
-static int cfag12864bfb_remove(struct platform_device *device)
+static int __devexit cfag12864bfb_remove(struct platform_device *device)
 {
 	struct fb_info *info = platform_get_drvdata(device);
 
@@ -128,7 +128,7 @@ static int cfag12864bfb_remove(struct platform_device *device)
 
 static struct platform_driver cfag12864bfb_driver = {
 	.probe = cfag12864bfb_probe,
-	.remove = cfag12864bfb_remove,
+	.remove = __devexit_p(cfag12864bfb_remove),
 	.driver = {
 		.name = CFAG12864BFB_NAME,
 	},
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 057979a19eea..2bdd8a94ec94 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -9,6 +9,7 @@
 #include <linux/memory.h>
 #include <linux/node.h>
 #include <linux/hugetlb.h>
+#include <linux/compaction.h>
 #include <linux/cpumask.h>
 #include <linux/topology.h>
 #include <linux/nodemask.h>
@@ -246,6 +247,8 @@ int register_node(struct node *node, int num, struct node *parent)
 		scan_unevictable_register_node(node);
 
 		hugetlb_register_node(node);
+
+		compaction_register_node(node);
 	}
 	return error;
 }
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index bf6b13206d00..9fc630ce1ddb 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -162,7 +162,7 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
 		topology_remove_dev(cpu);
 		break;
 	}
-	return rc ? NOTIFY_BAD : NOTIFY_OK;
+	return notifier_from_errno(rc);
 }
 
 static int __cpuinit topology_sysfs_init(void)
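The point of this one-liner is that notifier_from_errno() preserves the actual -Exxx value inside the notifier return code instead of collapsing every failure to NOTIFY_BAD, so a caller can recover it with notifier_to_errno(). Roughly what the helper in <linux/notifier.h> does, quoted from memory rather than from this patch:

	/* Encode an errno into a notifier return value and stop the chain;
	 * notifier_from_errno(0) is simply NOTIFY_OK. */
	static inline int notifier_from_errno(int err)
	{
		if (err)
			return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
		return NOTIFY_OK;
	}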
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 59ca2b77b574..52f2d11bc7b9 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1004,7 +1004,7 @@ static const struct block_device_operations floppy_fops = {
 
 static int swim3_add_device(struct macio_dev *mdev, int index)
 {
-	struct device_node *swim = mdev->ofdev.node;
+	struct device_node *swim = mdev->ofdev.dev.of_node;
 	struct floppy_state *fs = &floppy_states[index];
 	int rc = -EBUSY;
 
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index e1c95e208a66..a7b83c0a7eb5 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1198,10 +1198,10 @@ ace_of_probe(struct of_device *op, const struct of_device_id *match)
 	dev_dbg(&op->dev, "ace_of_probe(%p, %p)\n", op, match);
 
 	/* device id */
-	id = of_get_property(op->node, "port-number", NULL);
+	id = of_get_property(op->dev.of_node, "port-number", NULL);
 
 	/* physaddr */
-	rc = of_address_to_resource(op->node, 0, &res);
+	rc = of_address_to_resource(op->dev.of_node, 0, &res);
 	if (rc) {
 		dev_err(&op->dev, "invalid address\n");
 		return rc;
@@ -1209,11 +1209,11 @@ ace_of_probe(struct of_device *op, const struct of_device_id *match)
 	physaddr = res.start;
 
 	/* irq */
-	irq = irq_of_parse_and_map(op->node, 0);
+	irq = irq_of_parse_and_map(op->dev.of_node, 0);
 
 	/* bus width */
 	bus_width = ACE_BUS_WIDTH_16;
-	if (of_find_property(op->node, "8-bit", NULL))
+	if (of_find_property(op->dev.of_node, "8-bit", NULL))
 		bus_width = ACE_BUS_WIDTH_8;
 
 	/* Call the bus-independant setup code */
@@ -1237,13 +1237,12 @@ static const struct of_device_id ace_of_match[] __devinitconst = {
 MODULE_DEVICE_TABLE(of, ace_of_match);
 
 static struct of_platform_driver ace_of_driver = {
-	.owner = THIS_MODULE,
-	.name = "xsysace",
-	.match_table = ace_of_match,
 	.probe = ace_of_probe,
 	.remove = __devexit_p(ace_of_remove),
 	.driver = {
 		.name = "xsysace",
+		.owner = THIS_MODULE,
+		.of_match_table = ace_of_match,
 	},
 };
 
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index cc435be0bc13..451cd7071b1d 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -567,7 +567,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	struct disk_info *d;
 	struct cdrom_device_info *c;
 	struct request_queue *q;
-	struct device_node *node = vdev->dev.archdata.of_node;
+	struct device_node *node = vdev->dev.of_node;
 
 	deviceno = vdev->unit_address;
 	if (deviceno >= VIOCD_MAX_CD)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index e21175be25d0..f09fc0e2062d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1121,5 +1121,12 @@ config DEVPORT
 
 source "drivers/s390/char/Kconfig"
 
+config RAMOOPS
+	tristate "Log panic/oops to a RAM buffer"
+	default n
+	help
+	  This enables panic and oops messages to be logged to a circular
+	  buffer in RAM where it can be read back at some later point.
+
 endmenu
 
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d39be4cf1f5d..88d6eac69754 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -108,6 +108,7 @@ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
 obj-$(CONFIG_TCG_TPM)		+= tpm/
 
 obj-$(CONFIG_PS3_FLASH)		+= ps3flash.o
+obj-$(CONFIG_RAMOOPS)		+= ramoops.o
 
 obj-$(CONFIG_JS_RTC)		+= js-rtc.o
 js-rtc-y = rtc.o
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 67ea3a60de74..70312da4c968 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -384,7 +384,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
 {
 	u32 httfea,baseaddr,enuscr;
 	struct pci_dev *dev1;
-	int i;
+	int i, ret;
 	unsigned size = amd64_fetch_size();
 
 	dev_info(&pdev->dev, "setting up ULi AGP\n");
@@ -400,15 +400,18 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
 
 	if (i == ARRAY_SIZE(uli_sizes)) {
 		dev_info(&pdev->dev, "no ULi size found for %d\n", size);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto put;
 	}
 
 	/* shadow x86-64 registers into ULi registers */
 	pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
 
 	/* if x86-64 aperture base is beyond 4G, exit here */
-	if ((httfea & 0x7fff) >> (32 - 25))
-		return -ENODEV;
+	if ((httfea & 0x7fff) >> (32 - 25)) {
+		ret = -ENODEV;
+		goto put;
+	}
 
 	httfea = (httfea& 0x7fff) << 25;
 
@@ -420,9 +423,10 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
 	enuscr= httfea+ (size * 1024 * 1024) - 1;
 	pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
 	pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
-
+	ret = 0;
+put:
 	pci_dev_put(dev1);
-	return 0;
+	return ret;
 }
 
 
@@ -441,7 +445,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
 {
 	u32 tmp, apbase, apbar, aplimit;
 	struct pci_dev *dev1;
-	int i;
+	int i, ret;
 	unsigned size = amd64_fetch_size();
 
 	dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
@@ -458,7 +462,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
 
 	if (i == ARRAY_SIZE(nforce3_sizes)) {
 		dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto put;
 	}
 
 	pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
@@ -472,7 +477,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
 	/* if x86-64 aperture base is beyond 4G, exit here */
 	if ( (apbase & 0x7fff) >> (32 - 25) ) {
 		dev_info(&pdev->dev, "aperture base > 4G\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto put;
 	}
 
 	apbase = (apbase & 0x7fff) << 25;
@@ -488,9 +494,11 @@ static int nforce3_agp_init(struct pci_dev *pdev)
 	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
 	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
 
+	ret = 0;
+put:
 	pci_dev_put(dev1);
 
-	return 0;
+	return ret;
 }
 
 static int __devinit agp_amd64_probe(struct pci_dev *pdev,
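Both functions above previously returned early while still holding the reference taken on dev1; the fix funnels every exit through a single label that drops it. The shape of the fix in isolation, as a hedged sketch where foo_init, acquire_dev and check_foo are hypothetical names, not code from this driver:

	/* Every early exit that used to leak the dev1 reference now jumps
	 * to one label that always drops it before returning. */
	static int foo_init(struct pci_dev *pdev)
	{
		struct pci_dev *dev1 = acquire_dev(pdev);	/* takes a reference */
		int ret;

		if (check_foo(dev1)) {
			ret = -ENODEV;	/* was: return -ENODEV; leaked dev1 */
			goto put;
		}
		ret = 0;
	put:
		pci_dev_put(dev1);
		return ret;
	}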
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 56b27671adc4..4f8d60c25a98 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -84,6 +84,7 @@ static char *serial_version = "4.30";
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/bitops.h>
+#include <linux/platform_device.h>
 
 #include <asm/setup.h>
 
@@ -1954,29 +1955,16 @@ static const struct tty_operations serial_ops = {
 /*
  * The serial driver boot-time initialization code!
  */
-static int __init rs_init(void)
+static int __init amiga_serial_probe(struct platform_device *pdev)
 {
 	unsigned long flags;
 	struct serial_state * state;
 	int error;
 
-	if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_SERIAL))
-		return -ENODEV;
-
 	serial_driver = alloc_tty_driver(1);
 	if (!serial_driver)
 		return -ENOMEM;
 
-	/*
-	 * We request SERDAT and SERPER only, because the serial registers are
-	 * too spreaded over the custom register space
-	 */
-	if (!request_mem_region(CUSTOM_PHYSADDR+0x30, 4,
-				"amiserial [Paula]")) {
-		error = -EBUSY;
-		goto fail_put_tty_driver;
-	}
-
 	IRQ_ports = NULL;
 
 	show_serial_version();
@@ -1998,7 +1986,7 @@ static int __init rs_init(void)
 
 	error = tty_register_driver(serial_driver);
 	if (error)
-		goto fail_release_mem_region;
+		goto fail_put_tty_driver;
 
 	state = rs_table;
 	state->magic = SSTATE_MAGIC;
@@ -2050,23 +2038,24 @@ static int __init rs_init(void)
 	ciab.ddra |= (SER_DTR | SER_RTS);   /* outputs */
 	ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR);  /* inputs */
 
+	platform_set_drvdata(pdev, state);
+
 	return 0;
 
 fail_free_irq:
 	free_irq(IRQ_AMIGA_TBE, state);
fail_unregister:
 	tty_unregister_driver(serial_driver);
-fail_release_mem_region:
-	release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
 fail_put_tty_driver:
 	put_tty_driver(serial_driver);
 	return error;
 }
 
-static __exit void rs_exit(void)
+static int __exit amiga_serial_remove(struct platform_device *pdev)
 {
 	int error;
-	struct async_struct *info = rs_table[0].info;
+	struct serial_state *state = platform_get_drvdata(pdev);
+	struct async_struct *info = state->info;
 
 	/* printk("Unloading %s: version %s\n", serial_name, serial_version); */
 	tasklet_kill(&info->tlet);
@@ -2075,19 +2064,38 @@ static __exit void rs_exit(void)
 		       error);
 	put_tty_driver(serial_driver);
 
-	if (info) {
-		rs_table[0].info = NULL;
-		kfree(info);
-	}
+	rs_table[0].info = NULL;
+	kfree(info);
 
 	free_irq(IRQ_AMIGA_TBE, rs_table);
 	free_irq(IRQ_AMIGA_RBF, rs_table);
 
-	release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
+	platform_set_drvdata(pdev, NULL);
+
+	return error;
+}
+
+static struct platform_driver amiga_serial_driver = {
+	.remove = __exit_p(amiga_serial_remove),
+	.driver = {
+		.name	= "amiga-serial",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init amiga_serial_init(void)
+{
+	return platform_driver_probe(&amiga_serial_driver, amiga_serial_probe);
+}
+
+module_init(amiga_serial_init);
+
+static void __exit amiga_serial_exit(void)
+{
+	platform_driver_unregister(&amiga_serial_driver);
 }
 
-module_init(rs_init)
-module_exit(rs_exit)
+module_exit(amiga_serial_exit);
 
 
 #if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE)
@@ -2154,3 +2162,4 @@ console_initcall(amiserial_console_init);
 #endif /* CONFIG_SERIAL_CONSOLE && !MODULE */
 
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-serial");
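Note that the new init routine uses platform_driver_probe() rather than platform_driver_register(): it registers the driver and binds any already-registered matching device in one shot, and the probe function is not retained afterwards, which is why amiga_serial_probe() can stay __init and is not listed in the driver structure. A minimal sketch of the idiom, with foo_* as hypothetical names:

	static struct platform_driver foo_driver = {
		.remove = __exit_p(foo_remove),
		.driver = {
			.name  = "foo",
			.owner = THIS_MODULE,
		},
	};

	static int __init foo_init(void)
	{
		/* one-shot bind for non-hotpluggable devices; returns
		 * -ENODEV if no matching "foo" device is registered */
		return platform_driver_probe(&foo_driver, foo_probe);
	}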
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 4f568cb9af3f..033e1505fca9 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -265,8 +265,8 @@ static unsigned int apm_poll(struct file *fp, poll_table * wait)
  * Only when everyone who has opened /dev/apm_bios with write permission
  * has acknowledge does the actual suspend happen.
  */
-static int
-apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
+static long
+apm_ioctl(struct file *filp, u_int cmd, u_long arg)
 {
 	struct apm_user *as = filp->private_data;
 	int err = -EINVAL;
@@ -274,6 +274,7 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
 	if (!as->suser || !as->writer)
 		return -EPERM;
 
+	lock_kernel();
 	switch (cmd) {
 	case APM_IOC_SUSPEND:
 		mutex_lock(&state_lock);
@@ -334,6 +335,7 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
 		mutex_unlock(&state_lock);
 		break;
 	}
+	unlock_kernel();
 
 	return err;
 }
@@ -397,7 +399,7 @@ static const struct file_operations apm_bios_fops = {
 	.owner		= THIS_MODULE,
 	.read		= apm_read,
 	.poll		= apm_poll,
-	.ioctl		= apm_ioctl,
+	.unlocked_ioctl	= apm_ioctl,
 	.open		= apm_open,
 	.release	= apm_release,
 };
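Several character drivers in this pull follow the same recipe for retiring the locked .ioctl entry point: the file_operations field becomes .unlocked_ioctl, the handler loses its struct inode argument and returns long, and the BKL is taken explicitly inside the handler (lock_kernel()/unlock_kernel() from <linux/smp_lock.h>) so the old locking behavior is preserved while the VFS stops taking it. The conversion either inlines the lock around the body, as above, or wraps the old handler, as ds1620.c does further down. A schematic sketch, with foo_ioctl and foo_ioctl_locked as hypothetical names:

	#include <linux/smp_lock.h>

	static long foo_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
	{
		long ret;

		lock_kernel();		/* preserve old .ioctl locking */
		ret = foo_ioctl_locked(file, cmd, arg);
		unlock_kernel();
		return ret;
	}

	static const struct file_operations foo_fops = {
		.unlocked_ioctl	= foo_ioctl,	/* was: .ioctl */
	};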
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index a7424bf7eacf..f4ae0e0fb631 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -26,6 +26,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
+#include <linux/smp_lock.h>
 #include <linux/miscdevice.h>
 #include <linux/pci.h>
 #include <linux/wait.h>
@@ -106,8 +107,7 @@ static unsigned int DeviceErrorCount; /* number of device error */
 
 static ssize_t ac_read (struct file *, char __user *, size_t, loff_t *);
 static ssize_t ac_write (struct file *, const char __user *, size_t, loff_t *);
-static int ac_ioctl(struct inode *, struct file *, unsigned int,
-		    unsigned long);
+static long ac_ioctl(struct file *, unsigned int, unsigned long);
 static irqreturn_t ac_interrupt(int, void *);
 
 static const struct file_operations ac_fops = {
@@ -115,7 +115,7 @@ static const struct file_operations ac_fops = {
 	.llseek = no_llseek,
 	.read = ac_read,
 	.write = ac_write,
-	.ioctl = ac_ioctl,
+	.unlocked_ioctl = ac_ioctl,
 };
 
 static struct miscdevice ac_miscdev = {
@@ -689,7 +689,7 @@ static irqreturn_t ac_interrupt(int vec, void *dev_instance)
 
 
 
-static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 {	/* @ ADG ou ATO selon le cas */
 	int i;
@@ -703,15 +703,11 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
 	/* In general, the device is only openable by root anyway, so we're not
 	   particularly concerned that bogus ioctls can flood the console. */
 
-	adgl = kmalloc(sizeof(struct st_ram_io), GFP_KERNEL);
-	if (!adgl)
-		return -ENOMEM;
+	adgl = memdup_user(argp, sizeof(struct st_ram_io));
+	if (IS_ERR(adgl))
+		return PTR_ERR(adgl);
 
-	if (copy_from_user(adgl, argp, sizeof(struct st_ram_io))) {
-		kfree(adgl);
-		return -EFAULT;
-	}
-
+	lock_kernel();
 	IndexCard = adgl->num_card-1;
 
 	if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
@@ -721,6 +717,7 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
 			warncount--;
 		}
 		kfree(adgl);
+		unlock_kernel();
 		return -EINVAL;
 	}
 
@@ -838,6 +835,7 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
 	}
 	Dummy = readb(apbs[IndexCard].RamIO + VERS);
 	kfree(adgl);
+	unlock_kernel();
 	return 0;
 }
 
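The applicom change also adopts memdup_user(), which collapses the kmalloc plus copy_from_user plus error-cleanup dance into one call. Worth noting: it reports failure via ERR_PTR (-ENOMEM or -EFAULT), never NULL, hence the IS_ERR()/PTR_ERR() checks above. The idiom in isolation, with st_foo and foo_arg as hypothetical names:

	struct st_foo *p = memdup_user(foo_arg, sizeof(*p));
	if (IS_ERR(p))
		return PTR_ERR(p);	/* -ENOMEM or -EFAULT */
	/* ... use p ... */
	kfree(p);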
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index 61f0146e215d..dbee8688f75c 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -232,7 +232,7 @@ ds1620_read(struct file *file, char __user *buf, size_t count, loff_t *ptr)
 }
 
 static int
-ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+ds1620_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct therm therm;
 	union {
@@ -316,6 +316,18 @@ ds1620_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
 	return 0;
 }
 
+static long
+ds1620_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = ds1620_ioctl(file, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
 #ifdef THERM_USE_PROC
 static int
 proc_therm_ds1620_read(char *buf, char **start, off_t offset,
@@ -344,7 +356,7 @@ static const struct file_operations ds1620_fops = {
 	.owner = THIS_MODULE,
 	.open = ds1620_open,
 	.read = ds1620_read,
-	.ioctl = ds1620_ioctl,
+	.unlocked_ioctl = ds1620_unlocked_ioctl,
 };
 
 static struct miscdevice ds1620_miscdev = {
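ds1620 (like generic_nvram, genrtc and hpet further down) follows the same mechanical recipe for leaving the .ioctl entry point behind: drop the unused inode argument from the old handler, then add a thin unlocked_ioctl wrapper that takes the Big Kernel Lock explicitly, since the VFS no longer does it for the driver. A generic sketch of that wrapper, with legacy_ioctl() standing in for the existing handler:

#include <linux/fs.h>
#include <linux/smp_lock.h>

static int legacy_ioctl(struct file *file, unsigned int cmd,
                        unsigned long arg)
{
        return -ENOTTY;		/* old switch (cmd) body would live here */
}

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
                                   unsigned long arg)
{
        int ret;

        /* Preserve the old semantics: the VFS used to take the BKL
         * around .ioctl; now the driver takes it itself. */
        lock_kernel();
        ret = legacy_ioctl(file, cmd, arg);
        unlock_kernel();

        return ret;
}

static const struct file_operations example_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = example_unlocked_ioctl,
};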
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 045c930e6320..e3859d4eaead 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -93,8 +93,8 @@ static ssize_t dtlk_write(struct file *, const char __user *,
 static unsigned int dtlk_poll(struct file *, poll_table *);
 static int dtlk_open(struct inode *, struct file *);
 static int dtlk_release(struct inode *, struct file *);
-static int dtlk_ioctl(struct inode *inode, struct file *file,
+static long dtlk_ioctl(struct file *file,
 		       unsigned int cmd, unsigned long arg);
 
 static const struct file_operations dtlk_fops =
 {
@@ -102,7 +102,7 @@ static const struct file_operations dtlk_fops =
 	.read = dtlk_read,
 	.write = dtlk_write,
 	.poll = dtlk_poll,
-	.ioctl = dtlk_ioctl,
+	.unlocked_ioctl = dtlk_ioctl,
 	.open = dtlk_open,
 	.release = dtlk_release,
 };
@@ -263,10 +263,9 @@ static void dtlk_timer_tick(unsigned long data)
 	wake_up_interruptible(&dtlk_process_list);
 }
 
-static int dtlk_ioctl(struct inode *inode,
-		      struct file *file,
-		      unsigned int cmd,
-		      unsigned long arg)
+static long dtlk_ioctl(struct file *file,
+		       unsigned int cmd,
+		       unsigned long arg)
 {
 	char __user *argp = (char __user *)arg;
 	struct dtlk_settings *sp;
@@ -276,7 +275,9 @@ static int dtlk_ioctl(struct inode *inode,
 	switch (cmd) {
 
 	case DTLK_INTERROGATE:
+		lock_kernel();
 		sp = dtlk_interrogate();
+		unlock_kernel();
 		if (copy_to_user(argp, sp, sizeof(struct dtlk_settings)))
 			return -EINVAL;
 		return 0;
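dtlk is slightly finer-grained than the wrapper drivers: instead of bracketing the whole ioctl, it pushes lock_kernel() down so that only the call touching shared hardware state, dtlk_interrogate(), runs under the BKL; copy_to_user() and the purely local cases stay outside it. A sketch of that shape, with all names hypothetical:

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/smp_lock.h>
#include <linux/uaccess.h>

struct example_state { int value; };
#define EXAMPLE_QUERY _IOR('E', 0, struct example_state)

static struct example_state st;

static struct example_state *example_query(void)
{
        return &st;		/* would read device registers in a real driver */
}

static long example_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        struct example_state *sp;

        switch (cmd) {
        case EXAMPLE_QUERY:
                lock_kernel();		/* only the stateful helper is locked */
                sp = example_query();
                unlock_kernel();
                if (copy_to_user(argp, sp, sizeof(*sp)))
                        return -EFAULT;
                return 0;
        default:
                return -EINVAL;		/* no lock needed on this path */
        }
}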
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c
index fda4181b5e67..82b5a88a82d7 100644
--- a/drivers/char/generic_nvram.c
+++ b/drivers/char/generic_nvram.c
@@ -19,6 +19,7 @@
 #include <linux/miscdevice.h>
 #include <linux/fcntl.h>
 #include <linux/init.h>
+#include <linux/smp_lock.h>
 #include <asm/uaccess.h>
 #include <asm/nvram.h>
 #ifdef CONFIG_PPC_PMAC
@@ -84,8 +85,7 @@ static ssize_t write_nvram(struct file *file, const char __user *buf,
 	return p - buf;
 }
 
-static int nvram_ioctl(struct inode *inode, struct file *file,
-	unsigned int cmd, unsigned long arg)
+static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	switch(cmd) {
 #ifdef CONFIG_PPC_PMAC
@@ -116,12 +116,23 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
 		return 0;
 }
 
+static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = nvram_ioctl(file, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
 const struct file_operations nvram_fops = {
 	.owner = THIS_MODULE,
 	.llseek = nvram_llseek,
 	.read = read_nvram,
 	.write = write_nvram,
-	.ioctl = nvram_ioctl,
+	.unlocked_ioctl = nvram_unlocked_ioctl,
 };
 
 static struct miscdevice nvram_dev = {
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 31e7c91c2d9d..b6c2cc167c11 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -262,7 +262,7 @@ static inline int gen_set_rtc_irq_bit(unsigned char bit)
 #endif
 }
 
-static int gen_rtc_ioctl(struct inode *inode, struct file *file,
+static int gen_rtc_ioctl(struct file *file,
 			 unsigned int cmd, unsigned long arg)
 {
 	struct rtc_time wtime;
@@ -332,6 +332,18 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
 	return -EINVAL;
 }
 
+static long gen_rtc_unlocked_ioctl(struct file *file, unsigned int cmd,
+				   unsigned long arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = gen_rtc_ioctl(file, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
 /*
  * We enforce only one user at a time here with the open/close.
  * Also clear the previous interrupt data on an open, and clean
@@ -482,7 +494,7 @@ static const struct file_operations gen_rtc_fops = {
 	.read = gen_rtc_read,
 	.poll = gen_rtc_poll,
 #endif
-	.ioctl = gen_rtc_ioctl,
+	.unlocked_ioctl = gen_rtc_unlocked_ioctl,
 	.open = gen_rtc_open,
 	.release = gen_rtc_release,
 };
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 712d9f271aa6..e0249722d25f 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -49,8 +49,9 @@
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50#include <linux/sysrq.h> 50#include <linux/sysrq.h>
51#include <linux/timer.h> 51#include <linux/timer.h>
52#include <linux/time.h>
52 53
53#define VERSION_STR "0.9.0" 54#define VERSION_STR "0.9.1"
54 55
55#define DEFAULT_IOFENCE_MARGIN 60 /* Default fudge factor, in seconds */ 56#define DEFAULT_IOFENCE_MARGIN 60 /* Default fudge factor, in seconds */
56#define DEFAULT_IOFENCE_TICK 180 /* Default timer timeout, in seconds */ 57#define DEFAULT_IOFENCE_TICK 180 /* Default timer timeout, in seconds */
@@ -119,10 +120,8 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
119#if defined(CONFIG_S390) 120#if defined(CONFIG_S390)
120# define HAVE_MONOTONIC 121# define HAVE_MONOTONIC
121# define TIMER_FREQ 1000000000ULL 122# define TIMER_FREQ 1000000000ULL
122#elif defined(CONFIG_IA64)
123# define TIMER_FREQ ((unsigned long long)local_cpu_data->itc_freq)
124#else 123#else
125# define TIMER_FREQ (HZ*loops_per_jiffy) 124# define TIMER_FREQ 1000000000ULL
126#endif 125#endif
127 126
128#ifdef HAVE_MONOTONIC 127#ifdef HAVE_MONOTONIC
@@ -130,7 +129,9 @@ extern unsigned long long monotonic_clock(void);
130#else 129#else
131static inline unsigned long long monotonic_clock(void) 130static inline unsigned long long monotonic_clock(void)
132{ 131{
133 return get_cycles(); 132 struct timespec ts;
133 getrawmonotonic(&ts);
134 return timespec_to_ns(&ts);
134} 135}
135#endif /* HAVE_MONOTONIC */ 136#endif /* HAVE_MONOTONIC */
136 137
@@ -168,6 +169,13 @@ static void hangcheck_fire(unsigned long data)
168 printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n"); 169 printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
169 } 170 }
170 } 171 }
172#if 0
173 /*
174 * Enable to investigate delays in detail
175 */
176 printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
177 tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
178#endif
171 mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ)); 179 mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
172 hangcheck_tsc = monotonic_clock(); 180 hangcheck_tsc = monotonic_clock();
173} 181}
@@ -180,7 +188,7 @@ static int __init hangcheck_init(void)
180#if defined (HAVE_MONOTONIC) 188#if defined (HAVE_MONOTONIC)
181 printk("Hangcheck: Using monotonic_clock().\n"); 189 printk("Hangcheck: Using monotonic_clock().\n");
182#else 190#else
183 printk("Hangcheck: Using get_cycles().\n"); 191 printk("Hangcheck: Using getrawmonotonic().\n");
184#endif /* HAVE_MONOTONIC */ 192#endif /* HAVE_MONOTONIC */
185 hangcheck_tsc_margin = 193 hangcheck_tsc_margin =
186 (unsigned long long)(hangcheck_margin + hangcheck_tick); 194 (unsigned long long)(hangcheck_margin + hangcheck_tick);
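The hangcheck-timer change retires the per-architecture cycle-counter guesswork (get_cycles(), itc_freq, HZ*loops_per_jiffy) in favour of the raw monotonic clock, which is why TIMER_FREQ can collapse to a flat 10^9 everywhere except s390. A sketch of the nanosecond-clock idiom the driver now relies on:

#include <linux/time.h>

/* Raw monotonic time in nanoseconds: unaffected by settimeofday()
 * and NTP slewing, which is exactly what a hang detector wants. */
static inline unsigned long long example_monotonic_ns(void)
{
        struct timespec ts;

        getrawmonotonic(&ts);
        return timespec_to_ns(&ts);	/* tv_sec * NSEC_PER_SEC + tv_nsec */
}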
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 9ded667625ac..a0a1829d3198 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -431,14 +431,18 @@ static int hpet_release(struct inode *inode, struct file *file)
 
 static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
 
-static int
-hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
-	   unsigned long arg)
+static long hpet_ioctl(struct file *file, unsigned int cmd,
+		       unsigned long arg)
 {
 	struct hpet_dev *devp;
+	int ret;
 
 	devp = file->private_data;
-	return hpet_ioctl_common(devp, cmd, arg, 0);
+	lock_kernel();
+	ret = hpet_ioctl_common(devp, cmd, arg, 0);
+	unlock_kernel();
+
+	return ret;
 }
 
 static int hpet_ioctl_ieon(struct hpet_dev *devp)
@@ -654,7 +658,7 @@ static const struct file_operations hpet_fops = {
 	.llseek = no_llseek,
 	.read = hpet_read,
 	.poll = hpet_poll,
-	.ioctl = hpet_ioctl,
+	.unlocked_ioctl = hpet_ioctl,
 	.open = hpet_open,
 	.release = hpet_release,
 	.fasync = hpet_fasync,
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 793b236c9266..d4b14ff1c4c1 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -194,10 +194,8 @@ static inline void print_state(struct hvsi_struct *hp)
 		"HVSI_WAIT_FOR_MCTRL_RESPONSE",
 		"HVSI_FSP_DIED",
 	};
-	const char *name = state_names[hp->state];
-
-	if (hp->state > ARRAY_SIZE(state_names))
-		name = "UNKNOWN";
+	const char *name = (hp->state < ARRAY_SIZE(state_names))
+			? state_names[hp->state] : "UNKNOWN";
 
 	pr_debug("hvsi%i: state = %s\n", hp->index, name);
 #endif /* DEBUG */
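The hvsi fix merits a second look: the old code indexed state_names[hp->state] before validating hp->state, and its after-the-fact check used '>' where '>=' was needed, so state == ARRAY_SIZE(state_names) still read one past the end. Testing 'state < ARRAY_SIZE(...)' before the lookup closes both holes. The general idiom, reduced to essentials:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

static const char *state_names[] = {
        "IDLE", "BUSY", "DEAD",		/* illustrative table */
};

static const char *example_state_name(unsigned int state)
{
        /* Validate first, index second; '<' also rejects state == size. */
        return (state < ARRAY_SIZE(state_names))
                        ? state_names[state] : "UNKNOWN";
}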
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 10f868eefaa6..0f9cbf1aaf15 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -660,7 +660,7 @@ static int __devinit n2rng_probe(struct of_device *op,
 			np->hvapi_major);
 		goto out_hvapi_unregister;
 	}
-	np->num_units = of_getintprop_default(op->node,
+	np->num_units = of_getintprop_default(op->dev.of_node,
 					      "rng-#units", 0);
 	if (!np->num_units) {
 		dev_err(&op->dev, "VF RNG lacks rng-#units property\n");
@@ -751,8 +751,11 @@ static const struct of_device_id n2rng_match[] = {
 MODULE_DEVICE_TABLE(of, n2rng_match);
 
 static struct of_platform_driver n2rng_driver = {
-	.name = "n2rng",
-	.match_table = n2rng_match,
+	.driver = {
+		.name = "n2rng",
+		.owner = THIS_MODULE,
+		.of_match_table = n2rng_match,
+	},
 	.probe = n2rng_probe,
 	.remove = __devexit_p(n2rng_remove),
 };
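n2-drv, pasemi-rng and ipmi_si below are all touched by the same tree-wide migration: of_platform_driver loses its private .name/.match_table fields in favour of the embedded struct device_driver, and the device-tree node moves from of_device->node to the generic dev.of_node. Roughly, a converted driver registers like this (driver name and compatible string invented for illustration):

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

static const struct of_device_id example_match[] = {
        { .compatible = "vendor,example-device" },
        {},
};
MODULE_DEVICE_TABLE(of, example_match);

static int __devinit example_probe(struct of_device *op,
                                   const struct of_device_id *match)
{
        /* The device-tree node now lives in the generic struct device. */
        struct device_node *np = op->dev.of_node;

        return np ? 0 : -ENODEV;
}

static struct of_platform_driver example_driver = {
        .driver = {
                .name = "example",
                .owner = THIS_MODULE,
                .of_match_table = example_match,	/* was .match_table */
        },
        .probe = example_probe,
};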
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index a8b4c4010144..a348c7e9aa0b 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -15,6 +15,10 @@
15#include <linux/amba/bus.h> 15#include <linux/amba/bus.h>
16#include <linux/hw_random.h> 16#include <linux/hw_random.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/clk.h>
19#include <linux/err.h>
20
21static struct clk *rng_clk;
18 22
19static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) 23static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
20{ 24{
@@ -40,6 +44,15 @@ static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id)
40 void __iomem *base; 44 void __iomem *base;
41 int ret; 45 int ret;
42 46
47 rng_clk = clk_get(&dev->dev, NULL);
48 if (IS_ERR(rng_clk)) {
49 dev_err(&dev->dev, "could not get rng clock\n");
50 ret = PTR_ERR(rng_clk);
51 return ret;
52 }
53
54 clk_enable(rng_clk);
55
43 ret = amba_request_regions(dev, dev->dev.init_name); 56 ret = amba_request_regions(dev, dev->dev.init_name);
44 if (ret) 57 if (ret)
45 return ret; 58 return ret;
@@ -57,6 +70,8 @@ out_unmap:
57 iounmap(base); 70 iounmap(base);
58out_release: 71out_release:
59 amba_release_regions(dev); 72 amba_release_regions(dev);
73 clk_disable(rng_clk);
74 clk_put(rng_clk);
60 return ret; 75 return ret;
61} 76}
62 77
@@ -66,6 +81,8 @@ static int nmk_rng_remove(struct amba_device *dev)
66 hwrng_unregister(&nmk_rng); 81 hwrng_unregister(&nmk_rng);
67 iounmap(base); 82 iounmap(base);
68 amba_release_regions(dev); 83 amba_release_regions(dev);
84 clk_disable(rng_clk);
85 clk_put(rng_clk);
69 return 0; 86 return 0;
70} 87}
71 88
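nomadik-rng wires the block into the clk framework: look the clock up and enable it before touching the hardware, then undo both on the error path and in remove. Note that, as rendered above, the early 'return ret' after a failed amba_request_regions() still leaves the clock enabled and referenced; a fully balanced version of the pattern looks like this sketch (helper names hypothetical):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/device.h>

static int example_clk_probe(struct device *dev, int (*hw_init)(void))
{
        struct clk *clk;
        int ret;

        clk = clk_get(dev, NULL);	/* look up the block's clock */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_enable(clk);		/* ungate it before register access */
        if (ret)
                goto out_put;

        ret = hw_init();		/* claim regions, map registers, ... */
        if (ret)
                goto out_disable;	/* unwind keeps the clock balanced */

        return 0;

out_disable:
        clk_disable(clk);
out_put:
        clk_put(clk);
        return ret;
}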
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 7fa61dd1d9d9..261ba8f22b8b 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -98,7 +98,7 @@ static int __devinit rng_probe(struct of_device *ofdev,
 			       const struct of_device_id *match)
 {
 	void __iomem *rng_regs;
-	struct device_node *rng_np = ofdev->node;
+	struct device_node *rng_np = ofdev->dev.of_node;
 	struct resource res;
 	int err = 0;
 
@@ -140,8 +140,11 @@ static struct of_device_id rng_match[] = {
 };
 
 static struct of_platform_driver rng_driver = {
-	.name = "pasemi-rng",
-	.match_table = rng_match,
+	.driver = {
+		.name = "pasemi-rng",
+		.owner = THIS_MODULE,
+		.of_match_table = rng_match,
+	},
 	.probe = rng_probe,
 	.remove = rng_remove,
 };
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 65545de3dbf4..d8ec92a38980 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -228,8 +228,7 @@ static int handle_send_req(ipmi_user_t user,
 	return rv;
 }
 
-static int ipmi_ioctl(struct inode *inode,
-		      struct file *file,
+static int ipmi_ioctl(struct file *file,
 		      unsigned int cmd,
 		      unsigned long data)
 {
@@ -630,6 +629,23 @@ static int ipmi_ioctl(struct inode *inode,
 	return rv;
 }
 
+/*
+ * Note: it doesn't make sense to take the BKL here but
+ *       not in compat_ipmi_ioctl. -arnd
+ */
+static long ipmi_unlocked_ioctl(struct file *file,
+				unsigned int cmd,
+				unsigned long data)
+{
+	int ret;
+
+	lock_kernel();
+	ret = ipmi_ioctl(file, cmd, data);
+	unlock_kernel();
+
+	return ret;
+}
+
 #ifdef CONFIG_COMPAT
 
 /*
@@ -802,7 +818,7 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
 		if (copy_to_user(precv64, &recv64, sizeof(recv64)))
 			return -EFAULT;
 
-		rc = ipmi_ioctl(filep->f_path.dentry->d_inode, filep,
+		rc = ipmi_ioctl(filep,
 				((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
 				 ? IPMICTL_RECEIVE_MSG
 				 : IPMICTL_RECEIVE_MSG_TRUNC),
@@ -819,14 +835,14 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
 		return rc;
 	}
 	default:
-		return ipmi_ioctl(filep->f_path.dentry->d_inode, filep, cmd, arg);
+		return ipmi_ioctl(filep, cmd, arg);
 	}
 }
 #endif
 
 static const struct file_operations ipmi_fops = {
 	.owner = THIS_MODULE,
-	.ioctl = ipmi_ioctl,
+	.unlocked_ioctl = ipmi_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = compat_ipmi_ioctl,
 #endif
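ipmi_devintf shows the complete fops wiring once a driver carries both entry points: the native path goes through the BKL-wrapped unlocked_ioctl, while compat_ioctl translates 32-bit layouts and then calls the common handler directly, with no BKL, which is exactly the asymmetry the in-line "-arnd" comment flags. A skeleton of that arrangement, handler names invented:

#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <linux/compat.h>

static int example_ioctl(struct file *file, unsigned int cmd,
                         unsigned long arg)
{
        return -ENOTTY;		/* shared implementation goes here */
}

static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
                                   unsigned long arg)
{
        int ret;

        lock_kernel();
        ret = example_ioctl(file, cmd, arg);
        unlock_kernel();

        return ret;
}

#ifdef CONFIG_COMPAT
static long example_compat_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)
{
        /* Translate 32-bit structures here, then reuse the handler.
         * Unlike the native path, this call runs without the BKL. */
        return example_ioctl(file, cmd, arg);
}
#endif

static const struct file_operations example_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = example_unlocked_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = example_compat_ioctl,
#endif
};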
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c6ad4234378d..4f3f8c9ec262 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2505,12 +2505,11 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
 			return rv;
 		}
 
-		printk(KERN_INFO
-		       "ipmi: Found new BMC (man_id: 0x%6.6x, "
-		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
-		       bmc->id.manufacturer_id,
-		       bmc->id.product_id,
-		       bmc->id.device_id);
+		dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
+			 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+			 bmc->id.manufacturer_id,
+			 bmc->id.product_id,
+			 bmc->id.device_id);
 	}
 
 	/*
@@ -4037,8 +4036,8 @@ static void ipmi_request_event(void)
 
 static struct timer_list ipmi_timer;
 
-/* Call every ~100 ms. */
-#define IPMI_TIMEOUT_TIME	100
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME	1000
 
 /* How many jiffies does it take to get to the timeout time. */
 #define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
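The msghandler change slows the maintenance timer from roughly 100 ms to roughly 1000 ms; the IPMI_TIMEOUT_JIFFIES macro absorbs whatever HZ the kernel was built with. The arithmetic, worked through for a few common HZ values:

#include <linux/kernel.h>	/* pulls in HZ */

#define IPMI_TIMEOUT_TIME	1000	/* period in milliseconds */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * HZ = 100:  (1000 * 100)  / 1000 = 100 jiffies, i.e. one second
 * HZ = 250:  (1000 * 250)  / 1000 = 250 jiffies, i.e. one second
 * HZ = 1000: (1000 * 1000) / 1000 = 1000 jiffies, i.e. one second
 *
 * With the previous value of 100 the same macro evaluated to HZ/10,
 * firing ten times as often.
 */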
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 4462b113ba3f..35603dd4e6c5 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -107,6 +107,14 @@ enum si_type {
 };
 static char *si_to_str[] = { "kcs", "smic", "bt" };
 
+enum ipmi_addr_src {
+	SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
+	SI_PCI, SI_DEVICETREE, SI_DEFAULT
+};
+static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
+					"ACPI", "SMBIOS", "PCI",
+					"device-tree", "default" };
+
 #define DEVICE_NAME "ipmi_si"
 
 static struct platform_driver ipmi_driver = {
@@ -188,7 +196,7 @@ struct smi_info {
 	int (*irq_setup)(struct smi_info *info);
 	void (*irq_cleanup)(struct smi_info *info);
 	unsigned int io_size;
-	char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
+	enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
 	void (*addr_source_cleanup)(struct smi_info *info);
 	void *addr_source_data;
 
@@ -300,6 +308,7 @@ static int num_max_busy_us;
 
 static int unload_when_empty = 1;
 
+static int add_smi(struct smi_info *smi);
 static int try_smi_init(struct smi_info *smi);
 static void cleanup_one_si(struct smi_info *to_clean);
 
@@ -314,9 +323,14 @@ static void deliver_recv_msg(struct smi_info *smi_info,
 {
 	/* Deliver the message to the upper layer with the lock
 	   released. */
-	spin_unlock(&(smi_info->si_lock));
-	ipmi_smi_msg_received(smi_info->intf, msg);
-	spin_lock(&(smi_info->si_lock));
+
+	if (smi_info->run_to_completion) {
+		ipmi_smi_msg_received(smi_info->intf, msg);
+	} else {
+		spin_unlock(&(smi_info->si_lock));
+		ipmi_smi_msg_received(smi_info->intf, msg);
+		spin_lock(&(smi_info->si_lock));
+	}
 }
 
 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -445,6 +459,9 @@ static inline void disable_si_irq(struct smi_info *smi_info)
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		start_disable_irq(smi_info);
 		smi_info->interrupt_disabled = 1;
+		if (!atomic_read(&smi_info->stop_operation))
+			mod_timer(&smi_info->si_timer,
+				  jiffies + SI_TIMEOUT_JIFFIES);
 	}
 }
 
@@ -576,9 +593,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
 		if (msg[2] != 0) {
 			/* Error clearing flags */
-			printk(KERN_WARNING
-			       "ipmi_si: Error clearing flags: %2.2x\n",
-			       msg[2]);
+			dev_warn(smi_info->dev,
+				 "Error clearing flags: %2.2x\n", msg[2]);
 		}
 		if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
 			start_enable_irq(smi_info);
@@ -670,9 +686,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
 		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 		if (msg[2] != 0) {
-			printk(KERN_WARNING
-			       "ipmi_si: Could not enable interrupts"
-			       ", failed get, using polled mode.\n");
+			dev_warn(smi_info->dev, "Could not enable interrupts"
+				 ", failed get, using polled mode.\n");
 			smi_info->si_state = SI_NORMAL;
 		} else {
 			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -693,11 +708,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
 
 		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
-		if (msg[2] != 0) {
-			printk(KERN_WARNING
-			       "ipmi_si: Could not enable interrupts"
-			       ", failed set, using polled mode.\n");
-		}
+		if (msg[2] != 0)
+			dev_warn(smi_info->dev, "Could not enable interrupts"
+				 ", failed set, using polled mode.\n");
+		else
+			smi_info->interrupt_disabled = 0;
 		smi_info->si_state = SI_NORMAL;
 		break;
 	}
@@ -709,9 +724,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
 		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 		if (msg[2] != 0) {
-			printk(KERN_WARNING
-			       "ipmi_si: Could not disable interrupts"
-			       ", failed get.\n");
+			dev_warn(smi_info->dev, "Could not disable interrupts"
+				 ", failed get.\n");
 			smi_info->si_state = SI_NORMAL;
 		} else {
 			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -733,9 +747,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
 		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 		if (msg[2] != 0) {
-			printk(KERN_WARNING
-			       "ipmi_si: Could not disable interrupts"
-			       ", failed set.\n");
+			dev_warn(smi_info->dev, "Could not disable interrupts"
+				 ", failed set.\n");
 		}
 		smi_info->si_state = SI_NORMAL;
 		break;
@@ -877,6 +890,11 @@ static void sender(void *send_info,
 	printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 
+	mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+
+	if (smi_info->thread)
+		wake_up_process(smi_info->thread);
+
 	if (smi_info->run_to_completion) {
 		/*
 		 * If we are running to completion, then throw it in
@@ -997,6 +1015,8 @@ static int ipmi_thread(void *data)
 			; /* do nothing */
 		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
 			schedule();
+		else if (smi_result == SI_SM_IDLE)
+			schedule_timeout_interruptible(100);
 		else
 			schedule_timeout_interruptible(0);
 	}
@@ -1039,6 +1059,7 @@ static void smi_timeout(unsigned long data)
 	unsigned long flags;
 	unsigned long jiffies_now;
 	long time_diff;
+	long timeout;
 #ifdef DEBUG_TIMING
 	struct timeval t;
 #endif
@@ -1059,9 +1080,9 @@ static void smi_timeout(unsigned long data)
 
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		/* Running with interrupts, only do long timeouts. */
-		smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+		timeout = jiffies + SI_TIMEOUT_JIFFIES;
 		smi_inc_stat(smi_info, long_timeouts);
-		goto do_add_timer;
+		goto do_mod_timer;
 	}
 
 	/*
@@ -1070,14 +1091,15 @@ static void smi_timeout(unsigned long data)
 	 */
 	if (smi_result == SI_SM_CALL_WITH_DELAY) {
 		smi_inc_stat(smi_info, short_timeouts);
-		smi_info->si_timer.expires = jiffies + 1;
+		timeout = jiffies + 1;
 	} else {
 		smi_inc_stat(smi_info, long_timeouts);
-		smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+		timeout = jiffies + SI_TIMEOUT_JIFFIES;
 	}
 
- do_add_timer:
-	add_timer(&(smi_info->si_timer));
+ do_mod_timer:
+	if (smi_result != SI_SM_IDLE)
+		mod_timer(&(smi_info->si_timer), timeout);
 }
 
 static irqreturn_t si_irq_handler(int irq, void *data)
@@ -1144,10 +1166,10 @@ static int smi_start_processing(void *send_info,
 		new_smi->thread = kthread_run(ipmi_thread, new_smi,
 					      "kipmi%d", new_smi->intf_num);
 		if (IS_ERR(new_smi->thread)) {
-			printk(KERN_NOTICE "ipmi_si_intf: Could not start"
+			dev_notice(new_smi->dev, "Could not start"
 				   " kernel thread due to error %ld, only using"
 				   " timers to drive the interface\n",
 				   PTR_ERR(new_smi->thread));
 			new_smi->thread = NULL;
 		}
 	}
@@ -1308,14 +1330,13 @@ static int std_irq_setup(struct smi_info *info)
 			 DEVICE_NAME,
 			 info);
 	if (rv) {
-		printk(KERN_WARNING
-		       "ipmi_si: %s unable to claim interrupt %d,"
-		       " running polled\n",
-		       DEVICE_NAME, info->irq);
+		dev_warn(info->dev, "%s unable to claim interrupt %d,"
+			 " running polled\n",
+			 DEVICE_NAME, info->irq);
 		info->irq = 0;
 	} else {
 		info->irq_cleanup = std_irq_cleanup;
-		printk("  Using irq %d\n", info->irq);
+		dev_info(info->dev, "Using irq %d\n", info->irq);
 	}
 
 	return rv;
@@ -1406,8 +1427,8 @@ static int port_setup(struct smi_info *info)
 		info->io.outputb = port_outl;
 		break;
 	default:
-		printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
+		dev_warn(info->dev, "Invalid register size: %d\n",
			 info->io.regsize);
 		return -EINVAL;
 	}
 
@@ -1529,8 +1550,8 @@ static int mem_setup(struct smi_info *info)
 		break;
 #endif
 	default:
-		printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
+		dev_warn(info->dev, "Invalid register size: %d\n",
			 info->io.regsize);
 		return -EINVAL;
 	}
 
@@ -1755,7 +1776,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
 			goto out;
 		}
 
-		info->addr_source = "hotmod";
+		info->addr_source = SI_HOTMOD;
 		info->si_type = si_type;
 		info->io.addr_data = addr;
 		info->io.addr_type = addr_space;
@@ -1777,7 +1798,9 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
 			info->irq_setup = std_irq_setup;
 		info->slave_addr = ipmb;
 
-		try_smi_init(info);
+		if (!add_smi(info))
+			if (try_smi_init(info))
+				cleanup_one_si(info);
 	} else {
 		/* remove */
 		struct smi_info *e, *tmp_e;
@@ -1813,7 +1836,8 @@ static __devinit void hardcode_find_bmc(void)
 		if (!info)
 			return;
 
-		info->addr_source = "hardcoded";
+		info->addr_source = SI_HARDCODED;
+		printk(KERN_INFO PFX "probing via hardcoded address\n");
 
 		if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
 			info->si_type = SI_KCS;
@@ -1822,8 +1846,7 @@ static __devinit void hardcode_find_bmc(void)
 		} else if (strcmp(si_type[i], "bt") == 0) {
 			info->si_type = SI_BT;
 		} else {
-			printk(KERN_WARNING
-			       "ipmi_si: Interface type specified "
+			printk(KERN_WARNING PFX "Interface type specified "
 			       "for interface %d, was invalid: %s\n",
 			       i, si_type[i]);
 			kfree(info);
@@ -1841,11 +1864,9 @@ static __devinit void hardcode_find_bmc(void)
 			info->io.addr_data = addrs[i];
 			info->io.addr_type = IPMI_MEM_ADDR_SPACE;
 		} else {
-			printk(KERN_WARNING
-			       "ipmi_si: Interface type specified "
-			       "for interface %d, "
-			       "but port and address were not set or "
-			       "set to zero.\n", i);
+			printk(KERN_WARNING PFX "Interface type specified "
+			       "for interface %d, but port and address were "
+			       "not set or set to zero.\n", i);
 			kfree(info);
 			continue;
 		}
@@ -1863,7 +1884,9 @@ static __devinit void hardcode_find_bmc(void)
 			info->irq_setup = std_irq_setup;
 		info->slave_addr = slave_addrs[i];
 
-		try_smi_init(info);
+		if (!add_smi(info))
+			if (try_smi_init(info))
+				cleanup_one_si(info);
 	}
 }
 
@@ -1923,15 +1946,13 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
 					  &ipmi_acpi_gpe,
 					  info);
 	if (status != AE_OK) {
-		printk(KERN_WARNING
-		       "ipmi_si: %s unable to claim ACPI GPE %d,"
-		       " running polled\n",
-		       DEVICE_NAME, info->irq);
+		dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
			 " running polled\n", DEVICE_NAME, info->irq);
 		info->irq = 0;
 		return -EINVAL;
 	} else {
 		info->irq_cleanup = acpi_gpe_irq_cleanup;
-		printk("  Using ACPI GPE %d\n", info->irq);
+		dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
 		return 0;
 	}
 }
@@ -1989,8 +2010,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
 	u8 addr_space;
 
 	if (spmi->IPMIlegacy != 1) {
-		printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+		printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
 		return -ENODEV;
 	}
 
 	if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -2000,11 +2021,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
 
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info) {
-		printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
+		printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
 		return -ENOMEM;
 	}
 
-	info->addr_source = "SPMI";
+	info->addr_source = SI_SPMI;
+	printk(KERN_INFO PFX "probing via SPMI\n");
 
 	/* Figure out the interface type. */
 	switch (spmi->InterfaceType) {
@@ -2018,8 +2040,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
 		info->si_type = SI_BT;
 		break;
 	default:
-		printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
+		printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
 		       spmi->InterfaceType);
 		kfree(info);
 		return -EIO;
 	}
@@ -2055,13 +2077,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
 		info->io.addr_type = IPMI_IO_ADDR_SPACE;
 	} else {
 		kfree(info);
-		printk(KERN_WARNING
-		       "ipmi_si: Unknown ACPI I/O Address type\n");
+		printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
 		return -EIO;
 	}
 	info->io.addr_data = spmi->addr.address;
 
-	try_smi_init(info);
+	add_smi(info);
 
 	return 0;
 }
@@ -2093,6 +2114,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
 {
 	struct acpi_device *acpi_dev;
 	struct smi_info *info;
+	struct resource *res;
 	acpi_handle handle;
 	acpi_status status;
 	unsigned long long tmp;
@@ -2105,7 +2127,8 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
 	if (!info)
 		return -ENOMEM;
 
-	info->addr_source = "ACPI";
+	info->addr_source = SI_ACPI;
+	printk(KERN_INFO PFX "probing via ACPI\n");
 
 	handle = acpi_dev->handle;
 
@@ -2125,22 +2148,26 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
 		info->si_type = SI_BT;
 		break;
 	default:
-		dev_info(&dev->dev, "unknown interface type %lld\n", tmp);
+		dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
 		goto err_free;
 	}
 
-	if (pnp_port_valid(dev, 0)) {
+	res = pnp_get_resource(dev, IORESOURCE_IO, 0);
+	if (res) {
 		info->io_setup = port_setup;
 		info->io.addr_type = IPMI_IO_ADDR_SPACE;
-		info->io.addr_data = pnp_port_start(dev, 0);
-	} else if (pnp_mem_valid(dev, 0)) {
-		info->io_setup = mem_setup;
-		info->io.addr_type = IPMI_MEM_ADDR_SPACE;
-		info->io.addr_data = pnp_mem_start(dev, 0);
 	} else {
+		res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
+		if (res) {
+			info->io_setup = mem_setup;
+			info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+		}
+	}
+	if (!res) {
 		dev_err(&dev->dev, "no I/O or memory address\n");
 		goto err_free;
 	}
+	info->io.addr_data = res->start;
 
 	info->io.regspacing = DEFAULT_REGSPACING;
 	info->io.regsize = DEFAULT_REGSPACING;
@@ -2156,10 +2183,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
 		info->irq_setup = std_irq_setup;
 	}
 
-	info->dev = &acpi_dev->dev;
+	info->dev = &dev->dev;
 	pnp_set_drvdata(dev, info);
 
-	return try_smi_init(info);
+	dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
		 res, info->io.regsize, info->io.regspacing,
		 info->irq);
+
+	return add_smi(info);
 
 err_free:
 	kfree(info);
@@ -2264,12 +2295,12 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
 
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info) {
-		printk(KERN_ERR
-		       "ipmi_si: Could not allocate SI data\n");
+		printk(KERN_ERR PFX "Could not allocate SI data\n");
 		return;
 	}
 
-	info->addr_source = "SMBIOS";
+	info->addr_source = SI_SMBIOS;
+	printk(KERN_INFO PFX "probing via SMBIOS\n");
 
 	switch (ipmi_data->type) {
 	case 0x01: /* KCS */
@@ -2299,8 +2330,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
 
 	default:
 		kfree(info);
-		printk(KERN_WARNING
-		       "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
+		printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
 		       ipmi_data->addr_space);
 		return;
 	}
@@ -2318,7 +2348,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
 	if (info->irq)
 		info->irq_setup = std_irq_setup;
 
-	try_smi_init(info);
+	add_smi(info);
 }
 
 static void __devinit dmi_find_bmc(void)
@@ -2368,7 +2398,8 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
 	if (!info)
 		return -ENOMEM;
 
-	info->addr_source = "PCI";
+	info->addr_source = SI_PCI;
+	dev_info(&pdev->dev, "probing via PCI");
 
 	switch (class_type) {
 	case PCI_ERMC_CLASSCODE_TYPE_SMIC:
@@ -2385,15 +2416,13 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
 
 	default:
 		kfree(info);
-		printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
-		       pci_name(pdev), class_type);
+		dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
 		return -ENOMEM;
 	}
 
 	rv = pci_enable_device(pdev);
 	if (rv) {
-		printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "couldn't enable PCI device\n");
 		kfree(info);
 		return rv;
 	}
@@ -2421,7 +2450,11 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
 	info->dev = &pdev->dev;
 	pci_set_drvdata(pdev, info);
 
-	return try_smi_init(info);
+	dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
		 &pdev->resource[0], info->io.regsize, info->io.regspacing,
		 info->irq);
+
+	return add_smi(info);
 }
 
 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
@@ -2469,11 +2502,11 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
 	struct smi_info *info;
 	struct resource resource;
 	const int *regsize, *regspacing, *regshift;
-	struct device_node *np = dev->node;
+	struct device_node *np = dev->dev.of_node;
 	int ret;
 	int proplen;
 
-	dev_info(&dev->dev, PFX "probing via device tree\n");
+	dev_info(&dev->dev, "probing via device tree\n");
 
 	ret = of_address_to_resource(np, 0, &resource);
 	if (ret) {
@@ -2503,12 +2536,12 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
 
 	if (!info) {
 		dev_err(&dev->dev,
-			PFX "could not allocate memory for OF probe\n");
+			"could not allocate memory for OF probe\n");
 		return -ENOMEM;
 	}
 
 	info->si_type = (enum si_type) match->data;
-	info->addr_source = "device-tree";
+	info->addr_source = SI_DEVICETREE;
 	info->irq_setup = std_irq_setup;
 
 	if (resource.flags & IORESOURCE_IO) {
@@ -2525,16 +2558,16 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
 	info->io.regspacing = regspacing ? *regspacing : DEFAULT_REGSPACING;
 	info->io.regshift = regshift ? *regshift : 0;
 
-	info->irq = irq_of_parse_and_map(dev->node, 0);
+	info->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
 	info->dev = &dev->dev;
 
-	dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
+	dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
 		info->io.addr_data, info->io.regsize, info->io.regspacing,
 		info->irq);
 
 	dev_set_drvdata(&dev->dev, info);
 
-	return try_smi_init(info);
+	return add_smi(info);
 }
 
 static int __devexit ipmi_of_remove(struct of_device *dev)
@@ -2555,8 +2588,11 @@ static struct of_device_id ipmi_match[] =
 };
 
 static struct of_platform_driver ipmi_of_platform_driver = {
-	.name = "ipmi",
-	.match_table = ipmi_match,
+	.driver = {
+		.name = "ipmi",
+		.owner = THIS_MODULE,
+		.of_match_table = ipmi_match,
+	},
 	.probe = ipmi_of_probe,
 	.remove = __devexit_p(ipmi_of_remove),
 };
@@ -2640,9 +2676,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
 
 	rv = wait_for_msg_done(smi_info);
 	if (rv) {
-		printk(KERN_WARNING
-		       "ipmi_si: Error getting response from get global,"
-		       " enables command, the event buffer is not"
+		printk(KERN_WARNING PFX "Error getting response from get"
+		       " global enables command, the event buffer is not"
 		       " enabled.\n");
 		goto out;
 	}
@@ -2654,10 +2689,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
 	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
 	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
 	    resp[2] != 0) {
-		printk(KERN_WARNING
-		       "ipmi_si: Invalid return from get global"
-		       " enables command, cannot enable the event"
-		       " buffer.\n");
+		printk(KERN_WARNING PFX "Invalid return from get global"
+		       " enables command, cannot enable the event buffer.\n");
 		rv = -EINVAL;
 		goto out;
 	}
@@ -2673,9 +2706,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
 
 	rv = wait_for_msg_done(smi_info);
 	if (rv) {
-		printk(KERN_WARNING
-		       "ipmi_si: Error getting response from set global,"
-		       " enables command, the event buffer is not"
+		printk(KERN_WARNING PFX "Error getting response from set"
		       " global, enables command, the event buffer is not"
 		       " enabled.\n");
 		goto out;
 	}
@@ -2686,10 +2718,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
 	if (resp_len < 3 ||
 	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
 	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
-		printk(KERN_WARNING
-		       "ipmi_si: Invalid return from get global,"
-		       "enables command, not enable the event"
-		       " buffer.\n");
+		printk(KERN_WARNING PFX "Invalid return from get global,"
		       "enables command, not enable the event buffer.\n");
 		rv = -EINVAL;
 		goto out;
 	}
@@ -2948,7 +2978,7 @@ static __devinit void default_find_bmc(void)
 		if (!info)
 			return;
 
-		info->addr_source = NULL;
+		info->addr_source = SI_DEFAULT;
 
 		info->si_type = ipmi_defaults[i].type;
 		info->io_setup = port_setup;
@@ -2960,14 +2990,16 @@ static __devinit void default_find_bmc(void)
 		info->io.regsize = DEFAULT_REGSPACING;
 		info->io.regshift = 0;
 
-		if (try_smi_init(info) == 0) {
-			/* Found one... */
-			printk(KERN_INFO "ipmi_si: Found default %s state"
-			       " machine at %s address 0x%lx\n",
-			       si_to_str[info->si_type],
-			       addr_space_to_str[info->io.addr_type],
-			       info->io.addr_data);
-			return;
+		if (add_smi(info) == 0) {
+			if ((try_smi_init(info)) == 0) {
+				/* Found one... */
+				printk(KERN_INFO PFX "Found default %s"
+				       " state machine at %s address 0x%lx\n",
+				       si_to_str[info->si_type],
+				       addr_space_to_str[info->io.addr_type],
+				       info->io.addr_data);
+			} else
+				cleanup_one_si(info);
 		}
 	}
 }
@@ -2986,34 +3018,48 @@ static int is_new_interface(struct smi_info *info)
 	return 1;
 }
 
-static int try_smi_init(struct smi_info *new_smi)
+static int add_smi(struct smi_info *new_smi)
 {
-	int rv;
-	int i;
-
-	if (new_smi->addr_source) {
-		printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
-		       " machine at %s address 0x%lx, slave address 0x%x,"
-		       " irq %d\n",
-		       new_smi->addr_source,
-		       si_to_str[new_smi->si_type],
-		       addr_space_to_str[new_smi->io.addr_type],
-		       new_smi->io.addr_data,
-		       new_smi->slave_addr, new_smi->irq);
-	}
+	int rv = 0;
 
+	printk(KERN_INFO PFX "Adding %s-specified %s state machine",
+	       ipmi_addr_src_to_str[new_smi->addr_source],
+	       si_to_str[new_smi->si_type]);
 	mutex_lock(&smi_infos_lock);
 	if (!is_new_interface(new_smi)) {
-		printk(KERN_WARNING "ipmi_si: duplicate interface\n");
+		printk(KERN_CONT PFX "duplicate interface\n");
 		rv = -EBUSY;
 		goto out_err;
 	}
 
+	printk(KERN_CONT "\n");
+
 	/* So we know not to free it unless we have allocated one. */
 	new_smi->intf = NULL;
 	new_smi->si_sm = NULL;
 	new_smi->handlers = NULL;
 
+	list_add_tail(&new_smi->link, &smi_infos);
+
+out_err:
+	mutex_unlock(&smi_infos_lock);
+	return rv;
+}
+
+static int try_smi_init(struct smi_info *new_smi)
+{
+	int rv = 0;
+	int i;
+
+	printk(KERN_INFO PFX "Trying %s-specified %s state"
+	       " machine at %s address 0x%lx, slave address 0x%x,"
+	       " irq %d\n",
+	       ipmi_addr_src_to_str[new_smi->addr_source],
+	       si_to_str[new_smi->si_type],
+	       addr_space_to_str[new_smi->io.addr_type],
+	       new_smi->io.addr_data,
+	       new_smi->slave_addr, new_smi->irq);
+
 	switch (new_smi->si_type) {
 	case SI_KCS:
 		new_smi->handlers = &kcs_smi_handlers;
@@ -3036,7 +3082,8 @@ static int try_smi_init(struct smi_info *new_smi)
 	/* Allocate the state machine's data and initialize it. */
 	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
 	if (!new_smi->si_sm) {
-		printk(KERN_ERR "Could not allocate state machine memory\n");
+		printk(KERN_ERR PFX
+		       "Could not allocate state machine memory\n");
 		rv = -ENOMEM;
 		goto out_err;
 	}
@@ -3046,7 +3093,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	/* Now that we know the I/O size, we can set up the I/O. */
 	rv = new_smi->io_setup(new_smi);
 	if (rv) {
-		printk(KERN_ERR "Could not set up I/O space\n");
+		printk(KERN_ERR PFX "Could not set up I/O space\n");
 		goto out_err;
 	}
 
@@ -3056,8 +3103,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	/* Do low-level detection first. */
 	if (new_smi->handlers->detect(new_smi->si_sm)) {
 		if (new_smi->addr_source)
-			printk(KERN_INFO "ipmi_si: Interface detection"
-			       " failed\n");
+			printk(KERN_INFO PFX "Interface detection failed\n");
 		rv = -ENODEV;
 		goto out_err;
 	}
@@ -3069,7 +3115,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	rv = try_get_dev_id(new_smi);
 	if (rv) {
 		if (new_smi->addr_source)
-			printk(KERN_INFO "ipmi_si: There appears to be no BMC"
+			printk(KERN_INFO PFX "There appears to be no BMC"
 			       " at this location\n");
 		goto out_err;
 	}
@@ -3085,7 +3131,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	for (i = 0; i < SI_NUM_STATS; i++)
 		atomic_set(&new_smi->stats[i], 0);
 
-	new_smi->interrupt_disabled = 0;
+	new_smi->interrupt_disabled = 1;
 	atomic_set(&new_smi->stop_operation, 0);
 	new_smi->intf_num = smi_num;
 	smi_num++;
@@ -3111,9 +3157,8 @@ static int try_smi_init(struct smi_info *new_smi)
 		new_smi->pdev = platform_device_alloc("ipmi_si",
 						      new_smi->intf_num);
 		if (!new_smi->pdev) {
-			printk(KERN_ERR
-			       "ipmi_si_intf:"
-			       " Unable to allocate platform device\n");
+			printk(KERN_ERR PFX
+			       "Unable to allocate platform device\n");
 			goto out_err;
 		}
 		new_smi->dev = &new_smi->pdev->dev;
@@ -3121,9 +3166,8 @@ static int try_smi_init(struct smi_info *new_smi)
 
 		rv = platform_device_add(new_smi->pdev);
 		if (rv) {
-			printk(KERN_ERR
-			       "ipmi_si_intf:"
-			       " Unable to register system interface device:"
+			printk(KERN_ERR PFX
+			       "Unable to register system interface device:"
 			       " %d\n",
 			       rv);
 			goto out_err;
@@ -3138,9 +3182,8 @@ static int try_smi_init(struct smi_info *new_smi)
 			       "bmc",
 			       new_smi->slave_addr);
 	if (rv) {
-		printk(KERN_ERR
-		       "ipmi_si: Unable to register device: error %d\n",
-		       rv);
+		dev_err(new_smi->dev, "Unable to register device: error %d\n",
+			rv);
 		goto out_err_stop_timer;
 	}
 
@@ -3148,9 +3191,7 @@ static int try_smi_init(struct smi_info *new_smi)
 				     type_file_read_proc,
 				     new_smi);
 	if (rv) {
-		printk(KERN_ERR
-		       "ipmi_si: Unable to create proc entry: %d\n",
-		       rv);
+		dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
 		goto out_err_stop_timer;
 	}
 
@@ -3158,9 +3199,7 @@ static int try_smi_init(struct smi_info *new_smi)
 				     stat_file_read_proc,
 				     new_smi);
 	if (rv) {
-		printk(KERN_ERR
-		       "ipmi_si: Unable to create proc entry: %d\n",
-		       rv);
+		dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
 		goto out_err_stop_timer;
 	}
 
@@ -3168,18 +3207,12 @@ static int try_smi_init(struct smi_info *new_smi)
 				     param_read_proc,
 				     new_smi);
 	if (rv) {
-		printk(KERN_ERR
-		       "ipmi_si: Unable to create proc entry: %d\n",
-		       rv);
+		dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
 		goto out_err_stop_timer;
 	}
 
-	list_add_tail(&new_smi->link, &smi_infos);
-
-	mutex_unlock(&smi_infos_lock);
-
-	printk(KERN_INFO "IPMI %s interface initialized\n",
-	       si_to_str[new_smi->si_type]);
+	dev_info(new_smi->dev, "IPMI %s interface initialized\n",
+		 si_to_str[new_smi->si_type]);
 
 	return 0;
 
@@ -3188,11 +3221,17 @@ static int try_smi_init(struct smi_info *new_smi)
 	wait_for_timer_and_thread(new_smi);
 
  out_err:
-	if (new_smi->intf)
+	new_smi->interrupt_disabled = 1;
+
+	if (new_smi->intf) {
 		ipmi_unregister_smi(new_smi->intf);
+		new_smi->intf = NULL;
+	}
 
-	if (new_smi->irq_cleanup)
+	if (new_smi->irq_cleanup) {
 		new_smi->irq_cleanup(new_smi);
+		new_smi->irq_cleanup = NULL;
+	}
 
 	/*
 	 * Wait until we know that we are out of any interrupt
@@ -3205,18 +3244,21 @@ static int try_smi_init(struct smi_info *new_smi)
 		if (new_smi->handlers)
 			new_smi->handlers->cleanup(new_smi->si_sm);
 		kfree(new_smi->si_sm);
+		new_smi->si_sm = NULL;
 	}
-	if (new_smi->addr_source_cleanup)
+	if (new_smi->addr_source_cleanup) {
 		new_smi->addr_source_cleanup(new_smi);
+		new_smi->addr_source_cleanup = NULL;
+	}
-	if (new_smi->io_cleanup)
+	if (new_smi->io_cleanup) {
 		new_smi->io_cleanup(new_smi);
+		new_smi->io_cleanup = NULL;
+	}
 
-	if (new_smi->dev_registered)
+	if (new_smi->dev_registered) {
 		platform_device_unregister(new_smi->pdev);
-
-	kfree(new_smi);
-
-	mutex_unlock(&smi_infos_lock);
+		new_smi->dev_registered = 0;
+	}
 
 	return rv;
 }
@@ -3226,6 +3268,8 @@ static __devinit int init_ipmi_si(void)
 	int i;
 	char *str;
 	int rv;
+	struct smi_info *e;
3272 enum ipmi_addr_src type = SI_INVALID;
3229 3273
3230 if (initialized) 3274 if (initialized)
3231 return 0; 3275 return 0;
@@ -3234,9 +3278,7 @@ static __devinit int init_ipmi_si(void)
3234 /* Register the device drivers. */ 3278 /* Register the device drivers. */
3235 rv = driver_register(&ipmi_driver.driver); 3279 rv = driver_register(&ipmi_driver.driver);
3236 if (rv) { 3280 if (rv) {
3237 printk(KERN_ERR 3281 printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
3238 "init_ipmi_si: Unable to register driver: %d\n",
3239 rv);
3240 return rv; 3282 return rv;
3241 } 3283 }
3242 3284
@@ -3260,38 +3302,81 @@ static __devinit int init_ipmi_si(void)
3260 3302
3261 hardcode_find_bmc(); 3303 hardcode_find_bmc();
3262 3304
3263#ifdef CONFIG_DMI 3305 /* If the user gave us a device, they presumably want us to use it */
3264 dmi_find_bmc(); 3306 mutex_lock(&smi_infos_lock);
3265#endif 3307 if (!list_empty(&smi_infos)) {
3308 mutex_unlock(&smi_infos_lock);
3309 return 0;
3310 }
3311 mutex_unlock(&smi_infos_lock);
3266 3312
3267#ifdef CONFIG_ACPI 3313#ifdef CONFIG_PCI
3268 spmi_find_bmc(); 3314 rv = pci_register_driver(&ipmi_pci_driver);
3315 if (rv)
3316 printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
3269#endif 3317#endif
3318
3270#ifdef CONFIG_ACPI 3319#ifdef CONFIG_ACPI
3271 pnp_register_driver(&ipmi_pnp_driver); 3320 pnp_register_driver(&ipmi_pnp_driver);
3272#endif 3321#endif
3273 3322
3274#ifdef CONFIG_PCI 3323#ifdef CONFIG_DMI
3275 rv = pci_register_driver(&ipmi_pci_driver); 3324 dmi_find_bmc();
3276 if (rv) 3325#endif
3277 printk(KERN_ERR 3326
3278 "init_ipmi_si: Unable to register PCI driver: %d\n", 3327#ifdef CONFIG_ACPI
3279 rv); 3328 spmi_find_bmc();
3280#endif 3329#endif
3281 3330
3282#ifdef CONFIG_PPC_OF 3331#ifdef CONFIG_PPC_OF
3283 of_register_platform_driver(&ipmi_of_platform_driver); 3332 of_register_platform_driver(&ipmi_of_platform_driver);
3284#endif 3333#endif
3285 3334
 3335	/* We prefer devices with interrupts, but in the case of a machine
 3336	   with multiple BMCs we assume there will be several instances of a
 3337	   given type, so if we succeed in registering one type we also try
 3338	   to register everything else of that same type. */
3339
3340 mutex_lock(&smi_infos_lock);
3341 list_for_each_entry(e, &smi_infos, link) {
3342 /* Try to register a device if it has an IRQ and we either
3343 haven't successfully registered a device yet or this
3344 device has the same type as one we successfully registered */
3345 if (e->irq && (!type || e->addr_source == type)) {
3346 if (!try_smi_init(e)) {
3347 type = e->addr_source;
3348 }
3349 }
3350 }
3351
3352 /* type will only have been set if we successfully registered an si */
3353 if (type) {
3354 mutex_unlock(&smi_infos_lock);
3355 return 0;
3356 }
3357
 3358	/* No interrupt-driven interface worked; fall back to polled ones */
3359
3360 list_for_each_entry(e, &smi_infos, link) {
3361 if (!e->irq && (!type || e->addr_source == type)) {
3362 if (!try_smi_init(e)) {
3363 type = e->addr_source;
3364 }
3365 }
3366 }
3367 mutex_unlock(&smi_infos_lock);
3368
3369 if (type)
3370 return 0;
3371
3286 if (si_trydefaults) { 3372 if (si_trydefaults) {
3287 mutex_lock(&smi_infos_lock); 3373 mutex_lock(&smi_infos_lock);
3288 if (list_empty(&smi_infos)) { 3374 if (list_empty(&smi_infos)) {
3289 /* No BMC was found, try defaults. */ 3375 /* No BMC was found, try defaults. */
3290 mutex_unlock(&smi_infos_lock); 3376 mutex_unlock(&smi_infos_lock);
3291 default_find_bmc(); 3377 default_find_bmc();
3292 } else { 3378 } else
3293 mutex_unlock(&smi_infos_lock); 3379 mutex_unlock(&smi_infos_lock);
3294 }
3295 } 3380 }
3296 3381
3297 mutex_lock(&smi_infos_lock); 3382 mutex_lock(&smi_infos_lock);
@@ -3305,8 +3390,8 @@ static __devinit int init_ipmi_si(void)
3305 of_unregister_platform_driver(&ipmi_of_platform_driver); 3390 of_unregister_platform_driver(&ipmi_of_platform_driver);
3306#endif 3391#endif
3307 driver_unregister(&ipmi_driver.driver); 3392 driver_unregister(&ipmi_driver.driver);
3308 printk(KERN_WARNING 3393 printk(KERN_WARNING PFX
3309 "ipmi_si: Unable to find any System Interface(s)\n"); 3394 "Unable to find any System Interface(s)\n");
3310 return -ENODEV; 3395 return -ENODEV;
3311 } else { 3396 } else {
3312 mutex_unlock(&smi_infos_lock); 3397 mutex_unlock(&smi_infos_lock);
@@ -3317,7 +3402,7 @@ module_init(init_ipmi_si);
3317 3402
3318static void cleanup_one_si(struct smi_info *to_clean) 3403static void cleanup_one_si(struct smi_info *to_clean)
3319{ 3404{
3320 int rv; 3405 int rv = 0;
3321 unsigned long flags; 3406 unsigned long flags;
3322 3407
3323 if (!to_clean) 3408 if (!to_clean)
@@ -3361,14 +3446,16 @@ static void cleanup_one_si(struct smi_info *to_clean)
3361 schedule_timeout_uninterruptible(1); 3446 schedule_timeout_uninterruptible(1);
3362 } 3447 }
3363 3448
3364 rv = ipmi_unregister_smi(to_clean->intf); 3449 if (to_clean->intf)
3450 rv = ipmi_unregister_smi(to_clean->intf);
3451
3365 if (rv) { 3452 if (rv) {
3366 printk(KERN_ERR 3453 printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
3367 "ipmi_si: Unable to unregister device: errno=%d\n",
3368 rv); 3454 rv);
3369 } 3455 }
3370 3456
3371 to_clean->handlers->cleanup(to_clean->si_sm); 3457 if (to_clean->handlers)
3458 to_clean->handlers->cleanup(to_clean->si_sm);
3372 3459
3373 kfree(to_clean->si_sm); 3460 kfree(to_clean->si_sm);
3374 3461
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index a4d57e31f713..82bcdb262a3a 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -659,7 +659,7 @@ static struct watchdog_info ident = {
659 .identity = "IPMI" 659 .identity = "IPMI"
660}; 660};
661 661
662static int ipmi_ioctl(struct inode *inode, struct file *file, 662static int ipmi_ioctl(struct file *file,
663 unsigned int cmd, unsigned long arg) 663 unsigned int cmd, unsigned long arg)
664{ 664{
665 void __user *argp = (void __user *)arg; 665 void __user *argp = (void __user *)arg;
@@ -730,6 +730,19 @@ static int ipmi_ioctl(struct inode *inode, struct file *file,
730 } 730 }
731} 731}
732 732
733static long ipmi_unlocked_ioctl(struct file *file,
734 unsigned int cmd,
735 unsigned long arg)
736{
737 int ret;
738
739 lock_kernel();
740 ret = ipmi_ioctl(file, cmd, arg);
741 unlock_kernel();
742
743 return ret;
744}
745
733static ssize_t ipmi_write(struct file *file, 746static ssize_t ipmi_write(struct file *file,
734 const char __user *buf, 747 const char __user *buf,
735 size_t len, 748 size_t len,
@@ -880,7 +893,7 @@ static const struct file_operations ipmi_wdog_fops = {
880 .read = ipmi_read, 893 .read = ipmi_read,
881 .poll = ipmi_poll, 894 .poll = ipmi_poll,
882 .write = ipmi_write, 895 .write = ipmi_write,
883 .ioctl = ipmi_ioctl, 896 .unlocked_ioctl = ipmi_unlocked_ioctl,
884 .open = ipmi_open, 897 .open = ipmi_open,
885 .release = ipmi_close, 898 .release = ipmi_close,
886 .fasync = ipmi_fasync, 899 .fasync = ipmi_fasync,
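The watchdog conversion above is one instance of the BKL pushdown that repeats in nvram.c, nwflash.c, and raw.c below: the legacy .ioctl hook was called with the Big Kernel Lock held, .unlocked_ioctl is not, so a wrapper takes lock_kernel() explicitly to preserve the old serialization while the locking is audited. A generic sketch of the pattern, with hypothetical foo_* names and the 2.6.35-era smp_lock.h API:

/* Sketch of the .ioctl -> .unlocked_ioctl conversion used above.
 * foo_ioctl/foo_fops are placeholders, not from this patch. */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */

static int foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return 0;	/* device-specific work, formerly BKL-protected */
}

static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	long ret;

	lock_kernel();		/* keep the old implicit BKL coverage */
	ret = foo_ioctl(file, cmd, arg);
	unlock_kernel();

	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_unlocked_ioctl,	/* was: .ioctl = foo_ioctl */
};
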
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 92ab03d28294..cd650ca8c679 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -144,6 +144,7 @@ static int misc_open(struct inode * inode, struct file * file)
144 old_fops = file->f_op; 144 old_fops = file->f_op;
145 file->f_op = new_fops; 145 file->f_op = new_fops;
146 if (file->f_op->open) { 146 if (file->f_op->open) {
147 file->private_data = c;
147 err=file->f_op->open(inode,file); 148 err=file->f_op->open(inode,file);
148 if (err) { 149 if (err) {
149 fops_put(file->f_op); 150 fops_put(file->f_op);
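That single added line in misc_open() runs before the driver's own open() is called, so a misc driver can now find its struct miscdevice through file->private_data during open. A hedged sketch of a driver relying on this; the bar_* names are invented and registration via misc_register() is omitted:

/* Sketch: a misc driver open() using the miscdevice pointer that
 * misc_open() now stores in file->private_data first (bar_* invented). */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static int bar_open(struct inode *inode, struct file *file)
{
	struct miscdevice *misc = file->private_data;	/* set by misc core */

	pr_info("bar: opened via minor %d\n", misc->minor);
	return 0;
}

static const struct file_operations bar_fops = {
	.owner	= THIS_MODULE,
	.open	= bar_open,
};

static struct miscdevice bar_dev = {	/* misc_register(&bar_dev) elsewhere */
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "bar",
	.fops	= &bar_fops,
};
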
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 47e8f7b0e4c1..66d2917b003f 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -296,8 +296,8 @@ checksum_err:
296 return -EIO; 296 return -EIO;
297} 297}
298 298
299static int nvram_ioctl(struct inode *inode, struct file *file, 299static long nvram_ioctl(struct file *file, unsigned int cmd,
300 unsigned int cmd, unsigned long arg) 300 unsigned long arg)
301{ 301{
302 int i; 302 int i;
303 303
@@ -308,6 +308,7 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
308 if (!capable(CAP_SYS_ADMIN)) 308 if (!capable(CAP_SYS_ADMIN))
309 return -EACCES; 309 return -EACCES;
310 310
311 lock_kernel();
311 spin_lock_irq(&rtc_lock); 312 spin_lock_irq(&rtc_lock);
312 313
313 for (i = 0; i < NVRAM_BYTES; ++i) 314 for (i = 0; i < NVRAM_BYTES; ++i)
@@ -315,6 +316,7 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
315 __nvram_set_checksum(); 316 __nvram_set_checksum();
316 317
317 spin_unlock_irq(&rtc_lock); 318 spin_unlock_irq(&rtc_lock);
319 unlock_kernel();
318 return 0; 320 return 0;
319 321
320 case NVRAM_SETCKS: 322 case NVRAM_SETCKS:
@@ -323,9 +325,11 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
323 if (!capable(CAP_SYS_ADMIN)) 325 if (!capable(CAP_SYS_ADMIN))
324 return -EACCES; 326 return -EACCES;
325 327
328 lock_kernel();
326 spin_lock_irq(&rtc_lock); 329 spin_lock_irq(&rtc_lock);
327 __nvram_set_checksum(); 330 __nvram_set_checksum();
328 spin_unlock_irq(&rtc_lock); 331 spin_unlock_irq(&rtc_lock);
332 unlock_kernel();
329 return 0; 333 return 0;
330 334
331 default: 335 default:
@@ -422,7 +426,7 @@ static const struct file_operations nvram_fops = {
422 .llseek = nvram_llseek, 426 .llseek = nvram_llseek,
423 .read = nvram_read, 427 .read = nvram_read,
424 .write = nvram_write, 428 .write = nvram_write,
425 .ioctl = nvram_ioctl, 429 .unlocked_ioctl = nvram_ioctl,
426 .open = nvram_open, 430 .open = nvram_open,
427 .release = nvram_release, 431 .release = nvram_release,
428}; 432};
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index f80810901db6..043a1c7b86be 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -94,8 +94,9 @@ static int get_flash_id(void)
94 return c2; 94 return c2;
95} 95}
96 96
97static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cmd, unsigned long arg) 97static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
98{ 98{
99 lock_kernel();
99 switch (cmd) { 100 switch (cmd) {
100 case CMD_WRITE_DISABLE: 101 case CMD_WRITE_DISABLE:
101 gbWriteBase64Enable = 0; 102 gbWriteBase64Enable = 0;
@@ -113,8 +114,10 @@ static int flash_ioctl(struct inode *inodep, struct file *filep, unsigned int cm
113 default: 114 default:
114 gbWriteBase64Enable = 0; 115 gbWriteBase64Enable = 0;
115 gbWriteEnable = 0; 116 gbWriteEnable = 0;
117 unlock_kernel();
116 return -EINVAL; 118 return -EINVAL;
117 } 119 }
120 unlock_kernel();
118 return 0; 121 return 0;
119} 122}
120 123
@@ -631,7 +634,7 @@ static const struct file_operations flash_fops =
631 .llseek = flash_llseek, 634 .llseek = flash_llseek,
632 .read = flash_read, 635 .read = flash_read,
633 .write = flash_write, 636 .write = flash_write,
634 .ioctl = flash_ioctl, 637 .unlocked_ioctl = flash_ioctl,
635}; 638};
636 639
637static struct miscdevice flash_miscdev = 640static struct miscdevice flash_miscdev =
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index fdd37543aa79..02abfddce45a 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -287,12 +287,10 @@ static int register_device (int minor, struct pp_struct *pp)
287 char *name; 287 char *name;
288 int fl; 288 int fl;
289 289
290 name = kmalloc (strlen (CHRDEV) + 3, GFP_KERNEL); 290 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
291 if (name == NULL) 291 if (name == NULL)
292 return -ENOMEM; 292 return -ENOMEM;
293 293
294 sprintf (name, CHRDEV "%x", minor);
295
296 port = parport_find_number (minor); 294 port = parport_find_number (minor);
297 if (!port) { 295 if (!port) {
298 printk (KERN_WARNING "%s: no associated port!\n", name); 296 printk (KERN_WARNING "%s: no associated port!\n", name);
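kasprintf() computes the needed length, allocates, and formats in one call, which is what replaces the kmalloc-plus-sprintf sequence above. A minimal sketch, assuming sleeping context for GFP_KERNEL; the "foo%x" prefix is a placeholder, not the driver's actual CHRDEV string:

/* Sketch of the kasprintf() idiom adopted in ppdev above. */
#include <linux/kernel.h>
#include <linux/slab.h>

static char *make_name(int minor)
{
	/* allocates exactly the right size and formats into it */
	char *name = kasprintf(GFP_KERNEL, "foo%x", minor);

	if (!name)
		return NULL;		/* allocation failure */
	return name;			/* caller must kfree() it */
}
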
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
new file mode 100644
index 000000000000..74f00b5ffa36
--- /dev/null
+++ b/drivers/char/ramoops.c
@@ -0,0 +1,162 @@
1/*
2 * RAM Oops/Panic logger
3 *
4 * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/kmsg_dump.h>
25#include <linux/time.h>
26#include <linux/io.h>
27#include <linux/ioport.h>
28
29#define RAMOOPS_KERNMSG_HDR "===="
30#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval))
31
32#define RECORD_SIZE 4096
33
34static ulong mem_address;
35module_param(mem_address, ulong, 0400);
36MODULE_PARM_DESC(mem_address,
37 "start of reserved RAM used to store oops/panic logs");
38
39static ulong mem_size;
40module_param(mem_size, ulong, 0400);
41MODULE_PARM_DESC(mem_size,
42 "size of reserved RAM used to store oops/panic logs");
43
44static int dump_oops = 1;
45module_param(dump_oops, int, 0600);
46MODULE_PARM_DESC(dump_oops,
47 "set to 1 to dump oopses, 0 to only dump panics (default 1)");
48
49static struct ramoops_context {
50 struct kmsg_dumper dump;
51 void *virt_addr;
52 phys_addr_t phys_addr;
53 unsigned long size;
54 int count;
55 int max_count;
56} oops_cxt;
57
58static void ramoops_do_dump(struct kmsg_dumper *dumper,
59 enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
60 const char *s2, unsigned long l2)
61{
62 struct ramoops_context *cxt = container_of(dumper,
63 struct ramoops_context, dump);
64 unsigned long s1_start, s2_start;
65 unsigned long l1_cpy, l2_cpy;
66 int res;
67 char *buf;
68 struct timeval timestamp;
69
70 /* Only dump oopses if dump_oops is set */
71 if (reason == KMSG_DUMP_OOPS && !dump_oops)
72 return;
73
74 buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
75 memset(buf, '\0', RECORD_SIZE);
76 res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
77 buf += res;
78 do_gettimeofday(&timestamp);
79 res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec);
80 buf += res;
81
82 l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE));
83 l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy);
84
85 s2_start = l2 - l2_cpy;
86 s1_start = l1 - l1_cpy;
87
88 memcpy(buf, s1 + s1_start, l1_cpy);
89 memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy);
90
91 cxt->count = (cxt->count + 1) % cxt->max_count;
92}
93
94static int __init ramoops_init(void)
95{
96 struct ramoops_context *cxt = &oops_cxt;
97 int err = -EINVAL;
98
99 if (!mem_size) {
 100	printk(KERN_ERR "ramoops: invalid size specification\n");
101 goto fail3;
102 }
103
 104	mem_size = rounddown_pow_of_two(mem_size);
105
106 if (mem_size < RECORD_SIZE) {
 107	printk(KERN_ERR "ramoops: size too small\n");
108 goto fail3;
109 }
110
111 cxt->max_count = mem_size / RECORD_SIZE;
112 cxt->count = 0;
113 cxt->size = mem_size;
114 cxt->phys_addr = mem_address;
115
116 if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
 117	printk(KERN_ERR "ramoops: request mem region failed\n");
118 err = -EINVAL;
119 goto fail3;
120 }
121
122 cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size);
123 if (!cxt->virt_addr) {
 124	printk(KERN_ERR "ramoops: ioremap failed\n");
125 goto fail2;
126 }
127
128 cxt->dump.dump = ramoops_do_dump;
129 err = kmsg_dump_register(&cxt->dump);
130 if (err) {
 131	printk(KERN_ERR "ramoops: registering kmsg dumper failed\n");
132 goto fail1;
133 }
134
135 return 0;
136
137fail1:
138 iounmap(cxt->virt_addr);
139fail2:
140 release_mem_region(cxt->phys_addr, cxt->size);
141fail3:
142 return err;
143}
144
145static void __exit ramoops_exit(void)
146{
147 struct ramoops_context *cxt = &oops_cxt;
148
149 if (kmsg_dump_unregister(&cxt->dump) < 0)
 150	printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper\n");
151
152 iounmap(cxt->virt_addr);
153 release_mem_region(cxt->phys_addr, cxt->size);
154}
155
156
157module_init(ramoops_init);
158module_exit(ramoops_exit);
159
160MODULE_LICENSE("GPL");
161MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
162MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");
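Each record in the new driver is a fixed RECORD_SIZE slot: the "====" marker, a seconds.microseconds timestamp, then the tail of the kernel log, with slots reused as a ring. The userspace sketch below walks a copy of the reserved region looking for those headers; the dump file name is hypothetical, and how the region gets copied out (for example via /dev/mem at mem_address) is left to the reader:

/* Userspace sketch: find ramoops records in a copy of the region.
 * "ramoops.bin" is a hypothetical dump of mem_address..+mem_size. */
#include <stdio.h>
#include <string.h>

#define RECORD_SIZE 4096	/* matches the driver above */

int main(void)
{
	char buf[RECORD_SIZE];
	FILE *f = fopen("ramoops.bin", "rb");
	long slot = 0;

	if (!f)
		return 1;
	while (fread(buf, 1, RECORD_SIZE, f) == RECORD_SIZE) {
		/* each used slot starts with RAMOOPS_KERNMSG_HDR ("====")
		 * followed by "secs.usecs\n" and then the captured log */
		if (!memcmp(buf, "====", 4))
			printf("slot %ld: header %.24s\n", slot, buf + 4);
		slot++;
	}
	fclose(f);
	return 0;
}
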
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 8756ab0daa8b..b38942f6bf31 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -121,13 +121,17 @@ static int raw_release(struct inode *inode, struct file *filp)
121/* 121/*
122 * Forward ioctls to the underlying block device. 122 * Forward ioctls to the underlying block device.
123 */ 123 */
124static int 124static long
125raw_ioctl(struct inode *inode, struct file *filp, 125raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
126 unsigned int command, unsigned long arg)
127{ 126{
128 struct block_device *bdev = filp->private_data; 127 struct block_device *bdev = filp->private_data;
128 int ret;
129
130 lock_kernel();
131 ret = blkdev_ioctl(bdev, 0, command, arg);
132 unlock_kernel();
129 133
130 return blkdev_ioctl(bdev, 0, command, arg); 134 return ret;
131} 135}
132 136
133static void bind_device(struct raw_config_request *rq) 137static void bind_device(struct raw_config_request *rq)
@@ -141,13 +145,14 @@ static void bind_device(struct raw_config_request *rq)
141 * Deal with ioctls against the raw-device control interface, to bind 145 * Deal with ioctls against the raw-device control interface, to bind
142 * and unbind other raw devices. 146 * and unbind other raw devices.
143 */ 147 */
144static int raw_ctl_ioctl(struct inode *inode, struct file *filp, 148static long raw_ctl_ioctl(struct file *filp, unsigned int command,
145 unsigned int command, unsigned long arg) 149 unsigned long arg)
146{ 150{
147 struct raw_config_request rq; 151 struct raw_config_request rq;
148 struct raw_device_data *rawdev; 152 struct raw_device_data *rawdev;
149 int err = 0; 153 int err = 0;
150 154
155 lock_kernel();
151 switch (command) { 156 switch (command) {
152 case RAW_SETBIND: 157 case RAW_SETBIND:
153 case RAW_GETBIND: 158 case RAW_GETBIND:
@@ -240,25 +245,26 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
240 break; 245 break;
241 } 246 }
242out: 247out:
248 unlock_kernel();
243 return err; 249 return err;
244} 250}
245 251
246static const struct file_operations raw_fops = { 252static const struct file_operations raw_fops = {
247 .read = do_sync_read, 253 .read = do_sync_read,
248 .aio_read = generic_file_aio_read, 254 .aio_read = generic_file_aio_read,
249 .write = do_sync_write, 255 .write = do_sync_write,
250 .aio_write = blkdev_aio_write, 256 .aio_write = blkdev_aio_write,
251 .fsync = blkdev_fsync, 257 .fsync = blkdev_fsync,
252 .open = raw_open, 258 .open = raw_open,
253 .release= raw_release, 259 .release = raw_release,
254 .ioctl = raw_ioctl, 260 .unlocked_ioctl = raw_ioctl,
255 .owner = THIS_MODULE, 261 .owner = THIS_MODULE,
256}; 262};
257 263
258static const struct file_operations raw_ctl_fops = { 264static const struct file_operations raw_ctl_fops = {
259 .ioctl = raw_ctl_ioctl, 265 .unlocked_ioctl = raw_ctl_ioctl,
260 .open = raw_open, 266 .open = raw_open,
261 .owner = THIS_MODULE, 267 .owner = THIS_MODULE,
262}; 268};
263 269
264static struct cdev raw_cdev; 270static struct cdev raw_cdev;
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index 1144a04cda6e..42f7fa442ff8 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -866,7 +866,7 @@ static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
866{ 866{
867 int i = vdev->unit_address; 867 int i = vdev->unit_address;
868 int j; 868 int j;
869 struct device_node *node = vdev->dev.archdata.of_node; 869 struct device_node *node = vdev->dev.of_node;
870 870
871 if (i >= VIOTAPE_MAX_TAPE) 871 if (i >= VIOTAPE_MAX_TAPE)
872 return -ENODEV; 872 return -ENODEV;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index bd1d1164fec5..7cdb6ee569cd 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -3967,13 +3967,9 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
3967 font.charcount = op->charcount; 3967 font.charcount = op->charcount;
3968 font.height = op->height; 3968 font.height = op->height;
3969 font.width = op->width; 3969 font.width = op->width;
3970 font.data = kmalloc(size, GFP_KERNEL); 3970 font.data = memdup_user(op->data, size);
3971 if (!font.data) 3971 if (IS_ERR(font.data))
3972 return -ENOMEM; 3972 return PTR_ERR(font.data);
3973 if (copy_from_user(font.data, op->data, size)) {
3974 kfree(font.data);
3975 return -EFAULT;
3976 }
3977 acquire_console_sem(); 3973 acquire_console_sem();
3978 if (vc->vc_sw->con_font_set) 3974 if (vc->vc_sw->con_font_set)
3979 rc = vc->vc_sw->con_font_set(vc, &font, op->flags); 3975 rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
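memdup_user() collapses the kmalloc, copy_from_user, and error-unwind triple into one call that returns either the kernel copy or an ERR_PTR, which is exactly the vt.c change above. A sketch of the pattern with a hypothetical load_blob() helper:

/* Sketch of the memdup_user() pattern adopted in vt.c above. */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static void *load_blob(const void __user *src, size_t size)
{
	void *buf = memdup_user(src, size);	/* kmalloc + copy_from_user */

	if (IS_ERR(buf))
		return buf;	/* -ENOMEM or -EFAULT as an ERR_PTR */
	/* ... use buf ...; caller kfree()s it when done */
	return buf;
}
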
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 7261b8d9087c..ed8a9cec2a05 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -772,18 +772,18 @@ hwicap_of_probe(struct of_device *op, const struct of_device_id *match)
772 772
773 dev_dbg(&op->dev, "hwicap_of_probe(%p, %p)\n", op, match); 773 dev_dbg(&op->dev, "hwicap_of_probe(%p, %p)\n", op, match);
774 774
775 rc = of_address_to_resource(op->node, 0, &res); 775 rc = of_address_to_resource(op->dev.of_node, 0, &res);
776 if (rc) { 776 if (rc) {
777 dev_err(&op->dev, "invalid address\n"); 777 dev_err(&op->dev, "invalid address\n");
778 return rc; 778 return rc;
779 } 779 }
780 780
781 id = of_get_property(op->node, "port-number", NULL); 781 id = of_get_property(op->dev.of_node, "port-number", NULL);
782 782
783 /* It's most likely that we're using V4, if the family is not 783 /* It's most likely that we're using V4, if the family is not
784 specified */ 784 specified */
785 regs = &v4_config_registers; 785 regs = &v4_config_registers;
786 family = of_get_property(op->node, "xlnx,family", NULL); 786 family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
787 787
788 if (family) { 788 if (family) {
789 if (!strcmp(family, "virtex2p")) { 789 if (!strcmp(family, "virtex2p")) {
@@ -812,13 +812,12 @@ static const struct of_device_id __devinitconst hwicap_of_match[] = {
812MODULE_DEVICE_TABLE(of, hwicap_of_match); 812MODULE_DEVICE_TABLE(of, hwicap_of_match);
813 813
814static struct of_platform_driver hwicap_of_driver = { 814static struct of_platform_driver hwicap_of_driver = {
815 .owner = THIS_MODULE,
816 .name = DRIVER_NAME,
817 .match_table = hwicap_of_match,
818 .probe = hwicap_of_probe, 815 .probe = hwicap_of_probe,
819 .remove = __devexit_p(hwicap_of_remove), 816 .remove = __devexit_p(hwicap_of_remove),
820 .driver = { 817 .driver = {
821 .name = DRIVER_NAME, 818 .name = DRIVER_NAME,
819 .owner = THIS_MODULE,
820 .of_match_table = hwicap_of_match,
822 }, 821 },
823}; 822};
824 823
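This hwicap hunk is the template for a run of identical conversions below (crypto4xx, talitos, fsldma, ppc440spe ADMA, mpc85xx and ppc4xx EDAC): the top-level name/owner/match_table fields of struct of_platform_driver move into the embedded struct device_driver, where the match table is called of_match_table. A before/after sketch with placeholder baz_* names:

/* Sketch of the of_platform_driver conversion repeated in this patch;
 * baz_* names and the compatible string are placeholders. */
#include <linux/module.h>
#include <linux/of_platform.h>

static const struct of_device_id baz_match[] = {
	{ .compatible = "acme,baz" },
	{ /* sentinel */ }
};

static struct of_platform_driver baz_driver = {
	/* removed style:
	 *	.name        = "baz",
	 *	.owner       = THIS_MODULE,
	 *	.match_table = baz_match,
	 */
	.driver = {
		.name		= "baz",
		.owner		= THIS_MODULE,
		.of_match_table	= baz_match,	/* the table's new home */
	},
	/* .probe and .remove stay at the top level, unchanged */
};
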
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index b81ad9c731ae..52ff8aa63f84 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -21,9 +21,12 @@
21#include <linux/math64.h> 21#include <linux/math64.h>
22 22
23#define BUCKETS 12 23#define BUCKETS 12
24#define INTERVALS 8
24#define RESOLUTION 1024 25#define RESOLUTION 1024
25#define DECAY 4 26#define DECAY 8
26#define MAX_INTERESTING 50000 27#define MAX_INTERESTING 50000
28#define STDDEV_THRESH 400
29
27 30
28/* 31/*
29 * Concepts and ideas behind the menu governor 32 * Concepts and ideas behind the menu governor
@@ -64,6 +67,16 @@
64 * indexed based on the magnitude of the expected duration as well as the 67 * indexed based on the magnitude of the expected duration as well as the
65 * "is IO outstanding" property. 68 * "is IO outstanding" property.
66 * 69 *
70 * Repeatable-interval-detector
71 * ----------------------------
72 * There are some cases where "next timer" is a completely unusable predictor:
73 * Those cases where the interval is fixed, for example due to hardware
74 * interrupt mitigation, but also due to fixed transfer rate devices such as
75 * mice.
76 * For this, we use a different predictor: We track the duration of the last 8
 77 * intervals and if the standard deviation of these 8 intervals is below a
78 * threshold value, we use the average of these intervals as prediction.
79 *
67 * Limiting Performance Impact 80 * Limiting Performance Impact
68 * --------------------------- 81 * ---------------------------
69 * C states, especially those with large exit latencies, can have a real 82 * C states, especially those with large exit latencies, can have a real
@@ -104,6 +117,8 @@ struct menu_device {
104 unsigned int exit_us; 117 unsigned int exit_us;
105 unsigned int bucket; 118 unsigned int bucket;
106 u64 correction_factor[BUCKETS]; 119 u64 correction_factor[BUCKETS];
120 u32 intervals[INTERVALS];
121 int interval_ptr;
107}; 122};
108 123
109 124
@@ -175,6 +190,42 @@ static u64 div_round64(u64 dividend, u32 divisor)
175 return div_u64(dividend + (divisor / 2), divisor); 190 return div_u64(dividend + (divisor / 2), divisor);
176} 191}
177 192
193/*
194 * Try detecting repeating patterns by keeping track of the last 8
195 * intervals, and checking if the standard deviation of that set
 196 * of points is below a threshold. If it is, use the
197 * average of these 8 points as the estimated value.
198 */
199static void detect_repeating_patterns(struct menu_device *data)
200{
201 int i;
202 uint64_t avg = 0;
203 uint64_t stddev = 0; /* contains the square of the std deviation */
204
205 /* first calculate average and standard deviation of the past */
206 for (i = 0; i < INTERVALS; i++)
207 avg += data->intervals[i];
208 avg = avg / INTERVALS;
209
210 /* if the avg is beyond the known next tick, it's worthless */
211 if (avg > data->expected_us)
212 return;
213
214 for (i = 0; i < INTERVALS; i++)
215 stddev += (data->intervals[i] - avg) *
216 (data->intervals[i] - avg);
217
218 stddev = stddev / INTERVALS;
219
220 /*
 221	 * Now, if stddev is small, assume we have a
222 * repeating pattern and predict we keep doing this.
223 */
224
225 if (avg && stddev < STDDEV_THRESH)
226 data->predicted_us = avg;
227}
228
178/** 229/**
179 * menu_select - selects the next idle state to enter 230 * menu_select - selects the next idle state to enter
180 * @dev: the CPU 231 * @dev: the CPU
@@ -218,6 +269,8 @@ static int menu_select(struct cpuidle_device *dev)
218 data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], 269 data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
219 RESOLUTION * DECAY); 270 RESOLUTION * DECAY);
220 271
272 detect_repeating_patterns(data);
273
221 /* 274 /*
222 * We want to default to C1 (hlt), not to busy polling 275 * We want to default to C1 (hlt), not to busy polling
223 * unless the timer is happening really really soon. 276 * unless the timer is happening really really soon.
@@ -310,6 +363,11 @@ static void menu_update(struct cpuidle_device *dev)
310 new_factor = 1; 363 new_factor = 1;
311 364
312 data->correction_factor[data->bucket] = new_factor; 365 data->correction_factor[data->bucket] = new_factor;
366
367 /* update the repeating-pattern data */
368 data->intervals[data->interval_ptr++] = last_idle_us;
369 if (data->interval_ptr >= INTERVALS)
370 data->interval_ptr = 0;
313} 371}
314 372
315/** 373/**
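Note that detect_repeating_patterns() never takes a square root: stddev holds the mean of the squared deviations, i.e. the variance, and that is what gets compared against STDDEV_THRESH. The standalone userspace sketch below reproduces the same integer math on made-up interval samples; unsigned wraparound in the subtraction still yields the correct square, just as in the kernel version:

/* Standalone version of the interval test above; samples are made up. */
#include <stdint.h>
#include <stdio.h>

#define INTERVALS     8
#define STDDEV_THRESH 400	/* compared against the variance, not its root */

int main(void)
{
	uint32_t intervals[INTERVALS] = { 500, 503, 498, 501, 499, 502, 500, 497 };
	uint64_t avg = 0, stddev = 0;
	int i;

	for (i = 0; i < INTERVALS; i++)
		avg += intervals[i];
	avg /= INTERVALS;			/* integer mean: 500 here */

	for (i = 0; i < INTERVALS; i++)		/* wraparound squares are OK */
		stddev += (intervals[i] - avg) * (intervals[i] - avg);
	stddev /= INTERVALS;			/* variance: 3 here */

	if (avg && stddev < STDDEV_THRESH)
		printf("repeating pattern, predict %llu us idle\n",
		       (unsigned long long)avg);
	return 0;
}
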
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 6c4c8b7ce3aa..9d65b371de64 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1281,8 +1281,11 @@ static const struct of_device_id crypto4xx_match[] = {
1281}; 1281};
1282 1282
1283static struct of_platform_driver crypto4xx_driver = { 1283static struct of_platform_driver crypto4xx_driver = {
1284 .name = "crypto4xx", 1284 .driver = {
1285 .match_table = crypto4xx_match, 1285 .name = "crypto4xx",
1286 .owner = THIS_MODULE,
1287 .of_match_table = crypto4xx_match,
1288 },
1286 .probe = crypto4xx_probe, 1289 .probe = crypto4xx_probe,
1287 .remove = crypto4xx_remove, 1290 .remove = crypto4xx_remove,
1288}; 1291};
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6a0f59d1fc5c..637c105f53d2 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2398,7 +2398,7 @@ static int talitos_probe(struct of_device *ofdev,
2398 const struct of_device_id *match) 2398 const struct of_device_id *match)
2399{ 2399{
2400 struct device *dev = &ofdev->dev; 2400 struct device *dev = &ofdev->dev;
2401 struct device_node *np = ofdev->node; 2401 struct device_node *np = ofdev->dev.of_node;
2402 struct talitos_private *priv; 2402 struct talitos_private *priv;
2403 const unsigned int *prop; 2403 const unsigned int *prop;
2404 int i, err; 2404 int i, err;
@@ -2573,8 +2573,11 @@ static const struct of_device_id talitos_match[] = {
2573MODULE_DEVICE_TABLE(of, talitos_match); 2573MODULE_DEVICE_TABLE(of, talitos_match);
2574 2574
2575static struct of_platform_driver talitos_driver = { 2575static struct of_platform_driver talitos_driver = {
2576 .name = "talitos", 2576 .driver = {
2577 .match_table = talitos_match, 2577 .name = "talitos",
2578 .owner = THIS_MODULE,
2579 .of_match_table = talitos_match,
2580 },
2578 .probe = talitos_probe, 2581 .probe = talitos_probe,
2579 .remove = talitos_remove, 2582 .remove = talitos_remove,
2580}; 2583};
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 1fdf180cbd67..8088b14ba5f7 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1315,7 +1315,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
1315 INIT_LIST_HEAD(&fdev->common.channels); 1315 INIT_LIST_HEAD(&fdev->common.channels);
1316 1316
1317 /* ioremap the registers for use */ 1317 /* ioremap the registers for use */
1318 fdev->regs = of_iomap(op->node, 0); 1318 fdev->regs = of_iomap(op->dev.of_node, 0);
1319 if (!fdev->regs) { 1319 if (!fdev->regs) {
1320 dev_err(&op->dev, "unable to ioremap registers\n"); 1320 dev_err(&op->dev, "unable to ioremap registers\n");
1321 err = -ENOMEM; 1321 err = -ENOMEM;
@@ -1323,7 +1323,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
1323 } 1323 }
1324 1324
1325 /* map the channel IRQ if it exists, but don't hookup the handler yet */ 1325 /* map the channel IRQ if it exists, but don't hookup the handler yet */
1326 fdev->irq = irq_of_parse_and_map(op->node, 0); 1326 fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
1327 1327
1328 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); 1328 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
1329 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); 1329 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
@@ -1345,7 +1345,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
1345 * of_platform_bus_remove(). Instead, we manually instantiate every DMA 1345 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
1346 * channel object. 1346 * channel object.
1347 */ 1347 */
1348 for_each_child_of_node(op->node, child) { 1348 for_each_child_of_node(op->dev.of_node, child) {
1349 if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) { 1349 if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
1350 fsl_dma_chan_probe(fdev, child, 1350 fsl_dma_chan_probe(fdev, child,
1351 FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, 1351 FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
@@ -1411,10 +1411,13 @@ static const struct of_device_id fsldma_of_ids[] = {
1411}; 1411};
1412 1412
1413static struct of_platform_driver fsldma_of_driver = { 1413static struct of_platform_driver fsldma_of_driver = {
1414 .name = "fsl-elo-dma", 1414 .driver = {
1415 .match_table = fsldma_of_ids, 1415 .name = "fsl-elo-dma",
1416 .probe = fsldma_of_probe, 1416 .owner = THIS_MODULE,
1417 .remove = fsldma_of_remove, 1417 .of_match_table = fsldma_of_ids,
1418 },
1419 .probe = fsldma_of_probe,
1420 .remove = fsldma_of_remove,
1418}; 1421};
1419 1422
1420/*----------------------------------------------------------------------------*/ 1423/*----------------------------------------------------------------------------*/
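fsldma also shows the probe-side half of the OF migration: device-tree accesses go through op->dev.of_node instead of the removed op->node. A probe-shaped sketch with invented qux_* names; on 2.6.35-era powerpc, of_iomap() and irq_of_parse_and_map() arrived via the platform's OF headers, so the exact include list here is an assumption:

/* Probe-shaped sketch using op->dev.of_node (qux_* invented). */
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_platform.h>

static int qux_probe(struct of_device *op, const struct of_device_id *match)
{
	struct device_node *child;
	void __iomem *regs;
	unsigned int irq;

	regs = of_iomap(op->dev.of_node, 0);		/* was op->node */
	if (!regs)
		return -ENOMEM;

	irq = irq_of_parse_and_map(op->dev.of_node, 0);	/* 0 if absent */

	for_each_child_of_node(op->dev.of_node, child) {
		/* per-child (e.g. DMA channel) setup would go here */
	}

	return 0;
}
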
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index c6079fcca13f..fa98abe4686f 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4944,12 +4944,12 @@ static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
4944MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match); 4944MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
4945 4945
4946static struct of_platform_driver ppc440spe_adma_driver = { 4946static struct of_platform_driver ppc440spe_adma_driver = {
4947 .match_table = ppc440spe_adma_of_match,
4948 .probe = ppc440spe_adma_probe, 4947 .probe = ppc440spe_adma_probe,
4949 .remove = __devexit_p(ppc440spe_adma_remove), 4948 .remove = __devexit_p(ppc440spe_adma_remove),
4950 .driver = { 4949 .driver = {
4951 .name = "PPC440SP(E)-ADMA", 4950 .name = "PPC440SP(E)-ADMA",
4952 .owner = THIS_MODULE, 4951 .owner = THIS_MODULE,
4952 .of_match_table = ppc440spe_adma_of_match,
4953 }, 4953 },
4954}; 4954};
4955 4955
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index a1727522343e..a2a519fd2a24 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -722,6 +722,10 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
722{ 722{
723 while (__ld_cleanup(sh_chan, all)) 723 while (__ld_cleanup(sh_chan, all))
724 ; 724 ;
725
726 if (all)
727 /* Terminating - forgive uncompleted cookies */
728 sh_chan->completed_cookie = sh_chan->common.cookie;
725} 729}
726 730
727static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) 731static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
@@ -1188,6 +1192,7 @@ static struct platform_driver sh_dmae_driver = {
1188 .remove = __exit_p(sh_dmae_remove), 1192 .remove = __exit_p(sh_dmae_remove),
1189 .shutdown = sh_dmae_shutdown, 1193 .shutdown = sh_dmae_shutdown,
1190 .driver = { 1194 .driver = {
1195 .owner = THIS_MODULE,
1191 .name = "sh-dma-engine", 1196 .name = "sh-dma-engine",
1192 }, 1197 },
1193}; 1198};
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 0172fa3c7a2b..a1bf77c1993f 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -188,7 +188,7 @@ static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
188static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, 188static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
189 struct scatterlist *sg, bool last) 189 struct scatterlist *sg, bool last)
190{ 190{
191 if (sg_dma_len(sg) > USHORT_MAX) { 191 if (sg_dma_len(sg) > USHRT_MAX) {
192 dev_err(chan2dev(&td_chan->chan), "Too big sg element\n"); 192 dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
193 return -EINVAL; 193 return -EINVAL;
194 } 194 }
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index adc10a2ac5f6..996c1bdb5a34 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -774,7 +774,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci)
774static void i5000_check_error(struct mem_ctl_info *mci) 774static void i5000_check_error(struct mem_ctl_info *mci)
775{ 775{
776 struct i5000_error_info info; 776 struct i5000_error_info info;
777 debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 777 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
778 i5000_get_error_info(mci, &info); 778 i5000_get_error_info(mci, &info);
779 i5000_process_error_info(mci, &info, 1); 779 i5000_process_error_info(mci, &info, 1);
780} 780}
@@ -1353,8 +1353,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1353 int num_dimms_per_channel; 1353 int num_dimms_per_channel;
1354 int num_csrows; 1354 int num_csrows;
1355 1355
1356 debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1356 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
1357 __func__, 1357 __FILE__, __func__,
1358 pdev->bus->number, 1358 pdev->bus->number,
1359 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1359 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1360 1360
@@ -1389,7 +1389,7 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1389 return -ENOMEM; 1389 return -ENOMEM;
1390 1390
1391 kobject_get(&mci->edac_mci_kobj); 1391 kobject_get(&mci->edac_mci_kobj);
1392 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 1392 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
1393 1393
1394 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1394 mci->dev = &pdev->dev; /* record ptr to the generic device */
1395 1395
@@ -1432,8 +1432,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1432 1432
1433 /* add this new MC control structure to EDAC's list of MCs */ 1433 /* add this new MC control structure to EDAC's list of MCs */
1434 if (edac_mc_add_mc(mci)) { 1434 if (edac_mc_add_mc(mci)) {
1435 debugf0("MC: " __FILE__ 1435 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
1436 ": %s(): failed edac_mc_add_mc()\n", __func__); 1436 __FILE__, __func__);
1437 /* FIXME: perhaps some code should go here that disables error 1437 /* FIXME: perhaps some code should go here that disables error
1438 * reporting if we just enabled it 1438 * reporting if we just enabled it
1439 */ 1439 */
@@ -1478,7 +1478,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev,
1478{ 1478{
1479 int rc; 1479 int rc;
1480 1480
1481 debugf0("MC: " __FILE__ ": %s()\n", __func__); 1481 debugf0("MC: %s: %s()\n", __FILE__, __func__);
1482 1482
1483 /* wake up device */ 1483 /* wake up device */
1484 rc = pci_enable_device(pdev); 1484 rc = pci_enable_device(pdev);
@@ -1497,7 +1497,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
1497{ 1497{
1498 struct mem_ctl_info *mci; 1498 struct mem_ctl_info *mci;
1499 1499
1500 debugf0(__FILE__ ": %s()\n", __func__); 1500 debugf0("%s: %s()\n", __FILE__, __func__);
1501 1501
1502 if (i5000_pci) 1502 if (i5000_pci)
1503 edac_pci_release_generic_ctl(i5000_pci); 1503 edac_pci_release_generic_ctl(i5000_pci);
@@ -1544,7 +1544,7 @@ static int __init i5000_init(void)
1544{ 1544{
1545 int pci_rc; 1545 int pci_rc;
1546 1546
1547 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1547 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1548 1548
1549 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1549 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1550 opstate_init(); 1550 opstate_init();
@@ -1560,7 +1560,7 @@ static int __init i5000_init(void)
1560 */ 1560 */
1561static void __exit i5000_exit(void) 1561static void __exit i5000_exit(void)
1562{ 1562{
1563 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1563 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1564 pci_unregister_driver(&i5000_driver); 1564 pci_unregister_driver(&i5000_driver);
1565} 1565}
1566 1566
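The mechanical change through this and the following EDAC files moves __FILE__ out of the format string and into a "%s" argument. One plausible motivation, sketched rather than asserted: with string pasting every call site bakes a distinct format literal into the object file, while a shared "%s" format keeps the literals identical and the source lines shorter. Illustrative userspace code, not driver code:

/* Userspace illustration of the format-string change above. */
#include <stdio.h>

#define debugf(fmt, ...) printf(fmt, ##__VA_ARGS__)

int main(void)
{
	/* old: __FILE__ pasted in, a different literal per source file */
	debugf("MC: " __FILE__ ": %s()\n", __func__);
	/* new: one shared literal, file name passed as ordinary data */
	debugf("MC: %s: %s()\n", __FILE__, __func__);
	return 0;
}
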
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index f99d10655ed4..010c1d6526f5 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -694,7 +694,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci)
694static void i5400_check_error(struct mem_ctl_info *mci) 694static void i5400_check_error(struct mem_ctl_info *mci)
695{ 695{
696 struct i5400_error_info info; 696 struct i5400_error_info info;
697 debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 697 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
698 i5400_get_error_info(mci, &info); 698 i5400_get_error_info(mci, &info);
699 i5400_process_error_info(mci, &info); 699 i5400_process_error_info(mci, &info);
700} 700}
@@ -1227,8 +1227,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1227 if (dev_idx >= ARRAY_SIZE(i5400_devs)) 1227 if (dev_idx >= ARRAY_SIZE(i5400_devs))
1228 return -EINVAL; 1228 return -EINVAL;
1229 1229
1230 debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1230 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
1231 __func__, 1231 __FILE__, __func__,
1232 pdev->bus->number, 1232 pdev->bus->number,
1233 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1233 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1234 1234
@@ -1256,7 +1256,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1256 if (mci == NULL) 1256 if (mci == NULL)
1257 return -ENOMEM; 1257 return -ENOMEM;
1258 1258
1259 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 1259 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
1260 1260
1261 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1261 mci->dev = &pdev->dev; /* record ptr to the generic device */
1262 1262
@@ -1299,8 +1299,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1299 1299
1300 /* add this new MC control structure to EDAC's list of MCs */ 1300 /* add this new MC control structure to EDAC's list of MCs */
1301 if (edac_mc_add_mc(mci)) { 1301 if (edac_mc_add_mc(mci)) {
1302 debugf0("MC: " __FILE__ 1302 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
1303 ": %s(): failed edac_mc_add_mc()\n", __func__); 1303 __FILE__, __func__);
1304 /* FIXME: perhaps some code should go here that disables error 1304 /* FIXME: perhaps some code should go here that disables error
1305 * reporting if we just enabled it 1305 * reporting if we just enabled it
1306 */ 1306 */
@@ -1344,7 +1344,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
1344{ 1344{
1345 int rc; 1345 int rc;
1346 1346
1347 debugf0("MC: " __FILE__ ": %s()\n", __func__); 1347 debugf0("MC: %s: %s()\n", __FILE__, __func__);
1348 1348
1349 /* wake up device */ 1349 /* wake up device */
1350 rc = pci_enable_device(pdev); 1350 rc = pci_enable_device(pdev);
@@ -1363,7 +1363,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
1363{ 1363{
1364 struct mem_ctl_info *mci; 1364 struct mem_ctl_info *mci;
1365 1365
1366 debugf0(__FILE__ ": %s()\n", __func__); 1366 debugf0("%s: %s()\n", __FILE__, __func__);
1367 1367
1368 if (i5400_pci) 1368 if (i5400_pci)
1369 edac_pci_release_generic_ctl(i5400_pci); 1369 edac_pci_release_generic_ctl(i5400_pci);
@@ -1409,7 +1409,7 @@ static int __init i5400_init(void)
1409{ 1409{
1410 int pci_rc; 1410 int pci_rc;
1411 1411
1412 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1412 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1413 1413
1414 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1414 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1415 opstate_init(); 1415 opstate_init();
@@ -1425,7 +1425,7 @@ static int __init i5400_init(void)
1425 */ 1425 */
1426static void __exit i5400_exit(void) 1426static void __exit i5400_exit(void)
1427{ 1427{
1428 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1428 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1429 pci_unregister_driver(&i5400_driver); 1429 pci_unregister_driver(&i5400_driver);
1430} 1430}
1431 1431
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 2bf2c5051bfe..a2fa1feed724 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -178,7 +178,7 @@ static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
178{ 178{
179 struct i82443bxgx_edacmc_error_info info; 179 struct i82443bxgx_edacmc_error_info info;
180 180
181 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 181 debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
182 i82443bxgx_edacmc_get_error_info(mci, &info); 182 i82443bxgx_edacmc_get_error_info(mci, &info);
183 i82443bxgx_edacmc_process_error_info(mci, &info, 1); 183 i82443bxgx_edacmc_process_error_info(mci, &info, 1);
184} 184}
@@ -198,13 +198,13 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
198 for (index = 0; index < mci->nr_csrows; index++) { 198 for (index = 0; index < mci->nr_csrows; index++) {
199 csrow = &mci->csrows[index]; 199 csrow = &mci->csrows[index];
200 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); 200 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
201 debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n", 201 debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
202 mci->mc_idx, __func__, index, drbar); 202 mci->mc_idx, __FILE__, __func__, index, drbar);
203 row_high_limit = ((u32) drbar << 23); 203 row_high_limit = ((u32) drbar << 23);
204 /* find the DRAM Chip Select Base address and mask */ 204 /* find the DRAM Chip Select Base address and mask */
205 debugf1("MC%d: " __FILE__ ": %s() Row=%d, " 205 debugf1("MC%d: %s: %s() Row=%d, "
206 "Boundry Address=%#0x, Last = %#0x \n", 206 "Boundry Address=%#0x, Last = %#0x\n",
207 mci->mc_idx, __func__, index, row_high_limit, 207 mci->mc_idx, __FILE__, __func__, index, row_high_limit,
208 row_high_limit_last); 208 row_high_limit_last);
209 209
210 /* 440GX goes to 2GB, represented with a DRB of 0. */ 210 /* 440GX goes to 2GB, represented with a DRB of 0. */
@@ -237,7 +237,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
237 enum mem_type mtype; 237 enum mem_type mtype;
238 enum edac_type edac_mode; 238 enum edac_type edac_mode;
239 239
240 debugf0("MC: " __FILE__ ": %s()\n", __func__); 240 debugf0("MC: %s: %s()\n", __FILE__, __func__);
241 241
242 /* Something is really hosed if PCI config space reads from 242 /* Something is really hosed if PCI config space reads from
243 * the MC aren't working. 243 * the MC aren't working.
@@ -250,7 +250,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
250 if (mci == NULL) 250 if (mci == NULL)
251 return -ENOMEM; 251 return -ENOMEM;
252 252
253 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 253 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
254 mci->dev = &pdev->dev; 254 mci->dev = &pdev->dev;
255 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR; 255 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
256 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 256 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
@@ -336,7 +336,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
336 __func__); 336 __func__);
337 } 337 }
338 338
339 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 339 debugf3("MC: %s: %s(): success\n", __FILE__, __func__);
340 return 0; 340 return 0;
341 341
342fail: 342fail:
@@ -352,7 +352,7 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
352{ 352{
353 int rc; 353 int rc;
354 354
355 debugf0("MC: " __FILE__ ": %s()\n", __func__); 355 debugf0("MC: %s: %s()\n", __FILE__, __func__);
356 356
357 /* don't need to call pci_enable_device() */ 357 /* don't need to call pci_enable_device() */
358 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data); 358 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
@@ -367,7 +367,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
367{ 367{
368 struct mem_ctl_info *mci; 368 struct mem_ctl_info *mci;
369 369
370 debugf0(__FILE__ ": %s()\n", __func__); 370 debugf0("%s: %s()\n", __FILE__, __func__);
371 371
372 if (i82443bxgx_pci) 372 if (i82443bxgx_pci)
373 edac_pci_release_generic_ctl(i82443bxgx_pci); 373 edac_pci_release_generic_ctl(i82443bxgx_pci);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 4471647b4807..6c1886b497ff 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -338,15 +338,13 @@ static struct of_device_id mpc85xx_pci_err_of_match[] = {
338}; 338};
339 339
340static struct of_platform_driver mpc85xx_pci_err_driver = { 340static struct of_platform_driver mpc85xx_pci_err_driver = {
341 .owner = THIS_MODULE,
342 .name = "mpc85xx_pci_err",
343 .match_table = mpc85xx_pci_err_of_match,
344 .probe = mpc85xx_pci_err_probe, 341 .probe = mpc85xx_pci_err_probe,
345 .remove = __devexit_p(mpc85xx_pci_err_remove), 342 .remove = __devexit_p(mpc85xx_pci_err_remove),
346 .driver = { 343 .driver = {
347 .name = "mpc85xx_pci_err", 344 .name = "mpc85xx_pci_err",
348 .owner = THIS_MODULE, 345 .owner = THIS_MODULE,
349 }, 346 .of_match_table = mpc85xx_pci_err_of_match,
347 },
350}; 348};
351 349
352#endif /* CONFIG_PCI */ 350#endif /* CONFIG_PCI */
@@ -654,15 +652,13 @@ static struct of_device_id mpc85xx_l2_err_of_match[] = {
654}; 652};
655 653
656static struct of_platform_driver mpc85xx_l2_err_driver = { 654static struct of_platform_driver mpc85xx_l2_err_driver = {
657 .owner = THIS_MODULE,
658 .name = "mpc85xx_l2_err",
659 .match_table = mpc85xx_l2_err_of_match,
660 .probe = mpc85xx_l2_err_probe, 655 .probe = mpc85xx_l2_err_probe,
661 .remove = mpc85xx_l2_err_remove, 656 .remove = mpc85xx_l2_err_remove,
662 .driver = { 657 .driver = {
663 .name = "mpc85xx_l2_err", 658 .name = "mpc85xx_l2_err",
664 .owner = THIS_MODULE, 659 .owner = THIS_MODULE,
665 }, 660 .of_match_table = mpc85xx_l2_err_of_match,
661 },
666}; 662};
667 663
668/**************************** MC Err device ***************************/ 664/**************************** MC Err device ***************************/
@@ -1131,15 +1127,13 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = {
1131}; 1127};
1132 1128
1133static struct of_platform_driver mpc85xx_mc_err_driver = { 1129static struct of_platform_driver mpc85xx_mc_err_driver = {
1134 .owner = THIS_MODULE,
1135 .name = "mpc85xx_mc_err",
1136 .match_table = mpc85xx_mc_err_of_match,
1137 .probe = mpc85xx_mc_err_probe, 1130 .probe = mpc85xx_mc_err_probe,
1138 .remove = mpc85xx_mc_err_remove, 1131 .remove = mpc85xx_mc_err_remove,
1139 .driver = { 1132 .driver = {
1140 .name = "mpc85xx_mc_err", 1133 .name = "mpc85xx_mc_err",
1141 .owner = THIS_MODULE, 1134 .owner = THIS_MODULE,
1142 }, 1135 .of_match_table = mpc85xx_mc_err_of_match,
1136 },
1143}; 1137};
1144 1138
1145#ifdef CONFIG_MPC85xx 1139#ifdef CONFIG_MPC85xx
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 11f2172aa1e6..9d6f6783328c 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -202,13 +202,13 @@ static struct of_device_id ppc4xx_edac_match[] = {
202}; 202};
203 203
204static struct of_platform_driver ppc4xx_edac_driver = { 204static struct of_platform_driver ppc4xx_edac_driver = {
205 .match_table = ppc4xx_edac_match,
206 .probe = ppc4xx_edac_probe, 205 .probe = ppc4xx_edac_probe,
207 .remove = ppc4xx_edac_remove, 206 .remove = ppc4xx_edac_remove,
208 .driver = { 207 .driver = {
209 .owner = THIS_MODULE, 208 .owner = THIS_MODULE,
210 .name = PPC4XX_EDAC_MODULE_NAME 209 .name = PPC4XX_EDAC_MODULE_NAME
211 } 210 .of_match_table = ppc4xx_edac_match,
211 },
212}; 212};
213 213
214/* 214/*
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 5045156c5313..9dcb30466ec0 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -30,7 +30,6 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33#include <linux/timer.h>
34#include <linux/workqueue.h> 33#include <linux/workqueue.h>
35 34
36#include <asm/atomic.h> 35#include <asm/atomic.h>
@@ -63,7 +62,7 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
63#define BIB_CRC(v) ((v) << 0) 62#define BIB_CRC(v) ((v) << 0)
64#define BIB_CRC_LENGTH(v) ((v) << 16) 63#define BIB_CRC_LENGTH(v) ((v) << 16)
65#define BIB_INFO_LENGTH(v) ((v) << 24) 64#define BIB_INFO_LENGTH(v) ((v) << 24)
66 65#define BIB_BUS_NAME 0x31333934 /* "1394" */
67#define BIB_LINK_SPEED(v) ((v) << 0) 66#define BIB_LINK_SPEED(v) ((v) << 0)
68#define BIB_GENERATION(v) ((v) << 4) 67#define BIB_GENERATION(v) ((v) << 4)
69#define BIB_MAX_ROM(v) ((v) << 8) 68#define BIB_MAX_ROM(v) ((v) << 8)
@@ -73,7 +72,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
73#define BIB_BMC ((1) << 28) 72#define BIB_BMC ((1) << 28)
74#define BIB_ISC ((1) << 29) 73#define BIB_ISC ((1) << 29)
75#define BIB_CMC ((1) << 30) 74#define BIB_CMC ((1) << 30)
76#define BIB_IMC ((1) << 31) 75#define BIB_IRMC ((1) << 31)
76#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
77 77
78static void generate_config_rom(struct fw_card *card, __be32 *config_rom) 78static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
79{ 79{
@@ -91,18 +91,18 @@ static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
91 91
92 config_rom[0] = cpu_to_be32( 92 config_rom[0] = cpu_to_be32(
93 BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0)); 93 BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
94 config_rom[1] = cpu_to_be32(0x31333934); 94 config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
95 config_rom[2] = cpu_to_be32( 95 config_rom[2] = cpu_to_be32(
96 BIB_LINK_SPEED(card->link_speed) | 96 BIB_LINK_SPEED(card->link_speed) |
97 BIB_GENERATION(card->config_rom_generation++ % 14 + 2) | 97 BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
98 BIB_MAX_ROM(2) | 98 BIB_MAX_ROM(2) |
99 BIB_MAX_RECEIVE(card->max_receive) | 99 BIB_MAX_RECEIVE(card->max_receive) |
100 BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC); 100 BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
101 config_rom[3] = cpu_to_be32(card->guid >> 32); 101 config_rom[3] = cpu_to_be32(card->guid >> 32);
102 config_rom[4] = cpu_to_be32(card->guid); 102 config_rom[4] = cpu_to_be32(card->guid);
103 103
104 /* Generate root directory. */ 104 /* Generate root directory. */
105 config_rom[6] = cpu_to_be32(0x0c0083c0); /* node capabilities */ 105 config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
106 i = 7; 106 i = 7;
107 j = 7 + descriptor_count; 107 j = 7 + descriptor_count;
108 108
@@ -407,13 +407,6 @@ static void fw_card_bm_work(struct work_struct *work)
407 fw_card_put(card); 407 fw_card_put(card);
408} 408}
409 409
410static void flush_timer_callback(unsigned long data)
411{
412 struct fw_card *card = (struct fw_card *)data;
413
414 fw_flush_transactions(card);
415}
416
417void fw_card_initialize(struct fw_card *card, 410void fw_card_initialize(struct fw_card *card,
418 const struct fw_card_driver *driver, 411 const struct fw_card_driver *driver,
419 struct device *device) 412 struct device *device)
@@ -432,8 +425,6 @@ void fw_card_initialize(struct fw_card *card,
432 init_completion(&card->done); 425 init_completion(&card->done);
433 INIT_LIST_HEAD(&card->transaction_list); 426 INIT_LIST_HEAD(&card->transaction_list);
434 spin_lock_init(&card->lock); 427 spin_lock_init(&card->lock);
435 setup_timer(&card->flush_timer,
436 flush_timer_callback, (unsigned long)card);
437 428
438 card->local_node = NULL; 429 card->local_node = NULL;
439 430
@@ -558,7 +549,6 @@ void fw_core_remove_card(struct fw_card *card)
558 wait_for_completion(&card->done); 549 wait_for_completion(&card->done);
559 550
560 WARN_ON(!list_empty(&card->transaction_list)); 551 WARN_ON(!list_empty(&card->transaction_list));
561 del_timer_sync(&card->flush_timer);
562} 552}
563EXPORT_SYMBOL(fw_core_remove_card); 553EXPORT_SYMBOL(fw_core_remove_card);
564 554
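
Two things happen in core-card.c: the bus info block magic numbers get names (BIB_BUS_NAME is the ASCII encoding of "1394", and BIB_IMC is corrected to BIB_IRMC, isochronous resource manager capable, matching the IEEE 1212 field name), and the card-wide flush timer goes away because expiry becomes per-transaction in core-transaction.c below. A hypothetical helper, not part of the patch, to spell out what the renamed capability bits mean:

/* Hypothetical: decode the capability bits of config_rom[2]. */
static void decode_bus_caps(u32 caps)
{
	pr_info("IRMC=%u CMC=%u ISC=%u BMC=%u\n",
		!!(caps & BIB_IRMC),	/* iso resource manager capable */
		!!(caps & BIB_CMC),	/* cycle master capable */
		!!(caps & BIB_ISC),	/* iso operation supported */
		!!(caps & BIB_BMC));	/* bus manager capable */
}
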
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 14a34d99eea2..5bf106b9d791 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -227,7 +227,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
227 list_add_tail(&client->link, &device->client_list); 227 list_add_tail(&client->link, &device->client_list);
228 mutex_unlock(&device->client_list_mutex); 228 mutex_unlock(&device->client_list_mutex);
229 229
230 return 0; 230 return nonseekable_open(inode, file);
231} 231}
232 232
233static void queue_event(struct client *client, struct event *event, 233static void queue_event(struct client *client, struct event *event,
@@ -1496,13 +1496,13 @@ static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
1496 1496
1497const struct file_operations fw_device_ops = { 1497const struct file_operations fw_device_ops = {
1498 .owner = THIS_MODULE, 1498 .owner = THIS_MODULE,
1499 .llseek = no_llseek,
1499 .open = fw_device_op_open, 1500 .open = fw_device_op_open,
1500 .read = fw_device_op_read, 1501 .read = fw_device_op_read,
1501 .unlocked_ioctl = fw_device_op_ioctl, 1502 .unlocked_ioctl = fw_device_op_ioctl,
1502 .poll = fw_device_op_poll,
1503 .release = fw_device_op_release,
1504 .mmap = fw_device_op_mmap, 1503 .mmap = fw_device_op_mmap,
1505 1504 .release = fw_device_op_release,
1505 .poll = fw_device_op_poll,
1506#ifdef CONFIG_COMPAT 1506#ifdef CONFIG_COMPAT
1507 .compat_ioctl = fw_device_op_compat_ioctl, 1507 .compat_ioctl = fw_device_op_compat_ioctl,
1508#endif 1508#endif
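
The cdev change pairs nonseekable_open() with .llseek = no_llseek, which makes the character device honestly non-seekable instead of silently accepting lseek(). The generic pattern, sketched with illustrative names:

static int example_open(struct inode *inode, struct file *file)
{
	/* ... per-open setup ... */
	return nonseekable_open(inode, file);	/* also disables pread/pwrite */
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.llseek	= no_llseek,	/* lseek() now fails with -ESPIPE */
	.open	= example_open,
};
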
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 673b03f8b4ec..fdc33ff06dc1 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -81,7 +81,7 @@ static int close_transaction(struct fw_transaction *transaction,
81 spin_lock_irqsave(&card->lock, flags); 81 spin_lock_irqsave(&card->lock, flags);
82 list_for_each_entry(t, &card->transaction_list, link) { 82 list_for_each_entry(t, &card->transaction_list, link) {
83 if (t == transaction) { 83 if (t == transaction) {
84 list_del(&t->link); 84 list_del_init(&t->link);
85 card->tlabel_mask &= ~(1ULL << t->tlabel); 85 card->tlabel_mask &= ~(1ULL << t->tlabel);
86 break; 86 break;
87 } 87 }
@@ -89,6 +89,7 @@ static int close_transaction(struct fw_transaction *transaction,
89 spin_unlock_irqrestore(&card->lock, flags); 89 spin_unlock_irqrestore(&card->lock, flags);
90 90
91 if (&t->link != &card->transaction_list) { 91 if (&t->link != &card->transaction_list) {
92 del_timer_sync(&t->split_timeout_timer);
92 t->callback(card, rcode, NULL, 0, t->callback_data); 93 t->callback(card, rcode, NULL, 0, t->callback_data);
93 return 0; 94 return 0;
94 } 95 }
@@ -121,6 +122,31 @@ int fw_cancel_transaction(struct fw_card *card,
121} 122}
122EXPORT_SYMBOL(fw_cancel_transaction); 123EXPORT_SYMBOL(fw_cancel_transaction);
123 124
125static void split_transaction_timeout_callback(unsigned long data)
126{
127 struct fw_transaction *t = (struct fw_transaction *)data;
128 struct fw_card *card = t->card;
129 unsigned long flags;
130
131 spin_lock_irqsave(&card->lock, flags);
132 if (list_empty(&t->link)) {
133 spin_unlock_irqrestore(&card->lock, flags);
134 return;
135 }
136 list_del(&t->link);
137 card->tlabel_mask &= ~(1ULL << t->tlabel);
138 spin_unlock_irqrestore(&card->lock, flags);
139
140 card->driver->cancel_packet(card, &t->packet);
141
142 /*
143 * At this point cancel_packet will never call the transaction
144 * callback, since we just took the transaction out of the list.
145 * So do it here.
146 */
147 t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
148}
149
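	/*
	 * Note the list_empty() guard above: close_transaction() and
	 * fw_core_handle_response() unlink finished transactions with
	 * list_del_init(), so a timer that fires after completion sees
	 * an empty node here and backs off without running the callback
	 * twice.
	 */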
124static void transmit_complete_callback(struct fw_packet *packet, 150static void transmit_complete_callback(struct fw_packet *packet,
125 struct fw_card *card, int status) 151 struct fw_card *card, int status)
126{ 152{
@@ -229,6 +255,23 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
229 packet->payload_mapped = false; 255 packet->payload_mapped = false;
230} 256}
231 257
258static int allocate_tlabel(struct fw_card *card)
259{
260 int tlabel;
261
262 tlabel = card->current_tlabel;
263 while (card->tlabel_mask & (1ULL << tlabel)) {
264 tlabel = (tlabel + 1) & 0x3f;
265 if (tlabel == card->current_tlabel)
266 return -EBUSY;
267 }
268
269 card->current_tlabel = (tlabel + 1) & 0x3f;
270 card->tlabel_mask |= 1ULL << tlabel;
271
272 return tlabel;
273}
274
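	/*
	 * allocate_tlabel() is a round-robin scan: at most 64 labels are
	 * probed starting at current_tlabel, and wrapping back to the
	 * starting point means every label is still in flight, so -EBUSY
	 * is returned (surfaced as RCODE_SEND_ERROR in fw_send_request()).
	 */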
232/** 275/**
233 * This function provides low-level access to the IEEE1394 transaction 276 * This function provides low-level access to the IEEE1394 transaction
234 * logic. Most C programs would use either fw_read(), fw_write() or 277 * logic. Most C programs would use either fw_read(), fw_write() or
@@ -277,31 +320,26 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
277 int tlabel; 320 int tlabel;
278 321
279 /* 322 /*
280 * Bump the flush timer up 100ms first of all so we
281 * don't race with a flush timer callback.
282 */
283
284 mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
285
286 /*
287 * Allocate tlabel from the bitmap and put the transaction on 323 * Allocate tlabel from the bitmap and put the transaction on
288 * the list while holding the card spinlock. 324 * the list while holding the card spinlock.
289 */ 325 */
290 326
291 spin_lock_irqsave(&card->lock, flags); 327 spin_lock_irqsave(&card->lock, flags);
292 328
293 tlabel = card->current_tlabel; 329 tlabel = allocate_tlabel(card);
294 if (card->tlabel_mask & (1ULL << tlabel)) { 330 if (tlabel < 0) {
295 spin_unlock_irqrestore(&card->lock, flags); 331 spin_unlock_irqrestore(&card->lock, flags);
296 callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data); 332 callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
297 return; 333 return;
298 } 334 }
299 335
300 card->current_tlabel = (card->current_tlabel + 1) & 0x3f;
301 card->tlabel_mask |= (1ULL << tlabel);
302
303 t->node_id = destination_id; 336 t->node_id = destination_id;
304 t->tlabel = tlabel; 337 t->tlabel = tlabel;
338 t->card = card;
339 setup_timer(&t->split_timeout_timer,
340 split_transaction_timeout_callback, (unsigned long)t);
341 /* FIXME: start this timer later, relative to t->timestamp */
342 mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10));
305 t->callback = callback; 343 t->callback = callback;
306 t->callback_data = callback_data; 344 t->callback_data = callback_data;
307 345
@@ -347,11 +385,13 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
347 struct transaction_callback_data d; 385 struct transaction_callback_data d;
348 struct fw_transaction t; 386 struct fw_transaction t;
349 387
388 init_timer_on_stack(&t.split_timeout_timer);
350 init_completion(&d.done); 389 init_completion(&d.done);
351 d.payload = payload; 390 d.payload = payload;
352 fw_send_request(card, &t, tcode, destination_id, generation, speed, 391 fw_send_request(card, &t, tcode, destination_id, generation, speed,
353 offset, payload, length, transaction_callback, &d); 392 offset, payload, length, transaction_callback, &d);
354 wait_for_completion(&d.done); 393 wait_for_completion(&d.done);
394 destroy_timer_on_stack(&t.split_timeout_timer);
355 395
356 return d.rcode; 396 return d.rcode;
357} 397}
@@ -394,30 +434,6 @@ void fw_send_phy_config(struct fw_card *card,
394 mutex_unlock(&phy_config_mutex); 434 mutex_unlock(&phy_config_mutex);
395} 435}
396 436
397void fw_flush_transactions(struct fw_card *card)
398{
399 struct fw_transaction *t, *next;
400 struct list_head list;
401 unsigned long flags;
402
403 INIT_LIST_HEAD(&list);
404 spin_lock_irqsave(&card->lock, flags);
405 list_splice_init(&card->transaction_list, &list);
406 card->tlabel_mask = 0;
407 spin_unlock_irqrestore(&card->lock, flags);
408
409 list_for_each_entry_safe(t, next, &list, link) {
410 card->driver->cancel_packet(card, &t->packet);
411
412 /*
413 * At this point cancel_packet will never call the
414 * transaction callback, since we just took all the
415 * transactions out of the list. So do it here.
416 */
417 t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
418 }
419}
420
421static struct fw_address_handler *lookup_overlapping_address_handler( 437static struct fw_address_handler *lookup_overlapping_address_handler(
422 struct list_head *list, unsigned long long offset, size_t length) 438 struct list_head *list, unsigned long long offset, size_t length)
423{ 439{
@@ -827,8 +843,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
827 spin_lock_irqsave(&card->lock, flags); 843 spin_lock_irqsave(&card->lock, flags);
828 list_for_each_entry(t, &card->transaction_list, link) { 844 list_for_each_entry(t, &card->transaction_list, link) {
829 if (t->node_id == source && t->tlabel == tlabel) { 845 if (t->node_id == source && t->tlabel == tlabel) {
830 list_del(&t->link); 846 list_del_init(&t->link);
831 card->tlabel_mask &= ~(1 << t->tlabel); 847 card->tlabel_mask &= ~(1ULL << t->tlabel);
832 break; 848 break;
833 } 849 }
834 } 850 }
@@ -869,6 +885,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
869 break; 885 break;
870 } 886 }
871 887
888 del_timer_sync(&t->split_timeout_timer);
889
872 /* 890 /*
873 * The response handler may be executed while the request handler 891 * The response handler may be executed while the request handler
874 * is still pending. Cancel the request handler. 892 * is still pending. Cancel the request handler.
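
fw_run_transaction() brackets its stack-allocated transaction with init_timer_on_stack()/destroy_timer_on_stack(); under CONFIG_DEBUG_OBJECTS_TIMERS these calls tell the object tracker that a timer legitimately lives on the stack, which a plain setup_timer() would not. A generic sketch of the idiom (the demo_* names are made up):

static void demo_timeout(unsigned long data)
{
	complete((struct completion *)data);
}

static void demo_wait(void)
{
	struct completion done;
	struct timer_list timer;

	init_completion(&done);
	setup_timer_on_stack(&timer, demo_timeout, (unsigned long)&done);
	mod_timer(&timer, jiffies + DIV_ROUND_UP(HZ, 10));	/* ~100 ms */

	wait_for_completion(&done);
	del_timer_sync(&timer);			/* harmless if already expired */
	destroy_timer_on_stack(&timer);		/* mandatory for on-stack timers */
}
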
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index fb0321300cce..0ecfcd95f4c5 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -27,7 +27,12 @@ struct fw_packet;
27#define PHY_LINK_ACTIVE 0x80 27#define PHY_LINK_ACTIVE 0x80
28#define PHY_CONTENDER 0x40 28#define PHY_CONTENDER 0x40
29#define PHY_BUS_RESET 0x40 29#define PHY_BUS_RESET 0x40
30#define PHY_EXTENDED_REGISTERS 0xe0
30#define PHY_BUS_SHORT_RESET 0x40 31#define PHY_BUS_SHORT_RESET 0x40
32#define PHY_INT_STATUS_BITS 0x3c
33#define PHY_ENABLE_ACCEL 0x02
34#define PHY_ENABLE_MULTI 0x01
35#define PHY_PAGE_SELECT 0xe0
31 36
32#define BANDWIDTH_AVAILABLE_INITIAL 4915 37#define BANDWIDTH_AVAILABLE_INITIAL 4915
33#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) 38#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
@@ -215,7 +220,6 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
215void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); 220void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
216void fw_fill_response(struct fw_packet *response, u32 *request_header, 221void fw_fill_response(struct fw_packet *response, u32 *request_header,
217 int rcode, void *payload, size_t length); 222 int rcode, void *payload, size_t length);
218void fw_flush_transactions(struct fw_card *card);
219void fw_send_phy_config(struct fw_card *card, 223void fw_send_phy_config(struct fw_card *card,
220 int node_id, int generation, int gap_count); 224 int node_id, int generation, int gap_count);
221 225
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index a3b083a7403a..9f627e758cfc 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -236,13 +236,15 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
236#define QUIRK_CYCLE_TIMER 1 236#define QUIRK_CYCLE_TIMER 1
237#define QUIRK_RESET_PACKET 2 237#define QUIRK_RESET_PACKET 2
238#define QUIRK_BE_HEADERS 4 238#define QUIRK_BE_HEADERS 4
239#define QUIRK_NO_1394A 8
239 240
240/* In case of multiple matches in ohci_quirks[], only the first one is used. */ 241/* In case of multiple matches in ohci_quirks[], only the first one is used. */
241static const struct { 242static const struct {
242 unsigned short vendor, device, flags; 243 unsigned short vendor, device, flags;
243} ohci_quirks[] = { 244} ohci_quirks[] = {
244 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER | 245 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
245 QUIRK_RESET_PACKET}, 246 QUIRK_RESET_PACKET |
247 QUIRK_NO_1394A},
246 {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, 248 {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
247 {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 249 {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
248 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 250 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
@@ -257,15 +259,16 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
257 ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER) 259 ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
258 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) 260 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
259 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) 261 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
262 ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
260 ")"); 263 ")");
261 264
262#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
263
264#define OHCI_PARAM_DEBUG_AT_AR 1 265#define OHCI_PARAM_DEBUG_AT_AR 1
265#define OHCI_PARAM_DEBUG_SELFIDS 2 266#define OHCI_PARAM_DEBUG_SELFIDS 2
266#define OHCI_PARAM_DEBUG_IRQS 4 267#define OHCI_PARAM_DEBUG_IRQS 4
267#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ 268#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
268 269
270#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
271
269static int param_debug; 272static int param_debug;
270module_param_named(debug, param_debug, int, 0644); 273module_param_named(debug, param_debug, int, 0644);
271MODULE_PARM_DESC(debug, "Verbose logging (default = 0" 274MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
@@ -438,9 +441,10 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
438 441
439#else 442#else
440 443
441#define log_irqs(evt) 444#define param_debug 0
442#define log_selfids(node_id, generation, self_id_count, sid) 445static inline void log_irqs(u32 evt) {}
443#define log_ar_at_event(dir, speed, header, evt) 446static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
447static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}
444 448
445#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */ 449#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
446 450
@@ -460,27 +464,71 @@ static inline void flush_writes(const struct fw_ohci *ohci)
460 reg_read(ohci, OHCI1394_Version); 464 reg_read(ohci, OHCI1394_Version);
461} 465}
462 466
463static int ohci_update_phy_reg(struct fw_card *card, int addr, 467static int read_phy_reg(struct fw_ohci *ohci, int addr)
464 int clear_bits, int set_bits)
465{ 468{
466 struct fw_ohci *ohci = fw_ohci(card); 469 u32 val;
467 u32 val, old; 470 int i;
468 471
469 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); 472 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
470 flush_writes(ohci); 473 for (i = 0; i < 10; i++) {
471 msleep(2); 474 val = reg_read(ohci, OHCI1394_PhyControl);
472 val = reg_read(ohci, OHCI1394_PhyControl); 475 if (val & OHCI1394_PhyControl_ReadDone)
473 if ((val & OHCI1394_PhyControl_ReadDone) == 0) { 476 return OHCI1394_PhyControl_ReadData(val);
474 fw_error("failed to set phy reg bits.\n"); 477
475 return -EBUSY; 478 msleep(1);
476 } 479 }
480 fw_error("failed to read phy reg\n");
481
482 return -EBUSY;
483}
484
485static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
486{
487 int i;
477 488
478 old = OHCI1394_PhyControl_ReadData(val);
479 old = (old & ~clear_bits) | set_bits;
480 reg_write(ohci, OHCI1394_PhyControl, 489 reg_write(ohci, OHCI1394_PhyControl,
481 OHCI1394_PhyControl_Write(addr, old)); 490 OHCI1394_PhyControl_Write(addr, val));
491 for (i = 0; i < 100; i++) {
492 val = reg_read(ohci, OHCI1394_PhyControl);
493 if (!(val & OHCI1394_PhyControl_WritePending))
494 return 0;
482 495
483 return 0; 496 msleep(1);
497 }
498 fw_error("failed to write phy reg\n");
499
500 return -EBUSY;
501}
502
503static int ohci_update_phy_reg(struct fw_card *card, int addr,
504 int clear_bits, int set_bits)
505{
506 struct fw_ohci *ohci = fw_ohci(card);
507 int ret;
508
509 ret = read_phy_reg(ohci, addr);
510 if (ret < 0)
511 return ret;
512
513 /*
514 * The interrupt status bits are cleared by writing a one bit.
515 * Avoid clearing them unless explicitly requested in set_bits.
516 */
517 if (addr == 5)
518 clear_bits |= PHY_INT_STATUS_BITS;
519
520 return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
521}
522
523static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
524{
525 int ret;
526
527 ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5);
528 if (ret < 0)
529 return ret;
530
531 return read_phy_reg(ohci, addr);
484} 532}
485 533
486static int ar_context_add_page(struct ar_context *ctx) 534static int ar_context_add_page(struct ar_context *ctx)
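
The new PHY accessors replace one fixed msleep(2) plus a single status check with a bounded poll on the ReadDone/WritePending bits (hence the WriteDone rename in ohci.h below). Stripped of the OHCI specifics, the idiom is:

/* Generic bounded poll (sketch); must run in sleepable context. */
static int wait_while_busy(void __iomem *reg, u32 busy_bit, int tries)
{
	while (tries-- > 0) {
		if (!(readl(reg) & busy_bit))
			return 0;
		msleep(1);
	}
	return -EBUSY;		/* device never deasserted the busy bit */
}
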
@@ -1495,13 +1543,64 @@ static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
1495 memset(&dest[length], 0, CONFIG_ROM_SIZE - size); 1543 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
1496} 1544}
1497 1545
1546static int configure_1394a_enhancements(struct fw_ohci *ohci)
1547{
1548 bool enable_1394a;
1549 int ret, clear, set, offset;
1550
1551 /* Check if the driver should configure link and PHY. */
1552 if (!(reg_read(ohci, OHCI1394_HCControlSet) &
1553 OHCI1394_HCControl_programPhyEnable))
1554 return 0;
1555
1556 /* Paranoia: check whether the PHY supports 1394a, too. */
1557 enable_1394a = false;
1558 ret = read_phy_reg(ohci, 2);
1559 if (ret < 0)
1560 return ret;
1561 if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
1562 ret = read_paged_phy_reg(ohci, 1, 8);
1563 if (ret < 0)
1564 return ret;
1565 if (ret >= 1)
1566 enable_1394a = true;
1567 }
1568
1569 if (ohci->quirks & QUIRK_NO_1394A)
1570 enable_1394a = false;
1571
1572 /* Configure PHY and link consistently. */
1573 if (enable_1394a) {
1574 clear = 0;
1575 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
1576 } else {
1577 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
1578 set = 0;
1579 }
1580 ret = ohci_update_phy_reg(&ohci->card, 5, clear, set);
1581 if (ret < 0)
1582 return ret;
1583
1584 if (enable_1394a)
1585 offset = OHCI1394_HCControlSet;
1586 else
1587 offset = OHCI1394_HCControlClear;
1588 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
1589
1590 /* Clean up: configuration has been taken care of. */
1591 reg_write(ohci, OHCI1394_HCControlClear,
1592 OHCI1394_HCControl_programPhyEnable);
1593
1594 return 0;
1595}
1596
1498static int ohci_enable(struct fw_card *card, 1597static int ohci_enable(struct fw_card *card,
1499 const __be32 *config_rom, size_t length) 1598 const __be32 *config_rom, size_t length)
1500{ 1599{
1501 struct fw_ohci *ohci = fw_ohci(card); 1600 struct fw_ohci *ohci = fw_ohci(card);
1502 struct pci_dev *dev = to_pci_dev(card->device); 1601 struct pci_dev *dev = to_pci_dev(card->device);
1503 u32 lps; 1602 u32 lps;
1504 int i; 1603 int i, ret;
1505 1604
1506 if (software_reset(ohci)) { 1605 if (software_reset(ohci)) {
1507 fw_error("Failed to reset ohci card.\n"); 1606 fw_error("Failed to reset ohci card.\n");
@@ -1565,10 +1664,14 @@ static int ohci_enable(struct fw_card *card,
1565 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 1664 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
1566 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); 1665 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
1567 1666
1667 ret = configure_1394a_enhancements(ohci);
1668 if (ret < 0)
1669 return ret;
1670
1568 /* Activate link_on bit and contender bit in our self ID packets.*/ 1671 /* Activate link_on bit and contender bit in our self ID packets.*/
1569 if (ohci_update_phy_reg(card, 4, 0, 1672 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
1570 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0) 1673 if (ret < 0)
1571 return -EIO; 1674 return ret;
1572 1675
1573 /* 1676 /*
1574 * When the link is not yet enabled, the atomic config rom 1677 * When the link is not yet enabled, the atomic config rom
@@ -2304,7 +2407,7 @@ static const struct fw_card_driver ohci_driver = {
2304}; 2407};
2305 2408
2306#ifdef CONFIG_PPC_PMAC 2409#ifdef CONFIG_PPC_PMAC
2307static void ohci_pmac_on(struct pci_dev *dev) 2410static void pmac_ohci_on(struct pci_dev *dev)
2308{ 2411{
2309 if (machine_is(powermac)) { 2412 if (machine_is(powermac)) {
2310 struct device_node *ofn = pci_device_to_OF_node(dev); 2413 struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2316,7 +2419,7 @@ static void ohci_pmac_on(struct pci_dev *dev)
2316 } 2419 }
2317} 2420}
2318 2421
2319static void ohci_pmac_off(struct pci_dev *dev) 2422static void pmac_ohci_off(struct pci_dev *dev)
2320{ 2423{
2321 if (machine_is(powermac)) { 2424 if (machine_is(powermac)) {
2322 struct device_node *ofn = pci_device_to_OF_node(dev); 2425 struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2328,15 +2431,15 @@ static void ohci_pmac_off(struct pci_dev *dev)
2328 } 2431 }
2329} 2432}
2330#else 2433#else
2331#define ohci_pmac_on(dev) 2434static inline void pmac_ohci_on(struct pci_dev *dev) {}
2332#define ohci_pmac_off(dev) 2435static inline void pmac_ohci_off(struct pci_dev *dev) {}
2333#endif /* CONFIG_PPC_PMAC */ 2436#endif /* CONFIG_PPC_PMAC */
2334 2437
2335static int __devinit pci_probe(struct pci_dev *dev, 2438static int __devinit pci_probe(struct pci_dev *dev,
2336 const struct pci_device_id *ent) 2439 const struct pci_device_id *ent)
2337{ 2440{
2338 struct fw_ohci *ohci; 2441 struct fw_ohci *ohci;
2339 u32 bus_options, max_receive, link_speed, version; 2442 u32 bus_options, max_receive, link_speed, version, link_enh;
2340 u64 guid; 2443 u64 guid;
2341 int i, err, n_ir, n_it; 2444 int i, err, n_ir, n_it;
2342 size_t size; 2445 size_t size;
@@ -2349,7 +2452,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
2349 2452
2350 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); 2453 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
2351 2454
2352 ohci_pmac_on(dev); 2455 pmac_ohci_on(dev);
2353 2456
2354 err = pci_enable_device(dev); 2457 err = pci_enable_device(dev);
2355 if (err) { 2458 if (err) {
@@ -2389,6 +2492,23 @@ static int __devinit pci_probe(struct pci_dev *dev,
2389 if (param_quirks) 2492 if (param_quirks)
2390 ohci->quirks = param_quirks; 2493 ohci->quirks = param_quirks;
2391 2494
2495 /* TI OHCI-Lynx and compatible: set recommended configuration bits. */
2496 if (dev->vendor == PCI_VENDOR_ID_TI) {
2497 pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);
2498
2499 /* adjust latency of ATx FIFO: use 1.7 KB threshold */
2500 link_enh &= ~TI_LinkEnh_atx_thresh_mask;
2501 link_enh |= TI_LinkEnh_atx_thresh_1_7K;
2502
2503 /* use priority arbitration for asynchronous responses */
2504 link_enh |= TI_LinkEnh_enab_unfair;
2505
2506 /* required for aPhyEnhanceEnable to work */
2507 link_enh |= TI_LinkEnh_enab_accel;
2508
2509 pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
2510 }
2511
2392 ar_context_init(&ohci->ar_request_ctx, ohci, 2512 ar_context_init(&ohci->ar_request_ctx, ohci,
2393 OHCI1394_AsReqRcvContextControlSet); 2513 OHCI1394_AsReqRcvContextControlSet);
2394 2514
@@ -2466,7 +2586,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
2466 pci_disable_device(dev); 2586 pci_disable_device(dev);
2467 fail_free: 2587 fail_free:
2468 kfree(&ohci->card); 2588 kfree(&ohci->card);
2469 ohci_pmac_off(dev); 2589 pmac_ohci_off(dev);
2470 fail: 2590 fail:
2471 if (err == -ENOMEM) 2591 if (err == -ENOMEM)
2472 fw_error("Out of memory\n"); 2592 fw_error("Out of memory\n");
@@ -2509,7 +2629,7 @@ static void pci_remove(struct pci_dev *dev)
2509 pci_release_region(dev, 0); 2629 pci_release_region(dev, 0);
2510 pci_disable_device(dev); 2630 pci_disable_device(dev);
2511 kfree(&ohci->card); 2631 kfree(&ohci->card);
2512 ohci_pmac_off(dev); 2632 pmac_ohci_off(dev);
2513 2633
2514 fw_notify("Removed fw-ohci device.\n"); 2634 fw_notify("Removed fw-ohci device.\n");
2515} 2635}
@@ -2530,7 +2650,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
2530 err = pci_set_power_state(dev, pci_choose_state(dev, state)); 2650 err = pci_set_power_state(dev, pci_choose_state(dev, state));
2531 if (err) 2651 if (err)
2532 fw_error("pci_set_power_state failed with %d\n", err); 2652 fw_error("pci_set_power_state failed with %d\n", err);
2533 ohci_pmac_off(dev); 2653 pmac_ohci_off(dev);
2534 2654
2535 return 0; 2655 return 0;
2536} 2656}
@@ -2540,7 +2660,7 @@ static int pci_resume(struct pci_dev *dev)
2540 struct fw_ohci *ohci = pci_get_drvdata(dev); 2660 struct fw_ohci *ohci = pci_get_drvdata(dev);
2541 int err; 2661 int err;
2542 2662
2543 ohci_pmac_on(dev); 2663 pmac_ohci_on(dev);
2544 pci_set_power_state(dev, PCI_D0); 2664 pci_set_power_state(dev, PCI_D0);
2545 pci_restore_state(dev); 2665 pci_restore_state(dev);
2546 err = pci_enable_device(dev); 2666 err = pci_enable_device(dev);
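
A recurring cleanup in this file: empty #define stubs (the log_* macros for !CONFIG_FIREWIRE_OHCI_DEBUG, and the PowerMac power hooks, renamed pmac_ohci_on/off, for !CONFIG_PPC_PMAC) become empty static inline functions, so the disabled configuration still type-checks call sites and evaluates arguments. The shape of the trick:

#ifdef CONFIG_EXAMPLE_FEATURE		/* illustrative option name */
void example_hook(struct pci_dev *dev);
#else
/* Typed no-op: callers compile identically with the feature off. */
static inline void example_hook(struct pci_dev *dev) {}
#endif
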
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index ba492d85c516..3bc9a5d744eb 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -67,7 +67,7 @@
67#define OHCI1394_PhyControl_ReadDone 0x80000000 67#define OHCI1394_PhyControl_ReadDone 0x80000000
68#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16) 68#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16)
69#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000) 69#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000)
70#define OHCI1394_PhyControl_WriteDone 0x00004000 70#define OHCI1394_PhyControl_WritePending 0x00004000
71#define OHCI1394_IsochronousCycleTimer 0x0F0 71#define OHCI1394_IsochronousCycleTimer 0x0F0
72#define OHCI1394_AsReqFilterHiSet 0x100 72#define OHCI1394_AsReqFilterHiSet 0x100
73#define OHCI1394_AsReqFilterHiClear 0x104 73#define OHCI1394_AsReqFilterHiClear 0x104
@@ -154,4 +154,12 @@
154 154
155#define OHCI1394_phy_tcode 0xe 155#define OHCI1394_phy_tcode 0xe
156 156
157/* TI extensions */
158
159#define PCI_CFG_TI_LinkEnh 0xf4
160#define TI_LinkEnh_enab_accel 0x00000002
161#define TI_LinkEnh_enab_unfair 0x00000080
162#define TI_LinkEnh_atx_thresh_mask 0x00003000
163#define TI_LinkEnh_atx_thresh_1_7K 0x00001000
164
157#endif /* _FIREWIRE_OHCI_H */ 165#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index fee678f74a19..4fd0f276df5a 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -139,6 +139,13 @@ config GPIO_MAX732X
139 Board setup code must specify the model to use, and the start 139 Board setup code must specify the model to use, and the start
140 number for these GPIOs. 140 number for these GPIOs.
141 141
142config GPIO_MAX732X_IRQ
143 bool "Interrupt controller support for MAX732x"
144 depends on GPIO_MAX732X=y && GENERIC_HARDIRQS
145 help
146 Say yes here to enable the max732x to be used as an interrupt
147 controller. It requires the driver to be built into the kernel.
148
142config GPIO_PCA953X 149config GPIO_PCA953X
143 tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports" 150 tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports"
144 depends on I2C 151 depends on I2C
@@ -264,10 +271,10 @@ config GPIO_BT8XX
264 If unsure, say N. 271 If unsure, say N.
265 272
266config GPIO_LANGWELL 273config GPIO_LANGWELL
267 bool "Intel Moorestown Platform Langwell GPIO support" 274 bool "Intel Langwell/Penwell GPIO support"
268 depends on PCI 275 depends on PCI
269 help 276 help
270 Say Y here to support Intel Moorestown platform GPIO. 277 Say Y here to support Intel Langwell/Penwell GPIO.
271 278
272config GPIO_TIMBERDALE 279config GPIO_TIMBERDALE
273 bool "Support for timberdale GPIO IP" 280 bool "Support for timberdale GPIO IP"
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index 0c3c498f2260..f73a1555e49d 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -197,7 +197,7 @@ static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
197 return 0; 197 return 0;
198} 198}
199 199
200static char *cs5535_gpio_names[] = { 200static const char * const cs5535_gpio_names[] = {
201 "GPIO0", "GPIO1", "GPIO2", "GPIO3", 201 "GPIO0", "GPIO1", "GPIO2", "GPIO3",
202 "GPIO4", "GPIO5", "GPIO6", "GPIO7", 202 "GPIO4", "GPIO5", "GPIO6", "GPIO7",
203 "GPIO8", "GPIO9", "GPIO10", "GPIO11", 203 "GPIO8", "GPIO9", "GPIO10", "GPIO11",
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index cae1b8c5b08c..3ca36542e338 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -722,7 +722,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
722 unsigned long flags; 722 unsigned long flags;
723 struct gpio_desc *desc; 723 struct gpio_desc *desc;
724 int status = -EINVAL; 724 int status = -EINVAL;
725 char *ioname = NULL; 725 const char *ioname = NULL;
726 726
727 /* can't export until sysfs is available ... */ 727 /* can't export until sysfs is available ... */
728 if (!gpio_class.p) { 728 if (!gpio_class.p) {
@@ -753,7 +753,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
753 struct device *dev; 753 struct device *dev;
754 754
755 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), 755 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
756 desc, ioname ? ioname : "gpio%d", gpio); 756 desc, ioname ? ioname : "gpio%u", gpio);
757 if (!IS_ERR(dev)) { 757 if (!IS_ERR(dev)) {
758 status = sysfs_create_group(&dev->kobj, 758 status = sysfs_create_group(&dev->kobj,
759 &gpio_attr_group); 759 &gpio_attr_group);
@@ -1106,7 +1106,7 @@ unlock:
1106fail: 1106fail:
1107 /* failures here can mean systems won't boot... */ 1107 /* failures here can mean systems won't boot... */
1108 if (status) 1108 if (status)
1109 pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n", 1109 pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
1110 chip->base, chip->base + chip->ngpio - 1, 1110 chip->base, chip->base + chip->ngpio - 1,
1111 chip->label ? : "generic"); 1111 chip->label ? : "generic");
1112 return status; 1112 return status;
@@ -1447,6 +1447,49 @@ fail:
1447} 1447}
1448EXPORT_SYMBOL_GPL(gpio_direction_output); 1448EXPORT_SYMBOL_GPL(gpio_direction_output);
1449 1449
1450/**
1451 * gpio_set_debounce - set the debounce time for a @gpio
1452 * @gpio: the gpio to set the debounce time on
1453 * @debounce: debounce time in microseconds
1454 */
1455int gpio_set_debounce(unsigned gpio, unsigned debounce)
1456{
1457 unsigned long flags;
1458 struct gpio_chip *chip;
1459 struct gpio_desc *desc = &gpio_desc[gpio];
1460 int status = -EINVAL;
1461
1462 spin_lock_irqsave(&gpio_lock, flags);
1463
1464 if (!gpio_is_valid(gpio))
1465 goto fail;
1466 chip = desc->chip;
1467 if (!chip || !chip->set || !chip->set_debounce)
1468 goto fail;
1469 gpio -= chip->base;
1470 if (gpio >= chip->ngpio)
1471 goto fail;
1472 status = gpio_ensure_requested(desc, gpio);
1473 if (status < 0)
1474 goto fail;
1475
1476 /* now we know the gpio is valid and chip won't vanish */
1477
1478 spin_unlock_irqrestore(&gpio_lock, flags);
1479
1480 might_sleep_if(extra_checks && chip->can_sleep);
1481
1482 return chip->set_debounce(chip, gpio, debounce);
1483
1484fail:
1485 spin_unlock_irqrestore(&gpio_lock, flags);
1486 if (status)
1487 pr_debug("%s: gpio-%d status %d\n",
1488 __func__, gpio, status);
1489
1490 return status;
1491}
1492EXPORT_SYMBOL_GPL(gpio_set_debounce);
1450 1493
1451/* I/O calls are only valid after configuration completed; the relevant 1494/* I/O calls are only valid after configuration completed; the relevant
1452 * "is this a valid GPIO" error checks should already have been done. 1495 * "is this a valid GPIO" error checks should already have been done.
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c
index 41a9388f2fde..48fc43c4bdd1 100644
--- a/drivers/gpio/it8761e_gpio.c
+++ b/drivers/gpio/it8761e_gpio.c
@@ -217,7 +217,10 @@ gpiochip_add_err:
217static void __exit it8761e_gpio_exit(void) 217static void __exit it8761e_gpio_exit(void)
218{ 218{
219 if (gpio_ba) { 219 if (gpio_ba) {
220 gpiochip_remove(&it8761e_gpio_chip); 220 int ret = gpiochip_remove(&it8761e_gpio_chip);
221
222 WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n",
223 __func__, ret);
221 224
222 release_region(gpio_ba, GPIO_IOSIZE); 225 release_region(gpio_ba, GPIO_IOSIZE);
223 gpio_ba = 0; 226 gpio_ba = 0;
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 00c3a14127af..8383a8d7f994 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -17,6 +17,7 @@
17 17
18/* Supports: 18/* Supports:
19 * Moorestown platform Langwell chip. 19 * Moorestown platform Langwell chip.
20 * Medfield platform Penwell chip.
20 */ 21 */
21 22
22#include <linux/module.h> 23#include <linux/module.h>
@@ -31,44 +32,65 @@
31#include <linux/gpio.h> 32#include <linux/gpio.h>
32#include <linux/slab.h> 33#include <linux/slab.h>
33 34
34struct lnw_gpio_register { 35/*
35 u32 GPLR[2]; 36 * The Langwell chip has 64 pins, so two 32-bit registers control each
36 u32 GPDR[2]; 37 * feature, while the Penwell chip has 96 pins per block and needs three.
37 u32 GPSR[2]; 38 * We therefore only define the register order instead of a structure,
38 u32 GPCR[2]; 39 * and compute the address and bit offset per pin (GPDR as an example):
39 u32 GRER[2]; 40 *
40 u32 GFER[2]; 41 * nreg = ngpio / 32;
41 u32 GEDR[2]; 42 * reg = offset / 32;
43 * bit = offset % 32;
44 * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
45 *
46 * so bit 'bit' of the register at reg_addr controls the GPDR feature of pin 'offset'
47*/
48
49enum GPIO_REG {
50 GPLR = 0, /* pin level read-only */
51 GPDR, /* pin direction */
52 GPSR, /* pin set */
53 GPCR, /* pin clear */
54 GRER, /* rising edge detect */
55 GFER, /* falling edge detect */
56 GEDR, /* edge detect result */
42}; 57};
43 58
44struct lnw_gpio { 59struct lnw_gpio {
45 struct gpio_chip chip; 60 struct gpio_chip chip;
46 struct lnw_gpio_register *reg_base; 61 void *reg_base;
47 spinlock_t lock; 62 spinlock_t lock;
48 unsigned irq_base; 63 unsigned irq_base;
49}; 64};
50 65
51static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset) 66static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
67 enum GPIO_REG reg_type)
52{ 68{
53 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); 69 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
70 unsigned nreg = chip->ngpio / 32;
54 u8 reg = offset / 32; 71 u8 reg = offset / 32;
55 void __iomem *gplr; 72 void __iomem *ptr;
73
74 ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4);
75 return ptr;
76}
77
78static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
79{
80 void __iomem *gplr = gpio_reg(chip, offset, GPLR);
56 81
57 gplr = (void __iomem *)(&lnw->reg_base->GPLR[reg]);
58 return readl(gplr) & BIT(offset % 32); 82 return readl(gplr) & BIT(offset % 32);
59} 83}
60 84
61static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 85static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
62{ 86{
63 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
64 u8 reg = offset / 32;
65 void __iomem *gpsr, *gpcr; 87 void __iomem *gpsr, *gpcr;
66 88
67 if (value) { 89 if (value) {
68 gpsr = (void __iomem *)(&lnw->reg_base->GPSR[reg]); 90 gpsr = gpio_reg(chip, offset, GPSR);
69 writel(BIT(offset % 32), gpsr); 91 writel(BIT(offset % 32), gpsr);
70 } else { 92 } else {
71 gpcr = (void __iomem *)(&lnw->reg_base->GPCR[reg]); 93 gpcr = gpio_reg(chip, offset, GPCR);
72 writel(BIT(offset % 32), gpcr); 94 writel(BIT(offset % 32), gpcr);
73 } 95 }
74} 96}
@@ -76,12 +98,10 @@ static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
76static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 98static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
77{ 99{
78 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); 100 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
79 u8 reg = offset / 32; 101 void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
80 u32 value; 102 u32 value;
81 unsigned long flags; 103 unsigned long flags;
82 void __iomem *gpdr;
83 104
84 gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
85 spin_lock_irqsave(&lnw->lock, flags); 105 spin_lock_irqsave(&lnw->lock, flags);
86 value = readl(gpdr); 106 value = readl(gpdr);
87 value &= ~BIT(offset % 32); 107 value &= ~BIT(offset % 32);
@@ -94,12 +114,10 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
94 unsigned offset, int value) 114 unsigned offset, int value)
95{ 115{
96 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); 116 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
97 u8 reg = offset / 32; 117 void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
98 unsigned long flags; 118 unsigned long flags;
99 void __iomem *gpdr;
100 119
101 lnw_gpio_set(chip, offset, value); 120 lnw_gpio_set(chip, offset, value);
102 gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
103 spin_lock_irqsave(&lnw->lock, flags); 121 spin_lock_irqsave(&lnw->lock, flags);
104 value = readl(gpdr); 122 value = readl(gpdr);
105 value |= BIT(offset % 32); 123 value |= BIT(offset % 32);
@@ -118,11 +136,10 @@ static int lnw_irq_type(unsigned irq, unsigned type)
118{ 136{
119 struct lnw_gpio *lnw = get_irq_chip_data(irq); 137 struct lnw_gpio *lnw = get_irq_chip_data(irq);
120 u32 gpio = irq - lnw->irq_base; 138 u32 gpio = irq - lnw->irq_base;
121 u8 reg = gpio / 32;
122 unsigned long flags; 139 unsigned long flags;
123 u32 value; 140 u32 value;
124 void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]); 141 void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
125 void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]); 142 void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
126 143
127 if (gpio >= lnw->chip.ngpio) 144 if (gpio >= lnw->chip.ngpio)
128 return -EINVAL; 145 return -EINVAL;
@@ -158,8 +175,10 @@ static struct irq_chip lnw_irqchip = {
158 .set_type = lnw_irq_type, 175 .set_type = lnw_irq_type,
159}; 176};
160 177
161static struct pci_device_id lnw_gpio_ids[] = { 178static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */
162 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f) }, 179 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
180 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
181 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
163 { 0, } 182 { 0, }
164}; 183};
165MODULE_DEVICE_TABLE(pci, lnw_gpio_ids); 184MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
@@ -167,17 +186,17 @@ MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
167static void lnw_irq_handler(unsigned irq, struct irq_desc *desc) 186static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
168{ 187{
169 struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq); 188 struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq);
170 u32 reg, gpio; 189 u32 base, gpio;
171 void __iomem *gedr; 190 void __iomem *gedr;
172 u32 gedr_v; 191 u32 gedr_v;
173 192
174 /* check GPIO controller to check which pin triggered the interrupt */ 193 /* check GPIO controller to check which pin triggered the interrupt */
175 for (reg = 0; reg < lnw->chip.ngpio / 32; reg++) { 194 for (base = 0; base < lnw->chip.ngpio; base += 32) {
176 gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]); 195 gedr = gpio_reg(&lnw->chip, base, GEDR);
177 gedr_v = readl(gedr); 196 gedr_v = readl(gedr);
178 if (!gedr_v) 197 if (!gedr_v)
179 continue; 198 continue;
180 for (gpio = reg*32; gpio < reg*32+32; gpio++) 199 for (gpio = base; gpio < base + 32; gpio++)
181 if (gedr_v & BIT(gpio % 32)) { 200 if (gedr_v & BIT(gpio % 32)) {
182 pr_debug("pin %d triggered\n", gpio); 201 pr_debug("pin %d triggered\n", gpio);
183 generic_handle_irq(lnw->irq_base + gpio); 202 generic_handle_irq(lnw->irq_base + gpio);
@@ -245,7 +264,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
245 lnw->chip.set = lnw_gpio_set; 264 lnw->chip.set = lnw_gpio_set;
246 lnw->chip.to_irq = lnw_gpio_to_irq; 265 lnw->chip.to_irq = lnw_gpio_to_irq;
247 lnw->chip.base = gpio_base; 266 lnw->chip.base = gpio_base;
248 lnw->chip.ngpio = 64; 267 lnw->chip.ngpio = id->driver_data;
249 lnw->chip.can_sleep = 0; 268 lnw->chip.can_sleep = 0;
250 pci_set_drvdata(pdev, lnw); 269 pci_set_drvdata(pdev, lnw);
251 retval = gpiochip_add(&lnw->chip); 270 retval = gpiochip_add(&lnw->chip);
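
The address arithmetic in gpio_reg() deserves a worked example. For a hypothetical Penwell bank (ngpio = 96, so nreg = 3) and pin 70's direction bit, with GPDR = 1 per the enum above:

/* reg = 70 / 32 = 2, bit = 70 % 32 = 6
 * addr = base + GPDR * nreg * 4 + reg * 4
 *      = base + 1 * 3 * 4 + 2 * 4 = base + 0x14
 * so bit 6 of the register at base + 0x14 is pin 70's GPDR bit.
 */
static void __iomem *reg_for(void __iomem *base, unsigned ngpio,
			     unsigned offset, enum GPIO_REG type)
{
	unsigned nreg = ngpio / 32;

	return base + type * nreg * 4 + (offset / 32) * 4;
}
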
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index f7868243af89..9cad60f9e962 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -17,7 +17,8 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20 20#include <linux/interrupt.h>
21#include <linux/irq.h>
21#include <linux/i2c.h> 22#include <linux/i2c.h>
22#include <linux/i2c/max732x.h> 23#include <linux/i2c/max732x.h>
23 24
@@ -31,7 +32,8 @@
31 * - Open Drain I/O 32 * - Open Drain I/O
32 * 33 *
33 * designated by 'O', 'I' and 'P' individually according to MAXIM's 34 * designated by 'O', 'I' and 'P' individually according to MAXIM's
34 * datasheets. 35 * datasheets. 'I' and 'P' ports are interrupt capable, some with
36 * a dedicated interrupt mask.
35 * 37 *
36 * There are two groups of I/O ports, each group usually includes 38 * There are two groups of I/O ports, each group usually includes
37 * up to 8 I/O ports, and is accessed by a specific I2C address: 39 * up to 8 I/O ports, and is accessed by a specific I2C address:
@@ -44,7 +46,8 @@
44 * 46 *
45 * Within each group of ports, there are five known combinations of 47 * Within each group of ports, there are five known combinations of
46 * I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for 48 * I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for
47 * the detailed organization of these ports. 49 * the detailed organization of these ports. Only Group A is interrupt
50 * capable.
48 * 51 *
49 * GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16', 52 * GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16',
50 * and GPIOs from GROUP_A are numbered before those from GROUP_B 53 * and GPIOs from GROUP_A are numbered before those from GROUP_B
@@ -68,16 +71,47 @@
68#define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */ 71#define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */
69#define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */ 72#define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */
70 73
74#define INT_NONE 0x0 /* No interrupt capability */
75#define INT_NO_MASK 0x1 /* Has interrupts, no mask */
76#define INT_INDEP_MASK 0x2 /* Has interrupts, independent mask */
77#define INT_MERGED_MASK 0x3 /* Has interrupts, merged mask */
78
79#define INT_CAPS(x) (((uint64_t)(x)) << 32)
80
81enum {
82 MAX7319,
83 MAX7320,
84 MAX7321,
85 MAX7322,
86 MAX7323,
87 MAX7324,
88 MAX7325,
89 MAX7326,
90 MAX7327,
91};
92
93static uint64_t max732x_features[] = {
94 [MAX7319] = GROUP_A(IO_8I) | INT_CAPS(INT_MERGED_MASK),
95 [MAX7320] = GROUP_B(IO_8O),
96 [MAX7321] = GROUP_A(IO_8P) | INT_CAPS(INT_NO_MASK),
97 [MAX7322] = GROUP_A(IO_4I4O) | INT_CAPS(INT_MERGED_MASK),
98 [MAX7323] = GROUP_A(IO_4P4O) | INT_CAPS(INT_INDEP_MASK),
99 [MAX7324] = GROUP_A(IO_8I) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
100 [MAX7325] = GROUP_A(IO_8P) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
101 [MAX7326] = GROUP_A(IO_4I4O) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
102 [MAX7327] = GROUP_A(IO_4P4O) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
103};
104
71static const struct i2c_device_id max732x_id[] = { 105static const struct i2c_device_id max732x_id[] = {
72 { "max7319", GROUP_A(IO_8I) }, 106 { "max7319", MAX7319 },
73 { "max7320", GROUP_B(IO_8O) }, 107 { "max7320", MAX7320 },
74 { "max7321", GROUP_A(IO_8P) }, 108 { "max7321", MAX7321 },
75 { "max7322", GROUP_A(IO_4I4O) }, 109 { "max7322", MAX7322 },
76 { "max7323", GROUP_A(IO_4P4O) }, 110 { "max7323", MAX7323 },
77 { "max7324", GROUP_A(IO_8I) | GROUP_B(IO_8O) }, 111 { "max7324", MAX7324 },
78 { "max7325", GROUP_A(IO_8P) | GROUP_B(IO_8O) }, 112 { "max7325", MAX7325 },
79 { "max7326", GROUP_A(IO_4I4O) | GROUP_B(IO_8O) }, 113 { "max7326", MAX7326 },
80 { "max7327", GROUP_A(IO_4P4O) | GROUP_B(IO_8O) }, 114 { "max7327", MAX7327 },
81 { }, 115 { },
82}; 116};
83MODULE_DEVICE_TABLE(i2c, max732x_id); 117MODULE_DEVICE_TABLE(i2c, max732x_id);
@@ -96,9 +130,19 @@ struct max732x_chip {
96 130
97 struct mutex lock; 131 struct mutex lock;
98 uint8_t reg_out[2]; 132 uint8_t reg_out[2];
133
134#ifdef CONFIG_GPIO_MAX732X_IRQ
135 struct mutex irq_lock;
136 int irq_base;
137 uint8_t irq_mask;
138 uint8_t irq_mask_cur;
139 uint8_t irq_trig_raise;
140 uint8_t irq_trig_fall;
141 uint8_t irq_features;
142#endif
99}; 143};
100 144
101static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val) 145static int max732x_writeb(struct max732x_chip *chip, int group_a, uint8_t val)
102{ 146{
103 struct i2c_client *client; 147 struct i2c_client *client;
104 int ret; 148 int ret;
@@ -113,7 +157,7 @@ static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val)
113 return 0; 157 return 0;
114} 158}
115 159
116static int max732x_read(struct max732x_chip *chip, int group_a, uint8_t *val) 160static int max732x_readb(struct max732x_chip *chip, int group_a, uint8_t *val)
117{ 161{
118 struct i2c_client *client; 162 struct i2c_client *client;
119 int ret; 163 int ret;
@@ -142,7 +186,7 @@ static int max732x_gpio_get_value(struct gpio_chip *gc, unsigned off)
142 186
143 chip = container_of(gc, struct max732x_chip, gpio_chip); 187 chip = container_of(gc, struct max732x_chip, gpio_chip);
144 188
145 ret = max732x_read(chip, is_group_a(chip, off), &reg_val); 189 ret = max732x_readb(chip, is_group_a(chip, off), &reg_val);
146 if (ret < 0) 190 if (ret < 0)
147 return 0; 191 return 0;
148 192
@@ -162,7 +206,7 @@ static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
162 reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0]; 206 reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0];
163 reg_out = (val) ? reg_out | mask : reg_out & ~mask; 207 reg_out = (val) ? reg_out | mask : reg_out & ~mask;
164 208
165 ret = max732x_write(chip, is_group_a(chip, off), reg_out); 209 ret = max732x_writeb(chip, is_group_a(chip, off), reg_out);
166 if (ret < 0) 210 if (ret < 0)
167 goto out; 211 goto out;
168 212
@@ -188,6 +232,13 @@ static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
188 return -EACCES; 232 return -EACCES;
189 } 233 }
190 234
235 /*
236 * Open-drain pins must be set to high impedance (which is
237 * equivalent to output-high) to be turned into an input.
238 */
239 if ((mask & chip->dir_output))
240 max732x_gpio_set_value(gc, off, 1);
241
191 return 0; 242 return 0;
192} 243}
193 244
@@ -209,12 +260,278 @@ static int max732x_gpio_direction_output(struct gpio_chip *gc,
209 return 0; 260 return 0;
210} 261}
211 262
263#ifdef CONFIG_GPIO_MAX732X_IRQ
264static int max732x_writew(struct max732x_chip *chip, uint16_t val)
265{
266 int ret;
267
268 val = cpu_to_le16(val);
269
270 ret = i2c_master_send(chip->client_group_a, (char *)&val, 2);
271 if (ret < 0) {
272 dev_err(&chip->client_group_a->dev, "failed writing\n");
273 return ret;
274 }
275
276 return 0;
277}
278
279static int max732x_readw(struct max732x_chip *chip, uint16_t *val)
280{
281 int ret;
282
283 ret = i2c_master_recv(chip->client_group_a, (char *)val, 2);
284 if (ret < 0) {
285 dev_err(&chip->client_group_a->dev, "failed reading\n");
286 return ret;
287 }
288
289 *val = le16_to_cpu(*val);
290 return 0;
291}
292
293static void max732x_irq_update_mask(struct max732x_chip *chip)
294{
295 uint16_t msg;
296
297 if (chip->irq_mask == chip->irq_mask_cur)
298 return;
299
300 chip->irq_mask = chip->irq_mask_cur;
301
302 if (chip->irq_features == INT_NO_MASK)
303 return;
304
305 mutex_lock(&chip->lock);
306
307 switch (chip->irq_features) {
308 case INT_INDEP_MASK:
309 msg = (chip->irq_mask << 8) | chip->reg_out[0];
310 max732x_writew(chip, msg);
311 break;
312
313 case INT_MERGED_MASK:
314 msg = chip->irq_mask | chip->reg_out[0];
315 max732x_writeb(chip, 1, (uint8_t)msg);
316 break;
317 }
318
319 mutex_unlock(&chip->lock);
320}
321
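	/*
	 * Why two write paths above: INT_INDEP_MASK parts take a 16-bit
	 * transfer carrying the output byte plus a separate mask byte,
	 * INT_MERGED_MASK parts fold mask and output bits into a single
	 * byte, and INT_NO_MASK parts have no mask register at all, so
	 * the function returns early for them.
	 */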
322static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
323{
324 struct max732x_chip *chip;
325
326 chip = container_of(gc, struct max732x_chip, gpio_chip);
327 return chip->irq_base + off;
328}
329
330static void max732x_irq_mask(unsigned int irq)
331{
332 struct max732x_chip *chip = get_irq_chip_data(irq);
333
334 chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base));
335}
336
337static void max732x_irq_unmask(unsigned int irq)
338{
339 struct max732x_chip *chip = get_irq_chip_data(irq);
340
341 chip->irq_mask_cur |= 1 << (irq - chip->irq_base);
342}
343
344static void max732x_irq_bus_lock(unsigned int irq)
345{
346 struct max732x_chip *chip = get_irq_chip_data(irq);
347
348 mutex_lock(&chip->irq_lock);
349 chip->irq_mask_cur = chip->irq_mask;
350}
351
352static void max732x_irq_bus_sync_unlock(unsigned int irq)
353{
354 struct max732x_chip *chip = get_irq_chip_data(irq);
355
356 max732x_irq_update_mask(chip);
357 mutex_unlock(&chip->irq_lock);
358}
359
360static int max732x_irq_set_type(unsigned int irq, unsigned int type)
361{
362 struct max732x_chip *chip = get_irq_chip_data(irq);
363 uint16_t off = irq - chip->irq_base;
364 uint16_t mask = 1 << off;
365
366 if (!(mask & chip->dir_input)) {
367 dev_dbg(&chip->client->dev, "%s port %d is output only\n",
368 chip->client->name, off);
369 return -EACCES;
370 }
371
372 if (!(type & IRQ_TYPE_EDGE_BOTH)) {
373 dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
374 irq, type);
375 return -EINVAL;
376 }
377
378 if (type & IRQ_TYPE_EDGE_FALLING)
379 chip->irq_trig_fall |= mask;
380 else
381 chip->irq_trig_fall &= ~mask;
382
383 if (type & IRQ_TYPE_EDGE_RISING)
384 chip->irq_trig_raise |= mask;
385 else
386 chip->irq_trig_raise &= ~mask;
387
388 return max732x_gpio_direction_input(&chip->gpio_chip, off);
389}
390
391static struct irq_chip max732x_irq_chip = {
392 .name = "max732x",
393 .mask = max732x_irq_mask,
394 .unmask = max732x_irq_unmask,
395 .bus_lock = max732x_irq_bus_lock,
396 .bus_sync_unlock = max732x_irq_bus_sync_unlock,
397 .set_type = max732x_irq_set_type,
398};
399
400static uint8_t max732x_irq_pending(struct max732x_chip *chip)
401{
402 uint8_t cur_stat;
403 uint8_t old_stat;
404 uint8_t trigger;
405 uint8_t pending;
406 uint16_t status;
407 int ret;
408
409 ret = max732x_readw(chip, &status);
410 if (ret)
411 return 0;
412
413 trigger = status >> 8;
414 trigger &= chip->irq_mask;
415
416 if (!trigger)
417 return 0;
418
419 cur_stat = status & 0xFF;
420 cur_stat &= chip->irq_mask;
421
422 old_stat = cur_stat ^ trigger;
423
424 pending = (old_stat & chip->irq_trig_fall) |
425 (cur_stat & chip->irq_trig_raise);
426 pending &= trigger;
427
428 return pending;
429}
430
431static irqreturn_t max732x_irq_handler(int irq, void *devid)
432{
433 struct max732x_chip *chip = devid;
434 uint8_t pending;
435 uint8_t level;
436
437 pending = max732x_irq_pending(chip);
438
439 if (!pending)
440 return IRQ_HANDLED;
441
442 do {
443 level = __ffs(pending);
444 handle_nested_irq(level + chip->irq_base);
445
446 pending &= ~(1 << level);
447 } while (pending);
448
449 return IRQ_HANDLED;
450}
451
452static int max732x_irq_setup(struct max732x_chip *chip,
453 const struct i2c_device_id *id)
454{
455 struct i2c_client *client = chip->client;
456 struct max732x_platform_data *pdata = client->dev.platform_data;
457 int has_irq = max732x_features[id->driver_data] >> 32;
458 int ret;
459
460 if (pdata->irq_base && has_irq != INT_NONE) {
461 int lvl;
462
463 chip->irq_base = pdata->irq_base;
464 chip->irq_features = has_irq;
465 mutex_init(&chip->irq_lock);
466
467 for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
468 int irq = lvl + chip->irq_base;
469
470 if (!(chip->dir_input & (1 << lvl)))
471 continue;
472
473 set_irq_chip_data(irq, chip);
474 set_irq_chip_and_handler(irq, &max732x_irq_chip,
475 handle_edge_irq);
476 set_irq_nested_thread(irq, 1);
477#ifdef CONFIG_ARM
478 set_irq_flags(irq, IRQF_VALID);
479#else
480 set_irq_noprobe(irq);
481#endif
482 }
483
484 ret = request_threaded_irq(client->irq,
485 NULL,
486 max732x_irq_handler,
487 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
488 dev_name(&client->dev), chip);
489 if (ret) {
490 dev_err(&client->dev, "failed to request irq %d\n",
491 client->irq);
492 goto out_failed;
493 }
494
495 chip->gpio_chip.to_irq = max732x_gpio_to_irq;
496 }
497
498 return 0;
499
500out_failed:
501 chip->irq_base = 0;
502 return ret;
503}
504
505static void max732x_irq_teardown(struct max732x_chip *chip)
506{
507 if (chip->irq_base)
508 free_irq(chip->client->irq, chip);
509}
510#else /* CONFIG_GPIO_MAX732X_IRQ */
511static int max732x_irq_setup(struct max732x_chip *chip,
512 const struct i2c_device_id *id)
513{
514 struct i2c_client *client = chip->client;
515 struct max732x_platform_data *pdata = client->dev.platform_data;
516 int has_irq = max732x_features[id->driver_data] >> 32;
517
518 if (pdata->irq_base && has_irq != INT_NONE)
519 dev_warn(&client->dev, "interrupt support not compiled in\n");
520
521 return 0;
522}
523
524static void max732x_irq_teardown(struct max732x_chip *chip)
525{
526}
527#endif
528
212static int __devinit max732x_setup_gpio(struct max732x_chip *chip, 529static int __devinit max732x_setup_gpio(struct max732x_chip *chip,
213 const struct i2c_device_id *id, 530 const struct i2c_device_id *id,
214 unsigned gpio_start) 531 unsigned gpio_start)
215{ 532{
216 struct gpio_chip *gc = &chip->gpio_chip; 533 struct gpio_chip *gc = &chip->gpio_chip;
217 uint32_t id_data = id->driver_data; 534 uint32_t id_data = (uint32_t)max732x_features[id->driver_data];
218 int i, port = 0; 535 int i, port = 0;
219 536
220 for (i = 0; i < 16; i++, id_data >>= 2) { 537 for (i = 0; i < 16; i++, id_data >>= 2) {
@@ -285,14 +602,14 @@ static int __devinit max732x_probe(struct i2c_client *client,
285 switch (client->addr & 0x70) { 602 switch (client->addr & 0x70) {
286 case 0x60: 603 case 0x60:
287 chip->client_group_a = client; 604 chip->client_group_a = client;
288 if (nr_port > 7) { 605 if (nr_port > 8) {
289 c = i2c_new_dummy(client->adapter, addr_b); 606 c = i2c_new_dummy(client->adapter, addr_b);
290 chip->client_group_b = chip->client_dummy = c; 607 chip->client_group_b = chip->client_dummy = c;
291 } 608 }
292 break; 609 break;
293 case 0x50: 610 case 0x50:
294 chip->client_group_b = client; 611 chip->client_group_b = client;
295 if (nr_port > 7) { 612 if (nr_port > 8) {
296 c = i2c_new_dummy(client->adapter, addr_a); 613 c = i2c_new_dummy(client->adapter, addr_a);
297 chip->client_group_a = chip->client_dummy = c; 614 chip->client_group_a = chip->client_dummy = c;
298 } 615 }
@@ -306,9 +623,13 @@ static int __devinit max732x_probe(struct i2c_client *client,
 
 	mutex_init(&chip->lock);
 
-	max732x_read(chip, is_group_a(chip, 0), &chip->reg_out[0]);
-	if (nr_port > 7)
-		max732x_read(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+	max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
+	if (nr_port > 8)
+		max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+
+	ret = max732x_irq_setup(chip, id);
+	if (ret)
+		goto out_failed;
 
 	ret = gpiochip_add(&chip->gpio_chip);
 	if (ret)
@@ -325,6 +646,7 @@ static int __devinit max732x_probe(struct i2c_client *client,
 	return 0;
 
 out_failed:
+	max732x_irq_teardown(chip);
 	kfree(chip);
 	return ret;
 }
@@ -352,6 +674,8 @@ static int __devexit max732x_remove(struct i2c_client *client)
 		return ret;
 	}
 
+	max732x_irq_teardown(chip);
+
 	/* unregister any dummy i2c_client */
 	if (chip->client_dummy)
 		i2c_unregister_device(chip->client_dummy);
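The max732x hunks above lean on a max732x_features[] table that sits outside this diff: each 64-bit entry packs an interrupt-capability word into the high 32 bits (compared against INT_NONE in max732x_irq_setup()) and sixteen 2-bit port descriptors into the low 32 bits (peeled off two bits at a time in max732x_setup_gpio()). A minimal standalone sketch of that decoding follows; the feature value is invented purely for illustration and the real encoding lives in the driver source.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical feature word, mirroring the shifts used by the driver:
 * bits 63..32 = interrupt capability, bits 31..0 = 16 x 2-bit port codes. */
#define EXAMPLE_FEATURES 0x0000000100005555ULL /* invented value */

int main(void)
{
	uint64_t features = EXAMPLE_FEATURES;
	uint32_t has_irq = (uint32_t)(features >> 32); /* as in max732x_irq_setup() */
	uint32_t id_data = (uint32_t)features;         /* as in max732x_setup_gpio() */
	int i;

	printf("irq capability word: %u\n", has_irq);
	for (i = 0; i < 16; i++, id_data >>= 2)
		printf("port %2d: code %u\n", i, id_data & 3);
	return 0;
}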
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index b827c976dc62..a2b12aa1f2b9 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -73,7 +73,7 @@ struct pca953x_chip {
 	struct i2c_client *client;
 	struct pca953x_platform_data *dyn_pdata;
 	struct gpio_chip gpio_chip;
-	char **names;
+	const char *const *names;
 };
 
 static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
@@ -449,7 +449,7 @@ pca953x_get_alt_pdata(struct i2c_client *client)
 	struct device_node *node;
 	const uint16_t *val;
 
-	node = dev_archdata_get_node(&client->dev.archdata);
+	node = client->dev.of_node;
 	if (node == NULL)
 		return NULL;
 
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 105701a1f05b..ee568c8fcbd0 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -164,7 +164,7 @@ static int pl061_irq_type(unsigned irq, unsigned trigger)
 	unsigned long flags;
 	u8 gpiois, gpioibe, gpioiev;
 
-	if (offset < 0 || offset > PL061_GPIO_NR)
+	if (offset < 0 || offset >= PL061_GPIO_NR)
 		return -EINVAL;
 
 	spin_lock_irqsave(&chip->irq_lock, flags);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f569ae88ab38..c1981861bbbd 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -147,7 +147,10 @@ drm_edid_block_valid(u8 *raw_edid)
 		csum += raw_edid[i];
 	if (csum) {
 		DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
-		goto bad;
+
+		/* allow CEA to slide through, switches mangle this */
+		if (raw_edid[0] != 0x02)
+			goto bad;
 	}
 
 	/* per-block-type checks */
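The relaxed drm_edid check above keys off byte 0 of the block: 0x02 is the CEA-861 extension tag, and some HDMI switches are known to rewrite such blocks without fixing the checksum. A standalone sketch of the resulting validation rule, with invented sample data:

#include <stdint.h>
#include <stdio.h>

#define CEA_EXT_TAG 0x02 /* CEA-861 extension block tag */

/* a bad checksum is fatal unless the block is a CEA extension,
 * which broken HDMI switches often mangle */
static int block_usable(const uint8_t *block)
{
	uint8_t csum = 0;
	int i;

	for (i = 0; i < 128; i++)
		csum += block[i];
	if (csum == 0)
		return 1;
	return block[0] == CEA_EXT_TAG; /* tolerate mangled CEA blocks */
}

int main(void)
{
	uint8_t fake[128] = { CEA_EXT_TAG, 1 }; /* invented block, bad checksum */
	printf("usable: %d\n", block_usable(fake));
	return 0;
}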
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7e663a79829f..266b0ff441af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -241,7 +241,8 @@ nouveau_connector_detect(struct drm_connector *connector)
 	if (nv_encoder && nv_connector->native_mode) {
 		unsigned status = connector_status_connected;
 
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_ACPI_BUTTON) || \
+	(defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
 		if (!nouveau_ignorelid && !acpi_lid_open())
 			status = connector_status_unknown;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 0616c96e4b67..704a25d04ac9 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -253,7 +253,11 @@ nv40_graph_init(struct drm_device *dev)
 
 	if (!dev_priv->engine.graph.ctxprog) {
 		struct nouveau_grctx ctx = {};
-		uint32_t cp[256];
+		uint32_t *cp;
+
+		cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
+		if (!cp)
+			return -ENOMEM;
 
 		ctx.dev = dev;
 		ctx.mode = NOUVEAU_GRCTX_PROG;
@@ -265,6 +269,8 @@ nv40_graph_init(struct drm_device *dev)
 		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
 		for (i = 0; i < ctx.ctxprog_len; i++)
 			nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+
+		kfree(cp);
 	}
 
 	/* No context present currently */
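The nv40_graph change is a stack-usage fix: 256 * sizeof(uint32_t) is 1 KiB of automatic storage, a large bite out of a kernel stack that is only a few KiB in total. A sketch of the arithmetic and the before/after allocation pattern, with generic names rather than the driver's:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define CP_ENTRIES 256 /* same count as the driver's ctxprog buffer */

int main(void)
{
	/* on-stack variant costs CP_ENTRIES * sizeof(uint32_t) bytes of stack */
	printf("stack cost of uint32_t cp[%d]: %zu bytes\n",
	       CP_ENTRIES, CP_ENTRIES * sizeof(uint32_t));

	/* heap variant, as the patch does with kmalloc()/kfree() */
	uint32_t *cp = malloc(CP_ENTRIES * sizeof(*cp));
	if (!cp)
		return 1; /* the driver returns -ENOMEM here */
	/* ... fill and upload cp[] ... */
	free(cp);
	return 0;
}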
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 03dd6c41dc19..f3f2827017ef 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -707,6 +707,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 		break;
 	case ATOM_DCPLL:
 	case ATOM_PPLL_INVALID:
+	default:
 		pll = &rdev->clock.dcpll;
 		break;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 66a37fb75839..669feb689bfc 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -576,6 +576,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
  */
 int radeon_agp_init(struct radeon_device *rdev);
 void radeon_agp_resume(struct radeon_device *rdev);
+void radeon_agp_suspend(struct radeon_device *rdev);
 void radeon_agp_fini(struct radeon_device *rdev);
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 28e473f1f56f..f40dfb77f9b1 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev)
 	}
 #endif
 }
+
+void radeon_agp_suspend(struct radeon_device *rdev)
+{
+	radeon_agp_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6e733fdc3349..24ea683f7cf5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -680,10 +680,18 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	uint8_t dac;
 	union atom_supported_devices *supported_devices;
 	int i, j, max_device;
-	struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
+	struct bios_connector *bios_connectors;
+	size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
 
-	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+	bios_connectors = kzalloc(bc_size, GFP_KERNEL);
+	if (!bios_connectors)
+		return false;
+
+	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
+				    &data_offset)) {
+		kfree(bios_connectors);
 		return false;
+	}
 
 	supported_devices =
 		(union atom_supported_devices *)(ctx->bios + data_offset);
@@ -851,6 +859,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 	radeon_link_encoder_connector(dev);
 
+	kfree(bios_connectors);
 	return true;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a20b612ffe75..fdc3fdf78acb 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -754,6 +754,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 	/* evict remaining vram memory */
 	radeon_bo_evict_vram(rdev);
 
+	radeon_agp_suspend(rdev);
+
 	pci_save_state(dev->pdev);
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 76ba59b9fea1..132278fa6240 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -347,6 +347,14 @@ config HID_QUANTA
 	---help---
 	Support for Quanta Optical Touch dual-touch panels.
 
+config HID_ROCCAT
+	tristate "Roccat special event support"
+	depends on USB_HID
+	---help---
+	Support for Roccat special events.
+	Say Y here if you have a Roccat mouse or keyboard and want OSD or
+	macro execution support.
+
 config HID_ROCCAT_KONE
 	tristate "Roccat Kone Mouse support"
 	depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 22e47eaeea32..987fa0627367 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_HID_QUANTA) += hid-quanta.o
 obj-$(CONFIG_HID_PANTHERLORD)	+= hid-pl.o
 obj-$(CONFIG_HID_PETALYNX)	+= hid-petalynx.o
 obj-$(CONFIG_HID_PICOLCD)	+= hid-picolcd.o
+obj-$(CONFIG_HID_ROCCAT)	+= hid-roccat.o
 obj-$(CONFIG_HID_ROCCAT_KONE)	+= hid-roccat-kone.o
 obj-$(CONFIG_HID_SAMSUNG)	+= hid-samsung.o
 obj-$(CONFIG_HID_SMARTJOYPLUS)	+= hid-sjoy.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e10e314d38cc..aa0f7dcabcd7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1301,6 +1301,7 @@ static const struct hid_device_id hid_blacklist[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 56f314fbd4f9..c94026768570 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -811,7 +811,7 @@ static const char *relatives[REL_MAX + 1] = {
 	[REL_WHEEL] = "Wheel",		[REL_MISC] = "Misc",
 };
 
-static const char *absolutes[ABS_MAX + 1] = {
+static const char *absolutes[ABS_CNT] = {
 	[ABS_X] = "X",			[ABS_Y] = "Y",
 	[ABS_Z] = "Z",			[ABS_RX] = "Rx",
 	[ABS_RY] = "Ry",		[ABS_RZ] = "Rz",
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index 62416e6baeca..3975e039c3dd 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -73,6 +73,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field,
 static const struct hid_device_id gyration_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, gyration_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9776896cc4fc..6af77ed0b555 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -282,6 +282,7 @@
 #define USB_VENDOR_ID_GYRATION		0x0c16
 #define USB_DEVICE_ID_GYRATION_REMOTE	0x0002
 #define USB_DEVICE_ID_GYRATION_REMOTE_2	0x0003
+#define USB_DEVICE_ID_GYRATION_REMOTE_3	0x0008
 
 #define USB_VENDOR_ID_HAPP		0x078b
 #define USB_DEVICE_ID_UGCI_DRIVING	0x0010
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 66e694054ba2..17f2dc04f883 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -37,6 +37,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include "hid-ids.h"
+#include "hid-roccat.h"
 #include "hid-roccat-kone.h"
 
 static void kone_set_settings_checksum(struct kone_settings *settings)
42static void kone_set_settings_checksum(struct kone_settings *settings) 43static void kone_set_settings_checksum(struct kone_settings *settings)
@@ -263,7 +264,7 @@ static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
 	return 0;
 }
 
-static ssize_t kone_sysfs_read_settings(struct kobject *kobj,
+static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
 	struct device *dev = container_of(kobj, struct device, kobj);
@@ -287,7 +288,7 @@ static ssize_t kone_sysfs_read_settings(struct kobject *kobj,
  * This function keeps values in kone_device up to date and assumes that in
  * case of error the old data is still valid
  */
-static ssize_t kone_sysfs_write_settings(struct kobject *kobj,
+static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
 	struct device *dev = container_of(kobj, struct device, kobj);
@@ -342,31 +343,31 @@ static ssize_t kone_sysfs_read_profilex(struct kobject *kobj,
 	return count;
 }
 
-static ssize_t kone_sysfs_read_profile1(struct kobject *kobj,
+static ssize_t kone_sysfs_read_profile1(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
 	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1);
 }
 
-static ssize_t kone_sysfs_read_profile2(struct kobject *kobj,
+static ssize_t kone_sysfs_read_profile2(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
 	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2);
 }
 
-static ssize_t kone_sysfs_read_profile3(struct kobject *kobj,
+static ssize_t kone_sysfs_read_profile3(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
 	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3);
 }
 
-static ssize_t kone_sysfs_read_profile4(struct kobject *kobj,
+static ssize_t kone_sysfs_read_profile4(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
 	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4);
 }
 
-static ssize_t kone_sysfs_read_profile5(struct kobject *kobj,
+static ssize_t kone_sysfs_read_profile5(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
 	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5);
@@ -404,31 +405,31 @@ static ssize_t kone_sysfs_write_profilex(struct kobject *kobj,
 	return sizeof(struct kone_profile);
 }
 
-static ssize_t kone_sysfs_write_profile1(struct kobject *kobj,
+static ssize_t kone_sysfs_write_profile1(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
 	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1);
 }
 
-static ssize_t kone_sysfs_write_profile2(struct kobject *kobj,
+static ssize_t kone_sysfs_write_profile2(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
 	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2);
 }
 
-static ssize_t kone_sysfs_write_profile3(struct kobject *kobj,
+static ssize_t kone_sysfs_write_profile3(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
 	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3);
 }
 
-static ssize_t kone_sysfs_write_profile4(struct kobject *kobj,
+static ssize_t kone_sysfs_write_profile4(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
 	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4);
 }
 
-static ssize_t kone_sysfs_write_profile5(struct kobject *kobj,
+static ssize_t kone_sysfs_write_profile5(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
 	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5);
@@ -849,6 +850,16 @@ static int kone_init_specials(struct hid_device *hdev)
849 "couldn't init struct kone_device\n"); 850 "couldn't init struct kone_device\n");
850 goto exit_free; 851 goto exit_free;
851 } 852 }
853
854 retval = roccat_connect(hdev);
855 if (retval < 0) {
856 dev_err(&hdev->dev, "couldn't init char dev\n");
857 /* be tolerant about not getting chrdev */
858 } else {
859 kone->roccat_claimed = 1;
860 kone->chrdev_minor = retval;
861 }
862
852 retval = kone_create_sysfs_attributes(intf); 863 retval = kone_create_sysfs_attributes(intf);
853 if (retval) { 864 if (retval) {
854 dev_err(&hdev->dev, "cannot create sysfs files\n"); 865 dev_err(&hdev->dev, "cannot create sysfs files\n");
@@ -868,10 +879,14 @@ exit_free:
 static void kone_remove_specials(struct hid_device *hdev)
 {
 	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+	struct kone_device *kone;
 
 	if (intf->cur_altsetting->desc.bInterfaceProtocol
 			== USB_INTERFACE_PROTOCOL_MOUSE) {
 		kone_remove_sysfs_attributes(intf);
+		kone = hid_get_drvdata(hdev);
+		if (kone->roccat_claimed)
+			roccat_disconnect(kone->chrdev_minor);
 		kfree(hid_get_drvdata(hdev));
 	}
 }
@@ -930,6 +945,37 @@ static void kone_keep_values_up_to_date(struct kone_device *kone,
 	}
 }
 
+static void kone_report_to_chrdev(struct kone_device const *kone,
+		struct kone_mouse_event const *event)
+{
+	struct kone_roccat_report roccat_report;
+
+	switch (event->event) {
+	case kone_mouse_event_switch_profile:
+	case kone_mouse_event_switch_dpi:
+	case kone_mouse_event_osd_profile:
+	case kone_mouse_event_osd_dpi:
+		roccat_report.event = event->event;
+		roccat_report.value = event->value;
+		roccat_report.key = 0;
+		roccat_report_event(kone->chrdev_minor,
+				(uint8_t *)&roccat_report,
+				sizeof(struct kone_roccat_report));
+		break;
+	case kone_mouse_event_call_overlong_macro:
+		if (event->value == kone_keystroke_action_press) {
+			roccat_report.event = kone_mouse_event_call_overlong_macro;
+			roccat_report.value = kone->actual_profile;
+			roccat_report.key = event->macro_key;
+			roccat_report_event(kone->chrdev_minor,
+					(uint8_t *)&roccat_report,
+					sizeof(struct kone_roccat_report));
+		}
+		break;
+	}
+
+}
+
 /*
  * Is called for the keyboard part and the mouse part.
  * Only the mouse part gets information about special events in its extended event
@@ -958,6 +1004,9 @@ static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
 
 	kone_keep_values_up_to_date(kone, event);
 
+	if (kone->roccat_claimed)
+		kone_report_to_chrdev(kone, event);
+
 	return 0; /* always do further processing */
 }
 
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
index b413b10a7f8a..003e6f81c195 100644
--- a/drivers/hid/hid-roccat-kone.h
+++ b/drivers/hid/hid-roccat-kone.h
@@ -189,6 +189,12 @@ enum kone_commands {
 	kone_command_firmware = 0xe5a
 };
 
+struct kone_roccat_report {
+	uint8_t event;
+	uint8_t value; /* holds dpi or profile value */
+	uint8_t key; /* macro key on overlong macro execution */
+};
+
 #pragma pack(pop)
 
 struct kone_device {
@@ -219,6 +225,9 @@ struct kone_device {
 	 * so it's read only once
 	 */
 	int firmware_version;
+
+	int roccat_claimed;
+	int chrdev_minor;
 };
 
 #endif
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
new file mode 100644
index 000000000000..e05d48edb66f
--- /dev/null
+++ b/drivers/hid/hid-roccat.c
@@ -0,0 +1,428 @@
1/*
2 * Roccat driver for Linux
3 *
4 * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
5 */
6
7/*
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 */
13
14/*
15 * Module roccat is a char device used to report special events of roccat
16 * hardware to userland. These events include requests for on-screen-display of
17 * profile or dpi settings or requests for execution of macro sequences that are
18 * not stored in device. The information in these events depends on hid device
19 * implementation and contains data that is not available in a single hid event
20 * or else hidraw could have been used.
21 * It is inspired by hidraw, but uses only one circular buffer for all readers.
22 */
23
24#include <linux/cdev.h>
25#include <linux/poll.h>
26#include <linux/sched.h>
27
28#include "hid-roccat.h"
29
30#define ROCCAT_FIRST_MINOR 0
31#define ROCCAT_MAX_DEVICES 8
32
33/* should be a power of 2 for performance reasons */
34#define ROCCAT_CBUF_SIZE 16
35
36struct roccat_report {
37 uint8_t *value;
38 int len;
39};
40
41struct roccat_device {
42 unsigned int minor;
43 int open;
44 int exist;
45 wait_queue_head_t wait;
46 struct device *dev;
47 struct hid_device *hid;
48 struct list_head readers;
49 /* protects modifications of readers list */
50 struct mutex readers_lock;
51
52 /*
53 * circular_buffer has one writer and multiple readers with their own
54 * read pointers
55 */
56 struct roccat_report cbuf[ROCCAT_CBUF_SIZE];
57 int cbuf_end;
58 struct mutex cbuf_lock;
59};
60
61struct roccat_reader {
62 struct list_head node;
63 struct roccat_device *device;
64 int cbuf_start;
65};
66
67static int roccat_major;
68static struct class *roccat_class;
69static struct cdev roccat_cdev;
70
71static struct roccat_device *devices[ROCCAT_MAX_DEVICES];
72/* protects modifications of devices array */
73static DEFINE_MUTEX(devices_lock);
74
75static ssize_t roccat_read(struct file *file, char __user *buffer,
76 size_t count, loff_t *ppos)
77{
78 struct roccat_reader *reader = file->private_data;
79 struct roccat_device *device = reader->device;
80 struct roccat_report *report;
81 ssize_t retval = 0, len;
82 DECLARE_WAITQUEUE(wait, current);
83
84 mutex_lock(&device->cbuf_lock);
85
86 /* no data? */
87 if (reader->cbuf_start == device->cbuf_end) {
88 add_wait_queue(&device->wait, &wait);
89 set_current_state(TASK_INTERRUPTIBLE);
90
91 /* wait for data */
92 while (reader->cbuf_start == device->cbuf_end) {
93 if (file->f_flags & O_NONBLOCK) {
94 retval = -EAGAIN;
95 break;
96 }
97 if (signal_pending(current)) {
98 retval = -ERESTARTSYS;
99 break;
100 }
101 if (!device->exist) {
102 retval = -EIO;
103 break;
104 }
105
106 mutex_unlock(&device->cbuf_lock);
107 schedule();
108 mutex_lock(&device->cbuf_lock);
109 set_current_state(TASK_INTERRUPTIBLE);
110 }
111
112 set_current_state(TASK_RUNNING);
113 remove_wait_queue(&device->wait, &wait);
114 }
115
116 /* here we either have data or a reason to return if retval is set */
117 if (retval)
118 goto exit_unlock;
119
120 report = &device->cbuf[reader->cbuf_start];
121 /*
122 * If report is larger than requested amount of data, rest of report
123 * is lost!
124 */
125 len = report->len > count ? count : report->len;
126
127 if (copy_to_user(buffer, report->value, len)) {
128 retval = -EFAULT;
129 goto exit_unlock;
130 }
131 retval += len;
132 reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
133
134exit_unlock:
135 mutex_unlock(&device->cbuf_lock);
136 return retval;
137}
138
139static unsigned int roccat_poll(struct file *file, poll_table *wait)
140{
141 struct roccat_reader *reader = file->private_data;
142 poll_wait(file, &reader->device->wait, wait);
143 if (reader->cbuf_start != reader->device->cbuf_end)
144 return POLLIN | POLLRDNORM;
145 if (!reader->device->exist)
146 return POLLERR | POLLHUP;
147 return 0;
148}
149
150static int roccat_open(struct inode *inode, struct file *file)
151{
152 unsigned int minor = iminor(inode);
153 struct roccat_reader *reader;
154 struct roccat_device *device;
155 int error = 0;
156
157 reader = kzalloc(sizeof(struct roccat_reader), GFP_KERNEL);
158 if (!reader)
159 return -ENOMEM;
160
161 mutex_lock(&devices_lock);
162
163	device = devices[minor];
164
165	if (!device) {
166		mutex_unlock(&devices_lock);
167		kfree(reader);
168		printk(KERN_EMERG "roccat device with minor %d doesn't exist\n", minor);
169		return -ENODEV;
170	}
171
172	mutex_lock(&device->readers_lock);
173
174 if (!device->open++) {
175 /* power on device on adding first reader */
176 if (device->hid->ll_driver->power) {
177 error = device->hid->ll_driver->power(device->hid,
178 PM_HINT_FULLON);
179 if (error < 0) {
180 --device->open;
181 goto exit_unlock;
182 }
183 }
184 error = device->hid->ll_driver->open(device->hid);
185 if (error < 0) {
186 if (device->hid->ll_driver->power)
187 device->hid->ll_driver->power(device->hid,
188 PM_HINT_NORMAL);
189 --device->open;
190 goto exit_unlock;
191 }
192 }
193
194 reader->device = device;
195 /* new reader doesn't get old events */
196 reader->cbuf_start = device->cbuf_end;
197
198 list_add_tail(&reader->node, &device->readers);
199 file->private_data = reader;
200
201exit_unlock:
202 mutex_unlock(&device->readers_lock);
203 mutex_unlock(&devices_lock);
204 return error;
205}
206
207static int roccat_release(struct inode *inode, struct file *file)
208{
209 unsigned int minor = iminor(inode);
210 struct roccat_reader *reader = file->private_data;
211 struct roccat_device *device;
212
213 mutex_lock(&devices_lock);
214
215 device = devices[minor];
216 if (!device) {
217 mutex_unlock(&devices_lock);
218 printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
219 minor);
220 return -ENODEV;
221 }
222
223 mutex_lock(&device->readers_lock);
224 list_del(&reader->node);
225 mutex_unlock(&device->readers_lock);
226 kfree(reader);
227
228 if (!--device->open) {
229 /* removing last reader */
230 if (device->exist) {
231 if (device->hid->ll_driver->power)
232 device->hid->ll_driver->power(device->hid,
233 PM_HINT_NORMAL);
234 device->hid->ll_driver->close(device->hid);
235 } else {
236 kfree(device);
237 }
238 }
239
240 mutex_unlock(&devices_lock);
241
242 return 0;
243}
244
245/*
246 * roccat_report_event() - output data to readers
247 * @minor: minor device number returned by roccat_connect()
248 * @data: pointer to data
249 * @len: size of data
250 *
251 * Return value is zero on success, a negative error code on failure.
252 *
253 * This is called from interrupt handler.
254 */
255int roccat_report_event(int minor, u8 const *data, int len)
256{
257 struct roccat_device *device;
258 struct roccat_reader *reader;
259 struct roccat_report *report;
260 uint8_t *new_value;
261
262 new_value = kmemdup(data, len, GFP_ATOMIC);
263 if (!new_value)
264 return -ENOMEM;
265
266 device = devices[minor];
267
268 report = &device->cbuf[device->cbuf_end];
269
270 /* passing NULL is safe */
271 kfree(report->value);
272
273 report->value = new_value;
274 report->len = len;
275 device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE;
276
277 list_for_each_entry(reader, &device->readers, node) {
278 /*
279 * As we already inserted one element, the buffer can't be
280 * empty. If start and end are equal, buffer is full and we
281 * increase start, so that slow reader misses one event, but
282 * gets the newer ones in the right order.
283 */
284 if (reader->cbuf_start == device->cbuf_end)
285 reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
286 }
287
288 wake_up_interruptible(&device->wait);
289 return 0;
290}
291EXPORT_SYMBOL_GPL(roccat_report_event);
292
293/*
294 * roccat_connect() - create a char device for special event output
295 * @hid: the hid device the char device should be connected to.
296 *
297 * Return value is the minor device number in the range [0, ROCCAT_MAX_DEVICES-1] on
298 * success, a negative error code on failure.
299 */
300int roccat_connect(struct hid_device *hid)
301{
302 unsigned int minor;
303 struct roccat_device *device;
304 int temp;
305
306 device = kzalloc(sizeof(struct roccat_device), GFP_KERNEL);
307 if (!device)
308 return -ENOMEM;
309
310 mutex_lock(&devices_lock);
311
312 for (minor = 0; minor < ROCCAT_MAX_DEVICES; ++minor) {
313 if (devices[minor])
314 continue;
315 break;
316 }
317
318 if (minor < ROCCAT_MAX_DEVICES) {
319 devices[minor] = device;
320 } else {
321 mutex_unlock(&devices_lock);
322 kfree(device);
323 return -EINVAL;
324 }
325
326 device->dev = device_create(roccat_class, &hid->dev,
327 MKDEV(roccat_major, minor), NULL,
328 "%s%s%d", "roccat", hid->driver->name, minor);
329
330 if (IS_ERR(device->dev)) {
331 devices[minor] = NULL;
332 mutex_unlock(&devices_lock);
333 temp = PTR_ERR(device->dev);
334 kfree(device);
335 return temp;
336 }
337
338 mutex_unlock(&devices_lock);
339
340 init_waitqueue_head(&device->wait);
341 INIT_LIST_HEAD(&device->readers);
342 mutex_init(&device->readers_lock);
343 mutex_init(&device->cbuf_lock);
344 device->minor = minor;
345 device->hid = hid;
346 device->exist = 1;
347 device->cbuf_end = 0;
348
349 return minor;
350}
351EXPORT_SYMBOL_GPL(roccat_connect);
352
353/* roccat_disconnect() - remove char device from hid device
354 * @minor: the minor device number returned by roccat_connect()
355 */
356void roccat_disconnect(int minor)
357{
358 struct roccat_device *device;
359
360 mutex_lock(&devices_lock);
361 device = devices[minor];
362 devices[minor] = NULL;
363 mutex_unlock(&devices_lock);
364
365 device->exist = 0; /* TODO exist maybe not needed */
366
367 device_destroy(roccat_class, MKDEV(roccat_major, minor));
368
369 if (device->open) {
370 device->hid->ll_driver->close(device->hid);
371 wake_up_interruptible(&device->wait);
372 } else {
373 kfree(device);
374 }
375}
376EXPORT_SYMBOL_GPL(roccat_disconnect);
377
378static const struct file_operations roccat_ops = {
379 .owner = THIS_MODULE,
380 .read = roccat_read,
381 .poll = roccat_poll,
382 .open = roccat_open,
383 .release = roccat_release,
384};
385
386static int __init roccat_init(void)
387{
388 int retval;
389 dev_t dev_id;
390
391 retval = alloc_chrdev_region(&dev_id, ROCCAT_FIRST_MINOR,
392 ROCCAT_MAX_DEVICES, "roccat");
393
394 roccat_major = MAJOR(dev_id);
395
396 if (retval < 0) {
397 printk(KERN_WARNING "roccat: can't get major number\n");
398 return retval;
399 }
400
401 roccat_class = class_create(THIS_MODULE, "roccat");
402 if (IS_ERR(roccat_class)) {
403 retval = PTR_ERR(roccat_class);
404 unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
405 return retval;
406 }
407
408 cdev_init(&roccat_cdev, &roccat_ops);
409 cdev_add(&roccat_cdev, dev_id, ROCCAT_MAX_DEVICES);
410
411 return 0;
412}
413
414static void __exit roccat_exit(void)
415{
416 dev_t dev_id = MKDEV(roccat_major, 0);
417
418 cdev_del(&roccat_cdev);
419 class_destroy(roccat_class);
420 unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
421}
422
423module_init(roccat_init);
424module_exit(roccat_exit);
425
426MODULE_AUTHOR("Stefan Achatz");
427MODULE_DESCRIPTION("USB Roccat char device");
428MODULE_LICENSE("GPL v2");
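To illustrate how the char device added above is meant to be consumed: a reader open()s the roccat node and read()s fixed-size event records (for the Kone, struct kone_roccat_report). A minimal userland sketch follows; the device path is an assumption derived from the device_create() name format ("roccat" + hid driver name + minor), and the struct layout is copied from hid-roccat-kone.h.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* mirrors struct kone_roccat_report from hid-roccat-kone.h */
struct kone_roccat_report {
	uint8_t event;
	uint8_t value; /* dpi or profile value */
	uint8_t key;   /* macro key on overlong macro execution */
} __attribute__((packed));

int main(void)
{
	/* hypothetical node name, per the driver's "%s%s%d" format */
	int fd = open("/dev/roccatkone0", O_RDONLY);
	struct kone_roccat_report report;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* blocks until the driver queues an event; a short read would
	 * silently truncate the record, as the driver comment warns */
	while (read(fd, &report, sizeof(report)) == sizeof(report))
		printf("event 0x%02x value %u key %u\n",
		       report.event, report.value, report.key);
	close(fd);
	return 0;
}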
diff --git a/drivers/hid/hid-roccat.h b/drivers/hid/hid-roccat.h
new file mode 100644
index 000000000000..d8aae0c1fa7e
--- /dev/null
+++ b/drivers/hid/hid-roccat.h
@@ -0,0 +1,31 @@
1#ifndef __HID_ROCCAT_H
2#define __HID_ROCCAT_H
3
4/*
5 * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
6 */
7
8/*
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 */
14
15#include <linux/hid.h>
16#include <linux/types.h>
17
18#if defined(CONFIG_HID_ROCCAT) || defined(CONFIG_HID_ROCCAT_MODULE)
19int roccat_connect(struct hid_device *hid);
20void roccat_disconnect(int minor);
21int roccat_report_event(int minor, u8 const *data, int len);
22#else
23static inline int roccat_connect(struct hid_device *hid) { return -1; }
24static inline void roccat_disconnect(int minor) {}
25static inline int roccat_report_event(int minor, u8 const *data, int len)
26{
27 return 0;
28}
29#endif
30
31#endif
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9be8e1754a0b..6a9ac754ca5d 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -802,6 +802,15 @@ config SENSORS_ADS7828
 	  This driver can also be built as a module. If so, the module
 	  will be called ads7828.
 
+config SENSORS_ADS7871
+	tristate "Texas Instruments ADS7871 A/D converter"
+	depends on SPI
+	help
+	  If you say yes here you get support for TI ADS7871 & ADS7870
+
+	  This driver can also be built as a module. If so, the module
+	  will be called ads7871.
+
 config SENSORS_AMC6821
 	tristate "Texas Instruments AMC6821"
 	depends on I2C && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 4aa1a3d112ad..86920fb34118 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
 obj-$(CONFIG_SENSORS_ADM1031)	+= adm1031.o
 obj-$(CONFIG_SENSORS_ADM9240)	+= adm9240.o
 obj-$(CONFIG_SENSORS_ADS7828)	+= ads7828.o
+obj-$(CONFIG_SENSORS_ADS7871)	+= ads7871.o
 obj-$(CONFIG_SENSORS_ADT7411)	+= adt7411.o
 obj-$(CONFIG_SENSORS_ADT7462)	+= adt7462.o
 obj-$(CONFIG_SENSORS_ADT7470)	+= adt7470.o
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
new file mode 100644
index 000000000000..b300a2048af1
--- /dev/null
+++ b/drivers/hwmon/ads7871.c
@@ -0,0 +1,253 @@
1/*
2 * ads7871 - driver for TI ADS7871 A/D converter
3 *
4 * Copyright (c) 2010 Paul Thomas <pthomas8589@gmail.com>
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 or
13 * later as published by the Free Software Foundation.
14 *
15 * You need to have something like this in struct spi_board_info
16 * {
17 * .modalias = "ads7871",
18 * .max_speed_hz = 2*1000*1000,
19 * .chip_select = 0,
20 * .bus_num = 1,
21 * },
22 */
23
24/*From figure 18 in the datasheet*/
25/*Register addresses*/
26#define REG_LS_BYTE 0 /*A/D Output Data, LS Byte*/
27#define REG_MS_BYTE 1 /*A/D Output Data, MS Byte*/
28#define REG_PGA_VALID 2 /*PGA Valid Register*/
29#define REG_AD_CONTROL 3 /*A/D Control Register*/
30#define REG_GAIN_MUX 4 /*Gain/Mux Register*/
31#define REG_IO_STATE 5 /*Digital I/O State Register*/
32#define REG_IO_CONTROL 6 /*Digital I/O Control Register*/
33#define REG_OSC_CONTROL 7 /*Rev/Oscillator Control Register*/
34#define REG_SER_CONTROL 24 /*Serial Interface Control Register*/
35#define REG_ID 31 /*ID Register*/
36
37/*From figure 17 in the datasheet
38* These bits get ORed with the address to form
39* the instruction byte */
40/*Instruction Bit masks*/
41#define INST_MODE_bm (1<<7)
42#define INST_READ_bm (1<<6)
43#define INST_16BIT_bm (1<<5)
44
45/*From figure 18 in the datasheet*/
46/*bit masks for Rev/Oscillator Control Register*/
47#define MUX_CNV_bv 7
48#define MUX_CNV_bm (1<<MUX_CNV_bv)
49#define MUX_M3_bm (1<<3) /*M3 selects single ended*/
50#define MUX_G_bv 4 /*allows for reg = (gain << MUX_G_bv) | ...*/
51
52/*From figure 18 in the datasheet*/
53/*bit masks for Rev/Oscillator Control Register*/
54#define OSC_OSCR_bm (1<<5)
55#define OSC_OSCE_bm (1<<4)
56#define OSC_REFE_bm (1<<3)
57#define OSC_BUFE_bm (1<<2)
58#define OSC_R2V_bm (1<<1)
59#define OSC_RBG_bm (1<<0)
60
61#include <linux/module.h>
62#include <linux/init.h>
63#include <linux/spi/spi.h>
64#include <linux/hwmon.h>
65#include <linux/hwmon-sysfs.h>
66#include <linux/err.h>
67#include <linux/mutex.h>
68#include <linux/delay.h>
69
70#define DEVICE_NAME "ads7871"
71
72struct ads7871_data {
73 struct device *hwmon_dev;
74 struct mutex update_lock;
75};
76
77static int ads7871_read_reg8(struct spi_device *spi, int reg)
78{
79 int ret;
80 reg = reg | INST_READ_bm;
81 ret = spi_w8r8(spi, reg);
82 return ret;
83}
84
85static int ads7871_read_reg16(struct spi_device *spi, int reg)
86{
87 int ret;
88 reg = reg | INST_READ_bm | INST_16BIT_bm;
89 ret = spi_w8r16(spi, reg);
90 return ret;
91}
92
93static int ads7871_write_reg8(struct spi_device *spi, int reg, u8 val)
94{
95 u8 tmp[2] = {reg, val};
96 return spi_write(spi, tmp, sizeof(tmp));
97}
98
99static ssize_t show_voltage(struct device *dev,
100 struct device_attribute *da, char *buf)
101{
102 struct spi_device *spi = to_spi_device(dev);
103 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
104 int ret, val, i = 0;
105 uint8_t channel, mux_cnv;
106
107 channel = attr->index;
108 /*TODO: add support for conversions
109 *other than single ended with a gain of 1*/
110 /*MUX_M3_bm forces single ended*/
111 /*This is also where the gain of the PGA would be set*/
112 ads7871_write_reg8(spi, REG_GAIN_MUX,
113 (MUX_CNV_bm | MUX_M3_bm | channel));
114
115 ret = ads7871_read_reg8(spi, REG_GAIN_MUX);
116 mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv);
117 /*on 400MHz arm9 platform the conversion
118 *is already done when we do this test*/
119 while ((i < 2) && mux_cnv) {
120 i++;
121 ret = ads7871_read_reg8(spi, REG_GAIN_MUX);
122 mux_cnv = ((ret & MUX_CNV_bm)>>MUX_CNV_bv);
123 msleep_interruptible(1);
124 }
125
126 if (mux_cnv == 0) {
127 val = ads7871_read_reg16(spi, REG_LS_BYTE);
128 /*result in volts*10000 = (val/8192)*2.5*10000*/
129 val = ((val>>2) * 25000) / 8192;
130 return sprintf(buf, "%d\n", val);
131 } else {
132 return -1;
133 }
134}
135
136static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0);
137static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1);
138static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2);
139static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_voltage, NULL, 3);
140static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_voltage, NULL, 4);
141static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5);
142static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6);
143static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7);
144
145static struct attribute *ads7871_attributes[] = {
146 &sensor_dev_attr_in0_input.dev_attr.attr,
147 &sensor_dev_attr_in1_input.dev_attr.attr,
148 &sensor_dev_attr_in2_input.dev_attr.attr,
149 &sensor_dev_attr_in3_input.dev_attr.attr,
150 &sensor_dev_attr_in4_input.dev_attr.attr,
151 &sensor_dev_attr_in5_input.dev_attr.attr,
152 &sensor_dev_attr_in6_input.dev_attr.attr,
153 &sensor_dev_attr_in7_input.dev_attr.attr,
154 NULL
155};
156
157static const struct attribute_group ads7871_group = {
158 .attrs = ads7871_attributes,
159};
160
161static int __devinit ads7871_probe(struct spi_device *spi)
162{
163	int ret, err = 0;
164 uint8_t val;
165 struct ads7871_data *pdata;
166
167 dev_dbg(&spi->dev, "probe\n");
168
169 pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
170 if (!pdata) {
171 err = -ENOMEM;
172 goto exit;
173 }
174
175	err = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
176	if (err < 0)
177		goto error_free;
178
179 pdata->hwmon_dev = hwmon_device_register(&spi->dev);
180 if (IS_ERR(pdata->hwmon_dev)) {
181 err = PTR_ERR(pdata->hwmon_dev);
182 goto error_remove;
183 }
184
185 spi_set_drvdata(spi, pdata);
186
187 /* Configure the SPI bus */
188 spi->mode = (SPI_MODE_0);
189 spi->bits_per_word = 8;
190 spi_setup(spi);
191
192 ads7871_write_reg8(spi, REG_SER_CONTROL, 0);
193 ads7871_write_reg8(spi, REG_AD_CONTROL, 0);
194
195 val = (OSC_OSCR_bm | OSC_OSCE_bm | OSC_REFE_bm | OSC_BUFE_bm);
196 ads7871_write_reg8(spi, REG_OSC_CONTROL, val);
197 ret = ads7871_read_reg8(spi, REG_OSC_CONTROL);
198
199 dev_dbg(&spi->dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret);
200 /*because there is no other error checking on an SPI bus
201 we need to make sure we really have a chip*/
202 if (val != ret) {
203 err = -ENODEV;
204 goto error_remove;
205 }
206
207 return 0;
208
209error_remove:
210 sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
211error_free:
212 kfree(pdata);
213exit:
214 return err;
215}
216
217static int __devexit ads7871_remove(struct spi_device *spi)
218{
219 struct ads7871_data *pdata = spi_get_drvdata(spi);
220
221 hwmon_device_unregister(pdata->hwmon_dev);
222 sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
223 kfree(pdata);
224 return 0;
225}
226
227static struct spi_driver ads7871_driver = {
228 .driver = {
229 .name = DEVICE_NAME,
230 .bus = &spi_bus_type,
231 .owner = THIS_MODULE,
232 },
233
234 .probe = ads7871_probe,
235 .remove = __devexit_p(ads7871_remove),
236};
237
238static int __init ads7871_init(void)
239{
240 return spi_register_driver(&ads7871_driver);
241}
242
243static void __exit ads7871_exit(void)
244{
245 spi_unregister_driver(&ads7871_driver);
246}
247
248module_init(ads7871_init);
249module_exit(ads7871_exit);
250
251MODULE_AUTHOR("Paul Thomas <pthomas8589@gmail.com>");
252MODULE_DESCRIPTION("TI ADS7871 A/D driver");
253MODULE_LICENSE("GPL");
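The scaling in show_voltage() above is worth spelling out: the ADC result sits in the upper 14 bits of the 16-bit register read, so it is shifted right by 2, and the driver's comment gives the reported unit as volts * 10000 for a 2.5 V reference over 8192 codes. A standalone restatement of that arithmetic, with an invented raw sample:

#include <stdio.h>

int main(void)
{
	int raw = 0x4cc0;   /* invented 16-bit register value */
	int val = raw >> 2; /* ADC code lives in the top 14 bits */

	/* driver formula: volts * 10000 = (val / 8192) * 2.5 * 10000 */
	val = (val * 25000) / 8192;
	printf("%d (i.e. %d.%04d V)\n", val, val / 10000, val % 10000);
	return 0;
}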
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index e9b7fbc5a447..2988da150ed6 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -241,6 +241,55 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
 	return tjmax;
 }
 
+static int __devinit get_tjmax(struct cpuinfo_x86 *c, u32 id,
+			       struct device *dev)
+{
+	/* The 100C is default for both mobile and non mobile CPUs */
+	int err;
+	u32 eax, edx;
+	u32 val;
+
+	/* A new feature of current Intel(R) processors, the
+	   IA32_TEMPERATURE_TARGET contains the TjMax value */
+	err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+	if (err) {
+		dev_warn(dev, "Unable to read TjMax from CPU.\n");
+	} else {
+		val = (eax >> 16) & 0xff;
+		/*
+		 * If the TjMax is not plausible, an assumption
+		 * will be used
+		 */
+		if ((val > 80) && (val < 120)) {
+			dev_info(dev, "TjMax is %d C.\n", val);
+			return val * 1000;
+		}
+	}
+
+	/*
+	 * An assumption is made for early CPUs and unreadable MSR.
+	 * NOTE: the given value may not be correct.
+	 */
+
+	switch (c->x86_model) {
+	case 0xe:
+	case 0xf:
+	case 0x16:
+	case 0x1a:
+		dev_warn(dev, "TjMax is assumed as 100 C!\n");
+		return 100000;
+	case 0x17:
+	case 0x1c:	/* Atom CPUs */
+		return adjust_tjmax(c, id, dev);
+	default:
+		dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
+			" using default TjMax of 100C.\n", c->x86_model);
+		return 100000;
+	}
+}
+
 static int __devinit coretemp_probe(struct platform_device *pdev)
 {
 	struct coretemp_data *data;
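A worked example of the extraction in get_tjmax() above: IA32_TEMPERATURE_TARGET reports TjMax in bits 23:16 of EAX, so a CPU returning eax = 0x00640000 yields (0x00640000 >> 16) & 0xff = 0x64 = 100, i.e. a TjMax of 100000 millidegrees. A sketch of just the bit manipulation, with an invented MSR value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t eax = 0x00640000; /* invented IA32_TEMPERATURE_TARGET low word */
	uint32_t val = (eax >> 16) & 0xff;

	/* the driver's plausibility window before trusting the MSR */
	if (val > 80 && val < 120)
		printf("TjMax = %u C (%u millidegrees)\n", val, val * 1000);
	else
		printf("implausible TjMax, falling back to model table\n");
	return 0;
}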
@@ -283,14 +332,18 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
 		}
 	}
 
-	data->tjmax = adjust_tjmax(c, data->id, &pdev->dev);
+	data->tjmax = get_tjmax(c, data->id, &pdev->dev);
 	platform_set_drvdata(pdev, data);
 
-	/* read the still undocumented IA32_TEMPERATURE_TARGET it exists
-	   on older CPUs but not in this register, Atoms don't have it either */
+	/*
+	 * read the still undocumented IA32_TEMPERATURE_TARGET. It exists
+	 * on older CPUs but not in this register,
+	 * Atoms don't have it either.
+	 */
 
 	if ((c->x86_model > 0xe) && (c->x86_model != 0x1c)) {
-		err = rdmsr_safe_on_cpu(data->id, 0x1a2, &eax, &edx);
+		err = rdmsr_safe_on_cpu(data->id, MSR_IA32_TEMPERATURE_TARGET,
+				&eax, &edx);
 		if (err) {
 			dev_warn(&pdev->dev, "Unable to read"
 				" IA32_TEMPERATURE_TARGET MSR\n");
@@ -451,28 +504,20 @@ static int __init coretemp_init(void)
 
 	for_each_online_cpu(i) {
 		struct cpuinfo_x86 *c = &cpu_data(i);
+		/*
+		 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+		 * sensors. We check this bit only, all the early CPUs
+		 * without thermal sensors will be filtered out.
+		 */
+		if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) {
+			err = coretemp_device_add(i);
+			if (err)
+				goto exit_devices_unreg;
 
-		/* check if family 6, models 0xe (Pentium M DC),
-		   0xf (Core 2 DC 65nm), 0x16 (Core 2 SC 65nm),
-		   0x17 (Penryn 45nm), 0x1a (Nehalem), 0x1c (Atom),
-		   0x1e (Lynnfield) */
-		if ((c->cpuid_level < 0) || (c->x86 != 0x6) ||
-		    !((c->x86_model == 0xe) || (c->x86_model == 0xf) ||
-			(c->x86_model == 0x16) || (c->x86_model == 0x17) ||
-			(c->x86_model == 0x1a) || (c->x86_model == 0x1c) ||
-			(c->x86_model == 0x1e))) {
-
-			/* supported CPU not found, but report the unknown
-			   family 6 CPU */
-			if ((c->x86 == 0x6) && (c->x86_model > 0xf))
-				printk(KERN_WARNING DRVNAME ": Unknown CPU "
-					"model 0x%x\n", c->x86_model);
-			continue;
+		} else {
+			printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
+				" has no thermal sensor.\n", c->x86_model);
 		}
-
-		err = coretemp_device_add(i);
-		if (err)
-			goto exit_devices_unreg;
 	}
 	if (list_empty(&pdev_list)) {
 		err = -ENODEV;
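The new detection in coretemp_init() keys off a single architectural bit: CPUID leaf 0x06, EAX bit 0 (digital temperature sensor). A hedged userspace equivalent using GCC's cpuid helper rather than the kernel's cpuid_eax():

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	/* leaf 6: thermal and power management; EAX[0] = digital sensor */
	if (__get_cpuid(0x06, &eax, &ebx, &ecx, &edx) && (eax & 0x01))
		printf("CPU has a digital thermal sensor\n");
	else
		printf("no thermal sensor reported\n");
	return 0;
}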
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 0627f7a5b9b8..b7ca2a9676cf 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -38,6 +38,7 @@
 #include <linux/i2c.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
+#include <linux/smp_lock.h>
 #include <linux/err.h>
 #include <linux/mutex.h>
 #include <linux/sysfs.h>
@@ -847,8 +848,7 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf,
 	return count;
 }
 
-static int watchdog_ioctl(struct inode *inode, struct file *filp,
-	unsigned int cmd, unsigned long arg)
+static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	static struct watchdog_info ident = {
 		.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
@@ -858,6 +858,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
 	int i, ret = 0;
 	struct fschmd_data *data = filp->private_data;
 
+	lock_kernel();
 	switch (cmd) {
 	case WDIOC_GETSUPPORT:
 		ident.firmware_version = data->revision;
@@ -914,7 +915,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
 	default:
 		ret = -ENOTTY;
 	}
-
+	unlock_kernel();
 	return ret;
 }
 
@@ -924,7 +925,7 @@ static const struct file_operations watchdog_fops = {
 	.open = watchdog_open,
 	.release = watchdog_release,
 	.write = watchdog_write,
-	.ioctl = watchdog_ioctl,
+	.unlocked_ioctl = watchdog_ioctl,
 };
 
 
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index b2f2277cad3c..6138f036b159 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -41,6 +41,8 @@
 
 /* joystick device poll interval in milliseconds */
 #define MDPS_POLL_INTERVAL 50
+#define MDPS_POLL_MIN	   0
+#define MDPS_POLL_MAX	   2000
 /*
  * The sensor can also generate interrupts (DRDY) but it's pretty pointless
  * because they are generated even if the data do not change. So it's better
@@ -121,11 +123,9 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
 	int position[3];
 	int i;
 
-	mutex_lock(&lis3->mutex);
 	position[0] = lis3->read_data(lis3, OUTX);
 	position[1] = lis3->read_data(lis3, OUTY);
 	position[2] = lis3->read_data(lis3, OUTZ);
-	mutex_unlock(&lis3->mutex);
 
 	for (i = 0; i < 3; i++)
 		position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY;
@@ -249,8 +249,24 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
 EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
 
 
+static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
+{
+	int x, y, z;
+
+	mutex_lock(&lis3_dev.mutex);
+	lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+	input_report_abs(pidev->input, ABS_X, x);
+	input_report_abs(pidev->input, ABS_Y, y);
+	input_report_abs(pidev->input, ABS_Z, z);
+	input_sync(pidev->input);
+	mutex_unlock(&lis3_dev.mutex);
+}
+
 static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
 {
+	if (!test_bit(0, &lis3_dev.misc_opened))
+		goto out;
+
 	/*
 	 * Be careful: on some HP laptops the bios force DD when on battery and
 	 * the lid is closed. This leads to interrupts as soon as a little move
@@ -260,44 +276,93 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
 
 	wake_up_interruptible(&lis3_dev.misc_wait);
 	kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
+out:
+	if (lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
+	    lis3_dev.idev->input->users)
+		return IRQ_WAKE_THREAD;
 	return IRQ_HANDLED;
 }
 
-static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
+static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3)
 {
-	int ret;
+	struct input_dev *dev = lis3->idev->input;
+	u8 click_src;
 
-	if (test_and_set_bit(0, &lis3_dev.misc_opened))
-		return -EBUSY; /* already open */
+	mutex_lock(&lis3->mutex);
+	lis3->read(lis3, CLICK_SRC, &click_src);
 
-	atomic_set(&lis3_dev.count, 0);
+	if (click_src & CLICK_SINGLE_X) {
+		input_report_key(dev, lis3->mapped_btns[0], 1);
+		input_report_key(dev, lis3->mapped_btns[0], 0);
+	}
 
-	/*
-	 * The sensor can generate interrupts for free-fall and direction
-	 * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep
-	 * the things simple and _fast_ we activate it only for free-fall, so
-	 * no need to read register (very slow with ACPI). For the same reason,
-	 * we forbid shared interrupts.
-	 *
-	 * IRQF_TRIGGER_RISING seems pointless on HP laptops because the
-	 * io-apic is not configurable (and generates a warning) but I keep it
-	 * in case of support for other hardware.
-	 */
-	ret = request_irq(lis3_dev.irq, lis302dl_interrupt, IRQF_TRIGGER_RISING,
-			DRIVER_NAME, &lis3_dev);
+	if (click_src & CLICK_SINGLE_Y) {
+		input_report_key(dev, lis3->mapped_btns[1], 1);
+		input_report_key(dev, lis3->mapped_btns[1], 0);
+	}
 
-	if (ret) {
-		clear_bit(0, &lis3_dev.misc_opened);
-		printk(KERN_ERR DRIVER_NAME ": IRQ%d allocation failed\n", lis3_dev.irq);
-		return -EBUSY;
+	if (click_src & CLICK_SINGLE_Z) {
+		input_report_key(dev, lis3->mapped_btns[2], 1);
+		input_report_key(dev, lis3->mapped_btns[2], 0);
 	}
+	input_sync(dev);
+	mutex_unlock(&lis3->mutex);
+}
+
+static void lis302dl_interrupt_handle_ff_wu(struct lis3lv02d *lis3)
+{
+	u8 wu1_src;
+	u8 wu2_src;
+
+	lis3->read(lis3, FF_WU_SRC_1, &wu1_src);
+	lis3->read(lis3, FF_WU_SRC_2, &wu2_src);
+
+	wu1_src = wu1_src & FF_WU_SRC_IA ? wu1_src : 0;
+	wu2_src = wu2_src & FF_WU_SRC_IA ? wu2_src : 0;
+
+	/* joystick poll is internally protected by the lis3->mutex. */
+	if (wu1_src || wu2_src)
+		lis3lv02d_joystick_poll(lis3_dev.idev);
+}
+
+static irqreturn_t lis302dl_interrupt_thread1_8b(int irq, void *data)
+{
+
+	struct lis3lv02d *lis3 = data;
+
+	if ((lis3->pdata->irq_cfg & LIS3_IRQ1_MASK) == LIS3_IRQ1_CLICK)
+		lis302dl_interrupt_handle_click(lis3);
+	else
+		lis302dl_interrupt_handle_ff_wu(lis3);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t lis302dl_interrupt_thread2_8b(int irq, void *data)
+{
+
+	struct lis3lv02d *lis3 = data;
+
+	if ((lis3->pdata->irq_cfg & LIS3_IRQ2_MASK) == LIS3_IRQ2_CLICK)
+		lis302dl_interrupt_handle_click(lis3);
+	else
+		lis302dl_interrupt_handle_ff_wu(lis3);
+
+	return IRQ_HANDLED;
+}
+
+static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
+{
+	if (test_and_set_bit(0, &lis3_dev.misc_opened))
+		return -EBUSY; /* already open */
+
+	atomic_set(&lis3_dev.count, 0);
 	return 0;
 }
 
 static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
 {
 	fasync_helper(-1, file, 0, &lis3_dev.async_queue);
-	free_irq(lis3_dev.irq, &lis3_dev);
 	clear_bit(0, &lis3_dev.misc_opened); /* release the device */
 	return 0;
 }
@@ -380,22 +445,12 @@ static struct miscdevice lis3lv02d_misc_device = {
 	.fops    = &lis3lv02d_misc_fops,
 };
 
-static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
-{
-	int x, y, z;
-
-	lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
-	input_report_abs(pidev->input, ABS_X, x);
-	input_report_abs(pidev->input, ABS_Y, y);
-	input_report_abs(pidev->input, ABS_Z, z);
-	input_sync(pidev->input);
-}
-
 int lis3lv02d_joystick_enable(void)
 {
 	struct input_dev *input_dev;
 	int err;
 	int max_val, fuzz, flat;
+	int btns[] = {BTN_X, BTN_Y, BTN_Z};
 
 	if (lis3_dev.idev)
 		return -EINVAL;
@@ -406,6 +461,8 @@ int lis3lv02d_joystick_enable(void)
 
 	lis3_dev.idev->poll = lis3lv02d_joystick_poll;
 	lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
+	lis3_dev.idev->poll_interval_min = MDPS_POLL_MIN;
+	lis3_dev.idev->poll_interval_max = MDPS_POLL_MAX;
 	input_dev = lis3_dev.idev->input;
 
 	input_dev->name = "ST LIS3LV02DL Accelerometer";
@@ -422,6 +479,10 @@ int lis3lv02d_joystick_enable(void)
 	input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
 	input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
 
+	lis3_dev.mapped_btns[0] = lis3lv02d_get_axis(abs(lis3_dev.ac.x), btns);
+	lis3_dev.mapped_btns[1] = lis3lv02d_get_axis(abs(lis3_dev.ac.y), btns);
+	lis3_dev.mapped_btns[2] = lis3lv02d_get_axis(abs(lis3_dev.ac.z), btns);
+
 	err = input_register_polled_device(lis3_dev.idev);
 	if (err) {
 		input_free_polled_device(lis3_dev.idev);
@@ -434,6 +495,11 @@ EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
 
 void lis3lv02d_joystick_disable(void)
 {
+	if (lis3_dev.irq)
+		free_irq(lis3_dev.irq, &lis3_dev);
+	if (lis3_dev.pdata && lis3_dev.pdata->irq2)
+		free_irq(lis3_dev.pdata->irq2, &lis3_dev);
+
 	if (!lis3_dev.idev)
 		return;
 
@@ -462,7 +528,9 @@ static ssize_t lis3lv02d_position_show(struct device *dev,
 {
 	int x, y, z;
 
+	mutex_lock(&lis3_dev.mutex);
 	lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
+	mutex_unlock(&lis3_dev.mutex);
 	return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
 }
 
@@ -521,12 +589,70 @@ int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
 }
 EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
 
+static void lis3lv02d_8b_configure(struct lis3lv02d *dev,
+				   struct lis3lv02d_platform_data *p)
+{
+	int err;
+	int ctrl2 = p->hipass_ctrl;
+
+	if (p->click_flags) {
+		dev->write(dev, CLICK_CFG, p->click_flags);
+		dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
+		dev->write(dev, CLICK_LATENCY, p->click_latency);
+		dev->write(dev, CLICK_WINDOW, p->click_window);
+		dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
+		dev->write(dev, CLICK_THSY_X,
+			   (p->click_thresh_x & 0xf) |
+			   (p->click_thresh_y << 4));
+
+		if (dev->idev) {
+			struct input_dev *input_dev = lis3_dev.idev->input;
+			input_set_capability(input_dev, EV_KEY, BTN_X);
+			input_set_capability(input_dev, EV_KEY, BTN_Y);
+			input_set_capability(input_dev, EV_KEY, BTN_Z);
+		}
+	}
+
+	if (p->wakeup_flags) {
+		dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
+		dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
+		/* default to 2.5ms for now */
+		dev->write(dev, FF_WU_DURATION_1, 1);
+		ctrl2 ^= HP_FF_WU1; /* Xor to keep compatible with old pdata*/
+	}
+
+	if (p->wakeup_flags2) {
+		dev->write(dev, FF_WU_CFG_2, p->wakeup_flags2);
+		dev->write(dev, FF_WU_THS_2, p->wakeup_thresh2 & 0x7f);
+		/* default to 2.5ms for now */
+		dev->write(dev, FF_WU_DURATION_2, 1);
+		ctrl2 ^= HP_FF_WU2; /* Xor to keep compatible with old pdata*/
+	}
+	/* Configure hipass filters */
+	dev->write(dev, CTRL_REG2, ctrl2);
+
+	if (p->irq2) {
+		err = request_threaded_irq(p->irq2,
+					   NULL,
+					   lis302dl_interrupt_thread2_8b,
+					   IRQF_TRIGGER_RISING |
+					   IRQF_ONESHOT,
+					   DRIVER_NAME, &lis3_dev);
+		if (err < 0)
+			printk(KERN_ERR DRIVER_NAME
+			       "No second IRQ. Limited functionality\n");
+	}
+}
+
 /*
  * Initialise the accelerometer and the various subsystems.
  * Should be rather independent of the bus system.
  */
 int lis3lv02d_init_device(struct lis3lv02d *dev)
 {
+	int err;
+	irq_handler_t thread_fn;
+
 	dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I);
 
 	switch (dev->whoami) {
@@ -567,25 +693,8 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
 	if (dev->pdata) {
 		struct lis3lv02d_platform_data *p = dev->pdata;
 
-		if (p->click_flags && (dev->whoami == WAI_8B)) {
-			dev->write(dev, CLICK_CFG, p->click_flags);
-			dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
-			dev->write(dev, CLICK_LATENCY, p->click_latency);
-			dev->write(dev, CLICK_WINDOW, p->click_window);
-			dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
-			dev->write(dev, CLICK_THSY_X,
-				   (p->click_thresh_x & 0xf) |
-				   (p->click_thresh_y << 4));
-		}
-
-		if (p->wakeup_flags && (dev->whoami == WAI_8B)) {
-			dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
-			dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
-			/* default to 2.5ms for now */
-			dev->write(dev, FF_WU_DURATION_1, 1);
-			/* enable high pass filter for both free-fall units */
-			dev->write(dev, CTRL_REG2, HP_FF_WU1 | HP_FF_WU2);
-		}
+		if (dev->whoami == WAI_8B)
+			lis3lv02d_8b_configure(dev, p);
 
 		if (p->irq_cfg)
 			dev->write(dev, CTRL_REG3, p->irq_cfg);
@@ -598,6 +707,32 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
 		goto out;
 	}
 
+	/*
+	 * The sensor can generate interrupts for free-fall and direction
+	 * detection (distinguishable with FF_WU_SRC and DD_SRC) but to keep
+	 * the things simple and _fast_ we activate it only for free-fall, so
+	 * no need to read register (very slow with ACPI). For the same reason,
+	 * we forbid shared interrupts.
+	 *
+	 * IRQF_TRIGGER_RISING seems pointless on HP laptops because the
+	 * io-apic is not configurable (and generates a warning) but I keep it
+	 * in case of support for other hardware.
+	 */
+	if (dev->whoami == WAI_8B)
+		thread_fn = lis302dl_interrupt_thread1_8b;
+	else
+		thread_fn = NULL;
+
+	err = request_threaded_irq(dev->irq, lis302dl_interrupt,
+				   thread_fn,
+				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+				   DRIVER_NAME, &lis3_dev);
+
+	if (err < 0) {
+		printk(KERN_ERR DRIVER_NAME "Cannot get IRQ\n");
+		goto out;
+	}
+
 	if (misc_register(&lis3lv02d_misc_device))
 		printk(KERN_ERR DRIVER_NAME ": misc_register failed\n");
 out:
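
Note on the lis3lv02d hunks above: the request_irq() call done at misc-device open time is replaced by a single request_threaded_irq() at init. The hard handler stays minimal and returns IRQ_WAKE_THREAD only when the 8-bit part has click or wake-up sources to decode; the slow bus reads then run in the threaded handler, where sleeping is allowed. A minimal sketch of the same split outside this driver (the demo_* names and device struct are illustrative, not from the patch):

    #include <linux/interrupt.h>

    struct demo_dev { int irq; int users; };
    int demo_read_status(struct demo_dev *dev);    /* assumed slow-bus accessor */

    static irqreturn_t demo_quick_check(int irq, void *data)
    {
        struct demo_dev *dev = data;    /* hard-IRQ context: no sleeping */

        if (!dev->users)
            return IRQ_HANDLED;         /* nothing to do, skip the thread */
        return IRQ_WAKE_THREAD;
    }

    static irqreturn_t demo_slow_work(int irq, void *data)
    {
        struct demo_dev *dev = data;

        demo_read_status(dev);          /* may sleep: runs in the IRQ thread */
        return IRQ_HANDLED;
    }

    /* at probe time; IRQF_ONESHOT keeps the line masked until the thread finishes */
    err = request_threaded_irq(dev->irq, demo_quick_check, demo_slow_work,
                               IRQF_TRIGGER_RISING | IRQF_ONESHOT, "demo", dev);
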
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index e6a01f44709b..854091380e33 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -196,6 +196,16 @@ enum lis3lv02d_dd_src {
 	DD_SRC_IA	= 0x40,
 };
 
+enum lis3lv02d_click_src_8b {
+	CLICK_SINGLE_X	= 0x01,
+	CLICK_DOUBLE_X	= 0x02,
+	CLICK_SINGLE_Y	= 0x04,
+	CLICK_DOUBLE_Y	= 0x08,
+	CLICK_SINGLE_Z	= 0x10,
+	CLICK_DOUBLE_Z	= 0x20,
+	CLICK_IA	= 0x40,
+};
+
 struct axis_conversion {
 	s8	x;
 	s8	y;
@@ -223,6 +233,7 @@ struct lis3lv02d {
 	struct platform_device *pdev;     /* platform device */
 	atomic_t count;                   /* interrupt count after last read */
 	struct axis_conversion ac;        /* hw -> logical axis */
+	int mapped_btns[3];
 
 	u32 irq;                          /* IRQ number */
 	struct fasync_struct *async_queue;/* queue for the misc device */
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 68e90abeba96..5da5942cf970 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -300,8 +300,11 @@ static const struct of_device_id env_match[] = {
 MODULE_DEVICE_TABLE(of, env_match);
 
 static struct of_platform_driver env_driver = {
-	.name		= "ultra45_env",
-	.match_table	= env_match,
+	.driver = {
+		.name = "ultra45_env",
+		.owner = THIS_MODULE,
+		.of_match_table = env_match,
+	},
 	.probe		= env_probe,
 	.remove		= __devexit_p(env_remove),
 };
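
This is the same mechanical migration applied to the i2c-cpm, ibm-iic and i2c-mpc drivers further down: struct of_platform_driver loses its private .name/.match_table fields, and the name, module owner and OF match table move into the embedded struct device_driver, the table under its new .of_match_table name. In outline (demo names, not from the patch):

    /* before: OF match data carried at the bus-driver level */
    static struct of_platform_driver demo_driver = {
        .name        = "demo",
        .match_table = demo_match,
        .probe       = demo_probe,
    };

    /* after: everything hangs off the generic struct device_driver */
    static struct of_platform_driver demo_driver = {
        .driver = {
            .name           = "demo",
            .owner          = THIS_MODULE,
            .of_match_table = demo_match,
        },
        .probe  = demo_probe,
    };
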
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 612807d97155..697202e27891 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/hwmon.h>
+#include <linux/smp_lock.h>
 #include <linux/hwmon-vid.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/err.h>
@@ -1319,8 +1320,8 @@ static ssize_t watchdog_write(struct file *filp, const char __user *buf,
 	return count;
 }
 
-static int watchdog_ioctl(struct inode *inode, struct file *filp,
-			  unsigned int cmd, unsigned long arg)
+static long watchdog_ioctl(struct file *filp, unsigned int cmd,
+			   unsigned long arg)
 {
 	static struct watchdog_info ident = {
 		.options = WDIOF_KEEPALIVEPING |
@@ -1332,6 +1333,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
 	int val, ret = 0;
 	struct w83793_data *data = filp->private_data;
 
+	lock_kernel();
 	switch (cmd) {
 	case WDIOC_GETSUPPORT:
 		if (!nowayout)
@@ -1385,7 +1387,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
 	default:
 		ret = -ENOTTY;
 	}
-
+	unlock_kernel();
 	return ret;
 }
 
@@ -1395,7 +1397,7 @@ static const struct file_operations watchdog_fops = {
 	.open = watchdog_open,
 	.release = watchdog_close,
 	.write = watchdog_write,
-	.ioctl = watchdog_ioctl,
+	.unlocked_ioctl = watchdog_ioctl,
 };
 
 /*
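
Both watchdog conversions in this section (fschmd at the top, w83793 here) follow the Big Kernel Lock pushdown recipe: the old .ioctl hook, which the VFS called with the BKL held, becomes .unlocked_ioctl with the long (*)(struct file *, unsigned int, unsigned long) signature, and an explicit lock_kernel()/unlock_kernel() pair preserves the old serialization inside the handler until it can be replaced by a private mutex. Reduced to its skeleton (demo names; the real conversions keep their original command handling):

    #include <linux/fs.h>
    #include <linux/smp_lock.h>

    static long demo_ioctl(struct file *filp, unsigned int cmd,
                           unsigned long arg)
    {
        long ret = 0;

        lock_kernel();          /* keep the old .ioctl locking for now */
        switch (cmd) {
        default:
            ret = -ENOTTY;
        }
        unlock_kernel();
        return ret;
    }

    static const struct file_operations demo_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = demo_ioctl,
    };
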
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 16948db38973..b02b4533651d 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -440,7 +440,7 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
 
 	init_waitqueue_head(&cpm->i2c_wait);
 
-	cpm->irq = of_irq_to_resource(ofdev->node, 0, NULL);
+	cpm->irq = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
 	if (!cpm->irq)
 		return -EINVAL;
 
@@ -451,13 +451,13 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
 		return ret;
 
 	/* I2C parameter RAM */
-	i2c_base = of_iomap(ofdev->node, 1);
+	i2c_base = of_iomap(ofdev->dev.of_node, 1);
 	if (i2c_base == NULL) {
 		ret = -EINVAL;
 		goto out_irq;
 	}
 
-	if (of_device_is_compatible(ofdev->node, "fsl,cpm1-i2c")) {
+	if (of_device_is_compatible(ofdev->dev.of_node, "fsl,cpm1-i2c")) {
 
 		/* Check for and use a microcode relocation patch. */
 		cpm->i2c_ram = i2c_base;
@@ -474,7 +474,7 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
 
 		cpm->version = 1;
 
-	} else if (of_device_is_compatible(ofdev->node, "fsl,cpm2-i2c")) {
+	} else if (of_device_is_compatible(ofdev->dev.of_node, "fsl,cpm2-i2c")) {
 		cpm->i2c_addr = cpm_muram_alloc(sizeof(struct i2c_ram), 64);
 		cpm->i2c_ram = cpm_muram_addr(cpm->i2c_addr);
 		out_be16(i2c_base, cpm->i2c_addr);
@@ -489,24 +489,24 @@ static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
 	}
 
 	/* I2C control/status registers */
-	cpm->i2c_reg = of_iomap(ofdev->node, 0);
+	cpm->i2c_reg = of_iomap(ofdev->dev.of_node, 0);
 	if (cpm->i2c_reg == NULL) {
 		ret = -EINVAL;
 		goto out_ram;
 	}
 
-	data = of_get_property(ofdev->node, "fsl,cpm-command", &len);
+	data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
 	if (!data || len != 4) {
 		ret = -EINVAL;
 		goto out_reg;
 	}
 	cpm->cp_command = *data;
 
-	data = of_get_property(ofdev->node, "linux,i2c-class", &len);
+	data = of_get_property(ofdev->dev.of_node, "linux,i2c-class", &len);
 	if (data && len == 4)
 		cpm->adap.class = *data;
 
-	data = of_get_property(ofdev->node, "clock-frequency", &len);
+	data = of_get_property(ofdev->dev.of_node, "clock-frequency", &len);
 	if (data && len == 4)
 		cpm->freq = *data;
 	else
@@ -661,7 +661,7 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev,
 
 	/* register new adapter to i2c module... */
 
-	data = of_get_property(ofdev->node, "linux,i2c-index", &len);
+	data = of_get_property(ofdev->dev.of_node, "linux,i2c-index", &len);
 	if (data && len == 4) {
 		cpm->adap.nr = *data;
 		result = i2c_add_numbered_adapter(&cpm->adap);
@@ -679,7 +679,7 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev,
 	/*
 	 * register OF I2C devices
 	 */
-	of_register_i2c_devices(&cpm->adap, ofdev->node);
+	of_register_i2c_devices(&cpm->adap, ofdev->dev.of_node);
 
 	return 0;
 out_shut:
@@ -718,13 +718,13 @@ static const struct of_device_id cpm_i2c_match[] = {
 MODULE_DEVICE_TABLE(of, cpm_i2c_match);
 
 static struct of_platform_driver cpm_i2c_driver = {
-	.match_table	= cpm_i2c_match,
 	.probe		= cpm_i2c_probe,
 	.remove		= __devexit_p(cpm_i2c_remove),
 	.driver	= {
 		.name	= "fsl-i2c-cpm",
 		.owner	= THIS_MODULE,
-	}
+		.of_match_table = cpm_i2c_match,
+	},
 };
 
 static int __init cpm_i2c_init(void)
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index f8ccc0fe95a8..bf344135647a 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -664,7 +664,7 @@ static inline u8 iic_clckdiv(unsigned int opb)
 static int __devinit iic_request_irq(struct of_device *ofdev,
 				     struct ibm_iic_private *dev)
 {
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	int irq;
 
 	if (iic_force_poll)
@@ -695,7 +695,7 @@ static int __devinit iic_request_irq(struct of_device *ofdev,
 static int __devinit iic_probe(struct of_device *ofdev,
 			       const struct of_device_id *match)
 {
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	struct ibm_iic_private *dev;
 	struct i2c_adapter *adap;
 	const u32 *freq;
@@ -807,8 +807,11 @@ static const struct of_device_id ibm_iic_match[] = {
 };
 
 static struct of_platform_driver ibm_iic_driver = {
-	.name	= "ibm-iic",
-	.match_table = ibm_iic_match,
+	.driver = {
+		.name = "ibm-iic",
+		.owner = THIS_MODULE,
+		.of_match_table = ibm_iic_match,
+	},
 	.probe	= iic_probe,
 	.remove	= __devexit_p(iic_remove),
 };
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index e86cef300c7d..df00eb1f11f9 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -560,14 +560,14 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
 
 	init_waitqueue_head(&i2c->queue);
 
-	i2c->base = of_iomap(op->node, 0);
+	i2c->base = of_iomap(op->dev.of_node, 0);
 	if (!i2c->base) {
 		dev_err(i2c->dev, "failed to map controller\n");
 		result = -ENOMEM;
 		goto fail_map;
 	}
 
-	i2c->irq = irq_of_parse_and_map(op->node, 0);
+	i2c->irq = irq_of_parse_and_map(op->dev.of_node, 0);
 	if (i2c->irq) { /* no i2c->irq implies polling */
 		result = request_irq(i2c->irq, mpc_i2c_isr,
 				     IRQF_SHARED, "i2c-mpc", i2c);
@@ -577,21 +577,22 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
 		}
 	}
 
-	if (of_get_property(op->node, "fsl,preserve-clocking", NULL)) {
+	if (of_get_property(op->dev.of_node, "fsl,preserve-clocking", NULL)) {
 		clock = MPC_I2C_CLOCK_PRESERVE;
 	} else {
-		prop = of_get_property(op->node, "clock-frequency", &plen);
+		prop = of_get_property(op->dev.of_node, "clock-frequency",
+				       &plen);
 		if (prop && plen == sizeof(u32))
 			clock = *prop;
 	}
 
 	if (match->data) {
 		struct mpc_i2c_data *data = match->data;
-		data->setup(op->node, i2c, clock, data->prescaler);
+		data->setup(op->dev.of_node, i2c, clock, data->prescaler);
 	} else {
 		/* Backwards compatibility */
-		if (of_get_property(op->node, "dfsrr", NULL))
-			mpc_i2c_setup_8xxx(op->node, i2c, clock, 0);
+		if (of_get_property(op->dev.of_node, "dfsrr", NULL))
+			mpc_i2c_setup_8xxx(op->dev.of_node, i2c, clock, 0);
 	}
 
 	dev_set_drvdata(&op->dev, i2c);
@@ -605,7 +606,7 @@ static int __devinit fsl_i2c_probe(struct of_device *op,
 		dev_err(i2c->dev, "failed to add adapter\n");
 		goto fail_add;
 	}
-	of_register_i2c_devices(&i2c->adap, op->node);
+	of_register_i2c_devices(&i2c->adap, op->dev.of_node);
 
 	return result;
 
@@ -674,12 +675,12 @@ MODULE_DEVICE_TABLE(of, mpc_i2c_of_match);
 
 /* Structure for a device driver */
 static struct of_platform_driver mpc_i2c_driver = {
-	.match_table	= mpc_i2c_of_match,
 	.probe		= fsl_i2c_probe,
 	.remove		= __devexit_p(fsl_i2c_remove),
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= DRV_NAME,
+		.of_match_table = mpc_i2c_of_match,
 	},
 };
 
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index db3c9f3a7647..e0f833cca3f1 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -418,6 +418,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
 	client->dev.parent = &client->adapter->dev;
 	client->dev.bus = &i2c_bus_type;
 	client->dev.type = &i2c_client_type;
+#ifdef CONFIG_OF
+	client->dev.of_node = info->of_node;
+#endif
 
 	dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
 		     client->addr);
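
With this hunk, an I2C client instantiated from board info inherits the firmware node, so OF-aware code can map from the struct device back to its device-tree description. A sketch of the caller side, assuming an OF-aware registrar holding a struct device_node *np (names illustrative):

    #include <linux/i2c.h>
    #include <linux/of.h>

    struct i2c_board_info info = {
        I2C_BOARD_INFO("demo-sensor", 0x1d),
    };
    struct i2c_client *client;

    #ifdef CONFIG_OF
    info.of_node = np;              /* np: the node describing this device */
    #endif
    client = i2c_new_device(adap, &info);
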
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index d2b8b272bc27..cb10201a15ed 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -633,12 +633,10 @@ static void __init cmd640_init_dev(ide_drive_t *drive)
 
 static int cmd640_test_irq(ide_hwif_t *hwif)
 {
-	struct pci_dev *dev	= to_pci_dev(hwif->dev);
 	int irq_reg		= hwif->channel ? ARTTIM23 : CFR;
-	u8  irq_stat, irq_mask	= hwif->channel ? ARTTIM23_IDE23INTR :
+	u8  irq_mask		= hwif->channel ? ARTTIM23_IDE23INTR :
 					  CFR_IDE01INTR;
-
-	pci_read_config_byte(dev, irq_reg, &irq_stat);
+	u8  irq_stat		= get_cmd640_reg(irq_reg);
 
 	return (irq_stat & irq_mask) ? 1 : 0;
 }
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index b9e517de6a82..3feaa26410be 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/zorro.h>
 #include <linux/module.h>
+#include <linux/platform_device.h>
 
 #include <asm/setup.h>
 #include <asm/amigahw.h>
@@ -24,15 +25,6 @@
 
 
     /*
-     *  Bases of the IDE interfaces
-     */
-
-#define GAYLE_BASE_4000	0xdd2020	/* A4000/A4000T */
-#define GAYLE_BASE_1200	0xda0000	/* A1200/A600 and E-Matrix 530 */
-
-#define GAYLE_IDEREG_SIZE	0x2000
-
-    /*
      *  Offsets from one of the above bases
      */
 
@@ -68,20 +60,20 @@ MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
 
 static int gayle_test_irq(ide_hwif_t *hwif)
 {
 	unsigned char ch;
 
 	ch = z_readb(hwif->io_ports.irq_addr);
 	if (!(ch & GAYLE_IRQ_IDE))
 		return 0;
 	return 1;
 }
 
 static void gayle_a1200_clear_irq(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
 
 	(void)z_readb(hwif->io_ports.status_addr);
 	z_writeb(0x7c, hwif->io_ports.irq_addr);
 }
 
 static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base,
@@ -122,64 +114,89 @@ static const struct ide_port_info gayle_port_info = {
  * Probe for a Gayle IDE interface (and optionally for an IDE doubler)
  */
 
-static int __init gayle_init(void)
+static int __init amiga_gayle_ide_probe(struct platform_device *pdev)
 {
-	unsigned long phys_base, res_start, res_n;
-	unsigned long base, ctrlport, irqport;
-	int a4000, i, rc;
-	struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
-	struct ide_port_info d = gayle_port_info;
-
-	if (!MACH_IS_AMIGA)
-		return -ENODEV;
-
-	if ((a4000 = AMIGAHW_PRESENT(A4000_IDE)) || AMIGAHW_PRESENT(A1200_IDE))
-		goto found;
-
-#ifdef CONFIG_ZORRO
-	if (zorro_find_device(ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE,
-			      NULL))
-		goto found;
-#endif
-	return -ENODEV;
-
-found:
-	printk(KERN_INFO "ide: Gayle IDE controller (A%d style%s)\n",
-			 a4000 ? 4000 : 1200,
-			 ide_doubler ? ", IDE doubler" : "");
-
-	if (a4000) {
-		phys_base = GAYLE_BASE_4000;
-		irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000);
-		d.port_ops = &gayle_a4000_port_ops;
-	} else {
-		phys_base = GAYLE_BASE_1200;
-		irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_1200);
-		d.port_ops = &gayle_a1200_port_ops;
+	struct resource *res;
+	struct gayle_ide_platform_data *pdata;
+	unsigned long base, ctrlport, irqport;
+	unsigned int i;
+	int error;
+	struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
+	struct ide_port_info d = gayle_port_info;
+	struct ide_host *host;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	if (!request_mem_region(res->start, resource_size(res), "IDE"))
+		return -EBUSY;
+
+	pdata = pdev->dev.platform_data;
+	pr_info("ide: Gayle IDE controller (A%u style%s)\n",
+		pdata->explicit_ack ? 1200 : 4000,
+		ide_doubler ? ", IDE doubler" : "");
+
+	base = (unsigned long)ZTWO_VADDR(pdata->base);
+	ctrlport = 0;
+	irqport = (unsigned long)ZTWO_VADDR(pdata->irqport);
+	if (pdata->explicit_ack)
+		d.port_ops = &gayle_a1200_port_ops;
+	else
+		d.port_ops = &gayle_a4000_port_ops;
+
+	for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++, base += GAYLE_NEXT_PORT) {
+		if (GAYLE_HAS_CONTROL_REG)
+			ctrlport = base + GAYLE_CONTROL;
+
+		gayle_setup_ports(&hw[i], base, ctrlport, irqport);
+		hws[i] = &hw[i];
 	}
 
-	res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1);
-	res_n = GAYLE_IDEREG_SIZE;
+	error = ide_host_add(&d, hws, i, &host);
+	if (error)
+		goto out;
 
-	if (!request_mem_region(res_start, res_n, "IDE"))
-		return -EBUSY;
+	platform_set_drvdata(pdev, host);
+	return 0;
 
-	for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) {
-		base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT);
-		ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0;
+out:
+	release_mem_region(res->start, resource_size(res));
+	return error;
+}
+
+static int __exit amiga_gayle_ide_remove(struct platform_device *pdev)
+{
+	struct ide_host *host = platform_get_drvdata(pdev);
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	ide_host_remove(host);
+	release_mem_region(res->start, resource_size(res));
+	return 0;
+}
 
-		gayle_setup_ports(&hw[i], base, ctrlport, irqport);
+static struct platform_driver amiga_gayle_ide_driver = {
+	.remove = __exit_p(amiga_gayle_ide_remove),
+	.driver   = {
+		.name	= "amiga-gayle-ide",
+		.owner	= THIS_MODULE,
+	},
+};
 
-		hws[i] = &hw[i];
-	}
+static int __init amiga_gayle_ide_init(void)
+{
+	return platform_driver_probe(&amiga_gayle_ide_driver,
+				     amiga_gayle_ide_probe);
+}
 
-	rc = ide_host_add(&d, hws, i, NULL);
-	if (rc)
-		release_mem_region(res_start, res_n);
+module_init(amiga_gayle_ide_init);
 
-	return rc;
+static void __exit amiga_gayle_ide_exit(void)
+{
+	platform_driver_unregister(&amiga_gayle_ide_driver);
 }
 
-module_init(gayle_init);
+module_exit(amiga_gayle_ide_exit);
 
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-gayle-ide");
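
The gayle rewrite is a bus-probe-to-platform-driver conversion: hard-coded base addresses and machine checks are replaced by a memory resource plus a gayle_ide_platform_data describing the variant, and registration goes through platform_driver_probe() rather than platform_driver_register(). Because the probe function is passed separately and never stored in the driver struct, it can stay in __init memory and be discarded after boot. The registration idiom on its own (demo names, a sketch rather than the real driver):

    #include <linux/platform_device.h>

    static int __init demo_probe(struct platform_device *pdev)
    {
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (!res)
            return -ENODEV;
        /* ... map and register the hardware ... */
        return 0;
    }

    static struct platform_driver demo_driver = {
        .driver = {
            .name  = "demo",
            .owner = THIS_MODULE,
        },
    };

    static int __init demo_init(void)
    {
        /* probe is not kept in the driver struct, so it may be __init */
        return platform_driver_probe(&demo_driver, demo_probe);
    }
    module_init(demo_init);
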
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 42965b3e30b9..542603b394e4 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -95,6 +95,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
 	plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
 	hw.dev = &pdev->dev;
 
+	d.irq_flags = res_irq->flags;
 	if (mmio)
 		d.host_flags |= IDE_HFLAG_MMIO;
 
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index c5f3841af360..3a35ec6193d2 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -93,13 +93,13 @@ static int pdc202xx_test_irq(ide_hwif_t *hwif)
 		 * bit 7: error, bit 6: interrupting,
 		 * bit 5: FIFO full, bit 4: FIFO empty
 		 */
-		return ((sc1d & 0x50) == 0x50) ? 1 : 0;
+		return (sc1d & 0x40) ? 1 : 0;
 	} else {
 		/*
 		 * bit 3: error, bit 2: interrupting,
 		 * bit 1: FIFO full, bit 0: FIFO empty
 		 */
-		return ((sc1d & 0x05) == 0x05) ? 1 : 0;
+		return (sc1d & 0x04) ? 1 : 0;
 	}
 }
 
@@ -241,6 +241,7 @@ static const struct ide_port_ops pdc20246_port_ops = {
 static const struct ide_port_ops pdc2026x_port_ops = {
 	.set_pio_mode		= pdc202xx_set_pio_mode,
 	.set_dma_mode		= pdc202xx_set_mode,
+	.test_irq		= pdc202xx_test_irq,
 	.cable_detect		= pdc2026x_cable_detect,
 };
 
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 159955d16c47..183fa38760d8 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1153,7 +1153,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
 
 	if (macio_resource_count(mdev) == 0) {
 		printk(KERN_WARNING "ide-pmac: no address for %s\n",
-		       mdev->ofdev.node->full_name);
+		       mdev->ofdev.dev.of_node->full_name);
 		rc = -ENXIO;
 		goto out_free_pmif;
 	}
@@ -1161,7 +1161,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
 	/* Request memory resource for IO ports */
 	if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
 		printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
-		       "%s!\n", mdev->ofdev.node->full_name);
+		       "%s!\n", mdev->ofdev.dev.of_node->full_name);
 		rc = -EBUSY;
 		goto out_free_pmif;
 	}
@@ -1173,7 +1173,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
 	 */
 	if (macio_irq_count(mdev) == 0) {
 		printk(KERN_WARNING "ide-pmac: no intrs for device %s, using "
-		       "13\n", mdev->ofdev.node->full_name);
+		       "13\n", mdev->ofdev.dev.of_node->full_name);
 		irq = irq_create_mapping(NULL, 13);
 	} else
 		irq = macio_irq(mdev, 0);
@@ -1182,7 +1182,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
 	regbase = (unsigned long) base;
 
 	pmif->mdev = mdev;
-	pmif->node = mdev->ofdev.node;
+	pmif->node = mdev->ofdev.dev.of_node;
 	pmif->regbase = regbase;
 	pmif->irq = irq;
 	pmif->kauai_fcr = NULL;
@@ -1191,7 +1191,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
 		if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
 			printk(KERN_WARNING "ide-pmac: can't request DMA "
 			       "resource for %s!\n",
-			       mdev->ofdev.node->full_name);
+			       mdev->ofdev.dev.of_node->full_name);
 		else
 			pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
 	} else
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 9fd4a0d3206e..adaefabc40e9 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1824,7 +1824,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
1824 "and will not be available in the new firewire driver stack. " 1824 "and will not be available in the new firewire driver stack. "
1825 "Try libraw1394 based programs instead.\n", current->comm); 1825 "Try libraw1394 based programs instead.\n", current->comm);
1826 1826
1827 return 0; 1827 return nonseekable_open(inode, file);
1828} 1828}
1829 1829
1830 1830
@@ -2153,17 +2153,18 @@ static struct cdev dv1394_cdev;
 static const struct file_operations dv1394_fops=
 {
 	.owner = THIS_MODULE,
 	.poll = dv1394_poll,
 	.unlocked_ioctl = dv1394_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = dv1394_compat_ioctl,
 #endif
 	.mmap = dv1394_mmap,
 	.open = dv1394_open,
 	.write = dv1394_write,
 	.read = dv1394_read,
 	.release = dv1394_release,
 	.fasync = dv1394_fasync,
+	.llseek = no_llseek,
 };
 
 
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 8aa56ac07e29..b563d5e9fa2e 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2834,7 +2834,7 @@ static int raw1394_open(struct inode *inode, struct file *file)
 
 	file->private_data = fi;
 
-	return 0;
+	return nonseekable_open(inode, file);
 }
 
 static int raw1394_release(struct inode *inode, struct file *file)
@@ -3035,6 +3035,7 @@ static const struct file_operations raw1394_fops = {
 	.poll = raw1394_poll,
 	.open = raw1394_open,
 	.release = raw1394_release,
+	.llseek = no_llseek,
 };
 
 static int __init init_raw1394(void)
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 949064a05675..a42bd6893bcf 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1239,7 +1239,7 @@ static int video1394_open(struct inode *inode, struct file *file)
 	ctx->current_ctx = NULL;
 	file->private_data = ctx;
 
-	return 0;
+	return nonseekable_open(inode, file);
 }
 
 static int video1394_release(struct inode *inode, struct file *file)
@@ -1287,7 +1287,8 @@ static const struct file_operations video1394_fops=
 	.poll = video1394_poll,
 	.mmap = video1394_mmap,
 	.open = video1394_open,
-	.release = video1394_release
+	.release = video1394_release,
+	.llseek = no_llseek,
 };
 
 /*** HOTPLUG STUFF **********************************************************/
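
All three ieee1394 character devices receive the same two-line treatment: open() now returns through nonseekable_open(), which strips the seekable mode bits from the file, and the file_operations gain .llseek = no_llseek so a stray lseek() fails with -ESPIPE instead of falling back to the default seek behaviour. The pattern in isolation (demo names):

    #include <linux/fs.h>

    static int demo_open(struct inode *inode, struct file *file)
    {
        /* mark the fd non-seekable: lseek() on it will return -ESPIPE */
        return nonseekable_open(inode, file);
    }

    static const struct file_operations demo_fops = {
        .owner  = THIS_MODULE,
        .open   = demo_open,
        .llseek = no_llseek,
    };
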
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 330d2a423362..89d70de5e235 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -43,6 +43,7 @@ config INFINIBAND_ADDR_TRANS
 
 source "drivers/infiniband/hw/mthca/Kconfig"
 source "drivers/infiniband/hw/ipath/Kconfig"
+source "drivers/infiniband/hw/qib/Kconfig"
 source "drivers/infiniband/hw/ehca/Kconfig"
 source "drivers/infiniband/hw/amso1100/Kconfig"
 source "drivers/infiniband/hw/cxgb3/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 0c4e589d746e..9cc7a47d3e67 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,6 +1,7 @@
 obj-$(CONFIG_INFINIBAND)		+= core/
 obj-$(CONFIG_INFINIBAND_MTHCA)		+= hw/mthca/
 obj-$(CONFIG_INFINIBAND_IPATH)		+= hw/ipath/
+obj-$(CONFIG_INFINIBAND_QIB)		+= hw/qib/
 obj-$(CONFIG_INFINIBAND_EHCA)		+= hw/ehca/
 obj-$(CONFIG_INFINIBAND_AMSO1100)	+= hw/amso1100/
 obj-$(CONFIG_INFINIBAND_CXGB3)		+= hw/cxgb3/
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 05ac36e6acdb..a565af5c2d2e 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -38,7 +38,9 @@
 
 #include <rdma/ib_verbs.h>
 
-int ib_device_register_sysfs(struct ib_device *device);
+int ib_device_register_sysfs(struct ib_device *device,
+			     int (*port_callback)(struct ib_device *,
+						  u8, struct kobject *));
 void ib_device_unregister_sysfs(struct ib_device *device);
 
 int ib_sysfs_setup(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index d1fba4153332..a19effad0811 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -267,7 +267,9 @@ out:
  * callback for each device that is added. @device must be allocated
  * with ib_alloc_device().
  */
-int ib_register_device(struct ib_device *device)
+int ib_register_device(struct ib_device *device,
+		       int (*port_callback)(struct ib_device *,
+					    u8, struct kobject *))
 {
 	int ret;
 
@@ -296,7 +298,7 @@ int ib_register_device(struct ib_device *device)
 		goto out;
 	}
 
-	ret = ib_device_register_sysfs(device);
+	ret = ib_device_register_sysfs(device, port_callback);
 	if (ret) {
 		printk(KERN_WARNING "Couldn't register device %s with driver model\n",
 		       device->name);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 6dc7b77d5d29..ef1304f151dc 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -47,8 +47,8 @@ MODULE_DESCRIPTION("kernel IB MAD API");
 MODULE_AUTHOR("Hal Rosenstock");
 MODULE_AUTHOR("Sean Hefty");
 
-int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
-int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
+static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
+static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
 
 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index f901957abc8b..3627300e2a10 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -475,7 +475,9 @@ err:
 	return NULL;
 }
 
-static int add_port(struct ib_device *device, int port_num)
+static int add_port(struct ib_device *device, int port_num,
+		    int (*port_callback)(struct ib_device *,
+					 u8, struct kobject *))
 {
 	struct ib_port *p;
 	struct ib_port_attr attr;
@@ -522,11 +524,20 @@ static int add_port(struct ib_device *device, int port_num)
 	if (ret)
 		goto err_free_pkey;
 
+	if (port_callback) {
+		ret = port_callback(device, port_num, &p->kobj);
+		if (ret)
+			goto err_remove_pkey;
+	}
+
 	list_add_tail(&p->kobj.entry, &device->port_list);
 
 	kobject_uevent(&p->kobj, KOBJ_ADD);
 	return 0;
 
+err_remove_pkey:
+	sysfs_remove_group(&p->kobj, &p->pkey_group);
+
 err_free_pkey:
 	for (i = 0; i < attr.pkey_tbl_len; ++i)
 		kfree(p->pkey_group.attrs[i]);
@@ -754,7 +765,9 @@ static struct attribute_group iw_stats_group = {
 	.attrs	= iw_proto_stats_attrs,
 };
 
-int ib_device_register_sysfs(struct ib_device *device)
+int ib_device_register_sysfs(struct ib_device *device,
+			     int (*port_callback)(struct ib_device *,
+						  u8, struct kobject *))
 {
 	struct device *class_dev = &device->dev;
 	int ret;
@@ -785,12 +798,12 @@ int ib_device_register_sysfs(struct ib_device *device)
 	}
 
 	if (device->node_type == RDMA_NODE_IB_SWITCH) {
-		ret = add_port(device, 0);
+		ret = add_port(device, 0, port_callback);
 		if (ret)
 			goto err_put;
 	} else {
 		for (i = 1; i <= device->phys_port_cnt; ++i) {
-			ret = add_port(device, i);
+			ret = add_port(device, i, port_callback);
 			if (ret)
 				goto err_put;
 		}
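
The new argument threads a per-port hook from ib_register_device() down into add_port(): once the core attributes exist, the driver's callback may attach its own sysfs files under the port kobject, with a callback failure unwinding through the new err_remove_pkey label. Drivers without per-port files simply pass NULL, as the hw driver updates below show. A sketch of what a driver-side callback might look like (the demo attribute group is illustrative, not from the patch):

    #include <rdma/ib_verbs.h>

    static struct attribute *demo_port_attrs[] = { NULL };
    static struct attribute_group demo_port_attr_group = {
        .name  = "demo",
        .attrs = demo_port_attrs,
    };

    static int demo_create_port_files(struct ib_device *ibdev, u8 port_num,
                                      struct kobject *kobj)
    {
        /* attach driver-private attributes under this port's directory */
        return sysfs_create_group(kobj, &demo_port_attr_group);
    }

    /* at device registration time: */
    ret = ib_register_device(&dev->ibdev, demo_create_port_files);
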
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index c47f618d12e8..aeebc4d37e33 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -865,7 +865,7 @@ int c2_register_device(struct c2_dev *dev)
 	dev->ibdev.iwcm->create_listen = c2_service_create;
 	dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
 
-	ret = ib_register_device(&dev->ibdev);
+	ret = ib_register_device(&dev->ibdev, NULL);
 	if (ret)
 		goto out_free_iwcm;
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 19b1c4a62a23..fca0b4b747e4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1428,7 +1428,7 @@ int iwch_register_device(struct iwch_dev *dev)
 	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
 	dev->ibdev.iwcm->get_qp = iwch_get_qp;
 
-	ret = ib_register_device(&dev->ibdev);
+	ret = ib_register_device(&dev->ibdev, NULL);
 	if (ret)
 		goto bail1;
 
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index fb1aafcc294f..2447f5295482 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -373,6 +373,7 @@ static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
 		      V_CQE_SWCQE(SW_CQE(hw_cqe)) |
 		      V_CQE_OPCODE(FW_RI_READ_REQ) |
 		      V_CQE_TYPE(1));
+	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
 }
 
 /*
@@ -780,6 +781,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	/* account for the status page. */
 	entries++;
 
+	/* IQ needs one extra entry to differentiate full vs empty. */
+	entries++;
+
 	/*
 	 * entries must be multiple of 16 for HW.
 	 */
@@ -801,7 +805,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 
 	chp->rhp = rhp;
 	chp->cq.size--;				/* status page */
-	chp->ibcq.cqe = chp->cq.size;
+	chp->ibcq.cqe = chp->cq.size - 1;
 	spin_lock_init(&chp->lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index be23b5eab13b..d870f9c17c1e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -306,7 +306,8 @@ static void c4iw_remove(struct c4iw_dev *dev)
 	PDBG("%s c4iw_dev %p\n", __func__, dev);
 	cancel_delayed_work_sync(&dev->db_drop_task);
 	list_del(&dev->entry);
-	c4iw_unregister_device(dev);
+	if (dev->registered)
+		c4iw_unregister_device(dev);
 	c4iw_rdev_close(&dev->rdev);
 	idr_destroy(&dev->cqidr);
 	idr_destroy(&dev->qpidr);
@@ -343,12 +344,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	list_add_tail(&devp->entry, &dev_list);
 	mutex_unlock(&dev_mutex);
 
-	if (c4iw_register_device(devp)) {
-		printk(KERN_ERR MOD "Unable to register device\n");
-		mutex_lock(&dev_mutex);
-		c4iw_remove(devp);
-		mutex_unlock(&dev_mutex);
-	}
 	if (c4iw_debugfs_root) {
 		devp->debugfs_root = debugfs_create_dir(
 					pci_name(devp->rdev.lldi.pdev),
@@ -379,9 +374,6 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 
 	for (i = 0; i < dev->rdev.lldi.nrxq; i++)
 		PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
-
-	printk(KERN_INFO MOD "Initialized device %s\n",
-	       pci_name(dev->rdev.lldi.pdev));
 out:
 	return dev;
 }
@@ -471,7 +463,41 @@ nomem:
 
 static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 {
+	struct c4iw_dev *dev = handle;
+
 	PDBG("%s new_state %u\n", __func__, new_state);
+	switch (new_state) {
+	case CXGB4_STATE_UP:
+		printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
+		if (!dev->registered) {
+			int ret;
+			ret = c4iw_register_device(dev);
+			if (ret)
+				printk(KERN_ERR MOD
+				       "%s: RDMA registration failed: %d\n",
+				       pci_name(dev->rdev.lldi.pdev), ret);
+		}
+		break;
+	case CXGB4_STATE_DOWN:
+		printk(KERN_INFO MOD "%s: Down\n",
+		       pci_name(dev->rdev.lldi.pdev));
+		if (dev->registered)
+			c4iw_unregister_device(dev);
+		break;
+	case CXGB4_STATE_START_RECOVERY:
+		printk(KERN_INFO MOD "%s: Fatal Error\n",
+		       pci_name(dev->rdev.lldi.pdev));
+		if (dev->registered)
+			c4iw_unregister_device(dev);
+		break;
+	case CXGB4_STATE_DETACH:
+		printk(KERN_INFO MOD "%s: Detach\n",
+		       pci_name(dev->rdev.lldi.pdev));
+		mutex_lock(&dev_mutex);
+		c4iw_remove(dev);
+		mutex_unlock(&dev_mutex);
+		break;
+	}
 	return 0;
 }
 
477 503
@@ -504,14 +530,12 @@ static void __exit c4iw_exit_module(void)
 {
 	struct c4iw_dev *dev, *tmp;
 
-	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
-
 	mutex_lock(&dev_mutex);
 	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
 		c4iw_remove(dev);
 	}
 	mutex_unlock(&dev_mutex);
-
+	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 	c4iw_cm_term();
 	debugfs_remove_recursive(c4iw_debugfs_root);
 }
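
Taken together, the cxgb4 hunks defer RDMA registration from device allocation to the moment the lower-level driver reports CXGB4_STATE_UP, and the new dev->registered flag keeps unregistration idempotent across the Down, recovery and detach transitions. The guard, boiled down to a sketch (assuming register/unregister maintain the flag, as the provider.c hunks below arrange):

    if (new_state == CXGB4_STATE_UP && !dev->registered)
        c4iw_register_device(dev);      /* sets dev->registered on success */
    else if (new_state != CXGB4_STATE_UP && dev->registered)
        c4iw_unregister_device(dev);    /* clears dev->registered */
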
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index a6269981e815..277ab589b44d 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -152,6 +152,7 @@ struct c4iw_dev {
 	struct list_head entry;
 	struct delayed_work db_drop_task;
 	struct dentry *debugfs_root;
+	u8 registered;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index e54ff6d25691..7f94da1a2437 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -712,8 +712,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
-	if (!mhp)
+	if (!mhp) {
+		ret = -ENOMEM;
 		goto err;
+	}
 
 	mhp->rhp = rhp;
 	ret = alloc_pbl(mhp, pbl_depth);
@@ -730,8 +732,10 @@ struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
 	mhp->attr.state = 1;
 	mmid = (stag) >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+		ret = -ENOMEM;
 		goto err3;
+	}
 
 	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
 	return &(mhp->ibmr);
@@ -755,9 +759,6 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
 	dma_addr_t dma_addr;
 	int size = sizeof *c4pl + page_list_len * sizeof(u64);
 
-	if (page_list_len > T4_MAX_FR_DEPTH)
-		return ERR_PTR(-EINVAL);
-
 	c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
 				  &dma_addr, GFP_KERNEL);
 	if (!c4pl)
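The ret = -ENOMEM additions in the two c4iw_alloc_fast_reg_mr() hunks above matter because the function's error path returns ERR_PTR(ret); without an explicit assignment, the goto would encode whatever happened to be in ret. A kernel-style sketch of the convention (struct obj and alloc_obj() are hypothetical names, not part of the driver):

#include <linux/err.h>
#include <linux/slab.h>

struct obj {
	int id;
};

/* Every failure path sets ret before jumping, so the caller always
 * receives a meaningful ERR_PTR() encoding rather than stale data. */
static struct obj *alloc_obj(void)
{
	struct obj *o;
	int ret;

	o = kzalloc(sizeof(*o), GFP_KERNEL);
	if (!o) {
		ret = -ENOMEM;	/* the same fix as in the hunks above */
		goto err;
	}
	return o;
err:
	return ERR_PTR(ret);
}

A caller then tests the result with IS_ERR() and recovers the errno with PTR_ERR().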
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index dfc49020bb9c..8f645c83a125 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -486,7 +486,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
 	dev->ibdev.iwcm->get_qp = c4iw_get_qp;
 
-	ret = ib_register_device(&dev->ibdev);
+	ret = ib_register_device(&dev->ibdev, NULL);
 	if (ret)
 		goto bail1;
 
@@ -496,6 +496,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 		if (ret)
 			goto bail2;
 	}
+	dev->registered = 1;
 	return 0;
 bail2:
 	ib_unregister_device(&dev->ibdev);
@@ -514,5 +515,6 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
 				   c4iw_class_attributes[i]);
 	ib_unregister_device(&dev->ibdev);
 	kfree(dev->ibdev.iwcm);
+	dev->registered = 0;
 	return;
 }
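The registered flag threaded through these three files makes the register/unregister pair idempotent, so repeated CXGB4_STATE_UP or CXGB4_STATE_DOWN events cannot double-register or double-free. A standalone sketch of the guard pattern, assuming hypothetical do_register()/do_unregister() helpers (in the driver itself the flag is set inside c4iw_register_device() and cleared in c4iw_unregister_device()):

#include <stdio.h>

struct dev_state {
	unsigned char registered;	/* mirrors c4iw_dev.registered */
};

static int do_register(struct dev_state *d)    { puts("register");   return 0; }
static void do_unregister(struct dev_state *d) { puts("unregister"); }

/* Only act on real transitions; repeated UP or DOWN events are no-ops. */
static void state_change(struct dev_state *d, int up)
{
	if (up && !d->registered) {
		if (do_register(d) == 0)
			d->registered = 1;
	} else if (!up && d->registered) {
		do_unregister(d);
		d->registered = 0;
	}
}

int main(void)
{
	struct dev_state d = { 0 };

	state_change(&d, 1);	/* registers */
	state_change(&d, 1);	/* no-op */
	state_change(&d, 0);	/* unregisters */
	state_change(&d, 0);	/* no-op */
	return 0;
}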
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 83a01dc0c4c1..0c28ed1eafa6 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -572,9 +572,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		err = build_rdma_write(wqe, wr, &len16);
 		break;
 	case IB_WR_RDMA_READ:
+	case IB_WR_RDMA_READ_WITH_INV:
 		fw_opcode = FW_RI_RDMA_READ_WR;
 		swsqe->opcode = FW_RI_READ_REQ;
-		fw_flags = 0;
+		if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+			fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
+		else
+			fw_flags = 0;
 		err = build_rdma_read(wqe, wr, &len16);
 		if (err)
 			break;
@@ -588,6 +592,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		err = build_fastreg(wqe, wr, &len16);
 		break;
 	case IB_WR_LOCAL_INV:
+		if (wr->send_flags & IB_SEND_FENCE)
+			fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
 		fw_opcode = FW_RI_INV_LSTAG_WR;
 		swsqe->opcode = FW_RI_LOCAL_INV;
 		err = build_inv_stag(wqe, wr, &len16);
@@ -1339,7 +1345,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	wait_event(qhp->wait, !qhp->ep);
 
 	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-	remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
 	atomic_dec(&qhp->refcnt);
 	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
 
@@ -1442,30 +1447,26 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	if (ret)
 		goto err2;
 
-	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
-	if (ret)
-		goto err3;
-
 	if (udata) {
 		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
 		if (!mm1) {
 			ret = -ENOMEM;
-			goto err4;
+			goto err3;
 		}
 		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
 		if (!mm2) {
 			ret = -ENOMEM;
-			goto err5;
+			goto err4;
 		}
 		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
 		if (!mm3) {
 			ret = -ENOMEM;
-			goto err6;
+			goto err5;
 		}
 		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
 		if (!mm4) {
 			ret = -ENOMEM;
-			goto err7;
+			goto err6;
 		}
 
 		uresp.qid_mask = rhp->rdev.qpmask;
@@ -1487,7 +1488,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 		spin_unlock(&ucontext->mmap_lock);
 		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
 		if (ret)
-			goto err8;
+			goto err7;
 		mm1->key = uresp.sq_key;
 		mm1->addr = virt_to_phys(qhp->wq.sq.queue);
 		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
@@ -1511,16 +1512,14 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
 	     qhp->wq.sq.qid);
 	return &qhp->ibqp;
-err8:
-	kfree(mm4);
 err7:
-	kfree(mm3);
+	kfree(mm4);
 err6:
-	kfree(mm2);
+	kfree(mm3);
 err5:
-	kfree(mm1);
+	kfree(mm2);
 err4:
-	remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
+	kfree(mm1);
 err3:
 	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 err2:
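Dropping the rq.qid insert_handle() call removes one unwind step, which is why every error label after it is renumbered: the labels must undo, in reverse order, exactly the steps that succeeded before the failure. A minimal standalone sketch of the cascading-label pattern, with hypothetical step/undo helpers:

#include <errno.h>

static int step_a(void)  { return 0; }
static int step_b(void)  { return 0; }
static int step_c(void)  { return -ENOMEM; }	/* simulated failure */
static void undo_a(void) { }
static void undo_b(void) { }

/* Each label undoes only what succeeded before the jump; adding or
 * removing a step renumbers every label below it, as in the hunk above. */
static int setup(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err0;
	ret = step_b();
	if (ret)
		goto err1;
	ret = step_c();
	if (ret)
		goto err2;
	return 0;
err2:
	undo_b();
err1:
	undo_a();
err0:
	return ret;
}

int main(void)
{
	return setup() ? 1 : 0;	/* returns 1 here: step_c() fails */
}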
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index d0e8af352408..1057cb96302e 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -41,11 +41,13 @@
 #define T4_MAX_NUM_QP (1<<16)
 #define T4_MAX_NUM_CQ (1<<15)
 #define T4_MAX_NUM_PD (1<<15)
-#define T4_MAX_PBL_SIZE 256
-#define T4_MAX_RQ_SIZE 1024
-#define T4_MAX_SQ_SIZE 1024
-#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE-1)
-#define T4_MAX_CQ_DEPTH 8192
+#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
+#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
+#define T4_MAX_IQ_SIZE (65520 - 1)
+#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
+#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
+#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
+#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
 #define T4_MAX_NUM_STAG (1<<15)
 #define T4_MAX_MR_SIZE (~0ULL - 1)
 #define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
@@ -79,12 +81,11 @@ struct t4_status_page {
 			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
 #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
 			sizeof(struct fw_ri_immd)))
-#define T4_MAX_FR_DEPTH 255
+#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
 
 #define T4_RQ_NUM_SLOTS 2
 #define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
-#define T4_MAX_RECV_SGE ((T4_RQ_NUM_BYTES - sizeof(struct fw_ri_recv_wr) - \
-			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
+#define T4_MAX_RECV_SGE 4
 
 union t4_wr {
 	struct fw_ri_res_wr res;
@@ -434,7 +435,7 @@ struct t4_cq {
 	struct c4iw_rdev *rdev;
 	u64 ugts;
 	size_t memsize;
-	u64 timestamp;
+	__be64 bits_type_ts;
 	u32 cqid;
 	u16 size; /* including status page */
 	u16 cidx;
@@ -449,25 +450,17 @@ struct t4_cq {
 static inline int t4_arm_cq(struct t4_cq *cq, int se)
 {
 	u32 val;
-	u16 inc;
 
-	do {
-		/*
-		 * inc must be less the both the max update value -and-
-		 * the size of the CQ.
-		 */
-		inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc :
-		      CIDXINC_MASK;
-		inc = inc <= (cq->size - 1) ? inc : (cq->size - 1);
-		if (inc == cq->cidx_inc)
-			val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) |
-			      INGRESSQID(cq->cqid);
-		else
-			val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) |
-			      INGRESSQID(cq->cqid);
-		cq->cidx_inc -= inc;
+	while (cq->cidx_inc > CIDXINC_MASK) {
+		val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
+		      INGRESSQID(cq->cqid);
 		writel(val, cq->gts);
-	} while (cq->cidx_inc);
+		cq->cidx_inc -= CIDXINC_MASK;
+	}
+	val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
+	      INGRESSQID(cq->cqid);
+	writel(val, cq->gts);
+	cq->cidx_inc = 0;
 	return 0;
 }
 
@@ -487,7 +480,9 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
 
 static inline void t4_hwcq_consume(struct t4_cq *cq)
 {
-	cq->cidx_inc++;
+	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
+	if (++cq->cidx_inc == cq->size)
+		cq->cidx_inc = 0;
 	if (++cq->cidx == cq->size) {
 		cq->cidx = 0;
 		cq->gen ^= 1;
@@ -501,20 +496,23 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
 
 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 {
-	int ret = 0;
-	u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts);
+	int ret;
+	u16 prev_cidx;
 
-	if (G_CQE_GENBIT(bits_type_ts) == cq->gen) {
-		*cqe = &cq->queue[cq->cidx];
-		cq->timestamp = G_CQE_TS(bits_type_ts);
-	} else if (G_CQE_TS(bits_type_ts) > cq->timestamp)
-		ret = -EOVERFLOW;
-	else
-		ret = -ENODATA;
-	if (ret == -EOVERFLOW) {
-		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+	if (cq->cidx == 0)
+		prev_cidx = cq->size - 1;
+	else
+		prev_cidx = cq->cidx - 1;
+
+	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
+		ret = -EOVERFLOW;
 		cq->error = 1;
-	}
+		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
+		*cqe = &cq->queue[cq->cidx];
+		ret = 0;
+	} else
+		ret = -ENODATA;
 	return ret;
 }
 
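Two ideas drive the t4.h hunks above: t4_arm_cq() now flushes the accumulated consumer-index credit to the GTS doorbell in chunks that fit the CIDXINC field, and overflow is detected by saving bits_type_ts of the last entry the host consumed and checking whether the hardware has since overwritten the slot just behind the current index. A standalone sketch of the chunked doorbell only; ring_doorbell() stands in for writel(val, cq->gts), and the mask width is an assumption here, not the hardware value:

#include <stdint.h>
#include <stdio.h>

#define CIDXINC_MASK 0x7ff	/* assumed width of the CIDXINC field */

static void ring_doorbell(uint32_t cidxinc)
{
	printf("doorbell: cidxinc=%u\n", cidxinc);
}

/* Flush pending consumer-index credits; every write fits in the field. */
static void arm_cq(uint32_t *cidx_inc)
{
	while (*cidx_inc > CIDXINC_MASK) {
		ring_doorbell(CIDXINC_MASK);
		*cidx_inc -= CIDXINC_MASK;
	}
	ring_doorbell(*cidx_inc);	/* final write also arms the CQ */
	*cidx_inc = 0;
}

int main(void)
{
	uint32_t pending = 5000;

	arm_cq(&pending);	/* rings 2047 + 2047 + 906 */
	return 0;
}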
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 07cae552cafb..e571e60ecb88 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -847,7 +847,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
 		ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
 		if (!create_comp_task(pool, cpu)) {
 			ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
 		}
 		break;
 	case CPU_UP_CANCELED:
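notifier_from_errno() lets the CPU-hotplug callback report why it vetoed the transition rather than returning the opaque NOTIFY_BAD. A userspace sketch of the encode/decode pair as I understand the include/linux/notifier.h helpers (constants copied from that header; verify against your tree):

#include <errno.h>
#include <stdio.h>

#define NOTIFY_OK	 0x0001
#define NOTIFY_STOP_MASK 0x8000

static int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
	return NOTIFY_OK;
}

static int notifier_to_errno(int ret)
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}

int main(void)
{
	int ret = notifier_from_errno(-ENOMEM);

	/* The chain sees a stop value; the caller decodes -ENOMEM (-12). */
	printf("encoded=0x%x decoded=%d\n", ret, notifier_to_errno(ret));
	return 0;
}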
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 129a6bebd6e3..ecb51b396c42 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -291,8 +291,9 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
 	};
 
 	ehca_gen_dbg("Probing adapter %s...",
-		     shca->ofdev->node->full_name);
-	loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL);
+		     shca->ofdev->dev.of_node->full_name);
+	loc_code = of_get_property(shca->ofdev->dev.of_node, "ibm,loc-code",
+				   NULL);
 	if (loc_code)
 		ehca_gen_dbg(" ... location code=%s", loc_code);
 
@@ -720,16 +721,16 @@ static int __devinit ehca_probe(struct of_device *dev,
 	int ret, i, eq_size;
 	unsigned long flags;
 
-	handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
+	handle = of_get_property(dev->dev.of_node, "ibm,hca-handle", NULL);
 	if (!handle) {
 		ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
-			     dev->node->full_name);
+			     dev->dev.of_node->full_name);
 		return -ENODEV;
 	}
 
 	if (!(*handle)) {
 		ehca_gen_err("Wrong eHCA handle for adapter: %s.",
-			     dev->node->full_name);
+			     dev->dev.of_node->full_name);
 		return -ENODEV;
 	}
 
@@ -798,7 +799,7 @@ static int __devinit ehca_probe(struct of_device *dev,
 		goto probe5;
 	}
 
-	ret = ib_register_device(&shca->ib_device);
+	ret = ib_register_device(&shca->ib_device, NULL);
 	if (ret) {
 		ehca_err(&shca->ib_device,
 			 "ib_register_device() failed ret=%i", ret);
@@ -936,12 +937,13 @@ static struct of_device_id ehca_device_table[] =
 MODULE_DEVICE_TABLE(of, ehca_device_table);
 
 static struct of_platform_driver ehca_driver = {
-	.name        = "ehca",
-	.match_table = ehca_device_table,
 	.probe       = ehca_probe,
 	.remove      = ehca_remove,
 	.driver = {
+		.name = "ehca",
+		.owner = THIS_MODULE,
 		.groups = ehca_drv_attr_groups,
+		.of_match_table = ehca_device_table,
 	},
 };
 
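The ehca_driver hunk is the of_platform_driver conversion of that cycle: the bus-specific name/match_table members go away and identification moves into the embedded struct device_driver as .name, .owner and .of_match_table. A sketch of the shape with stand-in types so it compiles outside the kernel (everything below except the moved field names is hypothetical):

/* Stand-ins for the kernel structures, reduced to the relevant fields. */
struct device_driver {
	const char *name;
	void *owner;			/* THIS_MODULE in the kernel */
	const void *of_match_table;	/* struct of_device_id * there */
};

struct of_platform_driver {
	int (*probe)(void *dev);
	int (*remove)(void *dev);
	struct device_driver driver;
};

static int my_probe(void *dev)  { (void)dev; return 0; }
static int my_remove(void *dev) { (void)dev; return 0; }

static const char my_match_table[] = "compatible-ids";	/* placeholder */

/* New-style layout: identification lives inside .driver, not at top level. */
static struct of_platform_driver my_driver = {
	.probe  = my_probe,
	.remove = my_remove,
	.driver = {
		.name           = "mydrv",
		.owner          = 0,
		.of_match_table = my_match_table,
	},
};

int main(void)
{
	return my_driver.probe(0);
}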
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 3c7968f25ec2..1d9bb115cbf6 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,9 +1,11 @@
 config INFINIBAND_IPATH
-	tristate "QLogic InfiniPath Driver"
-	depends on 64BIT && NET
+	tristate "QLogic HTX HCA support"
+	depends on 64BIT && NET && HT_IRQ
 	---help---
-	This is a driver for QLogic InfiniPath host channel adapters,
+	This is a driver for the obsolete QLogic Hyper-Transport
+	IB host channel adapter (model QHT7140),
 	including InfiniBand verbs support. This driver allows these
 	devices to be used with both kernel upper level protocols such
 	as IP-over-InfiniBand as well as with userspace applications
 	(in conjunction with InfiniBand userspace access).
+	For QLogic PCIe QLE based cards, use the QIB driver instead.
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index bf9450061986..fa3df82681df 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -29,13 +29,9 @@ ib_ipath-y := \
 	ipath_user_pages.o \
 	ipath_user_sdma.o \
 	ipath_verbs_mcast.o \
-	ipath_verbs.o \
-	ipath_iba7220.o \
-	ipath_sd7220.o \
-	ipath_sd7220_img.o
+	ipath_verbs.o
 
 ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
-ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
 
 ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
 ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 6302626d17f0..21337468c652 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -132,18 +132,13 @@ static int __devinit ipath_init_one(struct pci_dev *,
 
 /* Only needed for registration, nothing else needs this info */
 #define PCI_VENDOR_ID_PATHSCALE 0x1fc1
-#define PCI_VENDOR_ID_QLOGIC 0x1077
 #define PCI_DEVICE_ID_INFINIPATH_HT 0xd
-#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10
-#define PCI_DEVICE_ID_INFINIPATH_7220 0x7220
 
 /* Number of seconds before our card status check... */
 #define STATUS_TIMEOUT 60
 
 static const struct pci_device_id ipath_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_INFINIPATH_7220) },
 	{ 0, }
 };
 
@@ -521,30 +516,9 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 	/* setup the chip-specific functions, as early as possible. */
 	switch (ent->device) {
 	case PCI_DEVICE_ID_INFINIPATH_HT:
-#ifdef CONFIG_HT_IRQ
 		ipath_init_iba6110_funcs(dd);
 		break;
-#else
-		ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
-			      "CONFIG_HT_IRQ is not enabled\n", ent->device);
-		return -ENODEV;
-#endif
-	case PCI_DEVICE_ID_INFINIPATH_PE800:
-#ifdef CONFIG_PCI_MSI
-		ipath_init_iba6120_funcs(dd);
-		break;
-#else
-		ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
-			      "CONFIG_PCI_MSI is not enabled\n", ent->device);
-		return -ENODEV;
-#endif
-	case PCI_DEVICE_ID_INFINIPATH_7220:
-#ifndef CONFIG_PCI_MSI
-		ipath_dbg("CONFIG_PCI_MSI is not enabled, "
-			  "using INTx for unit %u\n", dd->ipath_unit);
-#endif
-		ipath_init_iba7220_funcs(dd);
-		break;
+
 	default:
 		ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
 			      "failing\n", ent->device);
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
deleted file mode 100644
index 4b4a30b0dabd..000000000000
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ /dev/null
@@ -1,1862 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33/*
34 * This file contains all of the code that is specific to the
35 * InfiniPath PCIe chip.
36 */
37
38#include <linux/interrupt.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <rdma/ib_verbs.h>
42
43#include "ipath_kernel.h"
44#include "ipath_registers.h"
45
46static void ipath_setup_pe_setextled(struct ipath_devdata *, u64, u64);
47
48/*
49 * This file contains all the chip-specific register information and
50 * access functions for the QLogic InfiniPath PCI-Express chip.
51 *
52 * This lists the InfiniPath registers, in the actual chip layout.
53 * This structure should never be directly accessed.
54 */
55struct _infinipath_do_not_use_kernel_regs {
56 unsigned long long Revision;
57 unsigned long long Control;
58 unsigned long long PageAlign;
59 unsigned long long PortCnt;
60 unsigned long long DebugPortSelect;
61 unsigned long long Reserved0;
62 unsigned long long SendRegBase;
63 unsigned long long UserRegBase;
64 unsigned long long CounterRegBase;
65 unsigned long long Scratch;
66 unsigned long long Reserved1;
67 unsigned long long Reserved2;
68 unsigned long long IntBlocked;
69 unsigned long long IntMask;
70 unsigned long long IntStatus;
71 unsigned long long IntClear;
72 unsigned long long ErrorMask;
73 unsigned long long ErrorStatus;
74 unsigned long long ErrorClear;
75 unsigned long long HwErrMask;
76 unsigned long long HwErrStatus;
77 unsigned long long HwErrClear;
78 unsigned long long HwDiagCtrl;
79 unsigned long long MDIO;
80 unsigned long long IBCStatus;
81 unsigned long long IBCCtrl;
82 unsigned long long ExtStatus;
83 unsigned long long ExtCtrl;
84 unsigned long long GPIOOut;
85 unsigned long long GPIOMask;
86 unsigned long long GPIOStatus;
87 unsigned long long GPIOClear;
88 unsigned long long RcvCtrl;
89 unsigned long long RcvBTHQP;
90 unsigned long long RcvHdrSize;
91 unsigned long long RcvHdrCnt;
92 unsigned long long RcvHdrEntSize;
93 unsigned long long RcvTIDBase;
94 unsigned long long RcvTIDCnt;
95 unsigned long long RcvEgrBase;
96 unsigned long long RcvEgrCnt;
97 unsigned long long RcvBufBase;
98 unsigned long long RcvBufSize;
99 unsigned long long RxIntMemBase;
100 unsigned long long RxIntMemSize;
101 unsigned long long RcvPartitionKey;
102 unsigned long long Reserved3;
103 unsigned long long RcvPktLEDCnt;
104 unsigned long long Reserved4[8];
105 unsigned long long SendCtrl;
106 unsigned long long SendPIOBufBase;
107 unsigned long long SendPIOSize;
108 unsigned long long SendPIOBufCnt;
109 unsigned long long SendPIOAvailAddr;
110 unsigned long long TxIntMemBase;
111 unsigned long long TxIntMemSize;
112 unsigned long long Reserved5;
113 unsigned long long PCIeRBufTestReg0;
114 unsigned long long PCIeRBufTestReg1;
115 unsigned long long Reserved51[6];
116 unsigned long long SendBufferError;
117 unsigned long long SendBufferErrorCONT1;
118 unsigned long long Reserved6SBE[6];
119 unsigned long long RcvHdrAddr0;
120 unsigned long long RcvHdrAddr1;
121 unsigned long long RcvHdrAddr2;
122 unsigned long long RcvHdrAddr3;
123 unsigned long long RcvHdrAddr4;
124 unsigned long long Reserved7RHA[11];
125 unsigned long long RcvHdrTailAddr0;
126 unsigned long long RcvHdrTailAddr1;
127 unsigned long long RcvHdrTailAddr2;
128 unsigned long long RcvHdrTailAddr3;
129 unsigned long long RcvHdrTailAddr4;
130 unsigned long long Reserved8RHTA[11];
131 unsigned long long Reserved9SW[8];
132 unsigned long long SerdesConfig0;
133 unsigned long long SerdesConfig1;
134 unsigned long long SerdesStatus;
135 unsigned long long XGXSConfig;
136 unsigned long long IBPLLCfg;
137 unsigned long long Reserved10SW2[3];
138 unsigned long long PCIEQ0SerdesConfig0;
139 unsigned long long PCIEQ0SerdesConfig1;
140 unsigned long long PCIEQ0SerdesStatus;
141 unsigned long long Reserved11;
142 unsigned long long PCIEQ1SerdesConfig0;
143 unsigned long long PCIEQ1SerdesConfig1;
144 unsigned long long PCIEQ1SerdesStatus;
145 unsigned long long Reserved12;
146};
147
148struct _infinipath_do_not_use_counters {
149 __u64 LBIntCnt;
150 __u64 LBFlowStallCnt;
151 __u64 Reserved1;
152 __u64 TxUnsupVLErrCnt;
153 __u64 TxDataPktCnt;
154 __u64 TxFlowPktCnt;
155 __u64 TxDwordCnt;
156 __u64 TxLenErrCnt;
157 __u64 TxMaxMinLenErrCnt;
158 __u64 TxUnderrunCnt;
159 __u64 TxFlowStallCnt;
160 __u64 TxDroppedPktCnt;
161 __u64 RxDroppedPktCnt;
162 __u64 RxDataPktCnt;
163 __u64 RxFlowPktCnt;
164 __u64 RxDwordCnt;
165 __u64 RxLenErrCnt;
166 __u64 RxMaxMinLenErrCnt;
167 __u64 RxICRCErrCnt;
168 __u64 RxVCRCErrCnt;
169 __u64 RxFlowCtrlErrCnt;
170 __u64 RxBadFormatCnt;
171 __u64 RxLinkProblemCnt;
172 __u64 RxEBPCnt;
173 __u64 RxLPCRCErrCnt;
174 __u64 RxBufOvflCnt;
175 __u64 RxTIDFullErrCnt;
176 __u64 RxTIDValidErrCnt;
177 __u64 RxPKeyMismatchCnt;
178 __u64 RxP0HdrEgrOvflCnt;
179 __u64 RxP1HdrEgrOvflCnt;
180 __u64 RxP2HdrEgrOvflCnt;
181 __u64 RxP3HdrEgrOvflCnt;
182 __u64 RxP4HdrEgrOvflCnt;
183 __u64 RxP5HdrEgrOvflCnt;
184 __u64 RxP6HdrEgrOvflCnt;
185 __u64 RxP7HdrEgrOvflCnt;
186 __u64 RxP8HdrEgrOvflCnt;
187 __u64 Reserved6;
188 __u64 Reserved7;
189 __u64 IBStatusChangeCnt;
190 __u64 IBLinkErrRecoveryCnt;
191 __u64 IBLinkDownedCnt;
192 __u64 IBSymbolErrCnt;
193};
194
195#define IPATH_KREG_OFFSET(field) (offsetof( \
196 struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
197#define IPATH_CREG_OFFSET(field) (offsetof( \
198 struct _infinipath_do_not_use_counters, field) / sizeof(u64))
199
200static const struct ipath_kregs ipath_pe_kregs = {
201 .kr_control = IPATH_KREG_OFFSET(Control),
202 .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
203 .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
204 .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
205 .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
206 .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
207 .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
208 .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
209 .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
210 .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
211 .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
212 .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
213 .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
214 .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
215 .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
216 .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
217 .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
218 .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
219 .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
220 .kr_intclear = IPATH_KREG_OFFSET(IntClear),
221 .kr_intmask = IPATH_KREG_OFFSET(IntMask),
222 .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
223 .kr_mdio = IPATH_KREG_OFFSET(MDIO),
224 .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
225 .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
226 .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
227 .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
228 .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
229 .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
230 .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
231 .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
232 .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
233 .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
234 .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
235 .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
236 .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
237 .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
238 .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
239 .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
240 .kr_revision = IPATH_KREG_OFFSET(Revision),
241 .kr_scratch = IPATH_KREG_OFFSET(Scratch),
242 .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
243 .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
244 .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
245 .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
246 .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
247 .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
248 .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
249 .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
250 .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
251 .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
252 .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
253 .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
254 .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
255 .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
256 .kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg),
257
258 /*
259 * These should not be used directly via ipath_write_kreg64(),
260 * use them with ipath_write_kreg64_port(),
261 */
262 .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
263 .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
264
265 /* The rcvpktled register controls one of the debug port signals, so
266 * a packet activity LED can be connected to it. */
267 .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
268 .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
269 .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
270 .kr_pcieq0serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0),
271 .kr_pcieq0serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1),
272 .kr_pcieq0serdesstatus = IPATH_KREG_OFFSET(PCIEQ0SerdesStatus),
273 .kr_pcieq1serdesconfig0 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0),
274 .kr_pcieq1serdesconfig1 = IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1),
275 .kr_pcieq1serdesstatus = IPATH_KREG_OFFSET(PCIEQ1SerdesStatus)
276};
277
278static const struct ipath_cregs ipath_pe_cregs = {
279 .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
280 .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
281 .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
282 .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
283 .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
284 .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
285 .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
286 .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
287 .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
288 .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
289 .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
290 .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
291 .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
292 .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
293 .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
294 .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
295 .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
296 .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
297 .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
298 .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
299 .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
300 .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
301 .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
302 .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
303 .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
304 .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
305 .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
306 .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
307 .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
308 .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
309 .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
310 .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
311 .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
312};
313
314/* kr_control bits */
315#define INFINIPATH_C_RESET 1U
316
317/* kr_intstatus, kr_intclear, kr_intmask bits */
318#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
319#define INFINIPATH_I_RCVURG_SHIFT 0
320#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)
321#define INFINIPATH_I_RCVAVAIL_SHIFT 12
322
323/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
324#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
325#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
326#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
327#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
328#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
329#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
330#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
331#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
332#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
333#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
334#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
335#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
336
337#define IBA6120_IBCS_LINKTRAININGSTATE_MASK 0xf
338#define IBA6120_IBCS_LINKSTATE_SHIFT 4
339
340/* kr_extstatus bits */
341#define INFINIPATH_EXTS_FREQSEL 0x2
342#define INFINIPATH_EXTS_SERDESSEL 0x4
343#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
344#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000
345
346/* kr_xgxsconfig bits */
347#define INFINIPATH_XGXS_RESET 0x5ULL
348
349#define _IPATH_GPIO_SDA_NUM 1
350#define _IPATH_GPIO_SCL_NUM 0
351
352#define IPATH_GPIO_SDA (1ULL << \
353 (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
354#define IPATH_GPIO_SCL (1ULL << \
355 (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
356
357#define INFINIPATH_RT_BUFSIZE_MASK 0xe0000000ULL
358#define INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid) \
359 ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
360#define INFINIPATH_RT_BUFSIZE(tid) (1 << INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid))
361#define INFINIPATH_RT_IS_VALID(tid) \
362 (((tid) & INFINIPATH_RT_BUFSIZE_MASK) && \
363 ((((tid) & INFINIPATH_RT_BUFSIZE_MASK) != INFINIPATH_RT_BUFSIZE_MASK)))
364#define INFINIPATH_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
365#define INFINIPATH_RT_ADDR_SHIFT 10
366
367#define INFINIPATH_R_INTRAVAIL_SHIFT 16
368#define INFINIPATH_R_TAILUPD_SHIFT 31
369
370/* 6120 specific hardware errors... */
371static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
372 INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
373 INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
374 /*
375 * In practice, it's unlikely that we'll see PCIe PLL, or bus
376 * parity or memory parity error failures, because most likely we
377 * won't be able to talk to the core of the chip. Nonetheless, we
378 * might see them, if they are in parts of the PCIe core that aren't
379 * essential.
380 */
381 INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
382 INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
383 INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
384 INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
385 INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
386 INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
387 INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
388};
389
390#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
391 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
392 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
393#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
394 << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
395
396static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
397 u32, unsigned long);
398
399/*
400 * On platforms using this chip, and not having ordered WC stores, we
401 * can get TXE parity errors due to speculative reads to the PIO buffers,
402 * and this, due to a chip bug can result in (many) false parity error
403 * reports. So it's a debug print on those, and an info print on systems
404 * where the speculative reads don't occur.
405 */
406static void ipath_pe_txe_recover(struct ipath_devdata *dd)
407{
408 if (ipath_unordered_wc())
409 ipath_dbg("Recovering from TXE PIO parity error\n");
410 else {
411 ++ipath_stats.sps_txeparity;
412 dev_info(&dd->pcidev->dev,
413 "Recovering from TXE PIO parity error\n");
414 }
415}
416
417
418/**
419 * ipath_pe_handle_hwerrors - display hardware errors.
420 * @dd: the infinipath device
421 * @msg: the output buffer
422 * @msgl: the size of the output buffer
423 *
424 * Use same msg buffer as regular errors to avoid excessive stack
425 * use. Most hardware errors are catastrophic, but for right now,
426 * we'll print them and continue. We reuse the same message buffer as
427 * ipath_handle_errors() to avoid excessive stack usage.
428 */
429static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
430 size_t msgl)
431{
432 ipath_err_t hwerrs;
433 u32 bits, ctrl;
434 int isfatal = 0;
435 char bitsmsg[64];
436 int log_idx;
437
438 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
439 if (!hwerrs) {
440 /*
441 * better than printing confusing messages
442 * This seems to be related to clearing the crc error, or
443 * the pll error during init.
444 */
445 ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
446 return;
447 } else if (hwerrs == ~0ULL) {
448 ipath_dev_err(dd, "Read of hardware error status failed "
449 "(all bits set); ignoring\n");
450 return;
451 }
452 ipath_stats.sps_hwerrs++;
453
454 /* Always clear the error status register, except MEMBISTFAIL,
455 * regardless of whether we continue or stop using the chip.
456 * We want that set so we know it failed, even across driver reload.
457 * We'll still ignore it in the hwerrmask. We do this partly for
458 * diagnostics, but also for support */
459 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
460 hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
461
462 hwerrs &= dd->ipath_hwerrmask;
463
464 /* We log some errors to EEPROM, check if we have any of those. */
465 for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
466 if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
467 ipath_inc_eeprom_err(dd, log_idx, 1);
468
469 /*
470 * make sure we get this much out, unless told to be quiet,
471 * or it's occurred within the last 5 seconds
472 */
473 if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
474 RXE_EAGER_PARITY)) ||
475 (ipath_debug & __IPATH_VERBDBG))
476 dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
477 "(cleared)\n", (unsigned long long) hwerrs);
478 dd->ipath_lasthwerror |= hwerrs;
479
480 if (hwerrs & ~dd->ipath_hwe_bitsextant)
481 ipath_dev_err(dd, "hwerror interrupt with unknown errors "
482 "%llx set\n", (unsigned long long)
483 (hwerrs & ~dd->ipath_hwe_bitsextant));
484
485 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
486 if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
487 /*
488 * parity errors in send memory are recoverable,
489 * just cancel the send (if indicated in * sendbuffererror),
490 * count the occurrence, unfreeze (if no other handled
491 * hardware error bits are set), and continue. They can
492 * occur if a processor speculative read is done to the PIO
493 * buffer while we are sending a packet, for example.
494 */
495 if (hwerrs & TXE_PIO_PARITY) {
496 ipath_pe_txe_recover(dd);
497 hwerrs &= ~TXE_PIO_PARITY;
498 }
499 if (!hwerrs) {
500 static u32 freeze_cnt;
501
502 freeze_cnt++;
503 ipath_dbg("Clearing freezemode on ignored or recovered "
504 "hardware error (%u)\n", freeze_cnt);
505 ipath_clear_freeze(dd);
506 }
507 }
508
509 *msg = '\0';
510
511 if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
512 strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
513 msgl);
514 /* ignore from now on, so disable until driver reloaded */
515 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
516 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
517 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
518 dd->ipath_hwerrmask);
519 }
520
521 ipath_format_hwerrors(hwerrs,
522 ipath_6120_hwerror_msgs,
523 sizeof(ipath_6120_hwerror_msgs)/
524 sizeof(ipath_6120_hwerror_msgs[0]),
525 msg, msgl);
526
527 if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
528 << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
529 bits = (u32) ((hwerrs >>
530 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
531 INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
532 snprintf(bitsmsg, sizeof bitsmsg,
533 "[PCIe Mem Parity Errs %x] ", bits);
534 strlcat(msg, bitsmsg, msgl);
535 }
536
537#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
538 INFINIPATH_HWE_COREPLL_RFSLIP )
539
540 if (hwerrs & _IPATH_PLL_FAIL) {
541 snprintf(bitsmsg, sizeof bitsmsg,
542 "[PLL failed (%llx), InfiniPath hardware unusable]",
543 (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
544 strlcat(msg, bitsmsg, msgl);
545 /* ignore from now on, so disable until driver reloaded */
546 dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
547 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
548 dd->ipath_hwerrmask);
549 }
550
551 if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
552 /*
553 * If it occurs, it is left masked since the external
554 * interface is unused
555 */
556 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
557 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
558 dd->ipath_hwerrmask);
559 }
560
561 if (hwerrs) {
562 /*
563 * if any set that we aren't ignoring; only
564 * make the complaint once, in case it's stuck
565 * or recurring, and we get here multiple
566 * times.
567 */
568 ipath_dev_err(dd, "%s hardware error\n", msg);
569 if (dd->ipath_flags & IPATH_INITTED) {
570 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
571 ipath_setup_pe_setextled(dd,
572 INFINIPATH_IBCS_L_STATE_DOWN,
573 INFINIPATH_IBCS_LT_STATE_DISABLED);
574 ipath_dev_err(dd, "Fatal Hardware Error (freeze "
575 "mode), no longer usable, SN %.16s\n",
576 dd->ipath_serial);
577 isfatal = 1;
578 }
579 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
580 /* mark as having had error */
581 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
582 /*
583 * mark as not usable, at a minimum until driver
584 * is reloaded, probably until reboot, since no
585 * other reset is possible.
586 */
587 dd->ipath_flags &= ~IPATH_INITTED;
588 } else
589 *msg = 0; /* recovered from all of them */
590
591 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg && msg) {
592 /*
593 * for /sys status file ; if no trailing brace is copied,
594 * we'll know it was truncated.
595 */
596 snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
597 "{%s}", msg);
598 }
599}
600
601/**
602 * ipath_pe_boardname - fill in the board name
603 * @dd: the infinipath device
604 * @name: the output buffer
605 * @namelen: the size of the output buffer
606 *
607 * info is based on the board revision register
608 */
609static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
610 size_t namelen)
611{
612 char *n = NULL;
613 u8 boardrev = dd->ipath_boardrev;
614 int ret;
615
616 switch (boardrev) {
617 case 0:
618 n = "InfiniPath_Emulation";
619 break;
620 case 1:
621 n = "InfiniPath_QLE7140-Bringup";
622 break;
623 case 2:
624 n = "InfiniPath_QLE7140";
625 break;
626 case 3:
627 n = "InfiniPath_QMI7140";
628 break;
629 case 4:
630 n = "InfiniPath_QEM7140";
631 break;
632 case 5:
633 n = "InfiniPath_QMH7140";
634 break;
635 case 6:
636 n = "InfiniPath_QLE7142";
637 break;
638 default:
639 ipath_dev_err(dd,
640 "Don't yet know about board with ID %u\n",
641 boardrev);
642 snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
643 boardrev);
644 break;
645 }
646 if (n)
647 snprintf(name, namelen, "%s", n);
648
649 if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev>2) {
650 ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n",
651 dd->ipath_majrev, dd->ipath_minrev);
652 ret = 1;
653 } else {
654 ret = 0;
655 if (dd->ipath_minrev >= 2)
656 dd->ipath_f_put_tid = ipath_pe_put_tid_2;
657 }
658
659 /*
660 * set here, not in ipath_init_*_funcs because we have to do
661 * it after we can read chip registers.
662 */
663 dd->ipath_ureg_align =
664 ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
665
666 return ret;
667}
668
669/**
670 * ipath_pe_init_hwerrors - enable hardware errors
671 * @dd: the infinipath device
672 *
673 * now that we have finished initializing everything that might reasonably
674 * cause a hardware error, and cleared those errors bits as they occur,
675 * we can enable hardware errors in the mask (potentially enabling
676 * freeze mode), and enable hardware errors as errors (along with
677 * everything else) in errormask
678 */
679static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
680{
681 ipath_err_t val;
682 u64 extsval;
683
684 extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
685
686 if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
687 ipath_dev_err(dd, "MemBIST did not complete!\n");
688 if (extsval & INFINIPATH_EXTS_MEMBIST_FOUND)
689 ipath_dbg("MemBIST corrected\n");
690
691 val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */
692
693 if (!dd->ipath_boardrev) // no PLL for Emulator
694 val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
695
696 if (dd->ipath_minrev < 2) {
697 /* workaround bug 9460 in internal interface bus parity
698 * checking. Fixed (HW bug 9490) in Rev2.
699 */
700 val &= ~INFINIPATH_HWE_PCIEBUSPARITYRADM;
701 }
702 dd->ipath_hwerrmask = val;
703}
704
705/**
706 * ipath_pe_bringup_serdes - bring up the serdes
707 * @dd: the infinipath device
708 */
709static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
710{
711 u64 val, config1, prev_val;
712 int ret = 0;
713
714 ipath_dbg("Trying to bringup serdes\n");
715
716 if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
717 INFINIPATH_HWE_SERDESPLLFAILED) {
718 ipath_dbg("At start, serdes PLL failed bit set "
719 "in hwerrstatus, clearing and continuing\n");
720 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
721 INFINIPATH_HWE_SERDESPLLFAILED);
722 }
723
724 dd->ibdeltainprog = 1;
725 dd->ibsymsnap =
726 ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
727 dd->iblnkerrsnap =
728 ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
729
730 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
731 config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
732
733 ipath_cdbg(VERBOSE, "SerDes status config0=%llx config1=%llx, "
734 "xgxsconfig %llx\n", (unsigned long long) val,
735 (unsigned long long) config1, (unsigned long long)
736 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
737
738 /*
739 * Force reset on, also set rxdetect enable. Must do before reading
740 * serdesstatus at least for simulation, or some of the bits in
741 * serdes status will come back as undefined and cause simulation
742 * failures
743 */
744 val |= INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RXDETECT_EN
745 | INFINIPATH_SERDC0_L1PWR_DN;
746 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
747 /* be sure chip saw it */
748 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
749 udelay(5); /* need pll reset set at least for a bit */
750 /*
751 * after PLL is reset, set the per-lane Resets and TxIdle and
752 * clear the PLL reset and rxdetect (to get falling edge).
753 * Leave L1PWR bits set (permanently)
754 */
755 val &= ~(INFINIPATH_SERDC0_RXDETECT_EN | INFINIPATH_SERDC0_RESET_PLL
756 | INFINIPATH_SERDC0_L1PWR_DN);
757 val |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE;
758 ipath_cdbg(VERBOSE, "Clearing pll reset and setting lane resets "
759 "and txidle (%llx)\n", (unsigned long long) val);
760 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
761 /* be sure chip saw it */
762 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
763 /* need PLL reset clear for at least 11 usec before lane
764 * resets cleared; give it a few more to be sure */
765 udelay(15);
766 val &= ~(INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE);
767
768 ipath_cdbg(VERBOSE, "Clearing lane resets and txidle "
769 "(writing %llx)\n", (unsigned long long) val);
770 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
771 /* be sure chip saw it */
772 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
773
774 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
775 prev_val = val;
776 if (val & INFINIPATH_XGXS_RESET)
777 val &= ~INFINIPATH_XGXS_RESET;
778 if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
779 INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
780 /* need to compensate for Tx inversion in partner */
781 val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
782 INFINIPATH_XGXS_RX_POL_SHIFT);
783 val |= dd->ipath_rx_pol_inv <<
784 INFINIPATH_XGXS_RX_POL_SHIFT;
785 }
786 if (val != prev_val)
787 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
788
789 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
790
791 /* clear current and de-emphasis bits */
792 config1 &= ~0x0ffffffff00ULL;
793 /* set current to 20ma */
794 config1 |= 0x00000000000ULL;
795 /* set de-emphasis to -5.68dB */
796 config1 |= 0x0cccc000000ULL;
797 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
798
799 ipath_cdbg(VERBOSE, "done: SerDes status config0=%llx "
800 "config1=%llx, sstatus=%llx xgxs=%llx\n",
801 (unsigned long long) val, (unsigned long long) config1,
802 (unsigned long long)
803 ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
804 (unsigned long long)
805 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
806
807 return ret;
808}
809
810/**
811 * ipath_pe_quiet_serdes - set serdes to txidle
812 * @dd: the infinipath device
813 * Called when driver is being unloaded
814 */
815static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
816{
817 u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
818
819 if (dd->ibsymdelta || dd->iblnkerrdelta ||
820 dd->ibdeltainprog) {
821 u64 diagc;
822 /* enable counter writes */
823 diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl);
824 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl,
825 diagc | INFINIPATH_DC_COUNTERWREN);
826
827 if (dd->ibsymdelta || dd->ibdeltainprog) {
828 val = ipath_read_creg32(dd,
829 dd->ipath_cregs->cr_ibsymbolerrcnt);
830 if (dd->ibdeltainprog)
831 val -= val - dd->ibsymsnap;
832 val -= dd->ibsymdelta;
833 ipath_write_creg(dd,
834 dd->ipath_cregs->cr_ibsymbolerrcnt, val);
835 }
836 if (dd->iblnkerrdelta || dd->ibdeltainprog) {
837 val = ipath_read_creg32(dd,
838 dd->ipath_cregs->cr_iblinkerrrecovcnt);
839 if (dd->ibdeltainprog)
840 val -= val - dd->iblnkerrsnap;
841 val -= dd->iblnkerrdelta;
842 ipath_write_creg(dd,
843 dd->ipath_cregs->cr_iblinkerrrecovcnt, val);
844 }
845
846 /* and disable counter writes */
847 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc);
848 }
849 val |= INFINIPATH_SERDC0_TXIDLE;
850 ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
851 (unsigned long long) val);
852 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
853}
854
855static int ipath_pe_intconfig(struct ipath_devdata *dd)
856{
857 u32 chiprev;
858
859 /*
860 * If the chip supports added error indication via GPIO pins,
861 * enable interrupts on those bits so the interrupt routine
862 * can count the events. Also set flag so interrupt routine
863 * can know they are expected.
864 */
865 chiprev = dd->ipath_revision >> INFINIPATH_R_CHIPREVMINOR_SHIFT;
866 if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) {
867 /* Rev2+ reports extra errors via internal GPIO pins */
868 dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
869 dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
870 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
871 dd->ipath_gpio_mask);
872 }
873 return 0;
874}
875
876/**
877 * ipath_setup_pe_setextled - set the state of the two external LEDs
878 * @dd: the infinipath device
879 * @lst: the L state
880 * @ltst: the LT state
881 *
882 * These LEDs indicate the physical and logical state of IB link.
883 * For this chip (at least with recommended board pinouts), LED1
884 * is Yellow (logical state) and LED2 is Green (physical state),
885 *
886 * Note: We try to match the Mellanox HCA LED behavior as best
887 * we can. Green indicates physical link state is OK (something is
888 * plugged in, and we can train).
889 * Amber indicates the link is logically up (ACTIVE).
890 * Mellanox further blinks the amber LED to indicate data packet
891 * activity, but we have no hardware support for that, so it would
892 * require waking up every 10-20 msecs and checking the counters
893 * on the chip, and then turning the LED off if appropriate. That's
894 * visible overhead, so not something we will do.
895 *
896 */
897static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
898 u64 ltst)
899{
900 u64 extctl;
901 unsigned long flags = 0;
902
903 /* the diags use the LED to indicate diag info, so we leave
904 * the external LED alone when the diags are running */
905 if (ipath_diag_inuse)
906 return;
907
908 /* Allow override of LED display for, e.g. Locating system in rack */
909 if (dd->ipath_led_override) {
910 ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
911 ? INFINIPATH_IBCS_LT_STATE_LINKUP
912 : INFINIPATH_IBCS_LT_STATE_DISABLED;
913 lst = (dd->ipath_led_override & IPATH_LED_LOG)
914 ? INFINIPATH_IBCS_L_STATE_ACTIVE
915 : INFINIPATH_IBCS_L_STATE_DOWN;
916 }
917
918 spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
919 extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
920 INFINIPATH_EXTC_LED2PRIPORT_ON);
921
922 if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
923 extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
924 if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
925 extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
926 dd->ipath_extctrl = extctl;
927 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
928 spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
929}
930
931/**
932 * ipath_setup_pe_cleanup - clean up any per-chip chip-specific stuff
933 * @dd: the infinipath device
934 *
935 * This is called during driver unload.
936 * We do the pci_disable_msi here, not in generic code, because it
937 * isn't used for the HT chips. If we do end up needing pci_enable_msi
938 * at some point in the future for HT, we'll move the call back
939 * into the main init_one code.
940 */
941static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
942{
943 dd->ipath_msi_lo = 0; /* just in case unload fails */
944 pci_disable_msi(dd->pcidev);
945}
946
947static void ipath_6120_pcie_params(struct ipath_devdata *dd)
948{
949 u16 linkstat, speed;
950 int pos;
951
952 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
953 if (!pos) {
954 ipath_dev_err(dd, "Can't find PCI Express capability!\n");
955 goto bail;
956 }
957
958 pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
959 &linkstat);
960 /*
961 * speed is bits 0-3, linkwidth is bits 4-8
962 * no defines for them in headers
963 */
964 speed = linkstat & 0xf;
965 linkstat >>= 4;
966 linkstat &= 0x1f;
967 dd->ipath_lbus_width = linkstat;
968
969 switch (speed) {
970 case 1:
971 dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
972 break;
973 case 2:
974 dd->ipath_lbus_speed = 5000; /* Gen1, 5GHz */
975 break;
976 default: /* not defined, assume gen1 */
977 dd->ipath_lbus_speed = 2500;
978 break;
979 }
980
981 if (linkstat < 8)
982 ipath_dev_err(dd,
983 "PCIe width %u (x8 HCA), performance reduced\n",
984 linkstat);
985 else
986 ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x8 HCA)\n",
987 dd->ipath_lbus_speed, linkstat);
988
989 if (speed != 1)
990 ipath_dev_err(dd,
991 "PCIe linkspeed %u is incorrect; "
992 "should be 1 (2500)!\n", speed);
993bail:
994 /* fill in string, even on errors */
995 snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
996 "PCIe,%uMHz,x%u\n",
997 dd->ipath_lbus_speed,
998 dd->ipath_lbus_width);
999
1000 return;
1001}
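/*
 * A userspace sketch of the LNKSTA decode done in ipath_6120_pcie_params()
 * above; the masks mirror the code (0xf for speed, 0x1f for width after a
 * 4-bit shift) rather than named pci_regs.h fields, so treat them as
 * assumptions, and decode_lnksta() is a hypothetical name.
 */
#include <stdint.h>
#include <stdio.h>

static void decode_lnksta(uint16_t linkstat)
{
	uint16_t speed = linkstat & 0xf;		/* 1 = 2.5GT/s, 2 = 5GT/s */
	uint16_t width = (linkstat >> 4) & 0x1f;	/* negotiated lane count */

	printf("PCIe,%uMHz,x%u\n", speed == 2 ? 5000 : 2500, width);
}

int main(void)
{
	decode_lnksta(0x0081);	/* x8 at 2.5GT/s: prints PCIe,2500MHz,x8 */
	return 0;
}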
1002
1003/**
1004 * ipath_setup_pe_config - setup PCIe config related stuff
1005 * @dd: the infinipath device
1006 * @pdev: the PCI device
1007 *
1008 * The pci_enable_msi() call will fail on systems with MSI quirks
1009 * such as those with AMD8131, even if the device of interest is not
1010 * attached to that device (in the 2.6.13 - 2.6.15 kernels, at least; fixed
1011 * late in 2.6.16).
1012 * All that can be done is to edit the kernel source to remove the quirk
1013 * check until that is fixed.
1014 * We do not need to call enable_msi() for our HyperTransport chip,
1015 * even though it uses MSI, and we want to avoid the quirk warning,
1016 * so we call enable_msi() only for PCIe. If we do end up needing
1017 * pci_enable_msi at some point in the future for HT, we'll move the
1018 * call back into the main init_one code.
1019 * We save the msi lo and hi values, so we can restore them after
1020 * chip reset (the kernel PCI infrastructure doesn't yet handle that
1021 * correctly).
1022 */
1023static int ipath_setup_pe_config(struct ipath_devdata *dd,
1024 struct pci_dev *pdev)
1025{
1026 int pos, ret;
1027
1028 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
1029 ret = pci_enable_msi(dd->pcidev);
1030 if (ret)
1031 ipath_dev_err(dd, "pci_enable_msi failed: %d, "
1032 "interrupts may not work\n", ret);
1033 /* continue even if it fails, we may still be OK... */
1034 dd->ipath_irq = pdev->irq;
1035
1036 if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
1037 u16 control;
1038 pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
1039 &dd->ipath_msi_lo);
1040 pci_read_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
1041 &dd->ipath_msi_hi);
1042 pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
1043 &control);
1044 /* now save the data (vector) info */
1045 pci_read_config_word(dd->pcidev,
1046 pos + ((control & PCI_MSI_FLAGS_64BIT)
1047 ? 12 : 8),
1048 &dd->ipath_msi_data);
1049 ipath_cdbg(VERBOSE, "Read msi data 0x%x from config offset "
1050 "0x%x, control=0x%x\n", dd->ipath_msi_data,
1051 pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
1052 control);
1053 /* we save the cachelinesize also, although it doesn't
1054 * really matter */
1055 pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
1056 &dd->ipath_pci_cacheline);
1057 } else
1058 ipath_dev_err(dd, "Can't find MSI capability, "
1059 "can't save MSI settings for reset\n");
1060
1061 ipath_6120_pcie_params(dd);
1062
1063 dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
1064 dd->ipath_link_speed_supported = IPATH_IB_SDR;
1065 dd->ipath_link_width_enabled = IB_WIDTH_4X;
1066 dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
1067 /* these can't change for this chip, so set once */
1068 dd->ipath_link_width_active = dd->ipath_link_width_enabled;
1069 dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
1070 return 0;
1071}
1072
1073static void ipath_init_pe_variables(struct ipath_devdata *dd)
1074{
1075 /*
1076 * setup the register offsets, since they are different for each
1077 * chip
1078 */
1079 dd->ipath_kregs = &ipath_pe_kregs;
1080 dd->ipath_cregs = &ipath_pe_cregs;
1081
1082 /*
1083 * bits for selecting i2c direction and values,
1084 * used for I2C serial flash
1085 */
1086 dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
1087 dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
1088 dd->ipath_gpio_sda = IPATH_GPIO_SDA;
1089 dd->ipath_gpio_scl = IPATH_GPIO_SCL;
1090
1091 /*
1092 * Fill in data for field-values that change in newer chips.
1093 * We dynamically specify only the mask for LINKTRAININGSTATE
1094 * and only the shift for LINKSTATE, as they are the only ones
1095 * that change. Also precalculate the 3 link states of interest
1096 * and the combined mask.
1097 */
1098 dd->ibcs_ls_shift = IBA6120_IBCS_LINKSTATE_SHIFT;
1099 dd->ibcs_lts_mask = IBA6120_IBCS_LINKTRAININGSTATE_MASK;
1100 dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
1101 dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
1102 dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1103 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1104 (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
1105 dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1106 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1107 (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
1108 dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1109 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1110 (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
1111
1112 /*
1113 * Fill in data for ibcc field-values that change in newer chips.
1114 * We dynamically specify only the mask for LINKINITCMD
1115 * and only the shift for LINKCMD and MAXPKTLEN, as they are
1116 * the only ones that change.
1117 */
1118 dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
1119 dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
1120 dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
1121
1122 /* Fill in shifts for RcvCtrl. */
1123 dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
1124 dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
1125 dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
1126 dd->ipath_r_portcfg_shift = 0; /* Not on IBA6120 */
1127
1128 /* variables for sanity checking interrupt and errors */
1129 dd->ipath_hwe_bitsextant =
1130 (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1131 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
1132 (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
1133 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
1134 (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
1135 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
1136 INFINIPATH_HWE_PCIE1PLLFAILED |
1137 INFINIPATH_HWE_PCIE0PLLFAILED |
1138 INFINIPATH_HWE_PCIEPOISONEDTLP |
1139 INFINIPATH_HWE_PCIECPLTIMEOUT |
1140 INFINIPATH_HWE_PCIEBUSPARITYXTLH |
1141 INFINIPATH_HWE_PCIEBUSPARITYXADM |
1142 INFINIPATH_HWE_PCIEBUSPARITYRADM |
1143 INFINIPATH_HWE_MEMBISTFAILED |
1144 INFINIPATH_HWE_COREPLL_FBSLIP |
1145 INFINIPATH_HWE_COREPLL_RFSLIP |
1146 INFINIPATH_HWE_SERDESPLLFAILED |
1147 INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
1148 INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
1149 dd->ipath_i_bitsextant =
1150 (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
1151 (INFINIPATH_I_RCVAVAIL_MASK <<
1152 INFINIPATH_I_RCVAVAIL_SHIFT) |
1153 INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
1154 INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
1155 dd->ipath_e_bitsextant =
1156 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
1157 INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
1158 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
1159 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
1160 INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
1161 INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
1162 INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
1163 INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
1164 INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
1165 INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
1166 INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
1167 INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
1168 INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
1169 INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
1170 INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
1171 INFINIPATH_E_HARDWARE;
1172
1173 dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
1174 dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
1175 dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
1176 dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
1177
1178 /*
1179 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
1180 * 2 is Some Misc, 3 is reserved for future.
1181 */
1182 dd->ipath_eep_st_masks[0].hwerrs_to_log =
1183 INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
1184 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
1185
1186 /* Ignore errors in PIO/PBC on systems with unordered write-combining */
1187 if (ipath_unordered_wc())
1188 dd->ipath_eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
1189
1190 dd->ipath_eep_st_masks[1].hwerrs_to_log =
1191 INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1192 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
1193
1194 dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
1195 dd->delay_mult = 2; /* SDR, 4X, can't change */
1196}
1197
1198/* setup the MSI stuff again after a reset. I'd like to just call
1199 * pci_enable_msi() and request_irq() again, but when I do that,
1200 * the MSI enable bit doesn't get set in the command word, and
1201 * we switch to a different interrupt vector, which is confusing,
1202 * so I instead just do it all inline. Perhaps we can somehow tie
1203 * this into the PCIe hotplug support at some point.
1204 * Note, because I'm doing it all here, I don't call pci_disable_msi()
1205 * or free_irq() at the start of ipath_setup_pe_reset().
1206 */
1207static int ipath_reinit_msi(struct ipath_devdata *dd)
1208{
1209 int pos;
1210 u16 control;
1211 int ret;
1212
1213 if (!dd->ipath_msi_lo) {
1214 dev_info(&dd->pcidev->dev, "Can't restore MSI config, "
1215 "initial setup failed?\n");
1216 ret = 0;
1217 goto bail;
1218 }
1219
1220 if (!(pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) {
1221 ipath_dev_err(dd, "Can't find MSI capability, "
1222 "can't restore MSI settings\n");
1223 ret = 0;
1224 goto bail;
1225 }
1226 ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
1227 dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
1228 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
1229 dd->ipath_msi_lo);
1230	ipath_cdbg(VERBOSE, "Writing msi_hi 0x%x to config offset 0x%x\n",
1231 dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
1232 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
1233 dd->ipath_msi_hi);
1234 pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
1235 if (!(control & PCI_MSI_FLAGS_ENABLE)) {
1236 ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
1237 "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
1238 control, control | PCI_MSI_FLAGS_ENABLE);
1239 control |= PCI_MSI_FLAGS_ENABLE;
1240 pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
1241 control);
1242 }
1243 /* now rewrite the data (vector) info */
1244 pci_write_config_word(dd->pcidev, pos +
1245 ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
1246 dd->ipath_msi_data);
1247 /* we restore the cachelinesize also, although it doesn't really
1248 * matter */
1249 pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
1250 dd->ipath_pci_cacheline);
1251 /* and now set the pci master bit again */
1252 pci_set_master(dd->pcidev);
1253 ret = 1;
1254
1255bail:
1256 return ret;
1257}
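/*
 * Editor's note: an illustrative sketch, not part of the original
 * driver, of the MSI capability layout the save/restore code above
 * relies on: cap+0 holds ID/next/flags, cap+4 the address low word,
 * cap+8 the address high word (64-bit MSI only), and the message
 * data sits at cap+12 for 64-bit MSI or cap+8 for 32-bit MSI. That
 * is why both paths pick the offset from PCI_MSI_FLAGS_64BIT.
 */
static inline int example_msi_data_offset(u16 control)
{
	return (control & PCI_MSI_FLAGS_64BIT) ? 12 : 8;
}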
1258
1259/* This routine sleeps, so it can only be called from user context, not
1260 * from interrupt context. If we need interrupt context, we can split
1261 * it into two routines.
1262 */
1263static int ipath_setup_pe_reset(struct ipath_devdata *dd)
1264{
1265 u64 val;
1266 int i;
1267 int ret;
1268 u16 cmdval;
1269
1270 pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
1271
1272 /* Use ERROR so it shows up in logs, etc. */
1273 ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
1274 /* keep chip from being accessed in a few places */
1275 dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
1276 val = dd->ipath_control | INFINIPATH_C_RESET;
1277 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
1278 mb();
1279
1280 for (i = 1; i <= 5; i++) {
1281 int r;
1282 /* allow MBIST, etc. to complete; longer on each retry.
1283 * We sometimes get machine checks from bus timeout if no
1284 * response, so for now, make it *really* long.
1285 */
1286 msleep(1000 + (1 + i) * 2000);
1287 if ((r =
1288 pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
1289 dd->ipath_pcibar0)))
1290 ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n",
1291 r);
1292 if ((r =
1293 pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
1294 dd->ipath_pcibar1)))
1295 ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
1296 r);
1297 /* now re-enable memory access */
1298 pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
1299 if ((r = pci_enable_device(dd->pcidev)))
1300 ipath_dev_err(dd, "pci_enable_device failed after "
1301 "reset: %d\n", r);
1302 /*
1303		 * whether it fully enabled or not, mark it as present
1304		 * again (but not INITTED)
1305 */
1306 dd->ipath_flags |= IPATH_PRESENT;
1307 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
1308 if (val == dd->ipath_revision) {
1309 ipath_cdbg(VERBOSE, "Got matching revision "
1310 "register %llx on try %d\n",
1311 (unsigned long long) val, i);
1312 ret = ipath_reinit_msi(dd);
1313 goto bail;
1314 }
1315 /* Probably getting -1 back */
1316 ipath_dbg("Didn't get expected revision register, "
1317 "got %llx, try %d\n", (unsigned long long) val,
1318 i + 1);
1319 }
1320 ret = 0; /* failed */
1321
1322bail:
1323 if (ret)
1324 ipath_6120_pcie_params(dd);
1325 return ret;
1326}
1327
1328/**
1329 * ipath_pe_put_tid - write a TID in chip
1330 * @dd: the infinipath device
1331 * @tidptr: pointer to the expected TID (in chip) to update
1332 * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
1333 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1334 *
1335 * This exists as a separate routine to allow for special locking etc.
1336 * It's used for both the full cleanup on exit, as well as the normal
1337 * setup and teardown.
1338 */
1339static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
1340 u32 type, unsigned long pa)
1341{
1342 u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1343 unsigned long flags = 0; /* keep gcc quiet */
1344 int tidx;
1345 spinlock_t *tidlockp;
1346
1347 if (!dd->ipath_kregbase)
1348 return;
1349
1350 if (pa != dd->ipath_tidinvalid) {
1351 if (pa & ((1U << 11) - 1)) {
1352 dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
1353 "not 2KB aligned!\n", pa);
1354 return;
1355 }
1356 pa >>= 11;
1357 /* paranoia check */
1358 if (pa & ~INFINIPATH_RT_ADDR_MASK)
1359 ipath_dev_err(dd,
1360 "BUG: Physical page address 0x%lx "
1361 "has bits set in 31-29\n", pa);
1362
1363 if (type == RCVHQ_RCV_TYPE_EAGER)
1364 pa |= dd->ipath_tidtemplate;
1365 else /* for now, always full 4KB page */
1366 pa |= 2 << 29;
1367 }
1368
1369 /*
1370 * Workaround chip bug 9437 by writing the scratch register
1371 * before and after the TID, and with an io write barrier.
1372 * We use a spinlock around the writes, so they can't intermix
1373 * with other TID (eager or expected) writes (the chip bug
1374 * is triggered by back to back TID writes). Unfortunately, this
1375 * call can be done from interrupt level for the port 0 eager TIDs,
1376 * so we have to use irqsave locks.
1377 */
1378 /*
1379 * Assumes tidptr always > ipath_egrtidbase
1380 * if type == RCVHQ_RCV_TYPE_EAGER.
1381 */
1382 tidx = tidptr - dd->ipath_egrtidbase;
1383
1384 tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->ipath_rcvegrcnt)
1385 ? &dd->ipath_kernel_tid_lock : &dd->ipath_user_tid_lock;
1386 spin_lock_irqsave(tidlockp, flags);
1387 ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
1388 writel(pa, tidp32);
1389 ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
1390 mmiowb();
1391 spin_unlock_irqrestore(tidlockp, flags);
1392}
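/*
 * Editor's note: an illustrative sketch, not part of the original
 * driver, of the TID word encoding used by the put_tid routines: the
 * 2KB-aligned physical address is stored shifted down by 11 bits,
 * and bits 30:29 carry the buffer-size selector from
 * ipath_tidtemplate (1 = 2KB, 2 = 4KB).
 */
static inline u32 example_make_tid(unsigned long pa, int is_4k)
{
	u32 tid = (u32)(pa >> 11);	/* drop the 2KB-alignment bits */

	tid |= (is_4k ? 2U : 1U) << 29;	/* size selector */
	return tid;	/* e.g. pa 0x12345800, 2KB buf -> 0x2002468b */
}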
1393
1394/**
1395 * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
1396 * @dd: the infinipath device
1397 * @tidptr: pointer to the expected TID (in chip) to update
1398 * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
1399 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1400 *
1401 * This exists as a separate routine to allow for selection of the
1402 * appropriate "flavor". The static calls in cleanup just use the
1403 * revision-agnostic form, as they are not performance critical.
1404 */
1405static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
1406 u32 type, unsigned long pa)
1407{
1408 u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1409 u32 tidx;
1410
1411 if (!dd->ipath_kregbase)
1412 return;
1413
1414 if (pa != dd->ipath_tidinvalid) {
1415 if (pa & ((1U << 11) - 1)) {
1416 dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
1417 "not 2KB aligned!\n", pa);
1418 return;
1419 }
1420 pa >>= 11;
1421 /* paranoia check */
1422 if (pa & ~INFINIPATH_RT_ADDR_MASK)
1423 ipath_dev_err(dd,
1424 "BUG: Physical page address 0x%lx "
1425 "has bits set in 31-29\n", pa);
1426
1427 if (type == RCVHQ_RCV_TYPE_EAGER)
1428 pa |= dd->ipath_tidtemplate;
1429 else /* for now, always full 4KB page */
1430 pa |= 2 << 29;
1431 }
1432 tidx = tidptr - dd->ipath_egrtidbase;
1433 writel(pa, tidp32);
1434 mmiowb();
1435}
1436
1437
1438/**
1439 * ipath_pe_clear_tids - clear all TID entries for a port, expected and eager
1440 * @dd: the infinipath device
1441 * @port: the port
1442 *
1443 * clear all TID entries for a port, expected and eager.
1444 * Used from ipath_close(). On this chip, TIDs are only 32 bits,
1445 * not 64, but they are still on 64 bit boundaries, so tidbase
1446 * is declared as u64 * for the pointer math, even though we write 32 bits
1447 */
1448static void ipath_pe_clear_tids(struct ipath_devdata *dd, unsigned port)
1449{
1450 u64 __iomem *tidbase;
1451 unsigned long tidinv;
1452 int i;
1453
1454 if (!dd->ipath_kregbase)
1455 return;
1456
1457 ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
1458
1459 tidinv = dd->ipath_tidinvalid;
1460 tidbase = (u64 __iomem *)
1461 ((char __iomem *)(dd->ipath_kregbase) +
1462 dd->ipath_rcvtidbase +
1463 port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
1464
1465 for (i = 0; i < dd->ipath_rcvtidcnt; i++)
1466 dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
1467 tidinv);
1468
1469 tidbase = (u64 __iomem *)
1470 ((char __iomem *)(dd->ipath_kregbase) +
1471 dd->ipath_rcvegrbase +
1472 port * dd->ipath_rcvegrcnt * sizeof(*tidbase));
1473
1474 for (i = 0; i < dd->ipath_rcvegrcnt; i++)
1475 dd->ipath_f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
1476 tidinv);
1477}
1478
1479/**
1480 * ipath_pe_tidtemplate - setup constants for TID updates
1481 * @dd: the infinipath device
1482 *
1483 * We set up values we use a lot, to avoid recalculating them each time
1484 */
1485static void ipath_pe_tidtemplate(struct ipath_devdata *dd)
1486{
1487 u32 egrsize = dd->ipath_rcvegrbufsize;
1488
1489 /* For now, we always allocate 4KB buffers (at init) so we can
1490 * receive max size packets. We may want a module parameter to
1491	 * specify 2KB or 4KB and/or make it per port instead of per device
1492 * for those who want to reduce memory footprint. Note that the
1493 * ipath_rcvhdrentsize size must be large enough to hold the largest
1494 * IB header (currently 96 bytes) that we expect to handle (plus of
1495 * course the 2 dwords of RHF).
1496 */
1497 if (egrsize == 2048)
1498 dd->ipath_tidtemplate = 1U << 29;
1499 else if (egrsize == 4096)
1500 dd->ipath_tidtemplate = 2U << 29;
1501 else {
1502 egrsize = 4096;
1503 dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
1504 "%u, using %u\n", dd->ipath_rcvegrbufsize,
1505 egrsize);
1506 dd->ipath_tidtemplate = 2U << 29;
1507 }
1508 dd->ipath_tidinvalid = 0;
1509}
1510
1511static int ipath_pe_early_init(struct ipath_devdata *dd)
1512{
1513 dd->ipath_flags |= IPATH_4BYTE_TID;
1514 if (ipath_unordered_wc())
1515 dd->ipath_flags |= IPATH_PIO_FLUSH_WC;
1516
1517 /*
1518 * For openfabrics, we need to be able to handle an IB header of
1519 * 24 dwords. HT chip has arbitrary sized receive buffers, so we
1520 * made them the same size as the PIO buffers. This chip does not
1521 * handle arbitrary size buffers, so we need the header large enough
1522 * to handle largest IB header, but still have room for a 2KB MTU
1523 * standard IB packet.
1524 */
1525 dd->ipath_rcvhdrentsize = 24;
1526 dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
1527 dd->ipath_rhf_offset = 0;
1528 dd->ipath_egrtidbase = (u64 __iomem *)
1529 ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase);
1530
1531 dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
1532 /*
1533 * the min() check here is currently a nop, but it may not always
1534 * be, depending on just how we do ipath_rcvegrbufsize
1535 */
1536 dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
1537 dd->ipath_piosize2k,
1538 dd->ipath_rcvegrbufsize +
1539 (dd->ipath_rcvhdrentsize << 2));
1540 dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
1541
1542 /*
1543 * We can request a receive interrupt for 1 or
1544 * more packets from current offset. For now, we set this
1545 * up for a single packet.
1546 */
1547 dd->ipath_rhdrhead_intr_off = 1ULL<<32;
1548
1549 ipath_get_eeprom_info(dd);
1550
1551 return 0;
1552}
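/*
 * Editor's note: a worked example, not part of the original driver,
 * of the ipath_ibmaxlen computation above with hypothetical 2KB-MTU
 * values piosize2k = 2048, rcvegrbufsize = 2048, rcvhdrentsize = 24
 * dwords. The receive side can absorb the payload plus header, so
 * the send-side PIO buffer size is the limiter:
 *
 *	ibmaxlen = min(2048, 2048 + (24 << 2)) = min(2048, 2144) = 2048
 */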
1553
1554int __attribute__((weak)) ipath_unordered_wc(void)
1555{
1556 return 0;
1557}
1558
1559/**
1560 * ipath_init_pe_get_base_info - set chip-specific flags for user code
1561 * @pd: the infinipath port
1562 * @kbase: ipath_base_info pointer
1563 *
1564 * We set the PCIE flag because the lower bandwidth on PCIe vs
1565 * HyperTransport can affect some user packet algorithms.
1566 */
1567static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
1568{
1569 struct ipath_base_info *kinfo = kbase;
1570 struct ipath_devdata *dd;
1571
1572 if (ipath_unordered_wc()) {
1573 kinfo->spi_runtime_flags |= IPATH_RUNTIME_FORCE_WC_ORDER;
1574 ipath_cdbg(PROC, "Intel processor, forcing WC order\n");
1575	} else
1576		ipath_cdbg(PROC, "Not Intel processor, WC ordered\n");
1578
1579 if (pd == NULL)
1580 goto done;
1581
1582 dd = pd->port_dd;
1583
1584done:
1585 kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE |
1586 IPATH_RUNTIME_FORCE_PIOAVAIL | IPATH_RUNTIME_PIO_REGSWAPPED;
1587 return 0;
1588}
1589
1590static void ipath_pe_free_irq(struct ipath_devdata *dd)
1591{
1592 free_irq(dd->ipath_irq, dd);
1593 dd->ipath_irq = 0;
1594}
1595
1596
1597static struct ipath_message_header *
1598ipath_pe_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
1599{
1600 return (struct ipath_message_header *)
1601 &rhf_addr[sizeof(u64) / sizeof(u32)];
1602}
1603
1604static void ipath_pe_config_ports(struct ipath_devdata *dd, ushort cfgports)
1605{
1606 dd->ipath_portcnt =
1607 ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
1608 dd->ipath_p0_rcvegrcnt =
1609 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
1610}
1611
1612static void ipath_pe_read_counters(struct ipath_devdata *dd,
1613 struct infinipath_counters *cntrs)
1614{
1615 cntrs->LBIntCnt =
1616 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
1617 cntrs->LBFlowStallCnt =
1618 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
1619 cntrs->TxSDmaDescCnt = 0;
1620 cntrs->TxUnsupVLErrCnt =
1621 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
1622 cntrs->TxDataPktCnt =
1623 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
1624 cntrs->TxFlowPktCnt =
1625 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
1626 cntrs->TxDwordCnt =
1627 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
1628 cntrs->TxLenErrCnt =
1629 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
1630 cntrs->TxMaxMinLenErrCnt =
1631 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
1632 cntrs->TxUnderrunCnt =
1633 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
1634 cntrs->TxFlowStallCnt =
1635 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
1636 cntrs->TxDroppedPktCnt =
1637 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
1638 cntrs->RxDroppedPktCnt =
1639 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
1640 cntrs->RxDataPktCnt =
1641 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
1642 cntrs->RxFlowPktCnt =
1643 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
1644 cntrs->RxDwordCnt =
1645 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
1646 cntrs->RxLenErrCnt =
1647 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
1648 cntrs->RxMaxMinLenErrCnt =
1649 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
1650 cntrs->RxICRCErrCnt =
1651 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
1652 cntrs->RxVCRCErrCnt =
1653 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
1654 cntrs->RxFlowCtrlErrCnt =
1655 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
1656 cntrs->RxBadFormatCnt =
1657 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
1658 cntrs->RxLinkProblemCnt =
1659 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
1660 cntrs->RxEBPCnt =
1661 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
1662 cntrs->RxLPCRCErrCnt =
1663 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
1664 cntrs->RxBufOvflCnt =
1665 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
1666 cntrs->RxTIDFullErrCnt =
1667 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
1668 cntrs->RxTIDValidErrCnt =
1669 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
1670 cntrs->RxPKeyMismatchCnt =
1671 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
1672 cntrs->RxP0HdrEgrOvflCnt =
1673 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
1674 cntrs->RxP1HdrEgrOvflCnt =
1675 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
1676 cntrs->RxP2HdrEgrOvflCnt =
1677 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
1678 cntrs->RxP3HdrEgrOvflCnt =
1679 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
1680 cntrs->RxP4HdrEgrOvflCnt =
1681 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
1682 cntrs->RxP5HdrEgrOvflCnt = 0;
1683 cntrs->RxP6HdrEgrOvflCnt = 0;
1684 cntrs->RxP7HdrEgrOvflCnt = 0;
1685 cntrs->RxP8HdrEgrOvflCnt = 0;
1686 cntrs->RxP9HdrEgrOvflCnt = 0;
1687 cntrs->RxP10HdrEgrOvflCnt = 0;
1688 cntrs->RxP11HdrEgrOvflCnt = 0;
1689 cntrs->RxP12HdrEgrOvflCnt = 0;
1690 cntrs->RxP13HdrEgrOvflCnt = 0;
1691 cntrs->RxP14HdrEgrOvflCnt = 0;
1692 cntrs->RxP15HdrEgrOvflCnt = 0;
1693 cntrs->RxP16HdrEgrOvflCnt = 0;
1694 cntrs->IBStatusChangeCnt =
1695 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
1696 cntrs->IBLinkErrRecoveryCnt =
1697 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
1698 cntrs->IBLinkDownedCnt =
1699 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
1700 cntrs->IBSymbolErrCnt =
1701 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
1702 cntrs->RxVL15DroppedPktCnt = 0;
1703 cntrs->RxOtherLocalPhyErrCnt = 0;
1704 cntrs->PcieRetryBufDiagQwordCnt = 0;
1705 cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
1706 cntrs->LocalLinkIntegrityErrCnt = dd->ipath_lli_errs;
1707 cntrs->RxVlErrCnt = 0;
1708 cntrs->RxDlidFltrCnt = 0;
1709}
1710
1711
1712/* no interrupt fallback for these chips */
1713static int ipath_pe_nointr_fallback(struct ipath_devdata *dd)
1714{
1715 return 0;
1716}
1717
1718
1719/*
1720 * reset the XGXS (between serdes and IBC). Slightly less intrusive
1721 * than resetting the IBC or external link state, and useful in some
1722 * cases to cause some retraining. To do this right, we reset IBC
1723 * as well.
1724 */
1725static void ipath_pe_xgxs_reset(struct ipath_devdata *dd)
1726{
1727 u64 val, prev_val;
1728
1729 prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
1730 val = prev_val | INFINIPATH_XGXS_RESET;
1731 prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
1732 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
1733 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
1734 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1735 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1736 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
1737 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
1738 dd->ipath_control);
1739}
1740
1741
1742static int ipath_pe_get_ib_cfg(struct ipath_devdata *dd, int which)
1743{
1744 int ret;
1745
1746 switch (which) {
1747 case IPATH_IB_CFG_LWID:
1748 ret = dd->ipath_link_width_active;
1749 break;
1750 case IPATH_IB_CFG_SPD:
1751 ret = dd->ipath_link_speed_active;
1752 break;
1753 case IPATH_IB_CFG_LWID_ENB:
1754 ret = dd->ipath_link_width_enabled;
1755 break;
1756 case IPATH_IB_CFG_SPD_ENB:
1757 ret = dd->ipath_link_speed_enabled;
1758 break;
1759 default:
1760 ret = -ENOTSUPP;
1761 break;
1762 }
1763 return ret;
1764}
1765
1766
1767/* we assume range checking is already done, if needed */
1768static int ipath_pe_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
1769{
1770 int ret = 0;
1771
1772 if (which == IPATH_IB_CFG_LWID_ENB)
1773 dd->ipath_link_width_enabled = val;
1774 else if (which == IPATH_IB_CFG_SPD_ENB)
1775 dd->ipath_link_speed_enabled = val;
1776 else
1777 ret = -ENOTSUPP;
1778 return ret;
1779}
1780
1781static void ipath_pe_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
1782{
1783}
1784
1785
1786static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
1787{
1788 if (ibup) {
1789 if (dd->ibdeltainprog) {
1790 dd->ibdeltainprog = 0;
1791 dd->ibsymdelta +=
1792 ipath_read_creg32(dd,
1793 dd->ipath_cregs->cr_ibsymbolerrcnt) -
1794 dd->ibsymsnap;
1795 dd->iblnkerrdelta +=
1796 ipath_read_creg32(dd,
1797 dd->ipath_cregs->cr_iblinkerrrecovcnt) -
1798 dd->iblnkerrsnap;
1799 }
1800 } else {
1801 dd->ipath_lli_counter = 0;
1802 if (!dd->ibdeltainprog) {
1803 dd->ibdeltainprog = 1;
1804 dd->ibsymsnap =
1805 ipath_read_creg32(dd,
1806 dd->ipath_cregs->cr_ibsymbolerrcnt);
1807 dd->iblnkerrsnap =
1808 ipath_read_creg32(dd,
1809 dd->ipath_cregs->cr_iblinkerrrecovcnt);
1810 }
1811 }
1812
1813 ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs),
1814 ipath_ib_linktrstate(dd, ibcs));
1815 return 0;
1816}
1817
1818
1819/**
1820 * ipath_init_iba6120_funcs - set up the chip-specific function pointers
1821 * @dd: the infinipath device
1822 *
1823 * This is global, and is called directly at init to set up the
1824 * chip-specific function pointers for later use.
1825 */
1826void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
1827{
1828 dd->ipath_f_intrsetup = ipath_pe_intconfig;
1829 dd->ipath_f_bus = ipath_setup_pe_config;
1830 dd->ipath_f_reset = ipath_setup_pe_reset;
1831 dd->ipath_f_get_boardname = ipath_pe_boardname;
1832 dd->ipath_f_init_hwerrors = ipath_pe_init_hwerrors;
1833 dd->ipath_f_early_init = ipath_pe_early_init;
1834 dd->ipath_f_handle_hwerrors = ipath_pe_handle_hwerrors;
1835 dd->ipath_f_quiet_serdes = ipath_pe_quiet_serdes;
1836 dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
1837 dd->ipath_f_clear_tids = ipath_pe_clear_tids;
1838 /*
1839 * _f_put_tid may get changed after we read the chip revision,
1840 * but we start with the safe version for all revs
1841 */
1842 dd->ipath_f_put_tid = ipath_pe_put_tid;
1843 dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
1844 dd->ipath_f_setextled = ipath_setup_pe_setextled;
1845 dd->ipath_f_get_base_info = ipath_pe_get_base_info;
1846 dd->ipath_f_free_irq = ipath_pe_free_irq;
1847 dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
1848 dd->ipath_f_intr_fallback = ipath_pe_nointr_fallback;
1849 dd->ipath_f_xgxs_reset = ipath_pe_xgxs_reset;
1850 dd->ipath_f_get_msgheader = ipath_pe_get_msgheader;
1851 dd->ipath_f_config_ports = ipath_pe_config_ports;
1852 dd->ipath_f_read_counters = ipath_pe_read_counters;
1853 dd->ipath_f_get_ib_cfg = ipath_pe_get_ib_cfg;
1854 dd->ipath_f_set_ib_cfg = ipath_pe_set_ib_cfg;
1855 dd->ipath_f_config_jint = ipath_pe_config_jint;
1856 dd->ipath_f_ib_updown = ipath_pe_ib_updown;
1857
1858
1859 /* initialize chip-specific variables */
1860 ipath_init_pe_variables(dd);
1861}
1862
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
deleted file mode 100644
index 34b778ed97fc..000000000000
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ /dev/null
@@ -1,2631 +0,0 @@
1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33/*
34 * This file contains all of the code that is specific to the
35 * InfiniPath 7220 chip (except that specific to the SerDes)
36 */
37
38#include <linux/interrupt.h>
39#include <linux/pci.h>
40#include <linux/sched.h>
41#include <linux/delay.h>
42#include <linux/io.h>
43#include <rdma/ib_verbs.h>
44
45#include "ipath_kernel.h"
46#include "ipath_registers.h"
47#include "ipath_7220.h"
48
49static void ipath_setup_7220_setextled(struct ipath_devdata *, u64, u64);
50
51static unsigned ipath_compat_ddr_negotiate = 1;
52
53module_param_named(compat_ddr_negotiate, ipath_compat_ddr_negotiate, uint,
54 S_IWUSR | S_IRUGO);
55MODULE_PARM_DESC(compat_ddr_negotiate,
56 "Attempt pre-IBTA 1.2 DDR speed negotiation");
57
58static unsigned ipath_sdma_fetch_arb = 1;
59module_param_named(fetch_arb, ipath_sdma_fetch_arb, uint, S_IRUGO);
60MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
61
62/*
63 * This file contains almost all the chip-specific register information and
64 * access functions for the QLogic InfiniPath 7220 PCI-Express chip, with the
65 * exception of SerDes support, which is in ipath_sd7220.c.
66 *
67 * This lists the InfiniPath registers, in the actual chip layout.
68 * This structure should never be directly accessed.
69 */
70struct _infinipath_do_not_use_kernel_regs {
71 unsigned long long Revision;
72 unsigned long long Control;
73 unsigned long long PageAlign;
74 unsigned long long PortCnt;
75 unsigned long long DebugPortSelect;
76 unsigned long long DebugSigsIntSel; /* was Reserved0;*/
77 unsigned long long SendRegBase;
78 unsigned long long UserRegBase;
79 unsigned long long CounterRegBase;
80 unsigned long long Scratch;
81 unsigned long long EEPROMAddrCmd; /* was Reserved1; */
82 unsigned long long EEPROMData; /* was Reserved2; */
83 unsigned long long IntBlocked;
84 unsigned long long IntMask;
85 unsigned long long IntStatus;
86 unsigned long long IntClear;
87 unsigned long long ErrorMask;
88 unsigned long long ErrorStatus;
89 unsigned long long ErrorClear;
90 unsigned long long HwErrMask;
91 unsigned long long HwErrStatus;
92 unsigned long long HwErrClear;
93 unsigned long long HwDiagCtrl;
94 unsigned long long MDIO;
95 unsigned long long IBCStatus;
96 unsigned long long IBCCtrl;
97 unsigned long long ExtStatus;
98 unsigned long long ExtCtrl;
99 unsigned long long GPIOOut;
100 unsigned long long GPIOMask;
101 unsigned long long GPIOStatus;
102 unsigned long long GPIOClear;
103 unsigned long long RcvCtrl;
104 unsigned long long RcvBTHQP;
105 unsigned long long RcvHdrSize;
106 unsigned long long RcvHdrCnt;
107 unsigned long long RcvHdrEntSize;
108 unsigned long long RcvTIDBase;
109 unsigned long long RcvTIDCnt;
110 unsigned long long RcvEgrBase;
111 unsigned long long RcvEgrCnt;
112 unsigned long long RcvBufBase;
113 unsigned long long RcvBufSize;
114 unsigned long long RxIntMemBase;
115 unsigned long long RxIntMemSize;
116 unsigned long long RcvPartitionKey;
117 unsigned long long RcvQPMulticastPort;
118 unsigned long long RcvPktLEDCnt;
119 unsigned long long IBCDDRCtrl;
120 unsigned long long HRTBT_GUID;
121 unsigned long long IB_SDTEST_IF_TX;
122 unsigned long long IB_SDTEST_IF_RX;
123 unsigned long long IBCDDRCtrl2;
124 unsigned long long IBCDDRStatus;
125 unsigned long long JIntReload;
126 unsigned long long IBNCModeCtrl;
127 unsigned long long SendCtrl;
128 unsigned long long SendBufBase;
129 unsigned long long SendBufSize;
130 unsigned long long SendBufCnt;
131 unsigned long long SendAvailAddr;
132 unsigned long long TxIntMemBase;
133 unsigned long long TxIntMemSize;
134 unsigned long long SendDmaBase;
135 unsigned long long SendDmaLenGen;
136 unsigned long long SendDmaTail;
137 unsigned long long SendDmaHead;
138 unsigned long long SendDmaHeadAddr;
139 unsigned long long SendDmaBufMask0;
140 unsigned long long SendDmaBufMask1;
141 unsigned long long SendDmaBufMask2;
142 unsigned long long SendDmaStatus;
143 unsigned long long SendBufferError;
144 unsigned long long SendBufferErrorCONT1;
145 unsigned long long SendBufErr2; /* was Reserved6SBE[0/6] */
146 unsigned long long Reserved6L[2];
147 unsigned long long AvailUpdCount;
148 unsigned long long RcvHdrAddr0;
149 unsigned long long RcvHdrAddrs[16]; /* Why enumerate? */
150 unsigned long long Reserved7hdtl; /* Align next to 300 */
151 unsigned long long RcvHdrTailAddr0; /* 300, like others */
152 unsigned long long RcvHdrTailAddrs[16];
153 unsigned long long Reserved9SW[7]; /* was [8]; we have 17 ports */
154 unsigned long long IbsdEpbAccCtl; /* IB Serdes EPB access control */
155 unsigned long long IbsdEpbTransReg; /* IB Serdes EPB Transaction */
156 unsigned long long Reserved10sds; /* was SerdesStatus on */
157 unsigned long long XGXSConfig;
158 unsigned long long IBSerDesCtrl; /* Was IBPLLCfg on Monty */
159 unsigned long long EEPCtlStat; /* for "boot" EEPROM/FLASH */
160 unsigned long long EEPAddrCmd;
161 unsigned long long EEPData;
162 unsigned long long PcieEpbAccCtl;
163 unsigned long long PcieEpbTransCtl;
164 unsigned long long EfuseCtl; /* E-Fuse control */
165 unsigned long long EfuseData[4];
166 unsigned long long ProcMon;
167	/* this chip moves the following two from previous 200, 208 */
168 unsigned long long PCIeRBufTestReg0;
169 unsigned long long PCIeRBufTestReg1;
170 /* added for this chip */
171 unsigned long long PCIeRBufTestReg2;
172 unsigned long long PCIeRBufTestReg3;
173 /* added for this chip, debug only */
174 unsigned long long SPC_JTAG_ACCESS_REG;
175 unsigned long long LAControlReg;
176 unsigned long long GPIODebugSelReg;
177 unsigned long long DebugPortValueReg;
178 /* added for this chip, DMA */
179 unsigned long long SendDmaBufUsed[3];
180 unsigned long long SendDmaReqTagUsed;
181 /*
182 * added for this chip, EFUSE: note that these program 64-bit
183 * words 2 and 3 */
184 unsigned long long efuse_pgm_data[2];
185 unsigned long long Reserved11LAalign[10]; /* Skip 4B0..4F8 */
186 /* we have 30 regs for DDS and RXEQ in IB SERDES */
187 unsigned long long SerDesDDSRXEQ[30];
188 unsigned long long Reserved12LAalign[2]; /* Skip 5F0, 5F8 */
189 /* added for LA debug support */
190 unsigned long long LAMemory[32];
191};
192
193struct _infinipath_do_not_use_counters {
194 __u64 LBIntCnt;
195 __u64 LBFlowStallCnt;
196 __u64 TxSDmaDescCnt; /* was Reserved1 */
197 __u64 TxUnsupVLErrCnt;
198 __u64 TxDataPktCnt;
199 __u64 TxFlowPktCnt;
200 __u64 TxDwordCnt;
201 __u64 TxLenErrCnt;
202 __u64 TxMaxMinLenErrCnt;
203 __u64 TxUnderrunCnt;
204 __u64 TxFlowStallCnt;
205 __u64 TxDroppedPktCnt;
206 __u64 RxDroppedPktCnt;
207 __u64 RxDataPktCnt;
208 __u64 RxFlowPktCnt;
209 __u64 RxDwordCnt;
210 __u64 RxLenErrCnt;
211 __u64 RxMaxMinLenErrCnt;
212 __u64 RxICRCErrCnt;
213 __u64 RxVCRCErrCnt;
214 __u64 RxFlowCtrlErrCnt;
215 __u64 RxBadFormatCnt;
216 __u64 RxLinkProblemCnt;
217 __u64 RxEBPCnt;
218 __u64 RxLPCRCErrCnt;
219 __u64 RxBufOvflCnt;
220 __u64 RxTIDFullErrCnt;
221 __u64 RxTIDValidErrCnt;
222 __u64 RxPKeyMismatchCnt;
223 __u64 RxP0HdrEgrOvflCnt;
224 __u64 RxP1HdrEgrOvflCnt;
225 __u64 RxP2HdrEgrOvflCnt;
226 __u64 RxP3HdrEgrOvflCnt;
227 __u64 RxP4HdrEgrOvflCnt;
228 __u64 RxP5HdrEgrOvflCnt;
229 __u64 RxP6HdrEgrOvflCnt;
230 __u64 RxP7HdrEgrOvflCnt;
231 __u64 RxP8HdrEgrOvflCnt;
232 __u64 RxP9HdrEgrOvflCnt; /* was Reserved6 */
233 __u64 RxP10HdrEgrOvflCnt; /* was Reserved7 */
234 __u64 RxP11HdrEgrOvflCnt; /* new for IBA7220 */
235 __u64 RxP12HdrEgrOvflCnt; /* new for IBA7220 */
236 __u64 RxP13HdrEgrOvflCnt; /* new for IBA7220 */
237 __u64 RxP14HdrEgrOvflCnt; /* new for IBA7220 */
238 __u64 RxP15HdrEgrOvflCnt; /* new for IBA7220 */
239 __u64 RxP16HdrEgrOvflCnt; /* new for IBA7220 */
240 __u64 IBStatusChangeCnt;
241 __u64 IBLinkErrRecoveryCnt;
242 __u64 IBLinkDownedCnt;
243 __u64 IBSymbolErrCnt;
244 /* The following are new for IBA7220 */
245 __u64 RxVL15DroppedPktCnt;
246 __u64 RxOtherLocalPhyErrCnt;
247 __u64 PcieRetryBufDiagQwordCnt;
248 __u64 ExcessBufferOvflCnt;
249 __u64 LocalLinkIntegrityErrCnt;
250 __u64 RxVlErrCnt;
251 __u64 RxDlidFltrCnt;
252 __u64 Reserved8[7];
253 __u64 PSStat;
254 __u64 PSStart;
255 __u64 PSInterval;
256 __u64 PSRcvDataCount;
257 __u64 PSRcvPktsCount;
258 __u64 PSXmitDataCount;
259 __u64 PSXmitPktsCount;
260 __u64 PSXmitWaitCount;
261};
262
263#define IPATH_KREG_OFFSET(field) (offsetof( \
264 struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
265#define IPATH_CREG_OFFSET(field) (offsetof( \
266 struct _infinipath_do_not_use_counters, field) / sizeof(u64))
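/*
 * Editor's note: a worked example, not part of the original driver,
 * of how these macros turn a struct field into a 64-bit-word register
 * index. Scratch is the 10th u64 in the kernel-regs layout above
 * (after Revision, Control, PageAlign, PortCnt, DebugPortSelect,
 * DebugSigsIntSel, SendRegBase, UserRegBase and CounterRegBase), so:
 *
 *	IPATH_KREG_OFFSET(Scratch)
 *		== offsetof(..._kernel_regs, Scratch) / sizeof(u64)
 *		== 72 / 8 == 9
 */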
267
268static const struct ipath_kregs ipath_7220_kregs = {
269 .kr_control = IPATH_KREG_OFFSET(Control),
270 .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
271 .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
272 .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
273 .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
274 .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
275 .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
276 .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
277 .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
278 .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
279 .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
280 .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
281 .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
282 .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
283 .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
284 .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
285 .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
286 .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
287 .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
288 .kr_intclear = IPATH_KREG_OFFSET(IntClear),
289 .kr_intmask = IPATH_KREG_OFFSET(IntMask),
290 .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
291 .kr_mdio = IPATH_KREG_OFFSET(MDIO),
292 .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
293 .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
294 .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
295 .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
296 .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
297 .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
298 .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
299 .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
300 .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
301 .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
302 .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
303 .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
304 .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
305 .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
306 .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
307 .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
308 .kr_revision = IPATH_KREG_OFFSET(Revision),
309 .kr_scratch = IPATH_KREG_OFFSET(Scratch),
310 .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
311 .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
312 .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendAvailAddr),
313 .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendBufBase),
314 .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendBufCnt),
315 .kr_sendpiosize = IPATH_KREG_OFFSET(SendBufSize),
316 .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
317 .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
318 .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
319 .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
320
321 .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
322
323 /* send dma related regs */
324 .kr_senddmabase = IPATH_KREG_OFFSET(SendDmaBase),
325 .kr_senddmalengen = IPATH_KREG_OFFSET(SendDmaLenGen),
326 .kr_senddmatail = IPATH_KREG_OFFSET(SendDmaTail),
327 .kr_senddmahead = IPATH_KREG_OFFSET(SendDmaHead),
328 .kr_senddmaheadaddr = IPATH_KREG_OFFSET(SendDmaHeadAddr),
329 .kr_senddmabufmask0 = IPATH_KREG_OFFSET(SendDmaBufMask0),
330 .kr_senddmabufmask1 = IPATH_KREG_OFFSET(SendDmaBufMask1),
331 .kr_senddmabufmask2 = IPATH_KREG_OFFSET(SendDmaBufMask2),
332 .kr_senddmastatus = IPATH_KREG_OFFSET(SendDmaStatus),
333
334 /* SerDes related regs */
335 .kr_ibserdesctrl = IPATH_KREG_OFFSET(IBSerDesCtrl),
336 .kr_ib_epbacc = IPATH_KREG_OFFSET(IbsdEpbAccCtl),
337 .kr_ib_epbtrans = IPATH_KREG_OFFSET(IbsdEpbTransReg),
338 .kr_pcie_epbacc = IPATH_KREG_OFFSET(PcieEpbAccCtl),
339 .kr_pcie_epbtrans = IPATH_KREG_OFFSET(PcieEpbTransCtl),
340 .kr_ib_ddsrxeq = IPATH_KREG_OFFSET(SerDesDDSRXEQ),
341
342 /*
343 * These should not be used directly via ipath_read_kreg64(),
344 * use them with ipath_read_kreg64_port()
345 */
346 .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
347 .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
348
349 /*
350 * The rcvpktled register controls one of the debug port signals, so
351 * a packet activity LED can be connected to it.
352 */
353 .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
354 .kr_pcierbuftestreg0 = IPATH_KREG_OFFSET(PCIeRBufTestReg0),
355 .kr_pcierbuftestreg1 = IPATH_KREG_OFFSET(PCIeRBufTestReg1),
356
357 .kr_hrtbt_guid = IPATH_KREG_OFFSET(HRTBT_GUID),
358 .kr_ibcddrctrl = IPATH_KREG_OFFSET(IBCDDRCtrl),
359 .kr_ibcddrstatus = IPATH_KREG_OFFSET(IBCDDRStatus),
360 .kr_jintreload = IPATH_KREG_OFFSET(JIntReload)
361};
362
363static const struct ipath_cregs ipath_7220_cregs = {
364 .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
365 .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
366 .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
367 .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
368 .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
369 .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
370 .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
371 .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
372 .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
373 .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
374 .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
375 .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
376 .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
377 .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
378 .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
379 .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
380 .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
381 .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
382 .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
383 .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
384 .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
385 .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
386 .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
387 .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
388 .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
389 .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
390 .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
391 .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
392 .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
393 .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
394 .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
395 .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
396 .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt),
397 .cr_vl15droppedpktcnt = IPATH_CREG_OFFSET(RxVL15DroppedPktCnt),
398 .cr_rxotherlocalphyerrcnt =
399 IPATH_CREG_OFFSET(RxOtherLocalPhyErrCnt),
400 .cr_excessbufferovflcnt = IPATH_CREG_OFFSET(ExcessBufferOvflCnt),
401 .cr_locallinkintegrityerrcnt =
402 IPATH_CREG_OFFSET(LocalLinkIntegrityErrCnt),
403 .cr_rxvlerrcnt = IPATH_CREG_OFFSET(RxVlErrCnt),
404 .cr_rxdlidfltrcnt = IPATH_CREG_OFFSET(RxDlidFltrCnt),
405 .cr_psstat = IPATH_CREG_OFFSET(PSStat),
406 .cr_psstart = IPATH_CREG_OFFSET(PSStart),
407 .cr_psinterval = IPATH_CREG_OFFSET(PSInterval),
408 .cr_psrcvdatacount = IPATH_CREG_OFFSET(PSRcvDataCount),
409 .cr_psrcvpktscount = IPATH_CREG_OFFSET(PSRcvPktsCount),
410 .cr_psxmitdatacount = IPATH_CREG_OFFSET(PSXmitDataCount),
411 .cr_psxmitpktscount = IPATH_CREG_OFFSET(PSXmitPktsCount),
412 .cr_psxmitwaitcount = IPATH_CREG_OFFSET(PSXmitWaitCount),
413};
414
415/* kr_control bits */
416#define INFINIPATH_C_RESET (1U<<7)
417
418/* kr_intstatus, kr_intclear, kr_intmask bits */
419#define INFINIPATH_I_RCVURG_MASK ((1ULL<<17)-1)
420#define INFINIPATH_I_RCVURG_SHIFT 32
421#define INFINIPATH_I_RCVAVAIL_MASK ((1ULL<<17)-1)
422#define INFINIPATH_I_RCVAVAIL_SHIFT 0
423#define INFINIPATH_I_SERDESTRIMDONE (1ULL<<27)
424
425/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
426#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
427#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
428#define INFINIPATH_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
429#define INFINIPATH_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
430#define INFINIPATH_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
431#define INFINIPATH_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
432#define INFINIPATH_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
433#define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
434#define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
435#define INFINIPATH_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
436#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
437#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
438/* specific to this chip */
439#define INFINIPATH_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
440#define INFINIPATH_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
441#define INFINIPATH_HWE_SDMAMEMREADERR 0x0000000010000000ULL
442#define INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
443#define INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
444#define INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
445#define INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
446#define INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
447#define INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
448#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
449#define INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
450#define INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
451
452#define IBA7220_IBCS_LINKTRAININGSTATE_MASK 0x1F
453#define IBA7220_IBCS_LINKSTATE_SHIFT 5
454#define IBA7220_IBCS_LINKSPEED_SHIFT 8
455#define IBA7220_IBCS_LINKWIDTH_SHIFT 9
456
457#define IBA7220_IBCC_LINKINITCMD_MASK 0x7ULL
458#define IBA7220_IBCC_LINKCMD_SHIFT 19
459#define IBA7220_IBCC_MAXPKTLEN_SHIFT 21
460
461/* kr_ibcddrctrl bits */
462#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
463#define IBA7220_IBC_DLIDLMC_SHIFT 32
464#define IBA7220_IBC_HRTBT_MASK 3
465#define IBA7220_IBC_HRTBT_SHIFT 16
466#define IBA7220_IBC_HRTBT_ENB 0x10000UL
467#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
468#define IBA7220_IBC_LREV_MASK 1
469#define IBA7220_IBC_LREV_SHIFT 8
470#define IBA7220_IBC_RXPOL_MASK 1
471#define IBA7220_IBC_RXPOL_SHIFT 7
472#define IBA7220_IBC_WIDTH_SHIFT 5
473#define IBA7220_IBC_WIDTH_MASK 0x3
474#define IBA7220_IBC_WIDTH_1X_ONLY (0<<IBA7220_IBC_WIDTH_SHIFT)
475#define IBA7220_IBC_WIDTH_4X_ONLY (1<<IBA7220_IBC_WIDTH_SHIFT)
476#define IBA7220_IBC_WIDTH_AUTONEG (2<<IBA7220_IBC_WIDTH_SHIFT)
477#define IBA7220_IBC_SPEED_AUTONEG (1<<1)
478#define IBA7220_IBC_SPEED_SDR (1<<2)
479#define IBA7220_IBC_SPEED_DDR (1<<3)
480#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7<<1)
481#define IBA7220_IBC_IBTA_1_2_MASK (1)
482
483/* kr_ibcddrstatus */
484/* link latency shift is 0, don't bother defining */
485#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
486
487/* kr_extstatus bits */
488#define INFINIPATH_EXTS_FREQSEL 0x2
489#define INFINIPATH_EXTS_SERDESSEL 0x4
490#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
491#define INFINIPATH_EXTS_MEMBIST_DISABLED 0x0000000000008000
492
493/* kr_xgxsconfig bits */
494#define INFINIPATH_XGXS_RESET 0x5ULL
495#define INFINIPATH_XGXS_FC_SAFE (1ULL<<63)
496
497/* kr_rcvpktledcnt */
498#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
499#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
500
501#define _IPATH_GPIO_SDA_NUM 1
502#define _IPATH_GPIO_SCL_NUM 0
503
504#define IPATH_GPIO_SDA (1ULL << \
505 (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
506#define IPATH_GPIO_SCL (1ULL << \
507 (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
508
509#define IBA7220_R_INTRAVAIL_SHIFT 17
510#define IBA7220_R_TAILUPD_SHIFT 35
511#define IBA7220_R_PORTCFG_SHIFT 36
512
513#define INFINIPATH_JINT_PACKETSHIFT 16
514#define INFINIPATH_JINT_DEFAULT_IDLE_TICKS 0
515#define INFINIPATH_JINT_DEFAULT_MAX_PACKETS 0
516
517#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
518
519/*
520 * the size bits give us 2^N, in KB units. 0 marks the entry invalid,
521 * and 7 is reserved. We currently use only 2KB and 4KB
522 */
523#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
524#define IBA7220_TID_SZ_2K (1UL<<IBA7220_TID_SZ_SHIFT) /* 2KB */
525#define IBA7220_TID_SZ_4K (2UL<<IBA7220_TID_SZ_SHIFT) /* 4KB */
526#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
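/*
 * Editor's note: an illustrative composition, not part of the
 * original driver, of a 7220 TID entry from the defines above: the
 * physical address loses its low 11 bits and the size selector at
 * bit 37 encodes the buffer size, e.g. a 4KB buffer at hypothetical
 * physical address 0x12345000:
 *
 *	(0x12345000UL >> IBA7220_TID_PA_SHIFT) | IBA7220_TID_SZ_4K
 *		== 0x2468a | (2UL << 37)
 */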
527
528#define IPATH_AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
529
530static char int_type[16] = "auto";
531module_param_string(interrupt_type, int_type, sizeof(int_type), 0444);
532MODULE_PARM_DESC(interrupt_type, " interrupt_type=auto|force_msi|force_intx");
533
534/* packet rate matching delay; chip has support */
535static u8 rate_to_delay[2][2] = {
536 /* 1x, 4x */
537 { 8, 2 }, /* SDR */
538 { 4, 1 } /* DDR */
539};
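/*
 * Editor's note: an illustrative reading, not part of the original
 * driver, of the rate_to_delay table above. The first index is speed
 * (0 = SDR, 1 = DDR), the second is width (0 = 1x, 1 = 4x), so the
 * slowest link gets the longest delay:
 *
 *	rate_to_delay[0][0] == 8	(SDR 1x)
 *	rate_to_delay[1][1] == 1	(DDR 4x)
 */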
540
541/* 7220 specific hardware errors... */
542static const struct ipath_hwerror_msgs ipath_7220_hwerror_msgs[] = {
543 INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
544 INFINIPATH_HWE_MSG(PCIECPLTIMEOUT, "PCIe completion timeout"),
545 /*
546	 * In practice, it's unlikely that we'll see PCIe PLL, bus
547	 * parity, or memory parity error failures, because most likely we
548 * won't be able to talk to the core of the chip. Nonetheless, we
549 * might see them, if they are in parts of the PCIe core that aren't
550 * essential.
551 */
552 INFINIPATH_HWE_MSG(PCIE1PLLFAILED, "PCIePLL1"),
553 INFINIPATH_HWE_MSG(PCIE0PLLFAILED, "PCIePLL0"),
554 INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH, "PCIe XTLH core parity"),
555 INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM, "PCIe ADM TX core parity"),
556 INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM, "PCIe ADM RX core parity"),
557 INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
558 INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
559	INFINIPATH_HWE_MSG(PCIECPLDATAQUEUEERR, "PCIe cpl data queue"),
560	INFINIPATH_HWE_MSG(PCIECPLHDRQUEUEERR, "PCIe cpl header queue"),
561 INFINIPATH_HWE_MSG(SDMAMEMREADERR, "Send DMA memory read"),
562 INFINIPATH_HWE_MSG(CLK_UC_PLLNOTLOCKED, "uC PLL clock not locked"),
563 INFINIPATH_HWE_MSG(PCIESERDESQ0PCLKNOTDETECT,
564 "PCIe serdes Q0 no clock"),
565 INFINIPATH_HWE_MSG(PCIESERDESQ1PCLKNOTDETECT,
566 "PCIe serdes Q1 no clock"),
567 INFINIPATH_HWE_MSG(PCIESERDESQ2PCLKNOTDETECT,
568 "PCIe serdes Q2 no clock"),
569 INFINIPATH_HWE_MSG(PCIESERDESQ3PCLKNOTDETECT,
570 "PCIe serdes Q3 no clock"),
571 INFINIPATH_HWE_MSG(DDSRXEQMEMORYPARITYERR,
572 "DDS RXEQ memory parity"),
573 INFINIPATH_HWE_MSG(IB_UC_MEMORYPARITYERR, "IB uC memory parity"),
574 INFINIPATH_HWE_MSG(PCIE_UC_OCT0MEMORYPARITYERR,
575 "PCIe uC oct0 memory parity"),
576 INFINIPATH_HWE_MSG(PCIE_UC_OCT1MEMORYPARITYERR,
577 "PCIe uC oct1 memory parity"),
578};
579
580static void autoneg_work(struct work_struct *);
581
582/*
583 * the offset is different for different configured port numbers, since
584 * port0 is fixed in size, but others can vary. Make it a function to
585 * make the issue more obvious.
586 */
587static inline u32 port_egrtid_idx(struct ipath_devdata *dd, unsigned port)
588{
589 return port ? dd->ipath_p0_rcvegrcnt +
590 (port-1) * dd->ipath_rcvegrcnt : 0;
591}
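/*
 * Editor's note: a worked example, not part of the original driver,
 * of port_egrtid_idx() with hypothetical counts p0_rcvegrcnt = 2048
 * and rcvegrcnt = 4096: port 0 starts at index 0, port 1 at 2048,
 * and port 2 at 2048 + (2 - 1) * 4096 = 6144, since only port 0 has
 * the fixed size.
 */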
592
593static void ipath_7220_txe_recover(struct ipath_devdata *dd)
594{
595 ++ipath_stats.sps_txeparity;
596
597 dev_info(&dd->pcidev->dev,
598 "Recovering from TXE PIO parity error\n");
599 ipath_disarm_senderrbufs(dd);
600}
601
602
603/**
604 * ipath_7220_handle_hwerrors - display hardware errors.
605 * @dd: the infinipath device
606 * @msg: the output buffer
607 * @msgl: the size of the output buffer
608 *
609 * Most hardware errors are catastrophic, but for right now,
610 * we'll print them and continue. We reuse the same message buffer
611 * as ipath_handle_errors() to avoid excessive stack usage.
613 */
614static void ipath_7220_handle_hwerrors(struct ipath_devdata *dd, char *msg,
615 size_t msgl)
616{
617 ipath_err_t hwerrs;
618 u32 bits, ctrl;
619 int isfatal = 0;
620 char bitsmsg[64];
621 int log_idx;
622
623 hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
624 if (!hwerrs) {
625 /*
626 * Better than printing confusing messages.
627 * This seems to be related to clearing the CRC error, or
628 * the PLL error, during init.
629 */
630 ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
631 goto bail;
632 } else if (hwerrs == ~0ULL) {
633 ipath_dev_err(dd, "Read of hardware error status failed "
634 "(all bits set); ignoring\n");
635 goto bail;
636 }
637 ipath_stats.sps_hwerrs++;
638
639 /*
640 * Always clear the error status register, except MEMBISTFAIL,
641 * regardless of whether we continue or stop using the chip.
642 * We want that set so we know it failed, even across driver reload.
643 * We'll still ignore it in the hwerrmask. We do this partly for
644 * diagnostics, but also for support.
645 */
646 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
647 hwerrs & ~INFINIPATH_HWE_MEMBISTFAILED);
648
649 hwerrs &= dd->ipath_hwerrmask;
650
651 /* We log some errors to EEPROM, check if we have any of those. */
652 for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
653 if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
654 ipath_inc_eeprom_err(dd, log_idx, 1);
655 /*
656 * Make sure we get this much out, unless told to be quiet,
657 * or it's occurred within the last 5 seconds.
658 */
659 if ((hwerrs & ~(dd->ipath_lasthwerror |
660 ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
661 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
662 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
663 (ipath_debug & __IPATH_VERBDBG))
664 dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
665 "(cleared)\n", (unsigned long long) hwerrs);
666 dd->ipath_lasthwerror |= hwerrs;
667
668 if (hwerrs & ~dd->ipath_hwe_bitsextant)
669 ipath_dev_err(dd, "hwerror interrupt with unknown errors "
670 "%llx set\n", (unsigned long long)
671 (hwerrs & ~dd->ipath_hwe_bitsextant));
672
673 if (hwerrs & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR)
674 ipath_sd7220_clr_ibpar(dd);
675
676 ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
677 if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
678 /*
679 * Parity errors in send memory are recoverable by h/w
680 * just do housekeeping, exit freeze mode and continue.
681 */
682 if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
683 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
684 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
685 ipath_7220_txe_recover(dd);
686 hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
687 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
688 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
689 }
690 if (hwerrs) {
691 /*
692 * If any bits that we aren't ignoring are set, only make
693 * the complaint once, in case it's stuck or recurring and
694 * we get here multiple times.
695 * Force the link down, so the switch knows, and the
696 * LEDs are turned off.
697 */
698 if (dd->ipath_flags & IPATH_INITTED) {
699 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
700 ipath_setup_7220_setextled(dd,
701 INFINIPATH_IBCS_L_STATE_DOWN,
702 INFINIPATH_IBCS_LT_STATE_DISABLED);
703 ipath_dev_err(dd, "Fatal Hardware Error "
704 "(freeze mode), no longer"
705 " usable, SN %.16s\n",
706 dd->ipath_serial);
707 isfatal = 1;
708 }
709 /*
710 * Mark as having had an error for driver, and also
711 * for /sys and status word mapped to user programs.
712 * This marks unit as not usable, until reset.
713 */
714 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
715 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
716 dd->ipath_flags &= ~IPATH_INITTED;
717 } else {
718 ipath_dbg("Clearing freezemode on ignored or "
719 "recovered hardware error\n");
720 ipath_clear_freeze(dd);
721 }
722 }
723
724 *msg = '\0';
725
726 if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
727 strlcat(msg, "[Memory BIST test failed, "
728 "InfiniPath hardware unusable]", msgl);
729 /* ignore from now on, so disable until driver reloaded */
730 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
731 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
732 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
733 dd->ipath_hwerrmask);
734 }
735
736 ipath_format_hwerrors(hwerrs,
737 ipath_7220_hwerror_msgs,
738 ARRAY_SIZE(ipath_7220_hwerror_msgs),
739 msg, msgl);
740
741 if (hwerrs & (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
742 << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT)) {
743 bits = (u32) ((hwerrs >>
744 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) &
745 INFINIPATH_HWE_PCIEMEMPARITYERR_MASK);
746 snprintf(bitsmsg, sizeof bitsmsg,
747 "[PCIe Mem Parity Errs %x] ", bits);
748 strlcat(msg, bitsmsg, msgl);
749 }
750
751#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
752 INFINIPATH_HWE_COREPLL_RFSLIP)
753
754 if (hwerrs & _IPATH_PLL_FAIL) {
755 snprintf(bitsmsg, sizeof bitsmsg,
756 "[PLL failed (%llx), InfiniPath hardware unusable]",
757 (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
758 strlcat(msg, bitsmsg, msgl);
759 /* ignore from now on, so disable until driver reloaded */
760 dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
761 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
762 dd->ipath_hwerrmask);
763 }
764
765 if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
766 /*
767 * If it occurs, it is left masked since the external
768 * interface is unused.
769 */
770 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
771 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
772 dd->ipath_hwerrmask);
773 }
774
775 ipath_dev_err(dd, "%s hardware error\n", msg);
776 /*
777 * For the /sys status file. If no trailing '}' is copied, we'll
778 * know it was truncated.
779 */
780 if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
781 snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
782 "{%s}", msg);
783bail:;
784}
785
786/**
787 * ipath_7220_boardname - fill in the board name
788 * @dd: the infinipath device
789 * @name: the output buffer
790 * @namelen: the size of the output buffer
791 *
792 * info is based on the board revision register
793 */
794static int ipath_7220_boardname(struct ipath_devdata *dd, char *name,
795 size_t namelen)
796{
797 char *n = NULL;
798 u8 boardrev = dd->ipath_boardrev;
799 int ret;
800
801 if (boardrev == 15) {
802 /*
803 * Emulator sometimes comes up all-ones, rather than zero.
804 */
805 boardrev = 0;
806 dd->ipath_boardrev = boardrev;
807 }
808 switch (boardrev) {
809 case 0:
810 n = "InfiniPath_7220_Emulation";
811 break;
812 case 1:
813 n = "InfiniPath_QLE7240";
814 break;
815 case 2:
816 n = "InfiniPath_QLE7280";
817 break;
818 case 3:
819 n = "InfiniPath_QLE7242";
820 break;
821 case 4:
822 n = "InfiniPath_QEM7240";
823 break;
824 case 5:
825 n = "InfiniPath_QMI7240";
826 break;
827 case 6:
828 n = "InfiniPath_QMI7264";
829 break;
830 case 7:
831 n = "InfiniPath_QMH7240";
832 break;
833 case 8:
834 n = "InfiniPath_QME7240";
835 break;
836 case 9:
837 n = "InfiniPath_QLE7250";
838 break;
839 case 10:
840 n = "InfiniPath_QLE7290";
841 break;
842 case 11:
843 n = "InfiniPath_QEM7250";
844 break;
845 case 12:
846 n = "InfiniPath_QLE-Bringup";
847 break;
848 default:
849 ipath_dev_err(dd,
850 "Don't yet know about board with ID %u\n",
851 boardrev);
852 snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
853 boardrev);
854 break;
855 }
856 if (n)
857 snprintf(name, namelen, "%s", n);
858
859 if (dd->ipath_majrev != 5 || !dd->ipath_minrev ||
860 dd->ipath_minrev > 2) {
861 ipath_dev_err(dd, "Unsupported InfiniPath hardware "
862 "revision %u.%u!\n",
863 dd->ipath_majrev, dd->ipath_minrev);
864 ret = 1;
865 } else if (dd->ipath_minrev == 1 &&
866 !(dd->ipath_flags & IPATH_INITTED)) {
867 /* Rev1 chips are prototypes. Complain at init, but allow use */
868 ipath_dev_err(dd, "Unsupported hardware "
869 "revision %u.%u, Contact support@qlogic.com\n",
870 dd->ipath_majrev, dd->ipath_minrev);
871 ret = 0;
872 } else
873 ret = 0;
874
875 /*
876 * Set here not in ipath_init_*_funcs because we have to do
877 * it after we can read chip registers.
878 */
879 dd->ipath_ureg_align = 0x10000; /* 64KB alignment */
880
881 return ret;
882}
883
884/**
885 * ipath_7220_init_hwerrors - enable hardware errors
886 * @dd: the infinipath device
887 *
888 * Now that we have finished initializing everything that might
889 * reasonably cause a hardware error, and have cleared those error
890 * bits as they occurred, we can enable hardware errors in the mask
891 * (potentially enabling freeze mode), and enable hardware errors as
892 * errors (along with everything else) in errormask.
893 */
894static void ipath_7220_init_hwerrors(struct ipath_devdata *dd)
895{
896 ipath_err_t val;
897 u64 extsval;
898
899 extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
900
901 if (!(extsval & (INFINIPATH_EXTS_MEMBIST_ENDTEST |
902 INFINIPATH_EXTS_MEMBIST_DISABLED)))
903 ipath_dev_err(dd, "MemBIST did not complete!\n");
904 if (extsval & INFINIPATH_EXTS_MEMBIST_DISABLED)
905 dev_info(&dd->pcidev->dev, "MemBIST is disabled.\n");
906
907 val = ~0ULL; /* barring bugs, all hwerrors become interrupts */
908
909 if (!dd->ipath_boardrev) /* no PLL for Emulator */
910 val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
911
912 if (dd->ipath_minrev == 1)
913 val &= ~(1ULL << 42); /* TXE LaunchFIFO Parity rev1 issue */
914
915 val &= ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR;
916 dd->ipath_hwerrmask = val;
917
918 /*
919 * The special trigger "error" is for debugging purposes. It
920 * works around a processor/chipset problem. The error
921 * interrupt allows us to count occurrences, but we don't
922 * want to pay the overhead for normal use. Emulation only.
923 */
924 if (!dd->ipath_boardrev)
925 dd->ipath_maskederrs = INFINIPATH_E_SENDSPECIALTRIGGER;
926}
927
928/*
929 * All detailed interaction with the SerDes has been moved to ipath_sd7220.c
930 *
931 * The portion of IBA7220-specific bringup_serdes() that actually deals with
932 * registers and memory within the SerDes itself is ipath_sd7220_init().
933 */
934
935/**
936 * ipath_7220_bringup_serdes - bring up the serdes
937 * @dd: the infinipath device
938 */
939static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
940{
941 int ret = 0;
942 u64 val, prev_val, guid;
943 int was_reset; /* Note whether uC was reset */
944
945 ipath_dbg("Trying to bringup serdes\n");
946
947 if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
948 INFINIPATH_HWE_SERDESPLLFAILED) {
949 ipath_dbg("At start, serdes PLL failed bit set "
950 "in hwerrstatus, clearing and continuing\n");
951 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
952 INFINIPATH_HWE_SERDESPLLFAILED);
953 }
954
955 dd->ibdeltainprog = 1;
956 dd->ibsymsnap =
957 ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
958 dd->iblnkerrsnap =
959 ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
960
961 if (!dd->ipath_ibcddrctrl) {
962 /* not on re-init after reset */
963 dd->ipath_ibcddrctrl =
964 ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrctrl);
965
966 if (dd->ipath_link_speed_enabled ==
967 (IPATH_IB_SDR | IPATH_IB_DDR))
968 dd->ipath_ibcddrctrl |=
969 IBA7220_IBC_SPEED_AUTONEG_MASK |
970 IBA7220_IBC_IBTA_1_2_MASK;
971 else
972 dd->ipath_ibcddrctrl |=
973 dd->ipath_link_speed_enabled == IPATH_IB_DDR
974 ? IBA7220_IBC_SPEED_DDR :
975 IBA7220_IBC_SPEED_SDR;
976 if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X |
977 IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X))
978 dd->ipath_ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
979 else
980 dd->ipath_ibcddrctrl |=
981 dd->ipath_link_width_enabled == IB_WIDTH_4X
982 ? IBA7220_IBC_WIDTH_4X_ONLY :
983 IBA7220_IBC_WIDTH_1X_ONLY;
984
985 /* always enable these on driver reload, not sticky */
986 dd->ipath_ibcddrctrl |=
987 IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
988 dd->ipath_ibcddrctrl |=
989 IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
990 /*
991 * automatic lane reversal detection for receive
992 * doesn't work correctly in rev 1, so disable it
993 * on that rev, otherwise enable (disabling not
994 * sticky across reload for >rev1)
995 */
996 if (dd->ipath_minrev == 1)
997 dd->ipath_ibcddrctrl &=
998 ~IBA7220_IBC_LANE_REV_SUPPORTED;
999 else
1000 dd->ipath_ibcddrctrl |=
1001 IBA7220_IBC_LANE_REV_SUPPORTED;
1002 }
1003
1004 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
1005 dd->ipath_ibcddrctrl);
1006
1007 ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), 0ULL);
1008
1009 /* IBA7220 has SERDES MPU reset in D0 of what _was_ IBPLLCfg */
1010 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl);
1011 /* remember if uC was in Reset or not, for dactrim */
1012 was_reset = (val & 1);
1013 ipath_cdbg(VERBOSE, "IBReset %s xgxsconfig %llx\n",
1014 was_reset ? "Asserted" : "Negated", (unsigned long long)
1015 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
1016
1017 if (dd->ipath_boardrev) {
1018 /*
1019 * Hardware is not emulator, and may have been reset. Init it.
1020 * Below will release reset, but needs to know if chip was
1021 * originally in reset, to only trim DACs on first time
1022 * after chip reset or powercycle (not driver reload)
1023 */
1024 ret = ipath_sd7220_init(dd, was_reset);
1025 }
1026
1027 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
1028 prev_val = val;
1029 val |= INFINIPATH_XGXS_FC_SAFE;
1030 if (val != prev_val) {
1031 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1032 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1033 }
1034 if (val & INFINIPATH_XGXS_RESET)
1035 val &= ~INFINIPATH_XGXS_RESET;
1036 if (val != prev_val)
1037 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1038
1039 ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n",
1040 (unsigned long long)
1041 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig),
1042 (unsigned long long) prev_val);
1043
1044 guid = be64_to_cpu(dd->ipath_guid);
1045
1046 if (!guid) {
1047 /* have to have something, so use likely unique tsc */
1048 guid = get_cycles();
1049 ipath_dbg("No GUID for heartbeat, faking %llx\n",
1050 (unsigned long long)guid);
1051 } else
1052 ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n",
1053 (unsigned long long) guid);
1054 ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
1055 return ret;
1056}
1057
1058static void ipath_7220_config_jint(struct ipath_devdata *dd,
1059 u16 idle_ticks, u16 max_packets)
1060{
1061
1062 /*
1063 * We can request a receive interrupt for 1 or more packets
1064 * from current offset.
1065 */
1066 if (idle_ticks == 0 || max_packets == 0)
1067 /* interrupt after one packet if no mitigation */
1068 dd->ipath_rhdrhead_intr_off =
1069 1ULL << IBA7220_HDRHEAD_PKTINT_SHIFT;
1070 else
1071 /* Turn off RcvHdrHead interrupts if using mitigation */
1072 dd->ipath_rhdrhead_intr_off = 0ULL;
1073
1074 /* refresh kernel RcvHdrHead registers... */
1075 ipath_write_ureg(dd, ur_rcvhdrhead,
1076 dd->ipath_rhdrhead_intr_off |
1077 dd->ipath_pd[0]->port_head, 0);
1078
1079 dd->ipath_jint_max_packets = max_packets;
1080 dd->ipath_jint_idle_ticks = idle_ticks;
1081 ipath_write_kreg(dd, dd->ipath_kregs->kr_jintreload,
1082 ((u64) max_packets << INFINIPATH_JINT_PACKETSHIFT) |
1083 idle_ticks);
1084}
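/*
 * Editor's illustration of the JIntReload packing above: with
 * max_packets = 10 and idle_ticks = 100, the register is written as
 * (10 << INFINIPATH_JINT_PACKETSHIFT) | 100, so (as the field names
 * suggest) the chip coalesces receive interrupts until 10 packets
 * have arrived or the link has been idle for 100 ticks.
 */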
1085
1086/**
1087 * ipath_7220_quiet_serdes - set serdes to txidle
1088 * @dd: the infinipath device
1089 * Called when driver is being unloaded
1090 */
1091static void ipath_7220_quiet_serdes(struct ipath_devdata *dd)
1092{
1093 u64 val;
1094 if (dd->ibsymdelta || dd->iblnkerrdelta ||
1095 dd->ibdeltainprog) {
1096 u64 diagc;
1097 /* enable counter writes */
1098 diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl);
1099 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl,
1100 diagc | INFINIPATH_DC_COUNTERWREN);
1101
1102 if (dd->ibsymdelta || dd->ibdeltainprog) {
1103 val = ipath_read_creg32(dd,
1104 dd->ipath_cregs->cr_ibsymbolerrcnt);
1105 if (dd->ibdeltainprog)
1106 val -= val - dd->ibsymsnap;
1107 val -= dd->ibsymdelta;
1108 ipath_write_creg(dd,
1109 dd->ipath_cregs->cr_ibsymbolerrcnt, val);
1110 }
1111 if (dd->iblnkerrdelta || dd->ibdeltainprog) {
1112 val = ipath_read_creg32(dd,
1113 dd->ipath_cregs->cr_iblinkerrrecovcnt);
1114 if (dd->ibdeltainprog)
1115 val -= val - dd->iblnkerrsnap;
1116 val -= dd->iblnkerrdelta;
1117 ipath_write_creg(dd,
1118 dd->ipath_cregs->cr_iblinkerrrecovcnt, val);
1119 }
1120
1121 /* and disable counter writes */
1122 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc);
1123 }
1124
1125 dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
1126 wake_up(&dd->ipath_autoneg_wait);
1127 cancel_delayed_work(&dd->ipath_autoneg_work);
1128 flush_scheduled_work();
1129 ipath_shutdown_relock_poll(dd);
1130 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
1131 val |= INFINIPATH_XGXS_RESET;
1132 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1133}
1134
1135static int ipath_7220_intconfig(struct ipath_devdata *dd)
1136{
1137 ipath_7220_config_jint(dd, dd->ipath_jint_idle_ticks,
1138 dd->ipath_jint_max_packets);
1139 return 0;
1140}
1141
1142/**
1143 * ipath_setup_7220_setextled - set the state of the two external LEDs
1144 * @dd: the infinipath device
1145 * @lst: the L state
1146 * @ltst: the LT state
1147 *
1148 * These LEDs indicate the physical and logical state of IB link.
1149 * For this chip (at least with recommended board pinouts), LED1
1150 * is Yellow (logical state) and LED2 is Green (physical state).
1151 *
1152 * Note: We try to match the Mellanox HCA LED behavior as best
1153 * we can. Green indicates physical link state is OK (something is
1154 * plugged in, and we can train).
1155 * Amber indicates the link is logically up (ACTIVE).
1156 * Mellanox further blinks the amber LED to indicate data packet
1157 * activity, but we have no hardware support for that, so it would
1158 * require waking up every 10-20 msecs and checking the counters
1159 * on the chip, and then turning the LED off if appropriate. That's
1160 * visible overhead, so not something we will do.
1161 *
1162 */
1163static void ipath_setup_7220_setextled(struct ipath_devdata *dd, u64 lst,
1164 u64 ltst)
1165{
1166 u64 extctl, ledblink = 0;
1167 unsigned long flags = 0;
1168
1169 /* the diags use the LED to indicate diag info, so we leave
1170 * the external LED alone when the diags are running */
1171 if (ipath_diag_inuse)
1172 return;
1173
1174 /* Allow override of LED display for, e.g., locating a system in a rack */
1175 if (dd->ipath_led_override) {
1176 ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
1177 ? INFINIPATH_IBCS_LT_STATE_LINKUP
1178 : INFINIPATH_IBCS_LT_STATE_DISABLED;
1179 lst = (dd->ipath_led_override & IPATH_LED_LOG)
1180 ? INFINIPATH_IBCS_L_STATE_ACTIVE
1181 : INFINIPATH_IBCS_L_STATE_DOWN;
1182 }
1183
1184 spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
1185 extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
1186 INFINIPATH_EXTC_LED2PRIPORT_ON);
1187 if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) {
1188 extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
1189 /*
1190 * Counts are in chip clock (4ns) periods.
1191 * This is 66.6 ms on and
1192 * 187.5 ms (3/16 sec) off, while packets are received.
1193 */
1194 ledblink = ((66600*1000UL/4) << IBA7220_LEDBLINK_ON_SHIFT)
1195 | ((187500*1000UL/4) << IBA7220_LEDBLINK_OFF_SHIFT);
1196 }
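	/*
	 * Editor's check of the arithmetic above: 66600 us * 1000 / 4 ns
	 * per tick gives 16,650,000 chip clocks on, and 187500 us * 1000
	 * / 4 gives 46,875,000 clocks off, i.e. roughly a 1:3 duty cycle.
	 */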
1197 if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
1198 extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
1199 dd->ipath_extctrl = extctl;
1200 ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
1201 spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
1202
1203 if (ledblink) /* blink the LED on packet receive */
1204 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvpktledcnt,
1205 ledblink);
1206}
1207
1208/*
1209 * Similar to pci_intx(pdev, 1), except that we make sure
1210 * MSI is off...
1211 */
1212static void ipath_enable_intx(struct pci_dev *pdev)
1213{
1214 u16 cw, new;
1215 int pos;
1216
1217 /* first, turn on INTx */
1218 pci_read_config_word(pdev, PCI_COMMAND, &cw);
1219 new = cw & ~PCI_COMMAND_INTX_DISABLE;
1220 if (new != cw)
1221 pci_write_config_word(pdev, PCI_COMMAND, new);
1222
1223 /* then turn off MSI */
1224 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
1225 if (pos) {
1226 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
1227 new = cw & ~PCI_MSI_FLAGS_ENABLE;
1228 if (new != cw)
1229 pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
1230 }
1231}
1232
1233static int ipath_msi_enabled(struct pci_dev *pdev)
1234{
1235 int pos, ret = 0;
1236
1237 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
1238 if (pos) {
1239 u16 cw;
1240
1241 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
1242 ret = !!(cw & PCI_MSI_FLAGS_ENABLE);
1243 }
1244 return ret;
1245}
1246
1247/*
1248 * Disable the MSI interrupt if enabled, and clear ipath_msi_lo.
1249 * The flag is used primarily for the fallback to INTx, but
1250 * is also used in reinit after reset as a flag.
1251 */
1252static void ipath_7220_nomsi(struct ipath_devdata *dd)
1253{
1254 dd->ipath_msi_lo = 0;
1255
1256 if (ipath_msi_enabled(dd->pcidev)) {
1257 /*
1258 * free, but don't zero; later kernels require
1259 * it be freed before disable_msi, so the intx
1260 * setup has to request it again.
1261 */
1262 if (dd->ipath_irq)
1263 free_irq(dd->ipath_irq, dd);
1264 pci_disable_msi(dd->pcidev);
1265 }
1266}
1267
1268/*
1269 * ipath_setup_7220_cleanup - clean up any chip-specific stuff
1270 * @dd: the infinipath device
1271 *
1272 * Nothing but MSI interrupt cleanup for now.
1273 *
1274 * This is called during driver unload.
1275 */
1276static void ipath_setup_7220_cleanup(struct ipath_devdata *dd)
1277{
1278 ipath_7220_nomsi(dd);
1279}
1280
1281
1282static void ipath_7220_pcie_params(struct ipath_devdata *dd, u32 boardrev)
1283{
1284 u16 linkstat, minwidth, speed;
1285 int pos;
1286
1287 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
1288 if (!pos) {
1289 ipath_dev_err(dd, "Can't find PCI Express capability!\n");
1290 goto bail;
1291 }
1292
1293 pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
1294 &linkstat);
1295 /*
1296 * speed is bits 0-3, link width is bits 4-8;
1297 * no defines for them in the headers
1298 */
1299 speed = linkstat & 0xf;
1300 linkstat >>= 4;
1301 linkstat &= 0x1f;
1302 dd->ipath_lbus_width = linkstat;
1303 switch (boardrev) {
1304 case 0:
1305 case 2:
1306 case 10:
1307 case 12:
1308 minwidth = 16; /* x16 capable boards */
1309 break;
1310 default:
1311 minwidth = 8; /* x8 capable boards */
1312 break;
1313 }
1314
1315 switch (speed) {
1316 case 1:
1317 dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
1318 break;
1319 case 2:
1320 dd->ipath_lbus_speed = 5000; /* Gen2, 5GHz */
1321 break;
1322 default: /* not defined, assume gen1 */
1323 dd->ipath_lbus_speed = 2500;
1324 break;
1325 }
1326
1327 if (linkstat < minwidth)
1328 ipath_dev_err(dd,
1329 "PCIe width %u (x%u HCA), performance "
1330 "reduced\n", linkstat, minwidth);
1331 else
1332 ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x%u HCA)\n",
1333 dd->ipath_lbus_speed, linkstat, minwidth);
1334
1335 if (speed != 1)
1336 ipath_dev_err(dd,
1337 "PCIe linkspeed %u is incorrect; "
1338 "should be 1 (2500)!\n", speed);
1339
1340bail:
1341 /* fill in string, even on errors */
1342 snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
1343 "PCIe,%uMHz,x%u\n",
1344 dd->ipath_lbus_speed,
1345 dd->ipath_lbus_width);
1346 return;
1347}
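/*
 * Worked example (editor's addition): a link status value of 0x0081
 * decodes above as speed = 0x0081 & 0xf = 1 (Gen1, 2.5GT/s) and
 * width = (0x0081 >> 4) & 0x1f = 8 (x8), so ipath_lbus_info is
 * formatted as "PCIe,2500MHz,x8".
 */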
1348
1349
1350/**
1351 * ipath_setup_7220_config - setup PCIe config related stuff
1352 * @dd: the infinipath device
1353 * @pdev: the PCI device
1354 *
1355 * The pci_enable_msi() call will fail on systems with MSI quirks,
1356 * such as those with an AMD8131, even if the device of interest is
1357 * not attached to that device (in the 2.6.13 - 2.6.15 kernels, at
1358 * least; fixed late in 2.6.16).
1359 * All that can be done is to edit the kernel source to remove the
1360 * quirk check until that is fixed.
1361 * We do not need to call pci_enable_msi() for our HyperTransport
1362 * chip, even though it uses MSI, and we want to avoid the quirk
1363 * warning, so we call it only for PCIe. If we do end up needing
1364 * pci_enable_msi() at some point in the future for HT, we'll move
1365 * the call back into the main init_one code.
1366 * We save the MSI lo and hi values, so we can restore them after
1367 * chip reset (the kernel PCI infrastructure doesn't yet handle that
1368 * correctly).
1369 */
1370static int ipath_setup_7220_config(struct ipath_devdata *dd,
1371 struct pci_dev *pdev)
1372{
1373 int pos, ret = -1;
1374 u32 boardrev;
1375
1376 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
1377
1378 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
1379 if (!strcmp(int_type, "force_msi") || !strcmp(int_type, "auto"))
1380 ret = pci_enable_msi(pdev);
1381 if (ret) {
1382 if (!strcmp(int_type, "force_msi")) {
1383 ipath_dev_err(dd, "pci_enable_msi failed: %d, "
1384 "force_msi is on, so not continuing.\n",
1385 ret);
1386 return ret;
1387 }
1388
1389 ipath_enable_intx(pdev);
1390 if (!strcmp(int_type, "auto"))
1391 ipath_dev_err(dd, "pci_enable_msi failed: %d, "
1392 "falling back to INTx\n", ret);
1393 } else if (pos) {
1394 u16 control;
1395 pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
1396 &dd->ipath_msi_lo);
1397 pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
1398 &dd->ipath_msi_hi);
1399 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS,
1400 &control);
1401 /* now save the data (vector) info */
1402 pci_read_config_word(pdev,
1403 pos + ((control & PCI_MSI_FLAGS_64BIT)
1404 ? PCI_MSI_DATA_64 :
1405 PCI_MSI_DATA_32),
1406 &dd->ipath_msi_data);
1407 } else
1408 ipath_dev_err(dd, "Can't find MSI capability, "
1409 "can't save MSI settings for reset\n");
1410
1411 dd->ipath_irq = pdev->irq;
1412
1413 /*
1414 * We save the cachelinesize also, although it doesn't
1415 * really matter.
1416 */
1417 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
1418 &dd->ipath_pci_cacheline);
1419
1420 /*
1421 * This function is called early, before ipath_boardrev is set, and
1422 * it's too early in init to use ipath_read_kreg64(), so use readq().
1423 */
1424 boardrev = (readq(&dd->ipath_kregbase[dd->ipath_kregs->kr_revision])
1425 >> INFINIPATH_R_BOARDID_SHIFT) & INFINIPATH_R_BOARDID_MASK;
1426
1427 ipath_7220_pcie_params(dd, boardrev);
1428
1429 dd->ipath_flags |= IPATH_NODMA_RTAIL | IPATH_HAS_SEND_DMA |
1430 IPATH_HAS_PBC_CNT | IPATH_HAS_THRESH_UPDATE;
1431 dd->ipath_pioupd_thresh = 4U; /* set default update threshold */
1432 return 0;
1433}
1434
1435static void ipath_init_7220_variables(struct ipath_devdata *dd)
1436{
1437 /*
1438 * setup the register offsets, since they are different for each
1439 * chip
1440 */
1441 dd->ipath_kregs = &ipath_7220_kregs;
1442 dd->ipath_cregs = &ipath_7220_cregs;
1443
1444 /*
1445 * bits for selecting i2c direction and values,
1446 * used for I2C serial flash
1447 */
1448 dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
1449 dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
1450 dd->ipath_gpio_sda = IPATH_GPIO_SDA;
1451 dd->ipath_gpio_scl = IPATH_GPIO_SCL;
1452
1453 /*
1454 * Fill in data for field-values that change in IBA7220.
1455 * We dynamically specify only the mask for LINKTRAININGSTATE
1456 * and only the shift for LINKSTATE, as they are the only ones
1457 * that change. Also precalculate the 3 link states of interest
1458 * and the combined mask.
1459 */
1460 dd->ibcs_ls_shift = IBA7220_IBCS_LINKSTATE_SHIFT;
1461 dd->ibcs_lts_mask = IBA7220_IBCS_LINKTRAININGSTATE_MASK;
1462 dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
1463 dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
1464 dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1465 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1466 (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
1467 dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1468 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1469 (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
1470 dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1471 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1472 (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
1473
1474 /*
1475 * Fill in data for ibcc field-values that change in IBA7220.
1476 * We dynamically specify only the mask for LINKINITCMD
1477 * and only the shift for LINKCMD and MAXPKTLEN, as they are
1478 * the only ones that change.
1479 */
1480 dd->ibcc_lic_mask = IBA7220_IBCC_LINKINITCMD_MASK;
1481 dd->ibcc_lc_shift = IBA7220_IBCC_LINKCMD_SHIFT;
1482 dd->ibcc_mpl_shift = IBA7220_IBCC_MAXPKTLEN_SHIFT;
1483
1484 /* Fill in shifts for RcvCtrl. */
1485 dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
1486 dd->ipath_r_intravail_shift = IBA7220_R_INTRAVAIL_SHIFT;
1487 dd->ipath_r_tailupd_shift = IBA7220_R_TAILUPD_SHIFT;
1488 dd->ipath_r_portcfg_shift = IBA7220_R_PORTCFG_SHIFT;
1489
1490 /* variables for sanity checking interrupt and errors */
1491 dd->ipath_hwe_bitsextant =
1492 (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1493 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
1494 (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
1495 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
1496 (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK <<
1497 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT) |
1498 INFINIPATH_HWE_PCIE1PLLFAILED |
1499 INFINIPATH_HWE_PCIE0PLLFAILED |
1500 INFINIPATH_HWE_PCIEPOISONEDTLP |
1501 INFINIPATH_HWE_PCIECPLTIMEOUT |
1502 INFINIPATH_HWE_PCIEBUSPARITYXTLH |
1503 INFINIPATH_HWE_PCIEBUSPARITYXADM |
1504 INFINIPATH_HWE_PCIEBUSPARITYRADM |
1505 INFINIPATH_HWE_MEMBISTFAILED |
1506 INFINIPATH_HWE_COREPLL_FBSLIP |
1507 INFINIPATH_HWE_COREPLL_RFSLIP |
1508 INFINIPATH_HWE_SERDESPLLFAILED |
1509 INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
1510 INFINIPATH_HWE_IBCBUSFRSPCPARITYERR |
1511 INFINIPATH_HWE_PCIECPLDATAQUEUEERR |
1512 INFINIPATH_HWE_PCIECPLHDRQUEUEERR |
1513 INFINIPATH_HWE_SDMAMEMREADERR |
1514 INFINIPATH_HWE_CLK_UC_PLLNOTLOCKED |
1515 INFINIPATH_HWE_PCIESERDESQ0PCLKNOTDETECT |
1516 INFINIPATH_HWE_PCIESERDESQ1PCLKNOTDETECT |
1517 INFINIPATH_HWE_PCIESERDESQ2PCLKNOTDETECT |
1518 INFINIPATH_HWE_PCIESERDESQ3PCLKNOTDETECT |
1519 INFINIPATH_HWE_DDSRXEQMEMORYPARITYERR |
1520 INFINIPATH_HWE_IB_UC_MEMORYPARITYERR |
1521 INFINIPATH_HWE_PCIE_UC_OCT0MEMORYPARITYERR |
1522 INFINIPATH_HWE_PCIE_UC_OCT1MEMORYPARITYERR;
1523 dd->ipath_i_bitsextant =
1524 INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED |
1525 (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
1526 (INFINIPATH_I_RCVAVAIL_MASK <<
1527 INFINIPATH_I_RCVAVAIL_SHIFT) |
1528 INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
1529 INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO |
1530 INFINIPATH_I_JINT | INFINIPATH_I_SERDESTRIMDONE;
1531 dd->ipath_e_bitsextant =
1532 INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
1533 INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
1534 INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
1535 INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
1536 INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
1537 INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
1538 INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
1539 INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
1540 INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
1541 INFINIPATH_E_SENDSPECIALTRIGGER |
1542 INFINIPATH_E_SDMADISABLED | INFINIPATH_E_SMINPKTLEN |
1543 INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNDERRUN |
1544 INFINIPATH_E_SPKTLEN | INFINIPATH_E_SDROPPEDSMPPKT |
1545 INFINIPATH_E_SDROPPEDDATAPKT |
1546 INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
1547 INFINIPATH_E_SUNSUPVL | INFINIPATH_E_SENDBUFMISUSE |
1548 INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND |
1549 INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE |
1550 INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG |
1551 INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW |
1552 INFINIPATH_E_SDMAUNEXPDATA |
1553 INFINIPATH_E_IBSTATUSCHANGED | INFINIPATH_E_INVALIDADDR |
1554 INFINIPATH_E_RESET | INFINIPATH_E_HARDWARE |
1555 INFINIPATH_E_SDMADESCADDRMISALIGN |
1556 INFINIPATH_E_INVALIDEEPCMD;
1557
1558 dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
1559 dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
1560 dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
1561 dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
1562 dd->ipath_flags |= IPATH_INTREG_64 | IPATH_HAS_MULT_IB_SPEED
1563 | IPATH_HAS_LINK_LATENCY;
1564
1565 /*
1566 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
1567 * 2 is Some Misc, 3 is reserved for future.
1568 */
1569 dd->ipath_eep_st_masks[0].hwerrs_to_log =
1570 INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
1571 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
1572
1573 dd->ipath_eep_st_masks[1].hwerrs_to_log =
1574 INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1575 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
1576
1577 dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
1578
1579 ipath_linkrecovery = 0;
1580
1581 init_waitqueue_head(&dd->ipath_autoneg_wait);
1582 INIT_DELAYED_WORK(&dd->ipath_autoneg_work, autoneg_work);
1583
1584 dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
1585 dd->ipath_link_speed_supported = IPATH_IB_SDR | IPATH_IB_DDR;
1586
1587 dd->ipath_link_width_enabled = dd->ipath_link_width_supported;
1588 dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
1589 /*
1590 * Set the initial values to reasonable defaults; they will be
1591 * set for real when the link is up.
1592 */
1593 dd->ipath_link_width_active = IB_WIDTH_4X;
1594 dd->ipath_link_speed_active = IPATH_IB_SDR;
1595 dd->delay_mult = rate_to_delay[0][1];
1596}
1597
1598
1599/*
1600 * Setup the MSI stuff again after a reset. I'd like to just call
1601 * pci_enable_msi() and request_irq() again, but when I do that,
1602 * the MSI enable bit doesn't get set in the command word, and
1603 * we switch to a different interrupt vector, which is confusing,
1604 * so I instead just do it all inline. Perhaps we can somehow tie
1605 * this into the PCIe hotplug support at some point.
1606 * Note, because I'm doing it all here, I don't call pci_disable_msi()
1607 * or free_irq() at the start of ipath_setup_7220_reset().
1608 */
1609static int ipath_reinit_msi(struct ipath_devdata *dd)
1610{
1611 int ret = 0;
1612
1613 int pos;
1614 u16 control;
1615 if (!dd->ipath_msi_lo) /* Using INTx, or init problem */
1616 goto bail;
1617
1618 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
1619 if (!pos) {
1620 ipath_dev_err(dd, "Can't find MSI capability, "
1621 "can't restore MSI settings\n");
1622 goto bail;
1623 }
1624 ipath_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
1625 dd->ipath_msi_lo, pos + PCI_MSI_ADDRESS_LO);
1626 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
1627 dd->ipath_msi_lo);
1628 ipath_cdbg(VERBOSE, "Writing msi_hi 0x%x to config offset 0x%x\n",
1629 dd->ipath_msi_hi, pos + PCI_MSI_ADDRESS_HI);
1630 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
1631 dd->ipath_msi_hi);
1632 pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
1633 if (!(control & PCI_MSI_FLAGS_ENABLE)) {
1634 ipath_cdbg(VERBOSE, "MSI control at off %x was %x, "
1635 "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
1636 control, control | PCI_MSI_FLAGS_ENABLE);
1637 control |= PCI_MSI_FLAGS_ENABLE;
1638 pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
1639 control);
1640 }
1641 /* now rewrite the data (vector) info */
1642 pci_write_config_word(dd->pcidev, pos +
1643 ((control & PCI_MSI_FLAGS_64BIT) ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32),
1644 dd->ipath_msi_data);
1645 ret = 1;
1646
1647bail:
1648 if (!ret) {
1649 ipath_dbg("Using INTx, MSI disabled or not configured\n");
1650 ipath_enable_intx(dd->pcidev);
1651 ret = 1;
1652 }
1653 /*
1654 * We restore the cachelinesize also, although it doesn't really
1655 * matter.
1656 */
1657 pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE,
1658 dd->ipath_pci_cacheline);
1659 /* and now set the pci master bit again */
1660 pci_set_master(dd->pcidev);
1661
1662 return ret;
1663}
1664
1665/*
1666 * This routine sleeps, so it can only be called from user context, not
1667 * from interrupt context. If we need interrupt context, we can split
1668 * it into two routines.
1669 */
1670static int ipath_setup_7220_reset(struct ipath_devdata *dd)
1671{
1672 u64 val;
1673 int i;
1674 int ret;
1675 u16 cmdval;
1676
1677 pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);
1678
1679 /* Use dev_err so it shows up in logs, etc. */
1680 ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
1681
1682 /* keep chip from being accessed in a few places */
1683 dd->ipath_flags &= ~(IPATH_INITTED | IPATH_PRESENT);
1684 val = dd->ipath_control | INFINIPATH_C_RESET;
1685 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
1686 mb();
1687
1688 for (i = 1; i <= 5; i++) {
1689 int r;
1690
1691 /*
1692 * Allow MBIST, etc. to complete; longer on each retry.
1693 * We sometimes get machine checks from bus timeout if no
1694 * response, so for now, make it *really* long.
1695 */
1696 msleep(1000 + (1 + i) * 2000);
1697 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
1698 dd->ipath_pcibar0);
1699 if (r)
1700 ipath_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
1701 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
1702 dd->ipath_pcibar1);
1703 if (r)
1704 ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
1705 /* now re-enable memory access */
1706 pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
1707 r = pci_enable_device(dd->pcidev);
1708 if (r)
1709 ipath_dev_err(dd, "pci_enable_device failed after "
1710 "reset: %d\n", r);
1711 /*
1712 * Whether it fully enabled or not, mark it as present
1713 * again (but not INITTED).
1714 */
1715 dd->ipath_flags |= IPATH_PRESENT;
1716 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
1717 if (val == dd->ipath_revision) {
1718 ipath_cdbg(VERBOSE, "Got matching revision "
1719 "register %llx on try %d\n",
1720 (unsigned long long) val, i);
1721 ret = ipath_reinit_msi(dd);
1722 goto bail;
1723 }
1724 /* Probably getting -1 back */
1725 ipath_dbg("Didn't get expected revision register, "
1726 "got %llx, try %d\n", (unsigned long long) val,
1727 i + 1);
1728 }
1729 ret = 0; /* failed */
1730
1731bail:
1732 if (ret)
1733 ipath_7220_pcie_params(dd, dd->ipath_boardrev);
1734
1735 return ret;
1736}
1737
1738/**
1739 * ipath_7220_put_tid - write a TID to the chip
1740 * @dd: the infinipath device
1741 * @tidptr: pointer to the expected TID (in chip) to update
1742 * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
1743 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1744 *
1745 * This exists as a separate routine to allow for selection of the
1746 * appropriate "flavor". The static calls in cleanup just use the
1747 * revision-agnostic form, as they are not performance critical.
1748 */
1749static void ipath_7220_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
1750 u32 type, unsigned long pa)
1751{
1752 if (pa != dd->ipath_tidinvalid) {
1753 u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
1754
1755 /* paranoia checks */
1756 if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
1757 dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
1758 "not 2KB aligned!\n", pa);
1759 return;
1760 }
1761 if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
1762 ipath_dev_err(dd,
1763 "BUG: Physical page address 0x%lx "
1764 "larger than supported\n", pa);
1765 return;
1766 }
1767
1768 if (type == RCVHQ_RCV_TYPE_EAGER)
1769 chippa |= dd->ipath_tidtemplate;
1770 else /* for now, always full 4KB page */
1771 chippa |= IBA7220_TID_SZ_4K;
1772 writeq(chippa, tidptr);
1773 } else
1774 writeq(pa, tidptr);
1775 mmiowb();
1776}
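/*
 * Worked example (editor's addition) of the paranoia checks above:
 * pa = 0x12345900 is not 2KB-aligned, so chippa = 0x2468b shifts back
 * to 0x12345800 != pa and the write is refused; the aligned
 * pa = 0x12345800 round-trips exactly and is accepted.
 */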
1777
1778/**
1779 * ipath_7220_clear_tids - clear all TID entries for a port, expected and eager
1780 * @dd: the infinipath device
1781 * @port: the port
1782 *
1783 * Used from ipath_close(). On this chip, TIDs are only 32 bits,
1784 * not 64, but they are still on 64-bit boundaries, so tidbase
1785 * is declared as u64 * for the pointer math, even though we only
1786 * write 32 bits.
1787 */
1788static void ipath_7220_clear_tids(struct ipath_devdata *dd, unsigned port)
1789{
1790 u64 __iomem *tidbase;
1791 unsigned long tidinv;
1792 int i;
1793
1794 if (!dd->ipath_kregbase)
1795 return;
1796
1797 ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
1798
1799 tidinv = dd->ipath_tidinvalid;
1800 tidbase = (u64 __iomem *)
1801 ((char __iomem *)(dd->ipath_kregbase) +
1802 dd->ipath_rcvtidbase +
1803 port * dd->ipath_rcvtidcnt * sizeof(*tidbase));
1804
1805 for (i = 0; i < dd->ipath_rcvtidcnt; i++)
1806 ipath_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
1807 tidinv);
1808
1809 tidbase = (u64 __iomem *)
1810 ((char __iomem *)(dd->ipath_kregbase) +
1811 dd->ipath_rcvegrbase + port_egrtid_idx(dd, port)
1812 * sizeof(*tidbase));
1813
1814 for (i = port ? dd->ipath_rcvegrcnt : dd->ipath_p0_rcvegrcnt; i; i--)
1815 ipath_7220_put_tid(dd, &tidbase[i-1], RCVHQ_RCV_TYPE_EAGER,
1816 tidinv);
1817}
1818
1819/**
1820 * ipath_7220_tidtemplate - setup constants for TID updates
1821 * @dd: the infinipath device
1822 *
1823 * We set up values we use a lot, to avoid calculating them each time
1824 */
1825static void ipath_7220_tidtemplate(struct ipath_devdata *dd)
1826{
1827 /* For now, we always allocate 4KB buffers (at init) so we can
1828 * receive max size packets. We may want a module parameter to
1829 * specify 2KB or 4KB and/or make it per port instead of per device
1830 * for those who want to reduce memory footprint. Note that
1831 * ipath_rcvhdrentsize must be large enough to hold the largest
1832 * IB header (currently 96 bytes) that we expect to handle (plus of
1833 * course the 2 dwords of RHF).
1834 */
1835 if (dd->ipath_rcvegrbufsize == 2048)
1836 dd->ipath_tidtemplate = IBA7220_TID_SZ_2K;
1837 else if (dd->ipath_rcvegrbufsize == 4096)
1838 dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
1839 else {
1840 dev_info(&dd->pcidev->dev, "BUG: unsupported egrbufsize "
1841 "%u, using %u\n", dd->ipath_rcvegrbufsize,
1842 4096);
1843 dd->ipath_tidtemplate = IBA7220_TID_SZ_4K;
1844 }
1845 dd->ipath_tidinvalid = 0;
1846}
1847
1848static int ipath_7220_early_init(struct ipath_devdata *dd)
1849{
1850 u32 i, s;
1851
1852 if (strcmp(int_type, "auto") &&
1853 strcmp(int_type, "force_msi") &&
1854 strcmp(int_type, "force_intx")) {
1855 ipath_dev_err(dd, "Invalid interrupt_type: '%s', expecting "
1856 "auto, force_msi or force_intx\n", int_type);
1857 return -EINVAL;
1858 }
1859
1860 /*
1861 * Control[4] has been added to change the arbitration within
1862 * the SDMA engine between favoring data fetches over descriptor
1863 * fetches. ipath_sdma_fetch_arb==0 gives data fetches priority.
1864 */
1865 if (ipath_sdma_fetch_arb && (dd->ipath_minrev > 1))
1866 dd->ipath_control |= 1<<4;
1867
1868 dd->ipath_flags |= IPATH_4BYTE_TID;
1869
1870 /*
1871 * For openfabrics, we need to be able to handle an IB header of
1872 * 24 dwords. HT chip has arbitrary sized receive buffers, so we
1873 * made them the same size as the PIO buffers. This chip does not
1874 * handle arbitrary size buffers, so we need the header large enough
1875 * to handle largest IB header, but still have room for a 2KB MTU
1876 * standard IB packet.
1877 */
1878 dd->ipath_rcvhdrentsize = 24;
1879 dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
1880 dd->ipath_rhf_offset =
1881 dd->ipath_rcvhdrentsize - sizeof(u64) / sizeof(u32);
1882
1883 dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
1884 /*
1885 * the min() check here is currently a nop, but it may not always
1886 * be, depending on just how we do ipath_rcvegrbufsize
1887 */
1888 dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
1889 dd->ipath_piosize2k,
1890 dd->ipath_rcvegrbufsize +
1891 (dd->ipath_rcvhdrentsize << 2));
1892 dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
1893
1894 ipath_7220_config_jint(dd, INFINIPATH_JINT_DEFAULT_IDLE_TICKS,
1895 INFINIPATH_JINT_DEFAULT_MAX_PACKETS);
1896
1897 if (dd->ipath_boardrev) /* no eeprom on emulator */
1898 ipath_get_eeprom_info(dd);
1899
1900 /* start of code to check and print procmon */
1901 s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
1902 s &= ~(1U<<31); /* clear done bit */
1903 s |= 1U<<14; /* clear counter (write 1 to clear) */
1904 ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
1905 /* make sure clear_counter stays low long enough before starting */
1906 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1907 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1908
1909 s &= ~(1U<<14); /* allow counter to count (before starting) */
1910 ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
1911 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1912 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1913 s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
1914
1915 s |= 1U<<15; /* start the counter */
1916 s &= ~(1U<<31); /* clear done bit */
1917 s &= ~0x7ffU; /* clear frequency bits */
1918 s |= 0xe29; /* set frequency bits, in case cleared */
1919 ipath_write_kreg(dd, IPATH_KREG_OFFSET(ProcMon), s);
1920
1921 s = 0;
1922 for (i = 500; i > 0 && !(s&(1ULL<<31)); i--) {
1923 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1924 s = ipath_read_kreg32(dd, IPATH_KREG_OFFSET(ProcMon));
1925 }
1926 if (!(s&(1U<<31)))
1927 ipath_dev_err(dd, "ProcMon register not valid: 0x%x\n", s);
1928 else
1929 ipath_dbg("ProcMon=0x%x, count=0x%x\n", s, (s>>16)&0x1ff);
1930
1931 return 0;
1932}
1933
1934/**
1935 * ipath_7220_get_base_info - set chip-specific flags for user code
1936 * @pd: the infinipath port
1937 * @kbase: ipath_base_info pointer
1938 *
1939 * We set the PCIE flag because the lower bandwidth on PCIe vs
1940 * HyperTransport can affect some user packet algorithms.
1941 */
1942static int ipath_7220_get_base_info(struct ipath_portdata *pd, void *kbase)
1943{
1944 struct ipath_base_info *kinfo = kbase;
1945
1946 kinfo->spi_runtime_flags |=
1947 IPATH_RUNTIME_PCIE | IPATH_RUNTIME_NODMA_RTAIL |
1948 IPATH_RUNTIME_SDMA;
1949
1950 return 0;
1951}
1952
1953static void ipath_7220_free_irq(struct ipath_devdata *dd)
1954{
1955 free_irq(dd->ipath_irq, dd);
1956 dd->ipath_irq = 0;
1957}
1958
1959static struct ipath_message_header *
1960ipath_7220_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
1961{
1962 u32 offset = ipath_hdrget_offset(rhf_addr);
1963
1964 return (struct ipath_message_header *)
1965 (rhf_addr - dd->ipath_rhf_offset + offset);
1966}
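/*
 * Editor's note on the pointer math above: rhf_addr points at the RHF
 * within a receive header entry, so backing up by ipath_rhf_offset
 * recovers the start of the entry, and adding the offset field from
 * the RHF then locates the message header.
 */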
1967
1968static void ipath_7220_config_ports(struct ipath_devdata *dd, ushort cfgports)
1969{
1970 u32 nchipports;
1971
1972 nchipports = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
1973 if (!cfgports) {
1974 int ncpus = num_online_cpus();
1975
1976 if (ncpus <= 4)
1977 dd->ipath_portcnt = 5;
1978 else if (ncpus <= 8)
1979 dd->ipath_portcnt = 9;
1980 if (dd->ipath_portcnt)
1981 ipath_dbg("Auto-configured for %u ports, %d cpus "
1982 "online\n", dd->ipath_portcnt, ncpus);
1983 } else if (cfgports <= nchipports)
1984 dd->ipath_portcnt = cfgports;
1985 if (!dd->ipath_portcnt) /* none of the above, set to max */
1986 dd->ipath_portcnt = nchipports;
1987 /*
1988 * chip can be configured for 5, 9, or 17 ports, and choice
1989 * affects number of eager TIDs per port (1K, 2K, 4K).
1990 */
1991 if (dd->ipath_portcnt > 9)
1992 dd->ipath_rcvctrl |= 2ULL << IBA7220_R_PORTCFG_SHIFT;
1993 else if (dd->ipath_portcnt > 5)
1994 dd->ipath_rcvctrl |= 1ULL << IBA7220_R_PORTCFG_SHIFT;
1995 /* else configure for default 5 receive ports */
1996 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1997 dd->ipath_rcvctrl);
1998 dd->ipath_p0_rcvegrcnt = 2048; /* always */
1999 if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
2000 dd->ipath_pioreserved = 3; /* kpiobufs used for PIO */
2001}
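/*
 * Editor's summary of the PORTCFG choice above: portcnt <= 5 leaves
 * the field at 0 (5-port config), 6..9 writes 1 (9 ports), and >9
 * writes 2 (17 ports); per the comment, more ports means fewer eager
 * TIDs per port (presumably 4K, 2K, and 1K for the 5-, 9-, and
 * 17-port configurations).
 */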
2002
2003
2004static int ipath_7220_get_ib_cfg(struct ipath_devdata *dd, int which)
2005{
2006 int lsb, ret = 0;
2007 u64 maskr; /* right-justified mask */
2008
2009 switch (which) {
2010 case IPATH_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
2011 lsb = IBA7220_IBC_HRTBT_SHIFT;
2012 maskr = IBA7220_IBC_HRTBT_MASK;
2013 break;
2014
2015 case IPATH_IB_CFG_LWID_ENB: /* Get allowed Link-width */
2016 ret = dd->ipath_link_width_enabled;
2017 goto done;
2018
2019 case IPATH_IB_CFG_LWID: /* Get currently active Link-width */
2020 ret = dd->ipath_link_width_active;
2021 goto done;
2022
2023 case IPATH_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
2024 ret = dd->ipath_link_speed_enabled;
2025 goto done;
2026
2027 case IPATH_IB_CFG_SPD: /* Get current Link spd */
2028 ret = dd->ipath_link_speed_active;
2029 goto done;
2030
2031 case IPATH_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
2032 lsb = IBA7220_IBC_RXPOL_SHIFT;
2033 maskr = IBA7220_IBC_RXPOL_MASK;
2034 break;
2035
2036 case IPATH_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
2037 lsb = IBA7220_IBC_LREV_SHIFT;
2038 maskr = IBA7220_IBC_LREV_MASK;
2039 break;
2040
2041 case IPATH_IB_CFG_LINKLATENCY:
2042 ret = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcddrstatus)
2043 & IBA7220_DDRSTAT_LINKLAT_MASK;
2044 goto done;
2045
2046 default:
2047 ret = -ENOTSUPP;
2048 goto done;
2049 }
2050 ret = (int)((dd->ipath_ibcddrctrl >> lsb) & maskr);
2051done:
2052 return ret;
2053}
2054
2055static int ipath_7220_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
2056{
2057 int lsb, ret = 0, setforce = 0;
2058 u64 maskr; /* right-justified mask */
2059
2060 switch (which) {
2061 case IPATH_IB_CFG_LIDLMC:
2062 /*
2063 * Set LID and LMC. Combined to avoid a possible hazard.
2064 * The caller puts the LMC in the 16 MSbits, DLID in the 16 LSbits of val.
2065 */
2066 lsb = IBA7220_IBC_DLIDLMC_SHIFT;
2067 maskr = IBA7220_IBC_DLIDLMC_MASK;
2068 break;
2069
2070 case IPATH_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
2071 if (val & IPATH_IB_HRTBT_ON &&
2072 (dd->ipath_flags & IPATH_NO_HRTBT))
2073 goto bail;
2074 lsb = IBA7220_IBC_HRTBT_SHIFT;
2075 maskr = IBA7220_IBC_HRTBT_MASK;
2076 break;
2077
2078 case IPATH_IB_CFG_LWID_ENB: /* set allowed Link-width */
2079 /*
2080 * As with speed, only write the actual register if
2081 * the link is currently down, otherwise takes effect
2082 * on next link change.
2083 */
2084 dd->ipath_link_width_enabled = val;
2085 if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
2086 IPATH_LINKDOWN)
2087 goto bail;
2088 /*
2089 * We set the IPATH_IB_FORCE_NOTIFY bit so updown
2090 * will get called, because we want to update
2091 * link_width_active, and the change may not take
2092 * effect for some time (if we are in POLL), so this
2093 * flag will force the updown routine to be called
2094 * on the next ibstatuschange down interrupt, even
2095 * if it's not a down->up transition.
2096 */
2097 val--; /* convert from IB to chip */
2098 maskr = IBA7220_IBC_WIDTH_MASK;
2099 lsb = IBA7220_IBC_WIDTH_SHIFT;
2100 setforce = 1;
2101 dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
2102 break;
2103
2104 case IPATH_IB_CFG_SPD_ENB: /* set allowed Link speeds */
2105 /*
2106 * If we turn off IB1.2, need to preset SerDes defaults,
2107 * but not right now. Set a flag for the next time
2108 * we command the link down. As with width, only write the
2109 * actual register if the link is currently down, otherwise
2110 * takes effect on next link change. Since setting is being
2111 * explicitly requested (via MAD or sysfs), clear autoneg
2112 * failure status if speed autoneg is enabled.
2113 */
2114 dd->ipath_link_speed_enabled = val;
2115 if (dd->ipath_ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK &&
2116 !(val & (val - 1)))
2117 dd->ipath_presets_needed = 1;
2118 if ((dd->ipath_flags & (IPATH_LINKDOWN|IPATH_LINKINIT)) !=
2119 IPATH_LINKDOWN)
2120 goto bail;
2121 /*
2122 * We set the IPATH_IB_FORCE_NOTIFY bit so updown
2123 * will get called, because we want to update
2124 * link_speed_active, and the change may not take
2125 * effect for some time (if we are in POLL), so this
2126 * flag will force the updown routine to be called
2127 * on the next ibstatuschange down interrupt, even
2128 * if it's not a down->up transition. When setting
2129 * speed autoneg, clear AUTONEG_FAILED.
2130 */
2131 if (val == (IPATH_IB_SDR | IPATH_IB_DDR)) {
2132 val = IBA7220_IBC_SPEED_AUTONEG_MASK |
2133 IBA7220_IBC_IBTA_1_2_MASK;
2134 dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
2135 } else
2136 val = val == IPATH_IB_DDR ? IBA7220_IBC_SPEED_DDR
2137 : IBA7220_IBC_SPEED_SDR;
2138 maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
2139 IBA7220_IBC_IBTA_1_2_MASK;
2140 lsb = 0; /* speed bits are low bits */
2141 setforce = 1;
2142 break;
2143
2144 case IPATH_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
2145 lsb = IBA7220_IBC_RXPOL_SHIFT;
2146 maskr = IBA7220_IBC_RXPOL_MASK;
2147 break;
2148
2149 case IPATH_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
2150 lsb = IBA7220_IBC_LREV_SHIFT;
2151 maskr = IBA7220_IBC_LREV_MASK;
2152 break;
2153
2154 default:
2155 ret = -ENOTSUPP;
2156 goto bail;
2157 }
2158 dd->ipath_ibcddrctrl &= ~(maskr << lsb);
2159 dd->ipath_ibcddrctrl |= (((u64) val & maskr) << lsb);
2160 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
2161 dd->ipath_ibcddrctrl);
2162 if (setforce)
2163 dd->ipath_flags |= IPATH_IB_FORCE_NOTIFY;
2164bail:
2165 return ret;
2166}
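/*
 * Editor's illustration of the read-modify-write pattern above: for
 * IPATH_IB_CFG_LREV_ENB with val = 1, the field
 * IBA7220_IBC_LREV_MASK << IBA7220_IBC_LREV_SHIFT is cleared in
 * ipath_ibcddrctrl and 1 << IBA7220_IBC_LREV_SHIFT is OR-ed in before
 * the register is written back.
 */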
2167
2168static void ipath_7220_read_counters(struct ipath_devdata *dd,
2169 struct infinipath_counters *cntrs)
2170{
2171 u64 *counters = (u64 *) cntrs;
2172 int i;
2173
2174 for (i = 0; i < sizeof(*cntrs) / sizeof(u64); i++)
2175 counters[i] = ipath_snap_cntr(dd, i);
2176}
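/*
 * Editor's note: the cast above relies on struct infinipath_counters
 * being nothing but consecutive u64 counters, so it can be filled as
 * a flat array, with counter register i landing in the i-th field.
 */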
2177
2178/* if we are using MSI, try to fallback to INTx */
2179static int ipath_7220_intr_fallback(struct ipath_devdata *dd)
2180{
2181 if (dd->ipath_msi_lo) {
2182 dev_info(&dd->pcidev->dev, "MSI interrupt not detected,"
2183 " trying INTx interrupts\n");
2184 ipath_7220_nomsi(dd);
2185 ipath_enable_intx(dd->pcidev);
2186 /*
2187 * Some newer kernels require free_irq before disable_msi,
2188 * and the irq can be changed during disable and INTx enable,
2189 * so we therefore need to use the pcidev->irq value,
2190 * not our saved MSI value.
2191 */
2192 dd->ipath_irq = dd->pcidev->irq;
2193 if (request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
2194 IPATH_DRV_NAME, dd))
2195 ipath_dev_err(dd,
2196 "Could not re-request_irq for INTx\n");
2197 return 1;
2198 }
2199 return 0;
2200}
2201
2202/*
2203 * reset the XGXS (between serdes and IBC). Slightly less intrusive
2204 * than resetting the IBC or external link state, and useful in some
2205 * cases to cause some retraining. To do this right, we reset IBC
2206 * as well.
2207 */
2208static void ipath_7220_xgxs_reset(struct ipath_devdata *dd)
2209{
2210 u64 val, prev_val;
2211
2212 prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
2213 val = prev_val | INFINIPATH_XGXS_RESET;
2214 prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
2215 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
2216 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
2217 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
2218 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
2219 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
2220 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
2221 dd->ipath_control);
2222}
2223
2224
2225/* Still needs cleanup, too much hardwired stuff */
2226static void autoneg_send(struct ipath_devdata *dd,
2227 u32 *hdr, u32 dcnt, u32 *data)
2228{
2229 int i;
2230 u64 cnt;
2231 u32 __iomem *piobuf;
2232 u32 pnum;
2233
2234 i = 0;
2235 cnt = 7 + dcnt + 1; /* 7-dword header, dcnt dwords of data, 1 dword ICRC */
2236 while (!(piobuf = ipath_getpiobuf(dd, cnt, &pnum))) {
2237 if (i++ > 15) {
2238 ipath_dbg("Couldn't get pio buffer for send\n");
2239 return;
2240 }
2241 udelay(2);
2242 }
2243 if (dd->ipath_flags&IPATH_HAS_PBC_CNT)
2244 cnt |= 0x80000000ULL << 32; /* mark as VL15 */
2245 writeq(cnt, piobuf);
2246 ipath_flush_wc();
2247 __iowrite32_copy(piobuf + 2, hdr, 7);
2248 __iowrite32_copy(piobuf + 9, data, dcnt);
2249 ipath_flush_wc();
2250}
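/*
 * Editor's sketch of the PIO buffer layout assumed above: dwords 0-1
 * hold the 64-bit PBC (the length, plus the VL15 mark when
 * IPATH_HAS_PBC_CNT), dwords 2-8 the 7-dword header, and dwords 9 on
 * the payload; the trailing ICRC dword is counted in cnt but
 * generated by the hardware, which is why only header and data are
 * copied.
 */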
2251
2252/*
2253 * _start packet gets sent twice at start, _done gets sent twice at end
2254 */
2255static void ipath_autoneg_send(struct ipath_devdata *dd, int which)
2256{
2257 static u32 swapped;
2258 u32 dw, i, hcnt, dcnt, *data;
2259 static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
2260 static u32 madpayload_start[0x40] = {
2261 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
2262 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
2263 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
2264 };
2265 static u32 madpayload_done[0x40] = {
2266 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
2267 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
2268 0x40000001, 0x1388, 0x15e, /* rest 0's */
2269 };
2270 dcnt = ARRAY_SIZE(madpayload_start);
2271 hcnt = ARRAY_SIZE(hdr);
2272 if (!swapped) {
2273 /* for maintainability, do it at runtime */
2274 for (i = 0; i < hcnt; i++) {
2275 dw = (__force u32) cpu_to_be32(hdr[i]);
2276 hdr[i] = dw;
2277 }
2278 for (i = 0; i < dcnt; i++) {
2279 dw = (__force u32) cpu_to_be32(madpayload_start[i]);
2280 madpayload_start[i] = dw;
2281 dw = (__force u32) cpu_to_be32(madpayload_done[i]);
2282 madpayload_done[i] = dw;
2283 }
2284 swapped = 1;
2285 }
2286
2287 data = which ? madpayload_done : madpayload_start;
2288 ipath_cdbg(PKT, "Sending %s special MADs\n", which ? "done" : "start");
2289
2290 autoneg_send(dd, hdr, dcnt, data);
2291 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
2292 udelay(2);
2293 autoneg_send(dd, hdr, dcnt, data);
2294 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
2295 udelay(2);
2296}
2297
2298
2299
2300/*
2301 * Do the absolute minimum to cause an IB speed change, and make it
2302 * ready, but don't actually trigger the change. The caller will
2303 * do that when ready (if link is in Polling training state, it will
2304 * happen immediately, otherwise when link next goes down)
2305 *
2306 * This routine should only be used as part of the DDR autonegotiation
2307 * code for devices that are not compliant with IB 1.2 (or code that
2308 * fixes things up for same).
2309 *
2310 * When the link has gone down and autoneg is enabled, or when autoneg
2311 * has failed and we give up until next time, we set both speeds, and
2312 * then we want IBTA negotiation enabled as well as "use max enabled speed".
2313 */
2314static void set_speed_fast(struct ipath_devdata *dd, u32 speed)
2315{
2316 dd->ipath_ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
2317 IBA7220_IBC_IBTA_1_2_MASK |
2318 (IBA7220_IBC_WIDTH_MASK << IBA7220_IBC_WIDTH_SHIFT));
2319
2320 if (speed == (IPATH_IB_SDR | IPATH_IB_DDR))
2321 dd->ipath_ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
2322 IBA7220_IBC_IBTA_1_2_MASK;
2323 else
2324 dd->ipath_ibcddrctrl |= speed == IPATH_IB_DDR ?
2325 IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
2326
2327 /*
2328 * Convert from IB-style 1 = 1x, 2 = 4x, 3 = auto
2329 * to chip-centric 0 = 1x, 1 = 4x, 2 = auto
2330 */
2331 dd->ipath_ibcddrctrl |= (u64)(dd->ipath_link_width_enabled - 1) <<
2332 IBA7220_IBC_WIDTH_SHIFT;
2333 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcddrctrl,
2334 dd->ipath_ibcddrctrl);
2335 ipath_cdbg(VERBOSE, "setup for IB speed (%x) done\n", speed);
2336}
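The width conversion above is a plain subtract-by-one; as a self-contained sketch (restating the in-function comment, with a hypothetical helper name):

/* IB-style width code (1 = 1x, 2 = 4x, 3 = auto) to chip-centric
 * code (0 = 1x, 1 = 4x, 2 = auto), as shifted by IBA7220_IBC_WIDTH_SHIFT */
static inline u64 ib_width_to_chip(u32 ib_width)
{
	return (u64)(ib_width - 1);
}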
2337
2338
2339/*
2340 * this routine is only used when we are not talking to another
2341 * IB 1.2-compliant device that we think can do DDR.
2342 * (This includes all existing switch chips as of Oct 2007.)
2343 * 1.2-compliant devices go directly to DDR prior to reaching INIT
2344 */
2345static void try_auto_neg(struct ipath_devdata *dd)
2346{
2347 /*
2348 * required for older non-IB1.2 DDR switches. Newer
2349 * non-IB-compliant switches don't need it, but so far,
2350 * aren't bothered by it either. "Magic constant"
2351 */
2352 ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl),
2353 0x3b9dc07);
2354 dd->ipath_flags |= IPATH_IB_AUTONEG_INPROG;
2355 ipath_autoneg_send(dd, 0);
2356 set_speed_fast(dd, IPATH_IB_DDR);
2357 ipath_toggle_rclkrls(dd);
2358 /* 2 msec is minimum length of a poll cycle */
2359 schedule_delayed_work(&dd->ipath_autoneg_work,
2360 msecs_to_jiffies(2));
2361}
2362
2363
2364static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
2365{
2366 int ret = 0, symadj = 0;
2367 u32 ltstate = ipath_ib_linkstate(dd, ibcs);
2368
2369 dd->ipath_link_width_active =
2370 ((ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1) ?
2371 IB_WIDTH_4X : IB_WIDTH_1X;
2372 dd->ipath_link_speed_active =
2373 ((ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1) ?
2374 IPATH_IB_DDR : IPATH_IB_SDR;
2375
2376 if (!ibup) {
2377 /*
2378 * when link goes down we don't want aeq running, so it
2379 * won't interfere with IBC training, etc., and we need
2380 * to go back to the static SerDes preset values
2381 */
2382 if (dd->ipath_x1_fix_tries &&
2383 ltstate <= INFINIPATH_IBCS_LT_STATE_SLEEPQUIET &&
2384 ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP)
2385 dd->ipath_x1_fix_tries = 0;
2386 if (!(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
2387 IPATH_IB_AUTONEG_INPROG)))
2388 set_speed_fast(dd, dd->ipath_link_speed_enabled);
2389 if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) {
2390 ipath_cdbg(VERBOSE, "Setting RXEQ defaults\n");
2391 ipath_sd7220_presets(dd);
2392 }
2393 /* this might be better done in ipath_sd7220_presets() */
2394 ipath_set_relock_poll(dd, ibup);
2395 } else {
2396 if (ipath_compat_ddr_negotiate &&
2397 !(dd->ipath_flags & (IPATH_IB_AUTONEG_FAILED |
2398 IPATH_IB_AUTONEG_INPROG)) &&
2399 dd->ipath_link_speed_active == IPATH_IB_SDR &&
2400 (dd->ipath_link_speed_enabled &
2401 (IPATH_IB_DDR | IPATH_IB_SDR)) ==
2402 (IPATH_IB_DDR | IPATH_IB_SDR) &&
2403 dd->ipath_autoneg_tries < IPATH_AUTONEG_TRIES) {
2404 /* we are SDR, and DDR auto-negotiation enabled */
2405 ++dd->ipath_autoneg_tries;
2406 ipath_dbg("DDR negotiation try, %u/%u\n",
2407 dd->ipath_autoneg_tries,
2408 IPATH_AUTONEG_TRIES);
2409 if (!dd->ibdeltainprog) {
2410 dd->ibdeltainprog = 1;
2411 dd->ibsymsnap = ipath_read_creg32(dd,
2412 dd->ipath_cregs->cr_ibsymbolerrcnt);
2413 dd->iblnkerrsnap = ipath_read_creg32(dd,
2414 dd->ipath_cregs->cr_iblinkerrrecovcnt);
2415 }
2416 try_auto_neg(dd);
2417 ret = 1; /* no other IB status change processing */
2418 } else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
2419 && dd->ipath_link_speed_active == IPATH_IB_SDR) {
2420 ipath_autoneg_send(dd, 1);
2421 set_speed_fast(dd, IPATH_IB_DDR);
2422 udelay(2);
2423 ipath_toggle_rclkrls(dd);
2424 ret = 1; /* no other IB status change processing */
2425 } else {
2426 if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
2427 (dd->ipath_link_speed_active & IPATH_IB_DDR)) {
2428 ipath_dbg("Got to INIT with DDR autoneg\n");
2429 dd->ipath_flags &= ~(IPATH_IB_AUTONEG_INPROG
2430 | IPATH_IB_AUTONEG_FAILED);
2431 dd->ipath_autoneg_tries = 0;
2432 /* re-enable SDR, for next link down */
2433 set_speed_fast(dd,
2434 dd->ipath_link_speed_enabled);
2435 wake_up(&dd->ipath_autoneg_wait);
2436 symadj = 1;
2437 } else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) {
2438 /*
2439 * clear autoneg failure flag, and do setup
2440 * so we'll try next time link goes down and
2441 * back to INIT (possibly connected to different
2442 * device).
2443 */
2444 ipath_dbg("INIT %sDR after autoneg failure\n",
2445 (dd->ipath_link_speed_active &
2446 IPATH_IB_DDR) ? "D" : "S");
2447 dd->ipath_flags &= ~IPATH_IB_AUTONEG_FAILED;
2448 dd->ipath_ibcddrctrl |=
2449 IBA7220_IBC_IBTA_1_2_MASK;
2450 ipath_write_kreg(dd,
2451 IPATH_KREG_OFFSET(IBNCModeCtrl), 0);
2452 symadj = 1;
2453 }
2454 }
2455 /*
2456 * If we are in 1X (rev1 hardware only) while autoneg width is
2457 * enabled, it could be due to an xgxs problem, so if we haven't
2458 * already tried, try twice to get to 4X; if we tried and
2459 * couldn't, report it, since 1X is probably not what is
2460 * desired.
2461 */
2462 if (dd->ipath_minrev == 1 &&
2463 (dd->ipath_link_width_enabled & (IB_WIDTH_1X |
2464 IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X)
2465 && dd->ipath_link_width_active == IB_WIDTH_1X
2466 && dd->ipath_x1_fix_tries < 3) {
2467 if (++dd->ipath_x1_fix_tries == 3) {
2468 dev_info(&dd->pcidev->dev,
2469 "IB link is in 1X mode\n");
2470 if (!(dd->ipath_flags &
2471 IPATH_IB_AUTONEG_INPROG))
2472 symadj = 1;
2473 }
2474 else {
2475 ipath_cdbg(VERBOSE, "IB 1X in "
2476 "auto-width, try %u to be "
2477 "sure it's really 1X; "
2478 "ltstate %u\n",
2479 dd->ipath_x1_fix_tries,
2480 ltstate);
2481 dd->ipath_f_xgxs_reset(dd);
2482 ret = 1; /* skip other processing */
2483 }
2484 } else if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG))
2485 symadj = 1;
2486
2487 if (!ret) {
2488 dd->delay_mult = rate_to_delay
2489 [(ibcs >> IBA7220_IBCS_LINKSPEED_SHIFT) & 1]
2490 [(ibcs >> IBA7220_IBCS_LINKWIDTH_SHIFT) & 1];
2491
2492 ipath_set_relock_poll(dd, ibup);
2493 }
2494 }
2495
2496 if (symadj) {
2497 if (dd->ibdeltainprog) {
2498 dd->ibdeltainprog = 0;
2499 dd->ibsymdelta += ipath_read_creg32(dd,
2500 dd->ipath_cregs->cr_ibsymbolerrcnt) -
2501 dd->ibsymsnap;
2502 dd->iblnkerrdelta += ipath_read_creg32(dd,
2503 dd->ipath_cregs->cr_iblinkerrrecovcnt) -
2504 dd->iblnkerrsnap;
2505 }
2506 } else if (!ibup && !dd->ibdeltainprog
2507 && !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) {
2508 dd->ibdeltainprog = 1;
2509 dd->ibsymsnap = ipath_read_creg32(dd,
2510 dd->ipath_cregs->cr_ibsymbolerrcnt);
2511 dd->iblnkerrsnap = ipath_read_creg32(dd,
2512 dd->ipath_cregs->cr_iblinkerrrecovcnt);
2513 }
2514
2515 if (!ret)
2516 ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs),
2517 ltstate);
2518 return ret;
2519}
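The ibsymsnap/iblnkerrsnap handling above follows a snapshot-and-delta pattern: counters are snapshotted when a disturbance (training, autoneg) begins, and the accumulated difference is folded into a delta when it ends, so reporting code elsewhere in the driver can subtract the noise back out. A minimal sketch of the pattern, with read_counter() standing in for ipath_read_creg32():

static u32 read_counter(void);	/* stand-in for ipath_read_creg32() */

static u32 snap;	/* counter value when the disturbance began */
static s64 delta;	/* accumulated noise to subtract from reports */
static int inprog;

static void disturbance_start(void)
{
	snap = read_counter();
	inprog = 1;
}

static void disturbance_end(void)
{
	if (inprog) {
		delta += (s64)(read_counter() - snap);
		inprog = 0;
	}
}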
2520
2521
2522/*
2523 * Handle the empirically determined mechanism for auto-negotiation
2524 * of DDR speed with switches.
2525 */
2526static void autoneg_work(struct work_struct *work)
2527{
2528 struct ipath_devdata *dd;
2529 u64 startms;
2530 u32 lastlts, i;
2531
2532 dd = container_of(work, struct ipath_devdata,
2533 ipath_autoneg_work.work);
2534
2535 startms = jiffies_to_msecs(jiffies);
2536
2537 /*
2538 * busy wait for this first part, it should be at most a
2539 * few hundred usec, since we scheduled ourselves for 2msec.
2540 */
2541 for (i = 0; i < 25; i++) {
2542 lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
2543 if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
2544 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN_DISABLE);
2545 break;
2546 }
2547 udelay(100);
2548 }
2549
2550 if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG))
2551 goto done; /* we got there early or told to stop */
2552
2553 /* we expect this to time out */
2554 if (wait_event_timeout(dd->ipath_autoneg_wait,
2555 !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
2556 msecs_to_jiffies(90)))
2557 goto done;
2558
2559 ipath_toggle_rclkrls(dd);
2560
2561 /* we expect this to time out */
2562 if (wait_event_timeout(dd->ipath_autoneg_wait,
2563 !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
2564 msecs_to_jiffies(1700)))
2565 goto done;
2566
2567 set_speed_fast(dd, IPATH_IB_SDR);
2568 ipath_toggle_rclkrls(dd);
2569
2570 /*
2571 * wait up to 250 msec for link to train and get to INIT at DDR;
2572 * this should terminate early
2573 */
2574 wait_event_timeout(dd->ipath_autoneg_wait,
2575 !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG),
2576 msecs_to_jiffies(250));
2577done:
2578 if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
2579 ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n",
2580 ipath_ib_state(dd, dd->ipath_lastibcstat),
2581 (unsigned long long) (jiffies_to_msecs(jiffies) - startms));
2582 dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
2583 if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) {
2584 dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED;
2585 ipath_dbg("Giving up on DDR until next IB "
2586 "link Down\n");
2587 dd->ipath_autoneg_tries = 0;
2588 }
2589 set_speed_fast(dd, dd->ipath_link_speed_enabled);
2590 }
2591}
2592
2593
2594/**
2595 * ipath_init_iba7220_funcs - set up the chip-specific function pointers
2596 * @dd: the infinipath device
2597 *
2598 * This is global, and is called directly at init to set up the
2599 * chip-specific function pointers for later use.
2600 */
2601void ipath_init_iba7220_funcs(struct ipath_devdata *dd)
2602{
2603 dd->ipath_f_intrsetup = ipath_7220_intconfig;
2604 dd->ipath_f_bus = ipath_setup_7220_config;
2605 dd->ipath_f_reset = ipath_setup_7220_reset;
2606 dd->ipath_f_get_boardname = ipath_7220_boardname;
2607 dd->ipath_f_init_hwerrors = ipath_7220_init_hwerrors;
2608 dd->ipath_f_early_init = ipath_7220_early_init;
2609 dd->ipath_f_handle_hwerrors = ipath_7220_handle_hwerrors;
2610 dd->ipath_f_quiet_serdes = ipath_7220_quiet_serdes;
2611 dd->ipath_f_bringup_serdes = ipath_7220_bringup_serdes;
2612 dd->ipath_f_clear_tids = ipath_7220_clear_tids;
2613 dd->ipath_f_put_tid = ipath_7220_put_tid;
2614 dd->ipath_f_cleanup = ipath_setup_7220_cleanup;
2615 dd->ipath_f_setextled = ipath_setup_7220_setextled;
2616 dd->ipath_f_get_base_info = ipath_7220_get_base_info;
2617 dd->ipath_f_free_irq = ipath_7220_free_irq;
2618 dd->ipath_f_tidtemplate = ipath_7220_tidtemplate;
2619 dd->ipath_f_intr_fallback = ipath_7220_intr_fallback;
2620 dd->ipath_f_xgxs_reset = ipath_7220_xgxs_reset;
2621 dd->ipath_f_get_ib_cfg = ipath_7220_get_ib_cfg;
2622 dd->ipath_f_set_ib_cfg = ipath_7220_set_ib_cfg;
2623 dd->ipath_f_config_jint = ipath_7220_config_jint;
2624 dd->ipath_f_config_ports = ipath_7220_config_ports;
2625 dd->ipath_f_read_counters = ipath_7220_read_counters;
2626 dd->ipath_f_get_msgheader = ipath_7220_get_msgheader;
2627 dd->ipath_f_ib_updown = ipath_7220_ib_updown;
2628
2629 /* initialize chip-specific variables */
2630 ipath_init_7220_variables(dd);
2631}
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index b3d7efcdf021..6559af60bffd 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -1030,8 +1030,6 @@ void ipath_free_data(struct ipath_portdata *dd);
 u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
 void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
 			      unsigned len, int avail);
-void ipath_init_iba7220_funcs(struct ipath_devdata *);
-void ipath_init_iba6120_funcs(struct ipath_devdata *);
 void ipath_init_iba6110_funcs(struct ipath_devdata *);
 void ipath_get_eeprom_info(struct ipath_devdata *);
 int ipath_update_eeprom_log(struct ipath_devdata *dd);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 559f39be0dcc..dd7f26d04d46 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -2182,7 +2182,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	snprintf(dev->node_desc, sizeof(dev->node_desc),
 		 IPATH_IDSTR " %s", init_utsname()->nodename);
 
-	ret = ib_register_device(dev);
+	ret = ib_register_device(dev, NULL);
 	if (ret)
 		goto err_reg;
 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 39051417054c..4e94e360e43b 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -662,7 +662,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	spin_lock_init(&ibdev->sm_lock);
 	mutex_init(&ibdev->cap_mask_mutex);
 
-	if (ib_register_device(&ibdev->ib_dev))
+	if (ib_register_device(&ibdev->ib_dev, NULL))
 		goto err_map;
 
 	if (mlx4_ib_mad_init(ibdev))
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index f080a784bc79..1e0b4b6074ad 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1403,7 +1403,7 @@ int mthca_register_device(struct mthca_dev *dev)
 
 	mutex_init(&dev->cap_mask_mutex);
 
-	ret = ib_register_device(&dev->ib_dev);
+	ret = ib_register_device(&dev->ib_dev, NULL);
 	if (ret)
 		return ret;
 
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 86acb7d57064..57874a165083 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2584,7 +2584,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
 
 	if (phy_data & 0x0004) {
 		if (wide_ppm_offset &&
@@ -2639,6 +2638,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 		}
 	}
 
+	spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+
 	nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
 }
 
@@ -3422,6 +3423,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	u32 aeq_info;
 	u32 next_iwarp_state = 0;
+	u32 aeqe_cq_id;
 	u16 async_event_id;
 	u8 tcp_state;
 	u8 iwarp_state;
@@ -3449,6 +3451,14 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
 		le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
 		nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
 
+	aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
+	if (aeq_info & NES_AEQE_QP) {
+		if ((!nes_is_resource_allocated(nesadapter, nesadapter->allocated_qps,
+				aeqe_cq_id)) ||
+		    (atomic_read(&nesqp->close_timer_started)))
+			return;
+	}
+
 	switch (async_event_id) {
 	case NES_AEQE_AEID_LLP_FIN_RECEIVED:
 		if (nesqp->term_flags)
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index e95e8d09ff38..5cc0a9ae5bb1 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1001,6 +1001,7 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
 	return ret;
 }
 
+
 static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
 	"Link Change Interrupts",
 	"Linearized SKBs",
@@ -1015,11 +1016,15 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
 	"Rx Jabber Errors",
 	"Rx Oversized Frames",
 	"Rx Short Frames",
+	"Rx Length Errors",
+	"Rx CRC Errors",
+	"Rx Port Discard",
 	"Endnode Rx Discards",
 	"Endnode Rx Octets",
 	"Endnode Rx Frames",
 	"Endnode Tx Octets",
 	"Endnode Tx Frames",
+	"Tx Errors",
 	"mh detected",
 	"mh pauses",
 	"Retransmission Count",
@@ -1048,19 +1053,13 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
 	"CM Nodes Destroyed",
 	"CM Accel Drops",
 	"CM Resets Received",
+	"Free 4Kpbls",
+	"Free 256pbls",
 	"Timer Inits",
-	"CQ Depth 1",
-	"CQ Depth 4",
-	"CQ Depth 16",
-	"CQ Depth 24",
-	"CQ Depth 32",
-	"CQ Depth 128",
-	"CQ Depth 256",
 	"LRO aggregated",
 	"LRO flushed",
 	"LRO no_desc",
 };
-
 #define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
 
 /**
@@ -1120,12 +1119,14 @@ static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
 /**
  * nes_netdev_get_ethtool_stats
  */
+
 static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
 		struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
 {
 	u64 u64temp;
 	struct nes_vnic *nesvnic = netdev_priv(netdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
+	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	u32 nic_count;
 	u32 u32temp;
 	u32 index = 0;
@@ -1154,6 +1155,46 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
 	nesvnic->nesdev->port_tx_discards += u32temp;
 	nesvnic->netstats.tx_dropped += u32temp;
 
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->netstats.rx_dropped += u32temp;
+	nesvnic->nesdev->mac_rx_errors += u32temp;
+	nesvnic->nesdev->mac_rx_short_frames += u32temp;
+
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->netstats.rx_dropped += u32temp;
+	nesvnic->nesdev->mac_rx_errors += u32temp;
+	nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
+
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->netstats.rx_dropped += u32temp;
+	nesvnic->nesdev->mac_rx_errors += u32temp;
+	nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
+
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->netstats.rx_dropped += u32temp;
+	nesvnic->nesdev->mac_rx_errors += u32temp;
+	nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
+
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->netstats.rx_length_errors += u32temp;
+	nesvnic->nesdev->mac_rx_errors += u32temp;
+
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->nesdev->mac_rx_errors += u32temp;
+	nesvnic->nesdev->mac_rx_crc_errors += u32temp;
+	nesvnic->netstats.rx_crc_errors += u32temp;
+
+	u32temp = nes_read_indexed(nesdev,
+			NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
+	nesvnic->nesdev->mac_tx_errors += u32temp;
+	nesvnic->netstats.tx_errors += u32temp;
+
 	for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
 		if (nesvnic->qp_nic_index[nic_count] == 0xf)
 			break;
@@ -1218,11 +1259,15 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
 	target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
 	target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
 	target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
+	target_stat_values[++index] = nesvnic->netstats.rx_length_errors;
+	target_stat_values[++index] = nesvnic->nesdev->mac_rx_crc_errors;
+	target_stat_values[++index] = nesvnic->nesdev->port_rx_discards;
 	target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
 	target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
 	target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
 	target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
 	target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
+	target_stat_values[++index] = nesvnic->nesdev->mac_tx_errors;
 	target_stat_values[++index] = mh_detected;
 	target_stat_values[++index] = mh_pauses_sent;
 	target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
@@ -1251,21 +1296,14 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
 	target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
 	target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
 	target_stat_values[++index] = atomic_read(&cm_resets_recvd);
+	target_stat_values[++index] = nesadapter->free_4kpbl;
+	target_stat_values[++index] = nesadapter->free_256pbl;
 	target_stat_values[++index] = int_mod_timer_init;
-	target_stat_values[++index] = int_mod_cq_depth_1;
-	target_stat_values[++index] = int_mod_cq_depth_4;
-	target_stat_values[++index] = int_mod_cq_depth_16;
-	target_stat_values[++index] = int_mod_cq_depth_24;
-	target_stat_values[++index] = int_mod_cq_depth_32;
-	target_stat_values[++index] = int_mod_cq_depth_128;
-	target_stat_values[++index] = int_mod_cq_depth_256;
 	target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
 	target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
 	target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
-
 }
 
-
 /**
  * nes_netdev_get_drvinfo
  */
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 925e1f2d1d55..9bc2d744b2ea 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3962,7 +3962,7 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	int i, ret;
 
-	ret = ib_register_device(&nesvnic->nesibdev->ibdev);
+	ret = ib_register_device(&nesvnic->nesibdev->ibdev, NULL);
 	if (ret) {
 		return ret;
 	}
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
new file mode 100644
index 000000000000..7c03a70c55a2
--- /dev/null
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -0,0 +1,7 @@
1config INFINIBAND_QIB
2 tristate "QLogic PCIe HCA support"
3 depends on 64BIT && NET
4 ---help---
5 This is a low-level driver for QLogic PCIe QLE InfiniBand host
6 channel adapters. This driver does not support the QLogic
7 HyperTransport card (model QHT7140).
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile
new file mode 100644
index 000000000000..c6515a1b9a6a
--- /dev/null
+++ b/drivers/infiniband/hw/qib/Makefile
@@ -0,0 +1,15 @@
1obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
2
3ib_qib-y := qib_cq.o qib_diag.o qib_dma.o qib_driver.o qib_eeprom.o \
4 qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \
5 qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \
6 qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \
7 qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
8 qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \
9 qib_sd7220.o qib_sd7220_img.o qib_iba7322.o qib_verbs.o
10
11# 6120 has no fallback if no MSI interrupts, others can do INTx
12ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o
13
14ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o
15ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
new file mode 100644
index 000000000000..32d9208efcff
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -0,0 +1,1439 @@
1#ifndef _QIB_KERNEL_H
2#define _QIB_KERNEL_H
3/*
4 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
5 * All rights reserved.
6 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 */
36
37/*
38 * This header file is the base header file for qlogic_ib kernel code
39 * qib_user.h serves a similar purpose for user code.
40 */
41
42#include <linux/interrupt.h>
43#include <linux/pci.h>
44#include <linux/dma-mapping.h>
45#include <linux/mutex.h>
46#include <linux/list.h>
47#include <linux/scatterlist.h>
48#include <linux/io.h>
49#include <linux/fs.h>
50#include <linux/completion.h>
51#include <linux/kref.h>
52#include <linux/sched.h>
53
54#include "qib_common.h"
55#include "qib_verbs.h"
56
57/* only s/w major version of QLogic_IB we can handle */
58#define QIB_CHIP_VERS_MAJ 2U
59
60/* don't care about this except printing */
61#define QIB_CHIP_VERS_MIN 0U
62
63/* The Organization Unique Identifier (Mfg code), and its position in GUID */
64#define QIB_OUI 0x001175
65#define QIB_OUI_LSB 40
66
67/*
68 * per driver stats, either not device nor port-specific, or
69 * summed over all of the devices and ports.
70 * They are described by name via ipathfs filesystem, so layout
71 * and number of elements can change without breaking compatibility.
72 * If members are added or deleted qib_statnames[] in qib_fs.c must
73 * change to match.
74 */
75struct qlogic_ib_stats {
76 __u64 sps_ints; /* number of interrupts handled */
77 __u64 sps_errints; /* number of error interrupts */
78 __u64 sps_txerrs; /* tx-related packet errors */
79 __u64 sps_rcverrs; /* non-crc rcv packet errors */
80 __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
81 __u64 sps_nopiobufs; /* no pio bufs avail from kernel */
82 __u64 sps_ctxts; /* number of contexts currently open */
83 __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
84 __u64 sps_buffull;
85 __u64 sps_hdrfull;
86};
87
88extern struct qlogic_ib_stats qib_stats;
89extern struct pci_error_handlers qib_pci_err_handler;
90extern struct pci_driver qib_driver;
91
92#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
93/*
94 * First-cut criterion for "device is active" is
95 * two thousand dwords combined Tx, Rx traffic per
96 * 5-second interval. SMA packets are 64 dwords,
97 * and occur "a few per second", presumably each way.
98 */
99#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
100
101/*
102 * Struct used to indicate which errors are logged in each of the
103 * error-counters that are logged to EEPROM. A counter is incremented
104 * _once_ (saturating at 255) for each event with any bits set in
105 * the error or hwerror register masks below.
106 */
107#define QIB_EEP_LOG_CNT (4)
108struct qib_eep_log_mask {
109 u64 errs_to_log;
110 u64 hwerrs_to_log;
111};
112
113/*
114 * Below contains all data related to a single context (formerly called port).
115 */
116struct qib_ctxtdata {
117 void **rcvegrbuf;
118 dma_addr_t *rcvegrbuf_phys;
119 /* rcvhdrq base, needs mmap before useful */
120 void *rcvhdrq;
121 /* kernel virtual address where hdrqtail is updated */
122 void *rcvhdrtail_kvaddr;
123 /*
124 * temp buffer for expected send setup, allocated at open, instead
125 * of each setup call
126 */
127 void *tid_pg_list;
128 /*
129 * Shared page for kernel to signal user processes that send buffers
130 * need disarming. The process should call QIB_CMD_DISARM_BUFS
131 * or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
132 */
133 unsigned long *user_event_mask;
134 /* when waiting for rcv or pioavail */
135 wait_queue_head_t wait;
136 /*
137 * rcvegr bufs base, physical (must fit in 44 bits so
138 * that mmap64 from 32 bit programs works)
139 */
140 dma_addr_t rcvegr_phys;
141 /* mmap of hdrq, must fit in 44 bits */
142 dma_addr_t rcvhdrq_phys;
143 dma_addr_t rcvhdrqtailaddr_phys;
144
145 /*
146 * number of opens (including slave sub-contexts) on this instance
147 * (ignoring forks, dup, etc. for now)
148 */
149 int cnt;
150 /*
151 * how much space to leave at start of eager TID entries for
152 * protocol use, on each TID
153 */
154 /* instead of calculating it */
155 unsigned ctxt;
156 /* non-zero if ctxt is being shared. */
157 u16 subctxt_cnt;
158 /* non-zero if ctxt is being shared. */
159 u16 subctxt_id;
160 /* number of eager TID entries. */
161 u16 rcvegrcnt;
162 /* index of first eager TID entry. */
163 u16 rcvegr_tid_base;
164 /* number of pio bufs for this ctxt (all procs, if shared) */
165 u32 piocnt;
166 /* first pio buffer for this ctxt */
167 u32 pio_base;
168 /* chip offset of PIO buffers for this ctxt */
169 u32 piobufs;
170 /* how many alloc_pages() chunks in rcvegrbuf_pages */
171 u32 rcvegrbuf_chunks;
172 /* how many egrbufs per chunk */
173 u32 rcvegrbufs_perchunk;
174 /* order for rcvegrbuf_pages */
175 size_t rcvegrbuf_size;
176 /* rcvhdrq size (for freeing) */
177 size_t rcvhdrq_size;
178 /* per-context flags for fileops/intr communication */
179 unsigned long flag;
180 /* next expected TID to check when looking for free */
181 u32 tidcursor;
182 /* WAIT_RCV that timed out, no interrupt */
183 u32 rcvwait_to;
184 /* WAIT_PIO that timed out, no interrupt */
185 u32 piowait_to;
186 /* WAIT_RCV already happened, no wait */
187 u32 rcvnowait;
188 /* WAIT_PIO already happened, no wait */
189 u32 pionowait;
190 /* total number of polled urgent packets */
191 u32 urgent;
192 /* saved total number of polled urgent packets for poll edge trigger */
193 u32 urgent_poll;
194 /* pid of process using this ctxt */
195 pid_t pid;
196 pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
197 /* same size as task_struct .comm[], command that opened context */
198 char comm[16];
199 /* pkeys set by this use of this ctxt */
200 u16 pkeys[4];
201 /* so file ops can get at unit */
202 struct qib_devdata *dd;
203 /* so funcs that need physical port can get it easily */
204 struct qib_pportdata *ppd;
205 /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
206 void *subctxt_uregbase;
207 /* An array of pages for the eager receive buffers * N */
208 void *subctxt_rcvegrbuf;
209 /* An array of pages for the eager header queue entries * N */
210 void *subctxt_rcvhdr_base;
211 /* The version of the library which opened this ctxt */
212 u32 userversion;
213 /* Bitmask of active slaves */
214 u32 active_slaves;
215 /* Type of packets or conditions we want to poll for */
216 u16 poll_type;
217 /* receive packet sequence counter */
218 u8 seq_cnt;
219 u8 redirect_seq_cnt;
220 /* ctxt rcvhdrq head offset */
221 u32 head;
222 u32 pkt_count;
223 /* QPs waiting for context processing */
224 struct list_head qp_wait_list;
225};
226
227struct qib_sge_state;
228
229struct qib_sdma_txreq {
230 int flags;
231 int sg_count;
232 dma_addr_t addr;
233 void (*callback)(struct qib_sdma_txreq *, int);
234 u16 start_idx; /* sdma private */
235 u16 next_descq_idx; /* sdma private */
236 struct list_head list; /* sdma private */
237};
238
239struct qib_sdma_desc {
240 __le64 qw[2];
241};
242
243struct qib_verbs_txreq {
244 struct qib_sdma_txreq txreq;
245 struct qib_qp *qp;
246 struct qib_swqe *wqe;
247 u32 dwords;
248 u16 hdr_dwords;
249 u16 hdr_inx;
250 struct qib_pio_header *align_buf;
251 struct qib_mregion *mr;
252 struct qib_sge_state *ss;
253};
254
255#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
256#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
257#define QIB_SDMA_TXREQ_F_INTREQ 0x4
258#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
259#define QIB_SDMA_TXREQ_F_FREEDESC 0x10
260
261#define QIB_SDMA_TXREQ_S_OK 0
262#define QIB_SDMA_TXREQ_S_SENDERROR 1
263#define QIB_SDMA_TXREQ_S_ABORTED 2
264#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
265
266/*
267 * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
268 * Mostly for MADs that set or query link parameters, also ipath
269 * config interfaces
270 */
271#define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
272#define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */
273#define QIB_IB_CFG_LWID 3 /* currently active Link-width */
274#define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
275#define QIB_IB_CFG_SPD 5 /* current Link spd */
276#define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
277#define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
278#define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
279#define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
280#define QIB_IB_CFG_OP_VLS 10 /* operational VLs */
281#define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
282#define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
283#define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
284#define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
285#define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
286#define QIB_IB_CFG_PKEYS 16 /* update partition keys */
287#define QIB_IB_CFG_MTU 17 /* update MTU in IBC */
288#define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */
289#define QIB_IB_CFG_VL_HIGH_LIMIT 19
290#define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
291#define QIB_IB_CFG_PORT 21 /* switch port we are connected to */
292
293/*
294 * for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16
295 * IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for
296 * QIB_IB_CFG_LINKDEFAULT cmd
297 */
298#define IB_LINKCMD_DOWN (0 << 16)
299#define IB_LINKCMD_ARMED (1 << 16)
300#define IB_LINKCMD_ACTIVE (2 << 16)
301#define IB_LINKINITCMD_NOP 0
302#define IB_LINKINITCMD_POLL 1
303#define IB_LINKINITCMD_SLEEP 2
304#define IB_LINKINITCMD_DISABLE 3
305
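A CFG_LSTATE argument therefore packs a LINKCMD into the upper 16 bits and a LINKINITCMD into the lower 16. A hedged usage sketch (f_set_ib_cfg is the hook declared in struct qib_devdata further down in this header; dd and ppd stand for a device and one of its ports):

	/* request the ARMED link state, polling for a peer */
	dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
			 IB_LINKCMD_ARMED | IB_LINKINITCMD_POLL);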
306/*
307 * valid states passed to qib_set_linkstate() user call
308 */
309#define QIB_IB_LINKDOWN 0
310#define QIB_IB_LINKARM 1
311#define QIB_IB_LINKACTIVE 2
312#define QIB_IB_LINKDOWN_ONLY 3
313#define QIB_IB_LINKDOWN_SLEEP 4
314#define QIB_IB_LINKDOWN_DISABLE 5
315
316/*
317 * These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
318 * negotiation) are used for the 3rd argument to the f_set_ib_cfg hook
319 * with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
320 * are also the possible values for qib_link_speed_enabled and _active.
321 * The values were chosen to match values used within the IB spec.
322 */
323#define QIB_IB_SDR 1
324#define QIB_IB_DDR 2
325#define QIB_IB_QDR 4
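Since these are single bits, speed enables can be ORed exactly as the comment above describes; a sketch under the same assumptions as the CFG_LSTATE example earlier:

	/* enable full SDR/DDR/QDR auto-speed negotiation (== 7) */
	dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB,
			 QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR);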
326
327#define QIB_DEFAULT_MTU 4096
328
329/*
330 * Possible IB config parameters for f_get/set_ib_table()
331 */
332#define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */
333#define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */
334
335/*
336 * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
337 * these are bits so they can be combined, e.g.
338 * QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB
339 */
340#define QIB_RCVCTRL_TAILUPD_ENB 0x01
341#define QIB_RCVCTRL_TAILUPD_DIS 0x02
342#define QIB_RCVCTRL_CTXT_ENB 0x04
343#define QIB_RCVCTRL_CTXT_DIS 0x08
344#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
345#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
346#define QIB_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
347#define QIB_RCVCTRL_PKEY_DIS 0x80
348#define QIB_RCVCTRL_BP_ENB 0x0100
349#define QIB_RCVCTRL_BP_DIS 0x0200
350#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
351#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
352
353/*
354 * Possible "operations" for f_sendctrl(ppd, op, var)
355 * these are bits so they can be combined, e.g.
356 * QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB
357 * Some operations (e.g. DISARM, ABORT) are known to
358 * be "one-shot", so do not modify shadow.
359 */
360#define QIB_SENDCTRL_DISARM (0x1000)
361#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
362 /* available (0x2000) */
363#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
364#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
365#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
366#define QIB_SENDCTRL_SEND_DIS (0x20000)
367#define QIB_SENDCTRL_SEND_ENB (0x40000)
368#define QIB_SENDCTRL_FLUSH (0x80000)
369#define QIB_SENDCTRL_CLEAR (0x100000)
370#define QIB_SENDCTRL_DISARM_ALL (0x200000)
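QIB_SENDCTRL_DISARM_BUF() above packs a buffer number into the low bits underneath the op flags; a hedged sketch (f_sendctrl is the per-port hook declared later in this header, and the buffer number is arbitrary):

	/* disarm send buffer 5 only */
	dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(5));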
371
372/*
373 * These are the generic indices for requesting per-port
374 * counter values via the f_portcntr function. They
375 * are always returned as 64 bit values, although most
376 * are 32 bit counters.
377 */
378/* send-related counters */
379#define QIBPORTCNTR_PKTSEND 0U
380#define QIBPORTCNTR_WORDSEND 1U
381#define QIBPORTCNTR_PSXMITDATA 2U
382#define QIBPORTCNTR_PSXMITPKTS 3U
383#define QIBPORTCNTR_PSXMITWAIT 4U
384#define QIBPORTCNTR_SENDSTALL 5U
385/* receive-related counters */
386#define QIBPORTCNTR_PKTRCV 6U
387#define QIBPORTCNTR_PSRCVDATA 7U
388#define QIBPORTCNTR_PSRCVPKTS 8U
389#define QIBPORTCNTR_RCVEBP 9U
390#define QIBPORTCNTR_RCVOVFL 10U
391#define QIBPORTCNTR_WORDRCV 11U
392/* IB link related error counters */
393#define QIBPORTCNTR_RXLOCALPHYERR 12U
394#define QIBPORTCNTR_RXVLERR 13U
395#define QIBPORTCNTR_ERRICRC 14U
396#define QIBPORTCNTR_ERRVCRC 15U
397#define QIBPORTCNTR_ERRLPCRC 16U
398#define QIBPORTCNTR_BADFORMAT 17U
399#define QIBPORTCNTR_ERR_RLEN 18U
400#define QIBPORTCNTR_IBSYMBOLERR 19U
401#define QIBPORTCNTR_INVALIDRLEN 20U
402#define QIBPORTCNTR_UNSUPVL 21U
403#define QIBPORTCNTR_EXCESSBUFOVFL 22U
404#define QIBPORTCNTR_ERRLINK 23U
405#define QIBPORTCNTR_IBLINKDOWN 24U
406#define QIBPORTCNTR_IBLINKERRRECOV 25U
407#define QIBPORTCNTR_LLI 26U
408/* other error counters */
409#define QIBPORTCNTR_RXDROPPKT 27U
410#define QIBPORTCNTR_VL15PKTDROP 28U
411#define QIBPORTCNTR_ERRPKEY 29U
412#define QIBPORTCNTR_KHDROVFL 30U
413/* sampling counters (these are actually control registers) */
414#define QIBPORTCNTR_PSINTERVAL 31U
415#define QIBPORTCNTR_PSSTART 32U
416#define QIBPORTCNTR_PSSTAT 33U
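All of these indices feed the f_portcntr hook declared in struct qib_devdata below, which returns the value widened to 64 bits regardless of the underlying counter width; a hedged usage sketch:

	/* read the IB symbol-error counter for a port */
	u64 symerrs = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);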
417
418/* how often we check for packet activity for "power on hours" (in seconds) */
419#define ACTIVITY_TIMER 5
420
421/* Below is an opaque struct. Each chip (device) can maintain
422 * private data needed for its operation, but not germane to the
423 * rest of the driver. For convenience, we define another that
424 * is chip-specific, per-port
425 */
426struct qib_chip_specific;
427struct qib_chipport_specific;
428
429enum qib_sdma_states {
430 qib_sdma_state_s00_hw_down,
431 qib_sdma_state_s10_hw_start_up_wait,
432 qib_sdma_state_s20_idle,
433 qib_sdma_state_s30_sw_clean_up_wait,
434 qib_sdma_state_s40_hw_clean_up_wait,
435 qib_sdma_state_s50_hw_halt_wait,
436 qib_sdma_state_s99_running,
437};
438
439enum qib_sdma_events {
440 qib_sdma_event_e00_go_hw_down,
441 qib_sdma_event_e10_go_hw_start,
442 qib_sdma_event_e20_hw_started,
443 qib_sdma_event_e30_go_running,
444 qib_sdma_event_e40_sw_cleaned,
445 qib_sdma_event_e50_hw_cleaned,
446 qib_sdma_event_e60_hw_halted,
447 qib_sdma_event_e70_go_idle,
448 qib_sdma_event_e7220_err_halted,
449 qib_sdma_event_e7322_err_halted,
450 qib_sdma_event_e90_timer_tick,
451};
452
453extern char *qib_sdma_state_names[];
454extern char *qib_sdma_event_names[];
455
456struct sdma_set_state_action {
457 unsigned op_enable:1;
458 unsigned op_intenable:1;
459 unsigned op_halt:1;
460 unsigned op_drain:1;
461 unsigned go_s99_running_tofalse:1;
462 unsigned go_s99_running_totrue:1;
463};
464
465struct qib_sdma_state {
466 struct kref kref;
467 struct completion comp;
468 enum qib_sdma_states current_state;
469 struct sdma_set_state_action *set_state_action;
470 unsigned current_op;
471 unsigned go_s99_running;
472 unsigned first_sendbuf;
473 unsigned last_sendbuf; /* really last +1 */
474 /* debugging/devel */
475 enum qib_sdma_states previous_state;
476 unsigned previous_op;
477 enum qib_sdma_events last_event;
478};
479
480struct xmit_wait {
481 struct timer_list timer;
482 u64 counter;
483 u8 flags;
484 struct cache {
485 u64 psxmitdata;
486 u64 psrcvdata;
487 u64 psxmitpkts;
488 u64 psrcvpkts;
489 u64 psxmitwait;
490 } counter_cache;
491};
492
493/*
494 * The structure below encapsulates data relevant to a physical IB Port.
495 * Current chips support only one such port, but the separation
496 * clarifies things a bit. Note that to conform to IB conventions,
497 * port-numbers are one-based. The first or only port is port1.
498 */
499struct qib_pportdata {
500 struct qib_ibport ibport_data;
501
502 struct qib_devdata *dd;
503 struct qib_chippport_specific *cpspec; /* chip-specific per-port */
504 struct kobject pport_kobj;
505 struct kobject sl2vl_kobj;
506 struct kobject diagc_kobj;
507
508 /* GUID for this interface, in network order */
509 __be64 guid;
510
511 /* QIB_POLL, etc. link-state specific flags, per port */
512 u32 lflags;
513 /* qib_lflags driver is waiting for */
514 u32 state_wanted;
515 spinlock_t lflags_lock;
516 /* number of (port-specific) interrupts for this port -- saturates... */
517 u32 int_counter;
518
519 /* ref count for each pkey */
520 atomic_t pkeyrefs[4];
521
522 /*
523 * this address is mapped readonly into user processes so they can
524 * get status cheaply, whenever they want. One qword of status per port
525 */
526 u64 *statusp;
527
528 /* SendDMA related entries */
529 spinlock_t sdma_lock;
530 struct qib_sdma_state sdma_state;
531 unsigned long sdma_buf_jiffies;
532 struct qib_sdma_desc *sdma_descq;
533 u64 sdma_descq_added;
534 u64 sdma_descq_removed;
535 u16 sdma_descq_cnt;
536 u16 sdma_descq_tail;
537 u16 sdma_descq_head;
538 u16 sdma_next_intr;
539 u16 sdma_reset_wait;
540 u8 sdma_generation;
541 struct tasklet_struct sdma_sw_clean_up_task;
542 struct list_head sdma_activelist;
543
544 dma_addr_t sdma_descq_phys;
545 volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
546 dma_addr_t sdma_head_phys;
547
548 wait_queue_head_t state_wait; /* for state_wanted */
549
550 /* HoL blocking for SMP replies */
551 unsigned hol_state;
552 struct timer_list hol_timer;
553
554 /*
555 * Shadow copies of registers; size indicates read access size.
556 * Most of them are readonly, but some are write-only register,
557 * where we manipulate the bits in the shadow copy, and then write
558 * the shadow copy to qlogic_ib.
559 *
560 * We deliberately make most of these 32 bits, since they have
561 * restricted range. For any that we read, we want to generate 32
562 * bit accesses, since Opteron will generate 2 separate 32 bit HT
563 * transactions for a 64 bit read, and we want to avoid unnecessary
564 * bus transactions.
565 */
566
567 /* This is the 64 bit group */
568 /* last ibcstatus. opaque outside chip-specific code */
569 u64 lastibcstat;
570
571 /* these are the "32 bit" regs */
572
573 /*
574 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
575 * all expect bit fields to be "unsigned long"
576 */
577 unsigned long p_rcvctrl; /* shadow per-port rcvctrl */
578 unsigned long p_sendctrl; /* shadow per-port sendctrl */
579
580 u32 ibmtu; /* The MTU programmed for this unit */
581 /*
582 * Current max size IB packet (in bytes) including IB headers, that
583 * we can send. Changes when ibmtu changes.
584 */
585 u32 ibmaxlen;
586 /*
587 * ibmaxlen at init time, limited by chip and by receive buffer
588 * size. Not changed after init.
589 */
590 u32 init_ibmaxlen;
591 /* LID programmed for this instance */
592 u16 lid;
593 /* list of pkeys programmed; 0 if not set */
594 u16 pkeys[4];
595 /* LID mask control */
596 u8 lmc;
597 u8 link_width_supported;
598 u8 link_speed_supported;
599 u8 link_width_enabled;
600 u8 link_speed_enabled;
601 u8 link_width_active;
602 u8 link_speed_active;
603 u8 vls_supported;
604 u8 vls_operational;
605 /* Rx Polarity inversion (compensate for ~tx on partner) */
606 u8 rx_pol_inv;
607
608 u8 hw_pidx; /* physical port index */
609 u8 port; /* IB port number and index into dd->pports - 1 */
610
611 u8 delay_mult;
612
613 /* used to override LED behavior */
614 u8 led_override; /* Substituted for normal value, if non-zero */
615 u16 led_override_timeoff; /* delta to next timer event */
616 u8 led_override_vals[2]; /* Alternates per blink-frame */
617 u8 led_override_phase; /* Just counts, LSB picks from vals[] */
618 atomic_t led_override_timer_active;
619 /* Used to flash LEDs in override mode */
620 struct timer_list led_override_timer;
621 struct xmit_wait cong_stats;
622 struct timer_list symerr_clear_timer;
623};
624
625/* Observers. Not to be taken lightly, possibly not to ship. */
626/*
627 * If a diag read or write is to (bottom <= offset <= top),
628 * the "hook" is called, allowing, e.g., shadows to be
629 * updated in sync with the driver. struct diag_observer
630 * is the "visible" part.
631 */
632struct diag_observer;
633
634typedef int (*diag_hook) (struct qib_devdata *dd,
635 const struct diag_observer *op,
636 u32 offs, u64 *data, u64 mask, int only_32);
637
638struct diag_observer {
639 diag_hook hook;
640 u32 bottom;
641 u32 top;
642};
643
644extern int qib_register_observer(struct qib_devdata *dd,
645 const struct diag_observer *op);
646
647/* Only declared here, not defined. Private to diags */
648struct diag_observer_list_elt;
649
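Putting the observer pieces together, a hypothetical registration might look like this (a sketch: the hook body, window offsets, and return convention are invented for illustration, since the return semantics are private to the diag code):

static int shadow_hook(struct qib_devdata *dd, const struct diag_observer *op,
		       u32 offs, u64 *data, u64 mask, int only_32)
{
	/* e.g. keep a driver-side shadow in sync with diag accesses */
	return 0;
}

static const struct diag_observer shadow_obs = {
	.hook	= shadow_hook,
	.bottom	= 0x100,	/* hypothetical register window */
	.top	= 0x107,
};

/* at init time: qib_register_observer(dd, &shadow_obs); */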
650/* device data struct now contains only "general per-device" info.
651 * fields related to a physical IB port are in a qib_pportdata struct,
652 * (described above), while fields only used by a particular chip-type are in
653 * a qib_chipdata struct, whose contents are opaque to this file.
654 */
655struct qib_devdata {
656 struct qib_ibdev verbs_dev; /* must be first */
657 struct list_head list;
658 /* pointers to related structs for this device */
659 /* pci access data structure */
660 struct pci_dev *pcidev;
661 struct cdev *user_cdev;
662 struct cdev *diag_cdev;
663 struct device *user_device;
664 struct device *diag_device;
665
666 /* mem-mapped pointer to base of chip regs */
667 u64 __iomem *kregbase;
668 /* end of mem-mapped chip space excluding sendbuf and user regs */
669 u64 __iomem *kregend;
670 /* physical address of chip for io_remap, etc. */
671 resource_size_t physaddr;
672 /* qib_cfgctxts pointers */
673 struct qib_ctxtdata **rcd; /* Receive Context Data */
674
675 /* qib_pportdata, points to array of (physical) port-specific
676 * data structs, indexed by pidx (0..n-1)
677 */
678 struct qib_pportdata *pport;
679 struct qib_chip_specific *cspec; /* chip-specific */
680
681 /* kvirt address of 1st 2k pio buffer */
682 void __iomem *pio2kbase;
683 /* kvirt address of 1st 4k pio buffer */
684 void __iomem *pio4kbase;
685 /* mem-mapped pointer to base of PIO buffers (if using WC PAT) */
686 void __iomem *piobase;
687 /* mem-mapped pointer to base of user chip regs (if using WC PAT) */
688 u64 __iomem *userbase;
689 /*
690 * points to area where PIOavail registers will be DMA'ed.
691 * Has to be on a page of its own, because the page will be
692 * mapped into user program space. This copy is *ONLY* ever
693 * written by DMA, not by the driver! Need a copy per device
694 * when we get to multiple devices
695 */
696 volatile __le64 *pioavailregs_dma; /* DMA'ed by chip */
697 /* physical address where updates occur */
698 dma_addr_t pioavailregs_phys;
699
700 /* device-specific implementations of functions needed by
701 * common code. Contrary to previous consensus, we can't
702 * really just point to a device-specific table, because we
703 * may need to "bend", e.g. *_f_put_tid
704 */
705 /* fallback to alternate interrupt type if possible */
706 int (*f_intr_fallback)(struct qib_devdata *);
707 /* hard reset chip */
708 int (*f_reset)(struct qib_devdata *);
709 void (*f_quiet_serdes)(struct qib_pportdata *);
710 int (*f_bringup_serdes)(struct qib_pportdata *);
711 int (*f_early_init)(struct qib_devdata *);
712 void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
713 void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
714 u32, unsigned long);
715 void (*f_cleanup)(struct qib_devdata *);
716 void (*f_setextled)(struct qib_pportdata *, u32);
717 /* fill out chip-specific fields */
718 int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
719 /* free irq */
720 void (*f_free_irq)(struct qib_devdata *);
721 struct qib_message_header *(*f_get_msgheader)
722 (struct qib_devdata *, __le32 *);
723 void (*f_config_ctxts)(struct qib_devdata *);
724 int (*f_get_ib_cfg)(struct qib_pportdata *, int);
725 int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
726 int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
727 int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
728 int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
729 u32 (*f_iblink_state)(u64);
730 u8 (*f_ibphys_portstate)(u64);
731 void (*f_xgxs_reset)(struct qib_pportdata *);
732 /* per chip actions needed for IB Link up/down changes */
733 int (*f_ib_updown)(struct qib_pportdata *, int, u64);
734 u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
735 /* Read/modify/write of GPIO pins (potentially chip-specific) */
736 int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
737 u32 mask);
738 /* Enable writes to config EEPROM (if supported) */
739 int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
740 /*
741 * modify rcvctrl shadow[s] and write to appropriate chip-regs.
742 * see above QIB_RCVCTRL_xxx_ENB/DIS for operations.
743 * (ctxt == -1) means "all contexts", only meaningful for
744 * clearing. Could remove if chip_spec shutdown properly done.
745 */
746 void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
747 int ctxt);
748 /* Read/modify/write sendctrl appropriately for op and port. */
749 void (*f_sendctrl)(struct qib_pportdata *, u32 op);
750 void (*f_set_intr_state)(struct qib_devdata *, u32);
751 void (*f_set_armlaunch)(struct qib_devdata *, u32);
752 void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
753 int (*f_late_initreg)(struct qib_devdata *);
754 int (*f_init_sdma_regs)(struct qib_pportdata *);
755 u16 (*f_sdma_gethead)(struct qib_pportdata *);
756 int (*f_sdma_busy)(struct qib_pportdata *);
757 void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
758 void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
759 void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
760 void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
761 void (*f_sdma_hw_start_up)(struct qib_pportdata *);
762 void (*f_sdma_init_early)(struct qib_pportdata *);
763 void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
764 void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
765 u32 (*f_hdrqempty)(struct qib_ctxtdata *);
766 u64 (*f_portcntr)(struct qib_pportdata *, u32);
767 u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
768 u64 **);
769 u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
770 char **, u64 **);
771 u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
772 void (*f_initvl15_bufs)(struct qib_devdata *);
773 void (*f_init_ctxt)(struct qib_ctxtdata *);
774 void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
775 struct qib_ctxtdata *);
776 void (*f_writescratch)(struct qib_devdata *, u32);
777 int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
778
779 char *boardname; /* human readable board info */
780
781 /* template for writing TIDs */
782 u64 tidtemplate;
783 /* value to write to free TIDs */
784 u64 tidinvalid;
785
786 /* number of registers used for pioavail */
787 u32 pioavregs;
788 /* device (not port) flags, basically device capabilities */
789 u32 flags;
790 /* last buffer for user use */
791 u32 lastctxt_piobuf;
792
793 /* saturating counter of (non-port-specific) device interrupts */
794 u32 int_counter;
795
796 /* pio bufs allocated per ctxt */
797 u32 pbufsctxt;
798 /* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
799 u32 ctxts_extrabuf;
800 /*
801 * number of ctxts configured as max; zero is set to number chip
802 * supports, less gives more pio bufs/ctxt, etc.
803 */
804 u32 cfgctxts;
805
806 /*
807 * hint that we should update pioavailshadow before
808 * looking for a PIO buffer
809 */
810 u32 upd_pio_shadow;
811
812 /* internal debugging stats */
813 u32 maxpkts_call;
814 u32 avgpkts_call;
815 u64 nopiobufs;
816
817 /* PCI Vendor ID (here for NodeInfo) */
818 u16 vendorid;
819 /* PCI Device ID (here for NodeInfo) */
820 u16 deviceid;
821 /* for write combining settings */
822 unsigned long wc_cookie;
823 unsigned long wc_base;
824 unsigned long wc_len;
825
826 /* shadow copy of struct page *'s for exp tid pages */
827 struct page **pageshadow;
828 /* shadow copy of dma handles for exp tid pages */
829 dma_addr_t *physshadow;
830 u64 __iomem *egrtidbase;
831 spinlock_t sendctrl_lock; /* protect changes to sendctrl shadow */
832 /* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
833 spinlock_t uctxt_lock; /* rcd and user context changes */
834 /*
835 * per unit status, see also portdata statusp
836 * mapped readonly into user processes so they can get unit and
837 * IB link status cheaply
838 */
839 u64 *devstatusp;
840 char *freezemsg; /* freeze msg if hw error put chip in freeze */
841 u32 freezelen; /* max length of freezemsg */
842 /* timer used to prevent stats overflow, error throttling, etc. */
843 struct timer_list stats_timer;
844
845 /* timer to verify interrupts work, and fallback if possible */
846 struct timer_list intrchk_timer;
847 unsigned long ureg_align; /* user register alignment */
848
849 /*
850 * Protects pioavailshadow, pioavailkernel, pio_need_disarm, and
851 * pio_writing.
852 */
853 spinlock_t pioavail_lock;
854
855 /*
856 * Shadow copies of registers; size indicates read access size.
857 * Most of them are readonly, but some are write-only registers,
858 * where we manipulate the bits in the shadow copy, and then write
859 * the shadow copy to qlogic_ib.
860 *
861 * We deliberately make most of these 32 bits, since they have
862 * restricted range. For any that we read, we want to generate 32
863 * bit accesses, since Opteron will generate 2 separate 32 bit HT
864 * transactions for a 64 bit read, and we want to avoid unnecessary
865 * bus transactions.
866 */
867
868 /* This is the 64 bit group */
869
870 unsigned long pioavailshadow[6];
871 /* bitmap of send buffers available for the kernel to use with PIO. */
872 unsigned long pioavailkernel[6];
873 /* bitmap of send buffers which need to be disarmed. */
874 unsigned long pio_need_disarm[3];
875 /* bitmap of send buffers which are being written to. */
876 unsigned long pio_writing[3];
877 /* kr_revision shadow */
878 u64 revision;
879 /* Base GUID for device (from eeprom, network order) */
880 __be64 base_guid;
881
882 /*
883 * kr_sendpiobufbase value (chip offset of pio buffers), and the
884 * base of the 2KB buffers (user processes only use 2K)
885 */
886 u64 piobufbase;
887 u32 pio2k_bufbase;
888
889 /* these are the "32 bit" regs */
890
891 /* number of GUIDs in the flash for this interface */
892 u32 nguid;
893 /*
894 * the following two are 32-bit bitmasks, but {test,clear,set}_bit
895 * all expect bit fields to be "unsigned long"
896 */
897 unsigned long rcvctrl; /* shadow per device rcvctrl */
898 unsigned long sendctrl; /* shadow per device sendctrl */
899
900 /* value we put in kr_rcvhdrcnt */
901 u32 rcvhdrcnt;
902 /* value we put in kr_rcvhdrsize */
903 u32 rcvhdrsize;
904 /* value we put in kr_rcvhdrentsize */
905 u32 rcvhdrentsize;
906 /* kr_ctxtcnt value */
907 u32 ctxtcnt;
908 /* kr_pagealign value */
909 u32 palign;
910 /* number of "2KB" PIO buffers */
911 u32 piobcnt2k;
912 /* size in bytes of "2KB" PIO buffers */
913 u32 piosize2k;
914 /* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */
915 u32 piosize2kmax_dwords;
916 /* number of "4KB" PIO buffers */
917 u32 piobcnt4k;
918 /* size in bytes of "4KB" PIO buffers */
919 u32 piosize4k;
920 /* kr_rcvegrbase value */
921 u32 rcvegrbase;
922 /* kr_rcvtidbase value */
923 u32 rcvtidbase;
924 /* kr_rcvtidcnt value */
925 u32 rcvtidcnt;
926 /* kr_userregbase */
927 u32 uregbase;
928 /* shadow the control register contents */
929 u32 control;
930
931 /* chip address space used by 4k pio buffers */
932 u32 align4k;
933 /* size of each rcvegrbuffer */
934 u32 rcvegrbufsize;
935 /* localbus width (1, 2, 4, 8, 16, 32) from config space */
936 u32 lbus_width;
937 /* localbus speed in MHz */
938 u32 lbus_speed;
939 int unit; /* unit # of this chip */
940
941 /* start of CHIP_SPEC fields; should move to chipspec, but needs code changes */
942 /* low and high portions of MSI capability/vector */
943 u32 msi_lo;
944 /* saved after PCIe init for restore after reset */
945 u32 msi_hi;
946 /* MSI data (vector) saved for restore */
947 u16 msi_data;
948 /* so we can rewrite it after a chip reset */
949 u32 pcibar0;
950 /* so we can rewrite it after a chip reset */
951 u32 pcibar1;
952 u64 rhdrhead_intr_off;
953
954 /*
955 * ASCII serial number, from flash, large enough for original
956 * all digit strings, and longer QLogic serial number format
957 */
958 u8 serial[16];
959 /* human readable board version */
960 u8 boardversion[96];
961 u8 lbus_info[32]; /* human readable localbus info */
962 /* chip major rev, from qib_revision */
963 u8 majrev;
964 /* chip minor rev, from qib_revision */
965 u8 minrev;
966
967 /* Misc small ints */
968 /* Number of physical ports available */
969 u8 num_pports;
970 /* Lowest context number which can be used by user processes */
971 u8 first_user_ctxt;
972 u8 n_krcv_queues;
973 u8 qpn_mask;
974 u8 skip_kctxt_mask;
975
976 u16 rhf_offset; /* offset of RHF within receive header entry */
977
978 /*
979 * GPIO pins for twsi-connected devices, and device code for eeprom
980 */
981 u8 gpio_sda_num;
982 u8 gpio_scl_num;
983 u8 twsi_eeprom_dev;
984 u8 board_atten;
985
986 /* Support (including locks) for EEPROM logging of errors and time */
987 /* control access to actual counters, timer */
988 spinlock_t eep_st_lock;
989 /* control high-level access to EEPROM */
990 struct mutex eep_lock;
991 uint64_t traffic_wds;
992 /* active time is kept in seconds, but logged in hours */
993 atomic_t active_time;
994 /* Below are nominal shadow of EEPROM, new since last EEPROM update */
995 uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
996 uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
997 uint16_t eep_hrs;
998 /*
999 * masks for which bits of errs, hwerrs cause
1000 * each of the counters to increment.
1001 */
1002 struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT];
1003 struct qib_diag_client *diag_client;
1004 spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
1005 struct diag_observer_list_elt *diag_observer_list;
1006
1007 u8 psxmitwait_supported;
1008 /* cycle length of PS* counters in HW (in picoseconds) */
1009 u16 psxmitwait_check_rate;
1010};
1011
1012/* hol_state values */
1013#define QIB_HOL_UP 0
1014#define QIB_HOL_INIT 1
1015
1016#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
1017#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
1018#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
1019#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
1020#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
1021
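/*
 * Editor's sketch (not in the driver source): these OP bits form the
 * "op" bitmask taken by f_sdma_sendctrl() above, and can be OR'ed
 * together; e.g. enabling the engine and its interrupt in one call
 * might look like:
 *
 *	ppd->dd->f_sdma_sendctrl(ppd, QIB_SDMA_SENDCTRL_OP_ENABLE |
 *				      QIB_SDMA_SENDCTRL_OP_INTENABLE);
 */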
1022/* operation types for f_txchk_change() */
1023#define TXCHK_CHG_TYPE_DIS1 3
1024#define TXCHK_CHG_TYPE_ENAB1 2
1025#define TXCHK_CHG_TYPE_KERN 1
1026#define TXCHK_CHG_TYPE_USER 0
1027
1028#define QIB_CHASE_TIME msecs_to_jiffies(145)
1029#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
1030
1031/* Private data for file operations */
1032struct qib_filedata {
1033 struct qib_ctxtdata *rcd;
1034 unsigned subctxt;
1035 unsigned tidcursor;
1036 struct qib_user_sdma_queue *pq;
1037 int rec_cpu_num; /* for cpu affinity; -1 if none */
1038};
1039
1040extern struct list_head qib_dev_list;
1041extern spinlock_t qib_devs_lock;
1042extern struct qib_devdata *qib_lookup(int unit);
1043extern u32 qib_cpulist_count;
1044extern unsigned long *qib_cpulist;
1045
1046extern unsigned qib_wc_pat;
1047int qib_init(struct qib_devdata *, int);
1048int init_chip_wc_pat(struct qib_devdata *dd, u32);
1049int qib_enable_wc(struct qib_devdata *dd);
1050void qib_disable_wc(struct qib_devdata *dd);
1051int qib_count_units(int *npresentp, int *nupp);
1052int qib_count_active_units(void);
1053
1054int qib_cdev_init(int minor, const char *name,
1055 const struct file_operations *fops,
1056 struct cdev **cdevp, struct device **devp);
1057void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
1058int qib_dev_init(void);
1059void qib_dev_cleanup(void);
1060
1061int qib_diag_add(struct qib_devdata *);
1062void qib_diag_remove(struct qib_devdata *);
1063void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
1064void qib_sdma_update_tail(struct qib_pportdata *, u16); /* hold sdma_lock */
1065
1066int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
1067void qib_bad_intrstatus(struct qib_devdata *);
1068void qib_handle_urcv(struct qib_devdata *, u64);
1069
1070/* clean up any per-chip chip-specific stuff */
1071void qib_chip_cleanup(struct qib_devdata *);
1072/* clean up any chip type-specific stuff */
1073void qib_chip_done(void);
1074
1075/* check to see if we have to force ordering for write combining */
1076int qib_unordered_wc(void);
1077void qib_pio_copy(void __iomem *to, const void *from, size_t count);
1078
1079void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
1080int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
1081void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
1082void qib_cancel_sends(struct qib_pportdata *);
1083
1084int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
1085int qib_setup_eagerbufs(struct qib_ctxtdata *);
1086void qib_set_ctxtcnt(struct qib_devdata *);
1087int qib_create_ctxts(struct qib_devdata *dd);
1088struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32);
1089void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
1090void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
1091
1092u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
1093int qib_reset_device(int);
1094int qib_wait_linkstate(struct qib_pportdata *, u32, int);
1095int qib_set_linkstate(struct qib_pportdata *, u8);
1096int qib_set_mtu(struct qib_pportdata *, u16);
1097int qib_set_lid(struct qib_pportdata *, u32, u8);
1098void qib_hol_down(struct qib_pportdata *);
1099void qib_hol_init(struct qib_pportdata *);
1100void qib_hol_up(struct qib_pportdata *);
1101void qib_hol_event(unsigned long);
1102void qib_disable_after_error(struct qib_devdata *);
1103int qib_set_uevent_bits(struct qib_pportdata *, const int);
1104
1105/* for use in system calls, where we want to know device type, etc. */
1106#define ctxt_fp(fp) \
1107 (((struct qib_filedata *)(fp)->private_data)->rcd)
1108#define subctxt_fp(fp) \
1109 (((struct qib_filedata *)(fp)->private_data)->subctxt)
1110#define tidcursor_fp(fp) \
1111 (((struct qib_filedata *)(fp)->private_data)->tidcursor)
1112#define user_sdma_queue_fp(fp) \
1113 (((struct qib_filedata *)(fp)->private_data)->pq)
1114
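/*
 * Editor's usage sketch (hypothetical helper, not in the source): each
 * macro above just dereferences fp->private_data, which the driver
 * points at a struct qib_filedata when the device file is opened.
 * Returning NULL when no context is bound yet might look like:
 *
 *	static struct qib_user_sdma_queue *example_get_pq(struct file *fp)
 *	{
 *		return ctxt_fp(fp) ? user_sdma_queue_fp(fp) : NULL;
 *	}
 */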
1115static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
1116{
1117 return ppd->dd;
1118}
1119
1120static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
1121{
1122 return container_of(dev, struct qib_devdata, verbs_dev);
1123}
1124
1125static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
1126{
1127 return dd_from_dev(to_idev(ibdev));
1128}
1129
1130static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
1131{
1132 return container_of(ibp, struct qib_pportdata, ibport_data);
1133}
1134
1135static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
1136{
1137 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1138 unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
1139
1140 WARN_ON(pidx >= dd->num_pports);
1141 return &dd->pport[pidx].ibport_data;
1142}
1143
1144/*
1145 * values for dd->flags (_device_ related flags)
1146 */
1147#define QIB_HAS_LINK_LATENCY 0x1 /* supports link latency (IB 1.2) */
1148#define QIB_INITTED 0x2 /* chip and driver up and initted */
1149#define QIB_DOING_RESET 0x4 /* in the middle of doing chip reset */
1150#define QIB_PRESENT 0x8 /* chip accesses can be done */
1151#define QIB_PIO_FLUSH_WC 0x10 /* Needs Write combining flush for PIO */
1152#define QIB_HAS_THRESH_UPDATE 0x40
1153#define QIB_HAS_SDMA_TIMEOUT 0x80
1154#define QIB_USE_SPCL_TRIG 0x100 /* SpecialTrigger launch enabled */
1155#define QIB_NODMA_RTAIL 0x200 /* rcvhdrtail register DMA disabled */
1156#define QIB_HAS_INTX 0x800 /* Supports INTx interrupts */
1157#define QIB_HAS_SEND_DMA 0x1000 /* Supports Send DMA */
1158#define QIB_HAS_VLSUPP 0x2000 /* Supports multiple VLs; PBC different */
1159#define QIB_HAS_HDRSUPP 0x4000 /* Supports header suppression */
1160#define QIB_BADINTR 0x8000 /* severe interrupt problems */
1161#define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
1162#define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
1163
1164/*
1165 * values for ppd->lflags (_ib_port_ related flags)
1166 */
1167#define QIBL_LINKV 0x1 /* IB link state valid */
1168#define QIBL_LINKDOWN 0x8 /* IB link is down */
1169#define QIBL_LINKINIT 0x10 /* IB link level is up */
1170#define QIBL_LINKARMED 0x20 /* IB link is ARMED */
1171#define QIBL_LINKACTIVE 0x40 /* IB link is ACTIVE */
1172/* leave a gap for more IB-link state */
1173#define QIBL_IB_AUTONEG_INPROG 0x1000 /* non-IBTA DDR/QDR neg active */
1174#define QIBL_IB_AUTONEG_FAILED 0x2000 /* non-IBTA DDR/QDR neg failed */
1175#define QIBL_IB_LINK_DISABLED 0x4000 /* Linkdown-disable forced,
1176 * Do not try to bring up */
1177#define QIBL_IB_FORCE_NOTIFY 0x8000 /* force notify on next ib change */
1178
1179/* IB dword length mask in PBC (lower 11 bits); same for all chips */
1180#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
1181
1182
1183/* ctxt_flag bit offsets */
1184 /* waiting for a packet to arrive */
1185#define QIB_CTXT_WAITING_RCV 2
1186 /* master has not finished initializing */
1187#define QIB_CTXT_MASTER_UNINIT 4
1188 /* waiting for an urgent packet to arrive */
1189#define QIB_CTXT_WAITING_URG 5
1190
1191/* free up any allocated data at close */
1192void qib_free_data(struct qib_ctxtdata *dd);
1193void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
1194 u32, struct qib_ctxtdata *);
1195struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
1196 const struct pci_device_id *);
1197struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
1198 const struct pci_device_id *);
1199struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
1200 const struct pci_device_id *);
1201void qib_free_devdata(struct qib_devdata *);
1202struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
1203
1204#define QIB_TWSI_NO_DEV 0xFF
1205/* Below qib_twsi_ functions must be called with eep_lock held */
1206int qib_twsi_reset(struct qib_devdata *dd);
1207int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
1208 int len);
1209int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
1210 const void *buffer, int len);
1211void qib_get_eeprom_info(struct qib_devdata *);
1212int qib_update_eeprom_log(struct qib_devdata *dd);
1213void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
1214void qib_dump_lookup_output_queue(struct qib_devdata *);
1215void qib_force_pio_avail_update(struct qib_devdata *);
1216void qib_clear_symerror_on_linkup(unsigned long opaque);
1217
1218/*
1219 * Set LED override; only the two LSBs have "public" meaning, but
1220 * any non-zero value substitutes them for the Link and LinkTrain
1221 * LED states.
1222 */
1223#define QIB_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
1224#define QIB_LED_LOG 2 /* Logical (link) YELLOW LED */
1225void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
1226
1227/* send dma routines */
1228int qib_setup_sdma(struct qib_pportdata *);
1229void qib_teardown_sdma(struct qib_pportdata *);
1230void __qib_sdma_intr(struct qib_pportdata *);
1231void qib_sdma_intr(struct qib_pportdata *);
1232int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
1233 u32, struct qib_verbs_txreq *);
1234/* ppd->sdma_lock should be locked before calling this. */
1235int qib_sdma_make_progress(struct qib_pportdata *dd);
1236
1237/* must be called under qib_sdma_lock */
1238static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
1239{
1240 return ppd->sdma_descq_cnt -
1241 (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
1242}
1243
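/*
 * Editor's worked example: with sdma_descq_cnt == 256, sdma_descq_added
 * == 300 and sdma_descq_removed == 100, 200 descriptors are outstanding,
 * so freecnt == 256 - 200 - 1 == 55.  The "- 1" keeps one slot unused so
 * a completely full ring is distinguishable from an empty one.
 */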
1244static inline int __qib_sdma_running(struct qib_pportdata *ppd)
1245{
1246 return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
1247}
1248int qib_sdma_running(struct qib_pportdata *);
1249
1250void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
1251void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
1252
1253/*
1254 * number of words used for the protocol header if not set by qib_userinit()
1255 */
1256#define QIB_DFLT_RCVHDRSIZE 9
1257
1258/*
1259 * We need to be able to handle an IB header of at least 24 dwords.
1260 * We need the rcvhdrq large enough to handle largest IB header, but
1261 * still have room for a 2KB MTU standard IB packet.
1262 * Additionally, some processor/memory controller combinations
1263 * benefit quite strongly from having the DMA'ed data be cacheline
1264 * aligned and a cacheline multiple, so we set the size to 32 dwords
1265 * (2 64-byte primary cachelines for pretty much all processors of
1266 * interest). The alignment hurts nothing, other than using somewhat
1267 * more memory.
1268 */
1269#define QIB_RCVHDR_ENTSIZE 32
1270
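/*
 * Editor's note on the arithmetic above: 32 dwords * 4 bytes == 128
 * bytes per rcvhdrq entry, i.e. exactly two 64-byte cachelines, and
 * comfortably more than the 24-dword worst-case IB header.
 */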
1271int qib_get_user_pages(unsigned long, size_t, struct page **);
1272void qib_release_user_pages(struct page **, size_t);
1273int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
1274int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
1275u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
1276void qib_sendbuf_done(struct qib_devdata *, unsigned);
1277
1278static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
1279{
1280 *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
1281}
1282
1283static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
1284{
1285 /*
1286 * volatile because it's a DMA target from the chip, routine is
1287 * inlined, and don't want register caching or reordering.
1288 */
1289 return (u32) le64_to_cpu(
1290 *((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
1291}
1292
1293static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
1294{
1295 const struct qib_devdata *dd = rcd->dd;
1296 u32 hdrqtail;
1297
1298 if (dd->flags & QIB_NODMA_RTAIL) {
1299 __le32 *rhf_addr;
1300 u32 seq;
1301
1302 rhf_addr = (__le32 *) rcd->rcvhdrq +
1303 rcd->head + dd->rhf_offset;
1304 seq = qib_hdrget_seq(rhf_addr);
1305 hdrqtail = rcd->head;
1306 if (seq == rcd->seq_cnt)
1307 hdrqtail++;
1308 } else
1309 hdrqtail = qib_get_rcvhdrtail(rcd);
1310
1311 return hdrqtail;
1312}
1313
1314/*
1315 * sysfs interface.
1316 */
1317
1318extern const char ib_qib_version[];
1319
1320int qib_device_create(struct qib_devdata *);
1321void qib_device_remove(struct qib_devdata *);
1322
1323int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
1324 struct kobject *kobj);
1325int qib_verbs_register_sysfs(struct qib_devdata *);
1326void qib_verbs_unregister_sysfs(struct qib_devdata *);
1327/* Hook for sysfs read of QSFP */
1328extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
1329
1330int __init qib_init_qibfs(void);
1331int __exit qib_exit_qibfs(void);
1332
1333int qibfs_add(struct qib_devdata *);
1334int qibfs_remove(struct qib_devdata *);
1335
1336int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
1337int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
1338 const struct pci_device_id *);
1339void qib_pcie_ddcleanup(struct qib_devdata *);
1340int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *);
1341int qib_reinit_intr(struct qib_devdata *);
1342void qib_enable_intx(struct pci_dev *);
1343void qib_nomsi(struct qib_devdata *);
1344void qib_nomsix(struct qib_devdata *);
1345void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
1346void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
1347
1348/*
1349 * dma_addr wrappers - all 0's invalid for hw
1350 */
1351dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
1352 size_t, int);
1353const char *qib_get_unit_name(int unit);
1354
1355/*
1356 * Flush write combining store buffers (if present) and perform a write
1357 * barrier.
1358 */
1359#if defined(CONFIG_X86_64)
1360#define qib_flush_wc() asm volatile("sfence" : : : "memory")
1361#else
1362#define qib_flush_wc() wmb() /* no reorder around wc flush */
1363#endif
1364
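/*
 * Editor's sketch of the intended pairing (identifiers from the
 * declarations above; exact call sites vary): copy the packet into the
 * chip's PIO buffer, then flush so the WC stores land in order:
 *
 *	qib_pio_copy(piobuf, hdr, hdrwords);
 *	qib_flush_wc();
 */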
1365/* global module parameter variables */
1366extern unsigned qib_ibmtu;
1367extern ushort qib_cfgctxts;
1368extern ushort qib_num_cfg_vls;
1369extern ushort qib_mini_init; /* If set, do few (ideally 0) writes to chip */
1370extern unsigned qib_n_krcv_queues;
1371extern unsigned qib_sdma_fetch_arb;
1372extern unsigned qib_compat_ddr_negotiate;
1373extern int qib_special_trigger;
1374
1375extern struct mutex qib_mutex;
1376
1377/* Number of seconds before our card status check... */
1378#define STATUS_TIMEOUT 60
1379
1380#define QIB_DRV_NAME "ib_qib"
1381#define QIB_USER_MINOR_BASE 0
1382#define QIB_TRACE_MINOR 127
1383#define QIB_DIAGPKT_MINOR 128
1384#define QIB_DIAG_MINOR_BASE 129
1385#define QIB_NMINORS 255
1386
1387#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
1388#define PCI_VENDOR_ID_QLOGIC 0x1077
1389#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
1390#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
1391#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
1392
1393/*
1394 * qib_early_err is used (only!) to print early errors before devdata is
1395 * allocated, or when dd->pcidev may not be valid, and at the tail end of
1396 * cleanup when devdata may have been freed, etc. qib_dev_porterr is
1397 * the same as qib_dev_err, but is used when the message really needs
1398 * the IB port# to be definitive as to what's happening.
1399 * All of these go to the trace log, and the trace log entry is done
1400 * first to avoid possible serial port delays from printk.
1401 */
1402#define qib_early_err(dev, fmt, ...) \
1403 do { \
1404 dev_info(dev, KERN_ERR QIB_DRV_NAME ": " fmt, ##__VA_ARGS__); \
1405 } while (0)
1406
1407#define qib_dev_err(dd, fmt, ...) \
1408 do { \
1409 dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
1410 qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \
1411 } while (0)
1412
1413#define qib_dev_porterr(dd, port, fmt, ...) \
1414 do { \
1415 dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
1416 qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
1417 ##__VA_ARGS__); \
1418 } while (0)
1419
1420#define qib_devinfo(pcidev, fmt, ...) \
1421 do { \
1422 dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \
1423 } while (0)
1424
1425/*
1426 * this is used for formatting hw error messages...
1427 */
1428struct qib_hwerror_msgs {
1429 u64 mask;
1430 const char *msg;
1431};
1432
1433#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
1434
1435/* in qib_intr.c... */
1436void qib_format_hwerrors(u64 hwerrs,
1437 const struct qib_hwerror_msgs *hwerrmsgs,
1438 size_t nhwerrmsgs, char *msg, size_t lmsg);
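/*
 * Editor's sketch (hypothetical table): a chip-specific file builds an
 * array of mask/message pairs with QLOGIC_IB_HWE_MSG() and passes it,
 * along with the raw hwerr bits, to qib_format_hwerrors():
 *
 *	static const struct qib_hwerror_msgs example_msgs[] = {
 *		QLOGIC_IB_HWE_MSG(1ULL << 63, "SPC parity error"),
 *	};
 *	char msg[128];
 *
 *	qib_format_hwerrors(hwerrs, example_msgs,
 *			    ARRAY_SIZE(example_msgs), msg, sizeof(msg));
 */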
1439#endif /* _QIB_KERNEL_H */
diff --git a/drivers/infiniband/hw/qib/qib_6120_regs.h b/drivers/infiniband/hw/qib/qib_6120_regs.h
new file mode 100644
index 000000000000..e16cb6f7de2c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_6120_regs.h
@@ -0,0 +1,977 @@
1/*
2 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
34
35#define QIB_6120_Revision_OFFS 0x0
36#define QIB_6120_Revision_R_Simulator_LSB 0x3F
37#define QIB_6120_Revision_R_Simulator_RMASK 0x1
38#define QIB_6120_Revision_Reserved_LSB 0x28
39#define QIB_6120_Revision_Reserved_RMASK 0x7FFFFF
40#define QIB_6120_Revision_BoardID_LSB 0x20
41#define QIB_6120_Revision_BoardID_RMASK 0xFF
42#define QIB_6120_Revision_R_SW_LSB 0x18
43#define QIB_6120_Revision_R_SW_RMASK 0xFF
44#define QIB_6120_Revision_R_Arch_LSB 0x10
45#define QIB_6120_Revision_R_Arch_RMASK 0xFF
46#define QIB_6120_Revision_R_ChipRevMajor_LSB 0x8
47#define QIB_6120_Revision_R_ChipRevMajor_RMASK 0xFF
48#define QIB_6120_Revision_R_ChipRevMinor_LSB 0x0
49#define QIB_6120_Revision_R_ChipRevMinor_RMASK 0xFF
50
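/*
 * Editor's note: each field in this file is described by an _LSB bit
 * position and a right-justified _RMASK, applied after reading the
 * register; e.g. pulling the board ID out of a Revision readout:
 *
 *	boardid = (rev >> QIB_6120_Revision_BoardID_LSB) &
 *		  QIB_6120_Revision_BoardID_RMASK;
 */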
51#define QIB_6120_Control_OFFS 0x8
52#define QIB_6120_Control_TxLatency_LSB 0x4
53#define QIB_6120_Control_TxLatency_RMASK 0x1
54#define QIB_6120_Control_PCIERetryBufDiagEn_LSB 0x3
55#define QIB_6120_Control_PCIERetryBufDiagEn_RMASK 0x1
56#define QIB_6120_Control_LinkEn_LSB 0x2
57#define QIB_6120_Control_LinkEn_RMASK 0x1
58#define QIB_6120_Control_FreezeMode_LSB 0x1
59#define QIB_6120_Control_FreezeMode_RMASK 0x1
60#define QIB_6120_Control_SyncReset_LSB 0x0
61#define QIB_6120_Control_SyncReset_RMASK 0x1
62
63#define QIB_6120_PageAlign_OFFS 0x10
64
65#define QIB_6120_PortCnt_OFFS 0x18
66
67#define QIB_6120_SendRegBase_OFFS 0x30
68
69#define QIB_6120_UserRegBase_OFFS 0x38
70
71#define QIB_6120_CntrRegBase_OFFS 0x40
72
73#define QIB_6120_Scratch_OFFS 0x48
74#define QIB_6120_Scratch_TopHalf_LSB 0x20
75#define QIB_6120_Scratch_TopHalf_RMASK 0xFFFFFFFF
76#define QIB_6120_Scratch_BottomHalf_LSB 0x0
77#define QIB_6120_Scratch_BottomHalf_RMASK 0xFFFFFFFF
78
79#define QIB_6120_IntBlocked_OFFS 0x60
80#define QIB_6120_IntBlocked_ErrorIntBlocked_LSB 0x1F
81#define QIB_6120_IntBlocked_ErrorIntBlocked_RMASK 0x1
82#define QIB_6120_IntBlocked_PioSetIntBlocked_LSB 0x1E
83#define QIB_6120_IntBlocked_PioSetIntBlocked_RMASK 0x1
84#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_LSB 0x1D
85#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_RMASK 0x1
86#define QIB_6120_IntBlocked_assertGPIOIntBlocked_LSB 0x1C
87#define QIB_6120_IntBlocked_assertGPIOIntBlocked_RMASK 0x1
88#define QIB_6120_IntBlocked_Reserved_LSB 0xF
89#define QIB_6120_IntBlocked_Reserved_RMASK 0x1FFF
90#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_LSB 0x10
91#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_RMASK 0x1
92#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_LSB 0xF
93#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_RMASK 0x1
94#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_LSB 0xE
95#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_RMASK 0x1
96#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_LSB 0xD
97#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_RMASK 0x1
98#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_LSB 0xC
99#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_RMASK 0x1
100#define QIB_6120_IntBlocked_Reserved1_LSB 0x5
101#define QIB_6120_IntBlocked_Reserved1_RMASK 0x7F
102#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_LSB 0x4
103#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_RMASK 0x1
104#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_LSB 0x3
105#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_RMASK 0x1
106#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_LSB 0x2
107#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_RMASK 0x1
108#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_LSB 0x1
109#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_RMASK 0x1
110#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_LSB 0x0
111#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_RMASK 0x1
112
113#define QIB_6120_IntMask_OFFS 0x68
114#define QIB_6120_IntMask_ErrorIntMask_LSB 0x1F
115#define QIB_6120_IntMask_ErrorIntMask_RMASK 0x1
116#define QIB_6120_IntMask_PioSetIntMask_LSB 0x1E
117#define QIB_6120_IntMask_PioSetIntMask_RMASK 0x1
118#define QIB_6120_IntMask_PioBufAvailIntMask_LSB 0x1D
119#define QIB_6120_IntMask_PioBufAvailIntMask_RMASK 0x1
120#define QIB_6120_IntMask_assertGPIOIntMask_LSB 0x1C
121#define QIB_6120_IntMask_assertGPIOIntMask_RMASK 0x1
122#define QIB_6120_IntMask_Reserved_LSB 0x11
123#define QIB_6120_IntMask_Reserved_RMASK 0x7FF
124#define QIB_6120_IntMask_RcvAvail4IntMask_LSB 0x10
125#define QIB_6120_IntMask_RcvAvail4IntMask_RMASK 0x1
126#define QIB_6120_IntMask_RcvAvail3IntMask_LSB 0xF
127#define QIB_6120_IntMask_RcvAvail3IntMask_RMASK 0x1
128#define QIB_6120_IntMask_RcvAvail2IntMask_LSB 0xE
129#define QIB_6120_IntMask_RcvAvail2IntMask_RMASK 0x1
130#define QIB_6120_IntMask_RcvAvail1IntMask_LSB 0xD
131#define QIB_6120_IntMask_RcvAvail1IntMask_RMASK 0x1
132#define QIB_6120_IntMask_RcvAvail0IntMask_LSB 0xC
133#define QIB_6120_IntMask_RcvAvail0IntMask_RMASK 0x1
134#define QIB_6120_IntMask_Reserved1_LSB 0x5
135#define QIB_6120_IntMask_Reserved1_RMASK 0x7F
136#define QIB_6120_IntMask_RcvUrg4IntMask_LSB 0x4
137#define QIB_6120_IntMask_RcvUrg4IntMask_RMASK 0x1
138#define QIB_6120_IntMask_RcvUrg3IntMask_LSB 0x3
139#define QIB_6120_IntMask_RcvUrg3IntMask_RMASK 0x1
140#define QIB_6120_IntMask_RcvUrg2IntMask_LSB 0x2
141#define QIB_6120_IntMask_RcvUrg2IntMask_RMASK 0x1
142#define QIB_6120_IntMask_RcvUrg1IntMask_LSB 0x1
143#define QIB_6120_IntMask_RcvUrg1IntMask_RMASK 0x1
144#define QIB_6120_IntMask_RcvUrg0IntMask_LSB 0x0
145#define QIB_6120_IntMask_RcvUrg0IntMask_RMASK 0x1
146
147#define QIB_6120_IntStatus_OFFS 0x70
148#define QIB_6120_IntStatus_Error_LSB 0x1F
149#define QIB_6120_IntStatus_Error_RMASK 0x1
150#define QIB_6120_IntStatus_PioSent_LSB 0x1E
151#define QIB_6120_IntStatus_PioSent_RMASK 0x1
152#define QIB_6120_IntStatus_PioBufAvail_LSB 0x1D
153#define QIB_6120_IntStatus_PioBufAvail_RMASK 0x1
154#define QIB_6120_IntStatus_assertGPIO_LSB 0x1C
155#define QIB_6120_IntStatus_assertGPIO_RMASK 0x1
156#define QIB_6120_IntStatus_Reserved_LSB 0xF
157#define QIB_6120_IntStatus_Reserved_RMASK 0x1FFF
158#define QIB_6120_IntStatus_RcvAvail4_LSB 0x10
159#define QIB_6120_IntStatus_RcvAvail4_RMASK 0x1
160#define QIB_6120_IntStatus_RcvAvail3_LSB 0xF
161#define QIB_6120_IntStatus_RcvAvail3_RMASK 0x1
162#define QIB_6120_IntStatus_RcvAvail2_LSB 0xE
163#define QIB_6120_IntStatus_RcvAvail2_RMASK 0x1
164#define QIB_6120_IntStatus_RcvAvail1_LSB 0xD
165#define QIB_6120_IntStatus_RcvAvail1_RMASK 0x1
166#define QIB_6120_IntStatus_RcvAvail0_LSB 0xC
167#define QIB_6120_IntStatus_RcvAvail0_RMASK 0x1
168#define QIB_6120_IntStatus_Reserved1_LSB 0x5
169#define QIB_6120_IntStatus_Reserved1_RMASK 0x7F
170#define QIB_6120_IntStatus_RcvUrg4_LSB 0x4
171#define QIB_6120_IntStatus_RcvUrg4_RMASK 0x1
172#define QIB_6120_IntStatus_RcvUrg3_LSB 0x3
173#define QIB_6120_IntStatus_RcvUrg3_RMASK 0x1
174#define QIB_6120_IntStatus_RcvUrg2_LSB 0x2
175#define QIB_6120_IntStatus_RcvUrg2_RMASK 0x1
176#define QIB_6120_IntStatus_RcvUrg1_LSB 0x1
177#define QIB_6120_IntStatus_RcvUrg1_RMASK 0x1
178#define QIB_6120_IntStatus_RcvUrg0_LSB 0x0
179#define QIB_6120_IntStatus_RcvUrg0_RMASK 0x1
180
181#define QIB_6120_IntClear_OFFS 0x78
182#define QIB_6120_IntClear_ErrorIntClear_LSB 0x1F
183#define QIB_6120_IntClear_ErrorIntClear_RMASK 0x1
184#define QIB_6120_IntClear_PioSetIntClear_LSB 0x1E
185#define QIB_6120_IntClear_PioSetIntClear_RMASK 0x1
186#define QIB_6120_IntClear_PioBufAvailIntClear_LSB 0x1D
187#define QIB_6120_IntClear_PioBufAvailIntClear_RMASK 0x1
188#define QIB_6120_IntClear_assertGPIOIntClear_LSB 0x1C
189#define QIB_6120_IntClear_assertGPIOIntClear_RMASK 0x1
190#define QIB_6120_IntClear_Reserved_LSB 0xF
191#define QIB_6120_IntClear_Reserved_RMASK 0x1FFF
192#define QIB_6120_IntClear_RcvAvail4IntClear_LSB 0x10
193#define QIB_6120_IntClear_RcvAvail4IntClear_RMASK 0x1
194#define QIB_6120_IntClear_RcvAvail3IntClear_LSB 0xF
195#define QIB_6120_IntClear_RcvAvail3IntClear_RMASK 0x1
196#define QIB_6120_IntClear_RcvAvail2IntClear_LSB 0xE
197#define QIB_6120_IntClear_RcvAvail2IntClear_RMASK 0x1
198#define QIB_6120_IntClear_RcvAvail1IntClear_LSB 0xD
199#define QIB_6120_IntClear_RcvAvail1IntClear_RMASK 0x1
200#define QIB_6120_IntClear_RcvAvail0IntClear_LSB 0xC
201#define QIB_6120_IntClear_RcvAvail0IntClear_RMASK 0x1
202#define QIB_6120_IntClear_Reserved1_LSB 0x5
203#define QIB_6120_IntClear_Reserved1_RMASK 0x7F
204#define QIB_6120_IntClear_RcvUrg4IntClear_LSB 0x4
205#define QIB_6120_IntClear_RcvUrg4IntClear_RMASK 0x1
206#define QIB_6120_IntClear_RcvUrg3IntClear_LSB 0x3
207#define QIB_6120_IntClear_RcvUrg3IntClear_RMASK 0x1
208#define QIB_6120_IntClear_RcvUrg2IntClear_LSB 0x2
209#define QIB_6120_IntClear_RcvUrg2IntClear_RMASK 0x1
210#define QIB_6120_IntClear_RcvUrg1IntClear_LSB 0x1
211#define QIB_6120_IntClear_RcvUrg1IntClear_RMASK 0x1
212#define QIB_6120_IntClear_RcvUrg0IntClear_LSB 0x0
213#define QIB_6120_IntClear_RcvUrg0IntClear_RMASK 0x1
214
215#define QIB_6120_ErrMask_OFFS 0x80
216#define QIB_6120_ErrMask_Reserved_LSB 0x34
217#define QIB_6120_ErrMask_Reserved_RMASK 0xFFF
218#define QIB_6120_ErrMask_HardwareErrMask_LSB 0x33
219#define QIB_6120_ErrMask_HardwareErrMask_RMASK 0x1
220#define QIB_6120_ErrMask_ResetNegatedMask_LSB 0x32
221#define QIB_6120_ErrMask_ResetNegatedMask_RMASK 0x1
222#define QIB_6120_ErrMask_InvalidAddrErrMask_LSB 0x31
223#define QIB_6120_ErrMask_InvalidAddrErrMask_RMASK 0x1
224#define QIB_6120_ErrMask_IBStatusChangedMask_LSB 0x30
225#define QIB_6120_ErrMask_IBStatusChangedMask_RMASK 0x1
226#define QIB_6120_ErrMask_Reserved1_LSB 0x26
227#define QIB_6120_ErrMask_Reserved1_RMASK 0x3FF
228#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
229#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
230#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
231#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
232#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
233#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
234#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
235#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
236#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
237#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
238#define QIB_6120_ErrMask_SendPktLenErrMask_LSB 0x20
239#define QIB_6120_ErrMask_SendPktLenErrMask_RMASK 0x1
240#define QIB_6120_ErrMask_SendUnderRunErrMask_LSB 0x1F
241#define QIB_6120_ErrMask_SendUnderRunErrMask_RMASK 0x1
242#define QIB_6120_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
243#define QIB_6120_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
244#define QIB_6120_ErrMask_SendMinPktLenErrMask_LSB 0x1D
245#define QIB_6120_ErrMask_SendMinPktLenErrMask_RMASK 0x1
246#define QIB_6120_ErrMask_Reserved2_LSB 0x12
247#define QIB_6120_ErrMask_Reserved2_RMASK 0x7FF
248#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
249#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
250#define QIB_6120_ErrMask_RcvHdrErrMask_LSB 0x10
251#define QIB_6120_ErrMask_RcvHdrErrMask_RMASK 0x1
252#define QIB_6120_ErrMask_RcvHdrLenErrMask_LSB 0xF
253#define QIB_6120_ErrMask_RcvHdrLenErrMask_RMASK 0x1
254#define QIB_6120_ErrMask_RcvBadTidErrMask_LSB 0xE
255#define QIB_6120_ErrMask_RcvBadTidErrMask_RMASK 0x1
256#define QIB_6120_ErrMask_RcvHdrFullErrMask_LSB 0xD
257#define QIB_6120_ErrMask_RcvHdrFullErrMask_RMASK 0x1
258#define QIB_6120_ErrMask_RcvEgrFullErrMask_LSB 0xC
259#define QIB_6120_ErrMask_RcvEgrFullErrMask_RMASK 0x1
260#define QIB_6120_ErrMask_RcvBadVersionErrMask_LSB 0xB
261#define QIB_6120_ErrMask_RcvBadVersionErrMask_RMASK 0x1
262#define QIB_6120_ErrMask_RcvIBFlowErrMask_LSB 0xA
263#define QIB_6120_ErrMask_RcvIBFlowErrMask_RMASK 0x1
264#define QIB_6120_ErrMask_RcvEBPErrMask_LSB 0x9
265#define QIB_6120_ErrMask_RcvEBPErrMask_RMASK 0x1
266#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
267#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
268#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
269#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
270#define QIB_6120_ErrMask_RcvShortPktLenErrMask_LSB 0x6
271#define QIB_6120_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
272#define QIB_6120_ErrMask_RcvLongPktLenErrMask_LSB 0x5
273#define QIB_6120_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
274#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
275#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
276#define QIB_6120_ErrMask_RcvMinPktLenErrMask_LSB 0x3
277#define QIB_6120_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
278#define QIB_6120_ErrMask_RcvICRCErrMask_LSB 0x2
279#define QIB_6120_ErrMask_RcvICRCErrMask_RMASK 0x1
280#define QIB_6120_ErrMask_RcvVCRCErrMask_LSB 0x1
281#define QIB_6120_ErrMask_RcvVCRCErrMask_RMASK 0x1
282#define QIB_6120_ErrMask_RcvFormatErrMask_LSB 0x0
283#define QIB_6120_ErrMask_RcvFormatErrMask_RMASK 0x1
284
285#define QIB_6120_ErrStatus_OFFS 0x88
286#define QIB_6120_ErrStatus_Reserved_LSB 0x34
287#define QIB_6120_ErrStatus_Reserved_RMASK 0xFFF
288#define QIB_6120_ErrStatus_HardwareErr_LSB 0x33
289#define QIB_6120_ErrStatus_HardwareErr_RMASK 0x1
290#define QIB_6120_ErrStatus_ResetNegated_LSB 0x32
291#define QIB_6120_ErrStatus_ResetNegated_RMASK 0x1
292#define QIB_6120_ErrStatus_InvalidAddrErr_LSB 0x31
293#define QIB_6120_ErrStatus_InvalidAddrErr_RMASK 0x1
294#define QIB_6120_ErrStatus_IBStatusChanged_LSB 0x30
295#define QIB_6120_ErrStatus_IBStatusChanged_RMASK 0x1
296#define QIB_6120_ErrStatus_Reserved1_LSB 0x26
297#define QIB_6120_ErrStatus_Reserved1_RMASK 0x3FF
298#define QIB_6120_ErrStatus_SendUnsupportedVLErr_LSB 0x25
299#define QIB_6120_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
300#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
301#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
302#define QIB_6120_ErrStatus_SendPioArmLaunchErr_LSB 0x23
303#define QIB_6120_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
304#define QIB_6120_ErrStatus_SendDroppedDataPktErr_LSB 0x22
305#define QIB_6120_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
306#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
307#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
308#define QIB_6120_ErrStatus_SendPktLenErr_LSB 0x20
309#define QIB_6120_ErrStatus_SendPktLenErr_RMASK 0x1
310#define QIB_6120_ErrStatus_SendUnderRunErr_LSB 0x1F
311#define QIB_6120_ErrStatus_SendUnderRunErr_RMASK 0x1
312#define QIB_6120_ErrStatus_SendMaxPktLenErr_LSB 0x1E
313#define QIB_6120_ErrStatus_SendMaxPktLenErr_RMASK 0x1
314#define QIB_6120_ErrStatus_SendMinPktLenErr_LSB 0x1D
315#define QIB_6120_ErrStatus_SendMinPktLenErr_RMASK 0x1
316#define QIB_6120_ErrStatus_Reserved2_LSB 0x12
317#define QIB_6120_ErrStatus_Reserved2_RMASK 0x7FF
318#define QIB_6120_ErrStatus_RcvIBLostLinkErr_LSB 0x11
319#define QIB_6120_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
320#define QIB_6120_ErrStatus_RcvHdrErr_LSB 0x10
321#define QIB_6120_ErrStatus_RcvHdrErr_RMASK 0x1
322#define QIB_6120_ErrStatus_RcvHdrLenErr_LSB 0xF
323#define QIB_6120_ErrStatus_RcvHdrLenErr_RMASK 0x1
324#define QIB_6120_ErrStatus_RcvBadTidErr_LSB 0xE
325#define QIB_6120_ErrStatus_RcvBadTidErr_RMASK 0x1
326#define QIB_6120_ErrStatus_RcvHdrFullErr_LSB 0xD
327#define QIB_6120_ErrStatus_RcvHdrFullErr_RMASK 0x1
328#define QIB_6120_ErrStatus_RcvEgrFullErr_LSB 0xC
329#define QIB_6120_ErrStatus_RcvEgrFullErr_RMASK 0x1
330#define QIB_6120_ErrStatus_RcvBadVersionErr_LSB 0xB
331#define QIB_6120_ErrStatus_RcvBadVersionErr_RMASK 0x1
332#define QIB_6120_ErrStatus_RcvIBFlowErr_LSB 0xA
333#define QIB_6120_ErrStatus_RcvIBFlowErr_RMASK 0x1
334#define QIB_6120_ErrStatus_RcvEBPErr_LSB 0x9
335#define QIB_6120_ErrStatus_RcvEBPErr_RMASK 0x1
336#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
337#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
338#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
339#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
340#define QIB_6120_ErrStatus_RcvShortPktLenErr_LSB 0x6
341#define QIB_6120_ErrStatus_RcvShortPktLenErr_RMASK 0x1
342#define QIB_6120_ErrStatus_RcvLongPktLenErr_LSB 0x5
343#define QIB_6120_ErrStatus_RcvLongPktLenErr_RMASK 0x1
344#define QIB_6120_ErrStatus_RcvMaxPktLenErr_LSB 0x4
345#define QIB_6120_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
346#define QIB_6120_ErrStatus_RcvMinPktLenErr_LSB 0x3
347#define QIB_6120_ErrStatus_RcvMinPktLenErr_RMASK 0x1
348#define QIB_6120_ErrStatus_RcvICRCErr_LSB 0x2
349#define QIB_6120_ErrStatus_RcvICRCErr_RMASK 0x1
350#define QIB_6120_ErrStatus_RcvVCRCErr_LSB 0x1
351#define QIB_6120_ErrStatus_RcvVCRCErr_RMASK 0x1
352#define QIB_6120_ErrStatus_RcvFormatErr_LSB 0x0
353#define QIB_6120_ErrStatus_RcvFormatErr_RMASK 0x1
354
355#define QIB_6120_ErrClear_OFFS 0x90
356#define QIB_6120_ErrClear_Reserved_LSB 0x34
357#define QIB_6120_ErrClear_Reserved_RMASK 0xFFF
358#define QIB_6120_ErrClear_HardwareErrClear_LSB 0x33
359#define QIB_6120_ErrClear_HardwareErrClear_RMASK 0x1
360#define QIB_6120_ErrClear_ResetNegatedClear_LSB 0x32
361#define QIB_6120_ErrClear_ResetNegatedClear_RMASK 0x1
362#define QIB_6120_ErrClear_InvalidAddrErrClear_LSB 0x31
363#define QIB_6120_ErrClear_InvalidAddrErrClear_RMASK 0x1
364#define QIB_6120_ErrClear_IBStatusChangedClear_LSB 0x30
365#define QIB_6120_ErrClear_IBStatusChangedClear_RMASK 0x1
366#define QIB_6120_ErrClear_Reserved1_LSB 0x26
367#define QIB_6120_ErrClear_Reserved1_RMASK 0x3FF
368#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
369#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
370#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
371#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
372#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
373#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
374#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
375#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
376#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
377#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
378#define QIB_6120_ErrClear_SendPktLenErrClear_LSB 0x20
379#define QIB_6120_ErrClear_SendPktLenErrClear_RMASK 0x1
380#define QIB_6120_ErrClear_SendUnderRunErrClear_LSB 0x1F
381#define QIB_6120_ErrClear_SendUnderRunErrClear_RMASK 0x1
382#define QIB_6120_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
383#define QIB_6120_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
384#define QIB_6120_ErrClear_SendMinPktLenErrClear_LSB 0x1D
385#define QIB_6120_ErrClear_SendMinPktLenErrClear_RMASK 0x1
386#define QIB_6120_ErrClear_Reserved2_LSB 0x12
387#define QIB_6120_ErrClear_Reserved2_RMASK 0x7FF
388#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
389#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
390#define QIB_6120_ErrClear_RcvHdrErrClear_LSB 0x10
391#define QIB_6120_ErrClear_RcvHdrErrClear_RMASK 0x1
392#define QIB_6120_ErrClear_RcvHdrLenErrClear_LSB 0xF
393#define QIB_6120_ErrClear_RcvHdrLenErrClear_RMASK 0x1
394#define QIB_6120_ErrClear_RcvBadTidErrClear_LSB 0xE
395#define QIB_6120_ErrClear_RcvBadTidErrClear_RMASK 0x1
396#define QIB_6120_ErrClear_RcvHdrFullErrClear_LSB 0xD
397#define QIB_6120_ErrClear_RcvHdrFullErrClear_RMASK 0x1
398#define QIB_6120_ErrClear_RcvEgrFullErrClear_LSB 0xC
399#define QIB_6120_ErrClear_RcvEgrFullErrClear_RMASK 0x1
400#define QIB_6120_ErrClear_RcvBadVersionErrClear_LSB 0xB
401#define QIB_6120_ErrClear_RcvBadVersionErrClear_RMASK 0x1
402#define QIB_6120_ErrClear_RcvIBFlowErrClear_LSB 0xA
403#define QIB_6120_ErrClear_RcvIBFlowErrClear_RMASK 0x1
404#define QIB_6120_ErrClear_RcvEBPErrClear_LSB 0x9
405#define QIB_6120_ErrClear_RcvEBPErrClear_RMASK 0x1
406#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
407#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
408#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
409#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
410#define QIB_6120_ErrClear_RcvShortPktLenErrClear_LSB 0x6
411#define QIB_6120_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
412#define QIB_6120_ErrClear_RcvLongPktLenErrClear_LSB 0x5
413#define QIB_6120_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
414#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
415#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
416#define QIB_6120_ErrClear_RcvMinPktLenErrClear_LSB 0x3
417#define QIB_6120_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
418#define QIB_6120_ErrClear_RcvICRCErrClear_LSB 0x2
419#define QIB_6120_ErrClear_RcvICRCErrClear_RMASK 0x1
420#define QIB_6120_ErrClear_RcvVCRCErrClear_LSB 0x1
421#define QIB_6120_ErrClear_RcvVCRCErrClear_RMASK 0x1
422#define QIB_6120_ErrClear_RcvFormatErrClear_LSB 0x0
423#define QIB_6120_ErrClear_RcvFormatErrClear_RMASK 0x1
424
425#define QIB_6120_HwErrMask_OFFS 0x98
426#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
427#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
428#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
429#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
430#define QIB_6120_HwErrMask_Reserved_LSB 0x3D
431#define QIB_6120_HwErrMask_Reserved_RMASK 0x1
432#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
433#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
434#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x3B
435#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
436#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x3A
437#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
438#define QIB_6120_HwErrMask_Reserved1_LSB 0x39
439#define QIB_6120_HwErrMask_Reserved1_RMASK 0x1
440#define QIB_6120_HwErrMask_IBPLLrfSlipMask_LSB 0x38
441#define QIB_6120_HwErrMask_IBPLLrfSlipMask_RMASK 0x1
442#define QIB_6120_HwErrMask_IBPLLfbSlipMask_LSB 0x37
443#define QIB_6120_HwErrMask_IBPLLfbSlipMask_RMASK 0x1
444#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
445#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
446#define QIB_6120_HwErrMask_Reserved2_LSB 0x33
447#define QIB_6120_HwErrMask_Reserved2_RMASK 0x7
448#define QIB_6120_HwErrMask_RXEMemParityErrMask_LSB 0x2C
449#define QIB_6120_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
450#define QIB_6120_HwErrMask_TXEMemParityErrMask_LSB 0x28
451#define QIB_6120_HwErrMask_TXEMemParityErrMask_RMASK 0xF
452#define QIB_6120_HwErrMask_Reserved3_LSB 0x22
453#define QIB_6120_HwErrMask_Reserved3_RMASK 0x3F
454#define QIB_6120_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
455#define QIB_6120_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
456#define QIB_6120_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
457#define QIB_6120_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
458#define QIB_6120_HwErrMask_PoisonedTLPMask_LSB 0x1D
459#define QIB_6120_HwErrMask_PoisonedTLPMask_RMASK 0x1
460#define QIB_6120_HwErrMask_Reserved4_LSB 0x6
461#define QIB_6120_HwErrMask_Reserved4_RMASK 0x7FFFFF
462#define QIB_6120_HwErrMask_PCIeMemParityErrMask_LSB 0x0
463#define QIB_6120_HwErrMask_PCIeMemParityErrMask_RMASK 0x3F
464
465#define QIB_6120_HwErrStatus_OFFS 0xA0
466#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
467#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
468#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
469#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
470#define QIB_6120_HwErrStatus_Reserved_LSB 0x3D
471#define QIB_6120_HwErrStatus_Reserved_RMASK 0x1
472#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
473#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
474#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x3B
475#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
476#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x3A
477#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
478#define QIB_6120_HwErrStatus_Reserved1_LSB 0x39
479#define QIB_6120_HwErrStatus_Reserved1_RMASK 0x1
480#define QIB_6120_HwErrStatus_IBPLLrfSlip_LSB 0x38
481#define QIB_6120_HwErrStatus_IBPLLrfSlip_RMASK 0x1
482#define QIB_6120_HwErrStatus_IBPLLfbSlip_LSB 0x37
483#define QIB_6120_HwErrStatus_IBPLLfbSlip_RMASK 0x1
484#define QIB_6120_HwErrStatus_PowerOnBISTFailed_LSB 0x36
485#define QIB_6120_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
486#define QIB_6120_HwErrStatus_Reserved2_LSB 0x33
487#define QIB_6120_HwErrStatus_Reserved2_RMASK 0x7
488#define QIB_6120_HwErrStatus_RXEMemParity_LSB 0x2C
489#define QIB_6120_HwErrStatus_RXEMemParity_RMASK 0x7F
490#define QIB_6120_HwErrStatus_TXEMemParity_LSB 0x28
491#define QIB_6120_HwErrStatus_TXEMemParity_RMASK 0xF
492#define QIB_6120_HwErrStatus_Reserved3_LSB 0x22
493#define QIB_6120_HwErrStatus_Reserved3_RMASK 0x3F
494#define QIB_6120_HwErrStatus_PCIeBusParity_LSB 0x1F
495#define QIB_6120_HwErrStatus_PCIeBusParity_RMASK 0x7
496#define QIB_6120_HwErrStatus_PcieCplTimeout_LSB 0x1E
497#define QIB_6120_HwErrStatus_PcieCplTimeout_RMASK 0x1
498#define QIB_6120_HwErrStatus_PoisenedTLP_LSB 0x1D
499#define QIB_6120_HwErrStatus_PoisenedTLP_RMASK 0x1
500#define QIB_6120_HwErrStatus_Reserved4_LSB 0x6
501#define QIB_6120_HwErrStatus_Reserved4_RMASK 0x7FFFFF
502#define QIB_6120_HwErrStatus_PCIeMemParity_LSB 0x0
503#define QIB_6120_HwErrStatus_PCIeMemParity_RMASK 0x3F
504
505#define QIB_6120_HwErrClear_OFFS 0xA8
506#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
507#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
508#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
509#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
510#define QIB_6120_HwErrClear_Reserved_LSB 0x3D
511#define QIB_6120_HwErrClear_Reserved_RMASK 0x1
512#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
513#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
514#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x3B
515#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
516#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x3A
517#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
518#define QIB_6120_HwErrClear_Reserved1_LSB 0x39
519#define QIB_6120_HwErrClear_Reserved1_RMASK 0x1
520#define QIB_6120_HwErrClear_IBPLLrfSlipClear_LSB 0x38
521#define QIB_6120_HwErrClear_IBPLLrfSlipClear_RMASK 0x1
522#define QIB_6120_HwErrClear_IBPLLfbSlipClear_LSB 0x37
523#define QIB_6120_HwErrClear_IBPLLfbSlipClear_RMASK 0x1
524#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
525#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
526#define QIB_6120_HwErrClear_Reserved2_LSB 0x33
527#define QIB_6120_HwErrClear_Reserved2_RMASK 0x7
528#define QIB_6120_HwErrClear_RXEMemParityClear_LSB 0x2C
529#define QIB_6120_HwErrClear_RXEMemParityClear_RMASK 0x7F
530#define QIB_6120_HwErrClear_TXEMemParityClear_LSB 0x28
531#define QIB_6120_HwErrClear_TXEMemParityClear_RMASK 0xF
532#define QIB_6120_HwErrClear_Reserved3_LSB 0x22
533#define QIB_6120_HwErrClear_Reserved3_RMASK 0x3F
534#define QIB_6120_HwErrClear_PCIeBusParityClr_LSB 0x1F
535#define QIB_6120_HwErrClear_PCIeBusParityClr_RMASK 0x7
536#define QIB_6120_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
537#define QIB_6120_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
538#define QIB_6120_HwErrClear_PoisonedTLPClear_LSB 0x1D
539#define QIB_6120_HwErrClear_PoisonedTLPClear_RMASK 0x1
540#define QIB_6120_HwErrClear_Reserved4_LSB 0x6
541#define QIB_6120_HwErrClear_Reserved4_RMASK 0x7FFFFF
542#define QIB_6120_HwErrClear_PCIeMemParityClr_LSB 0x0
543#define QIB_6120_HwErrClear_PCIeMemParityClr_RMASK 0x3F
544
545#define QIB_6120_HwDiagCtrl_OFFS 0xB0
546#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
547#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
548#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
549#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
550#define QIB_6120_HwDiagCtrl_CounterWrEnable_LSB 0x3D
551#define QIB_6120_HwDiagCtrl_CounterWrEnable_RMASK 0x1
552#define QIB_6120_HwDiagCtrl_CounterDisable_LSB 0x3C
553#define QIB_6120_HwDiagCtrl_CounterDisable_RMASK 0x1
554#define QIB_6120_HwDiagCtrl_Reserved_LSB 0x33
555#define QIB_6120_HwDiagCtrl_Reserved_RMASK 0x1FF
556#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
557#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
558#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
559#define QIB_6120_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
560#define QIB_6120_HwDiagCtrl_Reserved1_LSB 0x23
561#define QIB_6120_HwDiagCtrl_Reserved1_RMASK 0x1F
562#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
563#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
564#define QIB_6120_HwDiagCtrl_Reserved2_LSB 0x6
565#define QIB_6120_HwDiagCtrl_Reserved2_RMASK 0x1FFFFFF
566#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
567#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_RMASK 0x3F
568
569#define QIB_6120_IBCStatus_OFFS 0xC0
570#define QIB_6120_IBCStatus_TxCreditOk_LSB 0x1F
571#define QIB_6120_IBCStatus_TxCreditOk_RMASK 0x1
572#define QIB_6120_IBCStatus_TxReady_LSB 0x1E
573#define QIB_6120_IBCStatus_TxReady_RMASK 0x1
574#define QIB_6120_IBCStatus_Reserved_LSB 0x7
575#define QIB_6120_IBCStatus_Reserved_RMASK 0x7FFFFF
576#define QIB_6120_IBCStatus_LinkState_LSB 0x4
577#define QIB_6120_IBCStatus_LinkState_RMASK 0x7
578#define QIB_6120_IBCStatus_LinkTrainingState_LSB 0x0
579#define QIB_6120_IBCStatus_LinkTrainingState_RMASK 0xF
580
581#define QIB_6120_IBCCtrl_OFFS 0xC8
582#define QIB_6120_IBCCtrl_Loopback_LSB 0x3F
583#define QIB_6120_IBCCtrl_Loopback_RMASK 0x1
584#define QIB_6120_IBCCtrl_LinkDownDefaultState_LSB 0x3E
585#define QIB_6120_IBCCtrl_LinkDownDefaultState_RMASK 0x1
586#define QIB_6120_IBCCtrl_Reserved_LSB 0x2B
587#define QIB_6120_IBCCtrl_Reserved_RMASK 0x7FFFF
588#define QIB_6120_IBCCtrl_CreditScale_LSB 0x28
589#define QIB_6120_IBCCtrl_CreditScale_RMASK 0x7
590#define QIB_6120_IBCCtrl_OverrunThreshold_LSB 0x24
591#define QIB_6120_IBCCtrl_OverrunThreshold_RMASK 0xF
592#define QIB_6120_IBCCtrl_PhyerrThreshold_LSB 0x20
593#define QIB_6120_IBCCtrl_PhyerrThreshold_RMASK 0xF
594#define QIB_6120_IBCCtrl_Reserved1_LSB 0x1F
595#define QIB_6120_IBCCtrl_Reserved1_RMASK 0x1
596#define QIB_6120_IBCCtrl_MaxPktLen_LSB 0x14
597#define QIB_6120_IBCCtrl_MaxPktLen_RMASK 0x7FF
598#define QIB_6120_IBCCtrl_LinkCmd_LSB 0x12
599#define QIB_6120_IBCCtrl_LinkCmd_RMASK 0x3
600#define QIB_6120_IBCCtrl_LinkInitCmd_LSB 0x10
601#define QIB_6120_IBCCtrl_LinkInitCmd_RMASK 0x3
602#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
603#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
604#define QIB_6120_IBCCtrl_FlowCtrlPeriod_LSB 0x0
605#define QIB_6120_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
606
607#define QIB_6120_EXTStatus_OFFS 0xD0
608#define QIB_6120_EXTStatus_GPIOIn_LSB 0x30
609#define QIB_6120_EXTStatus_GPIOIn_RMASK 0xFFFF
610#define QIB_6120_EXTStatus_Reserved_LSB 0x20
611#define QIB_6120_EXTStatus_Reserved_RMASK 0xFFFF
612#define QIB_6120_EXTStatus_Reserved1_LSB 0x10
613#define QIB_6120_EXTStatus_Reserved1_RMASK 0xFFFF
614#define QIB_6120_EXTStatus_MemBISTFoundErr_LSB 0xF
615#define QIB_6120_EXTStatus_MemBISTFoundErr_RMASK 0x1
616#define QIB_6120_EXTStatus_MemBISTEndTest_LSB 0xE
617#define QIB_6120_EXTStatus_MemBISTEndTest_RMASK 0x1
618#define QIB_6120_EXTStatus_Reserved2_LSB 0x0
619#define QIB_6120_EXTStatus_Reserved2_RMASK 0x3FFF
620
621#define QIB_6120_EXTCtrl_OFFS 0xD8
622#define QIB_6120_EXTCtrl_GPIOOe_LSB 0x30
623#define QIB_6120_EXTCtrl_GPIOOe_RMASK 0xFFFF
624#define QIB_6120_EXTCtrl_GPIOInvert_LSB 0x20
625#define QIB_6120_EXTCtrl_GPIOInvert_RMASK 0xFFFF
626#define QIB_6120_EXTCtrl_Reserved_LSB 0x4
627#define QIB_6120_EXTCtrl_Reserved_RMASK 0xFFFFFFF
628#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
629#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
630#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
631#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
632#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
633#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
634#define QIB_6120_EXTCtrl_LEDGblErrRedOff_LSB 0x0
635#define QIB_6120_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
636
637#define QIB_6120_GPIOOut_OFFS 0xE0
638
639#define QIB_6120_GPIOMask_OFFS 0xE8
640
641#define QIB_6120_GPIOStatus_OFFS 0xF0
642
643#define QIB_6120_GPIOClear_OFFS 0xF8
644
645#define QIB_6120_RcvCtrl_OFFS 0x100
646#define QIB_6120_RcvCtrl_TailUpd_LSB 0x1F
647#define QIB_6120_RcvCtrl_TailUpd_RMASK 0x1
648#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_LSB 0x1E
649#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
650#define QIB_6120_RcvCtrl_Reserved_LSB 0x15
651#define QIB_6120_RcvCtrl_Reserved_RMASK 0x1FF
652#define QIB_6120_RcvCtrl_IntrAvail_LSB 0x10
653#define QIB_6120_RcvCtrl_IntrAvail_RMASK 0x1F
654#define QIB_6120_RcvCtrl_Reserved1_LSB 0x9
655#define QIB_6120_RcvCtrl_Reserved1_RMASK 0x7F
656#define QIB_6120_RcvCtrl_Reserved2_LSB 0x5
657#define QIB_6120_RcvCtrl_Reserved2_RMASK 0xF
658#define QIB_6120_RcvCtrl_PortEnable_LSB 0x0
659#define QIB_6120_RcvCtrl_PortEnable_RMASK 0x1F
660
661#define QIB_6120_RcvBTHQP_OFFS 0x108
662#define QIB_6120_RcvBTHQP_BTHQP_Mask_LSB 0x1E
663#define QIB_6120_RcvBTHQP_BTHQP_Mask_RMASK 0x3
664#define QIB_6120_RcvBTHQP_Reserved_LSB 0x18
665#define QIB_6120_RcvBTHQP_Reserved_RMASK 0x3F
666#define QIB_6120_RcvBTHQP_RcvBTHQP_LSB 0x0
667#define QIB_6120_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
668
669#define QIB_6120_RcvHdrSize_OFFS 0x110
670
671#define QIB_6120_RcvHdrCnt_OFFS 0x118
672
673#define QIB_6120_RcvHdrEntSize_OFFS 0x120
674
675#define QIB_6120_RcvTIDBase_OFFS 0x128
676
677#define QIB_6120_RcvTIDCnt_OFFS 0x130
678
679#define QIB_6120_RcvEgrBase_OFFS 0x138
680
681#define QIB_6120_RcvEgrCnt_OFFS 0x140
682
683#define QIB_6120_RcvBufBase_OFFS 0x148
684
685#define QIB_6120_RcvBufSize_OFFS 0x150
686
687#define QIB_6120_RxIntMemBase_OFFS 0x158
688
689#define QIB_6120_RxIntMemSize_OFFS 0x160
690
691#define QIB_6120_RcvPartitionKey_OFFS 0x168
692
693#define QIB_6120_RcvPktLEDCnt_OFFS 0x178
694#define QIB_6120_RcvPktLEDCnt_ONperiod_LSB 0x20
695#define QIB_6120_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
696#define QIB_6120_RcvPktLEDCnt_OFFperiod_LSB 0x0
697#define QIB_6120_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
698
699#define QIB_6120_SendCtrl_OFFS 0x1C0
700#define QIB_6120_SendCtrl_Disarm_LSB 0x1F
701#define QIB_6120_SendCtrl_Disarm_RMASK 0x1
702#define QIB_6120_SendCtrl_Reserved_LSB 0x17
703#define QIB_6120_SendCtrl_Reserved_RMASK 0xFF
704#define QIB_6120_SendCtrl_DisarmPIOBuf_LSB 0x10
705#define QIB_6120_SendCtrl_DisarmPIOBuf_RMASK 0x7F
706#define QIB_6120_SendCtrl_Reserved1_LSB 0x4
707#define QIB_6120_SendCtrl_Reserved1_RMASK 0xFFF
708#define QIB_6120_SendCtrl_PIOEnable_LSB 0x3
709#define QIB_6120_SendCtrl_PIOEnable_RMASK 0x1
710#define QIB_6120_SendCtrl_PIOBufAvailUpd_LSB 0x2
711#define QIB_6120_SendCtrl_PIOBufAvailUpd_RMASK 0x1
712#define QIB_6120_SendCtrl_PIOIntBufAvail_LSB 0x1
713#define QIB_6120_SendCtrl_PIOIntBufAvail_RMASK 0x1
714#define QIB_6120_SendCtrl_Abort_LSB 0x0
715#define QIB_6120_SendCtrl_Abort_RMASK 0x1
716
717#define QIB_6120_SendPIOBufBase_OFFS 0x1C8
718#define QIB_6120_SendPIOBufBase_Reserved_LSB 0x35
719#define QIB_6120_SendPIOBufBase_Reserved_RMASK 0x7FF
720#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_LSB 0x20
721#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
722#define QIB_6120_SendPIOBufBase_Reserved1_LSB 0x15
723#define QIB_6120_SendPIOBufBase_Reserved1_RMASK 0x7FF
724#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_LSB 0x0
725#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
726
727#define QIB_6120_SendPIOSize_OFFS 0x1D0
728#define QIB_6120_SendPIOSize_Reserved_LSB 0x2D
729#define QIB_6120_SendPIOSize_Reserved_RMASK 0xFFFFF
730#define QIB_6120_SendPIOSize_Size_LargePIO_LSB 0x20
731#define QIB_6120_SendPIOSize_Size_LargePIO_RMASK 0x1FFF
732#define QIB_6120_SendPIOSize_Reserved1_LSB 0xC
733#define QIB_6120_SendPIOSize_Reserved1_RMASK 0xFFFFF
734#define QIB_6120_SendPIOSize_Size_SmallPIO_LSB 0x0
735#define QIB_6120_SendPIOSize_Size_SmallPIO_RMASK 0xFFF
736
737#define QIB_6120_SendPIOBufCnt_OFFS 0x1D8
738#define QIB_6120_SendPIOBufCnt_Reserved_LSB 0x24
739#define QIB_6120_SendPIOBufCnt_Reserved_RMASK 0xFFFFFFF
740#define QIB_6120_SendPIOBufCnt_Num_LargePIO_LSB 0x20
741#define QIB_6120_SendPIOBufCnt_Num_LargePIO_RMASK 0xF
742#define QIB_6120_SendPIOBufCnt_Reserved1_LSB 0x9
743#define QIB_6120_SendPIOBufCnt_Reserved1_RMASK 0x7FFFFF
744#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_LSB 0x0
745#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_RMASK 0x1FF
746
747#define QIB_6120_SendPIOAvailAddr_OFFS 0x1E0
748#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_LSB 0x6
749#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_RMASK 0x3FFFFFFFF
750#define QIB_6120_SendPIOAvailAddr_Reserved_LSB 0x0
751#define QIB_6120_SendPIOAvailAddr_Reserved_RMASK 0x3F
752
753#define QIB_6120_SendBufErr0_OFFS 0x240
754#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_LSB 0x0
755#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_RMASK 0x0
756
757#define QIB_6120_RcvHdrAddr0_OFFS 0x280
758#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
759#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
760#define QIB_6120_RcvHdrAddr0_Reserved_LSB 0x0
761#define QIB_6120_RcvHdrAddr0_Reserved_RMASK 0x3
762
763#define QIB_6120_RcvHdrTailAddr0_OFFS 0x300
764#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
765#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
766#define QIB_6120_RcvHdrTailAddr0_Reserved_LSB 0x0
767#define QIB_6120_RcvHdrTailAddr0_Reserved_RMASK 0x3
768
769#define QIB_6120_SerdesCfg0_OFFS 0x3C0
770#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_LSB 0x3F
771#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_RMASK 0x1
772#define QIB_6120_SerdesCfg0_Reserved_LSB 0x38
773#define QIB_6120_SerdesCfg0_Reserved_RMASK 0x7F
774#define QIB_6120_SerdesCfg0_RxEqCtl_LSB 0x36
775#define QIB_6120_SerdesCfg0_RxEqCtl_RMASK 0x3
776#define QIB_6120_SerdesCfg0_TxTermAdj_LSB 0x34
777#define QIB_6120_SerdesCfg0_TxTermAdj_RMASK 0x3
778#define QIB_6120_SerdesCfg0_RxTermAdj_LSB 0x32
779#define QIB_6120_SerdesCfg0_RxTermAdj_RMASK 0x3
780#define QIB_6120_SerdesCfg0_TermAdj1_LSB 0x31
781#define QIB_6120_SerdesCfg0_TermAdj1_RMASK 0x1
782#define QIB_6120_SerdesCfg0_TermAdj0_LSB 0x30
783#define QIB_6120_SerdesCfg0_TermAdj0_RMASK 0x1
784#define QIB_6120_SerdesCfg0_LPBKA_LSB 0x2F
785#define QIB_6120_SerdesCfg0_LPBKA_RMASK 0x1
786#define QIB_6120_SerdesCfg0_LPBKB_LSB 0x2E
787#define QIB_6120_SerdesCfg0_LPBKB_RMASK 0x1
788#define QIB_6120_SerdesCfg0_LPBKC_LSB 0x2D
789#define QIB_6120_SerdesCfg0_LPBKC_RMASK 0x1
790#define QIB_6120_SerdesCfg0_LPBKD_LSB 0x2C
791#define QIB_6120_SerdesCfg0_LPBKD_RMASK 0x1
792#define QIB_6120_SerdesCfg0_PW_LSB 0x2B
793#define QIB_6120_SerdesCfg0_PW_RMASK 0x1
794#define QIB_6120_SerdesCfg0_RefSel_LSB 0x29
795#define QIB_6120_SerdesCfg0_RefSel_RMASK 0x3
796#define QIB_6120_SerdesCfg0_ParReset_LSB 0x28
797#define QIB_6120_SerdesCfg0_ParReset_RMASK 0x1
798#define QIB_6120_SerdesCfg0_ParLPBK_LSB 0x27
799#define QIB_6120_SerdesCfg0_ParLPBK_RMASK 0x1
800#define QIB_6120_SerdesCfg0_OffsetEn_LSB 0x26
801#define QIB_6120_SerdesCfg0_OffsetEn_RMASK 0x1
802#define QIB_6120_SerdesCfg0_Offset_LSB 0x1E
803#define QIB_6120_SerdesCfg0_Offset_RMASK 0xFF
804#define QIB_6120_SerdesCfg0_L2PwrDn_LSB 0x1D
805#define QIB_6120_SerdesCfg0_L2PwrDn_RMASK 0x1
806#define QIB_6120_SerdesCfg0_ResetPLL_LSB 0x1C
807#define QIB_6120_SerdesCfg0_ResetPLL_RMASK 0x1
808#define QIB_6120_SerdesCfg0_RxTermEnX_LSB 0x18
809#define QIB_6120_SerdesCfg0_RxTermEnX_RMASK 0xF
810#define QIB_6120_SerdesCfg0_BeaconTxEnX_LSB 0x14
811#define QIB_6120_SerdesCfg0_BeaconTxEnX_RMASK 0xF
812#define QIB_6120_SerdesCfg0_RxDetEnX_LSB 0x10
813#define QIB_6120_SerdesCfg0_RxDetEnX_RMASK 0xF
814#define QIB_6120_SerdesCfg0_TxIdeEnX_LSB 0xC
815#define QIB_6120_SerdesCfg0_TxIdeEnX_RMASK 0xF
816#define QIB_6120_SerdesCfg0_RxIdleEnX_LSB 0x8
817#define QIB_6120_SerdesCfg0_RxIdleEnX_RMASK 0xF
818#define QIB_6120_SerdesCfg0_L1PwrDnA_LSB 0x7
819#define QIB_6120_SerdesCfg0_L1PwrDnA_RMASK 0x1
820#define QIB_6120_SerdesCfg0_L1PwrDnB_LSB 0x6
821#define QIB_6120_SerdesCfg0_L1PwrDnB_RMASK 0x1
822#define QIB_6120_SerdesCfg0_L1PwrDnC_LSB 0x5
823#define QIB_6120_SerdesCfg0_L1PwrDnC_RMASK 0x1
824#define QIB_6120_SerdesCfg0_L1PwrDnD_LSB 0x4
825#define QIB_6120_SerdesCfg0_L1PwrDnD_RMASK 0x1
826#define QIB_6120_SerdesCfg0_ResetA_LSB 0x3
827#define QIB_6120_SerdesCfg0_ResetA_RMASK 0x1
828#define QIB_6120_SerdesCfg0_ResetB_LSB 0x2
829#define QIB_6120_SerdesCfg0_ResetB_RMASK 0x1
830#define QIB_6120_SerdesCfg0_ResetC_LSB 0x1
831#define QIB_6120_SerdesCfg0_ResetC_RMASK 0x1
832#define QIB_6120_SerdesCfg0_ResetD_LSB 0x0
833#define QIB_6120_SerdesCfg0_ResetD_RMASK 0x1
834
835#define QIB_6120_SerdesStat_OFFS 0x3D0
836#define QIB_6120_SerdesStat_Reserved_LSB 0xC
837#define QIB_6120_SerdesStat_Reserved_RMASK 0xFFFFFFFFFFFFF
838#define QIB_6120_SerdesStat_BeaconDetA_LSB 0xB
839#define QIB_6120_SerdesStat_BeaconDetA_RMASK 0x1
840#define QIB_6120_SerdesStat_BeaconDetB_LSB 0xA
841#define QIB_6120_SerdesStat_BeaconDetB_RMASK 0x1
842#define QIB_6120_SerdesStat_BeaconDetC_LSB 0x9
843#define QIB_6120_SerdesStat_BeaconDetC_RMASK 0x1
844#define QIB_6120_SerdesStat_BeaconDetD_LSB 0x8
845#define QIB_6120_SerdesStat_BeaconDetD_RMASK 0x1
846#define QIB_6120_SerdesStat_RxDetA_LSB 0x7
847#define QIB_6120_SerdesStat_RxDetA_RMASK 0x1
848#define QIB_6120_SerdesStat_RxDetB_LSB 0x6
849#define QIB_6120_SerdesStat_RxDetB_RMASK 0x1
850#define QIB_6120_SerdesStat_RxDetC_LSB 0x5
851#define QIB_6120_SerdesStat_RxDetC_RMASK 0x1
852#define QIB_6120_SerdesStat_RxDetD_LSB 0x4
853#define QIB_6120_SerdesStat_RxDetD_RMASK 0x1
854#define QIB_6120_SerdesStat_TxIdleDetA_LSB 0x3
855#define QIB_6120_SerdesStat_TxIdleDetA_RMASK 0x1
856#define QIB_6120_SerdesStat_TxIdleDetB_LSB 0x2
857#define QIB_6120_SerdesStat_TxIdleDetB_RMASK 0x1
858#define QIB_6120_SerdesStat_TxIdleDetC_LSB 0x1
859#define QIB_6120_SerdesStat_TxIdleDetC_RMASK 0x1
860#define QIB_6120_SerdesStat_TxIdleDetD_LSB 0x0
861#define QIB_6120_SerdesStat_TxIdleDetD_RMASK 0x1
862
863#define QIB_6120_XGXSCfg_OFFS 0x3D8
864#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_LSB 0x3F
865#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_RMASK 0x1
866#define QIB_6120_XGXSCfg_Reserved_LSB 0x17
867#define QIB_6120_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFF
868#define QIB_6120_XGXSCfg_polarity_inv_LSB 0x13
869#define QIB_6120_XGXSCfg_polarity_inv_RMASK 0xF
870#define QIB_6120_XGXSCfg_link_sync_mask_LSB 0x9
871#define QIB_6120_XGXSCfg_link_sync_mask_RMASK 0x3FF
872#define QIB_6120_XGXSCfg_port_addr_LSB 0x4
873#define QIB_6120_XGXSCfg_port_addr_RMASK 0x1F
874#define QIB_6120_XGXSCfg_mdd_30_LSB 0x3
875#define QIB_6120_XGXSCfg_mdd_30_RMASK 0x1
876#define QIB_6120_XGXSCfg_xcv_resetn_LSB 0x2
877#define QIB_6120_XGXSCfg_xcv_resetn_RMASK 0x1
878#define QIB_6120_XGXSCfg_Reserved1_LSB 0x1
879#define QIB_6120_XGXSCfg_Reserved1_RMASK 0x1
880#define QIB_6120_XGXSCfg_tx_rx_resetn_LSB 0x0
881#define QIB_6120_XGXSCfg_tx_rx_resetn_RMASK 0x1
882
883#define QIB_6120_LBIntCnt_OFFS 0x12000
884
885#define QIB_6120_LBFlowStallCnt_OFFS 0x12008
886
887#define QIB_6120_TxUnsupVLErrCnt_OFFS 0x12018
888
889#define QIB_6120_TxDataPktCnt_OFFS 0x12020
890
891#define QIB_6120_TxFlowPktCnt_OFFS 0x12028
892
893#define QIB_6120_TxDwordCnt_OFFS 0x12030
894
895#define QIB_6120_TxLenErrCnt_OFFS 0x12038
896
897#define QIB_6120_TxMaxMinLenErrCnt_OFFS 0x12040
898
899#define QIB_6120_TxUnderrunCnt_OFFS 0x12048
900
901#define QIB_6120_TxFlowStallCnt_OFFS 0x12050
902
903#define QIB_6120_TxDroppedPktCnt_OFFS 0x12058
904
905#define QIB_6120_RxDroppedPktCnt_OFFS 0x12060
906
907#define QIB_6120_RxDataPktCnt_OFFS 0x12068
908
909#define QIB_6120_RxFlowPktCnt_OFFS 0x12070
910
911#define QIB_6120_RxDwordCnt_OFFS 0x12078
912
913#define QIB_6120_RxLenErrCnt_OFFS 0x12080
914
915#define QIB_6120_RxMaxMinLenErrCnt_OFFS 0x12088
916
917#define QIB_6120_RxICRCErrCnt_OFFS 0x12090
918
919#define QIB_6120_RxVCRCErrCnt_OFFS 0x12098
920
921#define QIB_6120_RxFlowCtrlErrCnt_OFFS 0x120A0
922
923#define QIB_6120_RxBadFormatCnt_OFFS 0x120A8
924
925#define QIB_6120_RxLinkProblemCnt_OFFS 0x120B0
926
927#define QIB_6120_RxEBPCnt_OFFS 0x120B8
928
929#define QIB_6120_RxLPCRCErrCnt_OFFS 0x120C0
930
931#define QIB_6120_RxBufOvflCnt_OFFS 0x120C8
932
933#define QIB_6120_RxTIDFullErrCnt_OFFS 0x120D0
934
935#define QIB_6120_RxTIDValidErrCnt_OFFS 0x120D8
936
937#define QIB_6120_RxPKeyMismatchCnt_OFFS 0x120E0
938
939#define QIB_6120_RxP0HdrEgrOvflCnt_OFFS 0x120E8
940
941#define QIB_6120_IBStatusChangeCnt_OFFS 0x12140
942
943#define QIB_6120_IBLinkErrRecoveryCnt_OFFS 0x12148
944
945#define QIB_6120_IBLinkDownedCnt_OFFS 0x12150
946
947#define QIB_6120_IBSymbolErrCnt_OFFS 0x12158
948
949#define QIB_6120_PcieRetryBufDiagQwordCnt_OFFS 0x12170
950
951#define QIB_6120_RcvEgrArray0_OFFS 0x14000
952
953#define QIB_6120_RcvTIDArray0_OFFS 0x54000
954
955#define QIB_6120_PIOLaunchFIFO_OFFS 0x64000
956
957#define QIB_6120_SendPIOpbcCache_OFFS 0x64800
958
959#define QIB_6120_RcvBuf1_OFFS 0x72000
960
961#define QIB_6120_RcvBuf2_OFFS 0x75000
962
963#define QIB_6120_RcvFlags_OFFS 0x77000
964
965#define QIB_6120_RcvLookupBuf1_OFFS 0x79000
966
967#define QIB_6120_RcvDMABuf_OFFS 0x7B000
968
969#define QIB_6120_MiscRXEIntMem_OFFS 0x7C000
970
971#define QIB_6120_PCIERcvBuf_OFFS 0x80000
972
973#define QIB_6120_PCIERetryBuf_OFFS 0x82000
974
975#define QIB_6120_PCIERcvBufRdToWrAddr_OFFS 0x84000
976
977#define QIB_6120_PIOBuf0_MA_OFFS 0x100000
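Every register in these generated headers follows one convention: _OFFS is the byte offset of the 64-bit register from the chip's mapped base, _LSB is the bit position where a field starts, and _RMASK is the mask applied after shifting the value right by _LSB. A minimal sketch of decoding a field this way, where base is a placeholder for the driver's ioremap'd BAR pointer:

#include <linux/io.h>

/* Illustrative only: isolate the 3-bit LinkState field of IBCStatus. */
static u32 qib6120_link_state(void __iomem *base)
{
	u64 ibcs = readq(base + QIB_6120_IBCStatus_OFFS);

	return (u32)((ibcs >> QIB_6120_IBCStatus_LinkState_LSB) &
		     QIB_6120_IBCStatus_LinkState_RMASK);
}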
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
new file mode 100644
index 000000000000..ea0bfd896f92
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7220.h
@@ -0,0 +1,156 @@
1#ifndef _QIB_7220_H
2#define _QIB_7220_H
3/*
4 * Copyright (c) 2007, 2009, 2010 QLogic Corporation. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35/* pull in the register definitions mechanically generated from the chip RTL */
36#include "qib_7220_regs.h"
37
38/* The number of eager receive TIDs for context zero. */
39#define IBA7220_KRCVEGRCNT 2048U
40
41#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
42#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
43#define IB_7220_LT_STATE_TXREVLANES 0x0d
44#define IB_7220_LT_STATE_CFGENH 0x10
45
46struct qib_chip_specific {
47 u64 __iomem *cregbase;
48 u64 *cntrs;
49 u64 *portcntrs;
50 spinlock_t sdepb_lock; /* serdes EPB bus */
51 spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
52 spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
53 u64 hwerrmask;
54 u64 errormask;
55 u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
56 u64 gpio_mask; /* shadow the gpio mask register */
57 u64 extctrl; /* shadow the gpio output enable, etc... */
58 u32 ncntrs;
59 u32 nportcntrs;
60 u32 cntrnamelen;
61 u32 portcntrnamelen;
62 u32 numctxts;
63 u32 rcvegrcnt;
64 u32 autoneg_tries;
65 u32 serdes_first_init_done;
66 u32 sdmabufcnt;
67 u32 lastbuf_for_pio;
68 u32 updthresh; /* current AvailUpdThld */
69 u32 updthresh_dflt; /* default AvailUpdThld */
70 int irq;
71 u8 presets_needed;
72 u8 relock_timer_active;
73 char emsgbuf[128];
74 char sdmamsgbuf[192];
75 char bitsmsgbuf[64];
76 struct timer_list relock_timer;
77 unsigned int relock_interval; /* in jiffies */
78};
79
80struct qib_chippport_specific {
81 struct qib_pportdata pportdata;
82 wait_queue_head_t autoneg_wait;
83 struct delayed_work autoneg_work;
84 struct timer_list chase_timer;
85 /*
86 * These five fields are used to establish deltas for IB symbol
87 * errors and link recovery errors. They can be reported on
88 * some chips during link negotiation prior to INIT, and with
89 * DDR when faking DDR negotiations with non-IBTA switches.
90 * The chip counters are adjusted at driver unload if there is
91 * a non-zero delta (see the sketch after this struct).
92 */
93 u64 ibdeltainprog;
94 u64 ibsymdelta;
95 u64 ibsymsnap;
96 u64 iblnkerrdelta;
97 u64 iblnkerrsnap;
98 u64 ibcctrl; /* kr_ibcctrl shadow */
99 u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
100 u64 chase_end;
101 u32 last_delay_mult;
102};
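The snap/delta pairs above implement a snapshot-and-delta pattern: the chip counter is sampled when a window of expected bogus errors opens, and the difference is folded into the running delta when it closes, so the inflated counts can be subtracted out later (at driver unload, per the comment). A hedged sketch of the pattern for the symbol-error counter, where read_ib_symbolerr() is a hypothetical stand-in for the driver's real counter-read helper:

extern u64 read_ib_symbolerr(void);	/* hypothetical counter read */

static void symerr_window_open(struct qib_chippport_specific *cp)
{
	cp->ibdeltainprog = 1;			/* delta tracking active */
	cp->ibsymsnap = read_ib_symbolerr();	/* counter at window start */
}

static void symerr_window_close(struct qib_chippport_specific *cp)
{
	/* fold errors seen during the window into the running delta */
	cp->ibsymdelta += read_ib_symbolerr() - cp->ibsymsnap;
	cp->ibdeltainprog = 0;
}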
103
104/*
105 * This header file provides the declarations and common definitions
106 * for (mostly) manipulation of the SerDes blocks within the IBA7220.
107 * The functions declared here should only be called from within other
108 * 7220-related files such as qib_iba7220.c or qib_sd7220.c.
109 */
110int qib_sd7220_presets(struct qib_devdata *dd);
111int qib_sd7220_init(struct qib_devdata *dd);
112int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum, u8 *img,
113 int len, int offset);
114int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum, const u8 *img,
115 int len, int offset);
116void qib_sd7220_clr_ibpar(struct qib_devdata *);
117/*
118 * Used below for the sdnum parameter: it selects one of the two
119 * SerDes sections used for PCIe, or the single SerDes used for IB,
120 * which is the only one currently used (see the sketch below).
121 */
122#define IB_7220_SERDES 2
123
124int qib_sd7220_ib_load(struct qib_devdata *dd);
125int qib_sd7220_ib_vfy(struct qib_devdata *dd);
126
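A hedged illustration of the load-then-verify pairing selected by IB_7220_SERDES, assuming the conventional negative-errno-on-failure return and treating fw/fw_len as placeholder image parameters:

static int example_load_ib_serdes(struct qib_devdata *dd, u8 *fw, int fw_len)
{
	/* program the IB SerDes image, then read it back to check it */
	int ret = qib_sd7220_prog_ld(dd, IB_7220_SERDES, fw, fw_len, 0);

	if (ret < 0)
		return ret;
	return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, fw, fw_len, 0);
}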
127static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
128 const u16 regno)
129{
130 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
131 return -1; /* all-ones: chip absent or not mapped */
132 return readl((u32 __iomem *)&dd->kregbase[regno]);
133}
134
135static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
136 const u16 regno)
137{
138 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
139 return -1;
140
141 return readq(&dd->kregbase[regno]);
142}
143
144static inline void qib_write_kreg(const struct qib_devdata *dd,
145 const u16 regno, u64 value)
146{
147 if (dd->kregbase) /* silently drop writes while unmapped */
148 writeq(value, &dd->kregbase[regno]);
149}
150
151void set_7220_relock_poll(struct qib_devdata *, int);
152void shutdown_7220_relock_poll(struct qib_devdata *);
153void toggle_7220_rclkrls(struct qib_devdata *);
154
155
156#endif /* _QIB_7220_H */
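The accessors above pair with the _LSB/_RMASK defines from qib_7220_regs.h for read-modify-write updates. A minimal sketch, where kr_control stands in for the driver's internal index of the Control register (the index-to-offset mapping lives in the chip-specific .c file, not in this header):

static void example_link_enable(struct qib_devdata *dd, u16 kr_control)
{
	u64 ctrl = qib_read_kreg64(dd, kr_control);

	/* set the single-bit LinkEn field; the helpers themselves
	 * tolerate an absent or unmapped chip */
	ctrl |= (u64)QIB_7220_Control_LinkEn_RMASK <<
		QIB_7220_Control_LinkEn_LSB;
	qib_write_kreg(dd, kr_control, ctrl);
}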
diff --git a/drivers/infiniband/hw/qib/qib_7220_regs.h b/drivers/infiniband/hw/qib/qib_7220_regs.h
new file mode 100644
index 000000000000..0da5bb750e52
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7220_regs.h
@@ -0,0 +1,1496 @@
1/*
2 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
36
37#define QIB_7220_Revision_OFFS 0x0
38#define QIB_7220_Revision_R_Simulator_LSB 0x3F
39#define QIB_7220_Revision_R_Simulator_RMASK 0x1
40#define QIB_7220_Revision_R_Emulation_LSB 0x3E
41#define QIB_7220_Revision_R_Emulation_RMASK 0x1
42#define QIB_7220_Revision_R_Emulation_Revcode_LSB 0x28
43#define QIB_7220_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
44#define QIB_7220_Revision_BoardID_LSB 0x20
45#define QIB_7220_Revision_BoardID_RMASK 0xFF
46#define QIB_7220_Revision_R_SW_LSB 0x18
47#define QIB_7220_Revision_R_SW_RMASK 0xFF
48#define QIB_7220_Revision_R_Arch_LSB 0x10
49#define QIB_7220_Revision_R_Arch_RMASK 0xFF
50#define QIB_7220_Revision_R_ChipRevMajor_LSB 0x8
51#define QIB_7220_Revision_R_ChipRevMajor_RMASK 0xFF
52#define QIB_7220_Revision_R_ChipRevMinor_LSB 0x0
53#define QIB_7220_Revision_R_ChipRevMinor_RMASK 0xFF
54
55#define QIB_7220_Control_OFFS 0x8
56#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_LSB 0x7
57#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_RMASK 0x1
58#define QIB_7220_Control_PCIECplQDiagEn_LSB 0x6
59#define QIB_7220_Control_PCIECplQDiagEn_RMASK 0x1
60#define QIB_7220_Control_Reserved_LSB 0x5
61#define QIB_7220_Control_Reserved_RMASK 0x1
62#define QIB_7220_Control_TxLatency_LSB 0x4
63#define QIB_7220_Control_TxLatency_RMASK 0x1
64#define QIB_7220_Control_PCIERetryBufDiagEn_LSB 0x3
65#define QIB_7220_Control_PCIERetryBufDiagEn_RMASK 0x1
66#define QIB_7220_Control_LinkEn_LSB 0x2
67#define QIB_7220_Control_LinkEn_RMASK 0x1
68#define QIB_7220_Control_FreezeMode_LSB 0x1
69#define QIB_7220_Control_FreezeMode_RMASK 0x1
70#define QIB_7220_Control_SyncReset_LSB 0x0
71#define QIB_7220_Control_SyncReset_RMASK 0x1
72
73#define QIB_7220_PageAlign_OFFS 0x10
74
75#define QIB_7220_PortCnt_OFFS 0x18
76
77#define QIB_7220_SendRegBase_OFFS 0x30
78
79#define QIB_7220_UserRegBase_OFFS 0x38
80
81#define QIB_7220_CntrRegBase_OFFS 0x40
82
83#define QIB_7220_Scratch_OFFS 0x48
84
85#define QIB_7220_IntMask_OFFS 0x68
86#define QIB_7220_IntMask_SDmaIntMask_LSB 0x3F
87#define QIB_7220_IntMask_SDmaIntMask_RMASK 0x1
88#define QIB_7220_IntMask_SDmaDisabledMasked_LSB 0x3E
89#define QIB_7220_IntMask_SDmaDisabledMasked_RMASK 0x1
90#define QIB_7220_IntMask_Reserved_LSB 0x31
91#define QIB_7220_IntMask_Reserved_RMASK 0x1FFF
92#define QIB_7220_IntMask_RcvUrg16IntMask_LSB 0x30
93#define QIB_7220_IntMask_RcvUrg16IntMask_RMASK 0x1
94#define QIB_7220_IntMask_RcvUrg15IntMask_LSB 0x2F
95#define QIB_7220_IntMask_RcvUrg15IntMask_RMASK 0x1
96#define QIB_7220_IntMask_RcvUrg14IntMask_LSB 0x2E
97#define QIB_7220_IntMask_RcvUrg14IntMask_RMASK 0x1
98#define QIB_7220_IntMask_RcvUrg13IntMask_LSB 0x2D
99#define QIB_7220_IntMask_RcvUrg13IntMask_RMASK 0x1
100#define QIB_7220_IntMask_RcvUrg12IntMask_LSB 0x2C
101#define QIB_7220_IntMask_RcvUrg12IntMask_RMASK 0x1
102#define QIB_7220_IntMask_RcvUrg11IntMask_LSB 0x2B
103#define QIB_7220_IntMask_RcvUrg11IntMask_RMASK 0x1
104#define QIB_7220_IntMask_RcvUrg10IntMask_LSB 0x2A
105#define QIB_7220_IntMask_RcvUrg10IntMask_RMASK 0x1
106#define QIB_7220_IntMask_RcvUrg9IntMask_LSB 0x29
107#define QIB_7220_IntMask_RcvUrg9IntMask_RMASK 0x1
108#define QIB_7220_IntMask_RcvUrg8IntMask_LSB 0x28
109#define QIB_7220_IntMask_RcvUrg8IntMask_RMASK 0x1
110#define QIB_7220_IntMask_RcvUrg7IntMask_LSB 0x27
111#define QIB_7220_IntMask_RcvUrg7IntMask_RMASK 0x1
112#define QIB_7220_IntMask_RcvUrg6IntMask_LSB 0x26
113#define QIB_7220_IntMask_RcvUrg6IntMask_RMASK 0x1
114#define QIB_7220_IntMask_RcvUrg5IntMask_LSB 0x25
115#define QIB_7220_IntMask_RcvUrg5IntMask_RMASK 0x1
116#define QIB_7220_IntMask_RcvUrg4IntMask_LSB 0x24
117#define QIB_7220_IntMask_RcvUrg4IntMask_RMASK 0x1
118#define QIB_7220_IntMask_RcvUrg3IntMask_LSB 0x23
119#define QIB_7220_IntMask_RcvUrg3IntMask_RMASK 0x1
120#define QIB_7220_IntMask_RcvUrg2IntMask_LSB 0x22
121#define QIB_7220_IntMask_RcvUrg2IntMask_RMASK 0x1
122#define QIB_7220_IntMask_RcvUrg1IntMask_LSB 0x21
123#define QIB_7220_IntMask_RcvUrg1IntMask_RMASK 0x1
124#define QIB_7220_IntMask_RcvUrg0IntMask_LSB 0x20
125#define QIB_7220_IntMask_RcvUrg0IntMask_RMASK 0x1
126#define QIB_7220_IntMask_ErrorIntMask_LSB 0x1F
127#define QIB_7220_IntMask_ErrorIntMask_RMASK 0x1
128#define QIB_7220_IntMask_PioSetIntMask_LSB 0x1E
129#define QIB_7220_IntMask_PioSetIntMask_RMASK 0x1
130#define QIB_7220_IntMask_PioBufAvailIntMask_LSB 0x1D
131#define QIB_7220_IntMask_PioBufAvailIntMask_RMASK 0x1
132#define QIB_7220_IntMask_assertGPIOIntMask_LSB 0x1C
133#define QIB_7220_IntMask_assertGPIOIntMask_RMASK 0x1
134#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_LSB 0x1B
135#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_RMASK 0x1
136#define QIB_7220_IntMask_JIntMask_LSB 0x1A
137#define QIB_7220_IntMask_JIntMask_RMASK 0x1
138#define QIB_7220_IntMask_Reserved1_LSB 0x11
139#define QIB_7220_IntMask_Reserved1_RMASK 0x1FF
140#define QIB_7220_IntMask_RcvAvail16IntMask_LSB 0x10
141#define QIB_7220_IntMask_RcvAvail16IntMask_RMASK 0x1
142#define QIB_7220_IntMask_RcvAvail15IntMask_LSB 0xF
143#define QIB_7220_IntMask_RcvAvail15IntMask_RMASK 0x1
144#define QIB_7220_IntMask_RcvAvail14IntMask_LSB 0xE
145#define QIB_7220_IntMask_RcvAvail14IntMask_RMASK 0x1
146#define QIB_7220_IntMask_RcvAvail13IntMask_LSB 0xD
147#define QIB_7220_IntMask_RcvAvail13IntMask_RMASK 0x1
148#define QIB_7220_IntMask_RcvAvail12IntMask_LSB 0xC
149#define QIB_7220_IntMask_RcvAvail12IntMask_RMASK 0x1
150#define QIB_7220_IntMask_RcvAvail11IntMask_LSB 0xB
151#define QIB_7220_IntMask_RcvAvail11IntMask_RMASK 0x1
152#define QIB_7220_IntMask_RcvAvail10IntMask_LSB 0xA
153#define QIB_7220_IntMask_RcvAvail10IntMask_RMASK 0x1
154#define QIB_7220_IntMask_RcvAvail9IntMask_LSB 0x9
155#define QIB_7220_IntMask_RcvAvail9IntMask_RMASK 0x1
156#define QIB_7220_IntMask_RcvAvail8IntMask_LSB 0x8
157#define QIB_7220_IntMask_RcvAvail8IntMask_RMASK 0x1
158#define QIB_7220_IntMask_RcvAvail7IntMask_LSB 0x7
159#define QIB_7220_IntMask_RcvAvail7IntMask_RMASK 0x1
160#define QIB_7220_IntMask_RcvAvail6IntMask_LSB 0x6
161#define QIB_7220_IntMask_RcvAvail6IntMask_RMASK 0x1
162#define QIB_7220_IntMask_RcvAvail5IntMask_LSB 0x5
163#define QIB_7220_IntMask_RcvAvail5IntMask_RMASK 0x1
164#define QIB_7220_IntMask_RcvAvail4IntMask_LSB 0x4
165#define QIB_7220_IntMask_RcvAvail4IntMask_RMASK 0x1
166#define QIB_7220_IntMask_RcvAvail3IntMask_LSB 0x3
167#define QIB_7220_IntMask_RcvAvail3IntMask_RMASK 0x1
168#define QIB_7220_IntMask_RcvAvail2IntMask_LSB 0x2
169#define QIB_7220_IntMask_RcvAvail2IntMask_RMASK 0x1
170#define QIB_7220_IntMask_RcvAvail1IntMask_LSB 0x1
171#define QIB_7220_IntMask_RcvAvail1IntMask_RMASK 0x1
172#define QIB_7220_IntMask_RcvAvail0IntMask_LSB 0x0
173#define QIB_7220_IntMask_RcvAvail0IntMask_RMASK 0x1
174
175#define QIB_7220_IntStatus_OFFS 0x70
176#define QIB_7220_IntStatus_SDmaInt_LSB 0x3F
177#define QIB_7220_IntStatus_SDmaInt_RMASK 0x1
178#define QIB_7220_IntStatus_SDmaDisabled_LSB 0x3E
179#define QIB_7220_IntStatus_SDmaDisabled_RMASK 0x1
180#define QIB_7220_IntStatus_Reserved_LSB 0x31
181#define QIB_7220_IntStatus_Reserved_RMASK 0x1FFF
182#define QIB_7220_IntStatus_RcvUrg16_LSB 0x30
183#define QIB_7220_IntStatus_RcvUrg16_RMASK 0x1
184#define QIB_7220_IntStatus_RcvUrg15_LSB 0x2F
185#define QIB_7220_IntStatus_RcvUrg15_RMASK 0x1
186#define QIB_7220_IntStatus_RcvUrg14_LSB 0x2E
187#define QIB_7220_IntStatus_RcvUrg14_RMASK 0x1
188#define QIB_7220_IntStatus_RcvUrg13_LSB 0x2D
189#define QIB_7220_IntStatus_RcvUrg13_RMASK 0x1
190#define QIB_7220_IntStatus_RcvUrg12_LSB 0x2C
191#define QIB_7220_IntStatus_RcvUrg12_RMASK 0x1
192#define QIB_7220_IntStatus_RcvUrg11_LSB 0x2B
193#define QIB_7220_IntStatus_RcvUrg11_RMASK 0x1
194#define QIB_7220_IntStatus_RcvUrg10_LSB 0x2A
195#define QIB_7220_IntStatus_RcvUrg10_RMASK 0x1
196#define QIB_7220_IntStatus_RcvUrg9_LSB 0x29
197#define QIB_7220_IntStatus_RcvUrg9_RMASK 0x1
198#define QIB_7220_IntStatus_RcvUrg8_LSB 0x28
199#define QIB_7220_IntStatus_RcvUrg8_RMASK 0x1
200#define QIB_7220_IntStatus_RcvUrg7_LSB 0x27
201#define QIB_7220_IntStatus_RcvUrg7_RMASK 0x1
202#define QIB_7220_IntStatus_RcvUrg6_LSB 0x26
203#define QIB_7220_IntStatus_RcvUrg6_RMASK 0x1
204#define QIB_7220_IntStatus_RcvUrg5_LSB 0x25
205#define QIB_7220_IntStatus_RcvUrg5_RMASK 0x1
206#define QIB_7220_IntStatus_RcvUrg4_LSB 0x24
207#define QIB_7220_IntStatus_RcvUrg4_RMASK 0x1
208#define QIB_7220_IntStatus_RcvUrg3_LSB 0x23
209#define QIB_7220_IntStatus_RcvUrg3_RMASK 0x1
210#define QIB_7220_IntStatus_RcvUrg2_LSB 0x22
211#define QIB_7220_IntStatus_RcvUrg2_RMASK 0x1
212#define QIB_7220_IntStatus_RcvUrg1_LSB 0x21
213#define QIB_7220_IntStatus_RcvUrg1_RMASK 0x1
214#define QIB_7220_IntStatus_RcvUrg0_LSB 0x20
215#define QIB_7220_IntStatus_RcvUrg0_RMASK 0x1
216#define QIB_7220_IntStatus_Error_LSB 0x1F
217#define QIB_7220_IntStatus_Error_RMASK 0x1
218#define QIB_7220_IntStatus_PioSent_LSB 0x1E
219#define QIB_7220_IntStatus_PioSent_RMASK 0x1
220#define QIB_7220_IntStatus_PioBufAvail_LSB 0x1D
221#define QIB_7220_IntStatus_PioBufAvail_RMASK 0x1
222#define QIB_7220_IntStatus_assertGPIO_LSB 0x1C
223#define QIB_7220_IntStatus_assertGPIO_RMASK 0x1
224#define QIB_7220_IntStatus_IBSerdesTrimDone_LSB 0x1B
225#define QIB_7220_IntStatus_IBSerdesTrimDone_RMASK 0x1
226#define QIB_7220_IntStatus_JInt_LSB 0x1A
227#define QIB_7220_IntStatus_JInt_RMASK 0x1
228#define QIB_7220_IntStatus_Reserved1_LSB 0x11
229#define QIB_7220_IntStatus_Reserved1_RMASK 0x1FF
230#define QIB_7220_IntStatus_RcvAvail16_LSB 0x10
231#define QIB_7220_IntStatus_RcvAvail16_RMASK 0x1
232#define QIB_7220_IntStatus_RcvAvail15_LSB 0xF
233#define QIB_7220_IntStatus_RcvAvail15_RMASK 0x1
234#define QIB_7220_IntStatus_RcvAvail14_LSB 0xE
235#define QIB_7220_IntStatus_RcvAvail14_RMASK 0x1
236#define QIB_7220_IntStatus_RcvAvail13_LSB 0xD
237#define QIB_7220_IntStatus_RcvAvail13_RMASK 0x1
238#define QIB_7220_IntStatus_RcvAvail12_LSB 0xC
239#define QIB_7220_IntStatus_RcvAvail12_RMASK 0x1
240#define QIB_7220_IntStatus_RcvAvail11_LSB 0xB
241#define QIB_7220_IntStatus_RcvAvail11_RMASK 0x1
242#define QIB_7220_IntStatus_RcvAvail10_LSB 0xA
243#define QIB_7220_IntStatus_RcvAvail10_RMASK 0x1
244#define QIB_7220_IntStatus_RcvAvail9_LSB 0x9
245#define QIB_7220_IntStatus_RcvAvail9_RMASK 0x1
246#define QIB_7220_IntStatus_RcvAvail8_LSB 0x8
247#define QIB_7220_IntStatus_RcvAvail8_RMASK 0x1
248#define QIB_7220_IntStatus_RcvAvail7_LSB 0x7
249#define QIB_7220_IntStatus_RcvAvail7_RMASK 0x1
250#define QIB_7220_IntStatus_RcvAvail6_LSB 0x6
251#define QIB_7220_IntStatus_RcvAvail6_RMASK 0x1
252#define QIB_7220_IntStatus_RcvAvail5_LSB 0x5
253#define QIB_7220_IntStatus_RcvAvail5_RMASK 0x1
254#define QIB_7220_IntStatus_RcvAvail4_LSB 0x4
255#define QIB_7220_IntStatus_RcvAvail4_RMASK 0x1
256#define QIB_7220_IntStatus_RcvAvail3_LSB 0x3
257#define QIB_7220_IntStatus_RcvAvail3_RMASK 0x1
258#define QIB_7220_IntStatus_RcvAvail2_LSB 0x2
259#define QIB_7220_IntStatus_RcvAvail2_RMASK 0x1
260#define QIB_7220_IntStatus_RcvAvail1_LSB 0x1
261#define QIB_7220_IntStatus_RcvAvail1_RMASK 0x1
262#define QIB_7220_IntStatus_RcvAvail0_LSB 0x0
263#define QIB_7220_IntStatus_RcvAvail0_RMASK 0x1
264
265#define QIB_7220_IntClear_OFFS 0x78
266#define QIB_7220_IntClear_SDmaIntClear_LSB 0x3F
267#define QIB_7220_IntClear_SDmaIntClear_RMASK 0x1
268#define QIB_7220_IntClear_SDmaDisabledClear_LSB 0x3E
269#define QIB_7220_IntClear_SDmaDisabledClear_RMASK 0x1
270#define QIB_7220_IntClear_Reserved_LSB 0x31
271#define QIB_7220_IntClear_Reserved_RMASK 0x1FFF
272#define QIB_7220_IntClear_RcvUrg16IntClear_LSB 0x30
273#define QIB_7220_IntClear_RcvUrg16IntClear_RMASK 0x1
274#define QIB_7220_IntClear_RcvUrg15IntClear_LSB 0x2F
275#define QIB_7220_IntClear_RcvUrg15IntClear_RMASK 0x1
276#define QIB_7220_IntClear_RcvUrg14IntClear_LSB 0x2E
277#define QIB_7220_IntClear_RcvUrg14IntClear_RMASK 0x1
278#define QIB_7220_IntClear_RcvUrg13IntClear_LSB 0x2D
279#define QIB_7220_IntClear_RcvUrg13IntClear_RMASK 0x1
280#define QIB_7220_IntClear_RcvUrg12IntClear_LSB 0x2C
281#define QIB_7220_IntClear_RcvUrg12IntClear_RMASK 0x1
282#define QIB_7220_IntClear_RcvUrg11IntClear_LSB 0x2B
283#define QIB_7220_IntClear_RcvUrg11IntClear_RMASK 0x1
284#define QIB_7220_IntClear_RcvUrg10IntClear_LSB 0x2A
285#define QIB_7220_IntClear_RcvUrg10IntClear_RMASK 0x1
286#define QIB_7220_IntClear_RcvUrg9IntClear_LSB 0x29
287#define QIB_7220_IntClear_RcvUrg9IntClear_RMASK 0x1
288#define QIB_7220_IntClear_RcvUrg8IntClear_LSB 0x28
289#define QIB_7220_IntClear_RcvUrg8IntClear_RMASK 0x1
290#define QIB_7220_IntClear_RcvUrg7IntClear_LSB 0x27
291#define QIB_7220_IntClear_RcvUrg7IntClear_RMASK 0x1
292#define QIB_7220_IntClear_RcvUrg6IntClear_LSB 0x26
293#define QIB_7220_IntClear_RcvUrg6IntClear_RMASK 0x1
294#define QIB_7220_IntClear_RcvUrg5IntClear_LSB 0x25
295#define QIB_7220_IntClear_RcvUrg5IntClear_RMASK 0x1
296#define QIB_7220_IntClear_RcvUrg4IntClear_LSB 0x24
297#define QIB_7220_IntClear_RcvUrg4IntClear_RMASK 0x1
298#define QIB_7220_IntClear_RcvUrg3IntClear_LSB 0x23
299#define QIB_7220_IntClear_RcvUrg3IntClear_RMASK 0x1
300#define QIB_7220_IntClear_RcvUrg2IntClear_LSB 0x22
301#define QIB_7220_IntClear_RcvUrg2IntClear_RMASK 0x1
302#define QIB_7220_IntClear_RcvUrg1IntClear_LSB 0x21
303#define QIB_7220_IntClear_RcvUrg1IntClear_RMASK 0x1
304#define QIB_7220_IntClear_RcvUrg0IntClear_LSB 0x20
305#define QIB_7220_IntClear_RcvUrg0IntClear_RMASK 0x1
306#define QIB_7220_IntClear_ErrorIntClear_LSB 0x1F
307#define QIB_7220_IntClear_ErrorIntClear_RMASK 0x1
308#define QIB_7220_IntClear_PioSetIntClear_LSB 0x1E
309#define QIB_7220_IntClear_PioSetIntClear_RMASK 0x1
310#define QIB_7220_IntClear_PioBufAvailIntClear_LSB 0x1D
311#define QIB_7220_IntClear_PioBufAvailIntClear_RMASK 0x1
312#define QIB_7220_IntClear_assertGPIOIntClear_LSB 0x1C
313#define QIB_7220_IntClear_assertGPIOIntClear_RMASK 0x1
314#define QIB_7220_IntClear_IBSerdesTrimDoneClear_LSB 0x1B
315#define QIB_7220_IntClear_IBSerdesTrimDoneClear_RMASK 0x1
316#define QIB_7220_IntClear_JIntClear_LSB 0x1A
317#define QIB_7220_IntClear_JIntClear_RMASK 0x1
318#define QIB_7220_IntClear_Reserved1_LSB 0x11
319#define QIB_7220_IntClear_Reserved1_RMASK 0x1FF
320#define QIB_7220_IntClear_RcvAvail16IntClear_LSB 0x10
321#define QIB_7220_IntClear_RcvAvail16IntClear_RMASK 0x1
322#define QIB_7220_IntClear_RcvAvail15IntClear_LSB 0xF
323#define QIB_7220_IntClear_RcvAvail15IntClear_RMASK 0x1
324#define QIB_7220_IntClear_RcvAvail14IntClear_LSB 0xE
325#define QIB_7220_IntClear_RcvAvail14IntClear_RMASK 0x1
326#define QIB_7220_IntClear_RcvAvail13IntClear_LSB 0xD
327#define QIB_7220_IntClear_RcvAvail13IntClear_RMASK 0x1
328#define QIB_7220_IntClear_RcvAvail12IntClear_LSB 0xC
329#define QIB_7220_IntClear_RcvAvail12IntClear_RMASK 0x1
330#define QIB_7220_IntClear_RcvAvail11IntClear_LSB 0xB
331#define QIB_7220_IntClear_RcvAvail11IntClear_RMASK 0x1
332#define QIB_7220_IntClear_RcvAvail10IntClear_LSB 0xA
333#define QIB_7220_IntClear_RcvAvail10IntClear_RMASK 0x1
334#define QIB_7220_IntClear_RcvAvail9IntClear_LSB 0x9
335#define QIB_7220_IntClear_RcvAvail9IntClear_RMASK 0x1
336#define QIB_7220_IntClear_RcvAvail8IntClear_LSB 0x8
337#define QIB_7220_IntClear_RcvAvail8IntClear_RMASK 0x1
338#define QIB_7220_IntClear_RcvAvail7IntClear_LSB 0x7
339#define QIB_7220_IntClear_RcvAvail7IntClear_RMASK 0x1
340#define QIB_7220_IntClear_RcvAvail6IntClear_LSB 0x6
341#define QIB_7220_IntClear_RcvAvail6IntClear_RMASK 0x1
342#define QIB_7220_IntClear_RcvAvail5IntClear_LSB 0x5
343#define QIB_7220_IntClear_RcvAvail5IntClear_RMASK 0x1
344#define QIB_7220_IntClear_RcvAvail4IntClear_LSB 0x4
345#define QIB_7220_IntClear_RcvAvail4IntClear_RMASK 0x1
346#define QIB_7220_IntClear_RcvAvail3IntClear_LSB 0x3
347#define QIB_7220_IntClear_RcvAvail3IntClear_RMASK 0x1
348#define QIB_7220_IntClear_RcvAvail2IntClear_LSB 0x2
349#define QIB_7220_IntClear_RcvAvail2IntClear_RMASK 0x1
350#define QIB_7220_IntClear_RcvAvail1IntClear_LSB 0x1
351#define QIB_7220_IntClear_RcvAvail1IntClear_RMASK 0x1
352#define QIB_7220_IntClear_RcvAvail0IntClear_LSB 0x0
353#define QIB_7220_IntClear_RcvAvail0IntClear_RMASK 0x1
354
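IntMask, IntStatus and IntClear form the usual mask/status/clear triple. A hedged sketch of acknowledging a single source, assuming write-1-to-clear semantics for IntClear and with kr_intstatus/kr_intclear as placeholder register indices:

static void example_ack_rcvavail0(struct qib_devdata *dd,
				  u16 kr_intstatus, u16 kr_intclear)
{
	u64 bit = (u64)QIB_7220_IntStatus_RcvAvail0_RMASK <<
		  QIB_7220_IntStatus_RcvAvail0_LSB;

	/* clear only the RcvAvail0 latch if it is currently asserted */
	if (qib_read_kreg64(dd, kr_intstatus) & bit)
		qib_write_kreg(dd, kr_intclear, bit);
}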
355#define QIB_7220_ErrMask_OFFS 0x80
356#define QIB_7220_ErrMask_Reserved_LSB 0x36
357#define QIB_7220_ErrMask_Reserved_RMASK 0x3FF
358#define QIB_7220_ErrMask_InvalidEEPCmdMask_LSB 0x35
359#define QIB_7220_ErrMask_InvalidEEPCmdMask_RMASK 0x1
360#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_LSB 0x34
361#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_RMASK 0x1
362#define QIB_7220_ErrMask_HardwareErrMask_LSB 0x33
363#define QIB_7220_ErrMask_HardwareErrMask_RMASK 0x1
364#define QIB_7220_ErrMask_ResetNegatedMask_LSB 0x32
365#define QIB_7220_ErrMask_ResetNegatedMask_RMASK 0x1
366#define QIB_7220_ErrMask_InvalidAddrErrMask_LSB 0x31
367#define QIB_7220_ErrMask_InvalidAddrErrMask_RMASK 0x1
368#define QIB_7220_ErrMask_IBStatusChangedMask_LSB 0x30
369#define QIB_7220_ErrMask_IBStatusChangedMask_RMASK 0x1
370#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_LSB 0x2F
371#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_RMASK 0x1
372#define QIB_7220_ErrMask_SDmaMissingDwErrMask_LSB 0x2E
373#define QIB_7220_ErrMask_SDmaMissingDwErrMask_RMASK 0x1
374#define QIB_7220_ErrMask_SDmaDwEnErrMask_LSB 0x2D
375#define QIB_7220_ErrMask_SDmaDwEnErrMask_RMASK 0x1
376#define QIB_7220_ErrMask_SDmaRpyTagErrMask_LSB 0x2C
377#define QIB_7220_ErrMask_SDmaRpyTagErrMask_RMASK 0x1
378#define QIB_7220_ErrMask_SDma1stDescErrMask_LSB 0x2B
379#define QIB_7220_ErrMask_SDma1stDescErrMask_RMASK 0x1
380#define QIB_7220_ErrMask_SDmaBaseErrMask_LSB 0x2A
381#define QIB_7220_ErrMask_SDmaBaseErrMask_RMASK 0x1
382#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_LSB 0x29
383#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_RMASK 0x1
384#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_LSB 0x28
385#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_RMASK 0x1
386#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_LSB 0x27
387#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_RMASK 0x1
388#define QIB_7220_ErrMask_SendBufMisuseErrMask_LSB 0x26
389#define QIB_7220_ErrMask_SendBufMisuseErrMask_RMASK 0x1
390#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
391#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
392#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
393#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
394#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
395#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
396#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
397#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
398#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
399#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
400#define QIB_7220_ErrMask_SendPktLenErrMask_LSB 0x20
401#define QIB_7220_ErrMask_SendPktLenErrMask_RMASK 0x1
402#define QIB_7220_ErrMask_SendUnderRunErrMask_LSB 0x1F
403#define QIB_7220_ErrMask_SendUnderRunErrMask_RMASK 0x1
404#define QIB_7220_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
405#define QIB_7220_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
406#define QIB_7220_ErrMask_SendMinPktLenErrMask_LSB 0x1D
407#define QIB_7220_ErrMask_SendMinPktLenErrMask_RMASK 0x1
408#define QIB_7220_ErrMask_SDmaDisabledErrMask_LSB 0x1C
409#define QIB_7220_ErrMask_SDmaDisabledErrMask_RMASK 0x1
410#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
411#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
412#define QIB_7220_ErrMask_Reserved1_LSB 0x12
413#define QIB_7220_ErrMask_Reserved1_RMASK 0x1FF
414#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
415#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
416#define QIB_7220_ErrMask_RcvHdrErrMask_LSB 0x10
417#define QIB_7220_ErrMask_RcvHdrErrMask_RMASK 0x1
418#define QIB_7220_ErrMask_RcvHdrLenErrMask_LSB 0xF
419#define QIB_7220_ErrMask_RcvHdrLenErrMask_RMASK 0x1
420#define QIB_7220_ErrMask_RcvBadTidErrMask_LSB 0xE
421#define QIB_7220_ErrMask_RcvBadTidErrMask_RMASK 0x1
422#define QIB_7220_ErrMask_RcvHdrFullErrMask_LSB 0xD
423#define QIB_7220_ErrMask_RcvHdrFullErrMask_RMASK 0x1
424#define QIB_7220_ErrMask_RcvEgrFullErrMask_LSB 0xC
425#define QIB_7220_ErrMask_RcvEgrFullErrMask_RMASK 0x1
426#define QIB_7220_ErrMask_RcvBadVersionErrMask_LSB 0xB
427#define QIB_7220_ErrMask_RcvBadVersionErrMask_RMASK 0x1
428#define QIB_7220_ErrMask_RcvIBFlowErrMask_LSB 0xA
429#define QIB_7220_ErrMask_RcvIBFlowErrMask_RMASK 0x1
430#define QIB_7220_ErrMask_RcvEBPErrMask_LSB 0x9
431#define QIB_7220_ErrMask_RcvEBPErrMask_RMASK 0x1
432#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
433#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
434#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
435#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
436#define QIB_7220_ErrMask_RcvShortPktLenErrMask_LSB 0x6
437#define QIB_7220_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
438#define QIB_7220_ErrMask_RcvLongPktLenErrMask_LSB 0x5
439#define QIB_7220_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
440#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
441#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
442#define QIB_7220_ErrMask_RcvMinPktLenErrMask_LSB 0x3
443#define QIB_7220_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
444#define QIB_7220_ErrMask_RcvICRCErrMask_LSB 0x2
445#define QIB_7220_ErrMask_RcvICRCErrMask_RMASK 0x1
446#define QIB_7220_ErrMask_RcvVCRCErrMask_LSB 0x1
447#define QIB_7220_ErrMask_RcvVCRCErrMask_RMASK 0x1
448#define QIB_7220_ErrMask_RcvFormatErrMask_LSB 0x0
449#define QIB_7220_ErrMask_RcvFormatErrMask_RMASK 0x1
450
451#define QIB_7220_ErrStatus_OFFS 0x88
452#define QIB_7220_ErrStatus_Reserved_LSB 0x36
453#define QIB_7220_ErrStatus_Reserved_RMASK 0x3FF
454#define QIB_7220_ErrStatus_InvalidEEPCmdErr_LSB 0x35
455#define QIB_7220_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
456#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_LSB 0x34
457#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_RMASK 0x1
458#define QIB_7220_ErrStatus_HardwareErr_LSB 0x33
459#define QIB_7220_ErrStatus_HardwareErr_RMASK 0x1
460#define QIB_7220_ErrStatus_ResetNegated_LSB 0x32
461#define QIB_7220_ErrStatus_ResetNegated_RMASK 0x1
462#define QIB_7220_ErrStatus_InvalidAddrErr_LSB 0x31
463#define QIB_7220_ErrStatus_InvalidAddrErr_RMASK 0x1
464#define QIB_7220_ErrStatus_IBStatusChanged_LSB 0x30
465#define QIB_7220_ErrStatus_IBStatusChanged_RMASK 0x1
466#define QIB_7220_ErrStatus_SDmaUnexpDataErr_LSB 0x2F
467#define QIB_7220_ErrStatus_SDmaUnexpDataErr_RMASK 0x1
468#define QIB_7220_ErrStatus_SDmaMissingDwErr_LSB 0x2E
469#define QIB_7220_ErrStatus_SDmaMissingDwErr_RMASK 0x1
470#define QIB_7220_ErrStatus_SDmaDwEnErr_LSB 0x2D
471#define QIB_7220_ErrStatus_SDmaDwEnErr_RMASK 0x1
472#define QIB_7220_ErrStatus_SDmaRpyTagErr_LSB 0x2C
473#define QIB_7220_ErrStatus_SDmaRpyTagErr_RMASK 0x1
474#define QIB_7220_ErrStatus_SDma1stDescErr_LSB 0x2B
475#define QIB_7220_ErrStatus_SDma1stDescErr_RMASK 0x1
476#define QIB_7220_ErrStatus_SDmaBaseErr_LSB 0x2A
477#define QIB_7220_ErrStatus_SDmaBaseErr_RMASK 0x1
478#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_LSB 0x29
479#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_RMASK 0x1
480#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_LSB 0x28
481#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_RMASK 0x1
482#define QIB_7220_ErrStatus_SDmaGenMismatchErr_LSB 0x27
483#define QIB_7220_ErrStatus_SDmaGenMismatchErr_RMASK 0x1
484#define QIB_7220_ErrStatus_SendBufMisuseErr_LSB 0x26
485#define QIB_7220_ErrStatus_SendBufMisuseErr_RMASK 0x1
486#define QIB_7220_ErrStatus_SendUnsupportedVLErr_LSB 0x25
487#define QIB_7220_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
488#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
489#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
490#define QIB_7220_ErrStatus_SendPioArmLaunchErr_LSB 0x23
491#define QIB_7220_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
492#define QIB_7220_ErrStatus_SendDroppedDataPktErr_LSB 0x22
493#define QIB_7220_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
494#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
495#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
496#define QIB_7220_ErrStatus_SendPktLenErr_LSB 0x20
497#define QIB_7220_ErrStatus_SendPktLenErr_RMASK 0x1
498#define QIB_7220_ErrStatus_SendUnderRunErr_LSB 0x1F
499#define QIB_7220_ErrStatus_SendUnderRunErr_RMASK 0x1
500#define QIB_7220_ErrStatus_SendMaxPktLenErr_LSB 0x1E
501#define QIB_7220_ErrStatus_SendMaxPktLenErr_RMASK 0x1
502#define QIB_7220_ErrStatus_SendMinPktLenErr_LSB 0x1D
503#define QIB_7220_ErrStatus_SendMinPktLenErr_RMASK 0x1
504#define QIB_7220_ErrStatus_SDmaDisabledErr_LSB 0x1C
505#define QIB_7220_ErrStatus_SDmaDisabledErr_RMASK 0x1
506#define QIB_7220_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
507#define QIB_7220_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
508#define QIB_7220_ErrStatus_Reserved1_LSB 0x12
509#define QIB_7220_ErrStatus_Reserved1_RMASK 0x1FF
510#define QIB_7220_ErrStatus_RcvIBLostLinkErr_LSB 0x11
511#define QIB_7220_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
512#define QIB_7220_ErrStatus_RcvHdrErr_LSB 0x10
513#define QIB_7220_ErrStatus_RcvHdrErr_RMASK 0x1
514#define QIB_7220_ErrStatus_RcvHdrLenErr_LSB 0xF
515#define QIB_7220_ErrStatus_RcvHdrLenErr_RMASK 0x1
516#define QIB_7220_ErrStatus_RcvBadTidErr_LSB 0xE
517#define QIB_7220_ErrStatus_RcvBadTidErr_RMASK 0x1
518#define QIB_7220_ErrStatus_RcvHdrFullErr_LSB 0xD
519#define QIB_7220_ErrStatus_RcvHdrFullErr_RMASK 0x1
520#define QIB_7220_ErrStatus_RcvEgrFullErr_LSB 0xC
521#define QIB_7220_ErrStatus_RcvEgrFullErr_RMASK 0x1
522#define QIB_7220_ErrStatus_RcvBadVersionErr_LSB 0xB
523#define QIB_7220_ErrStatus_RcvBadVersionErr_RMASK 0x1
524#define QIB_7220_ErrStatus_RcvIBFlowErr_LSB 0xA
525#define QIB_7220_ErrStatus_RcvIBFlowErr_RMASK 0x1
526#define QIB_7220_ErrStatus_RcvEBPErr_LSB 0x9
527#define QIB_7220_ErrStatus_RcvEBPErr_RMASK 0x1
528#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
529#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
530#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
531#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
532#define QIB_7220_ErrStatus_RcvShortPktLenErr_LSB 0x6
533#define QIB_7220_ErrStatus_RcvShortPktLenErr_RMASK 0x1
534#define QIB_7220_ErrStatus_RcvLongPktLenErr_LSB 0x5
535#define QIB_7220_ErrStatus_RcvLongPktLenErr_RMASK 0x1
536#define QIB_7220_ErrStatus_RcvMaxPktLenErr_LSB 0x4
537#define QIB_7220_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
538#define QIB_7220_ErrStatus_RcvMinPktLenErr_LSB 0x3
539#define QIB_7220_ErrStatus_RcvMinPktLenErr_RMASK 0x1
540#define QIB_7220_ErrStatus_RcvICRCErr_LSB 0x2
541#define QIB_7220_ErrStatus_RcvICRCErr_RMASK 0x1
542#define QIB_7220_ErrStatus_RcvVCRCErr_LSB 0x1
543#define QIB_7220_ErrStatus_RcvVCRCErr_RMASK 0x1
544#define QIB_7220_ErrStatus_RcvFormatErr_LSB 0x0
545#define QIB_7220_ErrStatus_RcvFormatErr_RMASK 0x1
546
547#define QIB_7220_ErrClear_OFFS 0x90
548#define QIB_7220_ErrClear_Reserved_LSB 0x36
549#define QIB_7220_ErrClear_Reserved_RMASK 0x3FF
550#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
551#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
552#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_LSB 0x34
553#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_RMASK 0x1
554#define QIB_7220_ErrClear_HardwareErrClear_LSB 0x33
555#define QIB_7220_ErrClear_HardwareErrClear_RMASK 0x1
556#define QIB_7220_ErrClear_ResetNegatedClear_LSB 0x32
557#define QIB_7220_ErrClear_ResetNegatedClear_RMASK 0x1
558#define QIB_7220_ErrClear_InvalidAddrErrClear_LSB 0x31
559#define QIB_7220_ErrClear_InvalidAddrErrClear_RMASK 0x1
560#define QIB_7220_ErrClear_IBStatusChangedClear_LSB 0x30
561#define QIB_7220_ErrClear_IBStatusChangedClear_RMASK 0x1
562#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_LSB 0x2F
563#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_RMASK 0x1
564#define QIB_7220_ErrClear_SDmaMissingDwErrClear_LSB 0x2E
565#define QIB_7220_ErrClear_SDmaMissingDwErrClear_RMASK 0x1
566#define QIB_7220_ErrClear_SDmaDwEnErrClear_LSB 0x2D
567#define QIB_7220_ErrClear_SDmaDwEnErrClear_RMASK 0x1
568#define QIB_7220_ErrClear_SDmaRpyTagErrClear_LSB 0x2C
569#define QIB_7220_ErrClear_SDmaRpyTagErrClear_RMASK 0x1
570#define QIB_7220_ErrClear_SDma1stDescErrClear_LSB 0x2B
571#define QIB_7220_ErrClear_SDma1stDescErrClear_RMASK 0x1
572#define QIB_7220_ErrClear_SDmaBaseErrClear_LSB 0x2A
573#define QIB_7220_ErrClear_SDmaBaseErrClear_RMASK 0x1
574#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_LSB 0x29
575#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_RMASK 0x1
576#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_LSB 0x28
577#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_RMASK 0x1
578#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_LSB 0x27
579#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_RMASK 0x1
580#define QIB_7220_ErrClear_SendBufMisuseErrClear_LSB 0x26
581#define QIB_7220_ErrClear_SendBufMisuseErrClear_RMASK 0x1
582#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
583#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
584#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
585#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
586#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
587#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
588#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
589#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
590#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
591#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
592#define QIB_7220_ErrClear_SendPktLenErrClear_LSB 0x20
593#define QIB_7220_ErrClear_SendPktLenErrClear_RMASK 0x1
594#define QIB_7220_ErrClear_SendUnderRunErrClear_LSB 0x1F
595#define QIB_7220_ErrClear_SendUnderRunErrClear_RMASK 0x1
596#define QIB_7220_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
597#define QIB_7220_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
598#define QIB_7220_ErrClear_SendMinPktLenErrClear_LSB 0x1D
599#define QIB_7220_ErrClear_SendMinPktLenErrClear_RMASK 0x1
600#define QIB_7220_ErrClear_SDmaDisabledErrClear_LSB 0x1C
601#define QIB_7220_ErrClear_SDmaDisabledErrClear_RMASK 0x1
602#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
603#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
604#define QIB_7220_ErrClear_Reserved1_LSB 0x12
605#define QIB_7220_ErrClear_Reserved1_RMASK 0x1FF
606#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
607#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
608#define QIB_7220_ErrClear_RcvHdrErrClear_LSB 0x10
609#define QIB_7220_ErrClear_RcvHdrErrClear_RMASK 0x1
610#define QIB_7220_ErrClear_RcvHdrLenErrClear_LSB 0xF
611#define QIB_7220_ErrClear_RcvHdrLenErrClear_RMASK 0x1
612#define QIB_7220_ErrClear_RcvBadTidErrClear_LSB 0xE
613#define QIB_7220_ErrClear_RcvBadTidErrClear_RMASK 0x1
614#define QIB_7220_ErrClear_RcvHdrFullErrClear_LSB 0xD
615#define QIB_7220_ErrClear_RcvHdrFullErrClear_RMASK 0x1
616#define QIB_7220_ErrClear_RcvEgrFullErrClear_LSB 0xC
617#define QIB_7220_ErrClear_RcvEgrFullErrClear_RMASK 0x1
618#define QIB_7220_ErrClear_RcvBadVersionErrClear_LSB 0xB
619#define QIB_7220_ErrClear_RcvBadVersionErrClear_RMASK 0x1
620#define QIB_7220_ErrClear_RcvIBFlowErrClear_LSB 0xA
621#define QIB_7220_ErrClear_RcvIBFlowErrClear_RMASK 0x1
622#define QIB_7220_ErrClear_RcvEBPErrClear_LSB 0x9
623#define QIB_7220_ErrClear_RcvEBPErrClear_RMASK 0x1
624#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
625#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
626#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
627#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
628#define QIB_7220_ErrClear_RcvShortPktLenErrClear_LSB 0x6
629#define QIB_7220_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
630#define QIB_7220_ErrClear_RcvLongPktLenErrClear_LSB 0x5
631#define QIB_7220_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
632#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
633#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
634#define QIB_7220_ErrClear_RcvMinPktLenErrClear_LSB 0x3
635#define QIB_7220_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
636#define QIB_7220_ErrClear_RcvICRCErrClear_LSB 0x2
637#define QIB_7220_ErrClear_RcvICRCErrClear_RMASK 0x1
638#define QIB_7220_ErrClear_RcvVCRCErrClear_LSB 0x1
639#define QIB_7220_ErrClear_RcvVCRCErrClear_RMASK 0x1
640#define QIB_7220_ErrClear_RcvFormatErrClear_LSB 0x0
641#define QIB_7220_ErrClear_RcvFormatErrClear_RMASK 0x1
642
643#define QIB_7220_HwErrMask_OFFS 0x98
644#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
645#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
646#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
647#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
648#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_LSB 0x3D
649#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_RMASK 0x1
650#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
651#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
652#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_LSB 0x3B
653#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_RMASK 0x1
654#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_LSB 0x3A
655#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_RMASK 0x1
656#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x39
657#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
658#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x38
659#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
660#define QIB_7220_HwErrMask_Reserved_LSB 0x37
661#define QIB_7220_HwErrMask_Reserved_RMASK 0x1
662#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
663#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
664#define QIB_7220_HwErrMask_Reserved1_LSB 0x33
665#define QIB_7220_HwErrMask_Reserved1_RMASK 0x7
666#define QIB_7220_HwErrMask_RXEMemParityErrMask_LSB 0x2C
667#define QIB_7220_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
668#define QIB_7220_HwErrMask_TXEMemParityErrMask_LSB 0x28
669#define QIB_7220_HwErrMask_TXEMemParityErrMask_RMASK 0xF
670#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_LSB 0x27
671#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_RMASK 0x1
672#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_LSB 0x26
673#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_RMASK 0x1
674#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_LSB 0x25
675#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_RMASK 0x1
676#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_LSB 0x24
677#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_RMASK 0x1
678#define QIB_7220_HwErrMask_Reserved2_LSB 0x22
679#define QIB_7220_HwErrMask_Reserved2_RMASK 0x3
680#define QIB_7220_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
681#define QIB_7220_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
682#define QIB_7220_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
683#define QIB_7220_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
684#define QIB_7220_HwErrMask_PoisonedTLPMask_LSB 0x1D
685#define QIB_7220_HwErrMask_PoisonedTLPMask_RMASK 0x1
686#define QIB_7220_HwErrMask_SDmaMemReadErrMask_LSB 0x1C
687#define QIB_7220_HwErrMask_SDmaMemReadErrMask_RMASK 0x1
688#define QIB_7220_HwErrMask_Reserved3_LSB 0x8
689#define QIB_7220_HwErrMask_Reserved3_RMASK 0xFFFFF
690#define QIB_7220_HwErrMask_PCIeMemParityErrMask_LSB 0x0
691#define QIB_7220_HwErrMask_PCIeMemParityErrMask_RMASK 0xFF
692
693#define QIB_7220_HwErrStatus_OFFS 0xA0
694#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
695#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
696#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
697#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
698#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_LSB 0x3D
699#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_RMASK 0x1
700#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
701#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
702#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_LSB 0x3B
703#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_RMASK 0x1
704#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_LSB 0x3A
705#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_RMASK 0x1
706#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x39
707#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
708#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x38
709#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
710#define QIB_7220_HwErrStatus_Reserved_LSB 0x37
711#define QIB_7220_HwErrStatus_Reserved_RMASK 0x1
712#define QIB_7220_HwErrStatus_PowerOnBISTFailed_LSB 0x36
713#define QIB_7220_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
714#define QIB_7220_HwErrStatus_Reserved1_LSB 0x33
715#define QIB_7220_HwErrStatus_Reserved1_RMASK 0x7
716#define QIB_7220_HwErrStatus_RXEMemParity_LSB 0x2C
717#define QIB_7220_HwErrStatus_RXEMemParity_RMASK 0x7F
718#define QIB_7220_HwErrStatus_TXEMemParity_LSB 0x28
719#define QIB_7220_HwErrStatus_TXEMemParity_RMASK 0xF
720#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_LSB 0x27
721#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_RMASK 0x1
722#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_LSB 0x26
723#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_RMASK 0x1
724#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_LSB 0x25
725#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_RMASK 0x1
726#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_LSB 0x24
727#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_RMASK 0x1
728#define QIB_7220_HwErrStatus_Reserved2_LSB 0x22
729#define QIB_7220_HwErrStatus_Reserved2_RMASK 0x3
730#define QIB_7220_HwErrStatus_PCIeBusParity_LSB 0x1F
731#define QIB_7220_HwErrStatus_PCIeBusParity_RMASK 0x7
732#define QIB_7220_HwErrStatus_PcieCplTimeout_LSB 0x1E
733#define QIB_7220_HwErrStatus_PcieCplTimeout_RMASK 0x1
734#define QIB_7220_HwErrStatus_PoisonedTLP_LSB 0x1D
735#define QIB_7220_HwErrStatus_PoisonedTLP_RMASK 0x1
736#define QIB_7220_HwErrStatus_SDmaMemReadErr_LSB 0x1C
737#define QIB_7220_HwErrStatus_SDmaMemReadErr_RMASK 0x1
738#define QIB_7220_HwErrStatus_Reserved3_LSB 0x8
739#define QIB_7220_HwErrStatus_Reserved3_RMASK 0xFFFFF
740#define QIB_7220_HwErrStatus_PCIeMemParity_LSB 0x0
741#define QIB_7220_HwErrStatus_PCIeMemParity_RMASK 0xFF
742
743#define QIB_7220_HwErrClear_OFFS 0xA8
744#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
745#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
746#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
747#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
748#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_LSB 0x3D
749#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_RMASK 0x1
750#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
751#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
752#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_LSB 0x3B
753#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_RMASK 0x1
754#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_LSB 0x3A
755#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_RMASK 0x1
756#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x39
757#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
758#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x38
759#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
760#define QIB_7220_HwErrClear_Reserved_LSB 0x37
761#define QIB_7220_HwErrClear_Reserved_RMASK 0x1
762#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
763#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
764#define QIB_7220_HwErrClear_Reserved1_LSB 0x33
765#define QIB_7220_HwErrClear_Reserved1_RMASK 0x7
766#define QIB_7220_HwErrClear_RXEMemParityClear_LSB 0x2C
767#define QIB_7220_HwErrClear_RXEMemParityClear_RMASK 0x7F
768#define QIB_7220_HwErrClear_TXEMemParityClear_LSB 0x28
769#define QIB_7220_HwErrClear_TXEMemParityClear_RMASK 0xF
770#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_LSB 0x27
771#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_RMASK 0x1
772#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_LSB 0x26
773#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_RMASK 0x1
774#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_LSB 0x25
775#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_RMASK 0x1
776#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_LSB 0x24
777#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_RMASK 0x1
778#define QIB_7220_HwErrClear_Reserved2_LSB 0x22
779#define QIB_7220_HwErrClear_Reserved2_RMASK 0x3
780#define QIB_7220_HwErrClear_PCIeBusParityClr_LSB 0x1F
781#define QIB_7220_HwErrClear_PCIeBusParityClr_RMASK 0x7
782#define QIB_7220_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
783#define QIB_7220_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
784#define QIB_7220_HwErrClear_PoisonedTLPClear_LSB 0x1D
785#define QIB_7220_HwErrClear_PoisonedTLPClear_RMASK 0x1
786#define QIB_7220_HwErrClear_SDmaMemReadErrClear_LSB 0x1C
787#define QIB_7220_HwErrClear_SDmaMemReadErrClear_RMASK 0x1
788#define QIB_7220_HwErrClear_Reserved3_LSB 0x8
789#define QIB_7220_HwErrClear_Reserved3_RMASK 0xFFFFF
790#define QIB_7220_HwErrClear_PCIeMemParityClr_LSB 0x0
791#define QIB_7220_HwErrClear_PCIeMemParityClr_RMASK 0xFF
792
793#define QIB_7220_HwDiagCtrl_OFFS 0xB0
794#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
795#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
796#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
797#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
798#define QIB_7220_HwDiagCtrl_CounterWrEnable_LSB 0x3D
799#define QIB_7220_HwDiagCtrl_CounterWrEnable_RMASK 0x1
800#define QIB_7220_HwDiagCtrl_CounterDisable_LSB 0x3C
801#define QIB_7220_HwDiagCtrl_CounterDisable_RMASK 0x1
802#define QIB_7220_HwDiagCtrl_Reserved_LSB 0x33
803#define QIB_7220_HwDiagCtrl_Reserved_RMASK 0x1FF
804#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
805#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
806#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
807#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
808#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_LSB 0x27
809#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_RMASK 0x1
810#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_LSB 0x26
811#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_RMASK 0x1
812#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_LSB 0x25
813#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_RMASK 0x1
814#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_LSB 0x24
815#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_RMASK 0x1
816#define QIB_7220_HwDiagCtrl_Reserved1_LSB 0x23
817#define QIB_7220_HwDiagCtrl_Reserved1_RMASK 0x1
818#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
819#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
820#define QIB_7220_HwDiagCtrl_Reserved2_LSB 0x8
821#define QIB_7220_HwDiagCtrl_Reserved2_RMASK 0x7FFFFF
822#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
823#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_RMASK 0xFF
824
825#define QIB_7220_REG_0000B8_OFFS 0xB8
826
827#define QIB_7220_IBCStatus_OFFS 0xC0
828#define QIB_7220_IBCStatus_TxCreditOk_LSB 0x1F
829#define QIB_7220_IBCStatus_TxCreditOk_RMASK 0x1
830#define QIB_7220_IBCStatus_TxReady_LSB 0x1E
831#define QIB_7220_IBCStatus_TxReady_RMASK 0x1
832#define QIB_7220_IBCStatus_Reserved_LSB 0xE
833#define QIB_7220_IBCStatus_Reserved_RMASK 0xFFFF
834#define QIB_7220_IBCStatus_IBTxLaneReversed_LSB 0xD
835#define QIB_7220_IBCStatus_IBTxLaneReversed_RMASK 0x1
836#define QIB_7220_IBCStatus_IBRxLaneReversed_LSB 0xC
837#define QIB_7220_IBCStatus_IBRxLaneReversed_RMASK 0x1
838#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_LSB 0xB
839#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_RMASK 0x1
840#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_LSB 0xA
841#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_RMASK 0x1
842#define QIB_7220_IBCStatus_LinkWidthActive_LSB 0x9
843#define QIB_7220_IBCStatus_LinkWidthActive_RMASK 0x1
844#define QIB_7220_IBCStatus_LinkSpeedActive_LSB 0x8
845#define QIB_7220_IBCStatus_LinkSpeedActive_RMASK 0x1
846#define QIB_7220_IBCStatus_LinkState_LSB 0x5
847#define QIB_7220_IBCStatus_LinkState_RMASK 0x7
848#define QIB_7220_IBCStatus_LinkTrainingState_LSB 0x0
849#define QIB_7220_IBCStatus_LinkTrainingState_RMASK 0x1F
850
851#define QIB_7220_IBCCtrl_OFFS 0xC8
852#define QIB_7220_IBCCtrl_Loopback_LSB 0x3F
853#define QIB_7220_IBCCtrl_Loopback_RMASK 0x1
854#define QIB_7220_IBCCtrl_LinkDownDefaultState_LSB 0x3E
855#define QIB_7220_IBCCtrl_LinkDownDefaultState_RMASK 0x1
856#define QIB_7220_IBCCtrl_Reserved_LSB 0x2B
857#define QIB_7220_IBCCtrl_Reserved_RMASK 0x7FFFF
858#define QIB_7220_IBCCtrl_CreditScale_LSB 0x28
859#define QIB_7220_IBCCtrl_CreditScale_RMASK 0x7
860#define QIB_7220_IBCCtrl_OverrunThreshold_LSB 0x24
861#define QIB_7220_IBCCtrl_OverrunThreshold_RMASK 0xF
862#define QIB_7220_IBCCtrl_PhyerrThreshold_LSB 0x20
863#define QIB_7220_IBCCtrl_PhyerrThreshold_RMASK 0xF
864#define QIB_7220_IBCCtrl_MaxPktLen_LSB 0x15
865#define QIB_7220_IBCCtrl_MaxPktLen_RMASK 0x7FF
866#define QIB_7220_IBCCtrl_LinkCmd_LSB 0x13
867#define QIB_7220_IBCCtrl_LinkCmd_RMASK 0x3
868#define QIB_7220_IBCCtrl_LinkInitCmd_LSB 0x10
869#define QIB_7220_IBCCtrl_LinkInitCmd_RMASK 0x7
870#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
871#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
872#define QIB_7220_IBCCtrl_FlowCtrlPeriod_LSB 0x0
873#define QIB_7220_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
874
875#define QIB_7220_EXTStatus_OFFS 0xD0
876#define QIB_7220_EXTStatus_GPIOIn_LSB 0x30
877#define QIB_7220_EXTStatus_GPIOIn_RMASK 0xFFFF
878#define QIB_7220_EXTStatus_Reserved_LSB 0x20
879#define QIB_7220_EXTStatus_Reserved_RMASK 0xFFFF
880#define QIB_7220_EXTStatus_Reserved1_LSB 0x10
881#define QIB_7220_EXTStatus_Reserved1_RMASK 0xFFFF
882#define QIB_7220_EXTStatus_MemBISTDisabled_LSB 0xF
883#define QIB_7220_EXTStatus_MemBISTDisabled_RMASK 0x1
884#define QIB_7220_EXTStatus_MemBISTEndTest_LSB 0xE
885#define QIB_7220_EXTStatus_MemBISTEndTest_RMASK 0x1
886#define QIB_7220_EXTStatus_Reserved2_LSB 0x0
887#define QIB_7220_EXTStatus_Reserved2_RMASK 0x3FFF
888
889#define QIB_7220_EXTCtrl_OFFS 0xD8
890#define QIB_7220_EXTCtrl_GPIOOe_LSB 0x30
891#define QIB_7220_EXTCtrl_GPIOOe_RMASK 0xFFFF
892#define QIB_7220_EXTCtrl_GPIOInvert_LSB 0x20
893#define QIB_7220_EXTCtrl_GPIOInvert_RMASK 0xFFFF
894#define QIB_7220_EXTCtrl_Reserved_LSB 0x4
895#define QIB_7220_EXTCtrl_Reserved_RMASK 0xFFFFFFF
896#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
897#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
898#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
899#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
900#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
901#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
902#define QIB_7220_EXTCtrl_LEDGblErrRedOff_LSB 0x0
903#define QIB_7220_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
904
905#define QIB_7220_GPIOOut_OFFS 0xE0
906
907#define QIB_7220_GPIOMask_OFFS 0xE8
908
909#define QIB_7220_GPIOStatus_OFFS 0xF0
910
911#define QIB_7220_GPIOClear_OFFS 0xF8
912
913#define QIB_7220_RcvCtrl_OFFS 0x100
914#define QIB_7220_RcvCtrl_Reserved_LSB 0x27
915#define QIB_7220_RcvCtrl_Reserved_RMASK 0x1FFFFFF
916#define QIB_7220_RcvCtrl_RcvQPMapEnable_LSB 0x26
917#define QIB_7220_RcvCtrl_RcvQPMapEnable_RMASK 0x1
918#define QIB_7220_RcvCtrl_PortCfg_LSB 0x24
919#define QIB_7220_RcvCtrl_PortCfg_RMASK 0x3
920#define QIB_7220_RcvCtrl_TailUpd_LSB 0x23
921#define QIB_7220_RcvCtrl_TailUpd_RMASK 0x1
922#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_LSB 0x22
923#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
924#define QIB_7220_RcvCtrl_IntrAvail_LSB 0x11
925#define QIB_7220_RcvCtrl_IntrAvail_RMASK 0x1FFFF
926#define QIB_7220_RcvCtrl_PortEnable_LSB 0x0
927#define QIB_7220_RcvCtrl_PortEnable_RMASK 0x1FFFF
928
929#define QIB_7220_RcvBTHQP_OFFS 0x108
930#define QIB_7220_RcvBTHQP_Reserved_LSB 0x18
931#define QIB_7220_RcvBTHQP_Reserved_RMASK 0xFF
932#define QIB_7220_RcvBTHQP_RcvBTHQP_LSB 0x0
933#define QIB_7220_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
934
935#define QIB_7220_RcvHdrSize_OFFS 0x110
936
937#define QIB_7220_RcvHdrCnt_OFFS 0x118
938
939#define QIB_7220_RcvHdrEntSize_OFFS 0x120
940
941#define QIB_7220_RcvTIDBase_OFFS 0x128
942
943#define QIB_7220_RcvTIDCnt_OFFS 0x130
944
945#define QIB_7220_RcvEgrBase_OFFS 0x138
946
947#define QIB_7220_RcvEgrCnt_OFFS 0x140
948
949#define QIB_7220_RcvBufBase_OFFS 0x148
950
951#define QIB_7220_RcvBufSize_OFFS 0x150
952
953#define QIB_7220_RxIntMemBase_OFFS 0x158
954
955#define QIB_7220_RxIntMemSize_OFFS 0x160
956
957#define QIB_7220_RcvPartitionKey_OFFS 0x168
958
959#define QIB_7220_RcvQPMulticastPort_OFFS 0x170
960#define QIB_7220_RcvQPMulticastPort_Reserved_LSB 0x5
961#define QIB_7220_RcvQPMulticastPort_Reserved_RMASK 0x7FFFFFFFFFFFFFF
962#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_LSB 0x0
963#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_RMASK 0x1F
964
965#define QIB_7220_RcvPktLEDCnt_OFFS 0x178
966#define QIB_7220_RcvPktLEDCnt_ONperiod_LSB 0x20
967#define QIB_7220_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
968#define QIB_7220_RcvPktLEDCnt_OFFperiod_LSB 0x0
969#define QIB_7220_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
970
971#define QIB_7220_IBCDDRCtrl_OFFS 0x180
972#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_LSB 0x30
973#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_RMASK 0xFFFF
974#define QIB_7220_IBCDDRCtrl_IB_DLID_LSB 0x20
975#define QIB_7220_IBCDDRCtrl_IB_DLID_RMASK 0xFFFF
976#define QIB_7220_IBCDDRCtrl_Reserved_LSB 0x1B
977#define QIB_7220_IBCDDRCtrl_Reserved_RMASK 0x1F
978#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_LSB 0x1A
979#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_RMASK 0x1
980#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_LSB 0x12
981#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_RMASK 0xFF
982#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_LSB 0x11
983#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_RMASK 0x1
984#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_LSB 0x10
985#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_RMASK 0x1
986#define QIB_7220_IBCDDRCtrl_SD_DDS_LSB 0xC
987#define QIB_7220_IBCDDRCtrl_SD_DDS_RMASK 0xF
988#define QIB_7220_IBCDDRCtrl_SD_DDSV_LSB 0xB
989#define QIB_7220_IBCDDRCtrl_SD_DDSV_RMASK 0x1
990#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_LSB 0xA
991#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_RMASK 0x1
992#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_LSB 0x9
993#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_RMASK 0x1
994#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_LSB 0x8
995#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_RMASK 0x1
996#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_LSB 0x7
997#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_RMASK 0x1
998#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_LSB 0x5
999#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_RMASK 0x3
1000#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_LSB 0x4
1001#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_RMASK 0x1
1002#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_LSB 0x3
1003#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_RMASK 0x1
1004#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_LSB 0x2
1005#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_RMASK 0x1
1006#define QIB_7220_IBCDDRCtrl_SD_SPEED_LSB 0x1
1007#define QIB_7220_IBCDDRCtrl_SD_SPEED_RMASK 0x1
1008#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_LSB 0x0
1009#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_RMASK 0x1
1010
1011#define QIB_7220_HRTBT_GUID_OFFS 0x188
1012
1013#define QIB_7220_IBCDDRCtrl2_OFFS 0x1A0
1014#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_LSB 0x5
1015#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_RMASK 0x1F
1016#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_LSB 0x0
1017#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_RMASK 0x1F
1018
1019#define QIB_7220_IBCDDRStatus_OFFS 0x1A8
1020#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_LSB 0x24
1021#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_RMASK 0x1
1022#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_LSB 0x20
1023#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_RMASK 0xF
1024#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_LSB 0x1E
1025#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_RMASK 0x3
1026#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_LSB 0x1A
1027#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_RMASK 0xF
1028#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_LSB 0x0
1029#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_RMASK 0x3FFFFFF
1030
1031#define QIB_7220_JIntReload_OFFS 0x1B0
1032#define QIB_7220_JIntReload_J_limit_reload_LSB 0x10
1033#define QIB_7220_JIntReload_J_limit_reload_RMASK 0xFFFF
1034#define QIB_7220_JIntReload_J_reload_LSB 0x0
1035#define QIB_7220_JIntReload_J_reload_RMASK 0xFFFF
1036
1037#define QIB_7220_IBNCModeCtrl_OFFS 0x1B8
1038#define QIB_7220_IBNCModeCtrl_Reserved_LSB 0x1A
1039#define QIB_7220_IBNCModeCtrl_Reserved_RMASK 0x3FFFFFFFFF
1040#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_LSB 0x11
1041#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_RMASK 0x1FF
1042#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_LSB 0x8
1043#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_RMASK 0x1FF
1044#define QIB_7220_IBNCModeCtrl_Reserved1_LSB 0x3
1045#define QIB_7220_IBNCModeCtrl_Reserved1_RMASK 0x1F
1046#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_LSB 0x2
1047#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
1048#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_LSB 0x1
1049#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_RMASK 0x1
1050#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_LSB 0x0
1051#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_RMASK 0x1
1052
1053#define QIB_7220_SendCtrl_OFFS 0x1C0
1054#define QIB_7220_SendCtrl_Disarm_LSB 0x1F
1055#define QIB_7220_SendCtrl_Disarm_RMASK 0x1
1056#define QIB_7220_SendCtrl_Reserved_LSB 0x1D
1057#define QIB_7220_SendCtrl_Reserved_RMASK 0x3
1058#define QIB_7220_SendCtrl_AvailUpdThld_LSB 0x18
1059#define QIB_7220_SendCtrl_AvailUpdThld_RMASK 0x1F
1060#define QIB_7220_SendCtrl_DisarmPIOBuf_LSB 0x10
1061#define QIB_7220_SendCtrl_DisarmPIOBuf_RMASK 0xFF
1062#define QIB_7220_SendCtrl_Reserved1_LSB 0xD
1063#define QIB_7220_SendCtrl_Reserved1_RMASK 0x7
1064#define QIB_7220_SendCtrl_SDmaHalt_LSB 0xC
1065#define QIB_7220_SendCtrl_SDmaHalt_RMASK 0x1
1066#define QIB_7220_SendCtrl_SDmaEnable_LSB 0xB
1067#define QIB_7220_SendCtrl_SDmaEnable_RMASK 0x1
1068#define QIB_7220_SendCtrl_SDmaSingleDescriptor_LSB 0xA
1069#define QIB_7220_SendCtrl_SDmaSingleDescriptor_RMASK 0x1
1070#define QIB_7220_SendCtrl_SDmaIntEnable_LSB 0x9
1071#define QIB_7220_SendCtrl_SDmaIntEnable_RMASK 0x1
1072#define QIB_7220_SendCtrl_Reserved2_LSB 0x5
1073#define QIB_7220_SendCtrl_Reserved2_RMASK 0xF
1074#define QIB_7220_SendCtrl_SSpecialTriggerEn_LSB 0x4
1075#define QIB_7220_SendCtrl_SSpecialTriggerEn_RMASK 0x1
1076#define QIB_7220_SendCtrl_SPioEnable_LSB 0x3
1077#define QIB_7220_SendCtrl_SPioEnable_RMASK 0x1
1078#define QIB_7220_SendCtrl_SendBufAvailUpd_LSB 0x2
1079#define QIB_7220_SendCtrl_SendBufAvailUpd_RMASK 0x1
1080#define QIB_7220_SendCtrl_SendIntBufAvail_LSB 0x1
1081#define QIB_7220_SendCtrl_SendIntBufAvail_RMASK 0x1
1082#define QIB_7220_SendCtrl_Abort_LSB 0x0
1083#define QIB_7220_SendCtrl_Abort_RMASK 0x1
1084
1085#define QIB_7220_SendBufBase_OFFS 0x1C8
1086#define QIB_7220_SendBufBase_Reserved_LSB 0x35
1087#define QIB_7220_SendBufBase_Reserved_RMASK 0x7FF
1088#define QIB_7220_SendBufBase_BaseAddr_LargePIO_LSB 0x20
1089#define QIB_7220_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
1090#define QIB_7220_SendBufBase_Reserved1_LSB 0x15
1091#define QIB_7220_SendBufBase_Reserved1_RMASK 0x7FF
1092#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
1093#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
1094
1095#define QIB_7220_SendBufSize_OFFS 0x1D0
1096#define QIB_7220_SendBufSize_Reserved_LSB 0x2D
1097#define QIB_7220_SendBufSize_Reserved_RMASK 0xFFFFF
1098#define QIB_7220_SendBufSize_Size_LargePIO_LSB 0x20
1099#define QIB_7220_SendBufSize_Size_LargePIO_RMASK 0x1FFF
1100#define QIB_7220_SendBufSize_Reserved1_LSB 0xC
1101#define QIB_7220_SendBufSize_Reserved1_RMASK 0xFFFFF
1102#define QIB_7220_SendBufSize_Size_SmallPIO_LSB 0x0
1103#define QIB_7220_SendBufSize_Size_SmallPIO_RMASK 0xFFF
1104
1105#define QIB_7220_SendBufCnt_OFFS 0x1D8
1106#define QIB_7220_SendBufCnt_Reserved_LSB 0x24
1107#define QIB_7220_SendBufCnt_Reserved_RMASK 0xFFFFFFF
1108#define QIB_7220_SendBufCnt_Num_LargeBuffers_LSB 0x20
1109#define QIB_7220_SendBufCnt_Num_LargeBuffers_RMASK 0xF
1110#define QIB_7220_SendBufCnt_Reserved1_LSB 0x9
1111#define QIB_7220_SendBufCnt_Reserved1_RMASK 0x7FFFFF
1112#define QIB_7220_SendBufCnt_Num_SmallBuffers_LSB 0x0
1113#define QIB_7220_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
1114
1115#define QIB_7220_SendBufAvailAddr_OFFS 0x1E0
1116#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
1117#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
1118#define QIB_7220_SendBufAvailAddr_Reserved_LSB 0x0
1119#define QIB_7220_SendBufAvailAddr_Reserved_RMASK 0x3F
1120
1121#define QIB_7220_TxIntMemBase_OFFS 0x1E8
1122
1123#define QIB_7220_TxIntMemSize_OFFS 0x1F0
1124
1125#define QIB_7220_SendDmaBase_OFFS 0x1F8
1126#define QIB_7220_SendDmaBase_Reserved_LSB 0x30
1127#define QIB_7220_SendDmaBase_Reserved_RMASK 0xFFFF
1128#define QIB_7220_SendDmaBase_SendDmaBase_LSB 0x0
1129#define QIB_7220_SendDmaBase_SendDmaBase_RMASK 0xFFFFFFFFFFFF
1130
1131#define QIB_7220_SendDmaLenGen_OFFS 0x200
1132#define QIB_7220_SendDmaLenGen_Reserved_LSB 0x13
1133#define QIB_7220_SendDmaLenGen_Reserved_RMASK 0x1FFFFFFFFFFF
1134#define QIB_7220_SendDmaLenGen_Generation_LSB 0x10
1135#define QIB_7220_SendDmaLenGen_Generation_MSB 0x12
1136#define QIB_7220_SendDmaLenGen_Generation_RMASK 0x7
1137#define QIB_7220_SendDmaLenGen_Length_LSB 0x0
1138#define QIB_7220_SendDmaLenGen_Length_RMASK 0xFFFF
1139
1140#define QIB_7220_SendDmaTail_OFFS 0x208
1141#define QIB_7220_SendDmaTail_Reserved_LSB 0x10
1142#define QIB_7220_SendDmaTail_Reserved_RMASK 0xFFFFFFFFFFFF
1143#define QIB_7220_SendDmaTail_SendDmaTail_LSB 0x0
1144#define QIB_7220_SendDmaTail_SendDmaTail_RMASK 0xFFFF
1145
1146#define QIB_7220_SendDmaHead_OFFS 0x210
1147#define QIB_7220_SendDmaHead_Reserved_LSB 0x30
1148#define QIB_7220_SendDmaHead_Reserved_RMASK 0xFFFF
1149#define QIB_7220_SendDmaHead_InternalSendDmaHead_LSB 0x20
1150#define QIB_7220_SendDmaHead_InternalSendDmaHead_RMASK 0xFFFF
1151#define QIB_7220_SendDmaHead_Reserved1_LSB 0x10
1152#define QIB_7220_SendDmaHead_Reserved1_RMASK 0xFFFF
1153#define QIB_7220_SendDmaHead_SendDmaHead_LSB 0x0
1154#define QIB_7220_SendDmaHead_SendDmaHead_RMASK 0xFFFF
1155
1156#define QIB_7220_SendDmaHeadAddr_OFFS 0x218
1157#define QIB_7220_SendDmaHeadAddr_Reserved_LSB 0x30
1158#define QIB_7220_SendDmaHeadAddr_Reserved_RMASK 0xFFFF
1159#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_LSB 0x0
1160#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
1161
1162#define QIB_7220_SendDmaBufMask0_OFFS 0x220
1163#define QIB_7220_SendDmaBufMask0_BufMask_63_0_LSB 0x0
1164#define QIB_7220_SendDmaBufMask0_BufMask_63_0_RMASK 0x0
1165
1166#define QIB_7220_SendDmaStatus_OFFS 0x238
1167#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_LSB 0x3F
1168#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_RMASK 0x1
1169#define QIB_7220_SendDmaStatus_AbortInProg_LSB 0x3E
1170#define QIB_7220_SendDmaStatus_AbortInProg_RMASK 0x1
1171#define QIB_7220_SendDmaStatus_InternalSDmaEnable_LSB 0x3D
1172#define QIB_7220_SendDmaStatus_InternalSDmaEnable_RMASK 0x1
1173#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_LSB 0x2F
1174#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_RMASK 0x3FFF
1175#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_LSB 0x28
1176#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_RMASK 0x7F
1177#define QIB_7220_SendDmaStatus_RpyTag_7_0_LSB 0x20
1178#define QIB_7220_SendDmaStatus_RpyTag_7_0_RMASK 0xFF
1179#define QIB_7220_SendDmaStatus_ScbFull_LSB 0x1F
1180#define QIB_7220_SendDmaStatus_ScbFull_RMASK 0x1
1181#define QIB_7220_SendDmaStatus_ScbEmpty_LSB 0x1E
1182#define QIB_7220_SendDmaStatus_ScbEmpty_RMASK 0x1
1183#define QIB_7220_SendDmaStatus_ScbEntryValid_LSB 0x1D
1184#define QIB_7220_SendDmaStatus_ScbEntryValid_RMASK 0x1
1185#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_LSB 0x1C
1186#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_RMASK 0x1
1187#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_LSB 0x1B
1188#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_RMASK 0x1
1189#define QIB_7220_SendDmaStatus_SplFifoDisarmed_LSB 0x1A
1190#define QIB_7220_SendDmaStatus_SplFifoDisarmed_RMASK 0x1
1191#define QIB_7220_SendDmaStatus_SplFifoEmpty_LSB 0x19
1192#define QIB_7220_SendDmaStatus_SplFifoEmpty_RMASK 0x1
1193#define QIB_7220_SendDmaStatus_SplFifoFull_LSB 0x18
1194#define QIB_7220_SendDmaStatus_SplFifoFull_RMASK 0x1
1195#define QIB_7220_SendDmaStatus_SplFifoBufNum_LSB 0x10
1196#define QIB_7220_SendDmaStatus_SplFifoBufNum_RMASK 0xFF
1197#define QIB_7220_SendDmaStatus_SplFifoDescIndex_LSB 0x0
1198#define QIB_7220_SendDmaStatus_SplFifoDescIndex_RMASK 0xFFFF
1199
1200#define QIB_7220_SendBufErr0_OFFS 0x240
1201#define QIB_7220_SendBufErr0_SendBufErr_63_0_LSB 0x0
1202#define QIB_7220_SendBufErr0_SendBufErr_63_0_RMASK 0x0
1203
1204#define QIB_7220_RcvHdrAddr0_OFFS 0x270
1205#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
1206#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
1207#define QIB_7220_RcvHdrAddr0_Reserved_LSB 0x0
1208#define QIB_7220_RcvHdrAddr0_Reserved_RMASK 0x3
1209
1210#define QIB_7220_RcvHdrTailAddr0_OFFS 0x300
1211#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
1212#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
1213#define QIB_7220_RcvHdrTailAddr0_Reserved_LSB 0x0
1214#define QIB_7220_RcvHdrTailAddr0_Reserved_RMASK 0x3
1215
1216#define QIB_7220_ibsd_epb_access_ctrl_OFFS 0x3C0
1217#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_LSB 0x8
1218#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_RMASK 0x1
1219#define QIB_7220_ibsd_epb_access_ctrl_Reserved_LSB 0x1
1220#define QIB_7220_ibsd_epb_access_ctrl_Reserved_RMASK 0x7F
1221#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_LSB 0x0
1222#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_RMASK 0x1
1223
1224#define QIB_7220_ibsd_epb_transaction_reg_OFFS 0x3C8
1225#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_LSB 0x1F
1226#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_RMASK 0x1
1227#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_LSB 0x1E
1228#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_RMASK 0x1
1229#define QIB_7220_ibsd_epb_transaction_reg_Reserved_LSB 0x1D
1230#define QIB_7220_ibsd_epb_transaction_reg_Reserved_RMASK 0x1
1231#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_LSB 0x1C
1232#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_RMASK 0x1
1233#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_LSB 0x1B
1234#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_RMASK 0x1
1235#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_LSB 0x19
1236#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_RMASK 0x3
1237#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_LSB 0x18
1238#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_RMASK 0x1
1239#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_LSB 0x17
1240#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_RMASK 0x1
1241#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_LSB 0x8
1242#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_RMASK 0x7FFF
1243#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_LSB 0x0
1244#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_RMASK 0xFF
1245
1246#define QIB_7220_XGXSCfg_OFFS 0x3D8
1247#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_LSB 0x3F
1248#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_RMASK 0x1
1249#define QIB_7220_XGXSCfg_Reserved_LSB 0x13
1250#define QIB_7220_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFFF
1251#define QIB_7220_XGXSCfg_link_sync_mask_LSB 0x9
1252#define QIB_7220_XGXSCfg_link_sync_mask_RMASK 0x3FF
1253#define QIB_7220_XGXSCfg_Reserved1_LSB 0x3
1254#define QIB_7220_XGXSCfg_Reserved1_RMASK 0x3F
1255#define QIB_7220_XGXSCfg_xcv_reset_LSB 0x2
1256#define QIB_7220_XGXSCfg_xcv_reset_RMASK 0x1
1257#define QIB_7220_XGXSCfg_Reserved2_LSB 0x1
1258#define QIB_7220_XGXSCfg_Reserved2_RMASK 0x1
1259#define QIB_7220_XGXSCfg_tx_rx_reset_LSB 0x0
1260#define QIB_7220_XGXSCfg_tx_rx_reset_RMASK 0x1
1261
1262#define QIB_7220_IBSerDesCtrl_OFFS 0x3E0
1263#define QIB_7220_IBSerDesCtrl_Reserved_LSB 0x2D
1264#define QIB_7220_IBSerDesCtrl_Reserved_RMASK 0x7FFFF
1265#define QIB_7220_IBSerDesCtrl_INT_uC_LSB 0x2C
1266#define QIB_7220_IBSerDesCtrl_INT_uC_RMASK 0x1
1267#define QIB_7220_IBSerDesCtrl_CKSEL_uC_LSB 0x2A
1268#define QIB_7220_IBSerDesCtrl_CKSEL_uC_RMASK 0x3
1269#define QIB_7220_IBSerDesCtrl_PLLN_LSB 0x28
1270#define QIB_7220_IBSerDesCtrl_PLLN_RMASK 0x3
1271#define QIB_7220_IBSerDesCtrl_PLLM_LSB 0x25
1272#define QIB_7220_IBSerDesCtrl_PLLM_RMASK 0x7
1273#define QIB_7220_IBSerDesCtrl_TXOBPD_LSB 0x24
1274#define QIB_7220_IBSerDesCtrl_TXOBPD_RMASK 0x1
1275#define QIB_7220_IBSerDesCtrl_TWC_LSB 0x23
1276#define QIB_7220_IBSerDesCtrl_TWC_RMASK 0x1
1277#define QIB_7220_IBSerDesCtrl_RXIDLE_LSB 0x22
1278#define QIB_7220_IBSerDesCtrl_RXIDLE_RMASK 0x1
1279#define QIB_7220_IBSerDesCtrl_RXINV_LSB 0x21
1280#define QIB_7220_IBSerDesCtrl_RXINV_RMASK 0x1
1281#define QIB_7220_IBSerDesCtrl_TXINV_LSB 0x20
1282#define QIB_7220_IBSerDesCtrl_TXINV_RMASK 0x1
1283#define QIB_7220_IBSerDesCtrl_Reserved1_LSB 0x12
1284#define QIB_7220_IBSerDesCtrl_Reserved1_RMASK 0x3FFF
1285#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_LSB 0xD
1286#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_RMASK 0x1F
1287#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_LSB 0x8
1288#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_RMASK 0x1F
1289#define QIB_7220_IBSerDesCtrl_Reserved2_LSB 0x1
1290#define QIB_7220_IBSerDesCtrl_Reserved2_RMASK 0x7F
1291#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_LSB 0x0
1292#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_RMASK 0x1
1293
1294#define QIB_7220_pciesd_epb_access_ctrl_OFFS 0x400
1295#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_LSB 0x8
1296#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_RMASK 0x1
1297#define QIB_7220_pciesd_epb_access_ctrl_Reserved_LSB 0x3
1298#define QIB_7220_pciesd_epb_access_ctrl_Reserved_RMASK 0x1F
1299#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_LSB 0x1
1300#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_RMASK 0x3
1301#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_LSB 0x0
1302#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_RMASK 0x1
1303
1304#define QIB_7220_pciesd_epb_transaction_reg_OFFS 0x408
1305#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_LSB 0x1F
1306#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_RMASK 0x1
1307#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_LSB 0x1E
1308#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_RMASK 0x1
1309#define QIB_7220_pciesd_epb_transaction_reg_Reserved_LSB 0x1D
1310#define QIB_7220_pciesd_epb_transaction_reg_Reserved_RMASK 0x1
1311#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_LSB 0x1C
1312#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_RMASK 0x1
1313#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_LSB 0x19
1314#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_RMASK 0x7
1315#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_LSB 0x18
1316#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_RMASK 0x1
1317#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_LSB 0x17
1318#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_RMASK 0x1
1319#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_LSB 0x8
1320#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_RMASK 0x7FFF
1321#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_LSB 0x0
1322#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_RMASK 0xFF
1323
1324#define QIB_7220_SerDes_DDSRXEQ0_OFFS 0x500
1325#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_LSB 0x4
1326#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_RMASK 0x3F
1327#define QIB_7220_SerDes_DDSRXEQ0_element_num_LSB 0x0
1328#define QIB_7220_SerDes_DDSRXEQ0_element_num_RMASK 0xF
1329
1330#define QIB_7220_LBIntCnt_OFFS 0x13000
1331
1332#define QIB_7220_LBFlowStallCnt_OFFS 0x13008
1333
1334#define QIB_7220_TxSDmaDescCnt_OFFS 0x13010
1335
1336#define QIB_7220_TxUnsupVLErrCnt_OFFS 0x13018
1337
1338#define QIB_7220_TxDataPktCnt_OFFS 0x13020
1339
1340#define QIB_7220_TxFlowPktCnt_OFFS 0x13028
1341
1342#define QIB_7220_TxDwordCnt_OFFS 0x13030
1343
1344#define QIB_7220_TxLenErrCnt_OFFS 0x13038
1345
1346#define QIB_7220_TxMaxMinLenErrCnt_OFFS 0x13040
1347
1348#define QIB_7220_TxUnderrunCnt_OFFS 0x13048
1349
1350#define QIB_7220_TxFlowStallCnt_OFFS 0x13050
1351
1352#define QIB_7220_TxDroppedPktCnt_OFFS 0x13058
1353
1354#define QIB_7220_RxDroppedPktCnt_OFFS 0x13060
1355
1356#define QIB_7220_RxDataPktCnt_OFFS 0x13068
1357
1358#define QIB_7220_RxFlowPktCnt_OFFS 0x13070
1359
1360#define QIB_7220_RxDwordCnt_OFFS 0x13078
1361
1362#define QIB_7220_RxLenErrCnt_OFFS 0x13080
1363
1364#define QIB_7220_RxMaxMinLenErrCnt_OFFS 0x13088
1365
1366#define QIB_7220_RxICRCErrCnt_OFFS 0x13090
1367
1368#define QIB_7220_RxVCRCErrCnt_OFFS 0x13098
1369
1370#define QIB_7220_RxFlowCtrlViolCnt_OFFS 0x130A0
1371
1372#define QIB_7220_RxVersionErrCnt_OFFS 0x130A8
1373
1374#define QIB_7220_RxLinkMalformCnt_OFFS 0x130B0
1375
1376#define QIB_7220_RxEBPCnt_OFFS 0x130B8
1377
1378#define QIB_7220_RxLPCRCErrCnt_OFFS 0x130C0
1379
1380#define QIB_7220_RxBufOvflCnt_OFFS 0x130C8
1381
1382#define QIB_7220_RxTIDFullErrCnt_OFFS 0x130D0
1383
1384#define QIB_7220_RxTIDValidErrCnt_OFFS 0x130D8
1385
1386#define QIB_7220_RxPKeyMismatchCnt_OFFS 0x130E0
1387
1388#define QIB_7220_RxP0HdrEgrOvflCnt_OFFS 0x130E8
1389
1390#define QIB_7220_IBStatusChangeCnt_OFFS 0x13170
1391
1392#define QIB_7220_IBLinkErrRecoveryCnt_OFFS 0x13178
1393
1394#define QIB_7220_IBLinkDownedCnt_OFFS 0x13180
1395
1396#define QIB_7220_IBSymbolErrCnt_OFFS 0x13188
1397
1398#define QIB_7220_RxVL15DroppedPktCnt_OFFS 0x13190
1399
1400#define QIB_7220_RxOtherLocalPhyErrCnt_OFFS 0x13198
1401
1402#define QIB_7220_PcieRetryBufDiagQwordCnt_OFFS 0x131A0
1403
1404#define QIB_7220_ExcessBufferOvflCnt_OFFS 0x131A8
1405
1406#define QIB_7220_LocalLinkIntegrityErrCnt_OFFS 0x131B0
1407
1408#define QIB_7220_RxVlErrCnt_OFFS 0x131B8
1409
1410#define QIB_7220_RxDlidFltrCnt_OFFS 0x131C0
1411
1412#define QIB_7220_CNT_0131C8_OFFS 0x131C8
1413
1414#define QIB_7220_PSStat_OFFS 0x13200
1415
1416#define QIB_7220_PSStart_OFFS 0x13208
1417
1418#define QIB_7220_PSInterval_OFFS 0x13210
1419
1420#define QIB_7220_PSRcvDataCount_OFFS 0x13218
1421
1422#define QIB_7220_PSRcvPktsCount_OFFS 0x13220
1423
1424#define QIB_7220_PSXmitDataCount_OFFS 0x13228
1425
1426#define QIB_7220_PSXmitPktsCount_OFFS 0x13230
1427
1428#define QIB_7220_PSXmitWaitCount_OFFS 0x13238
1429
1430#define QIB_7220_CNT_013240_OFFS 0x13240
1431
1432#define QIB_7220_RcvEgrArray_OFFS 0x14000
1433
1434#define QIB_7220_MEM_038000_OFFS 0x38000
1435
1436#define QIB_7220_RcvTIDArray0_OFFS 0x53000
1437
1438#define QIB_7220_PIOLaunchFIFO_OFFS 0x64000
1439
1440#define QIB_7220_MEM_064480_OFFS 0x64480
1441
1442#define QIB_7220_SendPIOpbcCache_OFFS 0x64800
1443
1444#define QIB_7220_MEM_064C80_OFFS 0x64C80
1445
1446#define QIB_7220_PreLaunchFIFO_OFFS 0x65000
1447
1448#define QIB_7220_MEM_065080_OFFS 0x65080
1449
1450#define QIB_7220_ScoreBoard_OFFS 0x65400
1451
1452#define QIB_7220_MEM_065440_OFFS 0x65440
1453
1454#define QIB_7220_DescriptorFIFO_OFFS 0x65800
1455
1456#define QIB_7220_MEM_065880_OFFS 0x65880
1457
1458#define QIB_7220_RcvBuf1_OFFS 0x72000
1459
1460#define QIB_7220_MEM_074800_OFFS 0x74800
1461
1462#define QIB_7220_RcvBuf2_OFFS 0x75000
1463
1464#define QIB_7220_MEM_076400_OFFS 0x76400
1465
1466#define QIB_7220_RcvFlags_OFFS 0x77000
1467
1468#define QIB_7220_MEM_078400_OFFS 0x78400
1469
1470#define QIB_7220_RcvLookupBuf1_OFFS 0x79000
1471
1472#define QIB_7220_MEM_07A400_OFFS 0x7A400
1473
1474#define QIB_7220_RcvDMADatBuf_OFFS 0x7B000
1475
1476#define QIB_7220_RcvDMAHdrBuf_OFFS 0x7B800
1477
1478#define QIB_7220_MiscRXEIntMem_OFFS 0x7C000
1479
1480#define QIB_7220_MEM_07D400_OFFS 0x7D400
1481
1482#define QIB_7220_PCIERcvBuf_OFFS 0x80000
1483
1484#define QIB_7220_PCIERetryBuf_OFFS 0x84000
1485
1486#define QIB_7220_PCIERcvBufRdToWrAddr_OFFS 0x88000
1487
1488#define QIB_7220_PCIECplBuf_OFFS 0x90000
1489
1490#define QIB_7220_IBSerDesMappTable_OFFS 0x94000
1491
1492#define QIB_7220_MEM_095000_OFFS 0x95000
1493
1494#define QIB_7220_SendBuf0_MA_OFFS 0x100000
1495
1496#define QIB_7220_MEM_1A0000_OFFS 0x1A0000
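Taken together, these *_OFFS/*_LSB/*_RMASK triplets encode one access convention: _OFFS is the register's byte offset within the chip's memory-mapped BAR, _LSB is the starting bit of a field, and _RMASK is the mask to apply after shifting the 64-bit register value right by _LSB. A minimal sketch of a field read under that convention follows; the QIB_READ_FIELD macro, the qib_example_link_state() helper, and the raw readq() on an ioremap()ed base are illustrative assumptions, not the qib driver's actual accessors, and readq() presumes a 64-bit build.

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: read one named bit-field out of a 64-bit chip
 * register, following the _OFFS/_LSB/_RMASK naming scheme above.
 * "base" is assumed to be the ioremap()ed start of the device
 * register BAR; the real driver goes through its own read/write
 * wrappers instead.
 */
#define QIB_READ_FIELD(base, reg, field)				\
	((readq((base) + QIB_7220_##reg##_OFFS) >>			\
	  QIB_7220_##reg##_##field##_LSB) &				\
	 QIB_7220_##reg##_##field##_RMASK)

/* Example: pull the 3-bit IB link state out of IBCStatus. */
static inline u64 qib_example_link_state(void __iomem *base)
{
	return QIB_READ_FIELD(base, IBCStatus, LinkState);
}

The 7322 header introduced below keeps the same scheme but also emits an _MSB line for every field and a _DEF power-on default for every register, which the 7220 file above mostly omits.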
diff --git a/drivers/infiniband/hw/qib/qib_7322_regs.h b/drivers/infiniband/hw/qib/qib_7322_regs.h
new file mode 100644
index 000000000000..a97440ba924c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_7322_regs.h
@@ -0,0 +1,3163 @@
1/*
2 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
34
35#define QIB_7322_Revision_OFFS 0x0
36#define QIB_7322_Revision_DEF 0x0000000002010601
37#define QIB_7322_Revision_R_Simulator_LSB 0x3F
38#define QIB_7322_Revision_R_Simulator_MSB 0x3F
39#define QIB_7322_Revision_R_Simulator_RMASK 0x1
40#define QIB_7322_Revision_R_Emulation_LSB 0x3E
41#define QIB_7322_Revision_R_Emulation_MSB 0x3E
42#define QIB_7322_Revision_R_Emulation_RMASK 0x1
43#define QIB_7322_Revision_R_Emulation_Revcode_LSB 0x28
44#define QIB_7322_Revision_R_Emulation_Revcode_MSB 0x3D
45#define QIB_7322_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
46#define QIB_7322_Revision_BoardID_LSB 0x20
47#define QIB_7322_Revision_BoardID_MSB 0x27
48#define QIB_7322_Revision_BoardID_RMASK 0xFF
49#define QIB_7322_Revision_R_SW_LSB 0x18
50#define QIB_7322_Revision_R_SW_MSB 0x1F
51#define QIB_7322_Revision_R_SW_RMASK 0xFF
52#define QIB_7322_Revision_R_Arch_LSB 0x10
53#define QIB_7322_Revision_R_Arch_MSB 0x17
54#define QIB_7322_Revision_R_Arch_RMASK 0xFF
55#define QIB_7322_Revision_R_ChipRevMajor_LSB 0x8
56#define QIB_7322_Revision_R_ChipRevMajor_MSB 0xF
57#define QIB_7322_Revision_R_ChipRevMajor_RMASK 0xFF
58#define QIB_7322_Revision_R_ChipRevMinor_LSB 0x0
59#define QIB_7322_Revision_R_ChipRevMinor_MSB 0x7
60#define QIB_7322_Revision_R_ChipRevMinor_RMASK 0xFF
61
62#define QIB_7322_Control_OFFS 0x8
63#define QIB_7322_Control_DEF 0x0000000000000000
64#define QIB_7322_Control_PCIECplQDiagEn_LSB 0x6
65#define QIB_7322_Control_PCIECplQDiagEn_MSB 0x6
66#define QIB_7322_Control_PCIECplQDiagEn_RMASK 0x1
67#define QIB_7322_Control_PCIEPostQDiagEn_LSB 0x5
68#define QIB_7322_Control_PCIEPostQDiagEn_MSB 0x5
69#define QIB_7322_Control_PCIEPostQDiagEn_RMASK 0x1
70#define QIB_7322_Control_SDmaDescFetchPriorityEn_LSB 0x4
71#define QIB_7322_Control_SDmaDescFetchPriorityEn_MSB 0x4
72#define QIB_7322_Control_SDmaDescFetchPriorityEn_RMASK 0x1
73#define QIB_7322_Control_PCIERetryBufDiagEn_LSB 0x3
74#define QIB_7322_Control_PCIERetryBufDiagEn_MSB 0x3
75#define QIB_7322_Control_PCIERetryBufDiagEn_RMASK 0x1
76#define QIB_7322_Control_FreezeMode_LSB 0x1
77#define QIB_7322_Control_FreezeMode_MSB 0x1
78#define QIB_7322_Control_FreezeMode_RMASK 0x1
79#define QIB_7322_Control_SyncReset_LSB 0x0
80#define QIB_7322_Control_SyncReset_MSB 0x0
81#define QIB_7322_Control_SyncReset_RMASK 0x1
82
83#define QIB_7322_PageAlign_OFFS 0x10
84#define QIB_7322_PageAlign_DEF 0x0000000000001000
85
86#define QIB_7322_ContextCnt_OFFS 0x18
87#define QIB_7322_ContextCnt_DEF 0x0000000000000012
88
89#define QIB_7322_Scratch_OFFS 0x20
90#define QIB_7322_Scratch_DEF 0x0000000000000000
91
92#define QIB_7322_CntrRegBase_OFFS 0x28
93#define QIB_7322_CntrRegBase_DEF 0x0000000000011000
94
95#define QIB_7322_SendRegBase_OFFS 0x30
96#define QIB_7322_SendRegBase_DEF 0x0000000000003000
97
98#define QIB_7322_UserRegBase_OFFS 0x38
99#define QIB_7322_UserRegBase_DEF 0x0000000000200000
100
101#define QIB_7322_IntMask_OFFS 0x68
102#define QIB_7322_IntMask_DEF 0x0000000000000000
103#define QIB_7322_IntMask_SDmaIntMask_1_LSB 0x3F
104#define QIB_7322_IntMask_SDmaIntMask_1_MSB 0x3F
105#define QIB_7322_IntMask_SDmaIntMask_1_RMASK 0x1
106#define QIB_7322_IntMask_SDmaIntMask_0_LSB 0x3E
107#define QIB_7322_IntMask_SDmaIntMask_0_MSB 0x3E
108#define QIB_7322_IntMask_SDmaIntMask_0_RMASK 0x1
109#define QIB_7322_IntMask_SDmaProgressIntMask_1_LSB 0x3D
110#define QIB_7322_IntMask_SDmaProgressIntMask_1_MSB 0x3D
111#define QIB_7322_IntMask_SDmaProgressIntMask_1_RMASK 0x1
112#define QIB_7322_IntMask_SDmaProgressIntMask_0_LSB 0x3C
113#define QIB_7322_IntMask_SDmaProgressIntMask_0_MSB 0x3C
114#define QIB_7322_IntMask_SDmaProgressIntMask_0_RMASK 0x1
115#define QIB_7322_IntMask_SDmaIdleIntMask_1_LSB 0x3B
116#define QIB_7322_IntMask_SDmaIdleIntMask_1_MSB 0x3B
117#define QIB_7322_IntMask_SDmaIdleIntMask_1_RMASK 0x1
118#define QIB_7322_IntMask_SDmaIdleIntMask_0_LSB 0x3A
119#define QIB_7322_IntMask_SDmaIdleIntMask_0_MSB 0x3A
120#define QIB_7322_IntMask_SDmaIdleIntMask_0_RMASK 0x1
121#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_LSB 0x39
122#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_MSB 0x39
123#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_RMASK 0x1
124#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_LSB 0x38
125#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_MSB 0x38
126#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_RMASK 0x1
127#define QIB_7322_IntMask_RcvUrg17IntMask_LSB 0x31
128#define QIB_7322_IntMask_RcvUrg17IntMask_MSB 0x31
129#define QIB_7322_IntMask_RcvUrg17IntMask_RMASK 0x1
130#define QIB_7322_IntMask_RcvUrg16IntMask_LSB 0x30
131#define QIB_7322_IntMask_RcvUrg16IntMask_MSB 0x30
132#define QIB_7322_IntMask_RcvUrg16IntMask_RMASK 0x1
133#define QIB_7322_IntMask_RcvUrg15IntMask_LSB 0x2F
134#define QIB_7322_IntMask_RcvUrg15IntMask_MSB 0x2F
135#define QIB_7322_IntMask_RcvUrg15IntMask_RMASK 0x1
136#define QIB_7322_IntMask_RcvUrg14IntMask_LSB 0x2E
137#define QIB_7322_IntMask_RcvUrg14IntMask_MSB 0x2E
138#define QIB_7322_IntMask_RcvUrg14IntMask_RMASK 0x1
139#define QIB_7322_IntMask_RcvUrg13IntMask_LSB 0x2D
140#define QIB_7322_IntMask_RcvUrg13IntMask_MSB 0x2D
141#define QIB_7322_IntMask_RcvUrg13IntMask_RMASK 0x1
142#define QIB_7322_IntMask_RcvUrg12IntMask_LSB 0x2C
143#define QIB_7322_IntMask_RcvUrg12IntMask_MSB 0x2C
144#define QIB_7322_IntMask_RcvUrg12IntMask_RMASK 0x1
145#define QIB_7322_IntMask_RcvUrg11IntMask_LSB 0x2B
146#define QIB_7322_IntMask_RcvUrg11IntMask_MSB 0x2B
147#define QIB_7322_IntMask_RcvUrg11IntMask_RMASK 0x1
148#define QIB_7322_IntMask_RcvUrg10IntMask_LSB 0x2A
149#define QIB_7322_IntMask_RcvUrg10IntMask_MSB 0x2A
150#define QIB_7322_IntMask_RcvUrg10IntMask_RMASK 0x1
151#define QIB_7322_IntMask_RcvUrg9IntMask_LSB 0x29
152#define QIB_7322_IntMask_RcvUrg9IntMask_MSB 0x29
153#define QIB_7322_IntMask_RcvUrg9IntMask_RMASK 0x1
154#define QIB_7322_IntMask_RcvUrg8IntMask_LSB 0x28
155#define QIB_7322_IntMask_RcvUrg8IntMask_MSB 0x28
156#define QIB_7322_IntMask_RcvUrg8IntMask_RMASK 0x1
157#define QIB_7322_IntMask_RcvUrg7IntMask_LSB 0x27
158#define QIB_7322_IntMask_RcvUrg7IntMask_MSB 0x27
159#define QIB_7322_IntMask_RcvUrg7IntMask_RMASK 0x1
160#define QIB_7322_IntMask_RcvUrg6IntMask_LSB 0x26
161#define QIB_7322_IntMask_RcvUrg6IntMask_MSB 0x26
162#define QIB_7322_IntMask_RcvUrg6IntMask_RMASK 0x1
163#define QIB_7322_IntMask_RcvUrg5IntMask_LSB 0x25
164#define QIB_7322_IntMask_RcvUrg5IntMask_MSB 0x25
165#define QIB_7322_IntMask_RcvUrg5IntMask_RMASK 0x1
166#define QIB_7322_IntMask_RcvUrg4IntMask_LSB 0x24
167#define QIB_7322_IntMask_RcvUrg4IntMask_MSB 0x24
168#define QIB_7322_IntMask_RcvUrg4IntMask_RMASK 0x1
169#define QIB_7322_IntMask_RcvUrg3IntMask_LSB 0x23
170#define QIB_7322_IntMask_RcvUrg3IntMask_MSB 0x23
171#define QIB_7322_IntMask_RcvUrg3IntMask_RMASK 0x1
172#define QIB_7322_IntMask_RcvUrg2IntMask_LSB 0x22
173#define QIB_7322_IntMask_RcvUrg2IntMask_MSB 0x22
174#define QIB_7322_IntMask_RcvUrg2IntMask_RMASK 0x1
175#define QIB_7322_IntMask_RcvUrg1IntMask_LSB 0x21
176#define QIB_7322_IntMask_RcvUrg1IntMask_MSB 0x21
177#define QIB_7322_IntMask_RcvUrg1IntMask_RMASK 0x1
178#define QIB_7322_IntMask_RcvUrg0IntMask_LSB 0x20
179#define QIB_7322_IntMask_RcvUrg0IntMask_MSB 0x20
180#define QIB_7322_IntMask_RcvUrg0IntMask_RMASK 0x1
181#define QIB_7322_IntMask_ErrIntMask_1_LSB 0x1F
182#define QIB_7322_IntMask_ErrIntMask_1_MSB 0x1F
183#define QIB_7322_IntMask_ErrIntMask_1_RMASK 0x1
184#define QIB_7322_IntMask_ErrIntMask_0_LSB 0x1E
185#define QIB_7322_IntMask_ErrIntMask_0_MSB 0x1E
186#define QIB_7322_IntMask_ErrIntMask_0_RMASK 0x1
187#define QIB_7322_IntMask_ErrIntMask_LSB 0x1D
188#define QIB_7322_IntMask_ErrIntMask_MSB 0x1D
189#define QIB_7322_IntMask_ErrIntMask_RMASK 0x1
190#define QIB_7322_IntMask_AssertGPIOIntMask_LSB 0x1C
191#define QIB_7322_IntMask_AssertGPIOIntMask_MSB 0x1C
192#define QIB_7322_IntMask_AssertGPIOIntMask_RMASK 0x1
193#define QIB_7322_IntMask_SendDoneIntMask_1_LSB 0x19
194#define QIB_7322_IntMask_SendDoneIntMask_1_MSB 0x19
195#define QIB_7322_IntMask_SendDoneIntMask_1_RMASK 0x1
196#define QIB_7322_IntMask_SendDoneIntMask_0_LSB 0x18
197#define QIB_7322_IntMask_SendDoneIntMask_0_MSB 0x18
198#define QIB_7322_IntMask_SendDoneIntMask_0_RMASK 0x1
199#define QIB_7322_IntMask_SendBufAvailIntMask_LSB 0x17
200#define QIB_7322_IntMask_SendBufAvailIntMask_MSB 0x17
201#define QIB_7322_IntMask_SendBufAvailIntMask_RMASK 0x1
202#define QIB_7322_IntMask_RcvAvail17IntMask_LSB 0x11
203#define QIB_7322_IntMask_RcvAvail17IntMask_MSB 0x11
204#define QIB_7322_IntMask_RcvAvail17IntMask_RMASK 0x1
205#define QIB_7322_IntMask_RcvAvail16IntMask_LSB 0x10
206#define QIB_7322_IntMask_RcvAvail16IntMask_MSB 0x10
207#define QIB_7322_IntMask_RcvAvail16IntMask_RMASK 0x1
208#define QIB_7322_IntMask_RcvAvail15IntMask_LSB 0xF
209#define QIB_7322_IntMask_RcvAvail15IntMask_MSB 0xF
210#define QIB_7322_IntMask_RcvAvail15IntMask_RMASK 0x1
211#define QIB_7322_IntMask_RcvAvail14IntMask_LSB 0xE
212#define QIB_7322_IntMask_RcvAvail14IntMask_MSB 0xE
213#define QIB_7322_IntMask_RcvAvail14IntMask_RMASK 0x1
214#define QIB_7322_IntMask_RcvAvail13IntMask_LSB 0xD
215#define QIB_7322_IntMask_RcvAvail13IntMask_MSB 0xD
216#define QIB_7322_IntMask_RcvAvail13IntMask_RMASK 0x1
217#define QIB_7322_IntMask_RcvAvail12IntMask_LSB 0xC
218#define QIB_7322_IntMask_RcvAvail12IntMask_MSB 0xC
219#define QIB_7322_IntMask_RcvAvail12IntMask_RMASK 0x1
220#define QIB_7322_IntMask_RcvAvail11IntMask_LSB 0xB
221#define QIB_7322_IntMask_RcvAvail11IntMask_MSB 0xB
222#define QIB_7322_IntMask_RcvAvail11IntMask_RMASK 0x1
223#define QIB_7322_IntMask_RcvAvail10IntMask_LSB 0xA
224#define QIB_7322_IntMask_RcvAvail10IntMask_MSB 0xA
225#define QIB_7322_IntMask_RcvAvail10IntMask_RMASK 0x1
226#define QIB_7322_IntMask_RcvAvail9IntMask_LSB 0x9
227#define QIB_7322_IntMask_RcvAvail9IntMask_MSB 0x9
228#define QIB_7322_IntMask_RcvAvail9IntMask_RMASK 0x1
229#define QIB_7322_IntMask_RcvAvail8IntMask_LSB 0x8
230#define QIB_7322_IntMask_RcvAvail8IntMask_MSB 0x8
231#define QIB_7322_IntMask_RcvAvail8IntMask_RMASK 0x1
232#define QIB_7322_IntMask_RcvAvail7IntMask_LSB 0x7
233#define QIB_7322_IntMask_RcvAvail7IntMask_MSB 0x7
234#define QIB_7322_IntMask_RcvAvail7IntMask_RMASK 0x1
235#define QIB_7322_IntMask_RcvAvail6IntMask_LSB 0x6
236#define QIB_7322_IntMask_RcvAvail6IntMask_MSB 0x6
237#define QIB_7322_IntMask_RcvAvail6IntMask_RMASK 0x1
238#define QIB_7322_IntMask_RcvAvail5IntMask_LSB 0x5
239#define QIB_7322_IntMask_RcvAvail5IntMask_MSB 0x5
240#define QIB_7322_IntMask_RcvAvail5IntMask_RMASK 0x1
241#define QIB_7322_IntMask_RcvAvail4IntMask_LSB 0x4
242#define QIB_7322_IntMask_RcvAvail4IntMask_MSB 0x4
243#define QIB_7322_IntMask_RcvAvail4IntMask_RMASK 0x1
244#define QIB_7322_IntMask_RcvAvail3IntMask_LSB 0x3
245#define QIB_7322_IntMask_RcvAvail3IntMask_MSB 0x3
246#define QIB_7322_IntMask_RcvAvail3IntMask_RMASK 0x1
247#define QIB_7322_IntMask_RcvAvail2IntMask_LSB 0x2
248#define QIB_7322_IntMask_RcvAvail2IntMask_MSB 0x2
249#define QIB_7322_IntMask_RcvAvail2IntMask_RMASK 0x1
250#define QIB_7322_IntMask_RcvAvail1IntMask_LSB 0x1
251#define QIB_7322_IntMask_RcvAvail1IntMask_MSB 0x1
252#define QIB_7322_IntMask_RcvAvail1IntMask_RMASK 0x1
253#define QIB_7322_IntMask_RcvAvail0IntMask_LSB 0x0
254#define QIB_7322_IntMask_RcvAvail0IntMask_MSB 0x0
255#define QIB_7322_IntMask_RcvAvail0IntMask_RMASK 0x1
256
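Note the regular layout of the per-context bits above: RcvAvail0..RcvAvail17 occupy bits 0..17 (0x0..0x11) and RcvUrg0..RcvUrg17 occupy bits 32..49 (0x20..0x31), so a context's interrupt-mask bits can be derived from the context number instead of naming each define. The helper below is a hedged sketch of that fold; its name is invented for illustration and is not part of the driver.

#include <linux/types.h>

/* Illustrative only: build the IntMask bits for receive context
 * "ctxt" (0..17).  RcvAvailN sits at bit (RcvAvail0IntMask_LSB + N)
 * and RcvUrgN at bit (RcvUrg0IntMask_LSB + N), per the defines above.
 */
static inline u64 qib_7322_ctxt_int_mask(unsigned int ctxt)
{
	return (1ULL << (QIB_7322_IntMask_RcvAvail0IntMask_LSB + ctxt)) |
	       (1ULL << (QIB_7322_IntMask_RcvUrg0IntMask_LSB + ctxt));
}

The IntStatus and IntClear registers that follow repeat the same bit positions, so the same computed mask can serve to test or acknowledge a context's interrupts.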
257#define QIB_7322_IntStatus_OFFS 0x70
258#define QIB_7322_IntStatus_DEF 0x0000000000000000
259#define QIB_7322_IntStatus_SDmaInt_1_LSB 0x3F
260#define QIB_7322_IntStatus_SDmaInt_1_MSB 0x3F
261#define QIB_7322_IntStatus_SDmaInt_1_RMASK 0x1
262#define QIB_7322_IntStatus_SDmaInt_0_LSB 0x3E
263#define QIB_7322_IntStatus_SDmaInt_0_MSB 0x3E
264#define QIB_7322_IntStatus_SDmaInt_0_RMASK 0x1
265#define QIB_7322_IntStatus_SDmaProgressInt_1_LSB 0x3D
266#define QIB_7322_IntStatus_SDmaProgressInt_1_MSB 0x3D
267#define QIB_7322_IntStatus_SDmaProgressInt_1_RMASK 0x1
268#define QIB_7322_IntStatus_SDmaProgressInt_0_LSB 0x3C
269#define QIB_7322_IntStatus_SDmaProgressInt_0_MSB 0x3C
270#define QIB_7322_IntStatus_SDmaProgressInt_0_RMASK 0x1
271#define QIB_7322_IntStatus_SDmaIdleInt_1_LSB 0x3B
272#define QIB_7322_IntStatus_SDmaIdleInt_1_MSB 0x3B
273#define QIB_7322_IntStatus_SDmaIdleInt_1_RMASK 0x1
274#define QIB_7322_IntStatus_SDmaIdleInt_0_LSB 0x3A
275#define QIB_7322_IntStatus_SDmaIdleInt_0_MSB 0x3A
276#define QIB_7322_IntStatus_SDmaIdleInt_0_RMASK 0x1
277#define QIB_7322_IntStatus_SDmaCleanupDone_1_LSB 0x39
278#define QIB_7322_IntStatus_SDmaCleanupDone_1_MSB 0x39
279#define QIB_7322_IntStatus_SDmaCleanupDone_1_RMASK 0x1
280#define QIB_7322_IntStatus_SDmaCleanupDone_0_LSB 0x38
281#define QIB_7322_IntStatus_SDmaCleanupDone_0_MSB 0x38
282#define QIB_7322_IntStatus_SDmaCleanupDone_0_RMASK 0x1
283#define QIB_7322_IntStatus_RcvUrg17_LSB 0x31
284#define QIB_7322_IntStatus_RcvUrg17_MSB 0x31
285#define QIB_7322_IntStatus_RcvUrg17_RMASK 0x1
286#define QIB_7322_IntStatus_RcvUrg16_LSB 0x30
287#define QIB_7322_IntStatus_RcvUrg16_MSB 0x30
288#define QIB_7322_IntStatus_RcvUrg16_RMASK 0x1
289#define QIB_7322_IntStatus_RcvUrg15_LSB 0x2F
290#define QIB_7322_IntStatus_RcvUrg15_MSB 0x2F
291#define QIB_7322_IntStatus_RcvUrg15_RMASK 0x1
292#define QIB_7322_IntStatus_RcvUrg14_LSB 0x2E
293#define QIB_7322_IntStatus_RcvUrg14_MSB 0x2E
294#define QIB_7322_IntStatus_RcvUrg14_RMASK 0x1
295#define QIB_7322_IntStatus_RcvUrg13_LSB 0x2D
296#define QIB_7322_IntStatus_RcvUrg13_MSB 0x2D
297#define QIB_7322_IntStatus_RcvUrg13_RMASK 0x1
298#define QIB_7322_IntStatus_RcvUrg12_LSB 0x2C
299#define QIB_7322_IntStatus_RcvUrg12_MSB 0x2C
300#define QIB_7322_IntStatus_RcvUrg12_RMASK 0x1
301#define QIB_7322_IntStatus_RcvUrg11_LSB 0x2B
302#define QIB_7322_IntStatus_RcvUrg11_MSB 0x2B
303#define QIB_7322_IntStatus_RcvUrg11_RMASK 0x1
304#define QIB_7322_IntStatus_RcvUrg10_LSB 0x2A
305#define QIB_7322_IntStatus_RcvUrg10_MSB 0x2A
306#define QIB_7322_IntStatus_RcvUrg10_RMASK 0x1
307#define QIB_7322_IntStatus_RcvUrg9_LSB 0x29
308#define QIB_7322_IntStatus_RcvUrg9_MSB 0x29
309#define QIB_7322_IntStatus_RcvUrg9_RMASK 0x1
310#define QIB_7322_IntStatus_RcvUrg8_LSB 0x28
311#define QIB_7322_IntStatus_RcvUrg8_MSB 0x28
312#define QIB_7322_IntStatus_RcvUrg8_RMASK 0x1
313#define QIB_7322_IntStatus_RcvUrg7_LSB 0x27
314#define QIB_7322_IntStatus_RcvUrg7_MSB 0x27
315#define QIB_7322_IntStatus_RcvUrg7_RMASK 0x1
316#define QIB_7322_IntStatus_RcvUrg6_LSB 0x26
317#define QIB_7322_IntStatus_RcvUrg6_MSB 0x26
318#define QIB_7322_IntStatus_RcvUrg6_RMASK 0x1
319#define QIB_7322_IntStatus_RcvUrg5_LSB 0x25
#define QIB_7322_IntStatus_RcvUrg5_MSB 0x25
#define QIB_7322_IntStatus_RcvUrg5_RMASK 0x1
#define QIB_7322_IntStatus_RcvUrg4_LSB 0x24
#define QIB_7322_IntStatus_RcvUrg4_MSB 0x24
#define QIB_7322_IntStatus_RcvUrg4_RMASK 0x1
#define QIB_7322_IntStatus_RcvUrg3_LSB 0x23
#define QIB_7322_IntStatus_RcvUrg3_MSB 0x23
#define QIB_7322_IntStatus_RcvUrg3_RMASK 0x1
#define QIB_7322_IntStatus_RcvUrg2_LSB 0x22
#define QIB_7322_IntStatus_RcvUrg2_MSB 0x22
#define QIB_7322_IntStatus_RcvUrg2_RMASK 0x1
#define QIB_7322_IntStatus_RcvUrg1_LSB 0x21
#define QIB_7322_IntStatus_RcvUrg1_MSB 0x21
#define QIB_7322_IntStatus_RcvUrg1_RMASK 0x1
#define QIB_7322_IntStatus_RcvUrg0_LSB 0x20
#define QIB_7322_IntStatus_RcvUrg0_MSB 0x20
#define QIB_7322_IntStatus_RcvUrg0_RMASK 0x1
#define QIB_7322_IntStatus_Err_1_LSB 0x1F
#define QIB_7322_IntStatus_Err_1_MSB 0x1F
#define QIB_7322_IntStatus_Err_1_RMASK 0x1
#define QIB_7322_IntStatus_Err_0_LSB 0x1E
#define QIB_7322_IntStatus_Err_0_MSB 0x1E
#define QIB_7322_IntStatus_Err_0_RMASK 0x1
#define QIB_7322_IntStatus_Err_LSB 0x1D
#define QIB_7322_IntStatus_Err_MSB 0x1D
#define QIB_7322_IntStatus_Err_RMASK 0x1
#define QIB_7322_IntStatus_AssertGPIO_LSB 0x1C
#define QIB_7322_IntStatus_AssertGPIO_MSB 0x1C
#define QIB_7322_IntStatus_AssertGPIO_RMASK 0x1
#define QIB_7322_IntStatus_SendDone_1_LSB 0x19
#define QIB_7322_IntStatus_SendDone_1_MSB 0x19
#define QIB_7322_IntStatus_SendDone_1_RMASK 0x1
#define QIB_7322_IntStatus_SendDone_0_LSB 0x18
#define QIB_7322_IntStatus_SendDone_0_MSB 0x18
#define QIB_7322_IntStatus_SendDone_0_RMASK 0x1
#define QIB_7322_IntStatus_SendBufAvail_LSB 0x17
#define QIB_7322_IntStatus_SendBufAvail_MSB 0x17
#define QIB_7322_IntStatus_SendBufAvail_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail17_LSB 0x11
#define QIB_7322_IntStatus_RcvAvail17_MSB 0x11
#define QIB_7322_IntStatus_RcvAvail17_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail16_LSB 0x10
#define QIB_7322_IntStatus_RcvAvail16_MSB 0x10
#define QIB_7322_IntStatus_RcvAvail16_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail15_LSB 0xF
#define QIB_7322_IntStatus_RcvAvail15_MSB 0xF
#define QIB_7322_IntStatus_RcvAvail15_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail14_LSB 0xE
#define QIB_7322_IntStatus_RcvAvail14_MSB 0xE
#define QIB_7322_IntStatus_RcvAvail14_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail13_LSB 0xD
#define QIB_7322_IntStatus_RcvAvail13_MSB 0xD
#define QIB_7322_IntStatus_RcvAvail13_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail12_LSB 0xC
#define QIB_7322_IntStatus_RcvAvail12_MSB 0xC
#define QIB_7322_IntStatus_RcvAvail12_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail11_LSB 0xB
#define QIB_7322_IntStatus_RcvAvail11_MSB 0xB
#define QIB_7322_IntStatus_RcvAvail11_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail10_LSB 0xA
#define QIB_7322_IntStatus_RcvAvail10_MSB 0xA
#define QIB_7322_IntStatus_RcvAvail10_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail9_LSB 0x9
#define QIB_7322_IntStatus_RcvAvail9_MSB 0x9
#define QIB_7322_IntStatus_RcvAvail9_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail8_LSB 0x8
#define QIB_7322_IntStatus_RcvAvail8_MSB 0x8
#define QIB_7322_IntStatus_RcvAvail8_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail7_LSB 0x7
#define QIB_7322_IntStatus_RcvAvail7_MSB 0x7
#define QIB_7322_IntStatus_RcvAvail7_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail6_LSB 0x6
#define QIB_7322_IntStatus_RcvAvail6_MSB 0x6
#define QIB_7322_IntStatus_RcvAvail6_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail5_LSB 0x5
#define QIB_7322_IntStatus_RcvAvail5_MSB 0x5
#define QIB_7322_IntStatus_RcvAvail5_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail4_LSB 0x4
#define QIB_7322_IntStatus_RcvAvail4_MSB 0x4
#define QIB_7322_IntStatus_RcvAvail4_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail3_LSB 0x3
#define QIB_7322_IntStatus_RcvAvail3_MSB 0x3
#define QIB_7322_IntStatus_RcvAvail3_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail2_LSB 0x2
#define QIB_7322_IntStatus_RcvAvail2_MSB 0x2
#define QIB_7322_IntStatus_RcvAvail2_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail1_LSB 0x1
#define QIB_7322_IntStatus_RcvAvail1_MSB 0x1
#define QIB_7322_IntStatus_RcvAvail1_RMASK 0x1
#define QIB_7322_IntStatus_RcvAvail0_LSB 0x0
#define QIB_7322_IntStatus_RcvAvail0_MSB 0x0
#define QIB_7322_IntStatus_RcvAvail0_RMASK 0x1

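/*
 * Editorial sketch, not part of the original header: every field above is
 * described by three macros -- _LSB (lowest bit position), _MSB (highest
 * bit position) and _RMASK (the field mask, right-justified).  A field is
 * therefore read as (reg >> FLD_LSB) & FLD_RMASK.  The helpers below only
 * restate that convention; the function names are invented for
 * illustration.
 */
#include <stdint.h>

static inline uint64_t qib7322_get_field(uint64_t reg, unsigned int lsb,
					 uint64_t rmask)
{
	/* Shift the field down to bit 0, then mask off neighbouring bits. */
	return (reg >> lsb) & rmask;
}

/* Example: test the RcvUrg0 bit in a previously read IntStatus value. */
static inline int qib7322_rcvurg0_pending(uint64_t intstatus)
{
	return qib7322_get_field(intstatus,
				 QIB_7322_IntStatus_RcvUrg0_LSB,
				 QIB_7322_IntStatus_RcvUrg0_RMASK) != 0;
}
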
#define QIB_7322_IntClear_OFFS 0x78
#define QIB_7322_IntClear_DEF 0x0000000000000000
#define QIB_7322_IntClear_SDmaIntClear_1_LSB 0x3F
#define QIB_7322_IntClear_SDmaIntClear_1_MSB 0x3F
#define QIB_7322_IntClear_SDmaIntClear_1_RMASK 0x1
#define QIB_7322_IntClear_SDmaIntClear_0_LSB 0x3E
#define QIB_7322_IntClear_SDmaIntClear_0_MSB 0x3E
#define QIB_7322_IntClear_SDmaIntClear_0_RMASK 0x1
#define QIB_7322_IntClear_SDmaProgressIntClear_1_LSB 0x3D
#define QIB_7322_IntClear_SDmaProgressIntClear_1_MSB 0x3D
#define QIB_7322_IntClear_SDmaProgressIntClear_1_RMASK 0x1
#define QIB_7322_IntClear_SDmaProgressIntClear_0_LSB 0x3C
#define QIB_7322_IntClear_SDmaProgressIntClear_0_MSB 0x3C
#define QIB_7322_IntClear_SDmaProgressIntClear_0_RMASK 0x1
#define QIB_7322_IntClear_SDmaIdleIntClear_1_LSB 0x3B
#define QIB_7322_IntClear_SDmaIdleIntClear_1_MSB 0x3B
#define QIB_7322_IntClear_SDmaIdleIntClear_1_RMASK 0x1
#define QIB_7322_IntClear_SDmaIdleIntClear_0_LSB 0x3A
#define QIB_7322_IntClear_SDmaIdleIntClear_0_MSB 0x3A
#define QIB_7322_IntClear_SDmaIdleIntClear_0_RMASK 0x1
#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_LSB 0x39
#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_MSB 0x39
#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_RMASK 0x1
#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_LSB 0x38
#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_MSB 0x38
#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg17IntClear_LSB 0x31
#define QIB_7322_IntClear_RcvUrg17IntClear_MSB 0x31
#define QIB_7322_IntClear_RcvUrg17IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg16IntClear_LSB 0x30
#define QIB_7322_IntClear_RcvUrg16IntClear_MSB 0x30
#define QIB_7322_IntClear_RcvUrg16IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg15IntClear_LSB 0x2F
#define QIB_7322_IntClear_RcvUrg15IntClear_MSB 0x2F
#define QIB_7322_IntClear_RcvUrg15IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg14IntClear_LSB 0x2E
#define QIB_7322_IntClear_RcvUrg14IntClear_MSB 0x2E
#define QIB_7322_IntClear_RcvUrg14IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg13IntClear_LSB 0x2D
#define QIB_7322_IntClear_RcvUrg13IntClear_MSB 0x2D
#define QIB_7322_IntClear_RcvUrg13IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg12IntClear_LSB 0x2C
#define QIB_7322_IntClear_RcvUrg12IntClear_MSB 0x2C
#define QIB_7322_IntClear_RcvUrg12IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg11IntClear_LSB 0x2B
#define QIB_7322_IntClear_RcvUrg11IntClear_MSB 0x2B
#define QIB_7322_IntClear_RcvUrg11IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg10IntClear_LSB 0x2A
#define QIB_7322_IntClear_RcvUrg10IntClear_MSB 0x2A
#define QIB_7322_IntClear_RcvUrg10IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg9IntClear_LSB 0x29
#define QIB_7322_IntClear_RcvUrg9IntClear_MSB 0x29
#define QIB_7322_IntClear_RcvUrg9IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg8IntClear_LSB 0x28
#define QIB_7322_IntClear_RcvUrg8IntClear_MSB 0x28
#define QIB_7322_IntClear_RcvUrg8IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg7IntClear_LSB 0x27
#define QIB_7322_IntClear_RcvUrg7IntClear_MSB 0x27
#define QIB_7322_IntClear_RcvUrg7IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg6IntClear_LSB 0x26
#define QIB_7322_IntClear_RcvUrg6IntClear_MSB 0x26
#define QIB_7322_IntClear_RcvUrg6IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg5IntClear_LSB 0x25
#define QIB_7322_IntClear_RcvUrg5IntClear_MSB 0x25
#define QIB_7322_IntClear_RcvUrg5IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg4IntClear_LSB 0x24
#define QIB_7322_IntClear_RcvUrg4IntClear_MSB 0x24
#define QIB_7322_IntClear_RcvUrg4IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg3IntClear_LSB 0x23
#define QIB_7322_IntClear_RcvUrg3IntClear_MSB 0x23
#define QIB_7322_IntClear_RcvUrg3IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg2IntClear_LSB 0x22
#define QIB_7322_IntClear_RcvUrg2IntClear_MSB 0x22
#define QIB_7322_IntClear_RcvUrg2IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg1IntClear_LSB 0x21
#define QIB_7322_IntClear_RcvUrg1IntClear_MSB 0x21
#define QIB_7322_IntClear_RcvUrg1IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvUrg0IntClear_LSB 0x20
#define QIB_7322_IntClear_RcvUrg0IntClear_MSB 0x20
#define QIB_7322_IntClear_RcvUrg0IntClear_RMASK 0x1
#define QIB_7322_IntClear_ErrIntClear_1_LSB 0x1F
#define QIB_7322_IntClear_ErrIntClear_1_MSB 0x1F
#define QIB_7322_IntClear_ErrIntClear_1_RMASK 0x1
#define QIB_7322_IntClear_ErrIntClear_0_LSB 0x1E
#define QIB_7322_IntClear_ErrIntClear_0_MSB 0x1E
#define QIB_7322_IntClear_ErrIntClear_0_RMASK 0x1
#define QIB_7322_IntClear_ErrIntClear_LSB 0x1D
#define QIB_7322_IntClear_ErrIntClear_MSB 0x1D
#define QIB_7322_IntClear_ErrIntClear_RMASK 0x1
#define QIB_7322_IntClear_AssertGPIOIntClear_LSB 0x1C
#define QIB_7322_IntClear_AssertGPIOIntClear_MSB 0x1C
#define QIB_7322_IntClear_AssertGPIOIntClear_RMASK 0x1
#define QIB_7322_IntClear_SendDoneIntClear_1_LSB 0x19
#define QIB_7322_IntClear_SendDoneIntClear_1_MSB 0x19
#define QIB_7322_IntClear_SendDoneIntClear_1_RMASK 0x1
#define QIB_7322_IntClear_SendDoneIntClear_0_LSB 0x18
#define QIB_7322_IntClear_SendDoneIntClear_0_MSB 0x18
#define QIB_7322_IntClear_SendDoneIntClear_0_RMASK 0x1
#define QIB_7322_IntClear_SendBufAvailIntClear_LSB 0x17
#define QIB_7322_IntClear_SendBufAvailIntClear_MSB 0x17
#define QIB_7322_IntClear_SendBufAvailIntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail17IntClear_LSB 0x11
#define QIB_7322_IntClear_RcvAvail17IntClear_MSB 0x11
#define QIB_7322_IntClear_RcvAvail17IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail16IntClear_LSB 0x10
#define QIB_7322_IntClear_RcvAvail16IntClear_MSB 0x10
#define QIB_7322_IntClear_RcvAvail16IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail15IntClear_LSB 0xF
#define QIB_7322_IntClear_RcvAvail15IntClear_MSB 0xF
#define QIB_7322_IntClear_RcvAvail15IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail14IntClear_LSB 0xE
#define QIB_7322_IntClear_RcvAvail14IntClear_MSB 0xE
#define QIB_7322_IntClear_RcvAvail14IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail13IntClear_LSB 0xD
#define QIB_7322_IntClear_RcvAvail13IntClear_MSB 0xD
#define QIB_7322_IntClear_RcvAvail13IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail12IntClear_LSB 0xC
#define QIB_7322_IntClear_RcvAvail12IntClear_MSB 0xC
#define QIB_7322_IntClear_RcvAvail12IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail11IntClear_LSB 0xB
#define QIB_7322_IntClear_RcvAvail11IntClear_MSB 0xB
#define QIB_7322_IntClear_RcvAvail11IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail10IntClear_LSB 0xA
#define QIB_7322_IntClear_RcvAvail10IntClear_MSB 0xA
#define QIB_7322_IntClear_RcvAvail10IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail9IntClear_LSB 0x9
#define QIB_7322_IntClear_RcvAvail9IntClear_MSB 0x9
#define QIB_7322_IntClear_RcvAvail9IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail8IntClear_LSB 0x8
#define QIB_7322_IntClear_RcvAvail8IntClear_MSB 0x8
#define QIB_7322_IntClear_RcvAvail8IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail7IntClear_LSB 0x7
#define QIB_7322_IntClear_RcvAvail7IntClear_MSB 0x7
#define QIB_7322_IntClear_RcvAvail7IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail6IntClear_LSB 0x6
#define QIB_7322_IntClear_RcvAvail6IntClear_MSB 0x6
#define QIB_7322_IntClear_RcvAvail6IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail5IntClear_LSB 0x5
#define QIB_7322_IntClear_RcvAvail5IntClear_MSB 0x5
#define QIB_7322_IntClear_RcvAvail5IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail4IntClear_LSB 0x4
#define QIB_7322_IntClear_RcvAvail4IntClear_MSB 0x4
#define QIB_7322_IntClear_RcvAvail4IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail3IntClear_LSB 0x3
#define QIB_7322_IntClear_RcvAvail3IntClear_MSB 0x3
#define QIB_7322_IntClear_RcvAvail3IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail2IntClear_LSB 0x2
#define QIB_7322_IntClear_RcvAvail2IntClear_MSB 0x2
#define QIB_7322_IntClear_RcvAvail2IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail1IntClear_LSB 0x1
#define QIB_7322_IntClear_RcvAvail1IntClear_MSB 0x1
#define QIB_7322_IntClear_RcvAvail1IntClear_RMASK 0x1
#define QIB_7322_IntClear_RcvAvail0IntClear_LSB 0x0
#define QIB_7322_IntClear_RcvAvail0IntClear_MSB 0x0
#define QIB_7322_IntClear_RcvAvail0IntClear_RMASK 0x1

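/*
 * Hedged sketch: IntClear at offset 0x78 mirrors IntStatus bit-for-bit,
 * so acknowledging an interrupt is presumably a write of the matching
 * bit(s).  The write-1-to-clear semantics, writeq() (from <linux/io.h>)
 * and the ioremap'ed chip base `kbase` are illustrative assumptions,
 * not taken from this header.
 */
static inline void qib7322_ack_rcvavail0(void __iomem *kbase)
{
	writeq((u64)QIB_7322_IntClear_RcvAvail0IntClear_RMASK <<
		       QIB_7322_IntClear_RcvAvail0IntClear_LSB,
	       kbase + QIB_7322_IntClear_OFFS);
}
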
#define QIB_7322_ErrMask_OFFS 0x80
#define QIB_7322_ErrMask_DEF 0x0000000000000000
#define QIB_7322_ErrMask_ResetNegatedMask_LSB 0x3F
#define QIB_7322_ErrMask_ResetNegatedMask_MSB 0x3F
#define QIB_7322_ErrMask_ResetNegatedMask_RMASK 0x1
#define QIB_7322_ErrMask_HardwareErrMask_LSB 0x3E
#define QIB_7322_ErrMask_HardwareErrMask_MSB 0x3E
#define QIB_7322_ErrMask_HardwareErrMask_RMASK 0x1
#define QIB_7322_ErrMask_InvalidAddrErrMask_LSB 0x3D
#define QIB_7322_ErrMask_InvalidAddrErrMask_MSB 0x3D
#define QIB_7322_ErrMask_InvalidAddrErrMask_RMASK 0x1
#define QIB_7322_ErrMask_SDmaVL15ErrMask_LSB 0x38
#define QIB_7322_ErrMask_SDmaVL15ErrMask_MSB 0x38
#define QIB_7322_ErrMask_SDmaVL15ErrMask_RMASK 0x1
#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_LSB 0x37
#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_MSB 0x37
#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_RMASK 0x1
#define QIB_7322_ErrMask_InvalidEEPCmdMask_LSB 0x35
#define QIB_7322_ErrMask_InvalidEEPCmdMask_MSB 0x35
#define QIB_7322_ErrMask_InvalidEEPCmdMask_RMASK 0x1
#define QIB_7322_ErrMask_RcvContextShareErrMask_LSB 0x34
#define QIB_7322_ErrMask_RcvContextShareErrMask_MSB 0x34
#define QIB_7322_ErrMask_RcvContextShareErrMask_RMASK 0x1
#define QIB_7322_ErrMask_SendVLMismatchErrMask_LSB 0x24
#define QIB_7322_ErrMask_SendVLMismatchErrMask_MSB 0x24
#define QIB_7322_ErrMask_SendVLMismatchErrMask_RMASK 0x1
#define QIB_7322_ErrMask_SendArmLaunchErrMask_LSB 0x23
#define QIB_7322_ErrMask_SendArmLaunchErrMask_MSB 0x23
#define QIB_7322_ErrMask_SendArmLaunchErrMask_RMASK 0x1
#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_MSB 0x1B
#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
#define QIB_7322_ErrMask_SDmaWrongPortErrMask_LSB 0x1A
#define QIB_7322_ErrMask_SDmaWrongPortErrMask_MSB 0x1A
#define QIB_7322_ErrMask_SDmaWrongPortErrMask_RMASK 0x1
#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_LSB 0x19
#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_MSB 0x19
#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_RMASK 0x1
#define QIB_7322_ErrMask_RcvHdrFullErrMask_LSB 0xD
#define QIB_7322_ErrMask_RcvHdrFullErrMask_MSB 0xD
#define QIB_7322_ErrMask_RcvHdrFullErrMask_RMASK 0x1
#define QIB_7322_ErrMask_RcvEgrFullErrMask_LSB 0xC
#define QIB_7322_ErrMask_RcvEgrFullErrMask_MSB 0xC
#define QIB_7322_ErrMask_RcvEgrFullErrMask_RMASK 0x1

#define QIB_7322_ErrStatus_OFFS 0x88
#define QIB_7322_ErrStatus_DEF 0x0000000000000000
#define QIB_7322_ErrStatus_ResetNegated_LSB 0x3F
#define QIB_7322_ErrStatus_ResetNegated_MSB 0x3F
#define QIB_7322_ErrStatus_ResetNegated_RMASK 0x1
#define QIB_7322_ErrStatus_HardwareErr_LSB 0x3E
#define QIB_7322_ErrStatus_HardwareErr_MSB 0x3E
#define QIB_7322_ErrStatus_HardwareErr_RMASK 0x1
#define QIB_7322_ErrStatus_InvalidAddrErr_LSB 0x3D
#define QIB_7322_ErrStatus_InvalidAddrErr_MSB 0x3D
#define QIB_7322_ErrStatus_InvalidAddrErr_RMASK 0x1
#define QIB_7322_ErrStatus_SDmaVL15Err_LSB 0x38
#define QIB_7322_ErrStatus_SDmaVL15Err_MSB 0x38
#define QIB_7322_ErrStatus_SDmaVL15Err_RMASK 0x1
#define QIB_7322_ErrStatus_SBufVL15MisUseErr_LSB 0x37
#define QIB_7322_ErrStatus_SBufVL15MisUseErr_MSB 0x37
#define QIB_7322_ErrStatus_SBufVL15MisUseErr_RMASK 0x1
#define QIB_7322_ErrStatus_InvalidEEPCmdErr_LSB 0x35
#define QIB_7322_ErrStatus_InvalidEEPCmdErr_MSB 0x35
#define QIB_7322_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
#define QIB_7322_ErrStatus_RcvContextShareErr_LSB 0x34
#define QIB_7322_ErrStatus_RcvContextShareErr_MSB 0x34
#define QIB_7322_ErrStatus_RcvContextShareErr_RMASK 0x1
#define QIB_7322_ErrStatus_SendVLMismatchErr_LSB 0x24
#define QIB_7322_ErrStatus_SendVLMismatchErr_MSB 0x24
#define QIB_7322_ErrStatus_SendVLMismatchErr_RMASK 0x1
#define QIB_7322_ErrStatus_SendArmLaunchErr_LSB 0x23
#define QIB_7322_ErrStatus_SendArmLaunchErr_MSB 0x23
#define QIB_7322_ErrStatus_SendArmLaunchErr_RMASK 0x1
#define QIB_7322_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
#define QIB_7322_ErrStatus_SendSpecialTriggerErr_MSB 0x1B
#define QIB_7322_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
#define QIB_7322_ErrStatus_SDmaWrongPortErr_LSB 0x1A
#define QIB_7322_ErrStatus_SDmaWrongPortErr_MSB 0x1A
#define QIB_7322_ErrStatus_SDmaWrongPortErr_RMASK 0x1
#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_LSB 0x19
#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_MSB 0x19
#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_RMASK 0x1
#define QIB_7322_ErrStatus_RcvHdrFullErr_LSB 0xD
#define QIB_7322_ErrStatus_RcvHdrFullErr_MSB 0xD
#define QIB_7322_ErrStatus_RcvHdrFullErr_RMASK 0x1
#define QIB_7322_ErrStatus_RcvEgrFullErr_LSB 0xC
#define QIB_7322_ErrStatus_RcvEgrFullErr_MSB 0xC
#define QIB_7322_ErrStatus_RcvEgrFullErr_RMASK 0x1

#define QIB_7322_ErrClear_OFFS 0x90
#define QIB_7322_ErrClear_DEF 0x0000000000000000
#define QIB_7322_ErrClear_ResetNegatedClear_LSB 0x3F
#define QIB_7322_ErrClear_ResetNegatedClear_MSB 0x3F
#define QIB_7322_ErrClear_ResetNegatedClear_RMASK 0x1
#define QIB_7322_ErrClear_HardwareErrClear_LSB 0x3E
#define QIB_7322_ErrClear_HardwareErrClear_MSB 0x3E
#define QIB_7322_ErrClear_HardwareErrClear_RMASK 0x1
#define QIB_7322_ErrClear_InvalidAddrErrClear_LSB 0x3D
#define QIB_7322_ErrClear_InvalidAddrErrClear_MSB 0x3D
#define QIB_7322_ErrClear_InvalidAddrErrClear_RMASK 0x1
#define QIB_7322_ErrClear_SDmaVL15ErrClear_LSB 0x38
#define QIB_7322_ErrClear_SDmaVL15ErrClear_MSB 0x38
#define QIB_7322_ErrClear_SDmaVL15ErrClear_RMASK 0x1
#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_LSB 0x37
#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_MSB 0x37
#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_RMASK 0x1
#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_MSB 0x35
#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
#define QIB_7322_ErrClear_RcvContextShareErrClear_LSB 0x34
#define QIB_7322_ErrClear_RcvContextShareErrClear_MSB 0x34
#define QIB_7322_ErrClear_RcvContextShareErrClear_RMASK 0x1
#define QIB_7322_ErrClear_SendVLMismatchErrMask_LSB 0x24
#define QIB_7322_ErrClear_SendVLMismatchErrMask_MSB 0x24
#define QIB_7322_ErrClear_SendVLMismatchErrMask_RMASK 0x1
#define QIB_7322_ErrClear_SendArmLaunchErrClear_LSB 0x23
#define QIB_7322_ErrClear_SendArmLaunchErrClear_MSB 0x23
#define QIB_7322_ErrClear_SendArmLaunchErrClear_RMASK 0x1
#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_MSB 0x1B
#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
#define QIB_7322_ErrClear_SDmaWrongPortErrClear_LSB 0x1A
#define QIB_7322_ErrClear_SDmaWrongPortErrClear_MSB 0x1A
#define QIB_7322_ErrClear_SDmaWrongPortErrClear_RMASK 0x1
#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_LSB 0x19
#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_MSB 0x19
#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_RMASK 0x1
#define QIB_7322_ErrClear_RcvHdrFullErrClear_LSB 0xD
#define QIB_7322_ErrClear_RcvHdrFullErrClear_MSB 0xD
#define QIB_7322_ErrClear_RcvHdrFullErrClear_RMASK 0x1
#define QIB_7322_ErrClear_RcvEgrFullErrClear_LSB 0xC
#define QIB_7322_ErrClear_RcvEgrFullErrClear_MSB 0xC
#define QIB_7322_ErrClear_RcvEgrFullErrClear_RMASK 0x1

#define QIB_7322_HwErrMask_OFFS 0x98
#define QIB_7322_HwErrMask_DEF 0x0000000000000000
#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_LSB 0x3F
#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_MSB 0x3F
#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_RMASK 0x1
#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_LSB 0x3E
#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_MSB 0x3E
#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_RMASK 0x1
#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_LSB 0x37
#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_MSB 0x37
#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_RMASK 0x1
#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_MSB 0x36
#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_LSB 0x35
#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_MSB 0x35
#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_RMASK 0x1
#define QIB_7322_HwErrMask_MemoryErrMask_LSB 0x30
#define QIB_7322_HwErrMask_MemoryErrMask_MSB 0x30
#define QIB_7322_HwErrMask_MemoryErrMask_RMASK 0x1
#define QIB_7322_HwErrMask_pcie_phy_txParityErr_LSB 0x22
#define QIB_7322_HwErrMask_pcie_phy_txParityErr_MSB 0x22
#define QIB_7322_HwErrMask_pcie_phy_txParityErr_RMASK 0x1
#define QIB_7322_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
#define QIB_7322_HwErrMask_PCIeBusParityErrMask_MSB 0x21
#define QIB_7322_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
#define QIB_7322_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
#define QIB_7322_HwErrMask_PcieCplTimeoutMask_MSB 0x1E
#define QIB_7322_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
#define QIB_7322_HwErrMask_PciePoisonedTLPMask_LSB 0x1D
#define QIB_7322_HwErrMask_PciePoisonedTLPMask_MSB 0x1D
#define QIB_7322_HwErrMask_PciePoisonedTLPMask_RMASK 0x1
#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_LSB 0x1C
#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_MSB 0x1C
#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_RMASK 0x1
#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_LSB 0x1B
#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_MSB 0x1B
#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_RMASK 0x1
#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF
#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF
#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1
#define QIB_7322_HwErrMask_statusValidNoEopMask_1_LSB 0xE
#define QIB_7322_HwErrMask_statusValidNoEopMask_1_MSB 0xE
#define QIB_7322_HwErrMask_statusValidNoEopMask_1_RMASK 0x1
#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD
#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD
#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1
#define QIB_7322_HwErrMask_statusValidNoEopMask_0_LSB 0xC
#define QIB_7322_HwErrMask_statusValidNoEopMask_0_MSB 0xC
#define QIB_7322_HwErrMask_statusValidNoEopMask_0_RMASK 0x1
#define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB
#define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB
#define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1

#define QIB_7322_HwErrStatus_OFFS 0xA0
#define QIB_7322_HwErrStatus_DEF 0x0000000000000000
#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_LSB 0x3F
#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_MSB 0x3F
#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_RMASK 0x1
#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_LSB 0x3E
#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_MSB 0x3E
#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_RMASK 0x1
#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_LSB 0x37
#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_MSB 0x37
#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_RMASK 0x1
#define QIB_7322_HwErrStatus_PowerOnBISTFailed_LSB 0x36
#define QIB_7322_HwErrStatus_PowerOnBISTFailed_MSB 0x36
#define QIB_7322_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
#define QIB_7322_HwErrStatus_TempsenseTholdReached_LSB 0x35
#define QIB_7322_HwErrStatus_TempsenseTholdReached_MSB 0x35
#define QIB_7322_HwErrStatus_TempsenseTholdReached_RMASK 0x1
#define QIB_7322_HwErrStatus_MemoryErr_LSB 0x30
#define QIB_7322_HwErrStatus_MemoryErr_MSB 0x30
#define QIB_7322_HwErrStatus_MemoryErr_RMASK 0x1
#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_LSB 0x22
#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_MSB 0x22
#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_RMASK 0x1
#define QIB_7322_HwErrStatus_PCIeBusParity_LSB 0x1F
#define QIB_7322_HwErrStatus_PCIeBusParity_MSB 0x21
#define QIB_7322_HwErrStatus_PCIeBusParity_RMASK 0x7
#define QIB_7322_HwErrStatus_PcieCplTimeout_LSB 0x1E
#define QIB_7322_HwErrStatus_PcieCplTimeout_MSB 0x1E
#define QIB_7322_HwErrStatus_PcieCplTimeout_RMASK 0x1
#define QIB_7322_HwErrStatus_PciePoisonedTLP_LSB 0x1D
#define QIB_7322_HwErrStatus_PciePoisonedTLP_MSB 0x1D
#define QIB_7322_HwErrStatus_PciePoisonedTLP_RMASK 0x1
#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_LSB 0x1C
#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_MSB 0x1C
#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_RMASK 0x1
#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_LSB 0x1B
#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_MSB 0x1B
#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_RMASK 0x1
#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF
#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF
#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1
#define QIB_7322_HwErrStatus_statusValidNoEop_1_LSB 0xE
#define QIB_7322_HwErrStatus_statusValidNoEop_1_MSB 0xE
#define QIB_7322_HwErrStatus_statusValidNoEop_1_RMASK 0x1
#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD
#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD
#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1
#define QIB_7322_HwErrStatus_statusValidNoEop_0_LSB 0xC
#define QIB_7322_HwErrStatus_statusValidNoEop_0_MSB 0xC
#define QIB_7322_HwErrStatus_statusValidNoEop_0_RMASK 0x1
#define QIB_7322_HwErrStatus_LATriggered_LSB 0xB
#define QIB_7322_HwErrStatus_LATriggered_MSB 0xB
#define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1

#define QIB_7322_HwErrClear_OFFS 0xA8
#define QIB_7322_HwErrClear_DEF 0x0000000000000000
#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_LSB 0x3F
#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_MSB 0x3F
#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_RMASK 0x1
#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_LSB 0x3E
#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_MSB 0x3E
#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_RMASK 0x1
#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_LSB 0x37
#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_MSB 0x37
#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_RMASK 0x1
#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_MSB 0x36
#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_LSB 0x35
#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_MSB 0x35
#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_RMASK 0x1
#define QIB_7322_HwErrClear_MemoryErrClear_LSB 0x30
#define QIB_7322_HwErrClear_MemoryErrClear_MSB 0x30
#define QIB_7322_HwErrClear_MemoryErrClear_RMASK 0x1
#define QIB_7322_HwErrClear_pcie_phy_txParityErr_LSB 0x22
#define QIB_7322_HwErrClear_pcie_phy_txParityErr_MSB 0x22
#define QIB_7322_HwErrClear_pcie_phy_txParityErr_RMASK 0x1
#define QIB_7322_HwErrClear_PCIeBusParityClear_LSB 0x1F
#define QIB_7322_HwErrClear_PCIeBusParityClear_MSB 0x21
#define QIB_7322_HwErrClear_PCIeBusParityClear_RMASK 0x7
#define QIB_7322_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
#define QIB_7322_HwErrClear_PcieCplTimeoutClear_MSB 0x1E
#define QIB_7322_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
#define QIB_7322_HwErrClear_PciePoisonedTLPClear_LSB 0x1D
#define QIB_7322_HwErrClear_PciePoisonedTLPClear_MSB 0x1D
#define QIB_7322_HwErrClear_PciePoisonedTLPClear_RMASK 0x1
#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_LSB 0x1C
#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_MSB 0x1C
#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_RMASK 0x1
#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_LSB 0x1B
#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_MSB 0x1B
#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_RMASK 0x1
#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF
#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF
#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1
#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_LSB 0xE
#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_MSB 0xE
#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_RMASK 0x1
#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD
#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD
#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1
#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_LSB 0xC
#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_MSB 0xC
#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_RMASK 0x1
#define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB
#define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB
#define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1

#define QIB_7322_HwDiagCtrl_OFFS 0xB0
#define QIB_7322_HwDiagCtrl_DEF 0x0000000000000000
#define QIB_7322_HwDiagCtrl_Diagnostic_LSB 0x3F
#define QIB_7322_HwDiagCtrl_Diagnostic_MSB 0x3F
#define QIB_7322_HwDiagCtrl_Diagnostic_RMASK 0x1
#define QIB_7322_HwDiagCtrl_CounterWrEnable_LSB 0x3D
#define QIB_7322_HwDiagCtrl_CounterWrEnable_MSB 0x3D
#define QIB_7322_HwDiagCtrl_CounterWrEnable_RMASK 0x1
#define QIB_7322_HwDiagCtrl_CounterDisable_LSB 0x3C
#define QIB_7322_HwDiagCtrl_CounterDisable_MSB 0x3C
#define QIB_7322_HwDiagCtrl_CounterDisable_RMASK 0x1
#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_MSB 0x22
#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF
#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF
#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1
#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_LSB 0xE
#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_MSB 0xE
#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_RMASK 0x1
#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD
#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD
#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1
#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_LSB 0xC
#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_MSB 0xC
#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_RMASK 0x1

#define QIB_7322_EXTStatus_OFFS 0xC0
#define QIB_7322_EXTStatus_DEF 0x000000000000X000
#define QIB_7322_EXTStatus_GPIOIn_LSB 0x30
#define QIB_7322_EXTStatus_GPIOIn_MSB 0x3F
#define QIB_7322_EXTStatus_GPIOIn_RMASK 0xFFFF
#define QIB_7322_EXTStatus_MemBISTDisabled_LSB 0xF
#define QIB_7322_EXTStatus_MemBISTDisabled_MSB 0xF
#define QIB_7322_EXTStatus_MemBISTDisabled_RMASK 0x1
#define QIB_7322_EXTStatus_MemBISTEndTest_LSB 0xE
#define QIB_7322_EXTStatus_MemBISTEndTest_MSB 0xE
#define QIB_7322_EXTStatus_MemBISTEndTest_RMASK 0x1

#define QIB_7322_EXTCtrl_OFFS 0xC8
#define QIB_7322_EXTCtrl_DEF 0x0000000000000000
#define QIB_7322_EXTCtrl_GPIOOe_LSB 0x30
#define QIB_7322_EXTCtrl_GPIOOe_MSB 0x3F
#define QIB_7322_EXTCtrl_GPIOOe_RMASK 0xFFFF
#define QIB_7322_EXTCtrl_GPIOInvert_LSB 0x20
#define QIB_7322_EXTCtrl_GPIOInvert_MSB 0x2F
#define QIB_7322_EXTCtrl_GPIOInvert_RMASK 0xFFFF
#define QIB_7322_EXTCtrl_LEDPort1GreenOn_LSB 0x3
#define QIB_7322_EXTCtrl_LEDPort1GreenOn_MSB 0x3
#define QIB_7322_EXTCtrl_LEDPort1GreenOn_RMASK 0x1
#define QIB_7322_EXTCtrl_LEDPort1YellowOn_LSB 0x2
#define QIB_7322_EXTCtrl_LEDPort1YellowOn_MSB 0x2
#define QIB_7322_EXTCtrl_LEDPort1YellowOn_RMASK 0x1
#define QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB 0x1
#define QIB_7322_EXTCtrl_LEDPort0GreenOn_MSB 0x1
#define QIB_7322_EXTCtrl_LEDPort0GreenOn_RMASK 0x1
#define QIB_7322_EXTCtrl_LEDPort0YellowOn_LSB 0x0
#define QIB_7322_EXTCtrl_LEDPort0YellowOn_MSB 0x0
#define QIB_7322_EXTCtrl_LEDPort0YellowOn_RMASK 0x1

#define QIB_7322_GPIOOut_OFFS 0xE0
#define QIB_7322_GPIOOut_DEF 0x0000000000000000

#define QIB_7322_GPIOMask_OFFS 0xE8
#define QIB_7322_GPIOMask_DEF 0x0000000000000000

#define QIB_7322_GPIOStatus_OFFS 0xF0
#define QIB_7322_GPIOStatus_DEF 0x0000000000000000

#define QIB_7322_GPIOClear_OFFS 0xF8
#define QIB_7322_GPIOClear_DEF 0x0000000000000000

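/*
 * Hedged sketch of a read-modify-write on EXTCtrl to light the port-0
 * green LED.  Only the register offset and bit position come from this
 * header; readq()/writeq() (from <linux/io.h>) and the ioremap'ed base
 * `kbase` are illustrative assumptions.
 */
static inline void qib7322_led0_green_on(void __iomem *kbase)
{
	u64 v = readq(kbase + QIB_7322_EXTCtrl_OFFS);

	v |= (u64)QIB_7322_EXTCtrl_LEDPort0GreenOn_RMASK <<
	     QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB;
	writeq(v, kbase + QIB_7322_EXTCtrl_OFFS);
}
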
#define QIB_7322_RcvCtrl_OFFS 0x100
#define QIB_7322_RcvCtrl_DEF 0x0000000000000000
#define QIB_7322_RcvCtrl_TidReDirect_LSB 0x30
#define QIB_7322_RcvCtrl_TidReDirect_MSB 0x3F
#define QIB_7322_RcvCtrl_TidReDirect_RMASK 0xFFFF
#define QIB_7322_RcvCtrl_TailUpd_LSB 0x2F
#define QIB_7322_RcvCtrl_TailUpd_MSB 0x2F
#define QIB_7322_RcvCtrl_TailUpd_RMASK 0x1
#define QIB_7322_RcvCtrl_XrcTypeCode_LSB 0x2C
#define QIB_7322_RcvCtrl_XrcTypeCode_MSB 0x2E
#define QIB_7322_RcvCtrl_XrcTypeCode_RMASK 0x7
#define QIB_7322_RcvCtrl_TidFlowEnable_LSB 0x2B
#define QIB_7322_RcvCtrl_TidFlowEnable_MSB 0x2B
#define QIB_7322_RcvCtrl_TidFlowEnable_RMASK 0x1
#define QIB_7322_RcvCtrl_ContextCfg_LSB 0x29
#define QIB_7322_RcvCtrl_ContextCfg_MSB 0x2A
#define QIB_7322_RcvCtrl_ContextCfg_RMASK 0x3
#define QIB_7322_RcvCtrl_IntrAvail_LSB 0x14
#define QIB_7322_RcvCtrl_IntrAvail_MSB 0x25
#define QIB_7322_RcvCtrl_IntrAvail_RMASK 0x3FFFF
#define QIB_7322_RcvCtrl_dontDropRHQFull_LSB 0x0
#define QIB_7322_RcvCtrl_dontDropRHQFull_MSB 0x11
#define QIB_7322_RcvCtrl_dontDropRHQFull_RMASK 0x3FFFF

#define QIB_7322_RcvHdrSize_OFFS 0x110
#define QIB_7322_RcvHdrSize_DEF 0x0000000000000000

#define QIB_7322_RcvHdrCnt_OFFS 0x118
#define QIB_7322_RcvHdrCnt_DEF 0x0000000000000000

#define QIB_7322_RcvHdrEntSize_OFFS 0x120
#define QIB_7322_RcvHdrEntSize_DEF 0x0000000000000000

#define QIB_7322_RcvTIDBase_OFFS 0x128
#define QIB_7322_RcvTIDBase_DEF 0x0000000000050000

#define QIB_7322_RcvTIDCnt_OFFS 0x130
#define QIB_7322_RcvTIDCnt_DEF 0x0000000000000200

#define QIB_7322_RcvEgrBase_OFFS 0x138
#define QIB_7322_RcvEgrBase_DEF 0x0000000000014000

#define QIB_7322_RcvEgrCnt_OFFS 0x140
#define QIB_7322_RcvEgrCnt_DEF 0x0000000000001000

#define QIB_7322_RcvBufBase_OFFS 0x148
#define QIB_7322_RcvBufBase_DEF 0x0000000000080000

#define QIB_7322_RcvBufSize_OFFS 0x150
#define QIB_7322_RcvBufSize_DEF 0x0000000000005000

#define QIB_7322_RxIntMemBase_OFFS 0x158
#define QIB_7322_RxIntMemBase_DEF 0x0000000000077000

#define QIB_7322_RxIntMemSize_OFFS 0x160
#define QIB_7322_RxIntMemSize_DEF 0x0000000000007000

#define QIB_7322_feature_mask_OFFS 0x190
#define QIB_7322_feature_mask_DEF 0x00000000000000XX

#define QIB_7322_active_feature_mask_OFFS 0x198
#define QIB_7322_active_feature_mask_DEF 0x00000000000000XX
#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_LSB 0x5
#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_MSB 0x5
#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_RMASK 0x1
#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_LSB 0x4
#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_MSB 0x4
#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_RMASK 0x1
#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_LSB 0x3
#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_MSB 0x3
#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_RMASK 0x1
#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_LSB 0x2
#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_MSB 0x2
#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_RMASK 0x1
#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_LSB 0x1
#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_MSB 0x1
#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_RMASK 0x1
#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_LSB 0x0
#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_MSB 0x0
#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_RMASK 0x1

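/*
 * Editorial sketch (not in the original header): the per-port speed
 * bits in active_feature_mask decode with the usual LSB/RMASK pattern;
 * the function name is invented for illustration.
 */
static inline int qib7322_port0_qdr_enabled(u64 afm)
{
	return (afm >> QIB_7322_active_feature_mask_Port0_QDR_Enabled_LSB) &
	       QIB_7322_active_feature_mask_Port0_QDR_Enabled_RMASK;
}
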
#define QIB_7322_SendCtrl_OFFS 0x1C0
#define QIB_7322_SendCtrl_DEF 0x0000000000000000
#define QIB_7322_SendCtrl_Disarm_LSB 0x1F
#define QIB_7322_SendCtrl_Disarm_MSB 0x1F
#define QIB_7322_SendCtrl_Disarm_RMASK 0x1
#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_LSB 0x1D
#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_MSB 0x1D
#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_RMASK 0x1
#define QIB_7322_SendCtrl_AvailUpdThld_LSB 0x18
#define QIB_7322_SendCtrl_AvailUpdThld_MSB 0x1C
#define QIB_7322_SendCtrl_AvailUpdThld_RMASK 0x1F
#define QIB_7322_SendCtrl_DisarmSendBuf_LSB 0x10
#define QIB_7322_SendCtrl_DisarmSendBuf_MSB 0x17
#define QIB_7322_SendCtrl_DisarmSendBuf_RMASK 0xFF
#define QIB_7322_SendCtrl_SpecialTriggerEn_LSB 0x4
#define QIB_7322_SendCtrl_SpecialTriggerEn_MSB 0x4
#define QIB_7322_SendCtrl_SpecialTriggerEn_RMASK 0x1
#define QIB_7322_SendCtrl_SendBufAvailUpd_LSB 0x2
#define QIB_7322_SendCtrl_SendBufAvailUpd_MSB 0x2
#define QIB_7322_SendCtrl_SendBufAvailUpd_RMASK 0x1
#define QIB_7322_SendCtrl_SendIntBufAvail_LSB 0x1
#define QIB_7322_SendCtrl_SendIntBufAvail_MSB 0x1
#define QIB_7322_SendCtrl_SendIntBufAvail_RMASK 0x1

#define QIB_7322_SendBufBase_OFFS 0x1C8
#define QIB_7322_SendBufBase_DEF 0x0018000000100000
#define QIB_7322_SendBufBase_BaseAddr_LargePIO_LSB 0x20
#define QIB_7322_SendBufBase_BaseAddr_LargePIO_MSB 0x34
#define QIB_7322_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_MSB 0x14
#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF

#define QIB_7322_SendBufSize_OFFS 0x1D0
#define QIB_7322_SendBufSize_DEF 0x0000108000000880
#define QIB_7322_SendBufSize_Size_LargePIO_LSB 0x20
#define QIB_7322_SendBufSize_Size_LargePIO_MSB 0x2C
#define QIB_7322_SendBufSize_Size_LargePIO_RMASK 0x1FFF
#define QIB_7322_SendBufSize_Size_SmallPIO_LSB 0x0
#define QIB_7322_SendBufSize_Size_SmallPIO_MSB 0xB
#define QIB_7322_SendBufSize_Size_SmallPIO_RMASK 0xFFF

#define QIB_7322_SendBufCnt_OFFS 0x1D8
#define QIB_7322_SendBufCnt_DEF 0x0000002000000080
#define QIB_7322_SendBufCnt_Num_LargeBuffers_LSB 0x20
#define QIB_7322_SendBufCnt_Num_LargeBuffers_MSB 0x25
#define QIB_7322_SendBufCnt_Num_LargeBuffers_RMASK 0x3F
#define QIB_7322_SendBufCnt_Num_SmallBuffers_LSB 0x0
#define QIB_7322_SendBufCnt_Num_SmallBuffers_MSB 0x8
#define QIB_7322_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF

#define QIB_7322_SendBufAvailAddr_OFFS 0x1E0
#define QIB_7322_SendBufAvailAddr_DEF 0x0000000000000000
#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_MSB 0x27
#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF

#define QIB_7322_SendBufErr0_OFFS 0x240
#define QIB_7322_SendBufErr0_DEF 0x0000000000000000
#define QIB_7322_SendBufErr0_SendBufErr_63_0_LSB 0x0
#define QIB_7322_SendBufErr0_SendBufErr_63_0_MSB 0x3F
#define QIB_7322_SendBufErr0_SendBufErr_63_0_RMASK 0x0

#define QIB_7322_AvailUpdCount_OFFS 0x268
#define QIB_7322_AvailUpdCount_DEF 0x0000000000000000
#define QIB_7322_AvailUpdCount_AvailUpdCount_LSB 0x0
#define QIB_7322_AvailUpdCount_AvailUpdCount_MSB 0x4
#define QIB_7322_AvailUpdCount_AvailUpdCount_RMASK 0x1F

#define QIB_7322_RcvHdrAddr0_OFFS 0x280
#define QIB_7322_RcvHdrAddr0_DEF 0x0000000000000000
#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_LSB 0x2
#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_MSB 0x27
#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_RMASK 0x3FFFFFFFFF

#define QIB_7322_RcvHdrTailAddr0_OFFS 0x340
#define QIB_7322_RcvHdrTailAddr0_DEF 0x0000000000000000
#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_LSB 0x2
#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_MSB 0x27
#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_RMASK 0x3FFFFFFFFF

#define QIB_7322_ahb_access_ctrl_OFFS 0x460
#define QIB_7322_ahb_access_ctrl_DEF 0x0000000000000000
#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_LSB 0x1
#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_MSB 0x2
#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_RMASK 0x3
#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_LSB 0x0
#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_MSB 0x0
#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_RMASK 0x1

#define QIB_7322_ahb_transaction_reg_OFFS 0x468
#define QIB_7322_ahb_transaction_reg_DEF 0x0000000080000000
#define QIB_7322_ahb_transaction_reg_ahb_data_LSB 0x20
#define QIB_7322_ahb_transaction_reg_ahb_data_MSB 0x3F
#define QIB_7322_ahb_transaction_reg_ahb_data_RMASK 0xFFFFFFFF
#define QIB_7322_ahb_transaction_reg_ahb_rdy_LSB 0x1F
#define QIB_7322_ahb_transaction_reg_ahb_rdy_MSB 0x1F
#define QIB_7322_ahb_transaction_reg_ahb_rdy_RMASK 0x1
#define QIB_7322_ahb_transaction_reg_ahb_req_err_LSB 0x1E
#define QIB_7322_ahb_transaction_reg_ahb_req_err_MSB 0x1E
#define QIB_7322_ahb_transaction_reg_ahb_req_err_RMASK 0x1
#define QIB_7322_ahb_transaction_reg_write_not_read_LSB 0x1B
#define QIB_7322_ahb_transaction_reg_write_not_read_MSB 0x1B
#define QIB_7322_ahb_transaction_reg_write_not_read_RMASK 0x1
#define QIB_7322_ahb_transaction_reg_ahb_address_LSB 0x10
#define QIB_7322_ahb_transaction_reg_ahb_address_MSB 0x1A
#define QIB_7322_ahb_transaction_reg_ahb_address_RMASK 0x7FF

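/*
 * Hedged sketch of the AHB handshake suggested by the two registers
 * above: select the target via ahb_access_ctrl, issue the request
 * through ahb_transaction_reg, then wait for ahb_rdy.  The polling
 * loop, iteration bound, readq() and `kbase` are illustrative
 * assumptions; the real driver's sequencing may differ.
 */
static int qib7322_ahb_wait_rdy(void __iomem *kbase)
{
	int tries = 1000;	/* arbitrary illustrative bound */

	while (tries--) {
		u64 t = readq(kbase + QIB_7322_ahb_transaction_reg_OFFS);

		if ((t >> QIB_7322_ahb_transaction_reg_ahb_rdy_LSB) &
		    QIB_7322_ahb_transaction_reg_ahb_rdy_RMASK)
			return 0;
	}
	return -1;	/* timed out */
}
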
#define QIB_7322_SPC_JTAG_ACCESS_REG_OFFS 0x470
#define QIB_7322_SPC_JTAG_ACCESS_REG_DEF 0x0000000000000001
#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_LSB 0xA
#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_MSB 0xA
#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_RMASK 0x1
#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_LSB 0x5
#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_MSB 0x9
#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_RMASK 0x1F
#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_LSB 0x3
#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_MSB 0x4
#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_RMASK 0x3
#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_LSB 0x2
#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_MSB 0x2
#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_RMASK 0x1
#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_LSB 0x1
#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_MSB 0x1
#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_RMASK 0x1
#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_LSB 0x0
#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_MSB 0x0
#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_RMASK 0x1

#define QIB_7322_SendCheckMask0_OFFS 0x4C0
#define QIB_7322_SendCheckMask0_DEF 0x0000000000000000
#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_LSB 0x0
#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_MSB 0x3F
#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_RMASK 0x0

#define QIB_7322_SendGRHCheckMask0_OFFS 0x4E0
#define QIB_7322_SendGRHCheckMask0_DEF 0x0000000000000000
#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_LSB 0x0
#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_MSB 0x3F
#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_RMASK 0x0

#define QIB_7322_SendIBPacketMask0_OFFS 0x500
#define QIB_7322_SendIBPacketMask0_DEF 0x0000000000000000
#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_LSB 0x0
#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_MSB 0x3F
#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_RMASK 0x0

#define QIB_7322_IntRedirect0_OFFS 0x540
#define QIB_7322_IntRedirect0_DEF 0x0000000000000000
#define QIB_7322_IntRedirect0_vec11_LSB 0x37
#define QIB_7322_IntRedirect0_vec11_MSB 0x3B
#define QIB_7322_IntRedirect0_vec11_RMASK 0x1F
#define QIB_7322_IntRedirect0_vec10_LSB 0x32
#define QIB_7322_IntRedirect0_vec10_MSB 0x36
#define QIB_7322_IntRedirect0_vec10_RMASK 0x1F
#define QIB_7322_IntRedirect0_vec9_LSB 0x2D
#define QIB_7322_IntRedirect0_vec9_MSB 0x31
#define QIB_7322_IntRedirect0_vec9_RMASK 0x1F
#define QIB_7322_IntRedirect0_vec8_LSB 0x28
#define QIB_7322_IntRedirect0_vec8_MSB 0x2C
#define QIB_7322_IntRedirect0_vec8_RMASK 0x1F
#define QIB_7322_IntRedirect0_vec7_LSB 0x23
#define QIB_7322_IntRedirect0_vec7_MSB 0x27
#define QIB_7322_IntRedirect0_vec7_RMASK 0x1F
#define QIB_7322_IntRedirect0_vec6_LSB 0x1E
#define QIB_7322_IntRedirect0_vec6_MSB 0x22
#define QIB_7322_IntRedirect0_vec6_RMASK 0x1F
#define QIB_7322_IntRedirect0_vec5_LSB 0x19
#define QIB_7322_IntRedirect0_vec5_MSB 0x1D
#define QIB_7322_IntRedirect0_vec5_RMASK 0x1F
#define QIB_7322_IntRedirect0_vec4_LSB 0x14
#define QIB_7322_IntRedirect0_vec4_MSB 0x18
#define QIB_7322_IntRedirect0_vec4_RMASK 0x1F
#define QIB_7322_IntRedirect0_vec3_LSB 0xF
#define QIB_7322_IntRedirect0_vec3_MSB 0x13
#define QIB_7322_IntRedirect0_vec3_RMASK 0x1F
#define QIB_7322_IntRedirect0_vec2_LSB 0xA
#define QIB_7322_IntRedirect0_vec2_MSB 0xE
#define QIB_7322_IntRedirect0_vec2_RMASK 0x1F
#define QIB_7322_IntRedirect0_vec1_LSB 0x5
#define QIB_7322_IntRedirect0_vec1_MSB 0x9
#define QIB_7322_IntRedirect0_vec1_RMASK 0x1F
#define QIB_7322_IntRedirect0_vec0_LSB 0x0
#define QIB_7322_IntRedirect0_vec0_MSB 0x4
#define QIB_7322_IntRedirect0_vec0_RMASK 0x1F

#define QIB_7322_Int_Granted_OFFS 0x570
#define QIB_7322_Int_Granted_DEF 0x0000000000000000

#define QIB_7322_vec_clr_without_int_OFFS 0x578
#define QIB_7322_vec_clr_without_int_DEF 0x0000000000000000

#define QIB_7322_DCACtrlA_OFFS 0x580
#define QIB_7322_DCACtrlA_DEF 0x0000000000000000
#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_LSB 0x4
#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_MSB 0x4
#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_RMASK 0x1
#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_LSB 0x3
#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_MSB 0x3
#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_RMASK 0x1
#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_LSB 0x2
#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_MSB 0x2
#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_RMASK 0x1
#define QIB_7322_DCACtrlA_EagerDCAEnable_LSB 0x1
#define QIB_7322_DCACtrlA_EagerDCAEnable_MSB 0x1
#define QIB_7322_DCACtrlA_EagerDCAEnable_RMASK 0x1
#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_LSB 0x0
#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_MSB 0x0
#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_RMASK 0x1

#define QIB_7322_DCACtrlB_OFFS 0x588
#define QIB_7322_DCACtrlB_DEF 0x0000000000000000
#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_LSB 0x36
#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_MSB 0x3B
#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_LSB 0x2E
#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_MSB 0x35
#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_LSB 0x28
#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_MSB 0x2D
#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_LSB 0x20
#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_MSB 0x27
#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_LSB 0x16
#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_MSB 0x1B
#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_LSB 0xE
#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_MSB 0x15
#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_LSB 0x8
#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_MSB 0xD
#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_LSB 0x0
#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_MSB 0x7
#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_RMASK 0xFF

#define QIB_7322_DCACtrlC_OFFS 0x590
#define QIB_7322_DCACtrlC_DEF 0x0000000000000000
#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_LSB 0x36
#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_MSB 0x3B
#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_LSB 0x2E
#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_MSB 0x35
#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_LSB 0x28
#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_MSB 0x2D
#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_LSB 0x20
#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_MSB 0x27
#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_LSB 0x16
#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_MSB 0x1B
#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_LSB 0xE
#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_MSB 0x15
#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_LSB 0x8
#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_MSB 0xD
#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_LSB 0x0
#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_MSB 0x7
#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_RMASK 0xFF

#define QIB_7322_DCACtrlD_OFFS 0x598
#define QIB_7322_DCACtrlD_DEF 0x0000000000000000
#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_LSB 0x36
#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_MSB 0x3B
#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_LSB 0x2E
#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_MSB 0x35
#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_LSB 0x28
#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_MSB 0x2D
#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_LSB 0x20
#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_MSB 0x27
#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_LSB 0x16
#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_MSB 0x1B
#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_LSB 0xE
#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_MSB 0x15
#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_LSB 0x8
#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_MSB 0xD
#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_LSB 0x0
#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_MSB 0x7
#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_RMASK 0xFF

#define QIB_7322_DCACtrlE_OFFS 0x5A0
#define QIB_7322_DCACtrlE_DEF 0x0000000000000000
#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_LSB 0x36
#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_MSB 0x3B
#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_LSB 0x2E
#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_MSB 0x35
#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_LSB 0x28
#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_MSB 0x2D
#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_LSB 0x20
#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_MSB 0x27
#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_LSB 0x16
#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_MSB 0x1B
#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_LSB 0xE
#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_MSB 0x15
#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_LSB 0x8
#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_MSB 0xD
#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_LSB 0x0
#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_MSB 0x7
#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_RMASK 0xFF

#define QIB_7322_DCACtrlF_OFFS 0x5A8
#define QIB_7322_DCACtrlF_DEF 0x0000000000000000
#define QIB_7322_DCACtrlF_SendDma1DCAOPH_LSB 0x28
#define QIB_7322_DCACtrlF_SendDma1DCAOPH_MSB 0x2F
#define QIB_7322_DCACtrlF_SendDma1DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlF_SendDma0DCAOPH_LSB 0x20
#define QIB_7322_DCACtrlF_SendDma0DCAOPH_MSB 0x27
#define QIB_7322_DCACtrlF_SendDma0DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_LSB 0x16
#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_MSB 0x1B
#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_LSB 0xE
#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_MSB 0x15
#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_RMASK 0xFF
#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_LSB 0x8
#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_MSB 0xD
#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_RMASK 0x3F
#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_LSB 0x0
#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_MSB 0x7
#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_RMASK 0xFF

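/*
 * Editorial sketch, read off the macros above: RcvHdrq DCA fields are
 * packed four queues per register, DCACtrlB..E covering hdrq 0-15
 * (register offset 0x588 + 8 * (n / 4)), with hdrq 16-17 plus the
 * SendDma fields in DCACtrlF.  Within a register the OPH low bits
 * repeat in the pattern below; the helper itself is an illustration,
 * not driver code.
 */
static inline unsigned int qib7322_hdrq_dca_oph_lsb(unsigned int n)
{
	static const unsigned char oph_lsb[4] = { 0x0, 0xE, 0x20, 0x2E };

	/* Same pattern holds for hdrq 16/17, which live in DCACtrlF. */
	return oph_lsb[n & 3];
}
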
1358#define QIB_7322_RcvAvailTimeOut0_OFFS 0xC00
1359#define QIB_7322_RcvAvailTimeOut0_DEF 0x0000000000000000
1360#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_LSB 0x10
1361#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_MSB 0x1F
1362#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_RMASK 0xFFFF
1363#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_LSB 0x0
1364#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_MSB 0xF
1365#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_RMASK 0xFFFF
1366
1367#define QIB_7322_CntrRegBase_0_OFFS 0x1028
1368#define QIB_7322_CntrRegBase_0_DEF 0x0000000000012000
1369
1370#define QIB_7322_ErrMask_0_OFFS 0x1080
1371#define QIB_7322_ErrMask_0_DEF 0x0000000000000000
1372#define QIB_7322_ErrMask_0_IBStatusChangedMask_LSB 0x3A
1373#define QIB_7322_ErrMask_0_IBStatusChangedMask_MSB 0x3A
1374#define QIB_7322_ErrMask_0_IBStatusChangedMask_RMASK 0x1
1375#define QIB_7322_ErrMask_0_SHeadersErrMask_LSB 0x39
1376#define QIB_7322_ErrMask_0_SHeadersErrMask_MSB 0x39
1377#define QIB_7322_ErrMask_0_SHeadersErrMask_RMASK 0x1
1378#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_LSB 0x36
1379#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_MSB 0x36
1380#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_RMASK 0x1
1381#define QIB_7322_ErrMask_0_SDmaHaltErrMask_LSB 0x31
1382#define QIB_7322_ErrMask_0_SDmaHaltErrMask_MSB 0x31
1383#define QIB_7322_ErrMask_0_SDmaHaltErrMask_RMASK 0x1
1384#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_LSB 0x30
1385#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_MSB 0x30
#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_LSB 0x2F
#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_MSB 0x2F
#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_LSB 0x2E
#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_MSB 0x2E
#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_LSB 0x2D
#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_MSB 0x2D
#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_LSB 0x2C
#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_MSB 0x2C
#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SDma1stDescErrMask_LSB 0x2B
#define QIB_7322_ErrMask_0_SDma1stDescErrMask_MSB 0x2B
#define QIB_7322_ErrMask_0_SDma1stDescErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SDmaBaseErrMask_LSB 0x2A
#define QIB_7322_ErrMask_0_SDmaBaseErrMask_MSB 0x2A
#define QIB_7322_ErrMask_0_SDmaBaseErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_LSB 0x29
#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_MSB 0x29
#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_LSB 0x28
#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_MSB 0x28
#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_LSB 0x27
#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_MSB 0x27
#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_LSB 0x26
#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_MSB 0x26
#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_LSB 0x25
#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_MSB 0x25
#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_LSB 0x24
#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_MSB 0x24
#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_LSB 0x22
#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_MSB 0x22
#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_LSB 0x21
#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_MSB 0x21
#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SendPktLenErrMask_LSB 0x20
#define QIB_7322_ErrMask_0_SendPktLenErrMask_MSB 0x20
#define QIB_7322_ErrMask_0_SendPktLenErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SendUnderRunErrMask_LSB 0x1F
#define QIB_7322_ErrMask_0_SendUnderRunErrMask_MSB 0x1F
#define QIB_7322_ErrMask_0_SendUnderRunErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_LSB 0x1E
#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_MSB 0x1E
#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_LSB 0x1D
#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_MSB 0x1D
#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_LSB 0x11
#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_MSB 0x11
#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvHdrErrMask_LSB 0x10
#define QIB_7322_ErrMask_0_RcvHdrErrMask_MSB 0x10
#define QIB_7322_ErrMask_0_RcvHdrErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_LSB 0xF
#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_MSB 0xF
#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvBadTidErrMask_LSB 0xE
#define QIB_7322_ErrMask_0_RcvBadTidErrMask_MSB 0xE
#define QIB_7322_ErrMask_0_RcvBadTidErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_LSB 0xB
#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_MSB 0xB
#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_LSB 0xA
#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_MSB 0xA
#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvEBPErrMask_LSB 0x9
#define QIB_7322_ErrMask_0_RcvEBPErrMask_MSB 0x9
#define QIB_7322_ErrMask_0_RcvEBPErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_LSB 0x8
#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_MSB 0x8
#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_LSB 0x7
#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_MSB 0x7
#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_LSB 0x6
#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_MSB 0x6
#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_LSB 0x5
#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_MSB 0x5
#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_LSB 0x4
#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_MSB 0x4
#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_LSB 0x3
#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_MSB 0x3
#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvICRCErrMask_LSB 0x2
#define QIB_7322_ErrMask_0_RcvICRCErrMask_MSB 0x2
#define QIB_7322_ErrMask_0_RcvICRCErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvVCRCErrMask_LSB 0x1
#define QIB_7322_ErrMask_0_RcvVCRCErrMask_MSB 0x1
#define QIB_7322_ErrMask_0_RcvVCRCErrMask_RMASK 0x1
#define QIB_7322_ErrMask_0_RcvFormatErrMask_LSB 0x0
#define QIB_7322_ErrMask_0_RcvFormatErrMask_MSB 0x0
#define QIB_7322_ErrMask_0_RcvFormatErrMask_RMASK 0x1

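/*
 * Illustrative sketch, not part of the generated register map: every field
 * above is described as an _LSB/_MSB bit-position pair plus an _RMASK that
 * is the field's mask after shifting right by _LSB.  Reading a field is
 * therefore (reg >> LSB) & RMASK, and writing one places
 * (val & RMASK) << LSB.  The hypothetical helper below (assuming the
 * kernel's u64 from <linux/types.h>) only restates that convention.
 */
static inline u64 qib_7322_get_field(u64 reg, unsigned int lsb, u64 rmask)
{
	/* shift the field down to bit 0, then mask off neighbouring bits */
	return (reg >> lsb) & rmask;
}

/*
 * Example: test whether RcvICRC errors are enabled in a cached ErrMask_0
 * value, using only the constants defined above:
 *
 *	qib_7322_get_field(errmask,
 *			   QIB_7322_ErrMask_0_RcvICRCErrMask_LSB,
 *			   QIB_7322_ErrMask_0_RcvICRCErrMask_RMASK);
 */
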
#define QIB_7322_ErrStatus_0_OFFS 0x1088
#define QIB_7322_ErrStatus_0_DEF 0x0000000000000000
#define QIB_7322_ErrStatus_0_IBStatusChanged_LSB 0x3A
#define QIB_7322_ErrStatus_0_IBStatusChanged_MSB 0x3A
#define QIB_7322_ErrStatus_0_IBStatusChanged_RMASK 0x1
#define QIB_7322_ErrStatus_0_SHeadersErr_LSB 0x39
#define QIB_7322_ErrStatus_0_SHeadersErr_MSB 0x39
#define QIB_7322_ErrStatus_0_SHeadersErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_LSB 0x36
#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_MSB 0x36
#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SDmaHaltErr_LSB 0x31
#define QIB_7322_ErrStatus_0_SDmaHaltErr_MSB 0x31
#define QIB_7322_ErrStatus_0_SDmaHaltErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_LSB 0x30
#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_MSB 0x30
#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_LSB 0x2F
#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_MSB 0x2F
#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_LSB 0x2E
#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_MSB 0x2E
#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SDmaDwEnErr_LSB 0x2D
#define QIB_7322_ErrStatus_0_SDmaDwEnErr_MSB 0x2D
#define QIB_7322_ErrStatus_0_SDmaDwEnErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_LSB 0x2C
#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_MSB 0x2C
#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SDma1stDescErr_LSB 0x2B
#define QIB_7322_ErrStatus_0_SDma1stDescErr_MSB 0x2B
#define QIB_7322_ErrStatus_0_SDma1stDescErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SDmaBaseErr_LSB 0x2A
#define QIB_7322_ErrStatus_0_SDmaBaseErr_MSB 0x2A
#define QIB_7322_ErrStatus_0_SDmaBaseErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_LSB 0x29
#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_MSB 0x29
#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_LSB 0x28
#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_MSB 0x28
#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_LSB 0x27
#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_MSB 0x27
#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SendBufMisuseErr_LSB 0x26
#define QIB_7322_ErrStatus_0_SendBufMisuseErr_MSB 0x26
#define QIB_7322_ErrStatus_0_SendBufMisuseErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_LSB 0x25
#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_MSB 0x25
#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_LSB 0x24
#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_MSB 0x24
#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_LSB 0x22
#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_MSB 0x22
#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_LSB 0x21
#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_MSB 0x21
#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SendPktLenErr_LSB 0x20
#define QIB_7322_ErrStatus_0_SendPktLenErr_MSB 0x20
#define QIB_7322_ErrStatus_0_SendPktLenErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SendUnderRunErr_LSB 0x1F
#define QIB_7322_ErrStatus_0_SendUnderRunErr_MSB 0x1F
#define QIB_7322_ErrStatus_0_SendUnderRunErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_LSB 0x1E
#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_MSB 0x1E
#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_SendMinPktLenErr_LSB 0x1D
#define QIB_7322_ErrStatus_0_SendMinPktLenErr_MSB 0x1D
#define QIB_7322_ErrStatus_0_SendMinPktLenErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_LSB 0x11
#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_MSB 0x11
#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvHdrErr_LSB 0x10
#define QIB_7322_ErrStatus_0_RcvHdrErr_MSB 0x10
#define QIB_7322_ErrStatus_0_RcvHdrErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvHdrLenErr_LSB 0xF
#define QIB_7322_ErrStatus_0_RcvHdrLenErr_MSB 0xF
#define QIB_7322_ErrStatus_0_RcvHdrLenErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvBadTidErr_LSB 0xE
#define QIB_7322_ErrStatus_0_RcvBadTidErr_MSB 0xE
#define QIB_7322_ErrStatus_0_RcvBadTidErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvBadVersionErr_LSB 0xB
#define QIB_7322_ErrStatus_0_RcvBadVersionErr_MSB 0xB
#define QIB_7322_ErrStatus_0_RcvBadVersionErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvIBFlowErr_LSB 0xA
#define QIB_7322_ErrStatus_0_RcvIBFlowErr_MSB 0xA
#define QIB_7322_ErrStatus_0_RcvIBFlowErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvEBPErr_LSB 0x9
#define QIB_7322_ErrStatus_0_RcvEBPErr_MSB 0x9
#define QIB_7322_ErrStatus_0_RcvEBPErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_LSB 0x8
#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_MSB 0x8
#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_LSB 0x7
#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_MSB 0x7
#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_LSB 0x6
#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_MSB 0x6
#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_LSB 0x5
#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_MSB 0x5
#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_LSB 0x4
#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_MSB 0x4
#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_LSB 0x3
#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_MSB 0x3
#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvICRCErr_LSB 0x2
#define QIB_7322_ErrStatus_0_RcvICRCErr_MSB 0x2
#define QIB_7322_ErrStatus_0_RcvICRCErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvVCRCErr_LSB 0x1
#define QIB_7322_ErrStatus_0_RcvVCRCErr_MSB 0x1
#define QIB_7322_ErrStatus_0_RcvVCRCErr_RMASK 0x1
#define QIB_7322_ErrStatus_0_RcvFormatErr_LSB 0x0
#define QIB_7322_ErrStatus_0_RcvFormatErr_MSB 0x0
#define QIB_7322_ErrStatus_0_RcvFormatErr_RMASK 0x1

#define QIB_7322_ErrClear_0_OFFS 0x1090
#define QIB_7322_ErrClear_0_DEF 0x0000000000000000
#define QIB_7322_ErrClear_0_IBStatusChangedClear_LSB 0x3A
#define QIB_7322_ErrClear_0_IBStatusChangedClear_MSB 0x3A
#define QIB_7322_ErrClear_0_IBStatusChangedClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SHeadersErrClear_LSB 0x39
#define QIB_7322_ErrClear_0_SHeadersErrClear_MSB 0x39
#define QIB_7322_ErrClear_0_SHeadersErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_LSB 0x36
#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_MSB 0x36
#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SDmaHaltErrClear_LSB 0x31
#define QIB_7322_ErrClear_0_SDmaHaltErrClear_MSB 0x31
#define QIB_7322_ErrClear_0_SDmaHaltErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_LSB 0x30
#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_MSB 0x30
#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_LSB 0x2F
#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_MSB 0x2F
#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_LSB 0x2E
#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_MSB 0x2E
#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_LSB 0x2D
#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_MSB 0x2D
#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_LSB 0x2C
#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_MSB 0x2C
#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SDma1stDescErrClear_LSB 0x2B
#define QIB_7322_ErrClear_0_SDma1stDescErrClear_MSB 0x2B
#define QIB_7322_ErrClear_0_SDma1stDescErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SDmaBaseErrClear_LSB 0x2A
#define QIB_7322_ErrClear_0_SDmaBaseErrClear_MSB 0x2A
#define QIB_7322_ErrClear_0_SDmaBaseErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_LSB 0x29
#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_MSB 0x29
#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_LSB 0x28
#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_MSB 0x28
#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_LSB 0x27
#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_MSB 0x27
#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_LSB 0x26
#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_MSB 0x26
#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_LSB 0x25
#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_MSB 0x25
#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_LSB 0x24
#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_MSB 0x24
#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_LSB 0x22
#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_MSB 0x22
#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_LSB 0x21
#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_MSB 0x21
#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SendPktLenErrClear_LSB 0x20
#define QIB_7322_ErrClear_0_SendPktLenErrClear_MSB 0x20
#define QIB_7322_ErrClear_0_SendPktLenErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SendUnderRunErrClear_LSB 0x1F
#define QIB_7322_ErrClear_0_SendUnderRunErrClear_MSB 0x1F
#define QIB_7322_ErrClear_0_SendUnderRunErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_LSB 0x1E
#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_MSB 0x1E
#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_LSB 0x1D
#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_MSB 0x1D
#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_LSB 0x11
#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_MSB 0x11
#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvHdrErrClear_LSB 0x10
#define QIB_7322_ErrClear_0_RcvHdrErrClear_MSB 0x10
#define QIB_7322_ErrClear_0_RcvHdrErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_LSB 0xF
#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_MSB 0xF
#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvBadTidErrClear_LSB 0xE
#define QIB_7322_ErrClear_0_RcvBadTidErrClear_MSB 0xE
#define QIB_7322_ErrClear_0_RcvBadTidErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_LSB 0xB
#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_MSB 0xB
#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_LSB 0xA
#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_MSB 0xA
#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvEBPErrClear_LSB 0x9
#define QIB_7322_ErrClear_0_RcvEBPErrClear_MSB 0x9
#define QIB_7322_ErrClear_0_RcvEBPErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_LSB 0x8
#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_MSB 0x8
#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_LSB 0x7
#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_MSB 0x7
#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_LSB 0x6
#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_MSB 0x6
#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_LSB 0x5
#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_MSB 0x5
#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_LSB 0x4
#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_MSB 0x4
#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_LSB 0x3
#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_MSB 0x3
#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvICRCErrClear_LSB 0x2
#define QIB_7322_ErrClear_0_RcvICRCErrClear_MSB 0x2
#define QIB_7322_ErrClear_0_RcvICRCErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvVCRCErrClear_LSB 0x1
#define QIB_7322_ErrClear_0_RcvVCRCErrClear_MSB 0x1
#define QIB_7322_ErrClear_0_RcvVCRCErrClear_RMASK 0x1
#define QIB_7322_ErrClear_0_RcvFormatErrClear_LSB 0x0
#define QIB_7322_ErrClear_0_RcvFormatErrClear_MSB 0x0
#define QIB_7322_ErrClear_0_RcvFormatErrClear_RMASK 0x1

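/*
 * Illustrative sketch: ErrMask_0, ErrStatus_0 (0x1088) and ErrClear_0
 * (0x1090) use matching bit positions for each error (e.g. RcvICRCErr is
 * bit 0x2 in all three), the usual mask/status/clear trio.  Assuming
 * write-one-to-clear behaviour for ErrClear_0 (an assumption here, not
 * something these generated constants state) and the standard readq/writeq
 * MMIO accessors from <linux/io.h>, acknowledging every latched per-port
 * error could look like:
 */
static inline void qib_7322_ack_port_errors(void __iomem *base)
{
	u64 latched = readq(base + QIB_7322_ErrStatus_0_OFFS);

	/* write the latched status bits back to the clear register */
	writeq(latched, base + QIB_7322_ErrClear_0_OFFS);
}
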
#define QIB_7322_TXEStatus_0_OFFS 0x10B8
#define QIB_7322_TXEStatus_0_DEF 0x0000000XC00080FF
#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_LSB 0x1F
#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_MSB 0x1F
#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_RMASK 0x1
#define QIB_7322_TXEStatus_0_RmFifoEmpty_LSB 0x1E
#define QIB_7322_TXEStatus_0_RmFifoEmpty_MSB 0x1E
#define QIB_7322_TXEStatus_0_RmFifoEmpty_RMASK 0x1
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_LSB 0xF
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_MSB 0xF
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_RMASK 0x1
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_LSB 0x7
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_MSB 0x7
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_RMASK 0x1
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_LSB 0x6
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_MSB 0x6
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_RMASK 0x1
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_LSB 0x5
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_MSB 0x5
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_RMASK 0x1
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_LSB 0x4
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_MSB 0x4
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_RMASK 0x1
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_LSB 0x3
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_MSB 0x3
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_RMASK 0x1
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_LSB 0x2
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_MSB 0x2
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_RMASK 0x1
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_LSB 0x1
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_MSB 0x1
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_RMASK 0x1
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_LSB 0x0
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_MSB 0x0
#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_RMASK 0x1

#define QIB_7322_RcvCtrl_0_OFFS 0x1100
#define QIB_7322_RcvCtrl_0_DEF 0x0000000000000000
#define QIB_7322_RcvCtrl_0_RcvResetCredit_LSB 0x2A
#define QIB_7322_RcvCtrl_0_RcvResetCredit_MSB 0x2A
#define QIB_7322_RcvCtrl_0_RcvResetCredit_RMASK 0x1
#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_LSB 0x29
#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_MSB 0x29
#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_RMASK 0x1
#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_LSB 0x28
#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_MSB 0x28
#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_RMASK 0x1
#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_LSB 0x27
#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_MSB 0x27
#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_RMASK 0x1
#define QIB_7322_RcvCtrl_0_ContextEnableUser_LSB 0x2
#define QIB_7322_RcvCtrl_0_ContextEnableUser_MSB 0x11
#define QIB_7322_RcvCtrl_0_ContextEnableUser_RMASK 0xFFFF
#define QIB_7322_RcvCtrl_0_ContextEnableKernel_LSB 0x0
#define QIB_7322_RcvCtrl_0_ContextEnableKernel_MSB 0x0
#define QIB_7322_RcvCtrl_0_ContextEnableKernel_RMASK 0x1

#define QIB_7322_RcvBTHQP_0_OFFS 0x1108
#define QIB_7322_RcvBTHQP_0_DEF 0x0000000000000000
#define QIB_7322_RcvBTHQP_0_RcvBTHQP_LSB 0x0
#define QIB_7322_RcvBTHQP_0_RcvBTHQP_MSB 0x17
#define QIB_7322_RcvBTHQP_0_RcvBTHQP_RMASK 0xFFFFFF

#define QIB_7322_RcvQPMapTableA_0_OFFS 0x1110
#define QIB_7322_RcvQPMapTableA_0_DEF 0x0000000000000000
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_LSB 0x19
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_MSB 0x1D
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_RMASK 0x1F
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_LSB 0x14
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_MSB 0x18
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_RMASK 0x1F
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_LSB 0xF
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_MSB 0x13
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_RMASK 0x1F
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_LSB 0xA
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_MSB 0xE
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_RMASK 0x1F
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_LSB 0x5
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_MSB 0x9
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_RMASK 0x1F
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_LSB 0x0
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_MSB 0x4
#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK 0x1F

#define QIB_7322_RcvQPMapTableB_0_OFFS 0x1118
#define QIB_7322_RcvQPMapTableB_0_DEF 0x0000000000000000
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_LSB 0x19
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_MSB 0x1D
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_RMASK 0x1F
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_LSB 0x14
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_MSB 0x18
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_RMASK 0x1F
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_LSB 0xF
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_MSB 0x13
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_RMASK 0x1F
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_LSB 0xA
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_MSB 0xE
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_RMASK 0x1F
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_LSB 0x5
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_MSB 0x9
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_RMASK 0x1F
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_LSB 0x0
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_MSB 0x4
#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_RMASK 0x1F

#define QIB_7322_RcvQPMapTableC_0_OFFS 0x1120
#define QIB_7322_RcvQPMapTableC_0_DEF 0x0000000000000000
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_LSB 0x19
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_MSB 0x1D
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_RMASK 0x1F
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_LSB 0x14
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_MSB 0x18
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_RMASK 0x1F
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_LSB 0xF
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_MSB 0x13
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_RMASK 0x1F
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_LSB 0xA
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_MSB 0xE
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_RMASK 0x1F
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_LSB 0x5
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_MSB 0x9
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_RMASK 0x1F
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_LSB 0x0
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_MSB 0x4
#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_RMASK 0x1F

#define QIB_7322_RcvQPMapTableD_0_OFFS 0x1128
#define QIB_7322_RcvQPMapTableD_0_DEF 0x0000000000000000
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_LSB 0x19
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_MSB 0x1D
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_RMASK 0x1F
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_LSB 0x14
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_MSB 0x18
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_RMASK 0x1F
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_LSB 0xF
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_MSB 0x13
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_RMASK 0x1F
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_LSB 0xA
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_MSB 0xE
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_RMASK 0x1F
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_LSB 0x5
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_MSB 0x9
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_RMASK 0x1F
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_LSB 0x0
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_MSB 0x4
#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_RMASK 0x1F

#define QIB_7322_RcvQPMapTableE_0_OFFS 0x1130
#define QIB_7322_RcvQPMapTableE_0_DEF 0x0000000000000000
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_LSB 0x19
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_MSB 0x1D
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_RMASK 0x1F
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_LSB 0x14
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_MSB 0x18
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_RMASK 0x1F
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_LSB 0xF
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_MSB 0x13
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_RMASK 0x1F
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_LSB 0xA
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_MSB 0xE
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_RMASK 0x1F
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_LSB 0x5
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_MSB 0x9
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_RMASK 0x1F
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_LSB 0x0
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_MSB 0x4
#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_RMASK 0x1F

#define QIB_7322_RcvQPMapTableF_0_OFFS 0x1138
#define QIB_7322_RcvQPMapTableF_0_DEF 0x0000000000000000
#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_LSB 0x5
#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_MSB 0x9
#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_RMASK 0x1F
#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_LSB 0x0
#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_MSB 0x4
#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_RMASK 0x1F

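/*
 * Illustrative sketch: RcvQPMapTableA_0..F_0 sit at consecutive 8-byte
 * offsets (0x1110..0x1138) and pack up to six 5-bit receive-context numbers
 * per register -- the _LSB values above step by 5 (0x0, 0x5, 0xA, 0xF,
 * 0x14, 0x19) -- giving 32 map entries in all.  A hypothetical helper
 * locating map entry n (0..31) under that layout:
 */
static inline void qib_7322_qpmap_slot(unsigned int n,
				       unsigned long *offs,
				       unsigned int *shift)
{
	/* six entries per table register, tables spaced 8 bytes apart */
	*offs = QIB_7322_RcvQPMapTableA_0_OFFS + (n / 6) * 8;
	/* each entry is a 5-bit context number within its register */
	*shift = (n % 6) * 5;
}
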
#define QIB_7322_PSStat_0_OFFS 0x1140
#define QIB_7322_PSStat_0_DEF 0x0000000000000000

#define QIB_7322_PSStart_0_OFFS 0x1148
#define QIB_7322_PSStart_0_DEF 0x0000000000000000

#define QIB_7322_PSInterval_0_OFFS 0x1150
#define QIB_7322_PSInterval_0_DEF 0x0000000000000000

#define QIB_7322_RcvStatus_0_OFFS 0x1160
#define QIB_7322_RcvStatus_0_DEF 0x0000000000000000
#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_LSB 0x1
#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_MSB 0x5
#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_RMASK 0x1F
#define QIB_7322_RcvStatus_0_RxPktInProgress_LSB 0x0
#define QIB_7322_RcvStatus_0_RxPktInProgress_MSB 0x0
#define QIB_7322_RcvStatus_0_RxPktInProgress_RMASK 0x1

#define QIB_7322_RcvPartitionKey_0_OFFS 0x1168
#define QIB_7322_RcvPartitionKey_0_DEF 0x0000000000000000

#define QIB_7322_RcvQPMulticastContext_0_OFFS 0x1170
#define QIB_7322_RcvQPMulticastContext_0_DEF 0x0000000000000000
#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_LSB 0x0
#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_MSB 0x4
#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_RMASK 0x1F

#define QIB_7322_RcvPktLEDCnt_0_OFFS 0x1178
#define QIB_7322_RcvPktLEDCnt_0_DEF 0x0000000000000000
#define QIB_7322_RcvPktLEDCnt_0_ONperiod_LSB 0x20
#define QIB_7322_RcvPktLEDCnt_0_ONperiod_MSB 0x3F
#define QIB_7322_RcvPktLEDCnt_0_ONperiod_RMASK 0xFFFFFFFF
#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_LSB 0x0
#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_MSB 0x1F
#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_RMASK 0xFFFFFFFF

#define QIB_7322_SendDmaIdleCnt_0_OFFS 0x1180
#define QIB_7322_SendDmaIdleCnt_0_DEF 0x0000000000000000
#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_LSB 0x0
#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_MSB 0xF
#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_RMASK 0xFFFF

#define QIB_7322_SendDmaReloadCnt_0_OFFS 0x1188
#define QIB_7322_SendDmaReloadCnt_0_DEF 0x0000000000000000
#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_LSB 0x0
#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_MSB 0xF
#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_RMASK 0xFFFF

#define QIB_7322_SendDmaDescCnt_0_OFFS 0x1190
#define QIB_7322_SendDmaDescCnt_0_DEF 0x0000000000000000
#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_LSB 0x0
#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_MSB 0xF
#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_RMASK 0xFFFF

#define QIB_7322_SendCtrl_0_OFFS 0x11C0
#define QIB_7322_SendCtrl_0_DEF 0x0000000000000000
#define QIB_7322_SendCtrl_0_IBVLArbiterEn_LSB 0xF
#define QIB_7322_SendCtrl_0_IBVLArbiterEn_MSB 0xF
#define QIB_7322_SendCtrl_0_IBVLArbiterEn_RMASK 0x1
#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_LSB 0xE
#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_MSB 0xE
#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_RMASK 0x1
#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_LSB 0xD
#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_MSB 0xD
#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_RMASK 0x1
#define QIB_7322_SendCtrl_0_SDmaHalt_LSB 0xC
#define QIB_7322_SendCtrl_0_SDmaHalt_MSB 0xC
#define QIB_7322_SendCtrl_0_SDmaHalt_RMASK 0x1
#define QIB_7322_SendCtrl_0_SDmaEnable_LSB 0xB
#define QIB_7322_SendCtrl_0_SDmaEnable_MSB 0xB
#define QIB_7322_SendCtrl_0_SDmaEnable_RMASK 0x1
#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_LSB 0xA
#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_MSB 0xA
#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_RMASK 0x1
#define QIB_7322_SendCtrl_0_SDmaIntEnable_LSB 0x9
#define QIB_7322_SendCtrl_0_SDmaIntEnable_MSB 0x9
#define QIB_7322_SendCtrl_0_SDmaIntEnable_RMASK 0x1
#define QIB_7322_SendCtrl_0_SDmaCleanup_LSB 0x8
#define QIB_7322_SendCtrl_0_SDmaCleanup_MSB 0x8
#define QIB_7322_SendCtrl_0_SDmaCleanup_RMASK 0x1
#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_LSB 0x7
#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_MSB 0x7
#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_RMASK 0x1
#define QIB_7322_SendCtrl_0_SendEnable_LSB 0x3
#define QIB_7322_SendCtrl_0_SendEnable_MSB 0x3
#define QIB_7322_SendCtrl_0_SendEnable_RMASK 0x1
#define QIB_7322_SendCtrl_0_TxeBypassIbc_LSB 0x1
#define QIB_7322_SendCtrl_0_TxeBypassIbc_MSB 0x1
#define QIB_7322_SendCtrl_0_TxeBypassIbc_RMASK 0x1
#define QIB_7322_SendCtrl_0_TxeAbortIbc_LSB 0x0
#define QIB_7322_SendCtrl_0_TxeAbortIbc_MSB 0x0
#define QIB_7322_SendCtrl_0_TxeAbortIbc_RMASK 0x1

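/*
 * Illustrative sketch: SendCtrl_0 packs independent single-bit controls
 * (SendEnable at bit 0x3, SDmaEnable at bit 0xB, SDmaHalt at bit 0xC, ...),
 * so updates are naturally read-modify-write.  Assuming standard
 * readq/writeq MMIO accessors (an illustration, not the driver's actual
 * code path), releasing a halted send DMA engine on this port could look
 * like:
 */
static inline void qib_7322_sdma_go(void __iomem *base)
{
	u64 sendctrl = readq(base + QIB_7322_SendCtrl_0_OFFS);

	/* drop the halt bit and set the enable bit, leaving the rest */
	sendctrl &= ~((u64)QIB_7322_SendCtrl_0_SDmaHalt_RMASK <<
		      QIB_7322_SendCtrl_0_SDmaHalt_LSB);
	sendctrl |= (u64)QIB_7322_SendCtrl_0_SDmaEnable_RMASK <<
		    QIB_7322_SendCtrl_0_SDmaEnable_LSB;
	writeq(sendctrl, base + QIB_7322_SendCtrl_0_OFFS);
}
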
#define QIB_7322_SendDmaBase_0_OFFS 0x11F8
#define QIB_7322_SendDmaBase_0_DEF 0x0000000000000000
#define QIB_7322_SendDmaBase_0_SendDmaBase_LSB 0x0
#define QIB_7322_SendDmaBase_0_SendDmaBase_MSB 0x2F
#define QIB_7322_SendDmaBase_0_SendDmaBase_RMASK 0xFFFFFFFFFFFF

#define QIB_7322_SendDmaLenGen_0_OFFS 0x1200
#define QIB_7322_SendDmaLenGen_0_DEF 0x0000000000000000
#define QIB_7322_SendDmaLenGen_0_Generation_LSB 0x10
#define QIB_7322_SendDmaLenGen_0_Generation_MSB 0x12
#define QIB_7322_SendDmaLenGen_0_Generation_RMASK 0x7
#define QIB_7322_SendDmaLenGen_0_Length_LSB 0x0
#define QIB_7322_SendDmaLenGen_0_Length_MSB 0xF
#define QIB_7322_SendDmaLenGen_0_Length_RMASK 0xFFFF

#define QIB_7322_SendDmaTail_0_OFFS 0x1208
#define QIB_7322_SendDmaTail_0_DEF 0x0000000000000000
#define QIB_7322_SendDmaTail_0_SendDmaTail_LSB 0x0
#define QIB_7322_SendDmaTail_0_SendDmaTail_MSB 0xF
#define QIB_7322_SendDmaTail_0_SendDmaTail_RMASK 0xFFFF

#define QIB_7322_SendDmaHead_0_OFFS 0x1210
#define QIB_7322_SendDmaHead_0_DEF 0x0000000000000000
#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_LSB 0x20
#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_MSB 0x2F
#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_RMASK 0xFFFF
#define QIB_7322_SendDmaHead_0_SendDmaHead_LSB 0x0
#define QIB_7322_SendDmaHead_0_SendDmaHead_MSB 0xF
#define QIB_7322_SendDmaHead_0_SendDmaHead_RMASK 0xFFFF

#define QIB_7322_SendDmaHeadAddr_0_OFFS 0x1218
#define QIB_7322_SendDmaHeadAddr_0_DEF 0x0000000000000000
#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_LSB 0x0
#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_MSB 0x2F
#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF

#define QIB_7322_SendDmaBufMask0_0_OFFS 0x1220
#define QIB_7322_SendDmaBufMask0_0_DEF 0x0000000000000000
#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_LSB 0x0
#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_MSB 0x3F
#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_RMASK 0x0

#define QIB_7322_SendDmaStatus_0_OFFS 0x1238
#define QIB_7322_SendDmaStatus_0_DEF 0x0000000042000000
#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_LSB 0x3F
#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_MSB 0x3F
#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_RMASK 0x1
#define QIB_7322_SendDmaStatus_0_HaltInProg_LSB 0x3E
#define QIB_7322_SendDmaStatus_0_HaltInProg_MSB 0x3E
#define QIB_7322_SendDmaStatus_0_HaltInProg_RMASK 0x1
#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_LSB 0x3D
#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_MSB 0x3D
#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_RMASK 0x1
#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_LSB 0x2F
#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_MSB 0x3C
#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_RMASK 0x3FFF
#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_LSB 0x28
#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_MSB 0x2E
#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_RMASK 0x7F
#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_LSB 0x20
#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_MSB 0x27
#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_RMASK 0xFF
#define QIB_7322_SendDmaStatus_0_ScbFull_LSB 0x1F
#define QIB_7322_SendDmaStatus_0_ScbFull_MSB 0x1F
#define QIB_7322_SendDmaStatus_0_ScbFull_RMASK 0x1
#define QIB_7322_SendDmaStatus_0_ScbEmpty_LSB 0x1E
#define QIB_7322_SendDmaStatus_0_ScbEmpty_MSB 0x1E
#define QIB_7322_SendDmaStatus_0_ScbEmpty_RMASK 0x1
#define QIB_7322_SendDmaStatus_0_ScbEntryValid_LSB 0x1D
#define QIB_7322_SendDmaStatus_0_ScbEntryValid_MSB 0x1D
#define QIB_7322_SendDmaStatus_0_ScbEntryValid_RMASK 0x1
#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_LSB 0x1C
#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_MSB 0x1C
#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_RMASK 0x1
#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_LSB 0x1B
#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_MSB 0x1B
#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_RMASK 0x1
#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_LSB 0x1A
#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_MSB 0x1A
#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_RMASK 0x1
#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_LSB 0x19
#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_MSB 0x19
#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_RMASK 0x1
#define QIB_7322_SendDmaStatus_0_SplFifoFull_LSB 0x18
#define QIB_7322_SendDmaStatus_0_SplFifoFull_MSB 0x18
#define QIB_7322_SendDmaStatus_0_SplFifoFull_RMASK 0x1
#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_LSB 0x10
#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_MSB 0x17
#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_RMASK 0xFF
#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_LSB 0x0
#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_MSB 0xF
#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_RMASK 0xFFFF

#define QIB_7322_SendDmaPriorityThld_0_OFFS 0x1258
#define QIB_7322_SendDmaPriorityThld_0_DEF 0x0000000000000000
#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_LSB 0x0
#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_MSB 0x3
#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_RMASK 0xF

#define QIB_7322_SendHdrErrSymptom_0_OFFS 0x1260
#define QIB_7322_SendHdrErrSymptom_0_DEF 0x0000000000000000
#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_LSB 0x6
#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_MSB 0x6
#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_RMASK 0x1
#define QIB_7322_SendHdrErrSymptom_0_GRHFail_LSB 0x5
#define QIB_7322_SendHdrErrSymptom_0_GRHFail_MSB 0x5
#define QIB_7322_SendHdrErrSymptom_0_GRHFail_RMASK 0x1
#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_LSB 0x4
#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_MSB 0x4
#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_RMASK 0x1
#define QIB_7322_SendHdrErrSymptom_0_QPFail_LSB 0x3
#define QIB_7322_SendHdrErrSymptom_0_QPFail_MSB 0x3
#define QIB_7322_SendHdrErrSymptom_0_QPFail_RMASK 0x1
#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_LSB 0x2
#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_MSB 0x2
#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_RMASK 0x1
#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_LSB 0x1
#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_MSB 0x1
#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_RMASK 0x1
#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_LSB 0x0
#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_MSB 0x0
#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_RMASK 0x1

#define QIB_7322_RxCreditVL0_0_OFFS 0x1280
#define QIB_7322_RxCreditVL0_0_DEF 0x0000000000000000
#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_LSB 0x10
#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_MSB 0x1B
#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_RMASK 0xFFF
#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_LSB 0x0
#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_MSB 0xB
#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_RMASK 0xFFF

#define QIB_7322_SendDmaBufUsed0_0_OFFS 0x1480
#define QIB_7322_SendDmaBufUsed0_0_DEF 0x0000000000000000
#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_LSB 0x0
#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_MSB 0x3F
#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_RMASK 0x0

#define QIB_7322_SendCheckControl_0_OFFS 0x14A8
#define QIB_7322_SendCheckControl_0_DEF 0x0000000000000000
#define QIB_7322_SendCheckControl_0_PKey_En_LSB 0x4
#define QIB_7322_SendCheckControl_0_PKey_En_MSB 0x4
#define QIB_7322_SendCheckControl_0_PKey_En_RMASK 0x1
#define QIB_7322_SendCheckControl_0_BTHQP_En_LSB 0x3
#define QIB_7322_SendCheckControl_0_BTHQP_En_MSB 0x3
#define QIB_7322_SendCheckControl_0_BTHQP_En_RMASK 0x1
#define QIB_7322_SendCheckControl_0_SLID_En_LSB 0x2
#define QIB_7322_SendCheckControl_0_SLID_En_MSB 0x2
#define QIB_7322_SendCheckControl_0_SLID_En_RMASK 0x1
#define QIB_7322_SendCheckControl_0_RawIPV6_En_LSB 0x1
#define QIB_7322_SendCheckControl_0_RawIPV6_En_MSB 0x1
#define QIB_7322_SendCheckControl_0_RawIPV6_En_RMASK 0x1
#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_LSB 0x0
#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_MSB 0x0
#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_RMASK 0x1

#define QIB_7322_SendIBSLIDMask_0_OFFS 0x14B0
#define QIB_7322_SendIBSLIDMask_0_DEF 0x0000000000000000
#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_LSB 0x0
#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_MSB 0xF
#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK 0xFFFF

#define QIB_7322_SendIBSLIDAssign_0_OFFS 0x14B8
#define QIB_7322_SendIBSLIDAssign_0_DEF 0x0000000000000000
#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_LSB 0x0
#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_MSB 0xF
#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK 0xFFFF

#define QIB_7322_IBCStatusA_0_OFFS 0x1540
#define QIB_7322_IBCStatusA_0_DEF 0x0000000000000X02
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_LSB 0x27
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_MSB 0x27
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_RMASK 0x1
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_LSB 0x26
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_MSB 0x26
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_RMASK 0x1
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_LSB 0x25
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_MSB 0x25
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_RMASK 0x1
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_LSB 0x24
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_MSB 0x24
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_RMASK 0x1
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_LSB 0x23
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_MSB 0x23
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_RMASK 0x1
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_LSB 0x22
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_MSB 0x22
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_RMASK 0x1
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_LSB 0x21
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_MSB 0x21
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_RMASK 0x1
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_LSB 0x20
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_MSB 0x20
#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_RMASK 0x1
#define QIB_7322_IBCStatusA_0_TxReady_LSB 0x1E
#define QIB_7322_IBCStatusA_0_TxReady_MSB 0x1E
#define QIB_7322_IBCStatusA_0_TxReady_RMASK 0x1
#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_LSB 0x1D
#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_MSB 0x1D
#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_RMASK 0x1
#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_LSB 0xF
#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_MSB 0xF
#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_RMASK 0x1
#define QIB_7322_IBCStatusA_0_ScrambleEn_LSB 0xE
#define QIB_7322_IBCStatusA_0_ScrambleEn_MSB 0xE
#define QIB_7322_IBCStatusA_0_ScrambleEn_RMASK 0x1
#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_LSB 0xD
#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_MSB 0xD
#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_RMASK 0x1
#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_LSB 0xC
#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_MSB 0xC
#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_RMASK 0x1
#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_LSB 0xA
#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_MSB 0xA
#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_RMASK 0x1
#define QIB_7322_IBCStatusA_0_LinkWidthActive_LSB 0x9
#define QIB_7322_IBCStatusA_0_LinkWidthActive_MSB 0x9
#define QIB_7322_IBCStatusA_0_LinkWidthActive_RMASK 0x1
#define QIB_7322_IBCStatusA_0_LinkSpeedActive_LSB 0x8
#define QIB_7322_IBCStatusA_0_LinkSpeedActive_MSB 0x8
#define QIB_7322_IBCStatusA_0_LinkSpeedActive_RMASK 0x1
#define QIB_7322_IBCStatusA_0_LinkState_LSB 0x5
#define QIB_7322_IBCStatusA_0_LinkState_MSB 0x7
#define QIB_7322_IBCStatusA_0_LinkState_RMASK 0x7
#define QIB_7322_IBCStatusA_0_LinkTrainingState_LSB 0x0
#define QIB_7322_IBCStatusA_0_LinkTrainingState_MSB 0x4
#define QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK 0x1F

#define QIB_7322_IBCStatusB_0_OFFS 0x1548
#define QIB_7322_IBCStatusB_0_DEF 0x00000000XXXXXXXX
#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_LSB 0x27
#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_MSB 0x27
#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_RMASK 0x1
#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_LSB 0x26
#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_MSB 0x26
#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_RMASK 0x1
#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_LSB 0x25
#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_MSB 0x25
#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_RMASK 0x1
#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_LSB 0x24
#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_MSB 0x24
#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_RMASK 0x1
#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_LSB 0x20
#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_MSB 0x23
#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_RMASK 0xF
#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_LSB 0x1E
#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_MSB 0x1F
#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_RMASK 0x3
#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_LSB 0x1A
#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_MSB 0x1D
#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_RMASK 0xF
#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_LSB 0x0
#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_MSB 0x19
#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_RMASK 0x3FFFFFF

#define QIB_7322_IBCCtrlA_0_OFFS 0x1560
#define QIB_7322_IBCCtrlA_0_DEF 0x0000000000000000
#define QIB_7322_IBCCtrlA_0_Loopback_LSB 0x3F
#define QIB_7322_IBCCtrlA_0_Loopback_MSB 0x3F
#define QIB_7322_IBCCtrlA_0_Loopback_RMASK 0x1
#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_LSB 0x3E
#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_MSB 0x3E
#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_RMASK 0x1
#define QIB_7322_IBCCtrlA_0_IBLinkEn_LSB 0x3D
#define QIB_7322_IBCCtrlA_0_IBLinkEn_MSB 0x3D
#define QIB_7322_IBCCtrlA_0_IBLinkEn_RMASK 0x1
#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_LSB 0x3C
#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_MSB 0x3C
#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_RMASK 0x1
#define QIB_7322_IBCCtrlA_0_NumVLane_LSB 0x30
#define QIB_7322_IBCCtrlA_0_NumVLane_MSB 0x32
#define QIB_7322_IBCCtrlA_0_NumVLane_RMASK 0x7
#define QIB_7322_IBCCtrlA_0_OverrunThreshold_LSB 0x24
#define QIB_7322_IBCCtrlA_0_OverrunThreshold_MSB 0x27
#define QIB_7322_IBCCtrlA_0_OverrunThreshold_RMASK 0xF
#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_LSB 0x20
#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_MSB 0x23
#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_RMASK 0xF
#define QIB_7322_IBCCtrlA_0_MaxPktLen_LSB 0x15
#define QIB_7322_IBCCtrlA_0_MaxPktLen_MSB 0x1F
#define QIB_7322_IBCCtrlA_0_MaxPktLen_RMASK 0x7FF
#define QIB_7322_IBCCtrlA_0_LinkCmd_LSB 0x13
#define QIB_7322_IBCCtrlA_0_LinkCmd_MSB 0x14
#define QIB_7322_IBCCtrlA_0_LinkCmd_RMASK 0x3
#define QIB_7322_IBCCtrlA_0_LinkInitCmd_LSB 0x10
#define QIB_7322_IBCCtrlA_0_LinkInitCmd_MSB 0x12
#define QIB_7322_IBCCtrlA_0_LinkInitCmd_RMASK 0x7
#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_LSB 0x8
#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_MSB 0xF
#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_RMASK 0xFF
#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_LSB 0x0
#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_MSB 0x7
#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_RMASK 0xFF

#define QIB_7322_IBCCtrlB_0_OFFS 0x1568
#define QIB_7322_IBCCtrlB_0_DEF 0x00000000000305FF
#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_LSB 0x30
#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_MSB 0x3F
#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK 0xFFFF
#define QIB_7322_IBCCtrlB_0_IB_DLID_LSB 0x20
#define QIB_7322_IBCCtrlB_0_IB_DLID_MSB 0x2F
#define QIB_7322_IBCCtrlB_0_IB_DLID_RMASK 0xFFFF
#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_LSB 0x1B
#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_MSB 0x1B
#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_RMASK 0x1
#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_LSB 0x1A
#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_MSB 0x1A
#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_RMASK 0x1
#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_LSB 0x12
#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_MSB 0x19
#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_RMASK 0xFF
#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_LSB 0x11
#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_MSB 0x11
#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_RMASK 0x1
#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_LSB 0x10
#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_MSB 0x10
#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_RMASK 0x1
#define QIB_7322_IBCCtrlB_0_SD_DDS_LSB 0xC
#define QIB_7322_IBCCtrlB_0_SD_DDS_MSB 0xF
#define QIB_7322_IBCCtrlB_0_SD_DDS_RMASK 0xF
#define QIB_7322_IBCCtrlB_0_SD_DDSV_LSB 0xB
#define QIB_7322_IBCCtrlB_0_SD_DDSV_MSB 0xB
#define QIB_7322_IBCCtrlB_0_SD_DDSV_RMASK 0x1
#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_LSB 0xA
#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_MSB 0xA
#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_RMASK 0x1
#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_LSB 0x9
#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_MSB 0x9
#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_RMASK 0x1
#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_LSB 0x8
#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_MSB 0x8
#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_RMASK 0x1
#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_LSB 0x7
#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_MSB 0x7
#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_RMASK 0x1
#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_LSB 0x5
#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_MSB 0x6
#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_RMASK 0x3
#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_LSB 0x4
#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_MSB 0x4
#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_RMASK 0x1
#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_LSB 0x3
#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_MSB 0x3
#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_RMASK 0x1
#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_LSB 0x2
#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_MSB 0x2
#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_RMASK 0x1
#define QIB_7322_IBCCtrlB_0_SD_SPEED_LSB 0x1
#define QIB_7322_IBCCtrlB_0_SD_SPEED_MSB 0x1
#define QIB_7322_IBCCtrlB_0_SD_SPEED_RMASK 0x1
#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_LSB 0x0
#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_MSB 0x0
#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_RMASK 0x1

#define QIB_7322_IBCCtrlC_0_OFFS 0x1570
#define QIB_7322_IBCCtrlC_0_DEF 0x0000000000000301
#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_LSB 0x5
#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_MSB 0x9
#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_RMASK 0x1F
#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_LSB 0x0
#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_MSB 0x4
#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_RMASK 0x1F

#define QIB_7322_HRTBT_GUID_0_OFFS 0x1588
#define QIB_7322_HRTBT_GUID_0_DEF 0x0000000000000000

#define QIB_7322_IB_SDTEST_IF_TX_0_OFFS 0x1590
#define QIB_7322_IB_SDTEST_IF_TX_0_DEF 0x0000000000000000
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_LSB 0x30
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_MSB 0x3F
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_RMASK 0xFFFF
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_LSB 0x20
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_MSB 0x2F
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_RMASK 0xFFFF
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_LSB 0xD
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_MSB 0xF
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_RMASK 0x7
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_LSB 0xB
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_MSB 0xC
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_RMASK 0x3
#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_LSB 0x4
#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_MSB 0x4
#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_RMASK 0x1
#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_LSB 0x2
#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_MSB 0x3
#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_RMASK 0x3
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_LSB 0x1
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_MSB 0x1
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_RMASK 0x1
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_LSB 0x0
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_MSB 0x0
#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_RMASK 0x1

#define QIB_7322_IB_SDTEST_IF_RX_0_OFFS 0x1598
#define QIB_7322_IB_SDTEST_IF_RX_0_DEF 0x0000000000000000
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_LSB 0x30
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_MSB 0x3F
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_RMASK 0xFFFF
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_LSB 0x20
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_MSB 0x2F
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_RMASK 0xFFFF
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_LSB 0x18
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_MSB 0x1F
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_RMASK 0xFF
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_LSB 0x10
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_MSB 0x17
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_RMASK 0xFF
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_LSB 0x1
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_MSB 0x1
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_RMASK 0x1
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_LSB 0x0
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_MSB 0x0
#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_RMASK 0x1

#define QIB_7322_IBNCModeCtrl_0_OFFS 0x15B8
#define QIB_7322_IBNCModeCtrl_0_DEF 0x0000000000000000
#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_LSB 0x22
#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_MSB 0x22
#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_RMASK 0x1
#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_LSB 0x21
#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_MSB 0x21
#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_RMASK 0x1
#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_LSB 0x20
#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_MSB 0x20
#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_RMASK 0x1
#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_LSB 0x11
#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_MSB 0x19
#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_RMASK 0x1FF
#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_LSB 0x8
#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_MSB 0x10
#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_RMASK 0x1FF
#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_LSB 0x2
#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_MSB 0x2
#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_LSB 0x1
#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_MSB 0x1
#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_RMASK 0x1
#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_LSB 0x0
#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_MSB 0x0
#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_RMASK 0x1

#define QIB_7322_IBSerdesStatus_0_OFFS 0x15D0
#define QIB_7322_IBSerdesStatus_0_DEF 0x0000000000000000

#define QIB_7322_IBPCSConfig_0_OFFS 0x15D8
#define QIB_7322_IBPCSConfig_0_DEF 0x0000000000000007
#define QIB_7322_IBPCSConfig_0_link_sync_mask_LSB 0x9
#define QIB_7322_IBPCSConfig_0_link_sync_mask_MSB 0x12
#define QIB_7322_IBPCSConfig_0_link_sync_mask_RMASK 0x3FF
#define QIB_7322_IBPCSConfig_0_xcv_rreset_LSB 0x2
#define QIB_7322_IBPCSConfig_0_xcv_rreset_MSB 0x2
#define QIB_7322_IBPCSConfig_0_xcv_rreset_RMASK 0x1
#define QIB_7322_IBPCSConfig_0_xcv_treset_LSB 0x1
#define QIB_7322_IBPCSConfig_0_xcv_treset_MSB 0x1
#define QIB_7322_IBPCSConfig_0_xcv_treset_RMASK 0x1
#define QIB_7322_IBPCSConfig_0_tx_rx_reset_LSB 0x0
#define QIB_7322_IBPCSConfig_0_tx_rx_reset_MSB 0x0
#define QIB_7322_IBPCSConfig_0_tx_rx_reset_RMASK 0x1

#define QIB_7322_IBSerdesCtrl_0_OFFS 0x15E0
#define QIB_7322_IBSerdesCtrl_0_DEF 0x0000000000FFA00F
#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_LSB 0x1A
#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_MSB 0x1A
#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_RMASK 0x1
#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_LSB 0x19
#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_MSB 0x19
#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_RMASK 0x1
#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_LSB 0x18
#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_MSB 0x18
#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_RMASK 0x1
#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_LSB 0x14
#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_MSB 0x17
#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_RMASK 0xF
#define QIB_7322_IBSerdesCtrl_0_CGMODE_LSB 0x10
#define QIB_7322_IBSerdesCtrl_0_CGMODE_MSB 0x13
#define QIB_7322_IBSerdesCtrl_0_CGMODE_RMASK 0xF
#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_LSB 0xF
#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_MSB 0xF
#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_RMASK 0x1
#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_LSB 0xD
#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_MSB 0xD
#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_RMASK 0x1
#define QIB_7322_IBSerdesCtrl_0_LPEN_LSB 0xC
#define QIB_7322_IBSerdesCtrl_0_LPEN_MSB 0xC
#define QIB_7322_IBSerdesCtrl_0_LPEN_RMASK 0x1
#define QIB_7322_IBSerdesCtrl_0_PLLPD_LSB 0xB
#define QIB_7322_IBSerdesCtrl_0_PLLPD_MSB 0xB
#define QIB_7322_IBSerdesCtrl_0_PLLPD_RMASK 0x1
#define QIB_7322_IBSerdesCtrl_0_TXPD_LSB 0xA
#define QIB_7322_IBSerdesCtrl_0_TXPD_MSB 0xA
#define QIB_7322_IBSerdesCtrl_0_TXPD_RMASK 0x1
#define QIB_7322_IBSerdesCtrl_0_RXPD_LSB 0x9
#define QIB_7322_IBSerdesCtrl_0_RXPD_MSB 0x9
#define QIB_7322_IBSerdesCtrl_0_RXPD_RMASK 0x1
#define QIB_7322_IBSerdesCtrl_0_TXIDLE_LSB 0x8
#define QIB_7322_IBSerdesCtrl_0_TXIDLE_MSB 0x8
#define QIB_7322_IBSerdesCtrl_0_TXIDLE_RMASK 0x1
#define QIB_7322_IBSerdesCtrl_0_CMODE_LSB 0x0
#define QIB_7322_IBSerdesCtrl_0_CMODE_MSB 0x6
#define QIB_7322_IBSerdesCtrl_0_CMODE_RMASK 0x7F

#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_OFFS 0x1600
#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_DEF 0x0000000000000000
#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_LSB 0x1F
#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_MSB 0x1F
#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_RMASK 0x1
#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_LSB 0x1E
#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_MSB 0x1E
#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_RMASK 0x1
2509#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_LSB 0xE
2510#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_MSB 0x11
2511#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_RMASK 0xF
2512#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_LSB 0x9
2513#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_MSB 0xD
2514#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_RMASK 0x1F
2515#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_LSB 0x5
2516#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_MSB 0x8
2517#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_RMASK 0xF
2518#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_LSB 0x3
2519#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_MSB 0x4
2520#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_RMASK 0x3
2521#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_LSB 0x0
2522#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_MSB 0x2
2523#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_RMASK 0x7
2524
2525#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_OFFS 0x1640
2526#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_DEF 0x0000000000000000
2527#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_LSB 0x27
2528#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_MSB 0x27
2529#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_RMASK 0x1
2530#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_LSB 0x26
2531#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_MSB 0x26
2532#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_RMASK 0x1
2533#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_LSB 0x25
2534#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_MSB 0x25
2535#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_RMASK 0x1
2536#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_LSB 0x24
2537#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_MSB 0x24
2538#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_RMASK 0x1
2539#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_LSB 0x23
2540#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_MSB 0x23
2541#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_RMASK 0x1
2542#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_LSB 0x22
2543#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_MSB 0x22
2544#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_RMASK 0x1
2545#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_LSB 0x21
2546#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_MSB 0x21
2547#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_RMASK 0x1
2548#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_LSB 0x20
2549#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_MSB 0x20
2550#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_RMASK 0x1
2551#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_LSB 0x18
2552#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_MSB 0x1F
2553#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_RMASK 0xFF
2554#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_LSB 0x10
2555#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_MSB 0x17
2556#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_RMASK 0xFF
2557#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_LSB 0x8
2558#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_MSB 0xF
2559#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_RMASK 0xFF
2560#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_LSB 0x0
2561#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_MSB 0x7
2562#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_RMASK 0xFF
2563
2564#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_OFFS 0x1648
2565#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_DEF 0x0000000000000000
2566#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_LSB 0x27
2567#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_MSB 0x27
2568#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_RMASK 0x1
2569#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_LSB 0x26
2570#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_MSB 0x26
2571#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_RMASK 0x1
2572#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_LSB 0x25
2573#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_MSB 0x25
2574#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_RMASK 0x1
2575#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_LSB 0x24
2576#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_MSB 0x24
2577#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_RMASK 0x1
2578#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_LSB 0x23
2579#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_MSB 0x23
2580#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_RMASK 0x1
2581#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_LSB 0x22
2582#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_MSB 0x22
2583#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_RMASK 0x1
2584#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_LSB 0x21
2585#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_MSB 0x21
2586#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_RMASK 0x1
2587#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_LSB 0x20
2588#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_MSB 0x20
2589#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_RMASK 0x1
2590#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_LSB 0x18
2591#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_MSB 0x1F
2592#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_RMASK 0xFF
2593#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_LSB 0x10
2594#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_MSB 0x17
2595#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_RMASK 0xFF
2596#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_LSB 0x8
2597#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_MSB 0xF
2598#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_RMASK 0xFF
2599#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_LSB 0x0
2600#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_MSB 0x7
2601#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_RMASK 0xFF
2602
2603#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_OFFS 0x1650
2604#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_DEF 0x0000000000000000
2605#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_LSB 0x27
2606#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_MSB 0x27
2607#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_RMASK 0x1
2608#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_LSB 0x26
2609#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_MSB 0x26
2610#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_RMASK 0x1
2611#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_LSB 0x25
2612#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_MSB 0x25
2613#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_RMASK 0x1
2614#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_LSB 0x24
2615#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_MSB 0x24
2616#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_RMASK 0x1
2617#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_LSB 0x23
2618#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_MSB 0x23
2619#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_RMASK 0x1
2620#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_LSB 0x22
2621#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_MSB 0x22
2622#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_RMASK 0x1
2623#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_LSB 0x21
2624#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_MSB 0x21
2625#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_RMASK 0x1
2626#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_LSB 0x20
2627#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_MSB 0x20
2628#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_RMASK 0x1
2629#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_LSB 0x18
2630#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_MSB 0x1F
2631#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_RMASK 0xFF
2632#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_LSB 0x10
2633#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_MSB 0x17
2634#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_RMASK 0xFF
2635#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_LSB 0x8
2636#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_MSB 0xF
2637#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_RMASK 0xFF
2638#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_LSB 0x0
2639#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_MSB 0x7
2640#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_RMASK 0xFF
2641
2642#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_OFFS 0x1658
2643#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_DEF 0x0000000000000000
2644#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_LSB 0x27
2645#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_MSB 0x27
2646#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_RMASK 0x1
2647#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_LSB 0x26
2648#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_MSB 0x26
2649#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_RMASK 0x1
2650#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_LSB 0x25
2651#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_MSB 0x25
2652#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_RMASK 0x1
2653#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_LSB 0x24
2654#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_MSB 0x24
2655#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_RMASK 0x1
2656#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_LSB 0x23
2657#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_MSB 0x23
2658#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_RMASK 0x1
2659#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_LSB 0x22
2660#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_MSB 0x22
2661#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_RMASK 0x1
2662#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_LSB 0x21
2663#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_MSB 0x21
2664#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_RMASK 0x1
2665#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_LSB 0x20
2666#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_MSB 0x20
2667#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_RMASK 0x1
2668#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_LSB 0x18
2669#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_MSB 0x1F
2670#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_RMASK 0xFF
2671#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_LSB 0x10
2672#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_MSB 0x17
2673#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_RMASK 0xFF
2674#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_LSB 0x8
2675#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_MSB 0xF
2676#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_RMASK 0xFF
2677#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_LSB 0x0
2678#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_MSB 0x7
2679#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_RMASK 0xFF
2680
2681#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_OFFS 0x1660
2682#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_DEF 0x0000000000000000
2683#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_LSB 0x27
2684#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_MSB 0x27
2685#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_RMASK 0x1
2686#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_LSB 0x26
2687#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_MSB 0x26
2688#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_RMASK 0x1
2689#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_LSB 0x25
2690#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_MSB 0x25
2691#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_RMASK 0x1
2692#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_LSB 0x24
2693#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_MSB 0x24
2694#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_RMASK 0x1
2695#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_LSB 0x23
2696#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_MSB 0x23
2697#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_RMASK 0x1
2698#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_LSB 0x22
2699#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_MSB 0x22
2700#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_RMASK 0x1
2701#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_LSB 0x21
2702#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_MSB 0x21
2703#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_RMASK 0x1
2704#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_LSB 0x20
2705#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_MSB 0x20
2706#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_RMASK 0x1
2707#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_LSB 0x18
2708#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_MSB 0x1F
2709#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_RMASK 0xFF
2710#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_LSB 0x10
2711#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_MSB 0x17
2712#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_RMASK 0xFF
2713#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_LSB 0x8
2714#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_MSB 0xF
2715#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_RMASK 0xFF
2716#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_LSB 0x0
2717#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_MSB 0x7
2718#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_RMASK 0xFF
2719
2720#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_OFFS 0x1668
2721#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_DEF 0x0000000000000000
2722#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_LSB 0x27
2723#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_MSB 0x27
2724#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_RMASK 0x1
2725#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_LSB 0x26
2726#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_MSB 0x26
2727#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_RMASK 0x1
2728#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_LSB 0x25
2729#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_MSB 0x25
2730#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_RMASK 0x1
2731#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_LSB 0x24
2732#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_MSB 0x24
2733#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_RMASK 0x1
2734#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_LSB 0x23
2735#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_MSB 0x23
2736#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_RMASK 0x1
2737#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_LSB 0x22
2738#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_MSB 0x22
2739#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_RMASK 0x1
2740#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_LSB 0x21
2741#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_MSB 0x21
2742#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_RMASK 0x1
2743#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_LSB 0x20
2744#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_MSB 0x20
2745#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_RMASK 0x1
2746#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_LSB 0x18
2747#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_MSB 0x1F
2748#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_RMASK 0xFF
2749#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_LSB 0x10
2750#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_MSB 0x17
2751#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_RMASK 0xFF
2752#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_LSB 0x8
2753#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_MSB 0xF
2754#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_RMASK 0xFF
2755#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_LSB 0x0
2756#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_MSB 0x7
2757#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_RMASK 0xFF
2758
2759#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_OFFS 0x1670
2760#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_DEF 0x0000000000000000
2761
2762#define QIB_7322_HighPriorityLimit_0_OFFS 0x1BC0
2763#define QIB_7322_HighPriorityLimit_0_DEF 0x0000000000000000
2764#define QIB_7322_HighPriorityLimit_0_Limit_LSB 0x0
2765#define QIB_7322_HighPriorityLimit_0_Limit_MSB 0x7
2766#define QIB_7322_HighPriorityLimit_0_Limit_RMASK 0xFF
2767
2768#define QIB_7322_LowPriority0_0_OFFS 0x1C00
2769#define QIB_7322_LowPriority0_0_DEF 0x0000000000000000
2770#define QIB_7322_LowPriority0_0_VirtualLane_LSB 0x10
2771#define QIB_7322_LowPriority0_0_VirtualLane_MSB 0x12
2772#define QIB_7322_LowPriority0_0_VirtualLane_RMASK 0x7
2773#define QIB_7322_LowPriority0_0_Weight_LSB 0x0
2774#define QIB_7322_LowPriority0_0_Weight_MSB 0x7
2775#define QIB_7322_LowPriority0_0_Weight_RMASK 0xFF
2776
2777#define QIB_7322_HighPriority0_0_OFFS 0x1E00
2778#define QIB_7322_HighPriority0_0_DEF 0x0000000000000000
2779#define QIB_7322_HighPriority0_0_VirtualLane_LSB 0x10
2780#define QIB_7322_HighPriority0_0_VirtualLane_MSB 0x12
2781#define QIB_7322_HighPriority0_0_VirtualLane_RMASK 0x7
2782#define QIB_7322_HighPriority0_0_Weight_LSB 0x0
2783#define QIB_7322_HighPriority0_0_Weight_MSB 0x7
2784#define QIB_7322_HighPriority0_0_Weight_RMASK 0xFF
2785
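/*
 * Editor's sketch (not part of the original file): every register
 * above follows the same _LSB/_MSB/_RMASK convention, so a field is
 * read by shifting the 64-bit register value down by its LSB and
 * masking with RMASK. The helper names here are hypothetical.
 */
#include <stdint.h>

#define QIB_FIELD_GET(regval, field) \
	(((regval) >> field##_LSB) & field##_RMASK)
#define QIB_FIELD_PUT(regval, field, val) \
	(((regval) & ~((uint64_t)field##_RMASK << field##_LSB)) | \
	 (((uint64_t)(val) & field##_RMASK) << field##_LSB))

/* e.g. extract the arbitration weight from a HighPriority0_0 value */
static inline uint64_t high_prio0_weight(uint64_t regval)
{
	return QIB_FIELD_GET(regval, QIB_7322_HighPriority0_0_Weight);
}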
2786#define QIB_7322_CntrRegBase_1_OFFS 0x2028
2787#define QIB_7322_CntrRegBase_1_DEF 0x0000000000013000
2788
2789#define QIB_7322_RcvQPMulticastContext_1_OFFS 0x2170
2790
2791#define QIB_7322_SendCtrl_1_OFFS 0x21C0
2792
2793#define QIB_7322_SendBufAvail0_OFFS 0x3000
2794#define QIB_7322_SendBufAvail0_DEF 0x0000000000000000
2795#define QIB_7322_SendBufAvail0_SendBuf_31_0_LSB 0x0
2796#define QIB_7322_SendBufAvail0_SendBuf_31_0_MSB 0x3F
2797#define QIB_7322_SendBufAvail0_SendBuf_31_0_RMASK 0x0
2798
2799#define QIB_7322_MsixTable_OFFS 0x8000
2800#define QIB_7322_MsixTable_DEF 0x0000000000000000
2801
2802#define QIB_7322_MsixPba_OFFS 0x9000
2803#define QIB_7322_MsixPba_DEF 0x0000000000000000
2804
2805#define QIB_7322_LAMemory_OFFS 0xA000
2806#define QIB_7322_LAMemory_DEF 0x0000000000000000
2807
2808#define QIB_7322_LBIntCnt_OFFS 0x11000
2809#define QIB_7322_LBIntCnt_DEF 0x0000000000000000
2810
2811#define QIB_7322_LBFlowStallCnt_OFFS 0x11008
2812#define QIB_7322_LBFlowStallCnt_DEF 0x0000000000000000
2813
2814#define QIB_7322_RxTIDFullErrCnt_OFFS 0x110D0
2815#define QIB_7322_RxTIDFullErrCnt_DEF 0x0000000000000000
2816
2817#define QIB_7322_RxTIDValidErrCnt_OFFS 0x110D8
2818#define QIB_7322_RxTIDValidErrCnt_DEF 0x0000000000000000
2819
2820#define QIB_7322_RxP0HdrEgrOvflCnt_OFFS 0x110E8
2821#define QIB_7322_RxP0HdrEgrOvflCnt_DEF 0x0000000000000000
2822
2823#define QIB_7322_PcieRetryBufDiagQwordCnt_OFFS 0x111A0
2824#define QIB_7322_PcieRetryBufDiagQwordCnt_DEF 0x0000000000000000
2825
2826#define QIB_7322_RxTidFlowDropCnt_OFFS 0x111E0
2827#define QIB_7322_RxTidFlowDropCnt_DEF 0x0000000000000000
2828
2829#define QIB_7322_LBIntCnt_0_OFFS 0x12000
2830#define QIB_7322_LBIntCnt_0_DEF 0x0000000000000000
2831
2832#define QIB_7322_TxCreditUpToDateTimeOut_0_OFFS 0x12008
2833#define QIB_7322_TxCreditUpToDateTimeOut_0_DEF 0x0000000000000000
2834
2835#define QIB_7322_TxSDmaDescCnt_0_OFFS 0x12010
2836#define QIB_7322_TxSDmaDescCnt_0_DEF 0x0000000000000000
2837
2838#define QIB_7322_TxUnsupVLErrCnt_0_OFFS 0x12018
2839#define QIB_7322_TxUnsupVLErrCnt_0_DEF 0x0000000000000000
2840
2841#define QIB_7322_TxDataPktCnt_0_OFFS 0x12020
2842#define QIB_7322_TxDataPktCnt_0_DEF 0x0000000000000000
2843
2844#define QIB_7322_TxFlowPktCnt_0_OFFS 0x12028
2845#define QIB_7322_TxFlowPktCnt_0_DEF 0x0000000000000000
2846
2847#define QIB_7322_TxDwordCnt_0_OFFS 0x12030
2848#define QIB_7322_TxDwordCnt_0_DEF 0x0000000000000000
2849
2850#define QIB_7322_TxLenErrCnt_0_OFFS 0x12038
2851#define QIB_7322_TxLenErrCnt_0_DEF 0x0000000000000000
2852
2853#define QIB_7322_TxMaxMinLenErrCnt_0_OFFS 0x12040
2854#define QIB_7322_TxMaxMinLenErrCnt_0_DEF 0x0000000000000000
2855
2856#define QIB_7322_TxUnderrunCnt_0_OFFS 0x12048
2857#define QIB_7322_TxUnderrunCnt_0_DEF 0x0000000000000000
2858
2859#define QIB_7322_TxFlowStallCnt_0_OFFS 0x12050
2860#define QIB_7322_TxFlowStallCnt_0_DEF 0x0000000000000000
2861
2862#define QIB_7322_TxDroppedPktCnt_0_OFFS 0x12058
2863#define QIB_7322_TxDroppedPktCnt_0_DEF 0x0000000000000000
2864
2865#define QIB_7322_RxDroppedPktCnt_0_OFFS 0x12060
2866#define QIB_7322_RxDroppedPktCnt_0_DEF 0x0000000000000000
2867
2868#define QIB_7322_RxDataPktCnt_0_OFFS 0x12068
2869#define QIB_7322_RxDataPktCnt_0_DEF 0x0000000000000000
2870
2871#define QIB_7322_RxFlowPktCnt_0_OFFS 0x12070
2872#define QIB_7322_RxFlowPktCnt_0_DEF 0x0000000000000000
2873
2874#define QIB_7322_RxDwordCnt_0_OFFS 0x12078
2875#define QIB_7322_RxDwordCnt_0_DEF 0x0000000000000000
2876
2877#define QIB_7322_RxLenErrCnt_0_OFFS 0x12080
2878#define QIB_7322_RxLenErrCnt_0_DEF 0x0000000000000000
2879
2880#define QIB_7322_RxMaxMinLenErrCnt_0_OFFS 0x12088
2881#define QIB_7322_RxMaxMinLenErrCnt_0_DEF 0x0000000000000000
2882
2883#define QIB_7322_RxICRCErrCnt_0_OFFS 0x12090
2884#define QIB_7322_RxICRCErrCnt_0_DEF 0x0000000000000000
2885
2886#define QIB_7322_RxVCRCErrCnt_0_OFFS 0x12098
2887#define QIB_7322_RxVCRCErrCnt_0_DEF 0x0000000000000000
2888
2889#define QIB_7322_RxFlowCtrlViolCnt_0_OFFS 0x120A0
2890#define QIB_7322_RxFlowCtrlViolCnt_0_DEF 0x0000000000000000
2891
2892#define QIB_7322_RxVersionErrCnt_0_OFFS 0x120A8
2893#define QIB_7322_RxVersionErrCnt_0_DEF 0x0000000000000000
2894
2895#define QIB_7322_RxLinkMalformCnt_0_OFFS 0x120B0
2896#define QIB_7322_RxLinkMalformCnt_0_DEF 0x0000000000000000
2897
2898#define QIB_7322_RxEBPCnt_0_OFFS 0x120B8
2899#define QIB_7322_RxEBPCnt_0_DEF 0x0000000000000000
2900
2901#define QIB_7322_RxLPCRCErrCnt_0_OFFS 0x120C0
2902#define QIB_7322_RxLPCRCErrCnt_0_DEF 0x0000000000000000
2903
2904#define QIB_7322_RxBufOvflCnt_0_OFFS 0x120C8
2905#define QIB_7322_RxBufOvflCnt_0_DEF 0x0000000000000000
2906
2907#define QIB_7322_RxLenTruncateCnt_0_OFFS 0x120D0
2908#define QIB_7322_RxLenTruncateCnt_0_DEF 0x0000000000000000
2909
2910#define QIB_7322_RxPKeyMismatchCnt_0_OFFS 0x120E0
2911#define QIB_7322_RxPKeyMismatchCnt_0_DEF 0x0000000000000000
2912
2913#define QIB_7322_IBLinkDownedCnt_0_OFFS 0x12180
2914#define QIB_7322_IBLinkDownedCnt_0_DEF 0x0000000000000000
2915
2916#define QIB_7322_IBSymbolErrCnt_0_OFFS 0x12188
2917#define QIB_7322_IBSymbolErrCnt_0_DEF 0x0000000000000000
2918
2919#define QIB_7322_IBStatusChangeCnt_0_OFFS 0x12190
2920#define QIB_7322_IBStatusChangeCnt_0_DEF 0x0000000000000000
2921
2922#define QIB_7322_IBLinkErrRecoveryCnt_0_OFFS 0x12198
2923#define QIB_7322_IBLinkErrRecoveryCnt_0_DEF 0x0000000000000000
2924
2925#define QIB_7322_ExcessBufferOvflCnt_0_OFFS 0x121A8
2926#define QIB_7322_ExcessBufferOvflCnt_0_DEF 0x0000000000000000
2927
2928#define QIB_7322_LocalLinkIntegrityErrCnt_0_OFFS 0x121B0
2929#define QIB_7322_LocalLinkIntegrityErrCnt_0_DEF 0x0000000000000000
2930
2931#define QIB_7322_RxVlErrCnt_0_OFFS 0x121B8
2932#define QIB_7322_RxVlErrCnt_0_DEF 0x0000000000000000
2933
2934#define QIB_7322_RxDlidFltrCnt_0_OFFS 0x121C0
2935#define QIB_7322_RxDlidFltrCnt_0_DEF 0x0000000000000000
2936
2937#define QIB_7322_RxVL15DroppedPktCnt_0_OFFS 0x121C8
2938#define QIB_7322_RxVL15DroppedPktCnt_0_DEF 0x0000000000000000
2939
2940#define QIB_7322_RxOtherLocalPhyErrCnt_0_OFFS 0x121D0
2941#define QIB_7322_RxOtherLocalPhyErrCnt_0_DEF 0x0000000000000000
2942
2943#define QIB_7322_RxQPInvalidContextCnt_0_OFFS 0x121D8
2944#define QIB_7322_RxQPInvalidContextCnt_0_DEF 0x0000000000000000
2945
2946#define QIB_7322_TxHeadersErrCnt_0_OFFS 0x121F8
2947#define QIB_7322_TxHeadersErrCnt_0_DEF 0x0000000000000000
2948
2949#define QIB_7322_PSRcvDataCount_0_OFFS 0x12218
2950#define QIB_7322_PSRcvDataCount_0_DEF 0x0000000000000000
2951
2952#define QIB_7322_PSRcvPktsCount_0_OFFS 0x12220
2953#define QIB_7322_PSRcvPktsCount_0_DEF 0x0000000000000000
2954
2955#define QIB_7322_PSXmitDataCount_0_OFFS 0x12228
2956#define QIB_7322_PSXmitDataCount_0_DEF 0x0000000000000000
2957
2958#define QIB_7322_PSXmitPktsCount_0_OFFS 0x12230
2959#define QIB_7322_PSXmitPktsCount_0_DEF 0x0000000000000000
2960
2961#define QIB_7322_PSXmitWaitCount_0_OFFS 0x12238
2962#define QIB_7322_PSXmitWaitCount_0_DEF 0x0000000000000000
2963
2964#define QIB_7322_LBIntCnt_1_OFFS 0x13000
2965#define QIB_7322_LBIntCnt_1_DEF 0x0000000000000000
2966
2967#define QIB_7322_TxCreditUpToDateTimeOut_1_OFFS 0x13008
2968#define QIB_7322_TxCreditUpToDateTimeOut_1_DEF 0x0000000000000000
2969
2970#define QIB_7322_TxSDmaDescCnt_1_OFFS 0x13010
2971#define QIB_7322_TxSDmaDescCnt_1_DEF 0x0000000000000000
2972
2973#define QIB_7322_TxUnsupVLErrCnt_1_OFFS 0x13018
2974#define QIB_7322_TxUnsupVLErrCnt_1_DEF 0x0000000000000000
2975
2976#define QIB_7322_TxDataPktCnt_1_OFFS 0x13020
2977#define QIB_7322_TxDataPktCnt_1_DEF 0x0000000000000000
2978
2979#define QIB_7322_TxFlowPktCnt_1_OFFS 0x13028
2980#define QIB_7322_TxFlowPktCnt_1_DEF 0x0000000000000000
2981
2982#define QIB_7322_TxDwordCnt_1_OFFS 0x13030
2983#define QIB_7322_TxDwordCnt_1_DEF 0x0000000000000000
2984
2985#define QIB_7322_TxLenErrCnt_1_OFFS 0x13038
2986#define QIB_7322_TxLenErrCnt_1_DEF 0x0000000000000000
2987
2988#define QIB_7322_TxMaxMinLenErrCnt_1_OFFS 0x13040
2989#define QIB_7322_TxMaxMinLenErrCnt_1_DEF 0x0000000000000000
2990
2991#define QIB_7322_TxUnderrunCnt_1_OFFS 0x13048
2992#define QIB_7322_TxUnderrunCnt_1_DEF 0x0000000000000000
2993
2994#define QIB_7322_TxFlowStallCnt_1_OFFS 0x13050
2995#define QIB_7322_TxFlowStallCnt_1_DEF 0x0000000000000000
2996
2997#define QIB_7322_TxDroppedPktCnt_1_OFFS 0x13058
2998#define QIB_7322_TxDroppedPktCnt_1_DEF 0x0000000000000000
2999
3000#define QIB_7322_RxDroppedPktCnt_1_OFFS 0x13060
3001#define QIB_7322_RxDroppedPktCnt_1_DEF 0x0000000000000000
3002
3003#define QIB_7322_RxDataPktCnt_1_OFFS 0x13068
3004#define QIB_7322_RxDataPktCnt_1_DEF 0x0000000000000000
3005
3006#define QIB_7322_RxFlowPktCnt_1_OFFS 0x13070
3007#define QIB_7322_RxFlowPktCnt_1_DEF 0x0000000000000000
3008
3009#define QIB_7322_RxDwordCnt_1_OFFS 0x13078
3010#define QIB_7322_RxDwordCnt_1_DEF 0x0000000000000000
3011
3012#define QIB_7322_RxLenErrCnt_1_OFFS 0x13080
3013#define QIB_7322_RxLenErrCnt_1_DEF 0x0000000000000000
3014
3015#define QIB_7322_RxMaxMinLenErrCnt_1_OFFS 0x13088
3016#define QIB_7322_RxMaxMinLenErrCnt_1_DEF 0x0000000000000000
3017
3018#define QIB_7322_RxICRCErrCnt_1_OFFS 0x13090
3019#define QIB_7322_RxICRCErrCnt_1_DEF 0x0000000000000000
3020
3021#define QIB_7322_RxVCRCErrCnt_1_OFFS 0x13098
3022#define QIB_7322_RxVCRCErrCnt_1_DEF 0x0000000000000000
3023
3024#define QIB_7322_RxFlowCtrlViolCnt_1_OFFS 0x130A0
3025#define QIB_7322_RxFlowCtrlViolCnt_1_DEF 0x0000000000000000
3026
3027#define QIB_7322_RxVersionErrCnt_1_OFFS 0x130A8
3028#define QIB_7322_RxVersionErrCnt_1_DEF 0x0000000000000000
3029
3030#define QIB_7322_RxLinkMalformCnt_1_OFFS 0x130B0
3031#define QIB_7322_RxLinkMalformCnt_1_DEF 0x0000000000000000
3032
3033#define QIB_7322_RxEBPCnt_1_OFFS 0x130B8
3034#define QIB_7322_RxEBPCnt_1_DEF 0x0000000000000000
3035
3036#define QIB_7322_RxLPCRCErrCnt_1_OFFS 0x130C0
3037#define QIB_7322_RxLPCRCErrCnt_1_DEF 0x0000000000000000
3038
3039#define QIB_7322_RxBufOvflCnt_1_OFFS 0x130C8
3040#define QIB_7322_RxBufOvflCnt_1_DEF 0x0000000000000000
3041
3042#define QIB_7322_RxLenTruncateCnt_1_OFFS 0x130D0
3043#define QIB_7322_RxLenTruncateCnt_1_DEF 0x0000000000000000
3044
3045#define QIB_7322_RxPKeyMismatchCnt_1_OFFS 0x130E0
3046#define QIB_7322_RxPKeyMismatchCnt_1_DEF 0x0000000000000000
3047
3048#define QIB_7322_IBLinkDownedCnt_1_OFFS 0x13180
3049#define QIB_7322_IBLinkDownedCnt_1_DEF 0x0000000000000000
3050
3051#define QIB_7322_IBSymbolErrCnt_1_OFFS 0x13188
3052#define QIB_7322_IBSymbolErrCnt_1_DEF 0x0000000000000000
3053
3054#define QIB_7322_IBStatusChangeCnt_1_OFFS 0x13190
3055#define QIB_7322_IBStatusChangeCnt_1_DEF 0x0000000000000000
3056
3057#define QIB_7322_IBLinkErrRecoveryCnt_1_OFFS 0x13198
3058#define QIB_7322_IBLinkErrRecoveryCnt_1_DEF 0x0000000000000000
3059
3060#define QIB_7322_ExcessBufferOvflCnt_1_OFFS 0x131A8
3061#define QIB_7322_ExcessBufferOvflCnt_1_DEF 0x0000000000000000
3062
3063#define QIB_7322_LocalLinkIntegrityErrCnt_1_OFFS 0x131B0
3064#define QIB_7322_LocalLinkIntegrityErrCnt_1_DEF 0x0000000000000000
3065
3066#define QIB_7322_RxVlErrCnt_1_OFFS 0x131B8
3067#define QIB_7322_RxVlErrCnt_1_DEF 0x0000000000000000
3068
3069#define QIB_7322_RxDlidFltrCnt_1_OFFS 0x131C0
3070#define QIB_7322_RxDlidFltrCnt_1_DEF 0x0000000000000000
3071
3072#define QIB_7322_RxVL15DroppedPktCnt_1_OFFS 0x131C8
3073#define QIB_7322_RxVL15DroppedPktCnt_1_DEF 0x0000000000000000
3074
3075#define QIB_7322_RxOtherLocalPhyErrCnt_1_OFFS 0x131D0
3076#define QIB_7322_RxOtherLocalPhyErrCnt_1_DEF 0x0000000000000000
3077
3078#define QIB_7322_RxQPInvalidContextCnt_1_OFFS 0x131D8
3079#define QIB_7322_RxQPInvalidContextCnt_1_DEF 0x0000000000000000
3080
3081#define QIB_7322_TxHeadersErrCnt_1_OFFS 0x131F8
3082#define QIB_7322_TxHeadersErrCnt_1_DEF 0x0000000000000000
3083
3084#define QIB_7322_PSRcvDataCount_1_OFFS 0x13218
3085#define QIB_7322_PSRcvDataCount_1_DEF 0x0000000000000000
3086
3087#define QIB_7322_PSRcvPktsCount_1_OFFS 0x13220
3088#define QIB_7322_PSRcvPktsCount_1_DEF 0x0000000000000000
3089
3090#define QIB_7322_PSXmitDataCount_1_OFFS 0x13228
3091#define QIB_7322_PSXmitDataCount_1_DEF 0x0000000000000000
3092
3093#define QIB_7322_PSXmitPktsCount_1_OFFS 0x13230
3094#define QIB_7322_PSXmitPktsCount_1_DEF 0x0000000000000000
3095
3096#define QIB_7322_PSXmitWaitCount_1_OFFS 0x13238
3097#define QIB_7322_PSXmitWaitCount_1_DEF 0x0000000000000000
3098
3099#define QIB_7322_RcvEgrArray_OFFS 0x14000
3100#define QIB_7322_RcvEgrArray_DEF 0x0000000000000000
3101#define QIB_7322_RcvEgrArray_RT_BufSize_LSB 0x25
3102#define QIB_7322_RcvEgrArray_RT_BufSize_MSB 0x27
3103#define QIB_7322_RcvEgrArray_RT_BufSize_RMASK 0x7
3104#define QIB_7322_RcvEgrArray_RT_Addr_LSB 0x0
3105#define QIB_7322_RcvEgrArray_RT_Addr_MSB 0x24
3106#define QIB_7322_RcvEgrArray_RT_Addr_RMASK 0x1FFFFFFFFF
3107
3108#define QIB_7322_RcvTIDArray0_OFFS 0x50000
3109#define QIB_7322_RcvTIDArray0_DEF 0x0000000000000000
3110#define QIB_7322_RcvTIDArray0_RT_BufSize_LSB 0x25
3111#define QIB_7322_RcvTIDArray0_RT_BufSize_MSB 0x27
3112#define QIB_7322_RcvTIDArray0_RT_BufSize_RMASK 0x7
3113#define QIB_7322_RcvTIDArray0_RT_Addr_LSB 0x0
3114#define QIB_7322_RcvTIDArray0_RT_Addr_MSB 0x24
3115#define QIB_7322_RcvTIDArray0_RT_Addr_RMASK 0x1FFFFFFFFF
3116
3117#define QIB_7322_IBSD_DDS_MAP_TABLE_0_OFFS 0xD0000
3118#define QIB_7322_IBSD_DDS_MAP_TABLE_0_DEF 0x0000000000000000
3119
3120#define QIB_7322_RcvHdrTail0_OFFS 0x200000
3121#define QIB_7322_RcvHdrTail0_DEF 0x0000000000000000
3122
3123#define QIB_7322_RcvHdrHead0_OFFS 0x200008
3124#define QIB_7322_RcvHdrHead0_DEF 0x0000000000000000
3125#define QIB_7322_RcvHdrHead0_counter_LSB 0x20
3126#define QIB_7322_RcvHdrHead0_counter_MSB 0x2F
3127#define QIB_7322_RcvHdrHead0_counter_RMASK 0xFFFF
3128#define QIB_7322_RcvHdrHead0_RcvHeadPointer_LSB 0x0
3129#define QIB_7322_RcvHdrHead0_RcvHeadPointer_MSB 0x1F
3130#define QIB_7322_RcvHdrHead0_RcvHeadPointer_RMASK 0xFFFFFFFF
3131
3132#define QIB_7322_RcvEgrIndexTail0_OFFS 0x200010
3133#define QIB_7322_RcvEgrIndexTail0_DEF 0x0000000000000000
3134
3135#define QIB_7322_RcvEgrIndexHead0_OFFS 0x200018
3136#define QIB_7322_RcvEgrIndexHead0_DEF 0x0000000000000000
3137
3138#define QIB_7322_RcvTIDFlowTable0_OFFS 0x201000
3139#define QIB_7322_RcvTIDFlowTable0_DEF 0x0000000000000000
3140#define QIB_7322_RcvTIDFlowTable0_GenMismatch_LSB 0x1C
3141#define QIB_7322_RcvTIDFlowTable0_GenMismatch_MSB 0x1C
3142#define QIB_7322_RcvTIDFlowTable0_GenMismatch_RMASK 0x1
3143#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_LSB 0x1B
3144#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_MSB 0x1B
3145#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_RMASK 0x1
3146#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_LSB 0x16
3147#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_MSB 0x16
3148#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_RMASK 0x1
3149#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_LSB 0x15
3150#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_MSB 0x15
3151#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_RMASK 0x1
3152#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_LSB 0x14
3153#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_MSB 0x14
3154#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_RMASK 0x1
3155#define QIB_7322_RcvTIDFlowTable0_FlowValid_LSB 0x13
3156#define QIB_7322_RcvTIDFlowTable0_FlowValid_MSB 0x13
3157#define QIB_7322_RcvTIDFlowTable0_FlowValid_RMASK 0x1
3158#define QIB_7322_RcvTIDFlowTable0_GenVal_LSB 0xB
3159#define QIB_7322_RcvTIDFlowTable0_GenVal_MSB 0x12
3160#define QIB_7322_RcvTIDFlowTable0_GenVal_RMASK 0xFF
3161#define QIB_7322_RcvTIDFlowTable0_SeqNum_LSB 0x0
3162#define QIB_7322_RcvTIDFlowTable0_SeqNum_MSB 0xA
3163#define QIB_7322_RcvTIDFlowTable0_SeqNum_RMASK 0x7FF
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
new file mode 100644
index 000000000000..b3955ed8f794
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -0,0 +1,758 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef _QIB_COMMON_H
36#define _QIB_COMMON_H
37
38/*
39 * This file contains defines, structures, etc. that are used
40 * to communicate between kernel and user code.
41 */
42
43/* This is the IEEE-assigned OUI for QLogic Inc. QLogic_IB */
44#define QIB_SRC_OUI_1 0x00
45#define QIB_SRC_OUI_2 0x11
46#define QIB_SRC_OUI_3 0x75
47
48/* version of protocol header (known to chip also). In the long run,
49 * we should be able to generate and accept a range of version numbers;
50 * for now we only accept one, and it's compiled in.
51 */
52#define IPS_PROTO_VERSION 2
53
54/*
55 * These are compile time constants that you may want to enable or disable
56 * if you are trying to debug problems with code or performance.
57 * QIB_VERBOSE_TRACING define as 1 if you want additional tracing in
58 * fastpath code
59 * QIB_TRACE_REGWRITES define as 1 if you want register writes to be
60 * traced in fastpath code
61 * _QIB_TRACING define as 0 if you want to remove all tracing in a
62 * compilation unit
63 */
64
65/*
66 * The value in the BTH QP field that QLogic_IB uses to differentiate
67 * a qlogic_ib protocol IB packet vs. standard IB transport.
68 * It needs to be even (0x656b78), because the LSB is sometimes
69 * used for the MSB of context. The change may cause a problem
70 * interoperating with older software.
71 */
72#define QIB_KD_QP 0x656b78
73
74/*
75 * These are the status bits readable (in ascii form, 64bit value)
76 * from the "status" sysfs file. For binary compatibility, values
77 * must remain as is; removed states can be reused for different
78 * purposes.
79 */
80#define QIB_STATUS_INITTED 0x1 /* basic initialization done */
81/* Chip has been found and initted */
82#define QIB_STATUS_CHIP_PRESENT 0x20
83/* IB link is at ACTIVE, usable for data traffic */
84#define QIB_STATUS_IB_READY 0x40
85/* link is configured, LID, MTU, etc. have been set */
86#define QIB_STATUS_IB_CONF 0x80
87/* A Fatal hardware error has occurred. */
88#define QIB_STATUS_HWERROR 0x200
89
90/*
91 * The list of usermode accessible registers. Also see Reg_* later in file.
92 */
93enum qib_ureg {
94 /* (RO) DMA RcvHdr to be used next. */
95 ur_rcvhdrtail = 0,
96 /* (RW) RcvHdr entry to be processed next by host. */
97 ur_rcvhdrhead = 1,
98 /* (RO) Index of next Eager index to use. */
99 ur_rcvegrindextail = 2,
100 /* (RW) Eager TID to be processed next */
101 ur_rcvegrindexhead = 3,
102 /* For internal use only; max register number. */
103 _QIB_UregMax
104};
105
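/*
 * Editor's sketch (hypothetical, not from the original header): user
 * code that has spi_uregbase mmap()ed can treat it as an array of
 * 64-bit words indexed by enum qib_ureg. The one-word-per-register
 * layout is an assumption of this example, not a stated guarantee.
 */
static inline __u64 qib_read_ureg(const volatile __u64 *uregbase,
				  enum qib_ureg regno)
{
	return uregbase[regno];		/* e.g. uregbase[ur_rcvhdrtail] */
}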
106/* bit values for spi_runtime_flags */
107#define QIB_RUNTIME_PCIE 0x0002
108#define QIB_RUNTIME_FORCE_WC_ORDER 0x0004
109#define QIB_RUNTIME_RCVHDR_COPY 0x0008
110#define QIB_RUNTIME_MASTER 0x0010
111#define QIB_RUNTIME_RCHK 0x0020
112#define QIB_RUNTIME_NODMA_RTAIL 0x0080
113#define QIB_RUNTIME_SPECIAL_TRIGGER 0x0100
114#define QIB_RUNTIME_SDMA 0x0200
115#define QIB_RUNTIME_FORCE_PIOAVAIL 0x0400
116#define QIB_RUNTIME_PIO_REGSWAPPED 0x0800
117#define QIB_RUNTIME_CTXT_MSB_IN_QP 0x1000
118#define QIB_RUNTIME_CTXT_REDIRECT 0x2000
119#define QIB_RUNTIME_HDRSUPP 0x4000
120
121/*
122 * This structure is returned by qib_userinit() immediately after
123 * open to get implementation-specific info, and info specific to this
124 * instance.
125 *
126 * This struct must have explicit pad fields where type sizes
127 * may result in different alignments between 32 and 64 bit
128 * programs, since the 64 bit kernel requires the user code
129 * to have matching offsets.
130 */
131struct qib_base_info {
132 /* version of hardware, for feature checking. */
133 __u32 spi_hw_version;
134 /* version of software, for feature checking. */
135 __u32 spi_sw_version;
136 /* QLogic_IB context assigned, goes into sent packets */
137 __u16 spi_ctxt;
138 __u16 spi_subctxt;
139 /*
140 * IB MTU; a packet's IB data must be less than this.
141 * The MTU is in bytes, and will be a multiple of 4 bytes.
142 */
143 __u32 spi_mtu;
144 /*
145 * Size of a PIO buffer. Any given packet's total size must be less
146 * than this (in words). Included is the starting control word, so
147 * if 513 is returned, then total pkt size is 512 words or less.
148 */
149 __u32 spi_piosize;
150 /* size of the TID cache in qlogic_ib, in entries */
151 __u32 spi_tidcnt;
152 /* size of the TID Eager list in qlogic_ib, in entries */
153 __u32 spi_tidegrcnt;
154 /* size of a single receive header queue entry in words. */
155 __u32 spi_rcvhdrent_size;
156 /*
157 * Count of receive header queue entries allocated.
158 * This may be less than the spu_rcvhdrcnt passed in!
159 */
160 __u32 spi_rcvhdr_cnt;
161
162 /* per-chip and other runtime features bitmap (QIB_RUNTIME_*) */
163 __u32 spi_runtime_flags;
164
165 /* address where hardware receive header queue is mapped */
166 __u64 spi_rcvhdr_base;
167
168 /* user program. */
169
170 /* base address of eager TID receive buffers used by hardware. */
171 __u64 spi_rcv_egrbufs;
172
173 /* Allocated by initialization code, not by protocol. */
174
175 /*
176 * Size of each TID buffer in host memory, starting at
177 * spi_rcv_egrbufs. The buffers are virtually contiguous.
178 */
179 __u32 spi_rcv_egrbufsize;
180 /*
181 * The special QP (queue pair) value that identifies a qlogic_ib
182 * protocol packet from standard IB packets. More, probably much
183 * more, to be added.
184 */
185 __u32 spi_qpair;
186
187 /*
188 * User register base for init code, not to be used directly by
189 * protocol or applications. Always points to chip registers,
190 * for normal or shared context.
191 */
192 __u64 spi_uregbase;
193 /*
194 * Maximum buffer size in bytes that can be used in a single TID
195 * entry (assuming the buffer is aligned to this boundary). This is
196 * the minimum of what the hardware and software support. Guaranteed
197 * to be a power of 2.
198 */
199 __u32 spi_tid_maxsize;
200 /*
201 * alignment of each pio send buffer (byte count
202 * to add to spi_piobufbase to get to second buffer)
203 */
204 __u32 spi_pioalign;
205 /*
206 * The index of the first pio buffer available to this process;
207 * needed to do lookup in spi_pioavailaddr; not added to
208 * spi_piobufbase.
209 */
210 __u32 spi_pioindex;
211 /* number of buffers mapped for this process */
212 __u32 spi_piocnt;
213
214 /*
215 * Base address of writeonly pio buffers for this process.
216 * Each buffer has spi_piosize words, and is aligned on spi_pioalign
217 * boundaries. spi_piocnt buffers are mapped from this address
218 */
219 __u64 spi_piobufbase;
220
221 /*
222 * Base address of readonly memory copy of the pioavail registers.
223 * There are 2 bits for each buffer.
224 */
225 __u64 spi_pioavailaddr;
226
227 /*
228 * Address where driver updates a copy of the interface and driver
229 * status (QIB_STATUS_*) as a 64 bit value. It's followed by a
230 * link status qword (formerly combined with driver status), then a
231 * string indicating hardware error, if there was one.
232 */
233 __u64 spi_status;
234
235 /* number of chip ctxts available to user processes */
236 __u32 spi_nctxts;
237 __u16 spi_unit; /* unit number of chip we are using */
238 __u16 spi_port; /* IB port number we are using */
239 /* num bufs in each contiguous set */
240 __u32 spi_rcv_egrperchunk;
241 /* size in bytes of each contiguous set */
242 __u32 spi_rcv_egrchunksize;
243 /* total size of mmap to cover full rcvegrbuffers */
244 __u32 spi_rcv_egrbuftotlen;
245 __u32 spi_rhf_offset; /* dword offset in hdrqent for rcvhdr flags */
246 /* address of readonly memory copy of the rcvhdrq tail register. */
247 __u64 spi_rcvhdr_tailaddr;
248
249 /*
250 * shared memory pages for subctxts if ctxt is shared; these cover
251 * all the processes in the group sharing a single context.
252 * All have enough space for the num_subcontexts value on this job.
253 */
254 __u64 spi_subctxt_uregbase;
255 __u64 spi_subctxt_rcvegrbuf;
256 __u64 spi_subctxt_rcvhdr_base;
257
258 /* shared memory page for send buffer disarm status */
259 __u64 spi_sendbuf_status;
260} __attribute__ ((aligned(8)));
261
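/*
 * Editor's sketch (hypothetical): once the driver has filled in a
 * struct qib_base_info, user code can poll the mapped status word at
 * spi_status for the QIB_STATUS_* bits defined earlier in this file.
 */
static inline int qib_link_usable(const struct qib_base_info *bi)
{
	const volatile __u64 *status =
		(const volatile __u64 *)(unsigned long)bi->spi_status;
	__u64 mask = QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY;

	/* link must be both configured and ACTIVE for data traffic */
	return (*status & mask) == mask;
}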
262/*
263 * This version number is given to the driver by the user code during
264 * initialization in the spu_userversion field of qib_user_info, so
265 * the driver can check for compatibility with user code.
266 *
267 * The major version changes when data structures
268 * change in an incompatible way. The driver must be the same or higher
269 * for initialization to succeed. In some cases, a higher version
270 * driver will not interoperate with older software, and initialization
271 * will return an error.
272 */
273#define QIB_USER_SWMAJOR 1
274
275/*
276 * Minor version differences are always compatible
277 * within a major version; however, if the user software is newer
278 * than the driver software, some new features and/or structure fields
279 * may not be implemented; the user code must deal with this if it
280 * cares, or it must abort after initialization reports the difference.
281 */
282#define QIB_USER_SWMINOR 10
283
284#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
285
286#ifndef QIB_KERN_TYPE
287#define QIB_KERN_TYPE 0
288#define QIB_IDSTR "QLogic kernel.org driver"
289#endif
290
291/*
292 * Similarly, this is the kernel version going back to the user. It's
293 * slightly different, in that we want to tell if the driver was built as
294 * part of a QLogic release, or from the driver from openfabrics.org,
295 * kernel.org, or a standard distribution, for support reasons.
296 * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
297 *
298 * It's returned by the driver to the user code during initialization in the
299 * spi_sw_version field of qib_base_info, so the user code can in turn
300 * check for compatibility with the kernel.
301 */
302#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
303
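/*
 * Editor's sketch (hypothetical): one strict reading of the rules
 * above, applied to the spi_sw_version the driver returns. A more
 * lenient library could accept an older driver minor and simply
 * disable the features it lacks.
 */
static inline int qib_sw_version_ok(__u32 spi_sw_version)
{
	__u32 kmaj = (spi_sw_version >> 16) & 0x7fff; /* drop QIB_KERN_TYPE bit */
	__u32 kmin = spi_sw_version & 0xffff;

	return kmaj == QIB_USER_SWMAJOR && kmin >= QIB_USER_SWMINOR;
}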
304/*
305 * This structure is passed to qib_userinit() to tell the driver where
306 * user code buffers are, sizes, etc. The offsets and sizes of the
307 * fields must remain unchanged, for binary compatibility. It can
308 * be extended if spu_userversion is changed, so user code can tell, if needed.
309 */
310struct qib_user_info {
311 /*
312 * version of user software, to detect compatibility issues.
313 * Should be set to QIB_USER_SWVERSION.
314 */
315 __u32 spu_userversion;
316
317 __u32 _spu_unused2;
318
319 /* size of struct base_info to write to */
320 __u32 spu_base_info_size;
321
322 __u32 _spu_unused3;
323
324 /*
325 * If two or more processes wish to share a context, each process
326 * must set the spu_subctxt_cnt and spu_subctxt_id to the same
327 * values. The only restriction on the spu_subctxt_id is that
328 * it be unique for a given node.
329 */
330 __u16 spu_subctxt_cnt;
331 __u16 spu_subctxt_id;
332
333 __u32 spu_port; /* IB port requested by user if > 0 */
334
335 /*
336 * address of struct base_info to write to
337 */
338 __u64 spu_base_info;
339
340} __attribute__ ((aligned(8)));
341
342/* User commands. */
343
344/* 16 available, was: old set up userspace (for old user code) */
345#define QIB_CMD_CTXT_INFO 17 /* find out what resources we got */
346#define QIB_CMD_RECV_CTRL 18 /* control receipt of packets */
347#define QIB_CMD_TID_UPDATE 19 /* update expected TID entries */
348#define QIB_CMD_TID_FREE 20 /* free expected TID entries */
349#define QIB_CMD_SET_PART_KEY 21 /* add partition key */
350/* 22 available, was: return info on slave processes (for old user code) */
351#define QIB_CMD_ASSIGN_CTXT 23 /* allocate HCA and ctxt */
352#define QIB_CMD_USER_INIT 24 /* set up userspace */
353#define QIB_CMD_UNUSED_1 25
354#define QIB_CMD_UNUSED_2 26
355#define QIB_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
356#define QIB_CMD_POLL_TYPE 28 /* set the kind of polling we want */
357#define QIB_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
358/* 30 is unused */
359#define QIB_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
360#define QIB_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
361/* 33 available, was a testing feature */
362#define QIB_CMD_DISARM_BUFS 34 /* disarm send buffers w/ errors */
363#define QIB_CMD_ACK_EVENT 35 /* ack & clear bits */
364#define QIB_CMD_CPUS_LIST 36 /* list of cpus allocated, for pinned
365 * processes: qib_cpus_list */
366
367/*
368 * QIB_CMD_ACK_EVENT obsoletes QIB_CMD_DISARM_BUFS, but we keep it for
369 * compatibility with libraries from previous releases. The ACK_EVENT
370 * will take appropriate driver action (if any, just DISARM for now),
371 * then clear the bits passed in as part of the mask. These bits are
372 * in the first 64bit word at spi_sendbuf_status, and are passed to
373 * the driver in the event_mask union as well.
374 */
375#define _QIB_EVENT_DISARM_BUFS_BIT 0
376#define _QIB_EVENT_LINKDOWN_BIT 1
377#define _QIB_EVENT_LID_CHANGE_BIT 2
378#define _QIB_EVENT_LMC_CHANGE_BIT 3
379#define _QIB_EVENT_SL2VL_CHANGE_BIT 4
380#define _QIB_MAX_EVENT_BIT _QIB_EVENT_SL2VL_CHANGE_BIT
381
382#define QIB_EVENT_DISARM_BUFS_BIT (1UL << _QIB_EVENT_DISARM_BUFS_BIT)
383#define QIB_EVENT_LINKDOWN_BIT (1UL << _QIB_EVENT_LINKDOWN_BIT)
384#define QIB_EVENT_LID_CHANGE_BIT (1UL << _QIB_EVENT_LID_CHANGE_BIT)
385#define QIB_EVENT_LMC_CHANGE_BIT (1UL << _QIB_EVENT_LMC_CHANGE_BIT)
386#define QIB_EVENT_SL2VL_CHANGE_BIT (1UL << _QIB_EVENT_SL2VL_CHANGE_BIT)
387
388
389/*
390 * Poll types
391 */
392#define QIB_POLL_TYPE_ANYRCV 0x0
393#define QIB_POLL_TYPE_URGENT 0x1
394
395struct qib_ctxt_info {
396 __u16 num_active; /* number of active units */
397 __u16 unit; /* unit (chip) assigned to caller */
398 __u16 port; /* IB port assigned to caller (1-based) */
399 __u16 ctxt; /* ctxt on unit assigned to caller */
400 __u16 subctxt; /* subctxt on unit assigned to caller */
401 __u16 num_ctxts; /* number of ctxts available on unit */
402 __u16 num_subctxts; /* number of subctxts opened on ctxt */
403 __u16 rec_cpu; /* cpu # for affinity (ffff if none) */
404};
405
406struct qib_tid_info {
407 __u32 tidcnt;
408 /* make structure same size in 32 and 64 bit */
409 __u32 tid__unused;
410 /* virtual address of first page in transfer */
411 __u64 tidvaddr;
412 /* pointer (same size 32/64 bit) to __u16 tid array */
413 __u64 tidlist;
414
415 /*
416 * pointer (same size 32/64 bit) to bitmap of TIDs used
417 * for this call; checked for being large enough at open
418 */
419 __u64 tidmap;
420};
421
422struct qib_cmd {
423 __u32 type; /* command type */
424 union {
425 struct qib_tid_info tid_info;
426 struct qib_user_info user_info;
427
428 /*
429 * address in userspace where we should put the sdma
430 * inflight counter
431 */
432 __u64 sdma_inflight;
433 /*
434 * address in userspace where we should put the sdma
435 * completion counter
436 */
437 __u64 sdma_complete;
438 /* address in userspace of struct qib_ctxt_info to
439 write result to */
440 __u64 ctxt_info;
441 /* enable/disable receipt of packets */
442 __u32 recv_ctrl;
443 /* enable/disable armlaunch errors (non-zero to enable) */
444 __u32 armlaunch_ctrl;
445 /* partition key to set */
446 __u16 part_key;
447 /* user address of __u32 bitmask of active slaves */
448 __u64 slave_mask_addr;
449 /* type of polling we want */
450 __u16 poll_type;
451 /* back pressure enable bit for one particular context */
452 __u8 ctxt_bp;
453 /* qib_user_event_ack(), IPATH_EVENT_* bits */
454 __u64 event_mask;
455 } cmd;
456};
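
The union above pairs each command type with its payload. A hedged user-space sketch of issuing QIB_CMD_CTXT_INFO, assuming (as the layout suggests, and as is conventional for this driver family) that commands are submitted by write()ing a struct qib_cmd to the open device file descriptor:

	#include <string.h>
	#include <unistd.h>

	static int get_ctxt_info(int fd, struct qib_ctxt_info *info)
	{
		struct qib_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.type = QIB_CMD_CTXT_INFO;	/* "find out what resources we got" */
		cmd.cmd.ctxt_info = (__u64) (unsigned long) info;
		return write(fd, &cmd, sizeof(cmd)) == sizeof(cmd) ? 0 : -1;
	}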
457
458struct qib_iovec {
459 /* Pointer to data, but same size 32 and 64 bit */
460 __u64 iov_base;
461
462 /*
463 * Length of data; don't need 64 bits, but want
464 * qib_sendpkt to remain same size as before 32 bit changes, so...
465 */
466 __u64 iov_len;
467};
468
469/*
470 * Describes a single packet for send. Each packet can have one or more
471 * buffers, but the total length (exclusive of IB headers) must be less
472 * than the MTU, and if using the PIO method, entire packet length,
473 * including IB headers, must be less than the qib_piosize value (words).
474 * Use of this necessitates including sys/uio.h
475 */
476struct __qib_sendpkt {
477 __u32 sps_flags; /* flags for packet (TBD) */
478 __u32 sps_cnt; /* number of entries to use in sps_iov */
479 /* array of iov's describing packet. TEMPORARY */
480 struct qib_iovec sps_iov[4];
481};
482
483/*
484 * Diagnostics can send a packet by "writing" the following
485 * structs to the diag data special file. This allows a custom
486 * pbc (+ static rate) qword, so that special modes and
487 * deliberate changes to CRCs can be used. The elements were
488 * also re-ordered for better alignment and to avoid padding
489 * issues.
490 */
491#define _DIAG_XPKT_VERS 3
492struct qib_diag_xpkt {
493 __u16 version;
494 __u16 unit;
495 __u16 port;
496 __u16 len;
497 __u64 data;
498 __u64 pbc_wd;
499};
500
501/*
502 * Data layout in I2C flash (for GUID, etc.)
503 * All fields are little-endian binary unless otherwise stated
504 */
505#define QIB_FLASH_VERSION 2
506struct qib_flash {
507 /* flash layout version (QIB_FLASH_VERSION) */
508 __u8 if_fversion;
509 /* checksum protecting if_length bytes */
510 __u8 if_csum;
511 /*
512	 * valid length (in use, protected by if_csum, including
513 * if_fversion and if_csum themselves)
514 */
515 __u8 if_length;
516 /* the GUID, in network order */
517 __u8 if_guid[8];
518 /* number of GUIDs to use, starting from if_guid */
519 __u8 if_numguid;
520 /* the (last 10 characters of) board serial number, in ASCII */
521 char if_serial[12];
522 /* board mfg date (YYYYMMDD ASCII) */
523 char if_mfgdate[8];
524 /* last board rework/test date (YYYYMMDD ASCII) */
525 char if_testdate[8];
526 /* logging of error counts, TBD */
527 __u8 if_errcntp[4];
528 /* powered on hours, updated at driver unload */
529 __u8 if_powerhour[2];
530 /* ASCII free-form comment field */
531 char if_comment[32];
532 /* Backwards compatible prefix for longer QLogic Serial Numbers */
533 char if_sprefix[4];
534 /* 82 bytes used, min flash size is 128 bytes */
535 __u8 if_future[46];
536};
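
Since if_guid is stored in network order, recovering the 64-bit GUID is a plain big-endian byte fold. A sketch grounded only in the layout comments above:

	static __u64 flash_guid(const struct qib_flash *ifp)
	{
		__u64 guid = 0;
		int i;

		for (i = 0; i < 8; i++)		/* big-endian: MSB first */
			guid = (guid << 8) | ifp->if_guid[i];
		return guid;
	}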
537
538/*
539 * These are the counters implemented in the chip, and are listed in order.
540 * The InterCaps naming is taken straight from the chip spec.
541 */
542struct qlogic_ib_counters {
543 __u64 LBIntCnt;
544 __u64 LBFlowStallCnt;
545 __u64 TxSDmaDescCnt; /* was Reserved1 */
546 __u64 TxUnsupVLErrCnt;
547 __u64 TxDataPktCnt;
548 __u64 TxFlowPktCnt;
549 __u64 TxDwordCnt;
550 __u64 TxLenErrCnt;
551 __u64 TxMaxMinLenErrCnt;
552 __u64 TxUnderrunCnt;
553 __u64 TxFlowStallCnt;
554 __u64 TxDroppedPktCnt;
555 __u64 RxDroppedPktCnt;
556 __u64 RxDataPktCnt;
557 __u64 RxFlowPktCnt;
558 __u64 RxDwordCnt;
559 __u64 RxLenErrCnt;
560 __u64 RxMaxMinLenErrCnt;
561 __u64 RxICRCErrCnt;
562 __u64 RxVCRCErrCnt;
563 __u64 RxFlowCtrlErrCnt;
564 __u64 RxBadFormatCnt;
565 __u64 RxLinkProblemCnt;
566 __u64 RxEBPCnt;
567 __u64 RxLPCRCErrCnt;
568 __u64 RxBufOvflCnt;
569 __u64 RxTIDFullErrCnt;
570 __u64 RxTIDValidErrCnt;
571 __u64 RxPKeyMismatchCnt;
572 __u64 RxP0HdrEgrOvflCnt;
573 __u64 RxP1HdrEgrOvflCnt;
574 __u64 RxP2HdrEgrOvflCnt;
575 __u64 RxP3HdrEgrOvflCnt;
576 __u64 RxP4HdrEgrOvflCnt;
577 __u64 RxP5HdrEgrOvflCnt;
578 __u64 RxP6HdrEgrOvflCnt;
579 __u64 RxP7HdrEgrOvflCnt;
580 __u64 RxP8HdrEgrOvflCnt;
581 __u64 RxP9HdrEgrOvflCnt;
582 __u64 RxP10HdrEgrOvflCnt;
583 __u64 RxP11HdrEgrOvflCnt;
584 __u64 RxP12HdrEgrOvflCnt;
585 __u64 RxP13HdrEgrOvflCnt;
586 __u64 RxP14HdrEgrOvflCnt;
587 __u64 RxP15HdrEgrOvflCnt;
588 __u64 RxP16HdrEgrOvflCnt;
589 __u64 IBStatusChangeCnt;
590 __u64 IBLinkErrRecoveryCnt;
591 __u64 IBLinkDownedCnt;
592 __u64 IBSymbolErrCnt;
593 __u64 RxVL15DroppedPktCnt;
594 __u64 RxOtherLocalPhyErrCnt;
595 __u64 PcieRetryBufDiagQwordCnt;
596 __u64 ExcessBufferOvflCnt;
597 __u64 LocalLinkIntegrityErrCnt;
598 __u64 RxVlErrCnt;
599 __u64 RxDlidFltrCnt;
600};
601
602/*
603 * The next set of defines are for packet headers, and chip register
604 * and memory bits that are visible to and/or used by user-mode software.
605 */
606
607/* RcvHdrFlags bits */
608#define QLOGIC_IB_RHF_LENGTH_MASK 0x7FF
609#define QLOGIC_IB_RHF_LENGTH_SHIFT 0
610#define QLOGIC_IB_RHF_RCVTYPE_MASK 0x7
611#define QLOGIC_IB_RHF_RCVTYPE_SHIFT 11
612#define QLOGIC_IB_RHF_EGRINDEX_MASK 0xFFF
613#define QLOGIC_IB_RHF_EGRINDEX_SHIFT 16
614#define QLOGIC_IB_RHF_SEQ_MASK 0xF
615#define QLOGIC_IB_RHF_SEQ_SHIFT 0
616#define QLOGIC_IB_RHF_HDRQ_OFFSET_MASK 0x7FF
617#define QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT 4
618#define QLOGIC_IB_RHF_H_ICRCERR 0x80000000
619#define QLOGIC_IB_RHF_H_VCRCERR 0x40000000
620#define QLOGIC_IB_RHF_H_PARITYERR 0x20000000
621#define QLOGIC_IB_RHF_H_LENERR 0x10000000
622#define QLOGIC_IB_RHF_H_MTUERR 0x08000000
623#define QLOGIC_IB_RHF_H_IHDRERR 0x04000000
624#define QLOGIC_IB_RHF_H_TIDERR 0x02000000
625#define QLOGIC_IB_RHF_H_MKERR 0x01000000
626#define QLOGIC_IB_RHF_H_IBERR 0x00800000
627#define QLOGIC_IB_RHF_H_ERR_MASK 0xFF800000
628#define QLOGIC_IB_RHF_L_USE_EGR 0x80000000
629#define QLOGIC_IB_RHF_L_SWA 0x00008000
630#define QLOGIC_IB_RHF_L_SWB 0x00004000
631
632/* qlogic_ib header fields */
633#define QLOGIC_IB_I_VERS_MASK 0xF
634#define QLOGIC_IB_I_VERS_SHIFT 28
635#define QLOGIC_IB_I_CTXT_MASK 0xF
636#define QLOGIC_IB_I_CTXT_SHIFT 24
637#define QLOGIC_IB_I_TID_MASK 0x7FF
638#define QLOGIC_IB_I_TID_SHIFT 13
639#define QLOGIC_IB_I_OFFSET_MASK 0x1FFF
640#define QLOGIC_IB_I_OFFSET_SHIFT 0
641
642/* K_PktFlags bits */
643#define QLOGIC_IB_KPF_INTR 0x1
644#define QLOGIC_IB_KPF_SUBCTXT_MASK 0x3
645#define QLOGIC_IB_KPF_SUBCTXT_SHIFT 1
646
647#define QLOGIC_IB_MAX_SUBCTXT 4
648
649/* SendPIO per-buffer control */
650#define QLOGIC_IB_SP_TEST 0x40
651#define QLOGIC_IB_SP_TESTEBP 0x20
652#define QLOGIC_IB_SP_TRIGGER_SHIFT 15
653
654/* SendPIOAvail bits */
655#define QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT 1
656#define QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT 0
657
658/* qlogic_ib header format */
659struct qib_header {
660 /*
661 * Version - 4 bits, Context - 4 bits, TID - 10 bits and Offset -
662 * 14 bits before ECO change ~28 Dec 03. After that, Vers 4,
663 * Context 4, TID 11, offset 13.
664 */
665 __le32 ver_ctxt_tid_offset;
666 __le16 chksum;
667 __le16 pkt_flags;
668};
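
Unpacking ver_ctxt_tid_offset follows the QLOGIC_IB_I_* masks and shifts defined above (the post-ECO 4/4/11/13-bit layout); for instance, mirroring the qib_hdrget_qib_ver() helper near the end of this header:

	static inline __u32 hdr_get_ctxt(const struct qib_header *hdr)
	{
		return (__le32_to_cpu(hdr->ver_ctxt_tid_offset) >>
			QLOGIC_IB_I_CTXT_SHIFT) & QLOGIC_IB_I_CTXT_MASK;
	}

	static inline __u32 hdr_get_offset(const struct qib_header *hdr)
	{
		return (__le32_to_cpu(hdr->ver_ctxt_tid_offset) >>
			QLOGIC_IB_I_OFFSET_SHIFT) & QLOGIC_IB_I_OFFSET_MASK;
	}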
669
670/*
671 * qlogic_ib user message header format.
672 * This structure contains the first 4 fields common to all protocols
673 * that employ qlogic_ib.
674 */
675struct qib_message_header {
676 __be16 lrh[4];
677 __be32 bth[3];
678 /* fields below this point are in host byte order */
679 struct qib_header iph;
680 __u8 sub_opcode;
681};
682
683/* IB - LRH header consts */
684#define QIB_LRH_GRH 0x0003 /* 1. word of IB LRH - next header: GRH */
685#define QIB_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
686
687/* misc. */
688#define SIZE_OF_CRC 1
689
690#define QIB_DEFAULT_P_KEY 0xFFFF
691#define QIB_PERMISSIVE_LID 0xFFFF
692#define QIB_AETH_CREDIT_SHIFT 24
693#define QIB_AETH_CREDIT_MASK 0x1F
694#define QIB_AETH_CREDIT_INVAL 0x1F
695#define QIB_PSN_MASK 0xFFFFFF
696#define QIB_MSN_MASK 0xFFFFFF
697#define QIB_QPN_MASK 0xFFFFFF
698#define QIB_MULTICAST_LID_BASE 0xC000
699#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
700#define QIB_MULTICAST_QPN 0xFFFFFF
701
702/* Receive Header Queue: receive type (from qlogic_ib) */
703#define RCVHQ_RCV_TYPE_EXPECTED 0
704#define RCVHQ_RCV_TYPE_EAGER 1
705#define RCVHQ_RCV_TYPE_NON_KD 2
706#define RCVHQ_RCV_TYPE_ERROR 3
707
708#define QIB_HEADER_QUEUE_WORDS 9
709
710/* functions for extracting fields from rcvhdrq entries for the driver.
711 */
712static inline __u32 qib_hdrget_err_flags(const __le32 *rbuf)
713{
714 return __le32_to_cpu(rbuf[1]) & QLOGIC_IB_RHF_H_ERR_MASK;
715}
716
717static inline __u32 qib_hdrget_rcv_type(const __le32 *rbuf)
718{
719 return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_RCVTYPE_SHIFT) &
720 QLOGIC_IB_RHF_RCVTYPE_MASK;
721}
722
723static inline __u32 qib_hdrget_length_in_bytes(const __le32 *rbuf)
724{
725 return ((__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_LENGTH_SHIFT) &
726 QLOGIC_IB_RHF_LENGTH_MASK) << 2;
727}
728
729static inline __u32 qib_hdrget_index(const __le32 *rbuf)
730{
731 return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_EGRINDEX_SHIFT) &
732 QLOGIC_IB_RHF_EGRINDEX_MASK;
733}
734
735static inline __u32 qib_hdrget_seq(const __le32 *rbuf)
736{
737 return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_SEQ_SHIFT) &
738 QLOGIC_IB_RHF_SEQ_MASK;
739}
740
741static inline __u32 qib_hdrget_offset(const __le32 *rbuf)
742{
743 return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT) &
744 QLOGIC_IB_RHF_HDRQ_OFFSET_MASK;
745}
746
747static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf)
748{
749 return __le32_to_cpu(rbuf[0]) & QLOGIC_IB_RHF_L_USE_EGR;
750}
751
752static inline __u32 qib_hdrget_qib_ver(__le32 hdrword)
753{
754 return (__le32_to_cpu(hdrword) >> QLOGIC_IB_I_VERS_SHIFT) &
755 QLOGIC_IB_I_VERS_MASK;
756}
757
758#endif /* _QIB_COMMON_H */
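
Taken together, the helpers above let a receive path classify one rcvhdrq entry without touching the raw bit layout. A sketch, where `rbuf' points at the two RHF words of an entry (as the helpers assume) and the handle_*() calls are hypothetical stand-ins:

	__u32 etype = qib_hdrget_rcv_type(rbuf);
	__u32 len = qib_hdrget_length_in_bytes(rbuf);

	if (qib_hdrget_err_flags(rbuf))
		handle_rcv_error(rbuf);		/* a QLOGIC_IB_RHF_H_* bit is set */
	else if (etype == RCVHQ_RCV_TYPE_EAGER)
		handle_eager(qib_hdrget_index(rbuf), len); /* eager buffer index */
	else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
		handle_expected(len);		/* matched an expected TID */
	else
		handle_other(etype, len);	/* NON_KD or ERROR types */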
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
new file mode 100644
index 000000000000..a86cbf880f98
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -0,0 +1,484 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/err.h>
35#include <linux/slab.h>
36#include <linux/vmalloc.h>
37
38#include "qib_verbs.h"
39
40/**
41 * qib_cq_enter - add a new entry to the completion queue
42 * @cq: completion queue
43 * @entry: work completion entry to add
 44 * @solicited: true if @entry is a solicited entry
45 *
46 * This may be called with qp->s_lock held.
47 */
48void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
49{
50 struct qib_cq_wc *wc;
51 unsigned long flags;
52 u32 head;
53 u32 next;
54
55 spin_lock_irqsave(&cq->lock, flags);
56
57 /*
58 * Note that the head pointer might be writable by user processes.
59 * Take care to verify it is a sane value.
60 */
61 wc = cq->queue;
62 head = wc->head;
63 if (head >= (unsigned) cq->ibcq.cqe) {
64 head = cq->ibcq.cqe;
65 next = 0;
66 } else
67 next = head + 1;
68 if (unlikely(next == wc->tail)) {
69 spin_unlock_irqrestore(&cq->lock, flags);
70 if (cq->ibcq.event_handler) {
71 struct ib_event ev;
72
73 ev.device = cq->ibcq.device;
74 ev.element.cq = &cq->ibcq;
75 ev.event = IB_EVENT_CQ_ERR;
76 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
77 }
78 return;
79 }
80 if (cq->ip) {
81 wc->uqueue[head].wr_id = entry->wr_id;
82 wc->uqueue[head].status = entry->status;
83 wc->uqueue[head].opcode = entry->opcode;
84 wc->uqueue[head].vendor_err = entry->vendor_err;
85 wc->uqueue[head].byte_len = entry->byte_len;
86 wc->uqueue[head].ex.imm_data =
87 (__u32 __force)entry->ex.imm_data;
88 wc->uqueue[head].qp_num = entry->qp->qp_num;
89 wc->uqueue[head].src_qp = entry->src_qp;
90 wc->uqueue[head].wc_flags = entry->wc_flags;
91 wc->uqueue[head].pkey_index = entry->pkey_index;
92 wc->uqueue[head].slid = entry->slid;
93 wc->uqueue[head].sl = entry->sl;
94 wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
95 wc->uqueue[head].port_num = entry->port_num;
96 /* Make sure entry is written before the head index. */
97 smp_wmb();
98 } else
99 wc->kqueue[head] = *entry;
100 wc->head = next;
101
102 if (cq->notify == IB_CQ_NEXT_COMP ||
103 (cq->notify == IB_CQ_SOLICITED && solicited)) {
104 cq->notify = IB_CQ_NONE;
105 cq->triggered++;
106 /*
107 * This will cause send_complete() to be called in
108 * another thread.
109 */
110 queue_work(qib_cq_wq, &cq->comptask);
111 }
112
113 spin_unlock_irqrestore(&cq->lock, flags);
114}
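
The head handling above is the standard one-slot-empty circular buffer, hardened because the indices may live in a user-mapped page: clamp the untrusted head, keep one slot free so that head == tail always means empty, and (for the mmap'ed case) order the entry store before the head store with smp_wmb(). The idiom in isolation, as a fragment of the function above; the -ENOSPC return merely stands in for raising IB_EVENT_CQ_ERR:

	u32 head = wc->head;		/* untrusted: page may be user-writable */
	u32 next;

	if (head >= (u32) cq->ibcq.cqe) {
		head = cq->ibcq.cqe;	/* clamp out-of-range values */
		next = 0;		/* and wrap */
	} else
		next = head + 1;
	if (next == wc->tail)		/* the sacrificed slot: queue is full */
		return -ENOSPC;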
115
116/**
117 * qib_poll_cq - poll for work completion entries
118 * @ibcq: the completion queue to poll
119 * @num_entries: the maximum number of entries to return
120 * @entry: pointer to array where work completions are placed
121 *
122 * Returns the number of completion entries polled.
123 *
124 * This may be called from interrupt context. Also called by ib_poll_cq()
125 * in the generic verbs code.
126 */
127int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
128{
129 struct qib_cq *cq = to_icq(ibcq);
130 struct qib_cq_wc *wc;
131 unsigned long flags;
132 int npolled;
133 u32 tail;
134
135 /* The kernel can only poll a kernel completion queue */
136 if (cq->ip) {
137 npolled = -EINVAL;
138 goto bail;
139 }
140
141 spin_lock_irqsave(&cq->lock, flags);
142
143 wc = cq->queue;
144 tail = wc->tail;
145 if (tail > (u32) cq->ibcq.cqe)
146 tail = (u32) cq->ibcq.cqe;
147 for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
148 if (tail == wc->head)
149 break;
150 /* The kernel doesn't need a RMB since it has the lock. */
151 *entry = wc->kqueue[tail];
152 if (tail >= cq->ibcq.cqe)
153 tail = 0;
154 else
155 tail++;
156 }
157 wc->tail = tail;
158
159 spin_unlock_irqrestore(&cq->lock, flags);
160
161bail:
162 return npolled;
163}
164
165static void send_complete(struct work_struct *work)
166{
167 struct qib_cq *cq = container_of(work, struct qib_cq, comptask);
168
169 /*
170 * The completion handler will most likely rearm the notification
171 * and poll for all pending entries. If a new completion entry
172 * is added while we are in this routine, queue_work()
173 * won't call us again until we return so we check triggered to
174 * see if we need to call the handler again.
175 */
176 for (;;) {
177 u8 triggered = cq->triggered;
178
179 /*
180 * IPoIB connected mode assumes the callback is from a
181 * soft IRQ. We simulate this by blocking "bottom halves".
182 * See the implementation for ipoib_cm_handle_tx_wc(),
183 * netif_tx_lock_bh() and netif_tx_lock().
184 */
185 local_bh_disable();
186 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
187 local_bh_enable();
188
189 if (cq->triggered == triggered)
190 return;
191 }
192}
193
194/**
195 * qib_create_cq - create a completion queue
196 * @ibdev: the device this completion queue is attached to
197 * @entries: the minimum size of the completion queue
198 * @comp_vector, @context: unused by the QLogic_IB driver
199 * @udata: user data for libibverbs.so
200 *
201 * Returns a pointer to the completion queue or negative errno values
202 * for failure.
203 *
204 * Called by ib_create_cq() in the generic verbs code.
205 */
206struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
207 int comp_vector, struct ib_ucontext *context,
208 struct ib_udata *udata)
209{
210 struct qib_ibdev *dev = to_idev(ibdev);
211 struct qib_cq *cq;
212 struct qib_cq_wc *wc;
213 struct ib_cq *ret;
214 u32 sz;
215
216 if (entries < 1 || entries > ib_qib_max_cqes) {
217 ret = ERR_PTR(-EINVAL);
218 goto done;
219 }
220
221 /* Allocate the completion queue structure. */
222 cq = kmalloc(sizeof(*cq), GFP_KERNEL);
223 if (!cq) {
224 ret = ERR_PTR(-ENOMEM);
225 goto done;
226 }
227
228 /*
229 * Allocate the completion queue entries and head/tail pointers.
230 * This is allocated separately so that it can be resized and
231 * also mapped into user space.
232 * We need to use vmalloc() in order to support mmap and large
233 * numbers of entries.
234 */
235 sz = sizeof(*wc);
236 if (udata && udata->outlen >= sizeof(__u64))
237 sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
238 else
239 sz += sizeof(struct ib_wc) * (entries + 1);
240 wc = vmalloc_user(sz);
241 if (!wc) {
242 ret = ERR_PTR(-ENOMEM);
243 goto bail_cq;
244 }
245
246 /*
247 * Return the address of the WC as the offset to mmap.
248 * See qib_mmap() for details.
249 */
250 if (udata && udata->outlen >= sizeof(__u64)) {
251 int err;
252
253 cq->ip = qib_create_mmap_info(dev, sz, context, wc);
254 if (!cq->ip) {
255 ret = ERR_PTR(-ENOMEM);
256 goto bail_wc;
257 }
258
259 err = ib_copy_to_udata(udata, &cq->ip->offset,
260 sizeof(cq->ip->offset));
261 if (err) {
262 ret = ERR_PTR(err);
263 goto bail_ip;
264 }
265 } else
266 cq->ip = NULL;
267
268 spin_lock(&dev->n_cqs_lock);
269 if (dev->n_cqs_allocated == ib_qib_max_cqs) {
270 spin_unlock(&dev->n_cqs_lock);
271 ret = ERR_PTR(-ENOMEM);
272 goto bail_ip;
273 }
274
275 dev->n_cqs_allocated++;
276 spin_unlock(&dev->n_cqs_lock);
277
278 if (cq->ip) {
279 spin_lock_irq(&dev->pending_lock);
280 list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
281 spin_unlock_irq(&dev->pending_lock);
282 }
283
284 /*
285 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
286 * The number of entries must be >= the number requested, or an
287 * error is returned.
288 */
289 cq->ibcq.cqe = entries;
290 cq->notify = IB_CQ_NONE;
291 cq->triggered = 0;
292 spin_lock_init(&cq->lock);
293 INIT_WORK(&cq->comptask, send_complete);
294 wc->head = 0;
295 wc->tail = 0;
296 cq->queue = wc;
297
298 ret = &cq->ibcq;
299
300 goto done;
301
302bail_ip:
303 kfree(cq->ip);
304bail_wc:
305 vfree(wc);
306bail_cq:
307 kfree(cq);
308done:
309 return ret;
310}
311
312/**
313 * qib_destroy_cq - destroy a completion queue
314 * @ibcq: the completion queue to destroy.
315 *
316 * Returns 0 for success.
317 *
318 * Called by ib_destroy_cq() in the generic verbs code.
319 */
320int qib_destroy_cq(struct ib_cq *ibcq)
321{
322 struct qib_ibdev *dev = to_idev(ibcq->device);
323 struct qib_cq *cq = to_icq(ibcq);
324
325 flush_work(&cq->comptask);
326 spin_lock(&dev->n_cqs_lock);
327 dev->n_cqs_allocated--;
328 spin_unlock(&dev->n_cqs_lock);
329 if (cq->ip)
330 kref_put(&cq->ip->ref, qib_release_mmap_info);
331 else
332 vfree(cq->queue);
333 kfree(cq);
334
335 return 0;
336}
337
338/**
339 * qib_req_notify_cq - change the notification type for a completion queue
340 * @ibcq: the completion queue
341 * @notify_flags: the type of notification to request
342 *
343 * Returns 0 for success.
344 *
345 * This may be called from interrupt context. Also called by
346 * ib_req_notify_cq() in the generic verbs code.
347 */
348int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
349{
350 struct qib_cq *cq = to_icq(ibcq);
351 unsigned long flags;
352 int ret = 0;
353
354 spin_lock_irqsave(&cq->lock, flags);
355 /*
356 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
357 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
358 */
359 if (cq->notify != IB_CQ_NEXT_COMP)
360 cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
361
362 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
363 cq->queue->head != cq->queue->tail)
364 ret = 1;
365
366 spin_unlock_irqrestore(&cq->lock, flags);
367
368 return ret;
369}
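
The return value of 1 when IB_CQ_REPORT_MISSED_EVENTS is set and entries remain enables the standard race-free consumer loop. A sketch via the generic verbs entry points, with process_wc() as a hypothetical handler:

	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0)
			process_wc(&wc);
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);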
370
371/**
372 * qib_resize_cq - change the size of the CQ
373 * @ibcq: the completion queue
374 * @cqe: the new minimum number of entries
375 * Returns 0 for success.
376 */
377int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
378{
379 struct qib_cq *cq = to_icq(ibcq);
380 struct qib_cq_wc *old_wc;
381 struct qib_cq_wc *wc;
382 u32 head, tail, n;
383 int ret;
384 u32 sz;
385
386 if (cqe < 1 || cqe > ib_qib_max_cqes) {
387 ret = -EINVAL;
388 goto bail;
389 }
390
391 /*
392 * Need to use vmalloc() if we want to support large #s of entries.
393 */
394 sz = sizeof(*wc);
395 if (udata && udata->outlen >= sizeof(__u64))
396 sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
397 else
398 sz += sizeof(struct ib_wc) * (cqe + 1);
399 wc = vmalloc_user(sz);
400 if (!wc) {
401 ret = -ENOMEM;
402 goto bail;
403 }
404
405 /* Check that we can write the offset to mmap. */
406 if (udata && udata->outlen >= sizeof(__u64)) {
407 __u64 offset = 0;
408
409 ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
410 if (ret)
411 goto bail_free;
412 }
413
414 spin_lock_irq(&cq->lock);
415 /*
416 * Make sure head and tail are sane since they
417 * might be user writable.
418 */
419 old_wc = cq->queue;
420 head = old_wc->head;
421 if (head > (u32) cq->ibcq.cqe)
422 head = (u32) cq->ibcq.cqe;
423 tail = old_wc->tail;
424 if (tail > (u32) cq->ibcq.cqe)
425 tail = (u32) cq->ibcq.cqe;
426 if (head < tail)
427 n = cq->ibcq.cqe + 1 + head - tail;
428 else
429 n = head - tail;
430 if (unlikely((u32)cqe < n)) {
431 ret = -EINVAL;
432 goto bail_unlock;
433 }
434 for (n = 0; tail != head; n++) {
435 if (cq->ip)
436 wc->uqueue[n] = old_wc->uqueue[tail];
437 else
438 wc->kqueue[n] = old_wc->kqueue[tail];
439 if (tail == (u32) cq->ibcq.cqe)
440 tail = 0;
441 else
442 tail++;
443 }
444 cq->ibcq.cqe = cqe;
445 wc->head = n;
446 wc->tail = 0;
447 cq->queue = wc;
448 spin_unlock_irq(&cq->lock);
449
450 vfree(old_wc);
451
452 if (cq->ip) {
453 struct qib_ibdev *dev = to_idev(ibcq->device);
454 struct qib_mmap_info *ip = cq->ip;
455
456 qib_update_mmap_info(dev, ip, sz, wc);
457
458 /*
459 * Return the offset to mmap.
460 * See qib_mmap() for details.
461 */
462 if (udata && udata->outlen >= sizeof(__u64)) {
463 ret = ib_copy_to_udata(udata, &ip->offset,
464 sizeof(ip->offset));
465 if (ret)
466 goto bail;
467 }
468
469 spin_lock_irq(&dev->pending_lock);
470 if (list_empty(&ip->pending_mmaps))
471 list_add(&ip->pending_mmaps, &dev->pending_mmaps);
472 spin_unlock_irq(&dev->pending_lock);
473 }
474
475 ret = 0;
476 goto bail;
477
478bail_unlock:
479 spin_unlock_irq(&cq->lock);
480bail_free:
481 vfree(wc);
482bail:
483 return ret;
484}
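
A kernel consumer reaches this through the generic wrapper; a minimal sketch, where new_cqe is the desired minimum. Note from the head/tail check above that the call fails with -EINVAL if the new size cannot hold the entries currently outstanding:

	int err = ib_resize_cq(cq, new_cqe);

	if (err)
		pr_err("ib_resize_cq(%d) failed: %d\n", new_cqe, err);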
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
new file mode 100644
index 000000000000..ca98dd523752
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -0,0 +1,894 @@
1/*
2 * Copyright (c) 2010 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35/*
36 * This file contains support for diagnostic functions. It is accessed by
37 * opening the qib_diag device, normally minor number 129. Diagnostic use
38 * of the QLogic_IB chip may render the chip or board unusable until the
39 * driver is unloaded, or in some cases, until the system is rebooted.
40 *
 41 * Accesses to the chip through this interface are not the same as
 42 * going through the /sys/bus/pci resource mmap interface.
43 */
44
45#include <linux/io.h>
46#include <linux/pci.h>
47#include <linux/poll.h>
48#include <linux/vmalloc.h>
49#include <linux/fs.h>
50#include <linux/uaccess.h>
51
52#include "qib.h"
53#include "qib_common.h"
54
55/*
56 * Each client that opens the diag device must read then write
57 * offset 0, to prevent lossage from random cat or od. diag_state
58 * sequences this "handshake".
59 */
60enum diag_state { UNUSED = 0, OPENED, INIT, READY };
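
From user space, the handshake this enum sequences looks like the sketch below. The device path is an assumption based on the "ipath_diag%d" name used in qib_diag_add() below; the 8-byte read-then-write at offset 0 mirrors the state checks in qib_diag_read() and qib_diag_write():

	#include <fcntl.h>
	#include <unistd.h>

	static int diag_open_handshake(const char *path)
	{
		char buf[8];
		int fd = open(path, O_RDWR);	/* e.g. "/dev/ipath_diag0" */

		if (fd < 0)
			return -1;
		/* OPENED -> INIT: first access must read 8 bytes at offset 0 */
		if (pread(fd, buf, sizeof(buf), 0) != sizeof(buf) ||
		    /* INIT -> READY: then write the same 8 bytes back */
		    pwrite(fd, buf, sizeof(buf), 0) != sizeof(buf)) {
			close(fd);
			return -1;
		}
		return fd;	/* arbitrary 32/64-bit aligned I/O now allowed */
	}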
61
62/* State for an individual client. PID so children cannot abuse handshake */
63static struct qib_diag_client {
64 struct qib_diag_client *next;
65 struct qib_devdata *dd;
66 pid_t pid;
67 enum diag_state state;
68} *client_pool;
69
70/*
71 * Get a client struct. Recycled if possible, else kmalloc.
72 * Must be called with qib_mutex held
73 */
74static struct qib_diag_client *get_client(struct qib_devdata *dd)
75{
76 struct qib_diag_client *dc;
77
78 dc = client_pool;
79 if (dc)
 80		/* got one from the pool; remove it and use it */
81 client_pool = dc->next;
82 else
83 /* None in pool, alloc and init */
84 dc = kmalloc(sizeof *dc, GFP_KERNEL);
85
86 if (dc) {
87 dc->next = NULL;
88 dc->dd = dd;
89 dc->pid = current->pid;
90 dc->state = OPENED;
91 }
92 return dc;
93}
94
95/*
96 * Return to pool. Must be called with qib_mutex held
97 */
98static void return_client(struct qib_diag_client *dc)
99{
100 struct qib_devdata *dd = dc->dd;
101 struct qib_diag_client *tdc, *rdc;
102
103 rdc = NULL;
104 if (dc == dd->diag_client) {
105 dd->diag_client = dc->next;
106 rdc = dc;
107 } else {
108 tdc = dc->dd->diag_client;
109 while (tdc) {
110 if (dc == tdc->next) {
111 tdc->next = dc->next;
112 rdc = dc;
113 break;
114 }
115 tdc = tdc->next;
116 }
117 }
118 if (rdc) {
119 rdc->state = UNUSED;
120 rdc->dd = NULL;
121 rdc->pid = 0;
122 rdc->next = client_pool;
123 client_pool = rdc;
124 }
125}
126
127static int qib_diag_open(struct inode *in, struct file *fp);
128static int qib_diag_release(struct inode *in, struct file *fp);
129static ssize_t qib_diag_read(struct file *fp, char __user *data,
130 size_t count, loff_t *off);
131static ssize_t qib_diag_write(struct file *fp, const char __user *data,
132 size_t count, loff_t *off);
133
134static const struct file_operations diag_file_ops = {
135 .owner = THIS_MODULE,
136 .write = qib_diag_write,
137 .read = qib_diag_read,
138 .open = qib_diag_open,
139 .release = qib_diag_release
140};
141
142static atomic_t diagpkt_count = ATOMIC_INIT(0);
143static struct cdev *diagpkt_cdev;
144static struct device *diagpkt_device;
145
146static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
147 size_t count, loff_t *off);
148
149static const struct file_operations diagpkt_file_ops = {
150 .owner = THIS_MODULE,
151 .write = qib_diagpkt_write,
152};
153
154int qib_diag_add(struct qib_devdata *dd)
155{
156 char name[16];
157 int ret = 0;
158
159 if (atomic_inc_return(&diagpkt_count) == 1) {
160 ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
161 &diagpkt_file_ops, &diagpkt_cdev,
162 &diagpkt_device);
163 if (ret)
164 goto done;
165 }
166
167 snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
168 ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
169 &diag_file_ops, &dd->diag_cdev,
170 &dd->diag_device);
171done:
172 return ret;
173}
174
175static void qib_unregister_observers(struct qib_devdata *dd);
176
177void qib_diag_remove(struct qib_devdata *dd)
178{
179 struct qib_diag_client *dc;
180
181 if (atomic_dec_and_test(&diagpkt_count))
182 qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
183
184 qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
185
186 /*
187 * Return all diag_clients of this device. There should be none,
188 * as we are "guaranteed" that no clients are still open
189 */
190 while (dd->diag_client)
191 return_client(dd->diag_client);
192
193 /* Now clean up all unused client structs */
194 while (client_pool) {
195 dc = client_pool;
196 client_pool = dc->next;
197 kfree(dc);
198 }
199 /* Clean up observer list */
200 qib_unregister_observers(dd);
201}
202
203/* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem *
204 *
205 * @dd: the qlogic_ib device
206 * @offs: the offset in chip-space
207 * @cntp: Pointer to max (byte) count for transfer starting at offset
208 * This returns a u32 __iomem * so it can be used for both 64 and 32-bit
209 * mapping. It is needed because with the use of PAT for control of
210 * write-combining, the logically contiguous address-space of the chip
211 * may be split into virtually non-contiguous spaces, with different
212 * attributes, which are then mapped to contiguous physical space
213 * starting from the first BAR.
214 *
215 * The code below makes the same assumptions as were made in
216 * init_chip_wc_pat() (qib_init.c), copied here:
217 * Assumes chip address space looks like:
218 * - kregs + sregs + cregs + uregs (in any order)
219 * - piobufs (2K and 4K bufs in either order)
220 * or:
221 * - kregs + sregs + cregs (in any order)
222 * - piobufs (2K and 4K bufs in either order)
223 * - uregs
224 *
225 * If cntp is non-NULL, returns how many bytes from offset can be accessed
226 * Returns 0 if the offset is not mapped.
227 */
228static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
229 u32 *cntp)
230{
231 u32 kreglen;
232 u32 snd_bottom, snd_lim = 0;
233 u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
234 u32 __iomem *map = NULL;
235 u32 cnt = 0;
236
237 /* First, simplest case, offset is within the first map. */
238 kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
239 if (offset < kreglen) {
240 map = krb32 + (offset / sizeof(u32));
241 cnt = kreglen - offset;
242 goto mapped;
243 }
244
245 /*
246 * Next check for user regs, the next most common case,
247 * and a cheap check because if they are not in the first map
248 * they are last in chip.
249 */
250 if (dd->userbase) {
251 /* If user regs mapped, they are after send, so set limit. */
252 u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
253 snd_lim = dd->uregbase;
254 krb32 = (u32 __iomem *)dd->userbase;
255 if (offset >= dd->uregbase && offset < ulim) {
256 map = krb32 + (offset - dd->uregbase) / sizeof(u32);
257 cnt = ulim - offset;
258 goto mapped;
259 }
260 }
261
262 /*
263 * Lastly, check for offset within Send Buffers.
264 * This is gnarly because struct devdata is deliberately vague
265 * about things like 7322 VL15 buffers, and we are not in
266 * chip-specific code here, so should not make many assumptions.
267 * The one we _do_ make is that the only chip that has more sndbufs
268 * than we admit is the 7322, and it has userregs above that, so
269 * we know the snd_lim.
270 */
271 /* Assume 2K buffers are first. */
272 snd_bottom = dd->pio2k_bufbase;
273 if (snd_lim == 0) {
274 u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
275 snd_lim = snd_bottom + tot2k;
276 }
277 /* If 4k buffers exist, account for them by bumping
278 * appropriate limit.
279 */
280 if (dd->piobcnt4k) {
281 u32 tot4k = dd->piobcnt4k * dd->align4k;
282 u32 offs4k = dd->piobufbase >> 32;
283 if (snd_bottom > offs4k)
284 snd_bottom = offs4k;
285 else {
286			/* 4k above 2k. Bump snd_lim, if needed */
287 if (!dd->userbase)
288 snd_lim = offs4k + tot4k;
289 }
290 }
291 /*
292 * Judgement call: can we ignore the space between SendBuffs and
293 * UserRegs, where we would like to see vl15 buffs, but not more?
294 */
295 if (offset >= snd_bottom && offset < snd_lim) {
296 offset -= snd_bottom;
297 map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
298 cnt = snd_lim - offset;
299 }
300
301mapped:
302 if (cntp)
303 *cntp = cnt;
304 return map;
305}
306
307/*
308 * qib_read_umem64 - read a 64-bit quantity from the chip into user space
309 * @dd: the qlogic_ib device
310 * @uaddr: the location to store the data in user memory
311 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
312 * @count: number of bytes to copy (multiple of 32 bits)
313 *
314 * This function also localizes all chip memory accesses.
315 * The copy should be written such that we read full cacheline packets
316 * from the chip. This is usually used for a single qword
317 *
318 * NOTE: This assumes the chip address is 64-bit aligned.
319 */
320static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
321 u32 regoffs, size_t count)
322{
323 const u64 __iomem *reg_addr;
324 const u64 __iomem *reg_end;
325 u32 limit;
326 int ret;
327
328 reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
329 if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
330 ret = -EINVAL;
331 goto bail;
332 }
333 if (count >= limit)
334 count = limit;
335 reg_end = reg_addr + (count / sizeof(u64));
336
337 /* not very efficient, but it works for now */
338 while (reg_addr < reg_end) {
339 u64 data = readq(reg_addr);
340
341 if (copy_to_user(uaddr, &data, sizeof(u64))) {
342 ret = -EFAULT;
343 goto bail;
344 }
345 reg_addr++;
346 uaddr += sizeof(u64);
347 }
348 ret = 0;
349bail:
350 return ret;
351}
352
353/*
354 * qib_write_umem64 - write a 64-bit quantity to the chip from user space
355 * @dd: the qlogic_ib device
356 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
357 * @uaddr: the source of the data in user memory
358 * @count: the number of bytes to copy (multiple of 32 bits)
359 *
360 * This is usually used for a single qword
361 * NOTE: This assumes the chip address is 64-bit aligned.
362 */
363
364static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
365 const void __user *uaddr, size_t count)
366{
367 u64 __iomem *reg_addr;
368 const u64 __iomem *reg_end;
369 u32 limit;
370 int ret;
371
372 reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
373 if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
374 ret = -EINVAL;
375 goto bail;
376 }
377 if (count >= limit)
378 count = limit;
379 reg_end = reg_addr + (count / sizeof(u64));
380
381 /* not very efficient, but it works for now */
382 while (reg_addr < reg_end) {
383 u64 data;
384 if (copy_from_user(&data, uaddr, sizeof(data))) {
385 ret = -EFAULT;
386 goto bail;
387 }
388 writeq(data, reg_addr);
389
390 reg_addr++;
391 uaddr += sizeof(u64);
392 }
393 ret = 0;
394bail:
395 return ret;
396}
397
398/*
399 * qib_read_umem32 - read a 32-bit quantity from the chip into user space
400 * @dd: the qlogic_ib device
401 * @uaddr: the location to store the data in user memory
402 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
403 * @count: number of bytes to copy
404 *
405 * read 32 bit values, not 64 bit; for memories that only
406 * support 32 bit reads; usually a single dword.
407 */
408static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
409 u32 regoffs, size_t count)
410{
411 const u32 __iomem *reg_addr;
412 const u32 __iomem *reg_end;
413 u32 limit;
414 int ret;
415
416 reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
417 if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
418 ret = -EINVAL;
419 goto bail;
420 }
421 if (count >= limit)
422 count = limit;
423 reg_end = reg_addr + (count / sizeof(u32));
424
425 /* not very efficient, but it works for now */
426 while (reg_addr < reg_end) {
427 u32 data = readl(reg_addr);
428
429 if (copy_to_user(uaddr, &data, sizeof(data))) {
430 ret = -EFAULT;
431 goto bail;
432 }
433
434 reg_addr++;
435 uaddr += sizeof(u32);
436
437 }
438 ret = 0;
439bail:
440 return ret;
441}
442
443/*
444 * qib_write_umem32 - write a 32-bit quantity to the chip from user space
445 * @dd: the qlogic_ib device
446 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
447 * @uaddr: the source of the data in user memory
448 * @count: number of bytes to copy
449 *
450 * write 32 bit values, not 64 bit; for memories that only
451 * support 32 bit write; usually a single dword.
452 */
453
454static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
455 const void __user *uaddr, size_t count)
456{
457 u32 __iomem *reg_addr;
458 const u32 __iomem *reg_end;
459 u32 limit;
460 int ret;
461
462 reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
463 if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
464 ret = -EINVAL;
465 goto bail;
466 }
467 if (count >= limit)
468 count = limit;
469 reg_end = reg_addr + (count / sizeof(u32));
470
471 while (reg_addr < reg_end) {
472 u32 data;
473
474 if (copy_from_user(&data, uaddr, sizeof(data))) {
475 ret = -EFAULT;
476 goto bail;
477 }
478 writel(data, reg_addr);
479
480 reg_addr++;
481 uaddr += sizeof(u32);
482 }
483 ret = 0;
484bail:
485 return ret;
486}
487
488static int qib_diag_open(struct inode *in, struct file *fp)
489{
490 int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
491 struct qib_devdata *dd;
492 struct qib_diag_client *dc;
493 int ret;
494
495 mutex_lock(&qib_mutex);
496
497 dd = qib_lookup(unit);
498
499 if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
500 !dd->kregbase) {
501 ret = -ENODEV;
502 goto bail;
503 }
504
505 dc = get_client(dd);
506 if (!dc) {
507 ret = -ENOMEM;
508 goto bail;
509 }
510 dc->next = dd->diag_client;
511 dd->diag_client = dc;
512 fp->private_data = dc;
513 ret = 0;
514bail:
515 mutex_unlock(&qib_mutex);
516
517 return ret;
518}
519
520/**
521 * qib_diagpkt_write - write an IB packet
522 * @fp: the diag data device file pointer
523 * @data: qib_diag_pkt structure saying where to get the packet
524 * @count: size of data to write
525 * @off: unused by this code
526 */
527static ssize_t qib_diagpkt_write(struct file *fp,
528 const char __user *data,
529 size_t count, loff_t *off)
530{
531 u32 __iomem *piobuf;
532 u32 plen, clen, pbufn;
533 struct qib_diag_xpkt dp;
534 u32 *tmpbuf = NULL;
535 struct qib_devdata *dd;
536 struct qib_pportdata *ppd;
537 ssize_t ret = 0;
538
539 if (count != sizeof(dp)) {
540 ret = -EINVAL;
541 goto bail;
542 }
543 if (copy_from_user(&dp, data, sizeof(dp))) {
544 ret = -EFAULT;
545 goto bail;
546 }
547
548 dd = qib_lookup(dp.unit);
549 if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
550 ret = -ENODEV;
551 goto bail;
552 }
553 if (!(dd->flags & QIB_INITTED)) {
554 /* no hardware, freeze, etc. */
555 ret = -ENODEV;
556 goto bail;
557 }
558
559 if (dp.version != _DIAG_XPKT_VERS) {
560 qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
561 dp.version);
562 ret = -EINVAL;
563 goto bail;
564 }
565 /* send count must be an exact number of dwords */
566 if (dp.len & 3) {
567 ret = -EINVAL;
568 goto bail;
569 }
570 if (!dp.port || dp.port > dd->num_pports) {
571 ret = -EINVAL;
572 goto bail;
573 }
574 ppd = &dd->pport[dp.port - 1];
575
576 /* need total length before first word written */
577 /* +1 word is for the qword padding */
578 plen = sizeof(u32) + dp.len;
579 clen = dp.len >> 2;
580
581 if ((plen + 4) > ppd->ibmaxlen) {
582 ret = -EINVAL;
583 goto bail; /* before writing pbc */
584 }
585 tmpbuf = vmalloc(plen);
586 if (!tmpbuf) {
587 qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, "
588 "failing\n");
589 ret = -ENOMEM;
590 goto bail;
591 }
592
593 if (copy_from_user(tmpbuf,
594 (const void __user *) (unsigned long) dp.data,
595 dp.len)) {
596 ret = -EFAULT;
597 goto bail;
598 }
599
600 plen >>= 2; /* in dwords */
601
602 if (dp.pbc_wd == 0)
603 dp.pbc_wd = plen;
604
605 piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
606 if (!piobuf) {
607 ret = -EBUSY;
608 goto bail;
609 }
610 /* disarm it just to be extra sure */
611 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));
612
613 /* disable header check on pbufn for this packet */
614 dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);
615
616 writeq(dp.pbc_wd, piobuf);
617 /*
618 * Copy all but the trigger word, then flush, so it's written
619 * to chip before trigger word, then write trigger word, then
620 * flush again, so packet is sent.
621 */
622 if (dd->flags & QIB_PIO_FLUSH_WC) {
623 qib_flush_wc();
624 qib_pio_copy(piobuf + 2, tmpbuf, clen - 1);
625 qib_flush_wc();
626 __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
627 } else
628 qib_pio_copy(piobuf + 2, tmpbuf, clen);
629
630 if (dd->flags & QIB_USE_SPCL_TRIG) {
631 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
632
633 qib_flush_wc();
634 __raw_writel(0xaebecede, piobuf + spcl_off);
635 }
636
637 /*
638 * Ensure buffer is written to the chip, then re-enable
639 * header checks (if supported by chip). The txchk
640 * code will ensure seen by chip before returning.
641 */
642 qib_flush_wc();
643 qib_sendbuf_done(dd, pbufn);
644 dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
645
646 ret = sizeof(dp);
647
648bail:
649 vfree(tmpbuf);
650 return ret;
651}
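
The copy/flush/trigger sequence above (the branch taken on chips flagged QIB_PIO_FLUSH_WC) is the general pattern for write-combined PIO buffers: make everything except the last word visible to the chip, then let one final store launch the packet. Reduced to its skeleton with the same in-tree helpers; pbc, payload and dwords are stand-ins for the values computed in the function:

	writeq(pbc, piobuf);				/* PBC word first */
	qib_flush_wc();
	qib_pio_copy(piobuf + 2, payload, dwords - 1);	/* all but trigger word */
	qib_flush_wc();					/* body before trigger */
	__raw_writel(payload[dwords - 1], piobuf + dwords + 1);
	qib_flush_wc();					/* push the trigger out */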
652
653static int qib_diag_release(struct inode *in, struct file *fp)
654{
655 mutex_lock(&qib_mutex);
656 return_client(fp->private_data);
657 fp->private_data = NULL;
658 mutex_unlock(&qib_mutex);
659 return 0;
660}
661
662/*
663 * Chip-specific code calls to register its interest in
664 * a specific range.
665 */
666struct diag_observer_list_elt {
667 struct diag_observer_list_elt *next;
668 const struct diag_observer *op;
669};
670
671int qib_register_observer(struct qib_devdata *dd,
672 const struct diag_observer *op)
673{
674	struct diag_observer_list_elt *olp;
675	unsigned long flags;
676	int ret = -EINVAL;
677
678	if (!dd || !op)
679		goto bail;
680
681	ret = -ENOMEM;
682	olp = vmalloc(sizeof(*olp));
683	if (!olp) {
684		printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n");
685		goto bail;
686	}
687
688	/* olp cannot be NULL here; the allocation failure bailed above */
689	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
690	olp->op = op;
691	olp->next = dd->diag_observer_list;
692	dd->diag_observer_list = olp;
693	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
694	ret = 0;
695bail:
696	return ret;
697}
698
699/* Remove all registered observers when device is closed */
700static void qib_unregister_observers(struct qib_devdata *dd)
701{
702 struct diag_observer_list_elt *olp;
703 unsigned long flags;
704
705 spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
706 olp = dd->diag_observer_list;
707 while (olp) {
708 /* Pop one observer, let go of lock */
709 dd->diag_observer_list = olp->next;
710 spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
711 vfree(olp);
712 /* try again. */
713 spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
714 olp = dd->diag_observer_list;
715 }
716 spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
717}
718
719/*
720 * Find the observer, if any, for the specified address. Initial implementation
721 * is simple stack of observers. This must be called with diag transaction
722 * lock held.
723 */
724static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
725 u32 addr)
726{
727 struct diag_observer_list_elt *olp;
728 const struct diag_observer *op = NULL;
729
730 olp = dd->diag_observer_list;
731 while (olp) {
732 op = olp->op;
733 if (addr >= op->bottom && addr <= op->top)
734 break;
735 olp = olp->next;
736 }
737 if (!olp)
738 op = NULL;
739
740 return op;
741}
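
Chip-specific code then registers an observer roughly as below. The hook signature is inferred from the calls in qib_diag_read()/qib_diag_write() in this file (mask 0 for reads, ~0 for writes, called with the diag transaction lock held); the real typedef and struct diag_observer live in qib.h, and MY_REG_BASE is a hypothetical register range:

	static int my_hook(struct qib_devdata *dd, const struct diag_observer *op,
			   u32 offs, u64 *data, u64 mask, int only_32)
	{
		if (!mask)		/* read: fill *data from shadow state */
			*data = 0;
		/* else a write: apply *data; the lock is already held */
		return only_32 ? 4 : 8;	/* bytes handled */
	}

	static const struct diag_observer my_observer = {
		.hook   = my_hook,
		.bottom = MY_REG_BASE,
		.top    = MY_REG_BASE + 8,
	};

	/* during chip-specific init: */
	ret = qib_register_observer(dd, &my_observer);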
742
743static ssize_t qib_diag_read(struct file *fp, char __user *data,
744 size_t count, loff_t *off)
745{
746 struct qib_diag_client *dc = fp->private_data;
747 struct qib_devdata *dd = dc->dd;
748 void __iomem *kreg_base;
749 ssize_t ret;
750
751 if (dc->pid != current->pid) {
752 ret = -EPERM;
753 goto bail;
754 }
755
756 kreg_base = dd->kregbase;
757
758 if (count == 0)
759 ret = 0;
760 else if ((count % 4) || (*off % 4))
761 /* address or length is not 32-bit aligned, hence invalid */
762 ret = -EINVAL;
763 else if (dc->state < READY && (*off || count != 8))
764 ret = -EINVAL; /* prevent cat /dev/qib_diag* */
765 else {
766 unsigned long flags;
767 u64 data64 = 0;
768 int use_32;
769 const struct diag_observer *op;
770
771 use_32 = (count % 8) || (*off % 8);
772 ret = -1;
773 spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
774 /*
775 * Check for observer on this address range.
776 * we only support a single 32 or 64-bit read
777 * via observer, currently.
778 */
779 op = diag_get_observer(dd, *off);
780 if (op) {
781 u32 offset = *off;
782 ret = op->hook(dd, op, offset, &data64, 0, use_32);
783 }
784 /*
785 * We need to release lock before any copy_to_user(),
786 * whether implicit in qib_read_umem* or explicit below.
787 */
788 spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
789 if (!op) {
790 if (use_32)
791 /*
792 * Address or length is not 64-bit aligned;
793 * do 32-bit rd
794 */
795 ret = qib_read_umem32(dd, data, (u32) *off,
796 count);
797 else
798 ret = qib_read_umem64(dd, data, (u32) *off,
799 count);
800 } else if (ret == count) {
801 /* Below finishes case where observer existed */
802 ret = copy_to_user(data, &data64, use_32 ?
803 sizeof(u32) : sizeof(u64));
804 if (ret)
805 ret = -EFAULT;
806 }
807 }
808
809 if (ret >= 0) {
810 *off += count;
811 ret = count;
812 if (dc->state == OPENED)
813 dc->state = INIT;
814 }
815bail:
816 return ret;
817}
818
819static ssize_t qib_diag_write(struct file *fp, const char __user *data,
820 size_t count, loff_t *off)
821{
822 struct qib_diag_client *dc = fp->private_data;
823 struct qib_devdata *dd = dc->dd;
824 void __iomem *kreg_base;
825 ssize_t ret;
826
827 if (dc->pid != current->pid) {
828 ret = -EPERM;
829 goto bail;
830 }
831
832 kreg_base = dd->kregbase;
833
834 if (count == 0)
835 ret = 0;
836 else if ((count % 4) || (*off % 4))
837 /* address or length is not 32-bit aligned, hence invalid */
838 ret = -EINVAL;
839 else if (dc->state < READY &&
840 ((*off || count != 8) || dc->state != INIT))
841 /* No writes except second-step of init seq */
842 ret = -EINVAL; /* before any other write allowed */
843 else {
844 unsigned long flags;
845 const struct diag_observer *op = NULL;
846 int use_32 = (count % 8) || (*off % 8);
847
848 /*
849 * Check for observer on this address range.
850 * We only support a single 32 or 64-bit write
851 * via observer, currently. This helps, because
852 * we would otherwise have to jump through hoops
853 * to make "diag transaction" meaningful when we
854 * cannot do a copy_from_user while holding the lock.
855 */
856 if (count == 4 || count == 8) {
857 u64 data64;
858 u32 offset = *off;
859 ret = copy_from_user(&data64, data, count);
860 if (ret) {
861 ret = -EFAULT;
862 goto bail;
863 }
864 spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
865 op = diag_get_observer(dd, *off);
866 if (op)
867				ret = op->hook(dd, op, offset, &data64, ~0ULL,
868 use_32);
869 spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
870 }
871
872 if (!op) {
873 if (use_32)
874 /*
875 * Address or length is not 64-bit aligned;
876 * do 32-bit write
877 */
878 ret = qib_write_umem32(dd, (u32) *off, data,
879 count);
880 else
881 ret = qib_write_umem64(dd, (u32) *off, data,
882 count);
883 }
884 }
885
886 if (ret >= 0) {
887 *off += count;
888 ret = count;
889 if (dc->state == INIT)
890 dc->state = READY; /* all read/write OK now */
891 }
892bail:
893 return ret;
894}
diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c
new file mode 100644
index 000000000000..2920bb39a65b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_dma.c
@@ -0,0 +1,182 @@
1/*
2 * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/types.h>
33#include <linux/scatterlist.h>
34
35#include "qib_verbs.h"
36
37#define BAD_DMA_ADDRESS ((u64) 0)
38
39/*
40 * The following functions implement driver specific replacements
41 * for the ib_dma_*() functions.
42 *
43 * These functions return kernel virtual addresses instead of
44 * device bus addresses since the driver uses the CPU to copy
45 * data instead of using hardware DMA.
46 */
47
48static int qib_mapping_error(struct ib_device *dev, u64 dma_addr)
49{
50 return dma_addr == BAD_DMA_ADDRESS;
51}
52
53static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr,
54 size_t size, enum dma_data_direction direction)
55{
56 BUG_ON(!valid_dma_direction(direction));
57 return (u64) cpu_addr;
58}
59
60static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
61 enum dma_data_direction direction)
62{
63 BUG_ON(!valid_dma_direction(direction));
64}
65
66static u64 qib_dma_map_page(struct ib_device *dev, struct page *page,
67 unsigned long offset, size_t size,
68 enum dma_data_direction direction)
69{
70 u64 addr;
71
72 BUG_ON(!valid_dma_direction(direction));
73
74 if (offset + size > PAGE_SIZE) {
75 addr = BAD_DMA_ADDRESS;
76 goto done;
77 }
78
79 addr = (u64) page_address(page);
80 if (addr)
81 addr += offset;
82 /* TODO: handle highmem pages */
83
84done:
85 return addr;
86}
87
88static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
89 enum dma_data_direction direction)
90{
91 BUG_ON(!valid_dma_direction(direction));
92}
93
94static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl,
95 int nents, enum dma_data_direction direction)
96{
97 struct scatterlist *sg;
98 u64 addr;
99 int i;
100 int ret = nents;
101
102 BUG_ON(!valid_dma_direction(direction));
103
104 for_each_sg(sgl, sg, nents, i) {
105 addr = (u64) page_address(sg_page(sg));
106 /* TODO: handle highmem pages */
107 if (!addr) {
108 ret = 0;
109 break;
110 }
111 }
112 return ret;
113}
114
115static void qib_unmap_sg(struct ib_device *dev,
116 struct scatterlist *sg, int nents,
117 enum dma_data_direction direction)
118{
119 BUG_ON(!valid_dma_direction(direction));
120}
121
122static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
123{
124 u64 addr = (u64) page_address(sg_page(sg));
125
126 if (addr)
127 addr += sg->offset;
128 return addr;
129}
130
131static unsigned int qib_sg_dma_len(struct ib_device *dev,
132 struct scatterlist *sg)
133{
134 return sg->length;
135}
136
137static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr,
138 size_t size, enum dma_data_direction dir)
139{
140}
141
142static void qib_sync_single_for_device(struct ib_device *dev, u64 addr,
143 size_t size,
144 enum dma_data_direction dir)
145{
146}
147
148static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size,
149 u64 *dma_handle, gfp_t flag)
150{
151 struct page *p;
152 void *addr = NULL;
153
154 p = alloc_pages(flag, get_order(size));
155 if (p)
156 addr = page_address(p);
157 if (dma_handle)
158 *dma_handle = (u64) addr;
159 return addr;
160}
161
162static void qib_dma_free_coherent(struct ib_device *dev, size_t size,
163 void *cpu_addr, u64 dma_handle)
164{
165 free_pages((unsigned long) cpu_addr, get_order(size));
166}
167
168struct ib_dma_mapping_ops qib_dma_mapping_ops = {
169 .mapping_error = qib_mapping_error,
170 .map_single = qib_dma_map_single,
171 .unmap_single = qib_dma_unmap_single,
172 .map_page = qib_dma_map_page,
173 .unmap_page = qib_dma_unmap_page,
174 .map_sg = qib_map_sg,
175 .unmap_sg = qib_unmap_sg,
176 .dma_address = qib_sg_dma_address,
177 .dma_len = qib_sg_dma_len,
178 .sync_single_for_cpu = qib_sync_single_for_cpu,
179 .sync_single_for_device = qib_sync_single_for_device,
180 .alloc_coherent = qib_dma_alloc_coherent,
181 .free_coherent = qib_dma_free_coherent
182};
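
These ops take effect once the verbs core is pointed at them; in this kernel generation, struct ib_device carries a dma_ops pointer that the ib_dma_*() inline wrappers consult before falling back to real DMA mapping. A sketch of the hookup, assumed to happen in this driver's device-registration path:

	ibdev->dma_ops = &qib_dma_mapping_ops;

	/* afterwards, e.g. ib_dma_map_single(ibdev, buf, len, dir)
	 * dispatches to qib_dma_map_single() and simply returns (u64) buf */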
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
new file mode 100644
index 000000000000..f15ce076ac49
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -0,0 +1,665 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/spinlock.h>
35#include <linux/pci.h>
36#include <linux/io.h>
37#include <linux/delay.h>
38#include <linux/netdevice.h>
39#include <linux/vmalloc.h>
40
41#include "qib.h"
42
43/*
44 * The size has to be longer than this string, so we can append
45 * board/chip information to it in the init code.
46 */
47const char ib_qib_version[] = QIB_IDSTR "\n";
48
49DEFINE_SPINLOCK(qib_devs_lock);
50LIST_HEAD(qib_dev_list);
51DEFINE_MUTEX(qib_mutex); /* general driver use */
52
53unsigned qib_ibmtu;
54module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
55MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");
56
57unsigned qib_compat_ddr_negotiate = 1;
58module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
59 S_IWUSR | S_IRUGO);
60MODULE_PARM_DESC(compat_ddr_negotiate,
61 "Attempt pre-IBTA 1.2 DDR speed negotiation");
62
63MODULE_LICENSE("Dual BSD/GPL");
64MODULE_AUTHOR("QLogic <support@qlogic.com>");
65MODULE_DESCRIPTION("QLogic IB driver");
66
67/*
68 * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
69 * PIO send buffers. This is well beyond anything currently
70 * defined in the InfiniBand spec.
71 */
72#define QIB_PIO_MAXIBHDR 128
73
74struct qlogic_ib_stats qib_stats;
75
76const char *qib_get_unit_name(int unit)
77{
78 static char iname[16];
79
80 snprintf(iname, sizeof iname, "infinipath%u", unit);
81 return iname;
82}
83
84/*
85 * Return count of units with at least one port ACTIVE.
86 */
87int qib_count_active_units(void)
88{
89 struct qib_devdata *dd;
90 struct qib_pportdata *ppd;
91 unsigned long flags;
92 int pidx, nunits_active = 0;
93
94 spin_lock_irqsave(&qib_devs_lock, flags);
95 list_for_each_entry(dd, &qib_dev_list, list) {
96 if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
97 continue;
98 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
99 ppd = dd->pport + pidx;
100 if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
101 QIBL_LINKARMED | QIBL_LINKACTIVE))) {
102 nunits_active++;
103 break;
104 }
105 }
106 }
107 spin_unlock_irqrestore(&qib_devs_lock, flags);
108 return nunits_active;
109}
110
111/*
112 * Return count of all units, optionally return in arguments
113 * the number of usable (present) units, and the number of
114 * ports that are up.
115 */
116int qib_count_units(int *npresentp, int *nupp)
117{
118 int nunits = 0, npresent = 0, nup = 0;
119 struct qib_devdata *dd;
120 unsigned long flags;
121 int pidx;
122 struct qib_pportdata *ppd;
123
124 spin_lock_irqsave(&qib_devs_lock, flags);
125
126 list_for_each_entry(dd, &qib_dev_list, list) {
127 nunits++;
128 if ((dd->flags & QIB_PRESENT) && dd->kregbase)
129 npresent++;
130 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
131 ppd = dd->pport + pidx;
132 if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
133 QIBL_LINKARMED | QIBL_LINKACTIVE)))
134 nup++;
135 }
136 }
137
138 spin_unlock_irqrestore(&qib_devs_lock, flags);
139
140 if (npresentp)
141 *npresentp = npresent;
142 if (nupp)
143 *nupp = nup;
144
145 return nunits;
146}
147
148/**
149 * qib_wait_linkstate - wait for an IB link state change to occur
150 * @ppd: the qlogic_ib per-port data
151 * @state: the state to wait for
152 * @msecs: the number of milliseconds to wait
153 *
154 * Wait up to msecs milliseconds for an IB link state change to occur.
155 * For now, take the easy polling route. Currently used only by
156 * qib_set_linkstate. Returns 0 if the state is reached, otherwise
157 * -ETIMEDOUT. The state argument can have multiple state bits set, to
158 * match any of several transitions.
159 */
160int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
161{
162 int ret;
163 unsigned long flags;
164
165 spin_lock_irqsave(&ppd->lflags_lock, flags);
166 if (ppd->state_wanted) {
167 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
168 ret = -EBUSY;
169 goto bail;
170 }
171 ppd->state_wanted = state;
172 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
173 wait_event_interruptible_timeout(ppd->state_wait,
174 (ppd->lflags & state),
175 msecs_to_jiffies(msecs));
176 spin_lock_irqsave(&ppd->lflags_lock, flags);
177 ppd->state_wanted = 0;
178 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
179
180 if (!(ppd->lflags & state))
181 ret = -ETIMEDOUT;
182 else
183 ret = 0;
184bail:
185 return ret;
186}
187
188int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
189{
190 u32 lstate;
191 int ret;
192 struct qib_devdata *dd = ppd->dd;
193 unsigned long flags;
194
195 switch (newstate) {
196 case QIB_IB_LINKDOWN_ONLY:
197 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
198 IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
199 /* don't wait */
200 ret = 0;
201 goto bail;
202
203 case QIB_IB_LINKDOWN:
204 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
205 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
206 /* don't wait */
207 ret = 0;
208 goto bail;
209
210 case QIB_IB_LINKDOWN_SLEEP:
211 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
212 IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
213 /* don't wait */
214 ret = 0;
215 goto bail;
216
217 case QIB_IB_LINKDOWN_DISABLE:
218 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
219 IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
220 /* don't wait */
221 ret = 0;
222 goto bail;
223
224 case QIB_IB_LINKARM:
225 if (ppd->lflags & QIBL_LINKARMED) {
226 ret = 0;
227 goto bail;
228 }
229 if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
230 ret = -EINVAL;
231 goto bail;
232 }
233 /*
234 * Since the port can be ACTIVE when we ask for ARMED,
235 * clear QIBL_LINKV so we can wait for a transition.
236 * If the link isn't ARMED, then something else happened
237 * and there is no point waiting for ARMED.
238 */
239 spin_lock_irqsave(&ppd->lflags_lock, flags);
240 ppd->lflags &= ~QIBL_LINKV;
241 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
242 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
243 IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
244 lstate = QIBL_LINKV;
245 break;
246
247 case QIB_IB_LINKACTIVE:
248 if (ppd->lflags & QIBL_LINKACTIVE) {
249 ret = 0;
250 goto bail;
251 }
252 if (!(ppd->lflags & QIBL_LINKARMED)) {
253 ret = -EINVAL;
254 goto bail;
255 }
256 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
257 IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
258 lstate = QIBL_LINKACTIVE;
259 break;
260
261 default:
262 ret = -EINVAL;
263 goto bail;
264 }
265 ret = qib_wait_linkstate(ppd, lstate, 10);
266
267bail:
268 return ret;
269}
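
/*
 * Editor's sketch (not part of the original commit): a hypothetical
 * caller bringing a port up would typically step through ARMED and then
 * ACTIVE, letting qib_set_linkstate() do the waiting via
 * qib_wait_linkstate(); example_bring_up() below is illustrative only.
 */
static int example_bring_up(struct qib_pportdata *ppd)
{
	int ret = qib_set_linkstate(ppd, QIB_IB_LINKARM);

	if (!ret)
		ret = qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
	return ret;	/* 0 on success, a negative errno otherwise */
}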
270
271/*
272 * Get address of eager buffer from its index (allocated in chunks, not
273 * contiguous).
274 */
275static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
276{
277 const u32 chunk = etail / rcd->rcvegrbufs_perchunk;
278 const u32 idx = etail % rcd->rcvegrbufs_perchunk;
279
280 return rcd->rcvegrbuf[chunk] + idx * rcd->dd->rcvegrbufsize;
281}
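
/*
 * Editor's illustration (not part of the original commit): with, say,
 * rcvegrbufs_perchunk == 32 and rcvegrbufsize == 4096, eager index
 * etail == 70 maps to chunk 2, slot 6, i.e. byte offset 6 * 4096 into
 * rcvegrbuf[2]. The same arithmetic, standalone:
 */
static inline void *egrbuf_math_example(void **chunks, u32 perchunk,
					u32 bufsize, u32 etail)
{
	u32 chunk = etail / perchunk;	/* which vmalloc'ed chunk */
	u32 idx = etail % perchunk;	/* slot within that chunk */

	return (char *)chunks[chunk] + (size_t)idx * bufsize;
}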
282
283/*
284 * Returns 1 if error was a CRC, else 0.
285 * Needed for some chips' synthesized error counters.
286 */
287static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
288 u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
289 struct qib_message_header *hdr)
290{
291 u32 ret = 0;
292
293 if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
294 ret = 1;
295 return ret;
296}
297
298/*
299 * qib_kreceive - receive a packet
300 * @rcd: the qlogic_ib context
301 * @llic: gets count of good packets needed to clear lli,
302 * (used with chips that need to track crcs for lli)
303 *
304 * Called from the interrupt handler for errors or receive interrupts.
305 * Returns number of CRC error packets, needed by some chips for
306 * local link integrity tracking. crcs are adjusted down by following
307 * good packets, if any, and count of good packets is also tracked.
308 */
309u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
310{
311 struct qib_devdata *dd = rcd->dd;
312 struct qib_pportdata *ppd = rcd->ppd;
313 __le32 *rhf_addr;
314 void *ebuf;
315 const u32 rsize = dd->rcvhdrentsize; /* words */
316 const u32 maxcnt = dd->rcvhdrcnt * rsize; /* words */
317 u32 etail = -1, l, hdrqtail;
318 struct qib_message_header *hdr;
319 u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
320 int last;
321 u64 lval;
322 struct qib_qp *qp, *nqp;
323
324 l = rcd->head;
325 rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
326 if (dd->flags & QIB_NODMA_RTAIL) {
327 u32 seq = qib_hdrget_seq(rhf_addr);
328 if (seq != rcd->seq_cnt)
329 goto bail;
330 hdrqtail = 0;
331 } else {
332 hdrqtail = qib_get_rcvhdrtail(rcd);
333 if (l == hdrqtail)
334 goto bail;
335 smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
336 }
337
338 for (last = 0, i = 1; !last; i += !last) {
339 hdr = dd->f_get_msgheader(dd, rhf_addr);
340 eflags = qib_hdrget_err_flags(rhf_addr);
341 etype = qib_hdrget_rcv_type(rhf_addr);
342 /* total length */
343 tlen = qib_hdrget_length_in_bytes(rhf_addr);
344 ebuf = NULL;
345 if ((dd->flags & QIB_NODMA_RTAIL) ?
346 qib_hdrget_use_egr_buf(rhf_addr) :
347 (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
348 etail = qib_hdrget_index(rhf_addr);
349 updegr = 1;
350 if (tlen > sizeof(*hdr) ||
351 etype >= RCVHQ_RCV_TYPE_NON_KD)
352 ebuf = qib_get_egrbuf(rcd, etail);
353 }
354 if (!eflags) {
355 u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;
356
357 if (lrh_len != tlen) {
358 qib_stats.sps_lenerrs++;
359 goto move_along;
360 }
361 }
362 if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
363 ebuf == NULL &&
364 tlen > (dd->rcvhdrentsize - 2 + 1 -
365 qib_hdrget_offset(rhf_addr)) << 2) {
366 goto move_along;
367 }
368
369 /*
370 * Both tiderr and qibhdrerr are set for all plain IB
371 * packets; only qibhdrerr should be set.
372 */
373 if (unlikely(eflags))
374 crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
375 etail, rhf_addr, hdr);
376 else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
377 qib_ib_rcv(rcd, hdr, ebuf, tlen);
378 if (crcs)
379 crcs--;
380 else if (llic && *llic)
381 --*llic;
382 }
383move_along:
384 l += rsize;
385 if (l >= maxcnt)
386 l = 0;
387 rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
388 if (dd->flags & QIB_NODMA_RTAIL) {
389 u32 seq = qib_hdrget_seq(rhf_addr);
390
391 if (++rcd->seq_cnt > 13)
392 rcd->seq_cnt = 1;
393 if (seq != rcd->seq_cnt)
394 last = 1;
395 } else if (l == hdrqtail)
396 last = 1;
397 /*
398 * Update head regs etc., every 16 packets, if not last pkt,
399 * to help prevent rcvhdrq overflows, when many packets
400 * are processed and queue is nearly full.
401 * Don't request an interrupt for intermediate updates.
402 */
403 lval = l;
404 if (!last && !(i & 0xf)) {
405 dd->f_update_usrhead(rcd, lval, updegr, etail);
406 updegr = 0;
407 }
408 }
409
410 rcd->head = l;
411 rcd->pkt_count += i;
412
413 /*
414 * Iterate over all QPs waiting to respond.
415 * The list won't change since the IRQ is only run on one CPU.
416 */
417 list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
418 list_del_init(&qp->rspwait);
419 if (qp->r_flags & QIB_R_RSP_NAK) {
420 qp->r_flags &= ~QIB_R_RSP_NAK;
421 qib_send_rc_ack(qp);
422 }
423 if (qp->r_flags & QIB_R_RSP_SEND) {
424 unsigned long flags;
425
426 qp->r_flags &= ~QIB_R_RSP_SEND;
427 spin_lock_irqsave(&qp->s_lock, flags);
428 if (ib_qib_state_ops[qp->state] &
429 QIB_PROCESS_OR_FLUSH_SEND)
430 qib_schedule_send(qp);
431 spin_unlock_irqrestore(&qp->s_lock, flags);
432 }
433 if (atomic_dec_and_test(&qp->refcount))
434 wake_up(&qp->wait);
435 }
436
437bail:
438 /* Report number of packets consumed */
439 if (npkts)
440 *npkts = i;
441
442 /*
443 * Always write head at end, and setup rcv interrupt, even
444 * if no packets were processed.
445 */
446 lval = (u64)rcd->head | dd->rhdrhead_intr_off;
447 dd->f_update_usrhead(rcd, lval, updegr, etail);
448 return crcs;
449}
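
/*
 * Editor's note (illustrative, not from the original commit): in
 * QIB_NODMA_RTAIL mode there is no DMA'ed tail pointer; each header
 * instead carries a sequence number cycling 1..13, and the loop above
 * stops at the first entry whose sequence no longer matches the
 * expected value. The wrap rule used above, in isolation:
 */
static inline u32 next_rhf_seq_example(u32 seq)
{
	return (seq >= 13) ? 1 : seq + 1;	/* sequence cycles 1..13 */
}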
450
451/**
452 * qib_set_mtu - set the MTU
453 * @ppd: the per-port data
454 * @arg: the new MTU
455 *
456 * We can handle "any" incoming size, the issue here is whether we
457 * need to restrict our outgoing size. For now, we don't do any
458 * sanity checking on this, and we don't deal with what happens to
459 * programs that are already running when the size changes.
460 * NOTE: changing the MTU will usually cause the IBC to go back to
461 * link INIT state...
462 */
463int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
464{
465 u32 piosize;
466 int ret, chk;
467
468 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
469 arg != 4096) {
470 ret = -EINVAL;
471 goto bail;
472 }
473 chk = ib_mtu_enum_to_int(qib_ibmtu);
474 if (chk > 0 && arg > chk) {
475 ret = -EINVAL;
476 goto bail;
477 }
478
479 piosize = ppd->ibmaxlen;
480 ppd->ibmtu = arg;
481
482 if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
483 /* Only if it's not the initial value (or reset to it) */
484 if (piosize != ppd->init_ibmaxlen) {
485 if (arg > piosize && arg <= ppd->init_ibmaxlen)
486 piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
487 ppd->ibmaxlen = piosize;
488 }
489 } else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
490 piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
491 ppd->ibmaxlen = piosize;
492 }
493
494 ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);
495
496 ret = 0;
497
498bail:
499 return ret;
500}
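
/*
 * Editor's worked example (not part of the original commit): with an
 * initial ibmaxlen of 4096 + QIB_PIO_MAXIBHDR == 4224 and a requested
 * MTU of 2048, the first test fails (2048 < 4224 - 128), the second
 * succeeds (2048 + 128 != 4224), and ibmaxlen is trimmed to
 * 2048 + 128 - 2 * sizeof(u32) == 2168 bytes.
 */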
501
502int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
503{
504 struct qib_devdata *dd = ppd->dd;
505 ppd->lid = lid;
506 ppd->lmc = lmc;
507
508 dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
509 lid | (~((1U << lmc) - 1)) << 16);
510
511 qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
512 dd->unit, ppd->port, lid);
513
514 return 0;
515}
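
/*
 * Editor's worked example (not part of the original commit): the LIDLMC
 * word packs the LID in the low 16 bits and an LMC-derived mask in the
 * high 16. With lid == 0x20 and lmc == 2, ~((1U << 2) - 1) == ~3 yields
 * mask 0xFFFC, so the value written is 0xFFFC0020 and the chip accepts
 * any DLID in 0x20..0x23.
 */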
516
517/*
518 * The following deals with the "obviously simple" task of overriding the
519 * state of the LEDs, which normally indicate link physical and logical
520 * status. The complications arise in dealing with different hardware
521 * mappings, the board-dependent routine being called from interrupts,
522 * and then there's the requirement to _flash_ them.
523 */
524#define LED_OVER_FREQ_SHIFT 8
525#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
526/* Below is "non-zero" to force override, but both actual LEDs are off */
527#define LED_OVER_BOTH_OFF (8)
528
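/*
 * Editor's illustration (not part of the original commit): the override
 * word packs two 4-bit LED phase values in bits 0-7 and a blink
 * frequency in bits 8-15. For instance, freq == 32 gives a per-phase
 * time of (HZ << 4) / 32 == HZ / 2 jiffies, i.e. each phase shows for
 * half a second:
 */
static inline unsigned int led_override_val_example(u8 phase0, u8 phase1,
						    u8 freq)
{
	return (freq << LED_OVER_FREQ_SHIFT) |
	       ((phase1 & 0xF) << 4) | (phase0 & 0xF);
}
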
529static void qib_run_led_override(unsigned long opaque)
530{
531 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
532 struct qib_devdata *dd = ppd->dd;
533 int timeoff;
534 int ph_idx;
535
536 if (!(dd->flags & QIB_INITTED))
537 return;
538
539 ph_idx = ppd->led_override_phase++ & 1;
540 ppd->led_override = ppd->led_override_vals[ph_idx];
541 timeoff = ppd->led_override_timeoff;
542
543 dd->f_setextled(ppd, 1);
544 /*
545 * don't re-fire the timer if user asked for it to be off; we let
546 * it fire one more time after they turn it off, to simplify the logic
547 */
548 if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
549 mod_timer(&ppd->led_override_timer, jiffies + timeoff);
550}
551
552void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
553{
554 struct qib_devdata *dd = ppd->dd;
555 int timeoff, freq;
556
557 if (!(dd->flags & QIB_INITTED))
558 return;
559
560 /* First check if we are blinking. If not, use 1HZ polling */
561 timeoff = HZ;
562 freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
563
564 if (freq) {
565 /* For blink, set each phase from one nybble of val */
566 ppd->led_override_vals[0] = val & 0xF;
567 ppd->led_override_vals[1] = (val >> 4) & 0xF;
568 timeoff = (HZ << 4)/freq;
569 } else {
570 /* Non-blink set both phases the same. */
571 ppd->led_override_vals[0] = val & 0xF;
572 ppd->led_override_vals[1] = val & 0xF;
573 }
574 ppd->led_override_timeoff = timeoff;
575
576 /*
577 * If the timer has not already been started, do so. Use a "quick"
578 * timeout so the function will be called soon, to look at our request.
579 */
580 if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
581 /* Need to start timer */
582 init_timer(&ppd->led_override_timer);
583 ppd->led_override_timer.function = qib_run_led_override;
584 ppd->led_override_timer.data = (unsigned long) ppd;
585 ppd->led_override_timer.expires = jiffies + 1;
586 add_timer(&ppd->led_override_timer);
587 } else {
588 if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
589 mod_timer(&ppd->led_override_timer, jiffies + 1);
590 atomic_dec(&ppd->led_override_timer_active);
591 }
592}
593
594/**
595 * qib_reset_device - reset the chip if possible
596 * @unit: the device to reset
597 *
598 * Whether or not reset is successful, we attempt to re-initialize the chip
599 * (that is, much like a driver unload/reload). We clear the INITTED flag
600 * so that the various entry points will fail until we reinitialize. For
601 * now, we only allow this if no user contexts are open that use chip resources.
602 */
603int qib_reset_device(int unit)
604{
605 int ret, i;
606 struct qib_devdata *dd = qib_lookup(unit);
607 struct qib_pportdata *ppd;
608 unsigned long flags;
609 int pidx;
610
611 if (!dd) {
612 ret = -ENODEV;
613 goto bail;
614 }
615
616 qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);
617
618 if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
619 qib_devinfo(dd->pcidev, "Invalid unit number %u or "
620 "not initialized or not present\n", unit);
621 ret = -ENXIO;
622 goto bail;
623 }
624
625 spin_lock_irqsave(&dd->uctxt_lock, flags);
626 if (dd->rcd)
627 for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
628 if (!dd->rcd[i] || !dd->rcd[i]->cnt)
629 continue;
630 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
631 ret = -EBUSY;
632 goto bail;
633 }
634 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
635
636 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
637 ppd = dd->pport + pidx;
638 if (atomic_read(&ppd->led_override_timer_active)) {
639 /* Need to stop LED timer, _then_ shut off LEDs */
640 del_timer_sync(&ppd->led_override_timer);
641 atomic_set(&ppd->led_override_timer_active, 0);
642 }
643
644 /* Shut off LEDs after we are sure timer is not running */
645 ppd->led_override = LED_OVER_BOTH_OFF;
646 dd->f_setextled(ppd, 0);
647 if (dd->flags & QIB_HAS_SEND_DMA)
648 qib_teardown_sdma(ppd);
649 }
650
651 ret = dd->f_reset(dd);
652 if (ret == 1)
653 ret = qib_init(dd, 1);
654 else
655 ret = -EAGAIN;
656 if (ret)
657 qib_dev_err(dd, "Reinitialize unit %u after "
658 "reset failed with %d\n", unit, ret);
659 else
660 qib_devinfo(dd->pcidev, "Reinitialized unit %u after "
661 "resetting\n", unit);
662
663bail:
664 return ret;
665}
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
new file mode 100644
index 000000000000..92d9cfe98a68
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -0,0 +1,451 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/delay.h>
35#include <linux/pci.h>
36#include <linux/vmalloc.h>
37
38#include "qib.h"
39
40/*
41 * Functions specific to the serial EEPROM on cards handled by ib_qib.
42 * The actual serial interface code is in qib_twsi.c. This file is a client of it.
43 */
44
45/**
46 * qib_eeprom_read - receives bytes from the eeprom via I2C
47 * @dd: the qlogic_ib device
48 * @eeprom_offset: address to read from
49 * @buffer: where to store result
50 * @len: number of bytes to receive
51 */
52int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
53 void *buff, int len)
54{
55 int ret;
56
57 ret = mutex_lock_interruptible(&dd->eep_lock);
58 if (!ret) {
59 ret = qib_twsi_reset(dd);
60 if (ret)
61 qib_dev_err(dd, "EEPROM Reset for read failed\n");
62 else
63 ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
64 eeprom_offset, buff, len);
65 mutex_unlock(&dd->eep_lock);
66 }
67
68 return ret;
69}
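
/*
 * Editor's usage sketch (hypothetical, not from the original commit):
 * qib_eeprom_read() sleeps on eep_lock, so it must not be called from
 * interrupt context. Reading eight raw bytes at offset 0 might look
 * like this:
 */
static int eeprom_read_usage_example(struct qib_devdata *dd, u8 *raw)
{
	return qib_eeprom_read(dd, 0, raw, 8);	/* 0 or negative errno */
}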
70
71/*
72 * Actually update the eeprom, first doing write enable if
73 * needed, then restoring write enable state.
74 * Must be called with eep_lock held
75 */
76static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
77 const void *buf, int len)
78{
79 int ret, pwen;
80
81 pwen = dd->f_eeprom_wen(dd, 1);
82 ret = qib_twsi_reset(dd);
83 if (ret)
84 qib_dev_err(dd, "EEPROM Reset for write failed\n");
85 else
86 ret = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev,
87 offset, buf, len);
88 dd->f_eeprom_wen(dd, pwen);
89 return ret;
90}
91
92/**
93 * qib_eeprom_write - writes data to the eeprom via I2C
94 * @dd: the qlogic_ib device
95 * @eeprom_offset: where to place data
96 * @buffer: data to write
97 * @len: number of bytes to write
98 */
99int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
100 const void *buff, int len)
101{
102 int ret;
103
104 ret = mutex_lock_interruptible(&dd->eep_lock);
105 if (!ret) {
106 ret = eeprom_write_with_enable(dd, eeprom_offset, buff, len);
107 mutex_unlock(&dd->eep_lock);
108 }
109
110 return ret;
111}
112
113static u8 flash_csum(struct qib_flash *ifp, int adjust)
114{
115 u8 *ip = (u8 *) ifp;
116 u8 csum = 0, len;
117
118 /*
119 * Limit length checksummed to max length of actual data.
120 * Checksum of erased eeprom will still be bad, but we avoid
121 * reading past the end of the buffer we were passed.
122 */
123 len = ifp->if_length;
124 if (len > sizeof(struct qib_flash))
125 len = sizeof(struct qib_flash);
126 while (len--)
127 csum += *ip++;
128 csum -= ifp->if_csum;
129 csum = ~csum;
130 if (adjust)
131 ifp->if_csum = csum;
132
133 return csum;
134}
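
/*
 * Editor's note (illustrative, not from the original commit): after
 * flash_csum(ifp, 1), the bytes of the checksummed region, including
 * if_csum itself, sum to 0xFF modulo 256 (since csum == ~sum_of_others
 * == 0xFF - sum_of_others for a u8). A check of that invariant,
 * assuming if_csum lies inside the first if_length bytes:
 */
static int flash_csum_invariant_example(const struct qib_flash *ifp)
{
	const u8 *ip = (const u8 *)ifp;
	size_t len = ifp->if_length;
	u8 sum = 0;

	if (len > sizeof(struct qib_flash))
		len = sizeof(struct qib_flash);
	while (len--)
		sum += *ip++;
	return sum == 0xFF;
}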
135
136/**
137 * qib_get_eeprom_info - get the GUID et al. from the TWSI EEPROM device
138 * @dd: the qlogic_ib device
139 *
140 * We have the capability to use the nguid field, and get
141 * the guid from the first chip's flash, to use for all of them.
142 */
143void qib_get_eeprom_info(struct qib_devdata *dd)
144{
145 void *buf;
146 struct qib_flash *ifp;
147 __be64 guid;
148 int len, eep_stat;
149 u8 csum, *bguid;
150 int t = dd->unit;
151 struct qib_devdata *dd0 = qib_lookup(0);
152
153 if (t && dd0->nguid > 1 && t <= dd0->nguid) {
154 u8 oguid;
155 dd->base_guid = dd0->base_guid;
156 bguid = (u8 *) &dd->base_guid;
157
158 oguid = bguid[7];
159 bguid[7] += t;
160 if (oguid > bguid[7]) {
161 if (bguid[6] == 0xff) {
162 if (bguid[5] == 0xff) {
163 qib_dev_err(dd, "Can't set %s GUID"
164 " from base, wraps to"
165 " OUI!\n",
166 qib_get_unit_name(t));
167 dd->base_guid = 0;
168 goto bail;
169 }
170 bguid[5]++;
171 }
172 bguid[6]++;
173 }
174 dd->nguid = 1;
175 goto bail;
176 }
177
178 /*
179 * Read full flash, not just currently used part, since it may have
180 * been written with a newer definition.
181 */
182 len = sizeof(struct qib_flash);
183 buf = vmalloc(len);
184 if (!buf) {
185 qib_dev_err(dd, "Couldn't allocate memory to read %u "
186 "bytes from eeprom for GUID\n", len);
187 goto bail;
188 }
189
190 /*
191 * Use "public" eeprom read function, which does locking and
192 * figures out device. This will migrate to chip-specific.
193 */
194 eep_stat = qib_eeprom_read(dd, 0, buf, len);
195
196 if (eep_stat) {
197 qib_dev_err(dd, "Failed reading GUID from eeprom\n");
198 goto done;
199 }
200 ifp = (struct qib_flash *)buf;
201
202 csum = flash_csum(ifp, 0);
203 if (csum != ifp->if_csum) {
204 qib_devinfo(dd->pcidev, "Bad I2C flash checksum: "
205 "0x%x, not 0x%x\n", csum, ifp->if_csum);
206 goto done;
207 }
208 if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
209 *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
210 qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n",
211 *(unsigned long long *) ifp->if_guid);
212 /* don't allow GUID if all 0 or all 1's */
213 goto done;
214 }
215
216 /* complain, but allow it */
217 if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
218 qib_devinfo(dd->pcidev, "Warning, GUID %llx is "
219 "default, probably not correct!\n",
220 *(unsigned long long *) ifp->if_guid);
221
222 bguid = ifp->if_guid;
223 if (!bguid[0] && !bguid[1] && !bguid[2]) {
224 /*
225 * Original incorrect GUID format in flash; fix in
226 * core copy, by shifting up 2 octets; don't need to
227 * change top octet, since both it and shifted are 0.
228 */
229 bguid[1] = bguid[3];
230 bguid[2] = bguid[4];
231 bguid[3] = 0;
232 bguid[4] = 0;
233 guid = *(__be64 *) ifp->if_guid;
234 } else
235 guid = *(__be64 *) ifp->if_guid;
236 dd->base_guid = guid;
237 dd->nguid = ifp->if_numguid;
238 /*
239 * Things are slightly complicated by the desire to transparently
240 * support both the Pathscale 10-digit serial number and the QLogic
241 * 13-character version.
242 */
243 if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] &&
244 ((u8 *) ifp->if_sprefix)[0] != 0xFF) {
245 char *snp = dd->serial;
246
247 /*
248 * This board has a Serial-prefix, which is stored
249 * elsewhere for backward-compatibility.
250 */
251 memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
252 snp[sizeof ifp->if_sprefix] = '\0';
253 len = strlen(snp);
254 snp += len;
255 len = (sizeof dd->serial) - len;
256 if (len > sizeof ifp->if_serial)
257 len = sizeof ifp->if_serial;
258 memcpy(snp, ifp->if_serial, len);
259 } else
260 memcpy(dd->serial, ifp->if_serial,
261 sizeof ifp->if_serial);
262 if (!strstr(ifp->if_comment, "Tested successfully"))
263 qib_dev_err(dd, "Board SN %s did not pass functional "
264 "test: %s\n", dd->serial, ifp->if_comment);
265
266 memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
267 /*
268 * Power-on (actually "active") hours are kept as little-endian value
269 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
270 * atomic_t while running.
271 */
272 atomic_set(&dd->active_time, 0);
273 dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
274
275done:
276 vfree(buf);
277
278bail:;
279}
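
/*
 * Editor's sketch (simplified, not from the original commit): the
 * per-unit GUID derivation above treats the low octets of the base
 * GUID as a counter, propagating carries by hand and refusing to wrap
 * into the OUI. The carry logic, in essence:
 */
static void guid_carry_example(u8 *bguid, unsigned t)
{
	u8 old = bguid[7];

	bguid[7] += t;
	if (old > bguid[7])		/* low octet wrapped */
		if (++bguid[6] == 0)	/* carry; OUI check omitted here */
			++bguid[5];
}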
280
281/**
282 * qib_update_eeprom_log - copy active-time and error counters to eeprom
283 * @dd: the qlogic_ib device
284 *
285 * Although the time is kept as seconds in the qib_devdata struct, it is
286 * rounded to hours for re-write, as we have only 16 bits in EEPROM.
287 * First-cut code reads whole (expected) struct qib_flash, modifies,
288 * re-writes. Future direction: read/write only what we need, assuming
289 * that the EEPROM had to have been "good enough" for driver init, and
290 * if not, we aren't making it worse.
291 *
292 */
293int qib_update_eeprom_log(struct qib_devdata *dd)
294{
295 void *buf;
296 struct qib_flash *ifp;
297 int len, hi_water;
298 uint32_t new_time, new_hrs;
299 u8 csum;
300 int ret, idx;
301 unsigned long flags;
302
303 /* first, check if we actually need to do anything. */
304 ret = 0;
305 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
306 if (dd->eep_st_new_errs[idx]) {
307 ret = 1;
308 break;
309 }
310 }
311 new_time = atomic_read(&dd->active_time);
312
313 if (ret == 0 && new_time < 3600)
314 goto bail;
315
316 /*
317 * The quick-check above determined that there is something worthy
318 * of logging, so get the current contents and take a more detailed look.
319 * Read the full flash, not just the currently used part, since it may
320 * have been written with a newer definition.
321 */
322 len = sizeof(struct qib_flash);
323 buf = vmalloc(len);
324 ret = 1;
325 if (!buf) {
326 qib_dev_err(dd, "Couldn't allocate memory to read %u "
327 "bytes from eeprom for logging\n", len);
328 goto bail;
329 }
330
331 /* Grab the mutex and read current EEPROM. If we get an
332 * error, let go, but if not, keep it until we finish write.
333 */
334 ret = mutex_lock_interruptible(&dd->eep_lock);
335 if (ret) {
336 qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
337 goto free_bail;
338 }
339 ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
340 if (ret) {
341 mutex_unlock(&dd->eep_lock);
342 qib_dev_err(dd, "Unable read EEPROM for logging\n");
343 goto free_bail;
344 }
345 ifp = (struct qib_flash *)buf;
346
347 csum = flash_csum(ifp, 0);
348 if (csum != ifp->if_csum) {
349 mutex_unlock(&dd->eep_lock);
350 qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
351 csum, ifp->if_csum);
352 ret = 1;
353 goto free_bail;
354 }
355 hi_water = 0;
356 spin_lock_irqsave(&dd->eep_st_lock, flags);
357 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
358 int new_val = dd->eep_st_new_errs[idx];
359 if (new_val) {
360 /*
361 * If we have seen any errors, add to EEPROM values.
362 * We need to saturate at 0xFF (255) and we also
363 * would need to adjust the checksum if we were
364 * trying to minimize EEPROM traffic
365 * Note that we add to actual current count in EEPROM,
366 * in case it was altered while we were running.
367 */
368 new_val += ifp->if_errcntp[idx];
369 if (new_val > 0xFF)
370 new_val = 0xFF;
371 if (ifp->if_errcntp[idx] != new_val) {
372 ifp->if_errcntp[idx] = new_val;
373 hi_water = offsetof(struct qib_flash,
374 if_errcntp) + idx;
375 }
376 /*
377 * update our shadow (used to minimize EEPROM
378 * traffic), to match what we are about to write.
379 */
380 dd->eep_st_errs[idx] = new_val;
381 dd->eep_st_new_errs[idx] = 0;
382 }
383 }
384 /*
385 * Now update active-time. We would like to round to the nearest hour
386 * but unless atomic_t are sure to be proper signed ints we cannot,
387 * because we need to account for what we "transfer" to EEPROM and
388 * if we log an hour at 31 minutes, then we would need to set
389 * active_time to -29 to accurately count the _next_ hour.
390 */
391 if (new_time >= 3600) {
392 new_hrs = new_time / 3600;
393 atomic_sub((new_hrs * 3600), &dd->active_time);
394 new_hrs += dd->eep_hrs;
395 if (new_hrs > 0xFFFF)
396 new_hrs = 0xFFFF;
397 dd->eep_hrs = new_hrs;
398 if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
399 ifp->if_powerhour[0] = new_hrs & 0xFF;
400 hi_water = offsetof(struct qib_flash, if_powerhour);
401 }
402 if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
403 ifp->if_powerhour[1] = new_hrs >> 8;
404 hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
405 }
406 }
407 /*
408 * There is a tiny possibility that we could somehow fail to write
409 * the EEPROM after updating our shadows, but problems from holding
410 * the spinlock too long are a much bigger issue.
411 */
412 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
413 if (hi_water) {
414 /* we made some change to the data, update cksum and write */
415 csum = flash_csum(ifp, 1);
416 ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
417 }
418 mutex_unlock(&dd->eep_lock);
419 if (ret)
420 qib_dev_err(dd, "Failed updating EEPROM\n");
421
422free_bail:
423 vfree(buf);
424bail:
425 return ret;
426}
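
/*
 * Editor's worked example (not part of the original commit): with
 * active_time == 7400 seconds, new_hrs == 7400 / 3600 == 2 hours are
 * logged, atomic_sub(7200) leaves the 200-second remainder to count
 * toward the next hour, and eep_hrs grows by 2 (saturating at 0xFFFF).
 */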
427
428/**
429 * qib_inc_eeprom_err - increment one of the four error counters
430 * that are logged to EEPROM.
431 * @dd: the qlogic_ib device
432 * @eidx: 0..3, the counter to increment
433 * @incr: how much to add
434 *
435 * Each counter is 8-bits, and saturates at 255 (0xFF). They
436 * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
437 * is called, but it can only be called in a context that allows sleep.
438 * This function can be called even at interrupt level.
439 */
440void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
441{
442 uint new_val;
443 unsigned long flags;
444
445 spin_lock_irqsave(&dd->eep_st_lock, flags);
446 new_val = dd->eep_st_new_errs[eidx] + incr;
447 if (new_val > 255)
448 new_val = 255;
449 dd->eep_st_new_errs[eidx] = new_val;
450 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
451}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
new file mode 100644
index 000000000000..a142a9eb5226
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -0,0 +1,2317 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/pci.h>
36#include <linux/poll.h>
37#include <linux/cdev.h>
38#include <linux/swap.h>
39#include <linux/vmalloc.h>
40#include <linux/highmem.h>
41#include <linux/io.h>
42#include <linux/uio.h>
43#include <linux/jiffies.h>
44#include <asm/pgtable.h>
45#include <linux/delay.h>
46
47#include "qib.h"
48#include "qib_common.h"
49#include "qib_user_sdma.h"
50
51static int qib_open(struct inode *, struct file *);
52static int qib_close(struct inode *, struct file *);
53static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
54static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
55 unsigned long, loff_t);
56static unsigned int qib_poll(struct file *, struct poll_table_struct *);
57static int qib_mmapf(struct file *, struct vm_area_struct *);
58
59static const struct file_operations qib_file_ops = {
60 .owner = THIS_MODULE,
61 .write = qib_write,
62 .aio_write = qib_aio_write,
63 .open = qib_open,
64 .release = qib_close,
65 .poll = qib_poll,
66 .mmap = qib_mmapf
67};
68
69/*
70 * Convert kernel virtual addresses to physical addresses so they don't
71 * potentially conflict with the chip addresses used as mmap offsets.
72 * It doesn't really matter what mmap offset we use as long as we can
73 * interpret it correctly.
74 */
75static u64 cvt_kvaddr(void *p)
76{
77 struct page *page;
78 u64 paddr = 0;
79
80 page = vmalloc_to_page(p);
81 if (page)
82 paddr = page_to_pfn(page) << PAGE_SHIFT;
83
84 return paddr;
85}
86
87static int qib_get_base_info(struct file *fp, void __user *ubase,
88 size_t ubase_size)
89{
90 struct qib_ctxtdata *rcd = ctxt_fp(fp);
91 int ret = 0;
92 struct qib_base_info *kinfo = NULL;
93 struct qib_devdata *dd = rcd->dd;
94 struct qib_pportdata *ppd = rcd->ppd;
95 unsigned subctxt_cnt;
96 int shared, master;
97 size_t sz;
98
99 subctxt_cnt = rcd->subctxt_cnt;
100 if (!subctxt_cnt) {
101 shared = 0;
102 master = 0;
103 subctxt_cnt = 1;
104 } else {
105 shared = 1;
106 master = !subctxt_fp(fp);
107 }
108
109 sz = sizeof(*kinfo);
110 /* If context sharing is not requested, allow the old size structure */
111 if (!shared)
112 sz -= 7 * sizeof(u64);
113 if (ubase_size < sz) {
114 ret = -EINVAL;
115 goto bail;
116 }
117
118 kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
119 if (kinfo == NULL) {
120 ret = -ENOMEM;
121 goto bail;
122 }
123
124 ret = dd->f_get_base_info(rcd, kinfo);
125 if (ret < 0)
126 goto bail;
127
128 kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
129 kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
130 kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
131 kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
132 /*
133 * have to mmap whole thing
134 */
135 kinfo->spi_rcv_egrbuftotlen =
136 rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
137 kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
138 kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
139 rcd->rcvegrbuf_chunks;
140 kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
141 if (master)
142 kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
143 /*
144 * for this use, may be cfgctxts summed over all chips that
145 * are are configured and present
146 */
147 kinfo->spi_nctxts = dd->cfgctxts;
148 /* unit (chip/board) our context is on */
149 kinfo->spi_unit = dd->unit;
150 kinfo->spi_port = ppd->port;
151 /* for now, only a single page */
152 kinfo->spi_tid_maxsize = PAGE_SIZE;
153
154 /*
155 * Doing this per context, and based on the skip value, etc. This has
156 * to be the actual buffer size, since the protocol code treats it
157 * as an array.
158 *
159 * These have to be set to user addresses in the user code via mmap.
160 * These values are used on return to user code for the mmap target
161 * addresses only. For 32 bit, same 44 bit address problem, so use
162 * the physical address, not virtual. Before 2.6.11, using the
163 * page_address() macro worked, but in 2.6.11, even that returns the
164 * full 64 bit address (upper bits all 1's). So far, using the
165 * physical addresses (or chip offsets, for chip mapping) works, but
166 * no doubt some future kernel release will change that, and we'll be
167 * on to yet another method of dealing with this.
168 * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
169 * since the chips with non-zero rhf_offset don't normally
170 * enable tail register updates to host memory, but for testing,
171 * both can be enabled and used.
172 */
173 kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
174 kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
175 kinfo->spi_rhf_offset = dd->rhf_offset;
176 kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
177 kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
178 /* setup per-unit (not port) status area for user programs */
179 kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
180 (char *) ppd->statusp -
181 (char *) dd->pioavailregs_dma;
182 kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
183 if (!shared) {
184 kinfo->spi_piocnt = rcd->piocnt;
185 kinfo->spi_piobufbase = (u64) rcd->piobufs;
186 kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
187 } else if (master) {
188 kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
189 (rcd->piocnt % subctxt_cnt);
190 /* Master's PIO buffers are after all the slave's */
191 kinfo->spi_piobufbase = (u64) rcd->piobufs +
192 dd->palign *
193 (rcd->piocnt - kinfo->spi_piocnt);
194 } else {
195 unsigned slave = subctxt_fp(fp) - 1;
196
197 kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
198 kinfo->spi_piobufbase = (u64) rcd->piobufs +
199 dd->palign * kinfo->spi_piocnt * slave;
200 }
201
202 if (shared) {
203 kinfo->spi_sendbuf_status =
204 cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
205 /* only spi_subctxt_* fields should be set in this block! */
206 kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
207
208 kinfo->spi_subctxt_rcvegrbuf =
209 cvt_kvaddr(rcd->subctxt_rcvegrbuf);
210 kinfo->spi_subctxt_rcvhdr_base =
211 cvt_kvaddr(rcd->subctxt_rcvhdr_base);
212 }
213
214 /*
215 * All user buffers are 2KB buffers. If we ever support
216 * giving 4KB buffers to user processes, this will need some
217 * work. Can't use piobufbase directly, because it has
218 * both 2K and 4K buffer base values.
219 */
220 kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
221 dd->palign;
222 kinfo->spi_pioalign = dd->palign;
223 kinfo->spi_qpair = QIB_KD_QP;
224 /*
225 * user mode PIO buffers are always 2KB, even when 4KB can
226 * be received, and sent via the kernel; this is ibmaxlen
227 * for 2K MTU.
228 */
229 kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
230 kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
231 kinfo->spi_ctxt = rcd->ctxt;
232 kinfo->spi_subctxt = subctxt_fp(fp);
233 kinfo->spi_sw_version = QIB_KERN_SWVERSION;
234 kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
235 kinfo->spi_hw_version = dd->revision;
236
237 if (master)
238 kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
239
240 sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
241 if (copy_to_user(ubase, kinfo, sz))
242 ret = -EFAULT;
243bail:
244 kfree(kinfo);
245 return ret;
246}
247
248/**
249 * qib_tid_update - update a context TID
250 * @rcd: the context
251 * @fp: the qib device file
252 * @ti: the TID information
253 *
254 * The new implementation as of Oct 2004 is that the driver assigns
255 * the tid and returns it to the caller. To reduce search time, we
256 * keep a cursor for each context, walking the shadow tid array to find
257 * one that's not in use.
258 *
259 * For now, if we can't allocate the full list, we fail, although
260 * in the long run, we'll allocate as many as we can, and the
261 * caller will deal with that by trying the remaining pages later.
262 * That means that when we fail, we have to mark the tids as not in
263 * use again, in our shadow copy.
264 *
265 * It's up to the caller to free the tids when they are done.
266 * We'll unlock the pages as they free them.
267 *
268 * Also, right now we are locking one page at a time, but since
269 * the intended use of this routine is for a single group of
270 * virtually contiguous pages, that should change to improve
271 * performance.
272 */
273static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
274 const struct qib_tid_info *ti)
275{
276 int ret = 0, ntids;
277 u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
278 u16 *tidlist;
279 struct qib_devdata *dd = rcd->dd;
280 u64 physaddr;
281 unsigned long vaddr;
282 u64 __iomem *tidbase;
283 unsigned long tidmap[8];
284 struct page **pagep = NULL;
285 unsigned subctxt = subctxt_fp(fp);
286
287 if (!dd->pageshadow) {
288 ret = -ENOMEM;
289 goto done;
290 }
291
292 cnt = ti->tidcnt;
293 if (!cnt) {
294 ret = -EFAULT;
295 goto done;
296 }
297 ctxttid = rcd->ctxt * dd->rcvtidcnt;
298 if (!rcd->subctxt_cnt) {
299 tidcnt = dd->rcvtidcnt;
300 tid = rcd->tidcursor;
301 tidoff = 0;
302 } else if (!subctxt) {
303 tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
304 (dd->rcvtidcnt % rcd->subctxt_cnt);
305 tidoff = dd->rcvtidcnt - tidcnt;
306 ctxttid += tidoff;
307 tid = tidcursor_fp(fp);
308 } else {
309 tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
310 tidoff = tidcnt * (subctxt - 1);
311 ctxttid += tidoff;
312 tid = tidcursor_fp(fp);
313 }
314 if (cnt > tidcnt) {
315 /* make sure it all fits in tid_pg_list */
316 qib_devinfo(dd->pcidev, "Process tried to allocate %u "
317 "TIDs, only trying max (%u)\n", cnt, tidcnt);
318 cnt = tidcnt;
319 }
320 pagep = (struct page **) rcd->tid_pg_list;
321 tidlist = (u16 *) &pagep[dd->rcvtidcnt];
322 pagep += tidoff;
323 tidlist += tidoff;
324
325 memset(tidmap, 0, sizeof(tidmap));
326 /* before decrement; chip actual # */
327 ntids = tidcnt;
328 tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
329 dd->rcvtidbase +
330 ctxttid * sizeof(*tidbase));
331
332 /* virtual address of first page in transfer */
333 vaddr = ti->tidvaddr;
334 if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
335 cnt * PAGE_SIZE)) {
336 ret = -EFAULT;
337 goto done;
338 }
339 ret = qib_get_user_pages(vaddr, cnt, pagep);
340 if (ret) {
341 /*
342 * if (ret == -EBUSY)
343 * We can't continue because the pagep array won't be
344 * initialized. This should never happen,
345 * unless perhaps the user has mpin'ed the pages
346 * themselves.
347 */
348 qib_devinfo(dd->pcidev,
349 "Failed to lock addr %p, %u pages: "
350 "errno %d\n", (void *) vaddr, cnt, -ret);
351 goto done;
352 }
353 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
354 for (; ntids--; tid++) {
355 if (tid == tidcnt)
356 tid = 0;
357 if (!dd->pageshadow[ctxttid + tid])
358 break;
359 }
360 if (ntids < 0) {
361 /*
362 * Oops, wrapped all the way through their TIDs,
363 * and didn't have enough free; see comments at
364 * start of routine
365 */
366 i--; /* last tidlist[i] not filled in */
367 ret = -ENOMEM;
368 break;
369 }
370 tidlist[i] = tid + tidoff;
371 /* we "know" system pages and TID pages are same size */
372 dd->pageshadow[ctxttid + tid] = pagep[i];
373 dd->physshadow[ctxttid + tid] =
374 qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
375 PCI_DMA_FROMDEVICE);
376 /*
377 * don't need atomic or it's overhead
378 */
379 __set_bit(tid, tidmap);
380 physaddr = dd->physshadow[ctxttid + tid];
381 /* PERFORMANCE: below should almost certainly be cached */
382 dd->f_put_tid(dd, &tidbase[tid],
383 RCVHQ_RCV_TYPE_EXPECTED, physaddr);
384 /*
385 * don't check this tid in qib_ctxtshadow, since we
386 * just filled it in; start with the next one.
387 */
388 tid++;
389 }
390
391 if (ret) {
392 u32 limit;
393cleanup:
394 /* jump here if copy out of updated info failed... */
395 /* same code that's in qib_free_tid() */
396 limit = sizeof(tidmap) * BITS_PER_BYTE;
397 if (limit > tidcnt)
398 /* just in case size changes in future */
399 limit = tidcnt;
400 tid = find_first_bit((const unsigned long *)tidmap, limit);
401 for (; tid < limit; tid++) {
402 if (!test_bit(tid, tidmap))
403 continue;
404 if (dd->pageshadow[ctxttid + tid]) {
405 dma_addr_t phys;
406
407 phys = dd->physshadow[ctxttid + tid];
408 dd->physshadow[ctxttid + tid] = dd->tidinvalid;
409 /* PERFORMANCE: below should almost certainly
410 * be cached
411 */
412 dd->f_put_tid(dd, &tidbase[tid],
413 RCVHQ_RCV_TYPE_EXPECTED,
414 dd->tidinvalid);
415 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
416 PCI_DMA_FROMDEVICE);
417 dd->pageshadow[ctxttid + tid] = NULL;
418 }
419 }
420 qib_release_user_pages(pagep, cnt);
421 } else {
422 /*
423 * Copy the updated array, with qib_tid's filled in, back
424 * to user. Since we did the copy in already, this "should
425 * never fail" If it does, we have to clean up...
426 */
427 if (copy_to_user((void __user *)
428 (unsigned long) ti->tidlist,
429 tidlist, cnt * sizeof(*tidlist))) {
430 ret = -EFAULT;
431 goto cleanup;
432 }
433 if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
434 tidmap, sizeof tidmap)) {
435 ret = -EFAULT;
436 goto cleanup;
437 }
438 if (tid == tidcnt)
439 tid = 0;
440 if (!rcd->subctxt_cnt)
441 rcd->tidcursor = tid;
442 else
443 tidcursor_fp(fp) = tid;
444 }
445
446done:
447 return ret;
448}
449
450/**
451 * qib_tid_free - free a context TID
452 * @rcd: the context
453 * @subctxt: the subcontext
454 * @ti: the TID info
455 *
456 * right now we are unlocking one page at a time, but since
457 * the intended use of this routine is for a single group of
458 * virtually contiguous pages, that should change to improve
459 * performance. We check that the TID is in range for this context
460 * but otherwise don't check validity; if user has an error and
461 * frees the wrong tid, it's only their own data that can thereby
462 * be corrupted. We do check that the TID was in use, for sanity.
463 * We always use our idea of the saved address, not the address that
464 * they pass in to us.
465 */
466static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
467 const struct qib_tid_info *ti)
468{
469 int ret = 0;
470 u32 tid, ctxttid, cnt, limit, tidcnt;
471 struct qib_devdata *dd = rcd->dd;
472 u64 __iomem *tidbase;
473 unsigned long tidmap[8];
474
475 if (!dd->pageshadow) {
476 ret = -ENOMEM;
477 goto done;
478 }
479
480 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
481 sizeof tidmap)) {
482 ret = -EFAULT;
483 goto done;
484 }
485
486 ctxttid = rcd->ctxt * dd->rcvtidcnt;
487 if (!rcd->subctxt_cnt)
488 tidcnt = dd->rcvtidcnt;
489 else if (!subctxt) {
490 tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
491 (dd->rcvtidcnt % rcd->subctxt_cnt);
492 ctxttid += dd->rcvtidcnt - tidcnt;
493 } else {
494 tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
495 ctxttid += tidcnt * (subctxt - 1);
496 }
497 tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
498 dd->rcvtidbase +
499 ctxttid * sizeof(*tidbase));
500
501 limit = sizeof(tidmap) * BITS_PER_BYTE;
502 if (limit > tidcnt)
503 /* just in case size changes in future */
504 limit = tidcnt;
505 tid = find_first_bit(tidmap, limit);
506 for (cnt = 0; tid < limit; tid++) {
507 /*
508 * small optimization; if we detect a run of 3 or so without
509 * any set, use find_first_bit again. That's mainly to
510 * accelerate the case where we wrapped, so we have some at
511 * the beginning, and some at the end, and a big gap
512 * in the middle.
513 */
514 if (!test_bit(tid, tidmap))
515 continue;
516 cnt++;
517 if (dd->pageshadow[ctxttid + tid]) {
518 struct page *p;
519 dma_addr_t phys;
520
521 p = dd->pageshadow[ctxttid + tid];
522 dd->pageshadow[ctxttid + tid] = NULL;
523 phys = dd->physshadow[ctxttid + tid];
524 dd->physshadow[ctxttid + tid] = dd->tidinvalid;
525 /* PERFORMANCE: below should almost certainly be
526 * cached
527 */
528 dd->f_put_tid(dd, &tidbase[tid],
529 RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
530 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
531 PCI_DMA_FROMDEVICE);
532 qib_release_user_pages(&p, 1);
533 }
534 }
535done:
536 return ret;
537}
538
539/**
540 * qib_set_part_key - set a partition key
541 * @rcd: the context
542 * @key: the key
543 *
544 * We can have up to 4 active at a time (other than the default, which is
545 * always allowed). This is somewhat tricky, since multiple contexts may set
546 * the same key, so we reference count them, and clean up at exit. All 4
547 * partition keys are packed into a single qlogic_ib register. It's an
548 * error for a process to set the same pkey multiple times. We provide no
549 * mechanism to de-allocate a pkey at this time; we may eventually need to
550 * do that. I've used the atomic operations, and no locking, and only make
551 * a single pass through what's available. This should be more than
552 * adequate for some time. I'll think about spinlocks or the like if and as
553 * it's necessary.
554 */
555static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
556{
557 struct qib_pportdata *ppd = rcd->ppd;
558 int i, any = 0, pidx = -1;
559 u16 lkey = key & 0x7FFF;
560 int ret;
561
562 if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
563 /* nothing to do; this key always valid */
564 ret = 0;
565 goto bail;
566 }
567
568 if (!lkey) {
569 ret = -EINVAL;
570 goto bail;
571 }
572
573 /*
574 * Set the full membership bit, because it has to be
575 * set in the register or the packet, and it seems
576 * cleaner to set in the register than to force all
577 * callers to set it.
578 */
579 key |= 0x8000;
580
581 for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
582 if (!rcd->pkeys[i] && pidx == -1)
583 pidx = i;
584 if (rcd->pkeys[i] == key) {
585 ret = -EEXIST;
586 goto bail;
587 }
588 }
589 if (pidx == -1) {
590 ret = -EBUSY;
591 goto bail;
592 }
593 for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
594 if (!ppd->pkeys[i]) {
595 any++;
596 continue;
597 }
598 if (ppd->pkeys[i] == key) {
599 atomic_t *pkrefs = &ppd->pkeyrefs[i];
600
601 if (atomic_inc_return(pkrefs) > 1) {
602 rcd->pkeys[pidx] = key;
603 ret = 0;
604 goto bail;
605 } else {
606 /*
607 * lost race, decrement count, catch below
608 */
609 atomic_dec(pkrefs);
610 any++;
611 }
612 }
613 if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
614 /*
615 * It makes no sense to have both the limited and
616 * full membership PKEY set at the same time since
617 * the unlimited one will disable the limited one.
618 */
619 ret = -EEXIST;
620 goto bail;
621 }
622 }
623 if (!any) {
624 ret = -EBUSY;
625 goto bail;
626 }
627 for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
628 if (!ppd->pkeys[i] &&
629 atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
630 rcd->pkeys[pidx] = key;
631 ppd->pkeys[i] = key;
632 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
633 ret = 0;
634 goto bail;
635 }
636 }
637 ret = -EBUSY;
638
639bail:
640 return ret;
641}
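
/*
 * Editor's illustration (not part of the original commit): a pkey is 15
 * key bits plus a full-membership flag in bit 15, which is why the code
 * above ORs in 0x8000 before programming the chip. A limited key
 * 0x7FFF and its full-membership twin 0xFFFF share lkey == 0x7FFF, so
 * registering both is rejected with -EEXIST rather than letting the
 * full key silently disable the limited one.
 */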
642
643/**
644 * qib_manage_rcvq - manage a context's receive queue
645 * @rcd: the context
646 * @subctxt: the subcontext
647 * @start_stop: action to carry out
648 *
649 * start_stop == 0 disables receive on the context, for use in queue
650 * overflow conditions. start_stop==1 re-enables, to be used to
651 * re-init the software copy of the head register.
652 */
653static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
654 int start_stop)
655{
656 struct qib_devdata *dd = rcd->dd;
657 unsigned int rcvctrl_op;
658
659 if (subctxt)
660 goto bail;
661 /* atomically set or clear the context's receive enable */
662 if (start_stop) {
663 /*
664 * On enable, force in-memory copy of the tail register to
665 * 0, so that protocol code doesn't have to worry about
666 * whether or not the chip has yet updated the in-memory
667 * copy or not on return from the system call. The chip
668 * always resets its tail register back to 0 on a
669 * transition from disabled to enabled.
670 */
671 if (rcd->rcvhdrtail_kvaddr)
672 qib_clear_rcvhdrtail(rcd);
673 rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
674 } else
675 rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
676 dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
677 /* always; new head should be equal to new tail; see above */
678bail:
679 return 0;
680}
681
682static void qib_clean_part_key(struct qib_ctxtdata *rcd,
683 struct qib_devdata *dd)
684{
685 int i, j, pchanged = 0;
686 u64 oldpkey;
687 struct qib_pportdata *ppd = rcd->ppd;
688
689 /* for debugging only */
690 oldpkey = (u64) ppd->pkeys[0] |
691 ((u64) ppd->pkeys[1] << 16) |
692 ((u64) ppd->pkeys[2] << 32) |
693 ((u64) ppd->pkeys[3] << 48);
694
695 for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
696 if (!rcd->pkeys[i])
697 continue;
698 for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
699 /* check for match independent of the global bit */
700 if ((ppd->pkeys[j] & 0x7fff) !=
701 (rcd->pkeys[i] & 0x7fff))
702 continue;
703 if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
704 ppd->pkeys[j] = 0;
705 pchanged++;
706 }
707 break;
708 }
709 rcd->pkeys[i] = 0;
710 }
711 if (pchanged)
712 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
713}
714
715/* common code for the mappings on dma_alloc_coherent mem */
716static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
717 unsigned len, void *kvaddr, u32 write_ok, char *what)
718{
719 struct qib_devdata *dd = rcd->dd;
720 unsigned long pfn;
721 int ret;
722
723 if ((vma->vm_end - vma->vm_start) > len) {
724 qib_devinfo(dd->pcidev,
725 "FAIL on %s: len %lx > %x\n", what,
726 vma->vm_end - vma->vm_start, len);
727 ret = -EFAULT;
728 goto bail;
729 }
730
731 /*
732 * shared context user code requires rcvhdrq mapped r/w, others
733 * only allowed readonly mapping.
734 */
735 if (!write_ok) {
736 if (vma->vm_flags & VM_WRITE) {
737 qib_devinfo(dd->pcidev,
738 "%s must be mapped readonly\n", what);
739 ret = -EPERM;
740 goto bail;
741 }
742
743 /* don't allow them to later change with mprotect */
744 vma->vm_flags &= ~VM_MAYWRITE;
745 }
746
747 pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
748 ret = remap_pfn_range(vma, vma->vm_start, pfn,
749 len, vma->vm_page_prot);
750 if (ret)
751 qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x "
752 "bytes failed: %d\n", what, rcd->ctxt,
753 pfn, len, ret);
754bail:
755 return ret;
756}
757
758static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
759 u64 ureg)
760{
761 unsigned long phys;
762 unsigned long sz;
763 int ret;
764
765 /*
766 * This is real hardware, so use io_remap. This is the mechanism
767 * for the user process to update the head registers for their ctxt
768 * in the chip.
769 */
770 sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
771 if ((vma->vm_end - vma->vm_start) > sz) {
772 qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen "
773 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
774 ret = -EFAULT;
775 } else {
776 phys = dd->physaddr + ureg;
777 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
778
779 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
780 ret = io_remap_pfn_range(vma, vma->vm_start,
781 phys >> PAGE_SHIFT,
782 vma->vm_end - vma->vm_start,
783 vma->vm_page_prot);
784 }
785 return ret;
786}
787
788static int mmap_piobufs(struct vm_area_struct *vma,
789 struct qib_devdata *dd,
790 struct qib_ctxtdata *rcd,
791 unsigned piobufs, unsigned piocnt)
792{
793 unsigned long phys;
794 int ret;
795
796 /*
797 * When we map the PIO buffers in the chip, we want to map them as
798 * writeonly, no read possible; unfortunately, x86 doesn't allow
799 * for this in hardware, but we still prevent users from asking
800 * for it.
801 */
802 if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
803 qib_devinfo(dd->pcidev, "FAIL mmap piobufs: "
804 "reqlen %lx > PAGE\n",
805 vma->vm_end - vma->vm_start);
806 ret = -EINVAL;
807 goto bail;
808 }
809
810 phys = dd->physaddr + piobufs;
811
812#if defined(__powerpc__)
813 /* There isn't a generic way to specify writethrough mappings */
814 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
815 pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
816 pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
817#endif
818
819 /*
820	 * Don't allow them to later change the mapping to readable with
821	 * mprotect (when not initially mapped readable, as is normal).
822 */
823 vma->vm_flags &= ~VM_MAYREAD;
824 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
825
826 if (qib_wc_pat)
827 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
828
829 ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
830 vma->vm_end - vma->vm_start,
831 vma->vm_page_prot);
832bail:
833 return ret;
834}
835
836static int mmap_rcvegrbufs(struct vm_area_struct *vma,
837 struct qib_ctxtdata *rcd)
838{
839 struct qib_devdata *dd = rcd->dd;
840 unsigned long start, size;
841 size_t total_size, i;
842 unsigned long pfn;
843 int ret;
844
845 size = rcd->rcvegrbuf_size;
846 total_size = rcd->rcvegrbuf_chunks * size;
847 if ((vma->vm_end - vma->vm_start) > total_size) {
848 qib_devinfo(dd->pcidev, "FAIL on egr bufs: "
849 "reqlen %lx > actual %lx\n",
850 vma->vm_end - vma->vm_start,
851 (unsigned long) total_size);
852 ret = -EINVAL;
853 goto bail;
854 }
855
856 if (vma->vm_flags & VM_WRITE) {
857 qib_devinfo(dd->pcidev, "Can't map eager buffers as "
858 "writable (flags=%lx)\n", vma->vm_flags);
859 ret = -EPERM;
860 goto bail;
861 }
862 /* don't allow them to later change to writeable with mprotect */
863 vma->vm_flags &= ~VM_MAYWRITE;
864
865 start = vma->vm_start;
866
867 for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
868 pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
869 ret = remap_pfn_range(vma, start, pfn, size,
870 vma->vm_page_prot);
871 if (ret < 0)
872 goto bail;
873 }
874 ret = 0;
875
876bail:
877 return ret;
878}
879
880/*
881 * qib_file_vma_fault - handle a VMA page fault.
882 */
883static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
884{
885 struct page *page;
886
887 page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
888 if (!page)
889 return VM_FAULT_SIGBUS;
890
891 get_page(page);
892 vmf->page = page;
893
894 return 0;
895}
896
897static struct vm_operations_struct qib_file_vm_ops = {
898 .fault = qib_file_vma_fault,
899};
900
901static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
902 struct qib_ctxtdata *rcd, unsigned subctxt)
903{
904 struct qib_devdata *dd = rcd->dd;
905 unsigned subctxt_cnt;
906 unsigned long len;
907 void *addr;
908 size_t size;
909 int ret = 0;
910
911 subctxt_cnt = rcd->subctxt_cnt;
912 size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
913
914 /*
915 * Each process has all the subctxt uregbase, rcvhdrq, and
916 * rcvegrbufs mmapped - as an array for all the processes,
917 * and also separately for this process.
918 */
919 if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
920 addr = rcd->subctxt_uregbase;
921 size = PAGE_SIZE * subctxt_cnt;
922 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
923 addr = rcd->subctxt_rcvhdr_base;
924 size = rcd->rcvhdrq_size * subctxt_cnt;
925 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
926 addr = rcd->subctxt_rcvegrbuf;
927 size *= subctxt_cnt;
928 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
929 PAGE_SIZE * subctxt)) {
930 addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
931 size = PAGE_SIZE;
932 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
933 rcd->rcvhdrq_size * subctxt)) {
934 addr = rcd->subctxt_rcvhdr_base +
935 rcd->rcvhdrq_size * subctxt;
936 size = rcd->rcvhdrq_size;
937 } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
938 addr = rcd->user_event_mask;
939 size = PAGE_SIZE;
940 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
941 size * subctxt)) {
942 addr = rcd->subctxt_rcvegrbuf + size * subctxt;
943 /* rcvegrbufs are read-only on the slave */
944 if (vma->vm_flags & VM_WRITE) {
945 qib_devinfo(dd->pcidev,
946 "Can't map eager buffers as "
947 "writable (flags=%lx)\n", vma->vm_flags);
948 ret = -EPERM;
949 goto bail;
950 }
951 /*
952 * Don't allow permission to later change to writeable
953 * with mprotect.
954 */
955 vma->vm_flags &= ~VM_MAYWRITE;
956 } else
957 goto bail;
958 len = vma->vm_end - vma->vm_start;
959 if (len > size) {
960 ret = -EINVAL;
961 goto bail;
962 }
963
964 vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
965 vma->vm_ops = &qib_file_vm_ops;
966 vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
967 ret = 1;
968
969bail:
970 return ret;
971}
972
973/**
974 * qib_mmapf - mmap various structures into user space
975 * @fp: the file pointer
976 * @vma: the VM area
977 *
978 * We use this to have a shared buffer between the kernel and the user code
979 * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
980 * buffers in the chip. We have the open and close entries so we can bump
981 * the ref count and keep the driver from being unloaded while still mapped.
982 */
983static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
984{
985 struct qib_ctxtdata *rcd;
986 struct qib_devdata *dd;
987 u64 pgaddr, ureg;
988 unsigned piobufs, piocnt;
989 int ret, match = 1;
990
991 rcd = ctxt_fp(fp);
992 if (!rcd || !(vma->vm_flags & VM_SHARED)) {
993 ret = -EINVAL;
994 goto bail;
995 }
996 dd = rcd->dd;
997
998 /*
999	 * This maps the shared buffers set up by qib_do_user_init(), and the
1000	 * per-context user registers, into the user process. The address
1001 * referred to by vm_pgoff is the file offset passed via mmap().
1002 * For shared contexts, this is the kernel vmalloc() address of the
1003 * pages to share with the master.
1004 * For non-shared or master ctxts, this is a physical address.
1005 * We only do one mmap for each space mapped.
1006 */
1007 pgaddr = vma->vm_pgoff << PAGE_SHIFT;
1008
1009 /*
1010	 * Check for 0 in case one of the allocations failed, but the
1011	 * user called mmap anyway.
1012 */
1013 if (!pgaddr) {
1014 ret = -EINVAL;
1015 goto bail;
1016 }
1017
1018 /*
1019 * Physical addresses must fit in 40 bits for our hardware.
1020 * Check for kernel virtual addresses first, anything else must
1021 * match a HW or memory address.
1022 */
1023 ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
1024 if (ret) {
1025 if (ret > 0)
1026 ret = 0;
1027 goto bail;
1028 }
1029
1030 ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
1031 if (!rcd->subctxt_cnt) {
1032 /* ctxt is not shared */
1033 piocnt = rcd->piocnt;
1034 piobufs = rcd->piobufs;
1035 } else if (!subctxt_fp(fp)) {
1036 /* caller is the master */
1037 piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
1038 (rcd->piocnt % rcd->subctxt_cnt);
1039 piobufs = rcd->piobufs +
1040 dd->palign * (rcd->piocnt - piocnt);
1041 } else {
1042 unsigned slave = subctxt_fp(fp) - 1;
1043
1044 /* caller is a slave */
1045 piocnt = rcd->piocnt / rcd->subctxt_cnt;
1046 piobufs = rcd->piobufs + dd->palign * piocnt * slave;
1047 }
1048
1049 if (pgaddr == ureg)
1050 ret = mmap_ureg(vma, dd, ureg);
1051 else if (pgaddr == piobufs)
1052 ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
1053 else if (pgaddr == dd->pioavailregs_phys)
1054 /* in-memory copy of pioavail registers */
1055 ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1056 (void *) dd->pioavailregs_dma, 0,
1057 "pioavail registers");
1058 else if (pgaddr == rcd->rcvegr_phys)
1059 ret = mmap_rcvegrbufs(vma, rcd);
1060 else if (pgaddr == (u64) rcd->rcvhdrq_phys)
1061 /*
1062 * The rcvhdrq itself; multiple pages, contiguous
1063 * from an i/o perspective. Shared contexts need
1064 * to map r/w, so we allow writing.
1065 */
1066 ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
1067 rcd->rcvhdrq, 1, "rcvhdrq");
1068 else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
1069 /* in-memory copy of rcvhdrq tail register */
1070 ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1071 rcd->rcvhdrtail_kvaddr, 0,
1072 "rcvhdrq tail");
1073 else
1074 match = 0;
1075 if (!match)
1076 ret = -EINVAL;
1077
1078 vma->vm_private_data = NULL;
1079
1080 if (ret < 0)
1081 qib_devinfo(dd->pcidev,
1082 "mmap Failure %d: off %llx len %lx\n",
1083 -ret, (unsigned long long)pgaddr,
1084 vma->vm_end - vma->vm_start);
1085bail:
1086 return ret;
1087}
1088
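Illustration (editorial, not part of this patch): a user process receives the token offsets matched above (ureg, piobufs, rcvhdrq, and so on) in the base-info structure returned by QIB_CMD_USER_INIT, then maps each region with a single MAP_SHARED mmap(). A minimal userspace sketch; the helper name is hypothetical and error handling is elided.

#include <stdint.h>
#include <sys/mman.h>

/* Hypothetical helper; "offset" is one of the tokens qib_mmapf()
 * dispatches on, taken from the base-info structure.  MAP_SHARED is
 * mandatory: qib_mmapf() rejects non-shared mappings with -EINVAL. */
static void *qib_map_region(int fd, uint64_t offset, size_t len, int prot)
{
	void *p = mmap(NULL, len, prot, MAP_SHARED, fd, (off_t)offset);

	return (p == MAP_FAILED) ? NULL : p;
}
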
1089static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
1090 struct file *fp,
1091 struct poll_table_struct *pt)
1092{
1093 struct qib_devdata *dd = rcd->dd;
1094 unsigned pollflag;
1095
1096 poll_wait(fp, &rcd->wait, pt);
1097
1098 spin_lock_irq(&dd->uctxt_lock);
1099 if (rcd->urgent != rcd->urgent_poll) {
1100 pollflag = POLLIN | POLLRDNORM;
1101 rcd->urgent_poll = rcd->urgent;
1102 } else {
1103 pollflag = 0;
1104 set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
1105 }
1106 spin_unlock_irq(&dd->uctxt_lock);
1107
1108 return pollflag;
1109}
1110
1111static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
1112 struct file *fp,
1113 struct poll_table_struct *pt)
1114{
1115 struct qib_devdata *dd = rcd->dd;
1116 unsigned pollflag;
1117
1118 poll_wait(fp, &rcd->wait, pt);
1119
1120 spin_lock_irq(&dd->uctxt_lock);
1121 if (dd->f_hdrqempty(rcd)) {
1122 set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
1123 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
1124 pollflag = 0;
1125 } else
1126 pollflag = POLLIN | POLLRDNORM;
1127 spin_unlock_irq(&dd->uctxt_lock);
1128
1129 return pollflag;
1130}
1131
1132static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
1133{
1134 struct qib_ctxtdata *rcd;
1135 unsigned pollflag;
1136
1137 rcd = ctxt_fp(fp);
1138 if (!rcd)
1139 pollflag = POLLERR;
1140 else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
1141 pollflag = qib_poll_urgent(rcd, fp, pt);
1142 else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
1143 pollflag = qib_poll_next(rcd, fp, pt);
1144 else /* invalid */
1145 pollflag = POLLERR;
1146
1147 return pollflag;
1148}
1149
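Illustration (editorial, not part of this patch): with the poll type configured earlier via QIB_CMD_POLL_TYPE, a user process waits for receive data with ordinary poll(2), which lands in qib_poll() above. A minimal sketch; the function name is hypothetical.

#include <poll.h>

/* Block until the context has data, a timeout, or an error. */
static int qib_wait_rcv(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);

	if (n <= 0)
		return n;	/* 0 on timeout, -1 on error */
	return (pfd.revents & POLLIN) ? 1 : -1;
}
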
1150/*
1151 * Check that userland and driver are compatible for subcontexts.
1152 */
1153static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
1154{
1155 /* this code is written long-hand for clarity */
1156 if (QIB_USER_SWMAJOR != user_swmajor) {
1157 /* no promise of compatibility if major mismatch */
1158 return 0;
1159 }
1160 if (QIB_USER_SWMAJOR == 1) {
1161 switch (QIB_USER_SWMINOR) {
1162 case 0:
1163 case 1:
1164 case 2:
1165 /* no subctxt implementation so cannot be compatible */
1166 return 0;
1167 case 3:
1168 /* 3 is only compatible with itself */
1169 return user_swminor == 3;
1170 default:
1171 /* >= 4 are compatible (or are expected to be) */
1172 return user_swminor >= 4;
1173 }
1174 }
1175 /* make no promises yet for future major versions */
1176 return 0;
1177}
1178
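For reference, a sketch (editorial) of how a user library packs the version word this routine and qib_assign_ctxt() check; it assumes the QIB_USER_SWMAJOR/QIB_USER_SWMINOR constants from the driver's user API are in scope.

#include <stdint.h>

/* Illustrative only: spu_userversion carries the major version in the
 * high 16 bits and the minor in the low 16, matching the unpacking in
 * qib_assign_ctxt() below. */
static uint32_t qib_pack_userversion(void)
{
	return ((uint32_t)QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR;
}
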
1179static int init_subctxts(struct qib_devdata *dd,
1180 struct qib_ctxtdata *rcd,
1181 const struct qib_user_info *uinfo)
1182{
1183 int ret = 0;
1184 unsigned num_subctxts;
1185 size_t size;
1186
1187 /*
1188 * If the user is requesting zero subctxts,
1189 * skip the subctxt allocation.
1190 */
1191 if (uinfo->spu_subctxt_cnt <= 0)
1192 goto bail;
1193 num_subctxts = uinfo->spu_subctxt_cnt;
1194
1195 /* Check for subctxt compatibility */
1196 if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
1197 uinfo->spu_userversion & 0xffff)) {
1198 qib_devinfo(dd->pcidev,
1199 "Mismatched user version (%d.%d) and driver "
1200 "version (%d.%d) while context sharing. Ensure "
1201 "that driver and library are from the same "
1202 "release.\n",
1203 (int) (uinfo->spu_userversion >> 16),
1204 (int) (uinfo->spu_userversion & 0xffff),
1205 QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
1206 goto bail;
1207 }
1208 if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
1209 ret = -EINVAL;
1210 goto bail;
1211 }
1212
1213 rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
1214 if (!rcd->subctxt_uregbase) {
1215 ret = -ENOMEM;
1216 goto bail;
1217 }
1218 /* Note: rcd->rcvhdrq_size isn't initialized yet. */
1219 size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
1220 sizeof(u32), PAGE_SIZE) * num_subctxts;
1221 rcd->subctxt_rcvhdr_base = vmalloc_user(size);
1222 if (!rcd->subctxt_rcvhdr_base) {
1223 ret = -ENOMEM;
1224 goto bail_ureg;
1225 }
1226
1227 rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
1228 rcd->rcvegrbuf_size *
1229 num_subctxts);
1230 if (!rcd->subctxt_rcvegrbuf) {
1231 ret = -ENOMEM;
1232 goto bail_rhdr;
1233 }
1234
1235 rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
1236 rcd->subctxt_id = uinfo->spu_subctxt_id;
1237 rcd->active_slaves = 1;
1238 rcd->redirect_seq_cnt = 1;
1239 set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1240 goto bail;
1241
1242bail_rhdr:
1243 vfree(rcd->subctxt_rcvhdr_base);
1244bail_ureg:
1245 vfree(rcd->subctxt_uregbase);
1246 rcd->subctxt_uregbase = NULL;
1247bail:
1248 return ret;
1249}
1250
1251static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
1252 struct file *fp, const struct qib_user_info *uinfo)
1253{
1254 struct qib_devdata *dd = ppd->dd;
1255 struct qib_ctxtdata *rcd;
1256 void *ptmp = NULL;
1257 int ret;
1258
1259 rcd = qib_create_ctxtdata(ppd, ctxt);
1260
1261 /*
1262 * Allocate memory for use in qib_tid_update() at open to
1263 * reduce cost of expected send setup per message segment
1264 */
1265 if (rcd)
1266 ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
1267 dd->rcvtidcnt * sizeof(struct page **),
1268 GFP_KERNEL);
1269
1270 if (!rcd || !ptmp) {
1271 qib_dev_err(dd, "Unable to allocate ctxtdata "
1272 "memory, failing open\n");
1273 ret = -ENOMEM;
1274 goto bailerr;
1275 }
1276 rcd->userversion = uinfo->spu_userversion;
1277 ret = init_subctxts(dd, rcd, uinfo);
1278 if (ret)
1279 goto bailerr;
1280 rcd->tid_pg_list = ptmp;
1281 rcd->pid = current->pid;
1282 init_waitqueue_head(&dd->rcd[ctxt]->wait);
1283 strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
1284 ctxt_fp(fp) = rcd;
1285 qib_stats.sps_ctxts++;
1286 ret = 0;
1287 goto bail;
1288
1289bailerr:
1290 dd->rcd[ctxt] = NULL;
1291 kfree(rcd);
1292 kfree(ptmp);
1293bail:
1294 return ret;
1295}
1296
1297static inline int usable(struct qib_pportdata *ppd, int active_only)
1298{
1299 struct qib_devdata *dd = ppd->dd;
1300 u32 linkok = active_only ? QIBL_LINKACTIVE :
1301 (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE);
1302
1303 return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
1304 (ppd->lflags & linkok);
1305}
1306
1307static int find_free_ctxt(int unit, struct file *fp,
1308 const struct qib_user_info *uinfo)
1309{
1310 struct qib_devdata *dd = qib_lookup(unit);
1311 struct qib_pportdata *ppd = NULL;
1312 int ret;
1313 u32 ctxt;
1314
1315 if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) {
1316 ret = -ENODEV;
1317 goto bail;
1318 }
1319
1320 /*
1321	 * If the user requests a specific port, try only that one port; else
1322	 * select the "best" port below, based on context.
1323 */
1324 if (uinfo->spu_port) {
1325 ppd = dd->pport + uinfo->spu_port - 1;
1326 if (!usable(ppd, 0)) {
1327 ret = -ENETDOWN;
1328 goto bail;
1329 }
1330 }
1331
1332 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
1333 if (dd->rcd[ctxt])
1334 continue;
1335 /*
1336		 * The setting and clearing of user context rcd[x] is
1337		 * protected by the qib_mutex.
1338 */
1339 if (!ppd) {
1340 /* choose port based on ctxt, if up, else 1st up */
1341 ppd = dd->pport + (ctxt % dd->num_pports);
1342 if (!usable(ppd, 0)) {
1343 int i;
1344 for (i = 0; i < dd->num_pports; i++) {
1345 ppd = dd->pport + i;
1346 if (usable(ppd, 0))
1347 break;
1348 }
1349 if (i == dd->num_pports) {
1350 ret = -ENETDOWN;
1351 goto bail;
1352 }
1353 }
1354 }
1355 ret = setup_ctxt(ppd, ctxt, fp, uinfo);
1356 goto bail;
1357 }
1358 ret = -EBUSY;
1359
1360bail:
1361 return ret;
1362}
1363
1364static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo)
1365{
1366 struct qib_pportdata *ppd;
1367 int ret = 0, devmax;
1368 int npresent, nup;
1369 int ndev;
1370 u32 port = uinfo->spu_port, ctxt;
1371
1372 devmax = qib_count_units(&npresent, &nup);
1373
1374 for (ndev = 0; ndev < devmax; ndev++) {
1375 struct qib_devdata *dd = qib_lookup(ndev);
1376
1377 /* device portion of usable() */
1378 if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
1379 continue;
1380 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
1381 if (dd->rcd[ctxt])
1382 continue;
1383 if (port) {
1384 if (port > dd->num_pports)
1385 continue;
1386 ppd = dd->pport + port - 1;
1387 if (!usable(ppd, 0))
1388 continue;
1389 } else {
1390 /*
1391 * choose port based on ctxt, if up, else
1392 * first port that's up for multi-port HCA
1393 */
1394 ppd = dd->pport + (ctxt % dd->num_pports);
1395 if (!usable(ppd, 0)) {
1396 int j;
1397
1398 ppd = NULL;
1399 for (j = 0; j < dd->num_pports &&
1400 !ppd; j++)
1401 if (usable(dd->pport + j, 0))
1402 ppd = dd->pport + j;
1403 if (!ppd)
1404 continue; /* to next unit */
1405 }
1406 }
1407 ret = setup_ctxt(ppd, ctxt, fp, uinfo);
1408 goto done;
1409 }
1410 }
1411
1412 if (npresent) {
1413 if (nup == 0)
1414 ret = -ENETDOWN;
1415 else
1416 ret = -EBUSY;
1417 } else
1418 ret = -ENXIO;
1419
1420done:
1421 return ret;
1422}
1423
1424static int find_shared_ctxt(struct file *fp,
1425 const struct qib_user_info *uinfo)
1426{
1427 int devmax, ndev, i;
1428 int ret = 0;
1429
1430 devmax = qib_count_units(NULL, NULL);
1431
1432 for (ndev = 0; ndev < devmax; ndev++) {
1433 struct qib_devdata *dd = qib_lookup(ndev);
1434
1435 /* device portion of usable() */
1436 if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
1437 continue;
1438 for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
1439 struct qib_ctxtdata *rcd = dd->rcd[i];
1440
1441 /* Skip ctxts which are not yet open */
1442 if (!rcd || !rcd->cnt)
1443 continue;
1444 /* Skip ctxt if it doesn't match the requested one */
1445 if (rcd->subctxt_id != uinfo->spu_subctxt_id)
1446 continue;
1447 /* Verify the sharing process matches the master */
1448 if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
1449 rcd->userversion != uinfo->spu_userversion ||
1450 rcd->cnt >= rcd->subctxt_cnt) {
1451 ret = -EINVAL;
1452 goto done;
1453 }
1454 ctxt_fp(fp) = rcd;
1455 subctxt_fp(fp) = rcd->cnt++;
1456 rcd->subpid[subctxt_fp(fp)] = current->pid;
1457 tidcursor_fp(fp) = 0;
1458 rcd->active_slaves |= 1 << subctxt_fp(fp);
1459 ret = 1;
1460 goto done;
1461 }
1462 }
1463
1464done:
1465 return ret;
1466}
1467
1468static int qib_open(struct inode *in, struct file *fp)
1469{
1470 /* The real work is performed later in qib_assign_ctxt() */
1471 fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
1472 if (fp->private_data) /* no cpu affinity by default */
1473 ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
1474 return fp->private_data ? 0 : -ENOMEM;
1475}
1476
1477/*
1478 * Get ctxt early, so we can set affinity prior to memory allocation.
1479 */
1480static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
1481{
1482 int ret;
1483 int i_minor;
1484 unsigned swmajor, swminor;
1485
1486 /* Check to be sure we haven't already initialized this file */
1487 if (ctxt_fp(fp)) {
1488 ret = -EINVAL;
1489 goto done;
1490 }
1491
1492 /* for now, if major version is different, bail */
1493 swmajor = uinfo->spu_userversion >> 16;
1494 if (swmajor != QIB_USER_SWMAJOR) {
1495 ret = -ENODEV;
1496 goto done;
1497 }
1498
1499 swminor = uinfo->spu_userversion & 0xffff;
1500
1501 mutex_lock(&qib_mutex);
1502
1503 if (qib_compatible_subctxts(swmajor, swminor) &&
1504 uinfo->spu_subctxt_cnt) {
1505 ret = find_shared_ctxt(fp, uinfo);
1506 if (ret) {
1507 if (ret > 0)
1508 ret = 0;
1509 goto done_chk_sdma;
1510 }
1511 }
1512
1513 i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE;
1514 if (i_minor)
1515 ret = find_free_ctxt(i_minor - 1, fp, uinfo);
1516 else
1517 ret = get_a_ctxt(fp, uinfo);
1518
1519done_chk_sdma:
1520 if (!ret) {
1521 struct qib_filedata *fd = fp->private_data;
1522 const struct qib_ctxtdata *rcd = fd->rcd;
1523 const struct qib_devdata *dd = rcd->dd;
1524
1525 if (dd->flags & QIB_HAS_SEND_DMA) {
1526 fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
1527 dd->unit,
1528 rcd->ctxt,
1529 fd->subctxt);
1530 if (!fd->pq)
1531 ret = -ENOMEM;
1532 }
1533
1534 /*
1535		 * If the process has NOT already set its affinity, select and
1536		 * reserve a processor for it, as a rendezvous for all
1537		 * users of the driver.  If they don't actually later
1538		 * set affinity to this cpu, or set it to some other cpu,
1539		 * it just means that sooner or later we don't recommend
1540		 * a cpu, and we let the scheduler do its best.
1541 */
1542 if (!ret && cpus_weight(current->cpus_allowed) >=
1543 qib_cpulist_count) {
1544 int cpu;
1545 cpu = find_first_zero_bit(qib_cpulist,
1546 qib_cpulist_count);
1547 if (cpu != qib_cpulist_count) {
1548 __set_bit(cpu, qib_cpulist);
1549 fd->rec_cpu_num = cpu;
1550 }
1551 } else if (cpus_weight(current->cpus_allowed) == 1 &&
1552 test_bit(first_cpu(current->cpus_allowed),
1553 qib_cpulist))
1554 qib_devinfo(dd->pcidev, "%s PID %u affinity "
1555 "set to cpu %d; already allocated\n",
1556 current->comm, current->pid,
1557 first_cpu(current->cpus_allowed));
1558 }
1559
1560 mutex_unlock(&qib_mutex);
1561
1562done:
1563 return ret;
1564}
1565
1566
1567static int qib_do_user_init(struct file *fp,
1568 const struct qib_user_info *uinfo)
1569{
1570 int ret;
1571 struct qib_ctxtdata *rcd = ctxt_fp(fp);
1572 struct qib_devdata *dd;
1573 unsigned uctxt;
1574
1575 /* Subctxts don't need to initialize anything since master did it. */
1576 if (subctxt_fp(fp)) {
1577 ret = wait_event_interruptible(rcd->wait,
1578 !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
1579 goto bail;
1580 }
1581
1582 dd = rcd->dd;
1583
1584 /* some ctxts may get extra buffers, calculate that here */
1585 uctxt = rcd->ctxt - dd->first_user_ctxt;
1586 if (uctxt < dd->ctxts_extrabuf) {
1587 rcd->piocnt = dd->pbufsctxt + 1;
1588 rcd->pio_base = rcd->piocnt * uctxt;
1589 } else {
1590 rcd->piocnt = dd->pbufsctxt;
1591 rcd->pio_base = rcd->piocnt * uctxt +
1592 dd->ctxts_extrabuf;
1593 }
1594
1595 /*
1596 * All user buffers are 2KB buffers. If we ever support
1597 * giving 4KB buffers to user processes, this will need some
1598 * work. Can't use piobufbase directly, because it has
1599 * both 2K and 4K buffer base values. So check and handle.
1600 */
1601 if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
1602 if (rcd->pio_base >= dd->piobcnt2k) {
1603 qib_dev_err(dd,
1604 "%u:ctxt%u: no 2KB buffers available\n",
1605 dd->unit, rcd->ctxt);
1606 ret = -ENOBUFS;
1607 goto bail;
1608 }
1609 rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
1610 qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
1611 rcd->ctxt, rcd->piocnt);
1612 }
1613
1614 rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
1615 qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1616 TXCHK_CHG_TYPE_USER, rcd);
1617 /*
1618	 * Try to ensure that processes start up with a consistent avail update
1619	 * for their own range, at least.  If the system is very quiet, the
1620	 * in-memory copy may be out of date at startup for this range of
1621	 * buffers when a context gets re-used.  Do this after the chg_pioavail
1622	 * and before the rest of setup, so it's "almost certain" the DMA
1623	 * will have occurred (we can't guarantee 100%, but it should be many
1624	 * nines of probability with this ordering), given how much else happens
1625	 * after this.
1626 */
1627 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
1628
1629 /*
1630	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
1631	 * array for the time being.  If rcd->ctxt > chip-supported,
1632	 * we will someday need extra logic here to handle overflow
1633	 * through ctxt 0.
1634 */
1635 ret = qib_create_rcvhdrq(dd, rcd);
1636 if (!ret)
1637 ret = qib_setup_eagerbufs(rcd);
1638 if (ret)
1639 goto bail_pio;
1640
1641 rcd->tidcursor = 0; /* start at beginning after open */
1642
1643 /* initialize poll variables... */
1644 rcd->urgent = 0;
1645 rcd->urgent_poll = 0;
1646
1647 /*
1648 * Now enable the ctxt for receive.
1649	 * Some chips DMA the tail register to memory when it changes
1650	 * (and when the update bit transitions from 0 to 1); for those
1651	 * chips, we turn it off and then back on.  This will (very
1652	 * briefly) affect any other open ctxts, but the duration is
1653	 * very short, and therefore isn't an issue.  We explicitly set
1654	 * the in-memory tail copy to 0 beforehand, so we don't have
1655	 * to wait to be sure the DMA update has happened (the chip
1656	 * resets head/tail to 0 on transition to enable).
1657 */
1658 if (rcd->rcvhdrtail_kvaddr)
1659 qib_clear_rcvhdrtail(rcd);
1660
1661 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
1662 rcd->ctxt);
1663
1664 /* Notify any waiting slaves */
1665 if (rcd->subctxt_cnt) {
1666 clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1667 wake_up(&rcd->wait);
1668 }
1669 return 0;
1670
1671bail_pio:
1672 qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1673 TXCHK_CHG_TYPE_KERN, rcd);
1674bail:
1675 return ret;
1676}
1677
1678/**
1679 * unlock_expected_tids - unlock any expected TID entries the context still had in use
1680 * @rcd: ctxt
1681 *
1682 * We don't actually update the chip here, because we do a bulk update
1683 * below, using f_clear_tids.
1684 */
1685static void unlock_expected_tids(struct qib_ctxtdata *rcd)
1686{
1687 struct qib_devdata *dd = rcd->dd;
1688 int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
1689 int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
1690
1691 for (i = ctxt_tidbase; i < maxtid; i++) {
1692 struct page *p = dd->pageshadow[i];
1693 dma_addr_t phys;
1694
1695 if (!p)
1696 continue;
1697
1698 phys = dd->physshadow[i];
1699 dd->physshadow[i] = dd->tidinvalid;
1700 dd->pageshadow[i] = NULL;
1701 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
1702 PCI_DMA_FROMDEVICE);
1703 qib_release_user_pages(&p, 1);
1704 cnt++;
1705 }
1706}
1707
1708static int qib_close(struct inode *in, struct file *fp)
1709{
1710 int ret = 0;
1711 struct qib_filedata *fd;
1712 struct qib_ctxtdata *rcd;
1713 struct qib_devdata *dd;
1714 unsigned long flags;
1715 unsigned ctxt;
1716 pid_t pid;
1717
1718 mutex_lock(&qib_mutex);
1719
1720 fd = (struct qib_filedata *) fp->private_data;
1721 fp->private_data = NULL;
1722 rcd = fd->rcd;
1723 if (!rcd) {
1724 mutex_unlock(&qib_mutex);
1725 goto bail;
1726 }
1727
1728 dd = rcd->dd;
1729
1730 /* ensure all pio buffer writes in progress are flushed */
1731 qib_flush_wc();
1732
1733 /* drain user sdma queue */
1734 if (fd->pq) {
1735 qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
1736 qib_user_sdma_queue_destroy(fd->pq);
1737 }
1738
1739 if (fd->rec_cpu_num != -1)
1740 __clear_bit(fd->rec_cpu_num, qib_cpulist);
1741
1742 if (--rcd->cnt) {
1743 /*
1744 * XXX If the master closes the context before the slave(s),
1745 * revoke the mmap for the eager receive queue so
1746 * the slave(s) don't wait for receive data forever.
1747 */
1748 rcd->active_slaves &= ~(1 << fd->subctxt);
1749 rcd->subpid[fd->subctxt] = 0;
1750 mutex_unlock(&qib_mutex);
1751 goto bail;
1752 }
1753
1754 /* early; no interrupt users after this */
1755 spin_lock_irqsave(&dd->uctxt_lock, flags);
1756 ctxt = rcd->ctxt;
1757 dd->rcd[ctxt] = NULL;
1758 pid = rcd->pid;
1759 rcd->pid = 0;
1760 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1761
1762 if (rcd->rcvwait_to || rcd->piowait_to ||
1763 rcd->rcvnowait || rcd->pionowait) {
1764 rcd->rcvwait_to = 0;
1765 rcd->piowait_to = 0;
1766 rcd->rcvnowait = 0;
1767 rcd->pionowait = 0;
1768 }
1769 if (rcd->flag)
1770 rcd->flag = 0;
1771
1772 if (dd->kregbase) {
1773 /* atomically clear receive enable ctxt and intr avail. */
1774 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
1775 QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
1776
1777 /* clean up the pkeys for this ctxt user */
1778 qib_clean_part_key(rcd, dd);
1779 qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
1780 qib_chg_pioavailkernel(dd, rcd->pio_base,
1781 rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
1782
1783 dd->f_clear_tids(dd, rcd);
1784
1785 if (dd->pageshadow)
1786 unlock_expected_tids(rcd);
1787 qib_stats.sps_ctxts--;
1788 }
1789
1790 mutex_unlock(&qib_mutex);
1791 qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
1792
1793bail:
1794 kfree(fd);
1795 return ret;
1796}
1797
1798static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
1799{
1800 struct qib_ctxt_info info;
1801 int ret;
1802 size_t sz;
1803 struct qib_ctxtdata *rcd = ctxt_fp(fp);
1804 struct qib_filedata *fd;
1805
1806 fd = (struct qib_filedata *) fp->private_data;
1807
1808 info.num_active = qib_count_active_units();
1809 info.unit = rcd->dd->unit;
1810 info.port = rcd->ppd->port;
1811 info.ctxt = rcd->ctxt;
1812 info.subctxt = subctxt_fp(fp);
1813 /* Number of user ctxts available for this device. */
1814 info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
1815 info.num_subctxts = rcd->subctxt_cnt;
1816 info.rec_cpu = fd->rec_cpu_num;
1817 sz = sizeof(info);
1818
1819 if (copy_to_user(uinfo, &info, sz)) {
1820 ret = -EFAULT;
1821 goto bail;
1822 }
1823 ret = 0;
1824
1825bail:
1826 return ret;
1827}
1828
1829static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
1830 u32 __user *inflightp)
1831{
1832 const u32 val = qib_user_sdma_inflight_counter(pq);
1833
1834 if (put_user(val, inflightp))
1835 return -EFAULT;
1836
1837 return 0;
1838}
1839
1840static int qib_sdma_get_complete(struct qib_pportdata *ppd,
1841 struct qib_user_sdma_queue *pq,
1842 u32 __user *completep)
1843{
1844 u32 val;
1845 int err;
1846
1847 if (!pq)
1848 return -EINVAL;
1849
1850 err = qib_user_sdma_make_progress(ppd, pq);
1851 if (err < 0)
1852 return err;
1853
1854 val = qib_user_sdma_complete_counter(pq);
1855 if (put_user(val, completep))
1856 return -EFAULT;
1857
1858 return 0;
1859}
1860
1861static int disarm_req_delay(struct qib_ctxtdata *rcd)
1862{
1863 int ret = 0;
1864
1865 if (!usable(rcd->ppd, 1)) {
1866 int i;
1867 /*
1868		 * If the link is down, or otherwise not usable, delay
1869		 * the caller up to 30 seconds so we don't thrash trying
1870		 * to get the chip back to ACTIVE, and set a flag so the
1871		 * caller makes the call again.
1872 */
1873 if (rcd->user_event_mask) {
1874 /*
1875 * subctxt_cnt is 0 if not shared, so do base
1876 * separately, first, then remaining subctxt, if any
1877 */
1878 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1879 &rcd->user_event_mask[0]);
1880 for (i = 1; i < rcd->subctxt_cnt; i++)
1881 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1882 &rcd->user_event_mask[i]);
1883 }
1884 for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++)
1885 msleep(100);
1886 ret = -ENETDOWN;
1887 }
1888 return ret;
1889}
1890
1891/*
1892 * Find all user contexts in use, and set the specified bit in their
1893 * event mask.
1894 * See also find_ctxt() for a similar use, that is specific to send buffers.
1895 */
1896int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
1897{
1898 struct qib_ctxtdata *rcd;
1899 unsigned ctxt;
1900 int ret = 0;
1901
1902 spin_lock(&ppd->dd->uctxt_lock);
1903 for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
1904 ctxt++) {
1905 rcd = ppd->dd->rcd[ctxt];
1906 if (!rcd)
1907 continue;
1908 if (rcd->user_event_mask) {
1909 int i;
1910 /*
1911 * subctxt_cnt is 0 if not shared, so do base
1912 * separately, first, then remaining subctxt, if any
1913 */
1914 set_bit(evtbit, &rcd->user_event_mask[0]);
1915 for (i = 1; i < rcd->subctxt_cnt; i++)
1916 set_bit(evtbit, &rcd->user_event_mask[i]);
1917 }
1918 ret = 1;
1919 break;
1920 }
1921 spin_unlock(&ppd->dd->uctxt_lock);
1922
1923 return ret;
1924}
1925
1926/*
1927 * Clear the event notifier events for this context.
1928 * For the DISARM_BUFS case, we also take action (this obsoletes
1929 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
1930 * compatibility).
1931 * Other bits don't currently require actions; we just atomically clear them.
1932 * The user process then performs whatever actions are appropriate to the
1933 * bit having been set, if desired, and checks again in the future.
1934 */
1935static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
1936 unsigned long events)
1937{
1938 int ret = 0, i;
1939
1940 for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
1941 if (!test_bit(i, &events))
1942 continue;
1943 if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
1944 (void)qib_disarm_piobufs_ifneeded(rcd);
1945 ret = disarm_req_delay(rcd);
1946 } else
1947 clear_bit(i, &rcd->user_event_mask[subctxt]);
1948 }
1949 return ret;
1950}
1951
1952static ssize_t qib_write(struct file *fp, const char __user *data,
1953 size_t count, loff_t *off)
1954{
1955 const struct qib_cmd __user *ucmd;
1956 struct qib_ctxtdata *rcd;
1957 const void __user *src;
1958 size_t consumed, copy = 0;
1959 struct qib_cmd cmd;
1960 ssize_t ret = 0;
1961 void *dest;
1962
1963 if (count < sizeof(cmd.type)) {
1964 ret = -EINVAL;
1965 goto bail;
1966 }
1967
1968 ucmd = (const struct qib_cmd __user *) data;
1969
1970 if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
1971 ret = -EFAULT;
1972 goto bail;
1973 }
1974
1975 consumed = sizeof(cmd.type);
1976
1977 switch (cmd.type) {
1978 case QIB_CMD_ASSIGN_CTXT:
1979 case QIB_CMD_USER_INIT:
1980 copy = sizeof(cmd.cmd.user_info);
1981 dest = &cmd.cmd.user_info;
1982 src = &ucmd->cmd.user_info;
1983 break;
1984
1985 case QIB_CMD_RECV_CTRL:
1986 copy = sizeof(cmd.cmd.recv_ctrl);
1987 dest = &cmd.cmd.recv_ctrl;
1988 src = &ucmd->cmd.recv_ctrl;
1989 break;
1990
1991 case QIB_CMD_CTXT_INFO:
1992 copy = sizeof(cmd.cmd.ctxt_info);
1993 dest = &cmd.cmd.ctxt_info;
1994 src = &ucmd->cmd.ctxt_info;
1995 break;
1996
1997 case QIB_CMD_TID_UPDATE:
1998 case QIB_CMD_TID_FREE:
1999 copy = sizeof(cmd.cmd.tid_info);
2000 dest = &cmd.cmd.tid_info;
2001 src = &ucmd->cmd.tid_info;
2002 break;
2003
2004 case QIB_CMD_SET_PART_KEY:
2005 copy = sizeof(cmd.cmd.part_key);
2006 dest = &cmd.cmd.part_key;
2007 src = &ucmd->cmd.part_key;
2008 break;
2009
2010 case QIB_CMD_DISARM_BUFS:
2011 case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
2012 copy = 0;
2013 src = NULL;
2014 dest = NULL;
2015 break;
2016
2017 case QIB_CMD_POLL_TYPE:
2018 copy = sizeof(cmd.cmd.poll_type);
2019 dest = &cmd.cmd.poll_type;
2020 src = &ucmd->cmd.poll_type;
2021 break;
2022
2023 case QIB_CMD_ARMLAUNCH_CTRL:
2024 copy = sizeof(cmd.cmd.armlaunch_ctrl);
2025 dest = &cmd.cmd.armlaunch_ctrl;
2026 src = &ucmd->cmd.armlaunch_ctrl;
2027 break;
2028
2029 case QIB_CMD_SDMA_INFLIGHT:
2030 copy = sizeof(cmd.cmd.sdma_inflight);
2031 dest = &cmd.cmd.sdma_inflight;
2032 src = &ucmd->cmd.sdma_inflight;
2033 break;
2034
2035 case QIB_CMD_SDMA_COMPLETE:
2036 copy = sizeof(cmd.cmd.sdma_complete);
2037 dest = &cmd.cmd.sdma_complete;
2038 src = &ucmd->cmd.sdma_complete;
2039 break;
2040
2041 case QIB_CMD_ACK_EVENT:
2042 copy = sizeof(cmd.cmd.event_mask);
2043 dest = &cmd.cmd.event_mask;
2044 src = &ucmd->cmd.event_mask;
2045 break;
2046
2047 default:
2048 ret = -EINVAL;
2049 goto bail;
2050 }
2051
2052 if (copy) {
2053 if ((count - consumed) < copy) {
2054 ret = -EINVAL;
2055 goto bail;
2056 }
2057 if (copy_from_user(dest, src, copy)) {
2058 ret = -EFAULT;
2059 goto bail;
2060 }
2061 consumed += copy;
2062 }
2063
2064 rcd = ctxt_fp(fp);
2065 if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
2066 ret = -EINVAL;
2067 goto bail;
2068 }
2069
2070 switch (cmd.type) {
2071 case QIB_CMD_ASSIGN_CTXT:
2072 ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
2073 if (ret)
2074 goto bail;
2075 break;
2076
2077 case QIB_CMD_USER_INIT:
2078 ret = qib_do_user_init(fp, &cmd.cmd.user_info);
2079 if (ret)
2080 goto bail;
2081 ret = qib_get_base_info(fp, (void __user *) (unsigned long)
2082 cmd.cmd.user_info.spu_base_info,
2083 cmd.cmd.user_info.spu_base_info_size);
2084 break;
2085
2086 case QIB_CMD_RECV_CTRL:
2087 ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
2088 break;
2089
2090 case QIB_CMD_CTXT_INFO:
2091 ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
2092 (unsigned long) cmd.cmd.ctxt_info);
2093 break;
2094
2095 case QIB_CMD_TID_UPDATE:
2096 ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
2097 break;
2098
2099 case QIB_CMD_TID_FREE:
2100 ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
2101 break;
2102
2103 case QIB_CMD_SET_PART_KEY:
2104 ret = qib_set_part_key(rcd, cmd.cmd.part_key);
2105 break;
2106
2107 case QIB_CMD_DISARM_BUFS:
2108 (void)qib_disarm_piobufs_ifneeded(rcd);
2109 ret = disarm_req_delay(rcd);
2110 break;
2111
2112 case QIB_CMD_PIOAVAILUPD:
2113 qib_force_pio_avail_update(rcd->dd);
2114 break;
2115
2116 case QIB_CMD_POLL_TYPE:
2117 rcd->poll_type = cmd.cmd.poll_type;
2118 break;
2119
2120 case QIB_CMD_ARMLAUNCH_CTRL:
2121 rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
2122 break;
2123
2124 case QIB_CMD_SDMA_INFLIGHT:
2125 ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
2126 (u32 __user *) (unsigned long)
2127 cmd.cmd.sdma_inflight);
2128 break;
2129
2130 case QIB_CMD_SDMA_COMPLETE:
2131 ret = qib_sdma_get_complete(rcd->ppd,
2132 user_sdma_queue_fp(fp),
2133 (u32 __user *) (unsigned long)
2134 cmd.cmd.sdma_complete);
2135 break;
2136
2137 case QIB_CMD_ACK_EVENT:
2138 ret = qib_user_event_ack(rcd, subctxt_fp(fp),
2139 cmd.cmd.event_mask);
2140 break;
2141 }
2142
2143 if (ret >= 0)
2144 ret = consumed;
2145
2146bail:
2147 return ret;
2148}
2149
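Illustration (editorial, not part of this patch): user processes drive the interface by writing a struct qib_cmd to the open device fd, and on success qib_write() returns the number of bytes it consumed. A minimal sketch for QIB_CMD_ASSIGN_CTXT; it assumes the qib user header providing struct qib_cmd and struct qib_user_info is available, the function name is hypothetical, and error handling is elided.

#include <string.h>
#include <unistd.h>

static int qib_do_assign_ctxt(int fd, const struct qib_user_info *uinfo)
{
	struct qib_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = QIB_CMD_ASSIGN_CTXT;
	cmd.cmd.user_info = *uinfo;

	return (write(fd, &cmd, sizeof(cmd)) < 0) ? -1 : 0;
}
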
2150static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
2151 unsigned long dim, loff_t off)
2152{
2153 struct qib_filedata *fp = iocb->ki_filp->private_data;
2154 struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
2155 struct qib_user_sdma_queue *pq = fp->pq;
2156
2157 if (!dim || !pq)
2158 return -EINVAL;
2159
2160 return qib_user_sdma_writev(rcd, pq, iov, dim);
2161}
2162
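Illustration (editorial, not part of this patch): user SDMA requests reach qib_aio_write() above via writev(2) on the device fd, gathering the pieces of a request in one call. A minimal sketch; the header/payload split is an assumption about the user protocol, not something defined here.

#include <sys/uio.h>

static int qib_post_sdma(int fd, void *hdr, size_t hlen,
			 void *payload, size_t plen)
{
	struct iovec iov[2] = {
		{ .iov_base = hdr,     .iov_len = hlen },
		{ .iov_base = payload, .iov_len = plen },
	};

	return (writev(fd, iov, 2) < 0) ? -1 : 0;
}
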
2163static struct class *qib_class;
2164static dev_t qib_dev;
2165
2166int qib_cdev_init(int minor, const char *name,
2167 const struct file_operations *fops,
2168 struct cdev **cdevp, struct device **devp)
2169{
2170 const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
2171 struct cdev *cdev;
2172 struct device *device = NULL;
2173 int ret;
2174
2175 cdev = cdev_alloc();
2176 if (!cdev) {
2177 printk(KERN_ERR QIB_DRV_NAME
2178 ": Could not allocate cdev for minor %d, %s\n",
2179 minor, name);
2180 ret = -ENOMEM;
2181 goto done;
2182 }
2183
2184 cdev->owner = THIS_MODULE;
2185 cdev->ops = fops;
2186 kobject_set_name(&cdev->kobj, name);
2187
2188 ret = cdev_add(cdev, dev, 1);
2189 if (ret < 0) {
2190 printk(KERN_ERR QIB_DRV_NAME
2191 ": Could not add cdev for minor %d, %s (err %d)\n",
2192 minor, name, -ret);
2193 goto err_cdev;
2194 }
2195
2196 device = device_create(qib_class, NULL, dev, NULL, name);
2197 if (!IS_ERR(device))
2198 goto done;
2199 ret = PTR_ERR(device);
2200 device = NULL;
2201 printk(KERN_ERR QIB_DRV_NAME ": Could not create "
2202 "device for minor %d, %s (err %d)\n",
2203 minor, name, -ret);
2204err_cdev:
2205 cdev_del(cdev);
2206 cdev = NULL;
2207done:
2208 *cdevp = cdev;
2209 *devp = device;
2210 return ret;
2211}
2212
2213void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
2214{
2215 struct device *device = *devp;
2216
2217 if (device) {
2218 device_unregister(device);
2219 *devp = NULL;
2220 }
2221
2222 if (*cdevp) {
2223 cdev_del(*cdevp);
2224 *cdevp = NULL;
2225 }
2226}
2227
2228static struct cdev *wildcard_cdev;
2229static struct device *wildcard_device;
2230
2231int __init qib_dev_init(void)
2232{
2233 int ret;
2234
2235 ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
2236 if (ret < 0) {
2237 printk(KERN_ERR QIB_DRV_NAME ": Could not allocate "
2238 "chrdev region (err %d)\n", -ret);
2239 goto done;
2240 }
2241
2242 qib_class = class_create(THIS_MODULE, "ipath");
2243 if (IS_ERR(qib_class)) {
2244 ret = PTR_ERR(qib_class);
2245 printk(KERN_ERR QIB_DRV_NAME ": Could not create "
2246 "device class (err %d)\n", -ret);
2247 unregister_chrdev_region(qib_dev, QIB_NMINORS);
2248 }
2249
2250done:
2251 return ret;
2252}
2253
2254void qib_dev_cleanup(void)
2255{
2256 if (qib_class) {
2257 class_destroy(qib_class);
2258 qib_class = NULL;
2259 }
2260
2261 unregister_chrdev_region(qib_dev, QIB_NMINORS);
2262}
2263
2264static atomic_t user_count = ATOMIC_INIT(0);
2265
2266static void qib_user_remove(struct qib_devdata *dd)
2267{
2268 if (atomic_dec_return(&user_count) == 0)
2269 qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
2270
2271 qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
2272}
2273
2274static int qib_user_add(struct qib_devdata *dd)
2275{
2276 char name[10];
2277 int ret;
2278
2279 if (atomic_inc_return(&user_count) == 1) {
2280 ret = qib_cdev_init(0, "ipath", &qib_file_ops,
2281 &wildcard_cdev, &wildcard_device);
2282 if (ret)
2283 goto done;
2284 }
2285
2286 snprintf(name, sizeof(name), "ipath%d", dd->unit);
2287 ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
2288 &dd->user_cdev, &dd->user_device);
2289 if (ret)
2290 qib_user_remove(dd);
2291done:
2292 return ret;
2293}
2294
2295/*
2296 * Create per-unit files in /dev
2297 */
2298int qib_device_create(struct qib_devdata *dd)
2299{
2300 int r, ret;
2301
2302 r = qib_user_add(dd);
2303 ret = qib_diag_add(dd);
2304 if (r && !ret)
2305 ret = r;
2306 return ret;
2307}
2308
2309/*
2310 * Remove per-unit files in /dev
2311 * Returns void; the core kernel returns no errors for this stuff.
2312 */
2313void qib_device_remove(struct qib_devdata *dd)
2314{
2315 qib_user_remove(dd);
2316 qib_diag_remove(dd);
2317}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
new file mode 100644
index 000000000000..755470440ef1
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -0,0 +1,613 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/module.h>
35#include <linux/fs.h>
36#include <linux/mount.h>
37#include <linux/pagemap.h>
38#include <linux/init.h>
39#include <linux/namei.h>
40
41#include "qib.h"
42
43#define QIBFS_MAGIC 0x726a77
44
45static struct super_block *qib_super;
46
47#define private2dd(file) ((file)->f_dentry->d_inode->i_private)
48
49static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
50 int mode, const struct file_operations *fops,
51 void *data)
52{
53 int error;
54 struct inode *inode = new_inode(dir->i_sb);
55
56 if (!inode) {
57 error = -EPERM;
58 goto bail;
59 }
60
61 inode->i_mode = mode;
62 inode->i_uid = 0;
63 inode->i_gid = 0;
64 inode->i_blocks = 0;
65 inode->i_atime = CURRENT_TIME;
66 inode->i_mtime = inode->i_atime;
67 inode->i_ctime = inode->i_atime;
68 inode->i_private = data;
69 if ((mode & S_IFMT) == S_IFDIR) {
70 inode->i_op = &simple_dir_inode_operations;
71 inc_nlink(inode);
72 inc_nlink(dir);
73 }
74
75 inode->i_fop = fops;
76
77 d_instantiate(dentry, inode);
78 error = 0;
79
80bail:
81 return error;
82}
83
84static int create_file(const char *name, mode_t mode,
85 struct dentry *parent, struct dentry **dentry,
86 const struct file_operations *fops, void *data)
87{
88 int error;
89
90 *dentry = NULL;
91 mutex_lock(&parent->d_inode->i_mutex);
92 *dentry = lookup_one_len(name, parent, strlen(name));
93 if (!IS_ERR(*dentry))
94 error = qibfs_mknod(parent->d_inode, *dentry,
95 mode, fops, data);
96 else
97 error = PTR_ERR(*dentry);
98 mutex_unlock(&parent->d_inode->i_mutex);
99
100 return error;
101}
102
103static ssize_t driver_stats_read(struct file *file, char __user *buf,
104 size_t count, loff_t *ppos)
105{
106 return simple_read_from_buffer(buf, count, ppos, &qib_stats,
107 sizeof qib_stats);
108}
109
110/*
111 * Driver stats field names, one line per stat, as a single string.  Used by
112 * programs like ipathstats to print the stats in a way that works across
113 * different driver versions without changing program source.
114 * If qlogic_ib_stats changes, this needs to change.  Names must be
115 * 12 chars or less (w/o newline) for proper display by the ipathstats utility.
116 */
117static const char qib_statnames[] =
118 "KernIntr\n"
119 "ErrorIntr\n"
120 "Tx_Errs\n"
121 "Rcv_Errs\n"
122 "H/W_Errs\n"
123 "NoPIOBufs\n"
124 "CtxtsOpen\n"
125 "RcvLen_Errs\n"
126 "EgrBufFull\n"
127 "EgrHdrFull\n"
128 ;
129
130static ssize_t driver_names_read(struct file *file, char __user *buf,
131 size_t count, loff_t *ppos)
132{
133 return simple_read_from_buffer(buf, count, ppos, qib_statnames,
134 sizeof qib_statnames - 1); /* no null */
135}
136
137static const struct file_operations driver_ops[] = {
138 { .read = driver_stats_read, },
139 { .read = driver_names_read, },
140};
141
142/* read the per-device counters */
143static ssize_t dev_counters_read(struct file *file, char __user *buf,
144 size_t count, loff_t *ppos)
145{
146	u64 *counters;
147	struct qib_devdata *dd = private2dd(file);
148	size_t avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters);
149
150	return simple_read_from_buffer(buf, count, ppos, counters, avail);
151}
152
153/* read the per-device counters */
154static ssize_t dev_names_read(struct file *file, char __user *buf,
155 size_t count, loff_t *ppos)
156{
157	char *names;
158	struct qib_devdata *dd = private2dd(file);
159	size_t avail = dd->f_read_cntrs(dd, *ppos, &names, NULL);
160
161	return simple_read_from_buffer(buf, count, ppos, names, avail);
162}
163
164static const struct file_operations cntr_ops[] = {
165 { .read = dev_counters_read, },
166 { .read = dev_names_read, },
167};
168
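Illustration (editorial, not part of this patch): the counters file reads back as an array of u64 values, paired by index with the newline-separated names from counter_names. A minimal sketch; the mount point "/ipathfs" and unit number 0 are assumptions.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static void dump_counters(void)
{
	uint64_t vals[128];
	ssize_t n, i;
	int fd = open("/ipathfs/0/counters", O_RDONLY);

	if (fd < 0)
		return;
	n = read(fd, vals, sizeof(vals));
	for (i = 0; i < n / (ssize_t)sizeof(uint64_t); i++)
		printf("counter[%zd] = %llu\n", i,
		       (unsigned long long)vals[i]);
	close(fd);
}
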
169/*
170 * We could use file->f_dentry->d_inode->i_ino to figure out which file,
171 * instead of a separate routine for each, but for now this works...
172 */
173
174/* read the per-port names (same for each port) */
175static ssize_t portnames_read(struct file *file, char __user *buf,
176 size_t count, loff_t *ppos)
177{
178	char *names;
179	struct qib_devdata *dd = private2dd(file);
180	size_t avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL);
181
182	return simple_read_from_buffer(buf, count, ppos, names, avail);
183}
184
185/* read the per-port counters for port 1 (pidx 0) */
186static ssize_t portcntrs_1_read(struct file *file, char __user *buf,
187 size_t count, loff_t *ppos)
188{
189	u64 *counters;
190	struct qib_devdata *dd = private2dd(file);
191	size_t avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters);
192
193	return simple_read_from_buffer(buf, count, ppos, counters, avail);
194}
195
196/* read the per-port counters for port 2 (pidx 1) */
197static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
198 size_t count, loff_t *ppos)
199{
200	u64 *counters;
201	struct qib_devdata *dd = private2dd(file);
202	size_t avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters);
203
204	return simple_read_from_buffer(buf, count, ppos, counters, avail);
205}
206
207static const struct file_operations portcntr_ops[] = {
208 { .read = portnames_read, },
209 { .read = portcntrs_1_read, },
210 { .read = portcntrs_2_read, },
211};
212
213/*
214 * read the per-port QSFP data for port 1 (pidx 0)
215 */
216static ssize_t qsfp_1_read(struct file *file, char __user *buf,
217 size_t count, loff_t *ppos)
218{
219 struct qib_devdata *dd = private2dd(file);
220 char *tmp;
221 int ret;
222
223 tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
224 if (!tmp)
225 return -ENOMEM;
226
227 ret = qib_qsfp_dump(dd->pport, tmp, PAGE_SIZE);
228 if (ret > 0)
229 ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
230 kfree(tmp);
231 return ret;
232}
233
234/*
235 * read the per-port QSFP data for port 2 (pidx 1)
236 */
237static ssize_t qsfp_2_read(struct file *file, char __user *buf,
238 size_t count, loff_t *ppos)
239{
240 struct qib_devdata *dd = private2dd(file);
241 char *tmp;
242 int ret;
243
244 if (dd->num_pports < 2)
245 return -ENODEV;
246
247 tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
248 if (!tmp)
249 return -ENOMEM;
250
251 ret = qib_qsfp_dump(dd->pport + 1, tmp, PAGE_SIZE);
252 if (ret > 0)
253 ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
254 kfree(tmp);
255 return ret;
256}
257
258static const struct file_operations qsfp_ops[] = {
259 { .read = qsfp_1_read, },
260 { .read = qsfp_2_read, },
261};
262
263static ssize_t flash_read(struct file *file, char __user *buf,
264 size_t count, loff_t *ppos)
265{
266 struct qib_devdata *dd;
267 ssize_t ret;
268 loff_t pos;
269 char *tmp;
270
271 pos = *ppos;
272
273 if (pos < 0) {
274 ret = -EINVAL;
275 goto bail;
276 }
277
278 if (pos >= sizeof(struct qib_flash)) {
279 ret = 0;
280 goto bail;
281 }
282
283 if (count > sizeof(struct qib_flash) - pos)
284 count = sizeof(struct qib_flash) - pos;
285
286 tmp = kmalloc(count, GFP_KERNEL);
287 if (!tmp) {
288 ret = -ENOMEM;
289 goto bail;
290 }
291
292 dd = private2dd(file);
293 if (qib_eeprom_read(dd, pos, tmp, count)) {
294 qib_dev_err(dd, "failed to read from flash\n");
295 ret = -ENXIO;
296 goto bail_tmp;
297 }
298
299 if (copy_to_user(buf, tmp, count)) {
300 ret = -EFAULT;
301 goto bail_tmp;
302 }
303
304 *ppos = pos + count;
305 ret = count;
306
307bail_tmp:
308 kfree(tmp);
309
310bail:
311 return ret;
312}
313
314static ssize_t flash_write(struct file *file, const char __user *buf,
315 size_t count, loff_t *ppos)
316{
317 struct qib_devdata *dd;
318 ssize_t ret;
319 loff_t pos;
320 char *tmp;
321
322 pos = *ppos;
323
324 if (pos != 0) {
325 ret = -EINVAL;
326 goto bail;
327 }
328
329 if (count != sizeof(struct qib_flash)) {
330 ret = -EINVAL;
331 goto bail;
332 }
333
334 tmp = kmalloc(count, GFP_KERNEL);
335 if (!tmp) {
336 ret = -ENOMEM;
337 goto bail;
338 }
339
340 if (copy_from_user(tmp, buf, count)) {
341 ret = -EFAULT;
342 goto bail_tmp;
343 }
344
345 dd = private2dd(file);
346 if (qib_eeprom_write(dd, pos, tmp, count)) {
347 ret = -ENXIO;
348 qib_dev_err(dd, "failed to write to flash\n");
349 goto bail_tmp;
350 }
351
352 *ppos = pos + count;
353 ret = count;
354
355bail_tmp:
356 kfree(tmp);
357
358bail:
359 return ret;
360}
361
362static const struct file_operations flash_ops = {
363 .read = flash_read,
364 .write = flash_write,
365};
366
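A usage note (editorial, not part of this patch): flash_write() above accepts only a complete struct qib_flash written at offset 0, so user tooling must read, modify, and rewrite the whole image. A minimal sketch; it assumes a userspace copy of the struct qib_flash layout, since the kernel header is not exported.

#include <unistd.h>

static int rewrite_flash(int fd)
{
	struct qib_flash img;

	if (pread(fd, &img, sizeof(img), 0) != (ssize_t)sizeof(img))
		return -1;
	/* ... modify fields of img here ... */
	return (pwrite(fd, &img, sizeof(img), 0) == (ssize_t)sizeof(img))
		? 0 : -1;
}
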
367static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
368{
369 struct dentry *dir, *tmp;
370 char unit[10];
371 int ret, i;
372
373 /* create the per-unit directory */
374 snprintf(unit, sizeof unit, "%u", dd->unit);
375 ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
376 &simple_dir_operations, dd);
377 if (ret) {
378 printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
379 goto bail;
380 }
381
382 /* create the files in the new directory */
383 ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp,
384 &cntr_ops[0], dd);
385 if (ret) {
386 printk(KERN_ERR "create_file(%s/counters) failed: %d\n",
387 unit, ret);
388 goto bail;
389 }
390 ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp,
391 &cntr_ops[1], dd);
392 if (ret) {
393 printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n",
394 unit, ret);
395 goto bail;
396 }
397 ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp,
398 &portcntr_ops[0], dd);
399 if (ret) {
400 printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
401 unit, "portcounter_names", ret);
402 goto bail;
403 }
404 for (i = 1; i <= dd->num_pports; i++) {
405 char fname[24];
406
407 sprintf(fname, "port%dcounters", i);
408 /* create the files in the new directory */
409 ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
410 &portcntr_ops[i], dd);
411 if (ret) {
412 printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
413 unit, fname, ret);
414 goto bail;
415 }
416 if (!(dd->flags & QIB_HAS_QSFP))
417 continue;
418 sprintf(fname, "qsfp%d", i);
419 ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
420 &qsfp_ops[i - 1], dd);
421 if (ret) {
422 printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
423 unit, fname, ret);
424 goto bail;
425 }
426 }
427
428 ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
429 &flash_ops, dd);
430 if (ret)
431 printk(KERN_ERR "create_file(%s/flash) failed: %d\n",
432 unit, ret);
433bail:
434 return ret;
435}
436
437static int remove_file(struct dentry *parent, char *name)
438{
439 struct dentry *tmp;
440 int ret;
441
442 tmp = lookup_one_len(name, parent, strlen(name));
443
444 if (IS_ERR(tmp)) {
445 ret = PTR_ERR(tmp);
446 goto bail;
447 }
448
449 spin_lock(&dcache_lock);
450 spin_lock(&tmp->d_lock);
451 if (!(d_unhashed(tmp) && tmp->d_inode)) {
452 dget_locked(tmp);
453 __d_drop(tmp);
454 spin_unlock(&tmp->d_lock);
455 spin_unlock(&dcache_lock);
456 simple_unlink(parent->d_inode, tmp);
457 } else {
458 spin_unlock(&tmp->d_lock);
459 spin_unlock(&dcache_lock);
460 }
461
462 ret = 0;
463bail:
464 /*
465 * We don't expect clients to care about the return value, but
466 * it's there if they need it.
467 */
468 return ret;
469}
470
471static int remove_device_files(struct super_block *sb,
472 struct qib_devdata *dd)
473{
474 struct dentry *dir, *root;
475 char unit[10];
476 int ret, i;
477
478 root = dget(sb->s_root);
479 mutex_lock(&root->d_inode->i_mutex);
480 snprintf(unit, sizeof unit, "%u", dd->unit);
481 dir = lookup_one_len(unit, root, strlen(unit));
482
483 if (IS_ERR(dir)) {
484 ret = PTR_ERR(dir);
485 printk(KERN_ERR "Lookup of %s failed\n", unit);
486 goto bail;
487 }
488
489 remove_file(dir, "counters");
490 remove_file(dir, "counter_names");
491 remove_file(dir, "portcounter_names");
492 for (i = 0; i < dd->num_pports; i++) {
493 char fname[24];
494
495 sprintf(fname, "port%dcounters", i + 1);
496 remove_file(dir, fname);
497 if (dd->flags & QIB_HAS_QSFP) {
498 sprintf(fname, "qsfp%d", i + 1);
499 remove_file(dir, fname);
500 }
501 }
502 remove_file(dir, "flash");
503 d_delete(dir);
504 ret = simple_rmdir(root->d_inode, dir);
505
506bail:
507 mutex_unlock(&root->d_inode->i_mutex);
508 dput(root);
509 return ret;
510}
511
512/*
513 * This fills everything in when the fs is mounted, to handle umount/mount
514 * after device init. The direct add_cntr_files() call handles adding
515 * them from the init code, when the fs is already mounted.
516 */
517static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
518{
519 struct qib_devdata *dd, *tmp;
520 unsigned long flags;
521 int ret;
522
523 static struct tree_descr files[] = {
524 [2] = {"driver_stats", &driver_ops[0], S_IRUGO},
525 [3] = {"driver_stats_names", &driver_ops[1], S_IRUGO},
526 {""},
527 };
528
529 ret = simple_fill_super(sb, QIBFS_MAGIC, files);
530 if (ret) {
531 printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
532 goto bail;
533 }
534
535 spin_lock_irqsave(&qib_devs_lock, flags);
536
537 list_for_each_entry_safe(dd, tmp, &qib_dev_list, list) {
538 spin_unlock_irqrestore(&qib_devs_lock, flags);
539 ret = add_cntr_files(sb, dd);
540 if (ret) {
541 deactivate_super(sb);
542 goto bail;
543 }
544 spin_lock_irqsave(&qib_devs_lock, flags);
545 }
546
547 spin_unlock_irqrestore(&qib_devs_lock, flags);
548
549bail:
550 return ret;
551}
552
553static int qibfs_get_sb(struct file_system_type *fs_type, int flags,
554 const char *dev_name, void *data, struct vfsmount *mnt)
555{
556 int ret = get_sb_single(fs_type, flags, data,
557 qibfs_fill_super, mnt);
558 if (ret >= 0)
559 qib_super = mnt->mnt_sb;
560 return ret;
561}
562
563static void qibfs_kill_super(struct super_block *s)
564{
565 kill_litter_super(s);
566 qib_super = NULL;
567}
568
569int qibfs_add(struct qib_devdata *dd)
570{
571 int ret;
572
573 /*
574	 * When the first unit is initialized, qib_super will not yet exist
575	 * because nobody has yet tried to mount the filesystem, so
576	 * we can't consider that an error; if an error occurs
577	 * during the mount, that will produce a complaint, so this is OK.
578 * add_cntr_files() for all units is done at mount from
579 * qibfs_fill_super(), so one way or another, everything works.
580 */
581 if (qib_super == NULL)
582 ret = 0;
583 else
584 ret = add_cntr_files(qib_super, dd);
585 return ret;
586}
587
588int qibfs_remove(struct qib_devdata *dd)
589{
590 int ret = 0;
591
592 if (qib_super)
593 ret = remove_device_files(qib_super, dd);
594
595 return ret;
596}
597
598static struct file_system_type qibfs_fs_type = {
599 .owner = THIS_MODULE,
600 .name = "ipathfs",
601 .get_sb = qibfs_get_sb,
602 .kill_sb = qibfs_kill_super,
603};
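/*
 * Editorial note: the filesystem registers under the name "ipathfs",
 * presumably so that mount invocations and tooling written for the
 * older ipath driver keep working (an assumption, not stated here).
 */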
604
605int __init qib_init_qibfs(void)
606{
607 return register_filesystem(&qibfs_fs_type);
608}
609
610int __exit qib_exit_qibfs(void)
611{
612 return unregister_filesystem(&qibfs_fs_type);
613}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
new file mode 100644
index 000000000000..7b6549fd429b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -0,0 +1,3588 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34/*
35 * This file contains all of the code that is specific to the
36 * QLogic_IB 6120 PCIe chip.
37 */
38
39#include <linux/interrupt.h>
40#include <linux/pci.h>
41#include <linux/delay.h>
42#include <rdma/ib_verbs.h>
43
44#include "qib.h"
45#include "qib_6120_regs.h"
46
47static void qib_6120_setup_setextled(struct qib_pportdata *, u32);
48static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op);
49static u8 qib_6120_phys_portstate(u64);
50static u32 qib_6120_iblink_state(u64);
51
52/*
53 * This file contains all the chip-specific register information and
54 * access functions for the QLogic_IB 6120 PCI-Express chip.
55 */
57
58/* KREG_IDX uses machine-generated #defines */
59#define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64))
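/*
 * Illustrative example (not part of the driver): KREG_IDX converts the
 * byte offset from the machine-generated header into a u64 word index,
 * so KREG_IDX(Scratch) is QIB_6120_Scratch_OFFS / sizeof(u64), and
 * qib_read_kreg64(dd, kr_scratch) below reads the Scratch register.
 */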
60
61/* Use defines to tie machine-generated names to lower-case names */
62#define kr_extctrl KREG_IDX(EXTCtrl)
63#define kr_extstatus KREG_IDX(EXTStatus)
64#define kr_gpio_clear KREG_IDX(GPIOClear)
65#define kr_gpio_mask KREG_IDX(GPIOMask)
66#define kr_gpio_out KREG_IDX(GPIOOut)
67#define kr_gpio_status KREG_IDX(GPIOStatus)
68#define kr_rcvctrl KREG_IDX(RcvCtrl)
69#define kr_sendctrl KREG_IDX(SendCtrl)
70#define kr_partitionkey KREG_IDX(RcvPartitionKey)
71#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
72#define kr_ibcstatus KREG_IDX(IBCStatus)
73#define kr_ibcctrl KREG_IDX(IBCCtrl)
74#define kr_sendbuffererror KREG_IDX(SendBufErr0)
75#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
76#define kr_counterregbase KREG_IDX(CntrRegBase)
77#define kr_palign KREG_IDX(PageAlign)
78#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
79#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
80#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
81#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
82#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
83#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
84#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
85#define kr_scratch KREG_IDX(Scratch)
87#define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr)
88#define kr_sendpiobufbase KREG_IDX(SendPIOBufBase)
89#define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt)
90#define kr_sendpiosize KREG_IDX(SendPIOSize)
91#define kr_sendregbase KREG_IDX(SendRegBase)
92#define kr_userregbase KREG_IDX(UserRegBase)
93#define kr_control KREG_IDX(Control)
94#define kr_intclear KREG_IDX(IntClear)
95#define kr_intmask KREG_IDX(IntMask)
96#define kr_intstatus KREG_IDX(IntStatus)
97#define kr_errclear KREG_IDX(ErrClear)
98#define kr_errmask KREG_IDX(ErrMask)
99#define kr_errstatus KREG_IDX(ErrStatus)
100#define kr_hwerrclear KREG_IDX(HwErrClear)
101#define kr_hwerrmask KREG_IDX(HwErrMask)
102#define kr_hwerrstatus KREG_IDX(HwErrStatus)
103#define kr_revision KREG_IDX(Revision)
104#define kr_portcnt KREG_IDX(PortCnt)
105#define kr_serdes_cfg0 KREG_IDX(SerdesCfg0)
106#define kr_serdes_cfg1 (kr_serdes_cfg0 + 1)
107#define kr_serdes_stat KREG_IDX(SerdesStat)
108#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
109
110/* These must only be written via qib_write_kreg_ctxt() */
111#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
112#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
113
114#define CREG_IDX(regname) ((QIB_6120_##regname##_OFFS - \
115 QIB_6120_LBIntCnt_OFFS) / sizeof(u64))
116
117#define cr_badformat CREG_IDX(RxBadFormatCnt)
118#define cr_erricrc CREG_IDX(RxICRCErrCnt)
119#define cr_errlink CREG_IDX(RxLinkProblemCnt)
120#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
121#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
122#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt)
123#define cr_err_rlen CREG_IDX(RxLenErrCnt)
124#define cr_errslen CREG_IDX(TxLenErrCnt)
125#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
126#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
127#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
128#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
129#define cr_lbint CREG_IDX(LBIntCnt)
130#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
131#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
132#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
133#define cr_pktrcv CREG_IDX(RxDataPktCnt)
134#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
135#define cr_pktsend CREG_IDX(TxDataPktCnt)
136#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
137#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
138#define cr_rcvebp CREG_IDX(RxEBPCnt)
139#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
140#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
141#define cr_sendstall CREG_IDX(TxFlowStallCnt)
142#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
143#define cr_wordrcv CREG_IDX(RxDwordCnt)
144#define cr_wordsend CREG_IDX(TxDwordCnt)
145#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
146#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
147#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
148#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
149#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
150
151#define SYM_RMASK(regname, fldname) ((u64) \
152 QIB_6120_##regname##_##fldname##_RMASK)
153#define SYM_MASK(regname, fldname) ((u64) \
154 QIB_6120_##regname##_##fldname##_RMASK << \
155 QIB_6120_##regname##_##fldname##_LSB)
156#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB)
157
158#define SYM_FIELD(value, regname, fldname) ((u64) \
159 (((value) >> SYM_LSB(regname, fldname)) & \
160 SYM_RMASK(regname, fldname)))
161#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
162#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
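/*
 * Worked example for the SYM_* helpers above (illustrative only; the
 * register and field names come from the machine-generated header):
 * for a field at LSB 4 with a 4-bit RMASK of 0xf, SYM_MASK yields
 * 0xf0, and SYM_FIELD(value, reg, fld) shifts value right by 4 and
 * masks with 0xf, recovering just that field.
 */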
163
164/* link training states, from IBC */
165#define IB_6120_LT_STATE_DISABLED 0x00
166#define IB_6120_LT_STATE_LINKUP 0x01
167#define IB_6120_LT_STATE_POLLACTIVE 0x02
168#define IB_6120_LT_STATE_POLLQUIET 0x03
169#define IB_6120_LT_STATE_SLEEPDELAY 0x04
170#define IB_6120_LT_STATE_SLEEPQUIET 0x05
171#define IB_6120_LT_STATE_CFGDEBOUNCE 0x08
172#define IB_6120_LT_STATE_CFGRCVFCFG 0x09
173#define IB_6120_LT_STATE_CFGWAITRMT 0x0a
174#define IB_6120_LT_STATE_CFGIDLE 0x0b
175#define IB_6120_LT_STATE_RECOVERRETRAIN 0x0c
176#define IB_6120_LT_STATE_RECOVERWAITRMT 0x0e
177#define IB_6120_LT_STATE_RECOVERIDLE 0x0f
178
179/* link state machine states from IBC */
180#define IB_6120_L_STATE_DOWN 0x0
181#define IB_6120_L_STATE_INIT 0x1
182#define IB_6120_L_STATE_ARM 0x2
183#define IB_6120_L_STATE_ACTIVE 0x3
184#define IB_6120_L_STATE_ACT_DEFER 0x4
185
186static const u8 qib_6120_physportstate[0x20] = {
187 [IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
188 [IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
189 [IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
190 [IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
191 [IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
192 [IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
193 [IB_6120_LT_STATE_CFGDEBOUNCE] =
194 IB_PHYSPORTSTATE_CFG_TRAIN,
195 [IB_6120_LT_STATE_CFGRCVFCFG] =
196 IB_PHYSPORTSTATE_CFG_TRAIN,
197 [IB_6120_LT_STATE_CFGWAITRMT] =
198 IB_PHYSPORTSTATE_CFG_TRAIN,
199 [IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
200 [IB_6120_LT_STATE_RECOVERRETRAIN] =
201 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
202 [IB_6120_LT_STATE_RECOVERWAITRMT] =
203 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
204 [IB_6120_LT_STATE_RECOVERIDLE] =
205 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
206 [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
207 [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
208 [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
209 [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
210 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
211 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
212 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
213 [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
214};
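/*
 * Sketch of how the table above is consumed (an assumption, not the
 * driver's actual definition, which appears later in this file): the
 * IBC link training state indexes the 0x20-entry array, and entries
 * the initializer leaves unset decode as 0.
 */
static u8 example_6120_physportstate(u64 ibcs)
{
	/* low 5 bits assumed to hold the link training state */
	return qib_6120_physportstate[ibcs & 0x1f];
}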
215
216
217struct qib_chip_specific {
218 u64 __iomem *cregbase;
219 u64 *cntrs;
220 u64 *portcntrs;
221 void *dummy_hdrq; /* used after ctxt close */
222 dma_addr_t dummy_hdrq_phys;
223 spinlock_t kernel_tid_lock; /* no back to back kernel TID writes */
224 spinlock_t user_tid_lock; /* no back to back user TID writes */
225 spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
226 spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
227 u64 hwerrmask;
228 u64 errormask;
229 u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
230 u64 gpio_mask; /* shadow the gpio mask register */
231 u64 extctrl; /* shadow the gpio output enable, etc... */
232 /*
233 * these 5 fields are used to establish deltas for IB symbol
234 * errors and link recovery errors. They can be reported on
235 * some chips during link negotiation prior to INIT, and with
236 * DDR when faking DDR negotiations with non-IBTA switches.
237 * The chip counters are adjusted at driver unload if there is
238 * a non-zero delta.
239 */
240 u64 ibdeltainprog;
241 u64 ibsymdelta;
242 u64 ibsymsnap;
243 u64 iblnkerrdelta;
244 u64 iblnkerrsnap;
245 u64 ibcctrl; /* shadow for kr_ibcctrl */
246 u32 lastlinkrecov; /* link recovery issue */
247 int irq;
248 u32 cntrnamelen;
249 u32 portcntrnamelen;
250 u32 ncntrs;
251 u32 nportcntrs;
252 /* used with gpio interrupts to implement IB counters */
253 u32 rxfc_unsupvl_errs;
254 u32 overrun_thresh_errs;
255 /*
256 * these count only cases where _successive_ LocalLinkIntegrity
257 * errors were seen in the receive headers of IB standard packets
258 */
259 u32 lli_errs;
260 u32 lli_counter;
261 u64 lli_thresh;
262 u64 sword; /* total dwords sent (sample result) */
263 u64 rword; /* total dwords received (sample result) */
264 u64 spkts; /* total packets sent (sample result) */
265 u64 rpkts; /* total packets received (sample result) */
266 u64 xmit_wait; /* # of ticks no data sent (sample result) */
267 struct timer_list pma_timer;
268 char emsgbuf[128];
269 char bitsmsgbuf[64];
270 u8 pma_sample_status;
271};
272
273/* ibcctrl bits */
274#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
275/* cycle through TS1/TS2 till OK */
276#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
277/* wait for TS1, then go on */
278#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
279#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
280
281#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
282#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
283#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
284#define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18
285
286/*
287 * We could have a single register get/put routine, that takes a group type,
288 * but this is somewhat clearer and cleaner. It also gives us some error
289 * checking. 64 bit register reads should always work, but are inefficient
290 * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
291 * so we use kreg32 wherever possible. User register and counter register
292 * reads are always 32 bit reads, so only one form of those routines.
293 */
294
295/**
296 * qib_read_ureg32 - read 32-bit virtualized per-context register
297 * @dd: device
298 * @regno: register number
299 * @ctxt: context number
300 *
301 * Return the contents of a register that is virtualized to be per context.
302 * Returns 0 on errors (not distinguishable from valid contents at
303 * runtime; we may add a separate error variable at some point).
304 */
305static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
306 enum qib_ureg regno, int ctxt)
307{
308 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
309 return 0;
310
311 if (dd->userbase)
312 return readl(regno + (u64 __iomem *)
313 ((char __iomem *)dd->userbase +
314 dd->ureg_align * ctxt));
315 else
316 return readl(regno + (u64 __iomem *)
317 (dd->uregbase +
318 (char __iomem *)dd->kregbase +
319 dd->ureg_align * ctxt));
320}
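/*
 * Address math note (descriptive, matching the code above): each
 * context's user registers live ureg_align bytes apart. For context N
 * the base is userbase + ureg_align * N when the user segment is
 * mapped separately, else kregbase + uregbase + ureg_align * N, and
 * regno then indexes 64-bit words from that base.
 */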
321
322/**
323 * qib_write_ureg - write 32-bit virtualized per-context register
324 * @dd: device
325 * @regno: register number
326 * @value: value
327 * @ctxt: context
328 *
329 * Write the contents of a register that is virtualized to be per context.
330 */
331static inline void qib_write_ureg(const struct qib_devdata *dd,
332 enum qib_ureg regno, u64 value, int ctxt)
333{
334 u64 __iomem *ubase;
335 if (dd->userbase)
336 ubase = (u64 __iomem *)
337 ((char __iomem *) dd->userbase +
338 dd->ureg_align * ctxt);
339 else
340 ubase = (u64 __iomem *)
341 (dd->uregbase +
342 (char __iomem *) dd->kregbase +
343 dd->ureg_align * ctxt);
344
345 if (dd->kregbase && (dd->flags & QIB_PRESENT))
346 writeq(value, &ubase[regno]);
347}
348
349static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
350 const u16 regno)
351{
352 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
353 return -1;
354 return readl((u32 __iomem *)&dd->kregbase[regno]);
355}
356
357static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
358 const u16 regno)
359{
360 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
361 return -1;
362
363 return readq(&dd->kregbase[regno]);
364}
365
366static inline void qib_write_kreg(const struct qib_devdata *dd,
367 const u16 regno, u64 value)
368{
369 if (dd->kregbase && (dd->flags & QIB_PRESENT))
370 writeq(value, &dd->kregbase[regno]);
371}
372
373/**
374 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
375 * @dd: the qlogic_ib device
376 * @regno: the register number to write
377 * @ctxt: the context containing the register
378 * @value: the value to write
379 */
380static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
381 const u16 regno, unsigned ctxt,
382 u64 value)
383{
384 qib_write_kreg(dd, regno + ctxt, value);
385}
386
387static inline void write_6120_creg(const struct qib_devdata *dd,
388 u16 regno, u64 value)
389{
390 if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
391 writeq(value, &dd->cspec->cregbase[regno]);
392}
393
394static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
395{
396 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
397 return 0;
398 return readq(&dd->cspec->cregbase[regno]);
399}
400
401static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
402{
403 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
404 return 0;
405 return readl(&dd->cspec->cregbase[regno]);
406}
407
408/* kr_control bits */
409#define QLOGIC_IB_C_RESET 1U
410
411/* kr_intstatus, kr_intclear, kr_intmask bits */
412#define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1)
413#define QLOGIC_IB_I_RCVURG_SHIFT 0
414#define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1)
415#define QLOGIC_IB_I_RCVAVAIL_SHIFT 12
416
417#define QLOGIC_IB_C_FREEZEMODE 0x00000002
418#define QLOGIC_IB_C_LINKENABLE 0x00000004
419#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
420#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
421#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
422#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
423#define QLOGIC_IB_I_BITSEXTANT \
424 ((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
425 (QLOGIC_IB_I_RCVAVAIL_MASK << \
426 QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
427 QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
428 QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO)
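/*
 * BITSEXTANT enumerates every interrupt bit this chip can assert; the
 * interrupt handler uses it below to flag any "unknown" status bits.
 */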
429
430/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
431#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
432#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
433#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
434#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
435#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
436#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
437#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
438#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
439#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
440#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
441#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
442#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
443
444
445/* kr_extstatus bits */
446#define QLOGIC_IB_EXTS_FREQSEL 0x2
447#define QLOGIC_IB_EXTS_SERDESSEL 0x4
448#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
449#define QLOGIC_IB_EXTS_MEMBIST_FOUND 0x0000000000008000
450
451/* kr_xgxsconfig bits */
452#define QLOGIC_IB_XGXS_RESET 0x5ULL
453
454#define _QIB_GPIO_SDA_NUM 1
455#define _QIB_GPIO_SCL_NUM 0
456
457/* Bits in GPIO for the added IB link interrupts */
458#define GPIO_RXUVL_BIT 3
459#define GPIO_OVRUN_BIT 4
460#define GPIO_LLI_BIT 5
461#define GPIO_ERRINTR_MASK 0x38
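/*
 * For reference: 0x38 is bits 3..5, i.e.
 * (1 << GPIO_RXUVL_BIT) | (1 << GPIO_OVRUN_BIT) | (1 << GPIO_LLI_BIT).
 */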
462
463
464#define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL
465#define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \
466 ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
467#define QLOGIC_IB_RT_BUFSIZE(tid) (1 << QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid))
468#define QLOGIC_IB_RT_IS_VALID(tid) \
469 (((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \
470 ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK)))
471#define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
472#define QLOGIC_IB_RT_ADDR_SHIFT 10
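/*
 * Worked example for the RT (expected TID) macros above, illustrative
 * only: if bits 31:29 of a TID entry hold 1, BUFSIZE_SHIFTVAL gives
 * 1 + 11 - 1 = 11, so QLOGIC_IB_RT_BUFSIZE(tid) = 1 << 11 = 2048
 * bytes. All-zeros and all-ones field values are rejected by
 * QLOGIC_IB_RT_IS_VALID().
 */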
473
474#define QLOGIC_IB_R_INTRAVAIL_SHIFT 16
475#define QLOGIC_IB_R_TAILUPD_SHIFT 31
476#define IBA6120_R_PKEY_DIS_SHIFT 30
477
478#define PBC_6120_VL15_SEND_CTRL (1ULL << 31) /* pbc; VL15; link_buf only */
479
480#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
481#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
482
483#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
484 ((1ULL << (SYM_LSB(regname, fldname) + (bit)))))
485
486#define TXEMEMPARITYERR_PIOBUF \
487 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
488#define TXEMEMPARITYERR_PIOPBC \
489 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
490#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
491 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
492
493#define RXEMEMPARITYERR_RCVBUF \
494 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
495#define RXEMEMPARITYERR_LOOKUPQ \
496 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
497#define RXEMEMPARITYERR_EXPTID \
498 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
499#define RXEMEMPARITYERR_EAGERTID \
500 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
501#define RXEMEMPARITYERR_FLAGBUF \
502 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
503#define RXEMEMPARITYERR_DATAINFO \
504 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
505#define RXEMEMPARITYERR_HDRINFO \
506 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
507
508/* 6120 specific hardware errors... */
509static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = {
510 /* generic hardware errors */
511 QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
512 QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
513
514 QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
515 "TXE PIOBUF Memory Parity"),
516 QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
517 "TXE PIOPBC Memory Parity"),
518 QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
519 "TXE PIOLAUNCHFIFO Memory Parity"),
520
521 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
522 "RXE RCVBUF Memory Parity"),
523 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
524 "RXE LOOKUPQ Memory Parity"),
525 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
526 "RXE EAGERTID Memory Parity"),
527 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
528 "RXE EXPTID Memory Parity"),
529 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
530 "RXE FLAGBUF Memory Parity"),
531 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
532 "RXE DATAINFO Memory Parity"),
533 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
534 "RXE HDRINFO Memory Parity"),
535
536 /* chip-specific hardware errors */
537 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
538 "PCIe Poisoned TLP"),
539 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
540 "PCIe completion timeout"),
541 /*
542 * In practice, it's unlikely that we'll see PCIe PLL, bus parity,
543 * or memory parity failures, because most likely we
544 * won't be able to talk to the core of the chip. Nonetheless, we
545 * might see them, if they are in parts of the PCIe core that aren't
546 * essential.
547 */
548 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
549 "PCIePLL1"),
550 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
551 "PCIePLL0"),
552 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
553 "PCIe XTLH core parity"),
554 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
555 "PCIe ADM TX core parity"),
556 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
557 "PCIe ADM RX core parity"),
558 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
559 "SerDes PLL"),
560};
561
562#define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC)
563#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
564 QLOGIC_IB_HWE_COREPLL_RFSLIP)
565
566 /* variables for sanity checking interrupts and errors */
567#define IB_HWE_BITSEXTANT \
568 (HWE_MASK(RXEMemParityErr) | \
569 HWE_MASK(TXEMemParityErr) | \
570 (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
571 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
572 QLOGIC_IB_HWE_PCIE1PLLFAILED | \
573 QLOGIC_IB_HWE_PCIE0PLLFAILED | \
574 QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
575 QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
576 QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
577 QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
578 QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
579 HWE_MASK(PowerOnBISTFailed) | \
580 QLOGIC_IB_HWE_COREPLL_FBSLIP | \
581 QLOGIC_IB_HWE_COREPLL_RFSLIP | \
582 QLOGIC_IB_HWE_SERDESPLLFAILED | \
583 HWE_MASK(IBCBusToSPCParityErr) | \
584 HWE_MASK(IBCBusFromSPCParityErr))
585
586#define IB_E_BITSEXTANT \
587 (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
588 ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
589 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
590 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
591 ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
592 ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
593 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
594 ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
595 ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
596 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) | \
597 ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) | \
598 ERR_MASK(SendDroppedSmpPktErr) | \
599 ERR_MASK(SendDroppedDataPktErr) | \
600 ERR_MASK(SendPioArmLaunchErr) | \
601 ERR_MASK(SendUnexpectedPktNumErr) | \
602 ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) | \
603 ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) | \
604 ERR_MASK(HardwareErr))
605
606#define QLOGIC_IB_E_PKTERRS ( \
607 ERR_MASK(SendPktLenErr) | \
608 ERR_MASK(SendDroppedDataPktErr) | \
609 ERR_MASK(RcvVCRCErr) | \
610 ERR_MASK(RcvICRCErr) | \
611 ERR_MASK(RcvShortPktLenErr) | \
612 ERR_MASK(RcvEBPErr))
613
614/* These are all rcv-related errors which we want to count for stats */
615#define E_SUM_PKTERRS \
616 (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
617 ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
618 ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
619 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
620 ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
621 ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
622
623/* These are all send-related errors which we want to count for stats */
624#define E_SUM_ERRS \
625 (ERR_MASK(SendPioArmLaunchErr) | \
626 ERR_MASK(SendUnexpectedPktNumErr) | \
627 ERR_MASK(SendDroppedDataPktErr) | \
628 ERR_MASK(SendDroppedSmpPktErr) | \
629 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
630 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
631 ERR_MASK(InvalidAddrErr))
632
633/*
634 * This is similar to E_SUM_ERRS, but we can't ignore armlaunch or
635 * other errors not related to freeze and cancelling buffers.
636 * Armlaunch can't be ignored because more can arrive while we are
637 * still cleaning up, and those need to be cancelled as they happen.
638 */
639#define E_SPKT_ERRS_IGNORE \
640 (ERR_MASK(SendDroppedDataPktErr) | \
641 ERR_MASK(SendDroppedSmpPktErr) | \
642 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
643 ERR_MASK(SendPktLenErr))
644
645/*
646 * these are errors that can occur when the link changes state while
647 * a packet is being sent or received. This doesn't cover things
648 * like EBP or VCRC that can be the result of the link changing
649 * state while the sender is transmitting, so we receive a "known bad" packet.
650 */
651#define E_SUM_LINK_PKTERRS \
652 (ERR_MASK(SendDroppedDataPktErr) | \
653 ERR_MASK(SendDroppedSmpPktErr) | \
654 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
655 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
656 ERR_MASK(RcvUnexpectedCharErr))
657
658static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *,
659 u32, unsigned long);
660
661/*
662 * On platforms using this chip, and not having ordered WC stores, we
663 * can get TXE parity errors due to speculative reads to the PIO buffers,
664 * and this, due to a chip issue, can result in (many) false parity error
665 * reports. So it's a debug print on those, and an info print on systems
666 * where the speculative reads don't occur.
667 */
668static void qib_6120_txe_recover(struct qib_devdata *dd)
669{
670 if (!qib_unordered_wc())
671 qib_devinfo(dd->pcidev,
672 "Recovering from TXE PIO parity error\n");
673}
674
675/* enable/disable chip from delivering interrupts */
676static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
677{
678 if (enable) {
679 if (dd->flags & QIB_BADINTR)
680 return;
681 qib_write_kreg(dd, kr_intmask, ~0ULL);
682 /* force re-interrupt of any pending interrupts. */
683 qib_write_kreg(dd, kr_intclear, 0ULL);
684 } else
685 qib_write_kreg(dd, kr_intmask, 0ULL);
686}
687
688/*
689 * Try to cleanup as much as possible for anything that might have gone
690 * wrong while in freeze mode, such as pio buffers being written by user
691 * processes (causing armlaunch), send errors due to going into freeze mode,
692 * etc., and try to avoid causing extra interrupts while doing so.
693 * Forcibly update the in-memory pioavail register copies after cleanup
694 * because the chip won't do it while in freeze mode (the register values
695 * themselves are kept correct).
696 * Make sure that we don't lose any important interrupts by using the chip
697 * feature that says that writing 0 to a bit in *clear that is set in
698 * *status will cause an interrupt to be generated again (if allowed by
699 * the *mask value).
700 * This is in chip-specific code because of all of the register accesses,
701 * even though the details are similar on most chips
702 */
703static void qib_6120_clear_freeze(struct qib_devdata *dd)
704{
705 /* disable error interrupts, to avoid confusion */
706 qib_write_kreg(dd, kr_errmask, 0ULL);
707
708 /* also disable interrupts; errormask is sometimes overwritten */
709 qib_6120_set_intr_state(dd, 0);
710
711 qib_cancel_sends(dd->pport);
712
713 /* clear the freeze, and be sure chip saw it */
714 qib_write_kreg(dd, kr_control, dd->control);
715 qib_read_kreg32(dd, kr_scratch);
716
717 /* force in-memory update now we are out of freeze */
718 qib_force_pio_avail_update(dd);
719
720 /*
721 * force new interrupt if any hwerr, error or interrupt bits are
722 * still set, and clear "safe" send packet errors related to freeze
723 * and cancelling sends. Re-enable error interrupts before possible
724 * force of re-interrupt on pending interrupts.
725 */
726 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
727 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
728 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
729 qib_6120_set_intr_state(dd, 1);
730}
731
732/**
733 * qib_handle_6120_hwerrors - display hardware errors.
734 * @dd: the qlogic_ib device
735 * @msg: the output buffer
736 * @msgl: the size of the output buffer
737 *
738 * Most hardware errors are catastrophic, but for right now we'll
739 * print them and continue. Reuse the message buffer from
740 * handle_6120_errors() to avoid excessive stack usage.
742 */
743static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
744 size_t msgl)
745{
746 u64 hwerrs;
747 u32 bits, ctrl;
748 int isfatal = 0;
749 char *bitsmsg;
750 int log_idx;
751
752 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
753 if (!hwerrs)
754 return;
755 if (hwerrs == ~0ULL) {
756 qib_dev_err(dd, "Read of hardware error status failed "
757 "(all bits set); ignoring\n");
758 return;
759 }
760 qib_stats.sps_hwerrs++;
761
762 /* Always clear the error status register, except MEMBISTFAIL,
763 * regardless of whether we continue or stop using the chip.
764 * We want that set so we know it failed, even across driver reload.
765 * We'll still ignore it in the hwerrmask. We do this partly for
766 * diagnostics, but also for support */
767 qib_write_kreg(dd, kr_hwerrclear,
768 hwerrs & ~HWE_MASK(PowerOnBISTFailed));
769
770 hwerrs &= dd->cspec->hwerrmask;
771
772 /* We log some errors to EEPROM, check if we have any of those. */
773 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
774 if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
775 qib_inc_eeprom_err(dd, log_idx, 1);
776
777 /*
778 * Make sure we get this much out, unless told to be quiet,
779 * or it's occurred within the last 5 seconds.
780 */
781 if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
782 qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
783 "(cleared)\n", (unsigned long long) hwerrs);
784
785 if (hwerrs & ~IB_HWE_BITSEXTANT)
786 qib_dev_err(dd, "hwerror interrupt with unknown errors "
787 "%llx set\n", (unsigned long long)
788 (hwerrs & ~IB_HWE_BITSEXTANT));
789
790 ctrl = qib_read_kreg32(dd, kr_control);
791 if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
792 /*
793 * Parity errors in send memory are recoverable,
794 * just cancel the send (if indicated in sendbuffererror),
795 * count the occurrence, unfreeze (if no other handled
796 * hardware error bits are set), and continue. They can
797 * occur if a processor speculative read is done to the PIO
798 * buffer while we are sending a packet, for example.
799 */
800 if (hwerrs & TXE_PIO_PARITY) {
801 qib_6120_txe_recover(dd);
802 hwerrs &= ~TXE_PIO_PARITY;
803 }
804
805 if (!hwerrs) {
806 static u32 freeze_cnt;
807
808 freeze_cnt++;
809 qib_6120_clear_freeze(dd);
810 } else
811 isfatal = 1;
812 }
813
814 *msg = '\0';
815
816 if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
817 isfatal = 1;
818 strlcat(msg, "[Memory BIST test failed, InfiniPath hardware"
819 " unusable]", msgl);
820 /* ignore from now on, so disable until driver reloaded */
821 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
822 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
823 }
824
825 qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs,
826 ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl);
827
828 bitsmsg = dd->cspec->bitsmsgbuf;
829 if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
830 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
831 bits = (u32) ((hwerrs >>
832 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
833 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
834 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
835 "[PCIe Mem Parity Errs %x] ", bits);
836 strlcat(msg, bitsmsg, msgl);
837 }
838
839 if (hwerrs & _QIB_PLL_FAIL) {
840 isfatal = 1;
841 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
842 "[PLL failed (%llx), InfiniPath hardware unusable]",
843 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
844 strlcat(msg, bitsmsg, msgl);
845 /* ignore from now on, so disable until driver reloaded */
846 dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
847 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
848 }
849
850 if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
851 /*
852 * If it occurs, it is left masked since the external
853 * interface is unused
854 */
855 dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
856 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
857 }
858
859 if (hwerrs)
860 /*
861 * if any set that we aren't ignoring; only
862 * make the complaint once, in case it's stuck
863 * or recurring, and we get here multiple
864 * times.
865 */
866 qib_dev_err(dd, "%s hardware error\n", msg);
867 else
868 *msg = 0; /* recovered from all of them */
869
870 if (isfatal && !dd->diag_client) {
871 qib_dev_err(dd, "Fatal Hardware Error, no longer"
872 " usable, SN %.16s\n", dd->serial);
873 /*
874 * for /sys status file and user programs to print; if no
875 * trailing brace is copied, we'll know it was truncated.
876 */
877 if (dd->freezemsg)
878 snprintf(dd->freezemsg, dd->freezelen,
879 "{%s}", msg);
880 qib_disable_after_error(dd);
881 }
882}
883
884/*
885 * Decode the error status into strings, deciding whether to always
886 * print it or not depending on "normal packet errors" vs everything
887 * else. Return 1 if "real" errors, otherwise 0 if only packet
888 * errors, so caller can decide what to print with the string.
889 */
890static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
891 u64 err)
892{
893 int iserr = 1;
894
895 *buf = '\0';
896 if (err & QLOGIC_IB_E_PKTERRS) {
897 if (!(err & ~QLOGIC_IB_E_PKTERRS))
898 iserr = 0;
899 if ((err & ERR_MASK(RcvICRCErr)) &&
900 !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr))))
901 strlcat(buf, "CRC ", blen);
902 if (!iserr)
903 goto done;
904 }
905 if (err & ERR_MASK(RcvHdrLenErr))
906 strlcat(buf, "rhdrlen ", blen);
907 if (err & ERR_MASK(RcvBadTidErr))
908 strlcat(buf, "rbadtid ", blen);
909 if (err & ERR_MASK(RcvBadVersionErr))
910 strlcat(buf, "rbadversion ", blen);
911 if (err & ERR_MASK(RcvHdrErr))
912 strlcat(buf, "rhdr ", blen);
913 if (err & ERR_MASK(RcvLongPktLenErr))
914 strlcat(buf, "rlongpktlen ", blen);
915 if (err & ERR_MASK(RcvMaxPktLenErr))
916 strlcat(buf, "rmaxpktlen ", blen);
917 if (err & ERR_MASK(RcvMinPktLenErr))
918 strlcat(buf, "rminpktlen ", blen);
919 if (err & ERR_MASK(SendMinPktLenErr))
920 strlcat(buf, "sminpktlen ", blen);
921 if (err & ERR_MASK(RcvFormatErr))
922 strlcat(buf, "rformaterr ", blen);
923 if (err & ERR_MASK(RcvUnsupportedVLErr))
924 strlcat(buf, "runsupvl ", blen);
925 if (err & ERR_MASK(RcvUnexpectedCharErr))
926 strlcat(buf, "runexpchar ", blen);
927 if (err & ERR_MASK(RcvIBFlowErr))
928 strlcat(buf, "ribflow ", blen);
929 if (err & ERR_MASK(SendUnderRunErr))
930 strlcat(buf, "sunderrun ", blen);
931 if (err & ERR_MASK(SendPioArmLaunchErr))
932 strlcat(buf, "spioarmlaunch ", blen);
933 if (err & ERR_MASK(SendUnexpectedPktNumErr))
934 strlcat(buf, "sunexperrpktnum ", blen);
935 if (err & ERR_MASK(SendDroppedSmpPktErr))
936 strlcat(buf, "sdroppedsmppkt ", blen);
937 if (err & ERR_MASK(SendMaxPktLenErr))
938 strlcat(buf, "smaxpktlen ", blen);
939 if (err & ERR_MASK(SendUnsupportedVLErr))
940 strlcat(buf, "sunsupVL ", blen);
941 if (err & ERR_MASK(InvalidAddrErr))
942 strlcat(buf, "invalidaddr ", blen);
943 if (err & ERR_MASK(RcvEgrFullErr))
944 strlcat(buf, "rcvegrfull ", blen);
945 if (err & ERR_MASK(RcvHdrFullErr))
946 strlcat(buf, "rcvhdrfull ", blen);
947 if (err & ERR_MASK(IBStatusChanged))
948 strlcat(buf, "ibcstatuschg ", blen);
949 if (err & ERR_MASK(RcvIBLostLinkErr))
950 strlcat(buf, "riblostlink ", blen);
951 if (err & ERR_MASK(HardwareErr))
952 strlcat(buf, "hardware ", blen);
953 if (err & ERR_MASK(ResetNegated))
954 strlcat(buf, "reset ", blen);
955done:
956 return iserr;
957}
958
959/*
960 * Called when we might have an error that is specific to a particular
961 * PIO buffer, and may need to cancel that buffer, so it can be re-used.
962 */
963static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd)
964{
965 unsigned long sbuf[2];
966 struct qib_devdata *dd = ppd->dd;
967
968 /*
969 * It's possible that sendbuffererror could have bits set; we might
970 * already have disarmed them as a result of hardware error handling.
971 */
972 sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
973 sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
974
975 if (sbuf[0] || sbuf[1])
976 qib_disarm_piobufs_set(dd, sbuf,
977 dd->piobcnt2k + dd->piobcnt4k);
978}
979
980static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
981{
982 int ret = 1;
983 u32 ibstate = qib_6120_iblink_state(ibcs);
984 u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);
985
986 if (linkrecov != dd->cspec->lastlinkrecov) {
987 /* and no more until active again */
988 dd->cspec->lastlinkrecov = 0;
989 qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
990 ret = 0;
991 }
992 if (ibstate == IB_PORT_ACTIVE)
993 dd->cspec->lastlinkrecov =
994 read_6120_creg32(dd, cr_iblinkerrrecov);
995 return ret;
996}
997
998static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
999{
1000 char *msg;
1001 u64 ignore_this_time = 0;
1002 u64 iserr = 0;
1003 int log_idx;
1004 struct qib_pportdata *ppd = dd->pport;
1005 u64 mask;
1006
1007 /* don't report errors that are masked */
1008 errs &= dd->cspec->errormask;
1009 msg = dd->cspec->emsgbuf;
1010
1011 /* do these first, they are most important */
1012 if (errs & ERR_MASK(HardwareErr))
1013 qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1014 else
1015 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1016 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1017 qib_inc_eeprom_err(dd, log_idx, 1);
1018
1019 if (errs & ~IB_E_BITSEXTANT)
1020 qib_dev_err(dd, "error interrupt with unknown errors "
1021 "%llx set\n",
1022 (unsigned long long) (errs & ~IB_E_BITSEXTANT));
1023
1024 if (errs & E_SUM_ERRS) {
1025 qib_disarm_6120_senderrbufs(ppd);
1026 if ((errs & E_SUM_LINK_PKTERRS) &&
1027 !(ppd->lflags & QIBL_LINKACTIVE)) {
1028 /*
1029 * This can happen when trying to bring the link
1030 * up, but the IB link changes state at the "wrong"
1031 * time. The IB logic then complains that the packet
1032 * isn't valid. We don't want to confuse people, so
1033 * we just don't print them, except at debug
1034 */
1035 ignore_this_time = errs & E_SUM_LINK_PKTERRS;
1036 }
1037 } else if ((errs & E_SUM_LINK_PKTERRS) &&
1038 !(ppd->lflags & QIBL_LINKACTIVE)) {
1039 /*
1040 * This can happen when SMA is trying to bring the link
1041 * up, but the IB link changes state at the "wrong" time.
1042 * The IB logic then complains that the packet isn't
1043 * valid. We don't want to confuse people, so we just
1044 * don't print them, except at debug
1045 */
1046 ignore_this_time = errs & E_SUM_LINK_PKTERRS;
1047 }
1048
1049 qib_write_kreg(dd, kr_errclear, errs);
1050
1051 errs &= ~ignore_this_time;
1052 if (!errs)
1053 goto done;
1054
1055 /*
1056 * The ones we mask off are handled specially below
1057 * or above.
1058 */
1059 mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
1060 ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
1061 qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
1062
1063 if (errs & E_SUM_PKTERRS)
1064 qib_stats.sps_rcverrs++;
1065 if (errs & E_SUM_ERRS)
1066 qib_stats.sps_txerrs++;
1067
1068 iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS);
1069
1070 if (errs & ERR_MASK(IBStatusChanged)) {
1071 u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
1072 u32 ibstate = qib_6120_iblink_state(ibcs);
1073 int handle = 1;
1074
1075 if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
1076 handle = chk_6120_linkrecovery(dd, ibcs);
1077 /*
1078 * Since going into a recovery state causes the link state
1079 * to go down and since recovery is transitory, it is better
1080 * if we "miss" ever seeing the link training state go into
1081 * recovery (i.e., ignore this transition for link state
1082 * special handling purposes) without updating lastibcstat.
1083 */
1084 if (handle && qib_6120_phys_portstate(ibcs) ==
1085 IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
1086 handle = 0;
1087 if (handle)
1088 qib_handle_e_ibstatuschanged(ppd, ibcs);
1089 }
1090
1091 if (errs & ERR_MASK(ResetNegated)) {
1092 qib_dev_err(dd, "Got reset, requires re-init "
1093 "(unload and reload driver)\n");
1094 dd->flags &= ~QIB_INITTED; /* needs re-init */
1095 /* mark as having had error */
1096 *dd->devstatusp |= QIB_STATUS_HWERROR;
1097 *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
1098 }
1099
1100 if (*msg && iserr)
1101 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1102
1103 if (ppd->state_wanted & ppd->lflags)
1104 wake_up_interruptible(&ppd->state_wait);
1105
1106 /*
1107 * If there were hdrq or egrfull errors, wake up any processes
1108 * waiting in poll. We used to try to check which contexts had
1109 * the overflow, but given the cost of that and the chip reads
1110 * to support it, it's better to just wake everybody up if we
1111 * get an overflow; waiters can poll again if it's not them.
1112 */
1113 if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1114 qib_handle_urcv(dd, ~0U);
1115 if (errs & ERR_MASK(RcvEgrFullErr))
1116 qib_stats.sps_buffull++;
1117 else
1118 qib_stats.sps_hdrfull++;
1119 }
1120done:
1121 return;
1122}
1123
1124/**
1125 * qib_6120_init_hwerrors - enable hardware errors
1126 * @dd: the qlogic_ib device
1127 *
1128 * now that we have finished initializing everything that might reasonably
1129 * cause a hardware error, and cleared those errors bits as they occur,
1130 * we can enable hardware errors in the mask (potentially enabling
1131 * freeze mode), and enable hardware errors as errors (along with
1132 * everything else) in errormask
1133 */
1134static void qib_6120_init_hwerrors(struct qib_devdata *dd)
1135{
1136 u64 val;
1137 u64 extsval;
1138
1139 extsval = qib_read_kreg64(dd, kr_extstatus);
1140
1141 if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST))
1142 qib_dev_err(dd, "MemBIST did not complete!\n");
1143
1144 /* init so all hwerrors interrupt, and enter freeze; adjust below */
1145 val = ~0ULL;
1146 if (dd->minrev < 2) {
1147 /*
1148 * Avoid problem with internal interface bus parity
1149 * checking. Fixed in Rev2.
1150 */
1151 val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM;
1152 }
1153 /* avoid some Intel CPUs' speculative-read freeze mode issue */
1154 val &= ~TXEMEMPARITYERR_PIOBUF;
1155
1156 dd->cspec->hwerrmask = val;
1157
1158 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
1159 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1160
1161 /* clear all */
1162 qib_write_kreg(dd, kr_errclear, ~0ULL);
1163 /* enable errors that are masked, at least this first time. */
1164 qib_write_kreg(dd, kr_errmask, ~0ULL);
1165 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
1166 /* clear any interrupts up to this point (ints still not enabled) */
1167 qib_write_kreg(dd, kr_intclear, ~0ULL);
1168
1169 qib_write_kreg(dd, kr_rcvbthqp,
1170 dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
1171 QIB_KD_QP);
1172}
1173
1174/*
1175 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
1176 * on chips that are count-based, rather than trigger-based. There is no
1177 * reference counting, but that's also fine, given the intended use.
1178 * Only chip-specific because it's all register accesses
1179 */
1180static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
1181{
1182 if (enable) {
1183 qib_write_kreg(dd, kr_errclear,
1184 ERR_MASK(SendPioArmLaunchErr));
1185 dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
1186 } else
1187 dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
1188 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1189}
1190
1191/*
1192 * Formerly took parameter <which> in pre-shifted,
1193 * pre-merged form with LinkCmd and LinkInitCmd
1194 * together, and assuming the zero was NOP.
1195 */
1196static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
1197 u16 linitcmd)
1198{
1199 u64 mod_wd;
1200 struct qib_devdata *dd = ppd->dd;
1201 unsigned long flags;
1202
1203 if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
1204 /*
1205 * If we are told to disable, note that so link-recovery
1206 * code does not attempt to bring us back up.
1207 */
1208 spin_lock_irqsave(&ppd->lflags_lock, flags);
1209 ppd->lflags |= QIBL_IB_LINK_DISABLED;
1210 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1211 } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
1212 /*
1213 * Any other linkinitcmd will lead to LINKDOWN and then
1214 * to INIT (if all is well), so clear flag to let
1215 * link-recovery code attempt to bring us back up.
1216 */
1217 spin_lock_irqsave(&ppd->lflags_lock, flags);
1218 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
1219 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1220 }
1221
1222 mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) |
1223 (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1224
1225 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
1226 /* write to chip to prevent back-to-back writes of control reg */
1227 qib_write_kreg(dd, kr_scratch, 0);
1228}
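/*
 * Usage sketch (illustrative, not driver code): a caller takes the
 * link down and leaves it polling by combining the two command fields,
 * which qib_set_ib_6120_lstate() then shifts into the ibcctrl write.
 */
static inline void example_6120_link_down(struct qib_pportdata *ppd)
{
	qib_set_ib_6120_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
			       QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}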
1229
1230/**
1231 * qib_6120_bringup_serdes - bring up the serdes
1232 * @dd: the qlogic_ib device
1233 */
1234static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
1235{
1236 struct qib_devdata *dd = ppd->dd;
1237 u64 val, config1, prev_val, hwstat, ibc;
1238
1239 /* Put IBC in reset, sends disabled */
1240 dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1241 qib_write_kreg(dd, kr_control, 0ULL);
1242
1243 dd->cspec->ibdeltainprog = 1;
1244 dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
1245 dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);
1246
1247 /* flowcontrolwatermark is in units of KBytes */
1248 ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
1249 /*
1250 * How often flowctrl sent. More or less in usecs; balance against
1251 * watermark value, so that in theory senders always get a flow
1252 * control update in time to not let the IB link go idle.
1253 */
1254 ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
1255 /* max error tolerance */
1256 dd->cspec->lli_thresh = 0xf;
1257 ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
1258 /* use "real" buffer space for IB credit flow control */
1259 ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
1261 ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
1262 /*
1263 * set initial max size pkt IBC will send, including ICRC; it's the
1264 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
1265 */
1266 ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
1267 dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
1268
1269 /* initially come up waiting for TS1, without sending anything. */
1270 val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
1271 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1272 qib_write_kreg(dd, kr_ibcctrl, val);
1273
1274 val = qib_read_kreg64(dd, kr_serdes_cfg0);
1275 config1 = qib_read_kreg64(dd, kr_serdes_cfg1);
1276
1277 /*
1278 * Force reset on, also set rxdetect enable. Must do before reading
1279 * serdesstatus at least for simulation, or some of the bits in
1280 * serdes status will come back as undefined and cause simulation
1281 * failures
1282 */
1283 val |= SYM_MASK(SerdesCfg0, ResetPLL) |
1284 SYM_MASK(SerdesCfg0, RxDetEnX) |
1285 (SYM_MASK(SerdesCfg0, L1PwrDnA) |
1286 SYM_MASK(SerdesCfg0, L1PwrDnB) |
1287 SYM_MASK(SerdesCfg0, L1PwrDnC) |
1288 SYM_MASK(SerdesCfg0, L1PwrDnD));
1289 qib_write_kreg(dd, kr_serdes_cfg0, val);
1290 /* be sure chip saw it */
1291 qib_read_kreg64(dd, kr_scratch);
1292 udelay(5); /* need pll reset set at least for a bit */
1293 /*
1294 * after PLL is reset, set the per-lane Resets and TxIdle and
1295 * clear the PLL reset and rxdetect (to get falling edge).
1296 * Leave L1PWR bits set (permanently)
1297 */
1298 val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) |
1299 SYM_MASK(SerdesCfg0, ResetPLL) |
1300 (SYM_MASK(SerdesCfg0, L1PwrDnA) |
1301 SYM_MASK(SerdesCfg0, L1PwrDnB) |
1302 SYM_MASK(SerdesCfg0, L1PwrDnC) |
1303 SYM_MASK(SerdesCfg0, L1PwrDnD)));
1304 val |= (SYM_MASK(SerdesCfg0, ResetA) |
1305 SYM_MASK(SerdesCfg0, ResetB) |
1306 SYM_MASK(SerdesCfg0, ResetC) |
1307 SYM_MASK(SerdesCfg0, ResetD)) |
1308 SYM_MASK(SerdesCfg0, TxIdeEnX);
1309 qib_write_kreg(dd, kr_serdes_cfg0, val);
1310 /* be sure chip saw it */
1311 (void) qib_read_kreg64(dd, kr_scratch);
1312 /* need PLL reset clear for at least 11 usec before lane
1313 * resets cleared; give it a few more to be sure */
1314 udelay(15);
1315 val &= ~((SYM_MASK(SerdesCfg0, ResetA) |
1316 SYM_MASK(SerdesCfg0, ResetB) |
1317 SYM_MASK(SerdesCfg0, ResetC) |
1318 SYM_MASK(SerdesCfg0, ResetD)) |
1319 SYM_MASK(SerdesCfg0, TxIdeEnX));
1320
1321 qib_write_kreg(dd, kr_serdes_cfg0, val);
1322 /* be sure chip saw it */
1323 (void) qib_read_kreg64(dd, kr_scratch);
1324
1325 val = qib_read_kreg64(dd, kr_xgxs_cfg);
1326 prev_val = val;
1327 if (val & QLOGIC_IB_XGXS_RESET)
1328 val &= ~QLOGIC_IB_XGXS_RESET;
1329 if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) {
1330 /* need to compensate for Tx inversion in partner */
1331 val &= ~SYM_MASK(XGXSCfg, polarity_inv);
1332 val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv);
1333 }
1334 if (val != prev_val)
1335 qib_write_kreg(dd, kr_xgxs_cfg, val);
1336
1337 val = qib_read_kreg64(dd, kr_serdes_cfg0);
1338
1339 /* clear current and de-emphasis bits */
1340 config1 &= ~0x0ffffffff00ULL;
1341 /* set current to 20ma */
1342 config1 |= 0x00000000000ULL;
1343 /* set de-emphasis to -5.68dB */
1344 config1 |= 0x0cccc000000ULL;
1345 qib_write_kreg(dd, kr_serdes_cfg1, config1);
1346
1347 /* base and port guid same for single port */
1348 ppd->guid = dd->base_guid;
1349
1350 /*
1351 * the process of setting and un-resetting the serdes normally
1352 * causes a serdes PLL error, so check for that and clear it
1353 * here. Also clear the hwerr bit in errstatus, but not others.
1354 */
1355 hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
1356 if (hwstat) {
1357 /* should just have PLL; clear all that are set, in any case */
1358 if (hwstat & ~QLOGIC_IB_HWE_SERDESPLLFAILED)
1359 qib_write_kreg(dd, kr_hwerrclear, hwstat);
1360 qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
1361 }
1362
1363 dd->control |= QLOGIC_IB_C_LINKENABLE;
1364 dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
1365 qib_write_kreg(dd, kr_control, dd->control);
1366
1367 return 0;
1368}
1369
1370/**
1371 * qib_6120_quiet_serdes - set serdes to txidle
1372 * @ppd: physical port of the qlogic_ib device
1373 * Called when driver is being unloaded
1374 */
1375static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
1376{
1377 struct qib_devdata *dd = ppd->dd;
1378 u64 val;
1379
1380 qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1381
1382 /* disable IBC */
1383 dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1384 qib_write_kreg(dd, kr_control,
1385 dd->control | QLOGIC_IB_C_FREEZEMODE);
1386
1387 if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
1388 dd->cspec->ibdeltainprog) {
1389 u64 diagc;
1390
1391 /* enable counter writes */
1392 diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
1393 qib_write_kreg(dd, kr_hwdiagctrl,
1394 diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
1395
1396 if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
1397 val = read_6120_creg32(dd, cr_ibsymbolerr);
1398 if (dd->cspec->ibdeltainprog)
1399 val -= val - dd->cspec->ibsymsnap;
1400 val -= dd->cspec->ibsymdelta;
1401 write_6120_creg(dd, cr_ibsymbolerr, val);
1402 }
1403 if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
1404 val = read_6120_creg32(dd, cr_iblinkerrrecov);
1405 if (dd->cspec->ibdeltainprog)
1406 val -= val - dd->cspec->iblnkerrsnap;
1407 val -= dd->cspec->iblnkerrdelta;
1408 write_6120_creg(dd, cr_iblinkerrrecov, val);
1409 }
1410
1411 /* and disable counter writes */
1412 qib_write_kreg(dd, kr_hwdiagctrl, diagc);
1413 }
1414
1415 val = qib_read_kreg64(dd, kr_serdes_cfg0);
1416 val |= SYM_MASK(SerdesCfg0, TxIdeEnX);
1417 qib_write_kreg(dd, kr_serdes_cfg0, val);
1418}
1419
1420/**
1421 * qib_6120_setup_setextled - set the state of the two external LEDs
1422 * @dd: the qlogic_ib device
1423 * @on: whether the link is up or not
1424 *
1425 * The exact combo of LEDs if on is true is determined by looking
1426 * at the ibcstatus.
1427 *
1428 * These LEDs indicate the physical and logical state of IB link.
1429 * For this chip (at least with recommended board pinouts), LED1
1431 * is Yellow (logical state) and LED2 is Green (physical state).
1431 *
1432 * Note: We try to match the Mellanox HCA LED behavior as best
1433 * we can. Green indicates physical link state is OK (something is
1434 * plugged in, and we can train).
1435 * Amber indicates the link is logically up (ACTIVE).
1436 * Mellanox further blinks the amber LED to indicate data packet
1437 * activity, but we have no hardware support for that, so it would
1438 * require waking up every 10-20 msecs and checking the counters
1439 * on the chip, and then turning the LED off if appropriate. That's
1440 * visible overhead, so not something we will do.
1441 *
1442 */
1443static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
1444{
1445 u64 extctl, val, lst, ltst;
1446 unsigned long flags;
1447 struct qib_devdata *dd = ppd->dd;
1448
1449 /*
1450 * The diags use the LED to indicate diag info, so we leave
1451 * the external LED alone when the diags are running.
1452 */
1453 if (dd->diag_client)
1454 return;
1455
1456 /* Allow override of LED display for, e.g. Locating system in rack */
1457 if (ppd->led_override) {
1458 ltst = (ppd->led_override & QIB_LED_PHYS) ?
1459 IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
1460 lst = (ppd->led_override & QIB_LED_LOG) ?
1461 IB_PORT_ACTIVE : IB_PORT_DOWN;
1462 } else if (on) {
1463 val = qib_read_kreg64(dd, kr_ibcstatus);
1464 ltst = qib_6120_phys_portstate(val);
1465 lst = qib_6120_iblink_state(val);
1466 } else {
1467 ltst = 0;
1468 lst = 0;
1469 }
1470
1471 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
1472 extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
1473 SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
1474
1475 if (ltst == IB_PHYSPORTSTATE_LINKUP)
1476 extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
1477 if (lst == IB_PORT_ACTIVE)
1478 extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
1479 dd->cspec->extctrl = extctl;
1480 qib_write_kreg(dd, kr_extctrl, extctl);
1481 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
1482}
1483
1484static void qib_6120_free_irq(struct qib_devdata *dd)
1485{
1486 if (dd->cspec->irq) {
1487 free_irq(dd->cspec->irq, dd);
1488 dd->cspec->irq = 0;
1489 }
1490 qib_nomsi(dd);
1491}
1492
1493/**
1494 * qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff
1495 * @dd: the qlogic_ib device
1496 *
1497 * This is called during driver unload.
1498*/
1499static void qib_6120_setup_cleanup(struct qib_devdata *dd)
1500{
1501 qib_6120_free_irq(dd);
1502 kfree(dd->cspec->cntrs);
1503 kfree(dd->cspec->portcntrs);
1504 if (dd->cspec->dummy_hdrq) {
1505 dma_free_coherent(&dd->pcidev->dev,
1506 ALIGN(dd->rcvhdrcnt *
1507 dd->rcvhdrentsize *
1508 sizeof(u32), PAGE_SIZE),
1509 dd->cspec->dummy_hdrq,
1510 dd->cspec->dummy_hdrq_phys);
1511 dd->cspec->dummy_hdrq = NULL;
1512 }
1513}
1514
1515static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
1516{
1517 unsigned long flags;
1518
1519 spin_lock_irqsave(&dd->sendctrl_lock, flags);
1520 if (needint)
1521 dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
1522 else
1523 dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
1524 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
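 /* flush with a scratch write; the chip requires no back-to-back sendctrl writes */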
1525 qib_write_kreg(dd, kr_scratch, 0ULL);
1526 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
1527}
1528
1529/*
1530 * Handle errors and unusual events first; separate function
1531 * to improve cache hits for fast path interrupt handling.
1532 */
1533static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
1534{
1535 if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
1536 qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
1537 istat & ~QLOGIC_IB_I_BITSEXTANT);
1538
1539 if (istat & QLOGIC_IB_I_ERROR) {
1540 u64 estat = 0;
1541
1542 qib_stats.sps_errints++;
1543 estat = qib_read_kreg64(dd, kr_errstatus);
1544 if (!estat)
1545 qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
1546 "but no error bits set!\n", istat);
1547 handle_6120_errors(dd, estat);
1548 }
1549
1550 if (istat & QLOGIC_IB_I_GPIO) {
1551 u32 gpiostatus;
1552 u32 to_clear = 0;
1553
1554 /*
1555 * GPIO_3..5 on IBA6120 Rev2 chips indicate
1556 * errors that we need to count.
1557 */
1558 gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
1559 /* First the error-counter case. */
1560 if (gpiostatus & GPIO_ERRINTR_MASK) {
1561 /* want to clear the bits we see asserted. */
1562 to_clear |= (gpiostatus & GPIO_ERRINTR_MASK);
1563
1564 /*
1565 * Count appropriately, clear bits out of our copy,
1566 * as they have been "handled".
1567 */
1568 if (gpiostatus & (1 << GPIO_RXUVL_BIT))
1569 dd->cspec->rxfc_unsupvl_errs++;
1570 if (gpiostatus & (1 << GPIO_OVRUN_BIT))
1571 dd->cspec->overrun_thresh_errs++;
1572 if (gpiostatus & (1 << GPIO_LLI_BIT))
1573 dd->cspec->lli_errs++;
1574 gpiostatus &= ~GPIO_ERRINTR_MASK;
1575 }
1576 if (gpiostatus) {
1577 /*
1578 * Some unexpected bits remain. If they could have
1579 * caused the interrupt, complain and clear.
1580 * To avoid repetition of this condition, also clear
1581 * the mask. It is almost certainly due to error.
1582 */
1583 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
1584
1585 /*
1586 * Also check that the chip reflects our shadow,
1587 * and report issues. If they caused the interrupt,
1588 * we will suppress recurrence by refreshing from the shadow.
1589 */
1590 if (mask & gpiostatus) {
1591 to_clear |= (gpiostatus & mask);
1592 dd->cspec->gpio_mask &= ~(gpiostatus & mask);
1593 qib_write_kreg(dd, kr_gpio_mask,
1594 dd->cspec->gpio_mask);
1595 }
1596 }
1597 if (to_clear)
1598 qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
1599 }
1600}
1601
1602static irqreturn_t qib_6120intr(int irq, void *data)
1603{
1604 struct qib_devdata *dd = data;
1605 irqreturn_t ret;
1606 u32 istat, ctxtrbits, rmask, crcs = 0;
1607 unsigned i;
1608
1609 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
1610 /*
1611 * This return value is not great, but we do not want the
1612 * interrupt core code to remove our interrupt handler
1613 * because we don't appear to be handling an interrupt
1614 * during a chip reset.
1615 */
1616 ret = IRQ_HANDLED;
1617 goto bail;
1618 }
1619
1620 istat = qib_read_kreg32(dd, kr_intstatus);
1621
1622 if (unlikely(!istat)) {
1623 ret = IRQ_NONE; /* not our interrupt, or already handled */
1624 goto bail;
1625 }
1626 if (unlikely(istat == -1)) {
1627 qib_bad_intrstatus(dd);
1628 /* don't know if it was our interrupt or not */
1629 ret = IRQ_NONE;
1630 goto bail;
1631 }
1632
1633 qib_stats.sps_ints++;
1634 if (dd->int_counter != (u32) -1)
1635 dd->int_counter++;
1636
1637 if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
1638 QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
1639 unlikely_6120_intr(dd, istat);
1640
1641 /*
1642 * Clear the interrupt bits we found set, relatively early, so we
1643 * "know" know the chip will have seen this by the time we process
1644 * the queue, and will re-interrupt if necessary. The processor
1645 * itself won't take the interrupt again until we return.
1646 */
1647 qib_write_kreg(dd, kr_intclear, istat);
1648
1649 /*
1650 * Handle kernel receive queues before checking for pio buffers
1651 * available since receives can overflow; piobuf waiters can afford
1652 * a few extra cycles, since they were waiting anyway.
1653 */
1654 ctxtrbits = istat &
1655 ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1656 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
1657 if (ctxtrbits) {
1658 rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1659 (1U << QLOGIC_IB_I_RCVURG_SHIFT);
1660 for (i = 0; i < dd->first_user_ctxt; i++) {
1661 if (ctxtrbits & rmask) {
1662 ctxtrbits &= ~rmask;
1663 crcs += qib_kreceive(dd->rcd[i],
1664 &dd->cspec->lli_counter,
1665 NULL);
1666 }
1667 rmask <<= 1;
1668 }
1669 if (crcs) {
1670 u32 cntr = dd->cspec->lli_counter;
1671 cntr += crcs;
1672 if (cntr) {
1673 if (cntr > dd->cspec->lli_thresh) {
1674 dd->cspec->lli_counter = 0;
1675 dd->cspec->lli_errs++;
1676 } else
1677 dd->cspec->lli_counter += cntr;
1678 }
1679 }
1680
1682 if (ctxtrbits) {
1683 ctxtrbits =
1684 (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1685 (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
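 /* remaining bits are for user contexts; fold avail and urg into one per-ctxt mask */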
1686 qib_handle_urcv(dd, ctxtrbits);
1687 }
1688 }
1689
1690 if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
1691 qib_ib_piobufavail(dd);
1692
1693 ret = IRQ_HANDLED;
1694bail:
1695 return ret;
1696}
1697
1698/*
1699 * Set up our chip-specific interrupt handler
1700 * The interrupt type has already been setup, so
1701 * we just need to do the registration and error checking.
1702 */
1703static void qib_setup_6120_interrupt(struct qib_devdata *dd)
1704{
1705 /*
1706 * If the chip supports added error indication via GPIO pins,
1707 * enable interrupts on those bits so the interrupt routine
1708 * can count the events. Also set flag so interrupt routine
1709 * can know they are expected.
1710 */
1711 if (SYM_FIELD(dd->revision, Revision_R,
1712 ChipRevMinor) > 1) {
1713 /* Rev2+ reports extra errors via internal GPIO pins */
1714 dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
1715 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1716 }
1717
1718 if (!dd->cspec->irq)
1719 qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
1720 "work\n");
1721 else {
1722 int ret;
1723 ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
1724 QIB_DRV_NAME, dd);
1725 if (ret)
1726 qib_dev_err(dd, "Couldn't setup interrupt "
1727 "(irq=%d): %d\n", dd->cspec->irq,
1728 ret);
1729 }
1730}
1731
1732/**
1733 * pe_boardname - fill in the board name
1734 * @dd: the qlogic_ib device
1735 *
1736 * Info is based on the board revision register.
1737 */
1738static void pe_boardname(struct qib_devdata *dd)
1739{
1740 char *n;
1741 u32 boardid, namelen;
1742
1743 boardid = SYM_FIELD(dd->revision, Revision,
1744 BoardID);
1745
1746 switch (boardid) {
1747 case 2:
1748 n = "InfiniPath_QLE7140";
1749 break;
1750 default:
1751 qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
1752 n = "Unknown_InfiniPath_6120";
1753 break;
1754 }
1755 namelen = strlen(n) + 1;
1756 dd->boardname = kmalloc(namelen, GFP_KERNEL);
1757 if (!dd->boardname)
1758 qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
1759 else
1760 snprintf(dd->boardname, namelen, "%s", n);
1761
1762 if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
1763 qib_dev_err(dd, "Unsupported InfiniPath hardware revision "
1764 "%u.%u!\n", dd->majrev, dd->minrev);
1765
1766 snprintf(dd->boardversion, sizeof(dd->boardversion),
1767 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
1768 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
1769 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
1770 dd->majrev, dd->minrev,
1771 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
1772
1773}
1774
1775/*
1776 * This routine sleeps, so it can only be called from user context, not
1777 * from interrupt context. If we need interrupt context, we can split
1778 * it into two routines.
1779 */
1780static int qib_6120_setup_reset(struct qib_devdata *dd)
1781{
1782 u64 val;
1783 int i;
1784 int ret;
1785 u16 cmdval;
1786 u8 int_line, clinesz;
1787
1788 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
1789
1790 /* Use ERROR so it shows up in logs, etc. */
1791 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
1792
1793 /* no interrupts till re-initted */
1794 qib_6120_set_intr_state(dd, 0);
1795
1796 dd->cspec->ibdeltainprog = 0;
1797 dd->cspec->ibsymdelta = 0;
1798 dd->cspec->iblnkerrdelta = 0;
1799
1800 /*
1801 * Keep chip from being accessed until we are ready. Use
1802 * writeq() directly, to allow the write even though QIB_PRESENT
1803 * isn't set.
1804 */
1805 dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
1806 dd->int_counter = 0; /* so we check interrupts work again */
1807 val = dd->control | QLOGIC_IB_C_RESET;
1808 writeq(val, &dd->kregbase[kr_control]);
1809 mb(); /* prevent compiler re-ordering around actual reset */
1810
1811 for (i = 1; i <= 5; i++) {
1812 /*
1813 * Allow MBIST, etc. to complete; longer on each retry.
1814 * We sometimes get machine checks from bus timeout if no
1815 * response, so for now, make it *really* long.
1816 */
1817 msleep(1000 + (1 + i) * 2000);
1818
1819 qib_pcie_reenable(dd, cmdval, int_line, clinesz);
1820
1821 /*
1822 * Use readq directly, so we don't need to mark it as PRESENT
1823 * until we get a successful indication that all is well.
1824 */
1825 val = readq(&dd->kregbase[kr_revision]);
1826 if (val == dd->revision) {
1827 dd->flags |= QIB_PRESENT; /* it's back */
1828 ret = qib_reinit_intr(dd);
1829 goto bail;
1830 }
1831 }
1832 ret = 0; /* failed */
1833
1834bail:
1835 if (ret) {
1836 if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
1837 qib_dev_err(dd, "Reset failed to setup PCIe or "
1838 "interrupts; continuing anyway\n");
1839 /* clear the reset error, init error/hwerror mask */
1840 qib_6120_init_hwerrors(dd);
1841 /* for Rev2 error interrupts; nop for rev 1 */
1842 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1845 }
1846 return ret;
1847}
1848
1849/**
1850 * qib_6120_put_tid - write a TID in chip
1851 * @dd: the qlogic_ib device
1852 * @tidptr: pointer to the expected TID (in chip) to update
1853 * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
1854 * for expected
1855 * @pa: physical address of in memory buffer; tidinvalid if freeing
1856 *
1857 * This exists as a separate routine to allow for special locking etc.
1858 * It's used for both the full cleanup on exit, as well as the normal
1859 * setup and teardown.
1860 */
1861static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
1862 u32 type, unsigned long pa)
1863{
1864 u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1865 unsigned long flags;
1866 int tidx;
1867 spinlock_t *tidlockp; /* select appropriate spinlock */
1868
1869 if (!dd->kregbase)
1870 return;
1871
1872 if (pa != dd->tidinvalid) {
1873 if (pa & ((1U << 11) - 1)) {
1874 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
1875 pa);
1876 return;
1877 }
1878 pa >>= 11;
1879 if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
1880 qib_dev_err(dd, "Physical page address 0x%lx "
1881 "larger than supported\n", pa);
1882 return;
1883 }
1884
1885 if (type == RCVHQ_RCV_TYPE_EAGER)
1886 pa |= dd->tidtemplate;
1887 else /* for now, always full 4KB page */
1888 pa |= 2 << 29;
1889 }
1890
1891 /*
1892 * Avoid chip issue by writing the scratch register
1893 * before and after the TID, and with an io write barrier.
1894 * We use a spinlock around the writes, so they can't intermix
1895 * with other TID (eager or expected) writes (the chip problem
1896 * is triggered by back to back TID writes). Unfortunately, this
1897 * call can be done from interrupt level for the ctxt 0 eager TIDs,
1898 * so we have to use irqsave locks.
1899 */
1900 /*
1901 * Assumes tidptr always > egrtidbase
1902 * if type == RCVHQ_RCV_TYPE_EAGER.
1903 */
1904 tidx = tidptr - dd->egrtidbase;
1905
1906 tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
1907 ? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
1908 spin_lock_irqsave(tidlockp, flags);
1909 qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
1910 writel(pa, tidp32);
1911 qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
1912 mmiowb();
1913 spin_unlock_irqrestore(tidlockp, flags);
1914}
1915
1916/**
1917 * qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
1918 * @dd: the qlogic_ib device
1919 * @tidptr: pointer to the expected TID (in chip) to update
1920 * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
1921 * for expected
1922 * @pa: physical address of in memory buffer; tidinvalid if freeing
1923 *
1924 * This exists as a separate routine to allow for selection of the
1925 * appropriate "flavor". The static calls in cleanup just use the
1926 * revision-agnostic form, as they are not performance critical.
1927 */
1928static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
1929 u32 type, unsigned long pa)
1930{
1931 u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
1932 u32 tidx;
1933
1934 if (!dd->kregbase)
1935 return;
1936
1937 if (pa != dd->tidinvalid) {
1938 if (pa & ((1U << 11) - 1)) {
1939 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
1940 pa);
1941 return;
1942 }
1943 pa >>= 11;
1944 if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
1945 qib_dev_err(dd, "Physical page address 0x%lx "
1946 "larger than supported\n", pa);
1947 return;
1948 }
1949
1950 if (type == RCVHQ_RCV_TYPE_EAGER)
1951 pa |= dd->tidtemplate;
1952 else /* for now, always full 4KB page */
1953 pa |= 2 << 29;
1954 }
1955 tidx = tidptr - dd->egrtidbase;
1956 writel(pa, tidp32);
1957 mmiowb();
1958}
1959
1961/**
1962 * qib_6120_clear_tids - clear all TID entries for a context, expected and eager
1963 * @dd: the qlogic_ib device
1964 * @rcd: the context data
1965 *
1966 * clear all TID entries for a context, expected and eager.
1967 * Used from qib_close(). On this chip, TIDs are only 32 bits,
1968 * not 64, but they are still on 64 bit boundaries, so tidbase
1969 * is declared as u64 * for the pointer math, even though we write 32 bits
1970 */
1971static void qib_6120_clear_tids(struct qib_devdata *dd,
1972 struct qib_ctxtdata *rcd)
1973{
1974 u64 __iomem *tidbase;
1975 unsigned long tidinv;
1976 u32 ctxt;
1977 int i;
1978
1979 if (!dd->kregbase || !rcd)
1980 return;
1981
1982 ctxt = rcd->ctxt;
1983
1984 tidinv = dd->tidinvalid;
1985 tidbase = (u64 __iomem *)
1986 ((char __iomem *)(dd->kregbase) +
1987 dd->rcvtidbase +
1988 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
1989
1990 for (i = 0; i < dd->rcvtidcnt; i++)
1991 /* use func pointer because could be one of two funcs */
1992 dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
1993 tidinv);
1994
1995 tidbase = (u64 __iomem *)
1996 ((char __iomem *)(dd->kregbase) +
1997 dd->rcvegrbase +
1998 rcd->rcvegr_tid_base * sizeof(*tidbase));
1999
2000 for (i = 0; i < rcd->rcvegrcnt; i++)
2001 /* use func pointer because could be one of two funcs */
2002 dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
2003 tidinv);
2004}
2005
2006/**
2007 * qib_6120_tidtemplate - setup constants for TID updates
2008 * @dd: the qlogic_ib device
2009 *
2010 * We setup stuff that we use a lot, to avoid calculating each time
2011 */
2012static void qib_6120_tidtemplate(struct qib_devdata *dd)
2013{
2014 u32 egrsize = dd->rcvegrbufsize;
2015
2016 /*
2017 * For now, we always allocate 4KB buffers (at init) so we can
2018 * receive max size packets. We may want a module parameter to
2019 * specify 2KB or 4KB and/or make it per ctxt instead of per device
2020 * for those who want to reduce memory footprint. Note that the
2021 * rcvhdrentsize size must be large enough to hold the largest
2022 * IB header (currently 96 bytes) that we expect to handle (plus of
2023 * course the 2 dwords of RHF).
2024 */
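 /* bits 30:29 of the template encode the buffer size: 1 is 2KB, 2 is 4KB */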
2025 if (egrsize == 2048)
2026 dd->tidtemplate = 1U << 29;
2027 else if (egrsize == 4096)
2028 dd->tidtemplate = 2U << 29;
2029 dd->tidinvalid = 0;
2030}
2031
2032int __attribute__((weak)) qib_unordered_wc(void)
2033{
2034 return 0;
2035}
2036
2037/**
2038 * qib_6120_get_base_info - set chip-specific flags for user code
2039 * @rcd: the qlogic_ib ctxt
2040 * @kbase: qib_base_info pointer
2041 *
2042 * We set the PCIE flag because the lower bandwidth on PCIe vs
2043 * HyperTransport can affect some user packet algorithms.
2044 */
2045static int qib_6120_get_base_info(struct qib_ctxtdata *rcd,
2046 struct qib_base_info *kinfo)
2047{
2048 if (qib_unordered_wc())
2049 kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER;
2050
2051 kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
2052 QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED;
2053 return 0;
2054}
2055
2056
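/*
 * On this chip the RHF occupies the first two dwords of each rcvhdrq
 * entry (rhf_offset is 0), so the message header starts one u64 past
 * the rhf address.
 */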
2057static struct qib_message_header *
2058qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
2059{
2060 return (struct qib_message_header *)
2061 &rhf_addr[sizeof(u64) / sizeof(u32)];
2062}
2063
2064static void qib_6120_config_ctxts(struct qib_devdata *dd)
2065{
2066 dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
2067 if (qib_n_krcv_queues > 1) {
2068 dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
2069 if (dd->first_user_ctxt > dd->ctxtcnt)
2070 dd->first_user_ctxt = dd->ctxtcnt;
2071 dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
2072 } else
2073 dd->first_user_ctxt = dd->num_pports;
2074 dd->n_krcv_queues = dd->first_user_ctxt;
2075}
2076
2077static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
2078 u32 updegr, u32 egrhd)
2079{
2080 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
2081 if (updegr)
2082 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
2083}
2084
2085static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
2086{
2087 u32 head, tail;
2088
2089 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
2090 if (rcd->rcvhdrtail_kvaddr)
2091 tail = qib_get_rcvhdrtail(rcd);
2092 else
2093 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
2094 return head == tail;
2095}
2096
2097/*
2098 * Used when we close any ctxt, for DMA already in flight
2099 * at close. Can't be done until we know hdrq size, so not
2100 * early in chip init.
2101 */
2102static void alloc_dummy_hdrq(struct qib_devdata *dd)
2103{
2104 dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
2105 dd->rcd[0]->rcvhdrq_size,
2106 &dd->cspec->dummy_hdrq_phys,
2107 GFP_KERNEL | __GFP_COMP);
2108 if (!dd->cspec->dummy_hdrq) {
2109 qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
2110 /* fallback to just 0'ing */
2111 dd->cspec->dummy_hdrq_phys = 0UL;
2112 }
2113}
2114
2115/*
2116 * Modify the RCVCTRL register in chip-specific way. This
2117 * is a function because bit positions and (future) register
2118 * locations are chip-specific, but the needed operations are
2119 * generic. <op> is a bit-mask because we often want to
2120 * do multiple modifications.
2121 */
2122static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op,
2123 int ctxt)
2124{
2125 struct qib_devdata *dd = ppd->dd;
2126 u64 mask, val;
2127 unsigned long flags;
2128
2129 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2130
2131 if (op & QIB_RCVCTRL_TAILUPD_ENB)
2132 dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
2133 if (op & QIB_RCVCTRL_TAILUPD_DIS)
2134 dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
2135 if (op & QIB_RCVCTRL_PKEY_ENB)
2136 dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
2137 if (op & QIB_RCVCTRL_PKEY_DIS)
2138 dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
2139 if (ctxt < 0)
2140 mask = (1ULL << dd->ctxtcnt) - 1;
2141 else
2142 mask = (1ULL << ctxt);
2143 if (op & QIB_RCVCTRL_CTXT_ENB) {
2144 /* always done for specific ctxt */
2145 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
2146 if (!(dd->flags & QIB_NODMA_RTAIL))
2147 dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
2148 /* Write these registers before the context is enabled. */
2149 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
2150 dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
2151 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
2152 dd->rcd[ctxt]->rcvhdrq_phys);
2153
2154 if (ctxt == 0 && !dd->cspec->dummy_hdrq)
2155 alloc_dummy_hdrq(dd);
2156 }
2157 if (op & QIB_RCVCTRL_CTXT_DIS)
2158 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
2159 if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
2160 dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
2161 if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
2162 dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
2163 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2164 if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
2165 /* arm rcv interrupt */
2166 val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
2167 dd->rhdrhead_intr_off;
2168 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2169 }
2170 if (op & QIB_RCVCTRL_CTXT_ENB) {
2171 /*
2172 * Init the context registers also; if we were
2173 * disabled, tail and head should both be zero
2174 * already from the enable, but since we don't
2175 * know, we have to do it explicitly.
2176 */
2177 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
2178 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
2179
2180 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
2181 dd->rcd[ctxt]->head = val;
2182 /* If kctxt, interrupt on next receive. */
2183 if (ctxt < dd->first_user_ctxt)
2184 val |= dd->rhdrhead_intr_off;
2185 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2186 }
2187 if (op & QIB_RCVCTRL_CTXT_DIS) {
2188 /*
2189 * Be paranoid, and never write 0's to these, just use an
2190 * unused page. Of course,
2191 * rcvhdraddr points to a large chunk of memory, so this
2192 * could still trash things, but at least it won't trash
2193 * page 0, and by disabling the ctxt, it should stop "soon",
2194 * even if a packet or two is already in flight after we
2195 * disabled the ctxt. Only 6120 has this issue.
2196 */
2197 if (ctxt >= 0) {
2198 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
2199 dd->cspec->dummy_hdrq_phys);
2200 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
2201 dd->cspec->dummy_hdrq_phys);
2202 } else {
2203 unsigned i;
2204
2205 for (i = 0; i < dd->cfgctxts; i++) {
2206 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
2207 i, dd->cspec->dummy_hdrq_phys);
2208 qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
2209 i, dd->cspec->dummy_hdrq_phys);
2210 }
2211 }
2212 }
2213 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2214}
2215
2216/*
2217 * Modify the SENDCTRL register in chip-specific way. This
2218 * is a function because there may be multiple such registers with
2219 * slightly different layouts. Only the operations actually used
2220 * are implemented so far.
2221 * The chip requires no back-to-back sendctrl writes, so we write
2222 * the scratch register after writing sendctrl.
2223 */
2224static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
2225{
2226 struct qib_devdata *dd = ppd->dd;
2227 u64 tmp_dd_sendctrl;
2228 unsigned long flags;
2229
2230 spin_lock_irqsave(&dd->sendctrl_lock, flags);
2231
2232 /* First the ones that are "sticky", saved in shadow */
2233 if (op & QIB_SENDCTRL_CLEAR)
2234 dd->sendctrl = 0;
2235 if (op & QIB_SENDCTRL_SEND_DIS)
2236 dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
2237 else if (op & QIB_SENDCTRL_SEND_ENB)
2238 dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
2239 if (op & QIB_SENDCTRL_AVAIL_DIS)
2240 dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
2241 else if (op & QIB_SENDCTRL_AVAIL_ENB)
2242 dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);
2243
2244 if (op & QIB_SENDCTRL_DISARM_ALL) {
2245 u32 i, last;
2246
2247 tmp_dd_sendctrl = dd->sendctrl;
2248 /*
2249 * disarm any that are not yet launched, disabling sends
2250 * and updates until done.
2251 */
2252 last = dd->piobcnt2k + dd->piobcnt4k;
2253 tmp_dd_sendctrl &=
2254 ~(SYM_MASK(SendCtrl, PIOEnable) |
2255 SYM_MASK(SendCtrl, PIOBufAvailUpd));
2256 for (i = 0; i < last; i++) {
2257 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
2258 SYM_MASK(SendCtrl, Disarm) | i);
2259 qib_write_kreg(dd, kr_scratch, 0);
2260 }
2261 }
2262
2263 tmp_dd_sendctrl = dd->sendctrl;
2264
2265 if (op & QIB_SENDCTRL_FLUSH)
2266 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
2267 if (op & QIB_SENDCTRL_DISARM)
2268 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
2269 ((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) <<
2270 SYM_LSB(SendCtrl, DisarmPIOBuf));
2271 if (op & QIB_SENDCTRL_AVAIL_BLIP)
2272 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
2273
2274 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
2275 qib_write_kreg(dd, kr_scratch, 0);
2276
2277 if (op & QIB_SENDCTRL_AVAIL_BLIP) {
2278 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2279 qib_write_kreg(dd, kr_scratch, 0);
2280 }
2281
2282 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2283
2284 if (op & QIB_SENDCTRL_FLUSH) {
2285 u32 v;
2286 /*
2287 * ensure writes have hit chip, then do a few
2288 * more reads, to allow DMA of pioavail registers
2289 * to occur, so in-memory copy is in sync with
2290 * the chip. Not always safe to sleep.
2291 */
2292 v = qib_read_kreg32(dd, kr_scratch);
2293 qib_write_kreg(dd, kr_scratch, v);
2294 v = qib_read_kreg32(dd, kr_scratch);
2295 qib_write_kreg(dd, kr_scratch, v);
2296 qib_read_kreg32(dd, kr_scratch);
2297 }
2298}
2299
2300/**
2301 * qib_portcntr_6120 - read a per-port counter
2302 * @ppd: the qlogic_ib port data
2303 * @reg: the counter to snapshot
2304 */
2305static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
2306{
2307 u64 ret = 0ULL;
2308 struct qib_devdata *dd = ppd->dd;
2309 u16 creg;
2310 /* 0xffff for unimplemented or synthesized counters */
2311 static const u16 xlator[] = {
2312 [QIBPORTCNTR_PKTSEND] = cr_pktsend,
2313 [QIBPORTCNTR_WORDSEND] = cr_wordsend,
2314 [QIBPORTCNTR_PSXMITDATA] = 0xffff,
2315 [QIBPORTCNTR_PSXMITPKTS] = 0xffff,
2316 [QIBPORTCNTR_PSXMITWAIT] = 0xffff,
2317 [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
2318 [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
2319 [QIBPORTCNTR_PSRCVDATA] = 0xffff,
2320 [QIBPORTCNTR_PSRCVPKTS] = 0xffff,
2321 [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
2322 [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
2323 [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
2324 [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
2325 [QIBPORTCNTR_RXLOCALPHYERR] = 0xffff,
2326 [QIBPORTCNTR_RXVLERR] = 0xffff,
2327 [QIBPORTCNTR_ERRICRC] = cr_erricrc,
2328 [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
2329 [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
2330 [QIBPORTCNTR_BADFORMAT] = cr_badformat,
2331 [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
2332 [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
2333 [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
2334 [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
2335 [QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff,
2336 [QIBPORTCNTR_ERRLINK] = cr_errlink,
2337 [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
2338 [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
2339 [QIBPORTCNTR_LLI] = 0xffff,
2340 [QIBPORTCNTR_PSINTERVAL] = 0xffff,
2341 [QIBPORTCNTR_PSSTART] = 0xffff,
2342 [QIBPORTCNTR_PSSTAT] = 0xffff,
2343 [QIBPORTCNTR_VL15PKTDROP] = 0xffff,
2344 [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
2345 [QIBPORTCNTR_KHDROVFL] = 0xffff,
2346 };
2347
2348 if (reg >= ARRAY_SIZE(xlator)) {
2349 qib_devinfo(ppd->dd->pcidev,
2350 "Unimplemented portcounter %u\n", reg);
2351 goto done;
2352 }
2353 creg = xlator[reg];
2354
2355 /* handle counter requests not implemented as chip counters */
2356 if (reg == QIBPORTCNTR_LLI)
2357 ret = dd->cspec->lli_errs;
2358 else if (reg == QIBPORTCNTR_EXCESSBUFOVFL)
2359 ret = dd->cspec->overrun_thresh_errs;
2360 else if (reg == QIBPORTCNTR_KHDROVFL) {
2361 int i;
2362
2363 /* sum over all kernel contexts */
2364 for (i = 0; i < dd->first_user_ctxt; i++)
2365 ret += read_6120_creg32(dd, cr_portovfl + i);
2366 } else if (reg == QIBPORTCNTR_PSSTAT)
2367 ret = dd->cspec->pma_sample_status;
2368 if (creg == 0xffff)
2369 goto done;
2370
2371 /*
2372 * only fast incrementing counters are 64bit; use 32 bit reads to
2373 * avoid two independent reads when on opteron
2374 */
2375 if (creg == cr_wordsend || creg == cr_wordrcv ||
2376 creg == cr_pktsend || creg == cr_pktrcv)
2377 ret = read_6120_creg(dd, creg);
2378 else
2379 ret = read_6120_creg32(dd, creg);
2380 if (creg == cr_ibsymbolerr) {
2381 if (dd->cspec->ibdeltainprog)
2382 ret -= ret - dd->cspec->ibsymsnap;
2383 ret -= dd->cspec->ibsymdelta;
2384 } else if (creg == cr_iblinkerrrecov) {
2385 if (dd->cspec->ibdeltainprog)
2386 ret -= ret - dd->cspec->iblnkerrsnap;
2387 ret -= dd->cspec->iblnkerrdelta;
2388 }
2389 if (reg == QIBPORTCNTR_RXDROPPKT) /* add special cased count */
2390 ret += dd->cspec->rxfc_unsupvl_errs;
2391
2392done:
2393 return ret;
2394}
2395
2396/*
2397 * Device counter names (not port-specific), one line per stat,
2398 * single string. Used by utilities like ipathstats to print the stats
2399 * in a way which works for different versions of drivers, without changing
2400 * the utility. Names need to be 12 chars or less (w/o newline), for proper
2401 * display by utility.
2402 * Non-error counters are first.
2403 * Start of "error" counters is indicated by a leading "E " on the first
2404 * "error" counter, and doesn't count in label length.
2405 * The EgrOvfl list needs to be last so we truncate them at the configured
2406 * context count for the device.
2407 * cntr6120indices contains the corresponding register indices.
2408 */
2409static const char cntr6120names[] =
2410 "Interrupts\n"
2411 "HostBusStall\n"
2412 "E RxTIDFull\n"
2413 "RxTIDInvalid\n"
2414 "Ctxt0EgrOvfl\n"
2415 "Ctxt1EgrOvfl\n"
2416 "Ctxt2EgrOvfl\n"
2417 "Ctxt3EgrOvfl\n"
2418 "Ctxt4EgrOvfl\n";
2419
2420static const size_t cntr6120indices[] = {
2421 cr_lbint,
2422 cr_lbflowstall,
2423 cr_errtidfull,
2424 cr_errtidvalid,
2425 cr_portovfl + 0,
2426 cr_portovfl + 1,
2427 cr_portovfl + 2,
2428 cr_portovfl + 3,
2429 cr_portovfl + 4,
2430};
2431
2432/*
2433 * same as cntr6120names and cntr6120indices, but for port-specific counters.
2434 * portcntr6120indices is somewhat complicated by some registers needing
2435 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
2436 */
2437static const char portcntr6120names[] =
2438 "TxPkt\n"
2439 "TxFlowPkt\n"
2440 "TxWords\n"
2441 "RxPkt\n"
2442 "RxFlowPkt\n"
2443 "RxWords\n"
2444 "TxFlowStall\n"
2445 "E IBStatusChng\n"
2446 "IBLinkDown\n"
2447 "IBLnkRecov\n"
2448 "IBRxLinkErr\n"
2449 "IBSymbolErr\n"
2450 "RxLLIErr\n"
2451 "RxBadFormat\n"
2452 "RxBadLen\n"
2453 "RxBufOvrfl\n"
2454 "RxEBP\n"
2455 "RxFlowCtlErr\n"
2456 "RxICRCerr\n"
2457 "RxLPCRCerr\n"
2458 "RxVCRCerr\n"
2459 "RxInvalLen\n"
2460 "RxInvalPKey\n"
2461 "RxPktDropped\n"
2462 "TxBadLength\n"
2463 "TxDropped\n"
2464 "TxInvalLen\n"
2465 "TxUnderrun\n"
2466 "TxUnsupVL\n"
2467 ;
2468
2469#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
2470static const size_t portcntr6120indices[] = {
2471 QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
2472 cr_pktsendflow,
2473 QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
2474 QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
2475 cr_pktrcvflowctrl,
2476 QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
2477 QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
2478 cr_ibstatuschange,
2479 QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
2480 QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
2481 QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
2482 QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
2483 QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
2484 QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
2485 QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
2486 QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
2487 QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
2488 cr_rcvflowctrl_err,
2489 QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
2490 QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
2491 QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
2492 QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
2493 QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
2494 QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
2495 cr_invalidslen,
2496 cr_senddropped,
2497 cr_errslen,
2498 cr_sendunderrun,
2499 cr_txunsupvl,
2500};
2501
2502/* do all the setup to make the counter reads efficient later */
2503static void init_6120_cntrnames(struct qib_devdata *dd)
2504{
2505 int i, j = 0;
2506 char *s;
2507
2508 for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
2509 i++) {
2510 /* we always have at least one counter before the egrovfl */
2511 if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
2512 j = 1;
2513 s = strchr(s + 1, '\n');
2514 if (s && j)
2515 j++;
2516 }
2517 dd->cspec->ncntrs = i;
2518 if (!s)
2519 /* full list; size is without terminating null */
2520 dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
2521 else
2522 dd->cspec->cntrnamelen = 1 + s - cntr6120names;
2523 dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
2524 * sizeof(u64), GFP_KERNEL);
2525 if (!dd->cspec->cntrs)
2526 qib_dev_err(dd, "Failed allocation for counters\n");
2527
2528 for (i = 0, s = (char *)portcntr6120names; s; i++)
2529 s = strchr(s + 1, '\n');
2530 dd->cspec->nportcntrs = i - 1;
2531 dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
2532 dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
2533 * sizeof(u64), GFP_KERNEL);
2534 if (!dd->cspec->portcntrs)
2535 qib_dev_err(dd, "Failed allocation for portcounters\n");
2536}
2537
2538static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
2539 u64 **cntrp)
2540{
2541 u32 ret;
2542
2543 if (namep) {
2544 ret = dd->cspec->cntrnamelen;
2545 if (pos >= ret)
2546 ret = 0; /* final read after getting everything */
2547 else
2548 *namep = (char *)cntr6120names;
2549 } else {
2550 u64 *cntr = dd->cspec->cntrs;
2551 int i;
2552
2553 ret = dd->cspec->ncntrs * sizeof(u64);
2554 if (!cntr || pos >= ret) {
2555 /* everything read, or couldn't get memory */
2556 ret = 0;
2557 goto done;
2558 }
2563 *cntrp = cntr;
2564 for (i = 0; i < dd->cspec->ncntrs; i++)
2565 *cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
2566 }
2567done:
2568 return ret;
2569}
2570
2571static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
2572 char **namep, u64 **cntrp)
2573{
2574 u32 ret;
2575
2576 if (namep) {
2577 ret = dd->cspec->portcntrnamelen;
2578 if (pos >= ret)
2579 ret = 0; /* final read after getting everything */
2580 else
2581 *namep = (char *)portcntr6120names;
2582 } else {
2583 u64 *cntr = dd->cspec->portcntrs;
2584 struct qib_pportdata *ppd = &dd->pport[port];
2585 int i;
2586
2587 ret = dd->cspec->nportcntrs * sizeof(u64);
2588 if (!cntr || pos >= ret) {
2589 /* everything read, or couldn't get memory */
2590 ret = 0;
2591 goto done;
2592 }
2593 *cntrp = cntr;
2594 for (i = 0; i < dd->cspec->nportcntrs; i++) {
2595 if (portcntr6120indices[i] & _PORT_VIRT_FLAG)
2596 *cntr++ = qib_portcntr_6120(ppd,
2597 portcntr6120indices[i] &
2598 ~_PORT_VIRT_FLAG);
2599 else
2600 *cntr++ = read_6120_creg32(dd,
2601 portcntr6120indices[i]);
2602 }
2603 }
2604done:
2605 return ret;
2606}
2607
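/*
 * Verify that the chip's errormask register still matches our shadow
 * copy; if not, restore it and, if a freeze or hwerror is pending,
 * clear and re-arm so the events are re-delivered. Called periodically
 * from qib_get_6120_faststats() below.
 */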
2608static void qib_chk_6120_errormask(struct qib_devdata *dd)
2609{
2610 static u32 fixed;
2611 u32 ctrl;
2612 unsigned long errormask;
2613 unsigned long hwerrs;
2614
2615 if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
2616 return;
2617
2618 errormask = qib_read_kreg64(dd, kr_errmask);
2619
2620 if (errormask == dd->cspec->errormask)
2621 return;
2622 fixed++;
2623
2624 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2625 ctrl = qib_read_kreg32(dd, kr_control);
2626
2627 qib_write_kreg(dd, kr_errmask,
2628 dd->cspec->errormask);
2629
2630 if ((hwerrs & dd->cspec->hwerrmask) ||
2631 (ctrl & QLOGIC_IB_C_FREEZEMODE)) {
2632 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2633 qib_write_kreg(dd, kr_errclear, 0ULL);
2634 /* force re-interrupt of pending events, just in case */
2635 qib_write_kreg(dd, kr_intclear, 0ULL);
2636 qib_devinfo(dd->pcidev,
2637 "errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n",
2638 fixed, errormask, (unsigned long)dd->cspec->errormask,
2639 ctrl, hwerrs);
2640 }
2641}
2642
2643/**
2644 * qib_get_6120_faststats - get word counters from chip before they overflow
2645 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
2646 *
2647 * This needs more work; in particular, a decision on whether we really
2648 * need traffic_wds done the way it is.
2649 * Called from add_timer.
2650 */
2651static void qib_get_6120_faststats(unsigned long opaque)
2652{
2653 struct qib_devdata *dd = (struct qib_devdata *) opaque;
2654 struct qib_pportdata *ppd = dd->pport;
2655 unsigned long flags;
2656 u64 traffic_wds;
2657
2658 /*
2659 * don't access the chip while running diags, or memory diags can
2660 * fail
2661 */
2662 if (!(dd->flags & QIB_INITTED) || dd->diag_client)
2663 /* but re-arm the timer, for diags case; won't hurt other */
2664 goto done;
2665
2666 /*
2667 * We now try to maintain an activity timer, based on traffic
2668 * exceeding a threshold, so we need to check the word-counts
2669 * even if they are 64-bit.
2670 */
2671 traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) +
2672 qib_portcntr_6120(ppd, cr_wordrcv);
2673 spin_lock_irqsave(&dd->eep_st_lock, flags);
2674 traffic_wds -= dd->traffic_wds;
2675 dd->traffic_wds += traffic_wds;
2676 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
2677 atomic_add(5, &dd->active_time); /* should be a #define */
2678 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
2679
2680 qib_chk_6120_errormask(dd);
2681done:
2682 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
2683}
2684
2685/* no interrupt fallback for these chips */
2686static int qib_6120_nointr_fallback(struct qib_devdata *dd)
2687{
2688 return 0;
2689}
2690
2691/*
2692 * reset the XGXS (between serdes and IBC). Slightly less intrusive
2693 * than resetting the IBC or external link state, and useful in some
2694 * cases to cause some retraining. To do this right, we reset IBC
2695 * as well.
2696 */
2697static void qib_6120_xgxs_reset(struct qib_pportdata *ppd)
2698{
2699 u64 val, prev_val;
2700 struct qib_devdata *dd = ppd->dd;
2701
2702 prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
2703 val = prev_val | QLOGIC_IB_XGXS_RESET;
2704 prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
2705 qib_write_kreg(dd, kr_control,
2706 dd->control & ~QLOGIC_IB_C_LINKENABLE);
2707 qib_write_kreg(dd, kr_xgxs_cfg, val);
2708 qib_read_kreg32(dd, kr_scratch);
2709 qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
2710 qib_write_kreg(dd, kr_control, dd->control);
2711}
2712
2713static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which)
2714{
2715 int ret;
2716
2717 switch (which) {
2718 case QIB_IB_CFG_LWID:
2719 ret = ppd->link_width_active;
2720 break;
2721
2722 case QIB_IB_CFG_SPD:
2723 ret = ppd->link_speed_active;
2724 break;
2725
2726 case QIB_IB_CFG_LWID_ENB:
2727 ret = ppd->link_width_enabled;
2728 break;
2729
2730 case QIB_IB_CFG_SPD_ENB:
2731 ret = ppd->link_speed_enabled;
2732 break;
2733
2734 case QIB_IB_CFG_OP_VLS:
2735 ret = ppd->vls_operational;
2736 break;
2737
2738 case QIB_IB_CFG_VL_HIGH_CAP:
2739 ret = 0;
2740 break;
2741
2742 case QIB_IB_CFG_VL_LOW_CAP:
2743 ret = 0;
2744 break;
2745
2746 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2747 ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
2748 OverrunThreshold);
2749 break;
2750
2751 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2752 ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
2753 PhyerrThreshold);
2754 break;
2755
2756 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2757 /* will only take effect when the link state changes */
2758 ret = (ppd->dd->cspec->ibcctrl &
2759 SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
2760 IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
2761 break;
2762
2763 case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
2764 ret = 0; /* no heartbeat on this chip */
2765 break;
2766
2767 case QIB_IB_CFG_PMA_TICKS:
2768 ret = 250; /* 1 usec. */
2769 break;
2770
2771 default:
2772 ret = -EINVAL;
2773 break;
2774 }
2775 return ret;
2776}
2777
2778/*
2779 * We assume range checking is already done, if needed.
2780 */
2781static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
2782{
2783 struct qib_devdata *dd = ppd->dd;
2784 int ret = 0;
2785 u64 val64;
2786 u16 lcmd, licmd;
2787
2788 switch (which) {
2789 case QIB_IB_CFG_LWID_ENB:
2790 ppd->link_width_enabled = val;
2791 break;
2792
2793 case QIB_IB_CFG_SPD_ENB:
2794 ppd->link_speed_enabled = val;
2795 break;
2796
2797 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2798 val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
2799 OverrunThreshold);
2800 if (val64 != val) {
2801 dd->cspec->ibcctrl &=
2802 ~SYM_MASK(IBCCtrl, OverrunThreshold);
2803 dd->cspec->ibcctrl |= (u64) val <<
2804 SYM_LSB(IBCCtrl, OverrunThreshold);
2805 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2806 qib_write_kreg(dd, kr_scratch, 0);
2807 }
2808 break;
2809
2810 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2811 val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
2812 PhyerrThreshold);
2813 if (val64 != val) {
2814 dd->cspec->ibcctrl &=
2815 ~SYM_MASK(IBCCtrl, PhyerrThreshold);
2816 dd->cspec->ibcctrl |= (u64) val <<
2817 SYM_LSB(IBCCtrl, PhyerrThreshold);
2818 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2819 qib_write_kreg(dd, kr_scratch, 0);
2820 }
2821 break;
2822
2823 case QIB_IB_CFG_PKEYS: /* update pkeys */
2824 val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
2825 ((u64) ppd->pkeys[2] << 32) |
2826 ((u64) ppd->pkeys[3] << 48);
2827 qib_write_kreg(dd, kr_partitionkey, val64);
2828 break;
2829
2830 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2831 /* will only take effect when the link state changes */
2832 if (val == IB_LINKINITCMD_POLL)
2833 dd->cspec->ibcctrl &=
2834 ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
2835 else /* SLEEP */
2836 dd->cspec->ibcctrl |=
2837 SYM_MASK(IBCCtrl, LinkDownDefaultState);
2838 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2839 qib_write_kreg(dd, kr_scratch, 0);
2840 break;
2841
2842 case QIB_IB_CFG_MTU: /* update the MTU in IBC */
2843 /*
2844 * Update our housekeeping variables, and set IBC max
2845 * size, same as init code; max IBC is max we allow in
2846 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
2847 * Set even if it's unchanged, print debug message only
2848 * on changes.
2849 */
2850 val = (ppd->ibmaxlen >> 2) + 1;
2851 dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
2852 dd->cspec->ibcctrl |= (u64)val <<
2853 SYM_LSB(IBCCtrl, MaxPktLen);
2854 qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
2855 qib_write_kreg(dd, kr_scratch, 0);
2856 break;
2857
2858 case QIB_IB_CFG_LSTATE: /* set the IB link state */
2859 switch (val & 0xffff0000) {
2860 case IB_LINKCMD_DOWN:
2861 lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
2862 if (!dd->cspec->ibdeltainprog) {
2863 dd->cspec->ibdeltainprog = 1;
2864 dd->cspec->ibsymsnap =
2865 read_6120_creg32(dd, cr_ibsymbolerr);
2866 dd->cspec->iblnkerrsnap =
2867 read_6120_creg32(dd, cr_iblinkerrrecov);
2868 }
2869 break;
2870
2871 case IB_LINKCMD_ARMED:
2872 lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
2873 break;
2874
2875 case IB_LINKCMD_ACTIVE:
2876 lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
2877 break;
2878
2879 default:
2880 ret = -EINVAL;
2881 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
2882 goto bail;
2883 }
2884 switch (val & 0xffff) {
2885 case IB_LINKINITCMD_NOP:
2886 licmd = 0;
2887 break;
2888
2889 case IB_LINKINITCMD_POLL:
2890 licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
2891 break;
2892
2893 case IB_LINKINITCMD_SLEEP:
2894 licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
2895 break;
2896
2897 case IB_LINKINITCMD_DISABLE:
2898 licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
2899 break;
2900
2901 default:
2902 ret = -EINVAL;
2903 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
2904 val & 0xffff);
2905 goto bail;
2906 }
2907 qib_set_ib_6120_lstate(ppd, lcmd, licmd);
2908 goto bail;
2909
2910 case QIB_IB_CFG_HRTBT:
2911 ret = -EINVAL;
2912 break;
2913
2914 default:
2915 ret = -EINVAL;
2916 }
2917bail:
2918 return ret;
2919}
2920
2921static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
2922{
2923 int ret = 0;
2924 if (!strncmp(what, "ibc", 3)) {
2925 ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
2926 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
2927 ppd->dd->unit, ppd->port);
2928 } else if (!strncmp(what, "off", 3)) {
2929 ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
2930 qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
2931 "(normal)\n", ppd->dd->unit, ppd->port);
2932 } else
2933 ret = -EINVAL;
2934 if (!ret) {
2935 qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
2936 qib_write_kreg(ppd->dd, kr_scratch, 0);
2937 }
2938 return ret;
2939}
2940
2941static void pma_6120_timer(unsigned long data)
2942{
2943 struct qib_pportdata *ppd = (struct qib_pportdata *)data;
2944 struct qib_chip_specific *cs = ppd->dd->cspec;
2945 struct qib_ibport *ibp = &ppd->ibport_data;
2946 unsigned long flags;
2947
2948 spin_lock_irqsave(&ibp->lock, flags);
2949 if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
2950 cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
2951 qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
2952 &cs->spkts, &cs->rpkts, &cs->xmit_wait);
2953 mod_timer(&cs->pma_timer,
2954 jiffies + usecs_to_jiffies(ibp->pma_sample_interval));
2955 } else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
2956 u64 ta, tb, tc, td, te;
2957
2958 cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
2959 qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);
2960
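 /* second snapshot: turn the stored starting values into deltas */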
2961 cs->sword = ta - cs->sword;
2962 cs->rword = tb - cs->rword;
2963 cs->spkts = tc - cs->spkts;
2964 cs->rpkts = td - cs->rpkts;
2965 cs->xmit_wait = te - cs->xmit_wait;
2966 }
2967 spin_unlock_irqrestore(&ibp->lock, flags);
2968}
2969
2970/*
2971 * Note that the caller has the ibp->lock held.
2972 */
2973static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
2974 u32 start)
2975{
2976 struct qib_chip_specific *cs = ppd->dd->cspec;
2977
2978 if (start && intv) {
2979 cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
2980 mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start));
2981 } else if (intv) {
2982 cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
2983 qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
2984 &cs->spkts, &cs->rpkts, &cs->xmit_wait);
2985 mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv));
2986 } else {
2987 cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
2988 cs->sword = 0;
2989 cs->rword = 0;
2990 cs->spkts = 0;
2991 cs->rpkts = 0;
2992 cs->xmit_wait = 0;
2993 }
2994}
2995
2996static u32 qib_6120_iblink_state(u64 ibcs)
2997{
2998 u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
2999
3000 switch (state) {
3001 case IB_6120_L_STATE_INIT:
3002 state = IB_PORT_INIT;
3003 break;
3004 case IB_6120_L_STATE_ARM:
3005 state = IB_PORT_ARMED;
3006 break;
3007 case IB_6120_L_STATE_ACTIVE:
3008 /* fall through */
3009 case IB_6120_L_STATE_ACT_DEFER:
3010 state = IB_PORT_ACTIVE;
3011 break;
3012 default: /* fall through */
3013 case IB_6120_L_STATE_DOWN:
3014 state = IB_PORT_DOWN;
3015 break;
3016 }
3017 return state;
3018}
3019
3020/* returns the IBTA port state, rather than the IBC link training state */
3021static u8 qib_6120_phys_portstate(u64 ibcs)
3022{
3023 u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
3024 return qib_6120_physportstate[state];
3025}
3026
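/*
 * Link up/down transitions also maintain the symbol-error and
 * link-error-recovery snapshots/deltas used by qib_portcntr_6120()
 * above, so errors accumulated while the link was training are
 * hidden from the counters reported upstream.
 */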
3027static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
3028{
3029 unsigned long flags;
3030
3031 spin_lock_irqsave(&ppd->lflags_lock, flags);
3032 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
3033 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3034
3035 if (ibup) {
3036 if (ppd->dd->cspec->ibdeltainprog) {
3037 ppd->dd->cspec->ibdeltainprog = 0;
3038 ppd->dd->cspec->ibsymdelta +=
3039 read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
3040 ppd->dd->cspec->ibsymsnap;
3041 ppd->dd->cspec->iblnkerrdelta +=
3042 read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
3043 ppd->dd->cspec->iblnkerrsnap;
3044 }
3045 qib_hol_init(ppd);
3046 } else {
3047 ppd->dd->cspec->lli_counter = 0;
3048 if (!ppd->dd->cspec->ibdeltainprog) {
3049 ppd->dd->cspec->ibdeltainprog = 1;
3050 ppd->dd->cspec->ibsymsnap =
3051 read_6120_creg32(ppd->dd, cr_ibsymbolerr);
3052 ppd->dd->cspec->iblnkerrsnap =
3053 read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
3054 }
3055 qib_hol_down(ppd);
3056 }
3057
3058 qib_6120_setup_setextled(ppd, ibup);
3059
3060 return 0;
3061}
3062
3063/* Does read/modify/write to appropriate registers to
3064 * set output and direction bits selected by mask.
3065 * These are in their canonical positions (e.g. lsb of
3066 * dir will end up in D48 of extctrl on existing chips).
3067 * Returns contents of GP Inputs.
3068 */
3069static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
3070{
3071 u64 read_val, new_out;
3072 unsigned long flags;
3073
3074 if (mask) {
3075 /* some bits being written, lock access to GPIO */
3076 dir &= mask;
3077 out &= mask;
3078 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
3079 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
3080 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
3081 new_out = (dd->cspec->gpio_out & ~mask) | out;
3082
3083 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
3084 qib_write_kreg(dd, kr_gpio_out, new_out);
3085 dd->cspec->gpio_out = new_out;
3086 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
3087 }
3088 /*
3089 * It is unlikely that a read at this time would get valid
3090 * data on a pin whose direction line was set in the same
3091 * call to this function. We include the read here because
3092 * that allows us to potentially combine a change on one pin with
3093 * a read on another, and because the old code did something like
3094 * this.
3095 */
3096 read_val = qib_read_kreg64(dd, kr_extstatus);
3097 return SYM_FIELD(read_val, EXTStatus, GPIOIn);
3098}
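
/*
 * Illustrative usage only: the TWSI EEPROM code drives the SDA/SCL
 * lines through dd->f_gpio_mod (which points here); setting a dir bit
 * with out 0 pulls the open-drain line low, and a call with mask == 0
 * just samples the GPIO inputs.
 */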
3099
3100/*
3101 * Read fundamental info we need to use the chip. These are
3102 * the registers that describe chip capabilities, and are
3103 * saved in shadow registers.
3104 */
3105static void get_6120_chip_params(struct qib_devdata *dd)
3106{
3107 u64 val;
3108 u32 piobufs;
3109 int mtu;
3110
3111 dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
3112
3113 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
3114 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
3115 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
3116 dd->palign = qib_read_kreg32(dd, kr_palign);
3117 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
3118 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
3119
3120 dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3121
3122 val = qib_read_kreg64(dd, kr_sendpiosize);
3123 dd->piosize2k = val & ~0U;
3124 dd->piosize4k = val >> 32;
3125
3126 mtu = ib_mtu_enum_to_int(qib_ibmtu);
3127 if (mtu == -1)
3128 mtu = QIB_DEFAULT_MTU;
3129 dd->pport->ibmtu = (u32)mtu;
3130
3131 val = qib_read_kreg64(dd, kr_sendpiobufcnt);
3132 dd->piobcnt2k = val & ~0U;
3133 dd->piobcnt4k = val >> 32;
3134 /* these may be adjusted in init_chip_wc_pat() */
3135 dd->pio2kbase = (u32 __iomem *)
3136 (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
3137 if (dd->piobcnt4k) {
3138 dd->pio4kbase = (u32 __iomem *)
3139 (((char __iomem *) dd->kregbase) +
3140 (dd->piobufbase >> 32));
3141 /*
3142 * 4K buffers take 2 pages; we use roundup just to be
3143 * paranoid; we calculate it once here, rather than on
3144 * every buffer allocation.
3145 */
3146 dd->align4k = ALIGN(dd->piosize4k, dd->palign);
3147 }
3148
3149 piobufs = dd->piobcnt4k + dd->piobcnt2k;
3150
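 /*
  * sizeof(u64) * BITS_PER_BYTE / 2 == 32: each 64-bit pioavail register
  * carries 2 status bits for each of 32 send buffers.
  */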
3151 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
3152 (sizeof(u64) * BITS_PER_BYTE / 2);
3153}
3154
3155/*
3156 * The chip base addresses in cspec and cpspec have to be set
3157 * after possible init_chip_wc_pat(), rather than in
3158 * get_6120_chip_params(), so split out as separate function
3159 */
3160static void set_6120_baseaddrs(struct qib_devdata *dd)
3161{
3162 u32 cregbase;
3163 cregbase = qib_read_kreg32(dd, kr_counterregbase);
3164 dd->cspec->cregbase = (u64 __iomem *)
3165 ((char __iomem *) dd->kregbase + cregbase);
3166
3167 dd->egrtidbase = (u64 __iomem *)
3168 ((char __iomem *) dd->kregbase + dd->rcvegrbase);
3169}
3170
3171/*
3172 * Write the final few registers that depend on some of the
3173 * init setup. Done late in init, just before bringing up
3174 * the serdes.
3175 */
3176static int qib_late_6120_initreg(struct qib_devdata *dd)
3177{
3178 int ret = 0;
3179 u64 val;
3180
3181 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
3182 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
3183 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
3184 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
3185 val = qib_read_kreg64(dd, kr_sendpioavailaddr);
3186 if (val != dd->pioavailregs_phys) {
3187 qib_dev_err(dd, "Catastrophic software error, "
3188 "SendPIOAvailAddr written as %lx, "
3189 "read back as %llx\n",
3190 (unsigned long) dd->pioavailregs_phys,
3191 (unsigned long long) val);
3192 ret = -EINVAL;
3193 }
3194 return ret;
3195}
3196
3197static int init_6120_variables(struct qib_devdata *dd)
3198{
3199 int ret = 0;
3200 struct qib_pportdata *ppd;
3201 u32 sbufs;
3202
3203 ppd = (struct qib_pportdata *)(dd + 1);
3204 dd->pport = ppd;
3205 dd->num_pports = 1;
3206
3207 dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
3208 ppd->cpspec = NULL; /* not used in this chip */
3209
3210 spin_lock_init(&dd->cspec->kernel_tid_lock);
3211 spin_lock_init(&dd->cspec->user_tid_lock);
3212 spin_lock_init(&dd->cspec->rcvmod_lock);
3213 spin_lock_init(&dd->cspec->gpio_lock);
3214
3215 /* we haven't yet set QIB_PRESENT, so use readq() directly */
3216 dd->revision = readq(&dd->kregbase[kr_revision]);
3217
3218 if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
3219 qib_dev_err(dd, "Revision register read failure, "
3220 "giving up initialization\n");
3221 ret = -ENODEV;
3222 goto bail;
3223 }
3224 dd->flags |= QIB_PRESENT; /* now register routines work */
3225
3226 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
3227 ChipRevMajor);
3228 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
3229 ChipRevMinor);
3230
3231 get_6120_chip_params(dd);
3232 pe_boardname(dd); /* fill in boardname */
3233
3234 /*
3235 * GPIO bits for TWSI data and clock,
3236 * used for serial EEPROM.
3237 */
3238 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
3239 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
3240 dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;
3241
3242 if (qib_unordered_wc())
3243 dd->flags |= QIB_PIO_FLUSH_WC;
3244
3245 /*
3246 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
3247 * 2 is Some Misc, 3 is reserved for future.
3248 */
3249 dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
3250
3251 /* Ignore errors in PIO/PBC on systems with unordered write-combining */
3252 if (qib_unordered_wc())
3253 dd->eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
3254
3255 dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
3256
3257 dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
3258
3259 qib_init_pportdata(ppd, dd, 0, 1);
3260 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
3261 ppd->link_speed_supported = QIB_IB_SDR;
3262 ppd->link_width_enabled = IB_WIDTH_4X;
3263 ppd->link_speed_enabled = ppd->link_speed_supported;
3264 /* these can't change for this chip, so set once */
3265 ppd->link_width_active = ppd->link_width_enabled;
3266 ppd->link_speed_active = ppd->link_speed_enabled;
3267 ppd->vls_supported = IB_VL_VL0;
3268 ppd->vls_operational = ppd->vls_supported;
3269
3270 dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
3271 dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
3272 dd->rhf_offset = 0;
3273
3274 /* we always allocate at least 2048 bytes for eager buffers */
3275 ret = ib_mtu_enum_to_int(qib_ibmtu);
3276 dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
3277
3278 qib_6120_tidtemplate(dd);
3279
3280 /*
3281 * We can request a receive interrupt for 1 or
3282 * more packets from current offset. For now, we set this
3283 * up for a single packet.
3284 */
3285 dd->rhdrhead_intr_off = 1ULL << 32;
3286
3287 /* setup the stats timer; the add_timer is done at end of init */
3288 init_timer(&dd->stats_timer);
3289 dd->stats_timer.function = qib_get_6120_faststats;
3290 dd->stats_timer.data = (unsigned long) dd;
3291
3292 init_timer(&dd->cspec->pma_timer);
3293 dd->cspec->pma_timer.function = pma_6120_timer;
3294 dd->cspec->pma_timer.data = (unsigned long) ppd;
3295
3296 dd->ureg_align = qib_read_kreg32(dd, kr_palign);
3297
3298 dd->piosize2kmax_dwords = dd->piosize2k >> 2;
3299 qib_6120_config_ctxts(dd);
3300 qib_set_ctxtcnt(dd);
3301
3302 if (qib_wc_pat) {
3303 ret = init_chip_wc_pat(dd, 0);
3304 if (ret)
3305 goto bail;
3306 }
3307 set_6120_baseaddrs(dd); /* set chip access pointers now */
3308
3309 ret = 0;
3310 if (qib_mini_init)
3311 goto bail;
3312
3313 qib_num_cfg_vls = 1; /* if any 6120s, only one VL */
3314
3315 ret = qib_create_ctxts(dd);
3316 init_6120_cntrnames(dd);
3317
3318 /* give the kernel all the 4KB buffers; if there are none, reserve 16 */
3319 sbufs = dd->piobcnt4k ? dd->piobcnt4k : 16;
3320
3321 dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;
3322 dd->pbufsctxt = dd->lastctxt_piobuf /
3323 (dd->cfgctxts - dd->first_user_ctxt);
3324
3325 if (ret)
3326 goto bail;
3327bail:
3328 return ret;
3329}
3330
3331/*
3332 * For this chip, we want to use the same buffer every time
3333 * when we are trying to bring the link up (they are always VL15
3334 * packets). At that link state the packet should always go out immediately
3335 * (or at least be discarded at the tx interface if the link is down).
3336 * If it doesn't, and the buffer isn't available, that means some other
3337 * sender has gotten ahead of us, and is preventing our packet from going
3338 * out. In that case, we flush all packets, and try again. If that still
3339 * fails, we fail the request, and hope things work the next time around.
3340 *
3341 * We don't need very complicated heuristics on whether the packet had
3342 * time to go out or not, since even at SDR 1X, it goes out in very short
3343 * time periods, covered by the chip reads done here and as part of the
3344 * flush.
3345 */
3346static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum)
3347{
3348 u32 __iomem *buf;
3349 u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;
3350
3351 /*
3352 * always blip to get avail list updated, since it's almost
3353 * always needed, and is fairly cheap.
3354 */
3355 sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
3356 qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3357 buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3358 if (buf)
3359 goto done;
3360
3361 sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
3362 QIB_SENDCTRL_AVAIL_BLIP);
3363 ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
3364 qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3365 buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3366done:
3367 return buf;
3368}
3369
3370static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
3371 u32 *pbufnum)
3372{
3373 u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
3374 struct qib_devdata *dd = ppd->dd;
3375 u32 __iomem *buf;
3376
3377 if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) &&
3378 !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
3379 buf = get_6120_link_buf(ppd, pbufnum);
3380 else {
3381
3382 if ((plen + 1) > dd->piosize2kmax_dwords)
3383 first = dd->piobcnt2k;
3384 else
3385 first = 0;
3386 /* try 4k if all 2k busy, so same last for both sizes */
3387 last = dd->piobcnt2k + dd->piobcnt4k - 1;
3388 buf = qib_getsendbuf_range(dd, pbufnum, first, last);
3389 }
3390 return buf;
3391}
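/*
 * Editor's worked example (illustrative numbers): with 2KB PIO
 * buffers, piosize2kmax_dwords is piosize2k >> 2 = 512 dwords, so a
 * packet of plen + 1 = 600 dwords starts its search at the first 4KB
 * buffer (first = piobcnt2k), while shorter packets start at buffer 0.
 */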
3392
3393static int init_sdma_6120_regs(struct qib_pportdata *ppd)
3394{
3395 return -ENODEV;
3396}
3397
3398static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd)
3399{
3400 return 0;
3401}
3402
3403static int qib_sdma_6120_busy(struct qib_pportdata *ppd)
3404{
3405 return 0;
3406}
3407
3408static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail)
3409{
3410}
3411
3412static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
3413{
3414}
3415
3416static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
3417{
3418}
3419
3420/*
3421 * the pbc doesn't need a VL15 indicator, but we need it for link_buf.
3422 * The chip ignores the bit if set.
3423 */
3424static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen,
3425 u8 srate, u8 vl)
3426{
3427 return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0;
3428}
3429
3430static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
3431{
3432}
3433
3434static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd)
3435{
3436 rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
3437 rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt;
3438}
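/*
 * Editor's worked example (illustrative): contexts carve the eager TID
 * array into equal consecutive slices, so with rcvegrcnt = 512 entries
 * per context, context 3 gets rcvegr_tid_base = 1536, i.e. TIDs
 * 1536..2047.
 */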
3439
3440static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
3441 u32 len, u32 avail, struct qib_ctxtdata *rcd)
3442{
3443}
3444
3445static void writescratch(struct qib_devdata *dd, u32 val)
3446{
3447 (void) qib_write_kreg(dd, kr_scratch, val);
3448}
3449
3450static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
3451{
3452 return -ENXIO;
3453}
3454
3455/* Dummy function, as 6120 boards never disable EEPROM Write */
3456static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
3457{
3458 return 1;
3459}
3460
3461/**
3462 * qib_init_iba6120_funcs - set up the chip-specific function pointers
3463 * @pdev: pci_dev of the qlogic_ib device
3464 * @ent: pci_device_id matching this chip
3465 *
3466 * This is global, and is called directly at init to set up the
3467 * chip-specific function pointers for later use.
3468 *
3469 * It also allocates/partially-inits the qib_devdata struct for
3470 * this device.
3471 */
3472struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
3473 const struct pci_device_id *ent)
3474{
3475 struct qib_devdata *dd;
3476 int ret;
3477
3478#ifndef CONFIG_PCI_MSI
3479 qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
3480 "work if CONFIG_PCI_MSI is not enabled\n",
3481 ent->device);
3482 dd = ERR_PTR(-ENODEV);
3483 goto bail;
3484#endif
3485
3486 dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
3487 sizeof(struct qib_chip_specific));
3488 if (IS_ERR(dd))
3489 goto bail;
3490
3491 dd->f_bringup_serdes = qib_6120_bringup_serdes;
3492 dd->f_cleanup = qib_6120_setup_cleanup;
3493 dd->f_clear_tids = qib_6120_clear_tids;
3494 dd->f_free_irq = qib_6120_free_irq;
3495 dd->f_get_base_info = qib_6120_get_base_info;
3496 dd->f_get_msgheader = qib_6120_get_msgheader;
3497 dd->f_getsendbuf = qib_6120_getsendbuf;
3498 dd->f_gpio_mod = gpio_6120_mod;
3499 dd->f_eeprom_wen = qib_6120_eeprom_wen;
3500 dd->f_hdrqempty = qib_6120_hdrqempty;
3501 dd->f_ib_updown = qib_6120_ib_updown;
3502 dd->f_init_ctxt = qib_6120_init_ctxt;
3503 dd->f_initvl15_bufs = qib_6120_initvl15_bufs;
3504 dd->f_intr_fallback = qib_6120_nointr_fallback;
3505 dd->f_late_initreg = qib_late_6120_initreg;
3506 dd->f_setpbc_control = qib_6120_setpbc_control;
3507 dd->f_portcntr = qib_portcntr_6120;
3508 dd->f_put_tid = (dd->minrev >= 2) ?
3509 qib_6120_put_tid_2 :
3510 qib_6120_put_tid;
3511 dd->f_quiet_serdes = qib_6120_quiet_serdes;
3512 dd->f_rcvctrl = rcvctrl_6120_mod;
3513 dd->f_read_cntrs = qib_read_6120cntrs;
3514 dd->f_read_portcntrs = qib_read_6120portcntrs;
3515 dd->f_reset = qib_6120_setup_reset;
3516 dd->f_init_sdma_regs = init_sdma_6120_regs;
3517 dd->f_sdma_busy = qib_sdma_6120_busy;
3518 dd->f_sdma_gethead = qib_sdma_6120_gethead;
3519 dd->f_sdma_sendctrl = qib_6120_sdma_sendctrl;
3520 dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
3521 dd->f_sdma_update_tail = qib_sdma_update_6120_tail;
3522 dd->f_sendctrl = sendctrl_6120_mod;
3523 dd->f_set_armlaunch = qib_set_6120_armlaunch;
3524 dd->f_set_cntr_sample = qib_set_cntr_6120_sample;
3525 dd->f_iblink_state = qib_6120_iblink_state;
3526 dd->f_ibphys_portstate = qib_6120_phys_portstate;
3527 dd->f_get_ib_cfg = qib_6120_get_ib_cfg;
3528 dd->f_set_ib_cfg = qib_6120_set_ib_cfg;
3529 dd->f_set_ib_loopback = qib_6120_set_loopback;
3530 dd->f_set_intr_state = qib_6120_set_intr_state;
3531 dd->f_setextled = qib_6120_setup_setextled;
3532 dd->f_txchk_change = qib_6120_txchk_change;
3533 dd->f_update_usrhead = qib_update_6120_usrhead;
3534 dd->f_wantpiobuf_intr = qib_wantpiobuf_6120_intr;
3535 dd->f_xgxs_reset = qib_6120_xgxs_reset;
3536 dd->f_writescratch = writescratch;
3537 dd->f_tempsense_rd = qib_6120_tempsense_rd;
3538 /*
3539 * Do remaining pcie setup and save pcie values in dd.
3540 * Any error printing is already done by the init code.
3541 * On return, we have the chip mapped and accessible,
3542 * but chip registers are not set up until start of
3543 * init_6120_variables.
3544 */
3545 ret = qib_pcie_ddinit(dd, pdev, ent);
3546 if (ret < 0)
3547 goto bail_free;
3548
3549 /* initialize chip-specific variables */
3550 ret = init_6120_variables(dd);
3551 if (ret)
3552 goto bail_cleanup;
3553
3554 if (qib_mini_init)
3555 goto bail;
3556
3557#ifndef CONFIG_PCI_MSI
3558 qib_dev_err(dd, "PCI_MSI not configured, NO interrupts\n");
3559#endif
3560
3561 if (qib_pcie_params(dd, 8, NULL, NULL))
3562 qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
3563 "continuing anyway\n");
3564 dd->cspec->irq = pdev->irq; /* save IRQ */
3565
3566 /* clear diagctrl register, in case diags were running and crashed */
3567 qib_write_kreg(dd, kr_hwdiagctrl, 0);
3568
3569 if (qib_read_kreg64(dd, kr_hwerrstatus) &
3570 QLOGIC_IB_HWE_SERDESPLLFAILED)
3571 qib_write_kreg(dd, kr_hwerrclear,
3572 QLOGIC_IB_HWE_SERDESPLLFAILED);
3573
3574 /* setup interrupt handler (interrupt type handled above) */
3575 qib_setup_6120_interrupt(dd);
3576 /* Note that qpn_mask is set by qib_6120_config_ctxts() first */
3577 qib_6120_init_hwerrors(dd);
3578
3579 goto bail;
3580
3581bail_cleanup:
3582 qib_pcie_ddcleanup(dd);
3583bail_free:
3584 qib_free_devdata(dd);
3585 dd = ERR_PTR(ret);
3586bail:
3587 return dd;
3588}
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
new file mode 100644
index 000000000000..6fd8d74e7392
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -0,0 +1,4618 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34/*
35 * This file contains all of the code that is specific to the
36 * QLogic_IB 7220 chip (except that specific to the SerDes)
37 */
38
39#include <linux/interrupt.h>
40#include <linux/pci.h>
41#include <linux/delay.h>
42#include <linux/io.h>
43#include <rdma/ib_verbs.h>
44
45#include "qib.h"
46#include "qib_7220.h"
47
48static void qib_setup_7220_setextled(struct qib_pportdata *, u32);
49static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t);
50static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
51static u32 qib_7220_iblink_state(u64);
52static u8 qib_7220_phys_portstate(u64);
53static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16);
54static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);
55
56/*
57 * This file contains almost all the chip-specific register information and
58 * access functions for the QLogic_IB 7220 PCI-Express chip, with the
59 * exception of SerDes support, which is in qib_sd7220.c.
60 */
61
62/* Below uses machine-generated qib_chipnum_regs.h file */
63#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
64
65/* Use defines to tie machine-generated names to lower-case names */
66#define kr_control KREG_IDX(Control)
67#define kr_counterregbase KREG_IDX(CntrRegBase)
68#define kr_errclear KREG_IDX(ErrClear)
69#define kr_errmask KREG_IDX(ErrMask)
70#define kr_errstatus KREG_IDX(ErrStatus)
71#define kr_extctrl KREG_IDX(EXTCtrl)
72#define kr_extstatus KREG_IDX(EXTStatus)
73#define kr_gpio_clear KREG_IDX(GPIOClear)
74#define kr_gpio_mask KREG_IDX(GPIOMask)
75#define kr_gpio_out KREG_IDX(GPIOOut)
76#define kr_gpio_status KREG_IDX(GPIOStatus)
77#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID)
78#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
79#define kr_hwerrclear KREG_IDX(HwErrClear)
80#define kr_hwerrmask KREG_IDX(HwErrMask)
81#define kr_hwerrstatus KREG_IDX(HwErrStatus)
82#define kr_ibcctrl KREG_IDX(IBCCtrl)
83#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl)
84#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus)
85#define kr_ibcstatus KREG_IDX(IBCStatus)
86#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
87#define kr_intclear KREG_IDX(IntClear)
88#define kr_intmask KREG_IDX(IntMask)
89#define kr_intstatus KREG_IDX(IntStatus)
90#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl)
91#define kr_palign KREG_IDX(PageAlign)
92#define kr_partitionkey KREG_IDX(RcvPartitionKey)
93#define kr_portcnt KREG_IDX(PortCnt)
94#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
95#define kr_rcvctrl KREG_IDX(RcvCtrl)
96#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
97#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
98#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
99#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
100#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
101#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt)
102#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
103#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
104#define kr_revision KREG_IDX(Revision)
105#define kr_scratch KREG_IDX(Scratch)
106#define kr_sendbuffererror KREG_IDX(SendBufErr0)
107#define kr_sendctrl KREG_IDX(SendCtrl)
108#define kr_senddmabase KREG_IDX(SendDmaBase)
109#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0)
110#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1)
111#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2)
112#define kr_senddmahead KREG_IDX(SendDmaHead)
113#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr)
114#define kr_senddmalengen KREG_IDX(SendDmaLenGen)
115#define kr_senddmastatus KREG_IDX(SendDmaStatus)
116#define kr_senddmatail KREG_IDX(SendDmaTail)
117#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
118#define kr_sendpiobufbase KREG_IDX(SendBufBase)
119#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
120#define kr_sendpiosize KREG_IDX(SendBufSize)
121#define kr_sendregbase KREG_IDX(SendRegBase)
122#define kr_userregbase KREG_IDX(UserRegBase)
123#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
124
125/* These must only be written via qib_write_kreg_ctxt() */
126#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
127#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
128
129
130#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \
131 QIB_7220_LBIntCnt_OFFS) / sizeof(u64))
132
133#define cr_badformat CREG_IDX(RxVersionErrCnt)
134#define cr_erricrc CREG_IDX(RxICRCErrCnt)
135#define cr_errlink CREG_IDX(RxLinkMalformCnt)
136#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
137#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
138#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt)
139#define cr_err_rlen CREG_IDX(RxLenErrCnt)
140#define cr_errslen CREG_IDX(TxLenErrCnt)
141#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
142#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
143#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
144#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
145#define cr_lbint CREG_IDX(LBIntCnt)
146#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
147#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
148#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
149#define cr_pktrcv CREG_IDX(RxDataPktCnt)
150#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
151#define cr_pktsend CREG_IDX(TxDataPktCnt)
152#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
153#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
154#define cr_rcvebp CREG_IDX(RxEBPCnt)
155#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
156#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
157#define cr_sendstall CREG_IDX(TxFlowStallCnt)
158#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
159#define cr_wordrcv CREG_IDX(RxDwordCnt)
160#define cr_wordsend CREG_IDX(TxDwordCnt)
161#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
162#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
163#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
164#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
165#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
166#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
167#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
168#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
169#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
170#define cr_rxvlerr CREG_IDX(RxVlErrCnt)
171#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
172#define cr_psstat CREG_IDX(PSStat)
173#define cr_psstart CREG_IDX(PSStart)
174#define cr_psinterval CREG_IDX(PSInterval)
175#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount)
176#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount)
177#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount)
178#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount)
179#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
180#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt)
181#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt)
182
183#define SYM_RMASK(regname, fldname) ((u64) \
184 QIB_7220_##regname##_##fldname##_RMASK)
185#define SYM_MASK(regname, fldname) ((u64) \
186 QIB_7220_##regname##_##fldname##_RMASK << \
187 QIB_7220_##regname##_##fldname##_LSB)
188#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB)
189#define SYM_FIELD(value, regname, fldname) ((u64) \
190 (((value) >> SYM_LSB(regname, fldname)) & \
191 SYM_RMASK(regname, fldname)))
192#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
193#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
194
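/*
 * Editor's worked example (hypothetical field values): if the generated
 * header defined QIB_7220_IBCStatus_LinkState_LSB as 4 and its _RMASK
 * as 0x7, then SYM_FIELD(ibcs, IBCStatus, LinkState) would expand to
 * ((ibcs >> 4) & 0x7), the usual shift-then-mask extraction, and
 * SYM_MASK(IBCStatus, LinkState) to (0x7ULL << 4) for in-place tests.
 */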
195/* ibcctrl bits */
196#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
197/* cycle through TS1/TS2 till OK */
198#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
199/* wait for TS1, then go on */
200#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
201#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
202
203#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
204#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
205#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
206
207#define BLOB_7220_IBCHG 0x81
208
209/*
210 * We could have a single register get/put routine that takes a group
211 * type, but this is somewhat clearer and cleaner. It also gives us some
212 * error checking. 64-bit register reads should always work, but are
213 * inefficient on Opteron (the northbridge always generates 2 separate
214 * HT 32-bit reads), so we use kreg32 wherever possible. User register
215 * and counter register reads are always 32-bit, so those have one form.
216 */
217
218/**
219 * qib_read_ureg32 - read 32-bit virtualized per-context register
220 * @dd: device
221 * @regno: register number
222 * @ctxt: context number
223 *
224 * Return the contents of a register that is virtualized to be per context.
225 * Returns 0 if the chip isn't present or mapped (not distinguishable
226 * from valid contents at runtime; a separate error flag may be added).
227 */
228static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
229 enum qib_ureg regno, int ctxt)
230{
231 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
232 return 0;
233
234 if (dd->userbase)
235 return readl(regno + (u64 __iomem *)
236 ((char __iomem *)dd->userbase +
237 dd->ureg_align * ctxt));
238 else
239 return readl(regno + (u64 __iomem *)
240 (dd->uregbase +
241 (char __iomem *)dd->kregbase +
242 dd->ureg_align * ctxt));
243}
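/*
 * Usage sketch (editor's note, assuming the ur_rcvhdrtail entry of the
 * qib_ureg enum in qib.h): polling a context's receive header tail
 * would look like
 *
 *	u32 tail = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
 */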
244
245/**
246 * qib_write_ureg - write 32-bit virtualized per-context register
247 * @dd: device
248 * @regno: register number
249 * @value: value
250 * @ctxt: context
251 *
252 * Write the contents of a register that is virtualized to be per context.
253 */
254static inline void qib_write_ureg(const struct qib_devdata *dd,
255 enum qib_ureg regno, u64 value, int ctxt)
256{
257 u64 __iomem *ubase;
258
259 if (dd->userbase)
260 ubase = (u64 __iomem *)
261 ((char __iomem *) dd->userbase +
262 dd->ureg_align * ctxt);
263 else
264 ubase = (u64 __iomem *)
265 (dd->uregbase +
266 (char __iomem *) dd->kregbase +
267 dd->ureg_align * ctxt);
268
269 if (dd->kregbase && (dd->flags & QIB_PRESENT))
270 writeq(value, &ubase[regno]);
271}
272
273/**
274 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
275 * @dd: the qlogic_ib device
276 * @regno: the register number to write
277 * @ctxt: the context containing the register
278 * @value: the value to write
279 */
280static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
281 const u16 regno, unsigned ctxt,
282 u64 value)
283{
284 qib_write_kreg(dd, regno + ctxt, value);
285}
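/*
 * Editor's note (illustrative): per-context kernel registers are laid
 * out as a consecutive array, so writing context 2's receive header
 * queue address (hypothetical dma_addr value) would be
 *
 *	qib_write_kreg_ctxt(dd, kr_rcvhdraddr, 2, dma_addr);
 */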
286
287static inline void write_7220_creg(const struct qib_devdata *dd,
288 u16 regno, u64 value)
289{
290 if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
291 writeq(value, &dd->cspec->cregbase[regno]);
292}
293
294static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
295{
296 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
297 return 0;
298 return readq(&dd->cspec->cregbase[regno]);
299}
300
301static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
302{
303 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
304 return 0;
305 return readl(&dd->cspec->cregbase[regno]);
306}
307
308/* kr_revision bits */
309#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1)
310#define QLOGIC_IB_R_EMULATORREV_SHIFT 40
311
312/* kr_control bits */
313#define QLOGIC_IB_C_RESET (1U << 7)
314
315/* kr_intstatus, kr_intclear, kr_intmask bits */
316#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1)
317#define QLOGIC_IB_I_RCVURG_SHIFT 32
318#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1)
319#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0
320#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27)
321
322#define QLOGIC_IB_C_FREEZEMODE 0x00000002
323#define QLOGIC_IB_C_LINKENABLE 0x00000004
324
325#define QLOGIC_IB_I_SDMAINT 0x8000000000000000ULL
326#define QLOGIC_IB_I_SDMADISABLED 0x4000000000000000ULL
327#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
328#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
329#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
330#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
331
332/* variables for sanity checking interrupt and errors */
333#define QLOGIC_IB_I_BITSEXTANT \
334 (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \
335 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
336 (QLOGIC_IB_I_RCVAVAIL_MASK << \
337 QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
338 QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
339 QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \
340 QLOGIC_IB_I_SERDESTRIMDONE)
341
342#define IB_HWE_BITSEXTANT \
343 (HWE_MASK(RXEMemParityErr) | \
344 HWE_MASK(TXEMemParityErr) | \
345 (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
346 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
347 QLOGIC_IB_HWE_PCIE1PLLFAILED | \
348 QLOGIC_IB_HWE_PCIE0PLLFAILED | \
349 QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
350 QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
351 QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
352 QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
353 QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
354 HWE_MASK(PowerOnBISTFailed) | \
355 QLOGIC_IB_HWE_COREPLL_FBSLIP | \
356 QLOGIC_IB_HWE_COREPLL_RFSLIP | \
357 QLOGIC_IB_HWE_SERDESPLLFAILED | \
358 HWE_MASK(IBCBusToSPCParityErr) | \
359 HWE_MASK(IBCBusFromSPCParityErr) | \
360 QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \
361 QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \
362 QLOGIC_IB_HWE_SDMAMEMREADERR | \
363 QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \
364 QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \
365 QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \
366 QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \
367 QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \
368 QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \
369 QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \
370 QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \
371 QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR)
372
373#define IB_E_BITSEXTANT \
374 (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
375 ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
376 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
377 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
378 ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
379 ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
380 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
381 ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
382 ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
383 ERR_MASK(SendSpecialTriggerErr) | \
384 ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \
385 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \
386 ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \
387 ERR_MASK(SendDroppedDataPktErr) | \
388 ERR_MASK(SendPioArmLaunchErr) | \
389 ERR_MASK(SendUnexpectedPktNumErr) | \
390 ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \
391 ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \
392 ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
393 ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
394 ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
395 ERR_MASK(SDmaUnexpDataErr) | \
396 ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \
397 ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \
398 ERR_MASK(SDmaDescAddrMisalignErr) | \
399 ERR_MASK(InvalidEEPCmd))
400
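/*
 * Editor's note: these "extant" masks OR together every interrupt and
 * error bit the driver knows about; handle_7220_errors() below flags
 * surprises with a test of the form
 *
 *	if (errs & ~IB_E_BITSEXTANT)
 *		qib_dev_err(dd, ...);
 */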
401/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
402#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
403#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
404#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
405#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
406#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
407#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
408#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
409#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
410#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
411#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
412#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
413#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
414/* specific to this chip */
415#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
416#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
417#define QLOGIC_IB_HWE_SDMAMEMREADERR 0x0000000010000000ULL
418#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
419#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
420#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
421#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
422#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
423#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
424#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
425#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
426#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
427
428#define IBA7220_IBCC_LINKCMD_SHIFT 19
429
430/* kr_ibcddrctrl bits */
431#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
432#define IBA7220_IBC_DLIDLMC_SHIFT 32
433
434#define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \
435 SYM_RMASK(IBCDDRCtrl, HRTBT_ENB))
436#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB)
437
438#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
439#define IBA7220_IBC_LREV_MASK 1
440#define IBA7220_IBC_LREV_SHIFT 8
441#define IBA7220_IBC_RXPOL_MASK 1
442#define IBA7220_IBC_RXPOL_SHIFT 7
443#define IBA7220_IBC_WIDTH_SHIFT 5
444#define IBA7220_IBC_WIDTH_MASK 0x3
445#define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT)
446#define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT)
447#define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT)
448#define IBA7220_IBC_SPEED_AUTONEG (1 << 1)
449#define IBA7220_IBC_SPEED_SDR (1 << 2)
450#define IBA7220_IBC_SPEED_DDR (1 << 3)
451#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1)
452#define IBA7220_IBC_IBTA_1_2_MASK (1)
453
454/* kr_ibcddrstatus */
455/* link latency shift is 0, don't bother defining */
456#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
457
458/* kr_extstatus bits */
459#define QLOGIC_IB_EXTS_FREQSEL 0x2
460#define QLOGIC_IB_EXTS_SERDESSEL 0x4
461#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
462#define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000
463
464/* kr_xgxsconfig bits */
465#define QLOGIC_IB_XGXS_RESET 0x5ULL
466#define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63)
467
468/* kr_rcvpktledcnt */
469#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
470#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
471
472#define _QIB_GPIO_SDA_NUM 1
473#define _QIB_GPIO_SCL_NUM 0
474#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */
475#define QIB_TWSI_TEMP_DEV 0x98
476
477/* HW counter clock is at 4nsec */
478#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000
479
480#define IBA7220_R_INTRAVAIL_SHIFT 17
481#define IBA7220_R_PKEY_DIS_SHIFT 34
482#define IBA7220_R_TAILUPD_SHIFT 35
483#define IBA7220_R_CTXTCFG_SHIFT 36
484
485#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
486
487/*
488 * The size bits give us 2^N, in KB units. 0 marks the entry invalid,
489 * and 7 is reserved. We currently use only 2KB and 4KB.
490 */
491#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
492#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */
493#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */
494#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
495#define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
496#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
497
498#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
499
500/* packet rate matching delay multiplier */
501static u8 rate_to_delay[2][2] = {
502 /* 1x, 4x */
503 { 8, 2 }, /* SDR */
504 { 4, 1 } /* DDR */
505};
506
507static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
508 [IB_RATE_2_5_GBPS] = 8,
509 [IB_RATE_5_GBPS] = 4,
510 [IB_RATE_10_GBPS] = 2,
511 [IB_RATE_20_GBPS] = 1
512};
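/*
 * Editor's worked example: both tables encode the same delay
 * multiplier, indexed differently; rate_to_delay[0][1] == 2 for SDR 4X
 * (10 Gb/s), matching ib_rate_to_delay[IB_RATE_10_GBPS] == 2.
 */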
513
514#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive)
515#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive)
516
517/* link training states, from IBC */
518#define IB_7220_LT_STATE_DISABLED 0x00
519#define IB_7220_LT_STATE_LINKUP 0x01
520#define IB_7220_LT_STATE_POLLACTIVE 0x02
521#define IB_7220_LT_STATE_POLLQUIET 0x03
522#define IB_7220_LT_STATE_SLEEPDELAY 0x04
523#define IB_7220_LT_STATE_SLEEPQUIET 0x05
524#define IB_7220_LT_STATE_CFGDEBOUNCE 0x08
525#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
526#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
527#define IB_7220_LT_STATE_CFGIDLE 0x0b
528#define IB_7220_LT_STATE_RECOVERRETRAIN 0x0c
529#define IB_7220_LT_STATE_RECOVERWAITRMT 0x0e
530#define IB_7220_LT_STATE_RECOVERIDLE 0x0f
531
532/* link state machine states from IBC */
533#define IB_7220_L_STATE_DOWN 0x0
534#define IB_7220_L_STATE_INIT 0x1
535#define IB_7220_L_STATE_ARM 0x2
536#define IB_7220_L_STATE_ACTIVE 0x3
537#define IB_7220_L_STATE_ACT_DEFER 0x4
538
539static const u8 qib_7220_physportstate[0x20] = {
540 [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
541 [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
542 [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
543 [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
544 [IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
545 [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
546 [IB_7220_LT_STATE_CFGDEBOUNCE] =
547 IB_PHYSPORTSTATE_CFG_TRAIN,
548 [IB_7220_LT_STATE_CFGRCVFCFG] =
549 IB_PHYSPORTSTATE_CFG_TRAIN,
550 [IB_7220_LT_STATE_CFGWAITRMT] =
551 IB_PHYSPORTSTATE_CFG_TRAIN,
552 [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
553 [IB_7220_LT_STATE_RECOVERRETRAIN] =
554 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
555 [IB_7220_LT_STATE_RECOVERWAITRMT] =
556 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
557 [IB_7220_LT_STATE_RECOVERIDLE] =
558 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
559 [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
560 [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
561 [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
562 [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
563 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
564 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
565 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
566 [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
567};
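/*
 * Editor's note: indices not listed above (e.g. 0x06, 0x07, 0x0d)
 * default to 0 under C designated-initializer rules, so unknown IBC
 * training states map to physical port state 0.
 */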
568
569int qib_special_trigger;
570module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO);
571MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch");
572
573#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
574#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
575
576#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
577 (1ULL << (SYM_LSB(regname, fldname) + (bit))))
578
579#define TXEMEMPARITYERR_PIOBUF \
580 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
581#define TXEMEMPARITYERR_PIOPBC \
582 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
583#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
584 SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
585
586#define RXEMEMPARITYERR_RCVBUF \
587 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
588#define RXEMEMPARITYERR_LOOKUPQ \
589 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
590#define RXEMEMPARITYERR_EXPTID \
591 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
592#define RXEMEMPARITYERR_EAGERTID \
593 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
594#define RXEMEMPARITYERR_FLAGBUF \
595 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
596#define RXEMEMPARITYERR_DATAINFO \
597 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
598#define RXEMEMPARITYERR_HDRINFO \
599 SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
600
601/* 7220 specific hardware errors... */
602static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
603 /* generic hardware errors */
604 QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
605 QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
606
607 QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
608 "TXE PIOBUF Memory Parity"),
609 QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
610 "TXE PIOPBC Memory Parity"),
611 QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
612 "TXE PIOLAUNCHFIFO Memory Parity"),
613
614 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
615 "RXE RCVBUF Memory Parity"),
616 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
617 "RXE LOOKUPQ Memory Parity"),
618 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
619 "RXE EAGERTID Memory Parity"),
620 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
621 "RXE EXPTID Memory Parity"),
622 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
623 "RXE FLAGBUF Memory Parity"),
624 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
625 "RXE DATAINFO Memory Parity"),
626 QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
627 "RXE HDRINFO Memory Parity"),
628
629 /* chip-specific hardware errors */
630 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
631 "PCIe Poisoned TLP"),
632 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
633 "PCIe completion timeout"),
634 /*
635 * In practice, it's unlikely that we'll see PCIe PLL failures, or
636 * bus parity or memory parity errors, because most likely we
637 * won't be able to talk to the core of the chip. Nonetheless, we
638 * might see them if they occur in parts of the PCIe core that aren't
639 * essential.
640 */
641 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
642 "PCIePLL1"),
643 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
644 "PCIePLL0"),
645 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
646 "PCIe XTLH core parity"),
647 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
648 "PCIe ADM TX core parity"),
649 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
650 "PCIe ADM RX core parity"),
651 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
652 "SerDes PLL"),
653 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR,
654 "PCIe cpl data queue"),
655 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR,
656 "PCIe cpl header queue"),
657 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR,
658 "Send DMA memory read"),
659 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED,
660 "uC PLL clock not locked"),
661 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT,
662 "PCIe serdes Q0 no clock"),
663 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT,
664 "PCIe serdes Q1 no clock"),
665 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT,
666 "PCIe serdes Q2 no clock"),
667 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT,
668 "PCIe serdes Q3 no clock"),
669 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR,
670 "DDS RXEQ memory parity"),
671 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR,
672 "IB uC memory parity"),
673 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR,
674 "PCIe uC oct0 memory parity"),
675 QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR,
676 "PCIe uC oct1 memory parity"),
677};
678
679#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID)
680
681#define QLOGIC_IB_E_PKTERRS (\
682 ERR_MASK(SendPktLenErr) | \
683 ERR_MASK(SendDroppedDataPktErr) | \
684 ERR_MASK(RcvVCRCErr) | \
685 ERR_MASK(RcvICRCErr) | \
686 ERR_MASK(RcvShortPktLenErr) | \
687 ERR_MASK(RcvEBPErr))
688
689/* Convenience for decoding Send DMA errors */
690#define QLOGIC_IB_E_SDMAERRS ( \
691 ERR_MASK(SDmaGenMismatchErr) | \
692 ERR_MASK(SDmaOutOfBoundErr) | \
693 ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
694 ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
695 ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
696 ERR_MASK(SDmaUnexpDataErr) | \
697 ERR_MASK(SDmaDescAddrMisalignErr) | \
698 ERR_MASK(SDmaDisabledErr) | \
699 ERR_MASK(SendBufMisuseErr))
700
701/* These are all rcv-related errors which we want to count for stats */
702#define E_SUM_PKTERRS \
703 (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
704 ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
705 ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
706 ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
707 ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
708 ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
709
710/* These are all send-related errors which we want to count for stats */
711#define E_SUM_ERRS \
712 (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \
713 ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
714 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
715 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
716 ERR_MASK(InvalidAddrErr))
717
718/*
719 * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors,
720 * and we don't ignore errors unrelated to freeze and cancelling buffers.
721 * Armlaunch can't be ignored because more may arrive while we are still
722 * cleaning up, and those must be cancelled as they happen.
723 */
724#define E_SPKT_ERRS_IGNORE \
725 (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
726 ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
727 ERR_MASK(SendPktLenErr))
728
729/*
730 * These are errors that can occur when the link changes state while
731 * a packet is being sent or received. This doesn't cover things
732 * like EBP or VCRC that can result from the sender's link changing
733 * state, in which case we receive a "known bad" packet.
734 */
735#define E_SUM_LINK_PKTERRS \
736 (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
737 ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
738 ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
739 ERR_MASK(RcvUnexpectedCharErr))
740
741static void autoneg_7220_work(struct work_struct *);
742static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *);
743
744/*
745 * Called when we might have an error that is specific to a particular
746 * PIO buffer, and may need to cancel that buffer, so it can be re-used;
747 * in this path we don't need to force an update of pioavail.
748 */
749static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd)
750{
751 unsigned long sbuf[3];
752 struct qib_devdata *dd = ppd->dd;
753
754 /*
755 * It's possible that sendbuffererror could have bits set; might
756 * have already done this as a result of hardware error handling.
757 */
758 /* read these before writing errorclear */
759 sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
760 sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
761 sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
762
763 if (sbuf[0] || sbuf[1] || sbuf[2])
764 qib_disarm_piobufs_set(dd, sbuf,
765 dd->piobcnt2k + dd->piobcnt4k);
766}
767
768static void qib_7220_txe_recover(struct qib_devdata *dd)
769{
770 qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
771 qib_disarm_7220_senderrbufs(dd->pport);
772}
773
774/*
775 * This is called with interrupts disabled and sdma_lock held.
776 */
777static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
778{
779 struct qib_devdata *dd = ppd->dd;
780 u64 set_sendctrl = 0;
781 u64 clr_sendctrl = 0;
782
783 if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
784 set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
785 else
786 clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
787
788 if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
789 set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
790 else
791 clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
792
793 if (op & QIB_SDMA_SENDCTRL_OP_HALT)
794 set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
795 else
796 clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
797
798 spin_lock(&dd->sendctrl_lock);
799
800 dd->sendctrl |= set_sendctrl;
801 dd->sendctrl &= ~clr_sendctrl;
802
803 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
804 qib_write_kreg(dd, kr_scratch, 0);
805
806 spin_unlock(&dd->sendctrl_lock);
807}
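/*
 * Usage sketch (editor's note, using the op flags tested above): a
 * caller holding sdma_lock with interrupts disabled would enable the
 * engine and its interrupt with
 *
 *	qib_7220_sdma_sendctrl(ppd, QIB_SDMA_SENDCTRL_OP_ENABLE |
 *			       QIB_SDMA_SENDCTRL_OP_INTENABLE);
 */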
808
809static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd,
810 u64 err, char *buf, size_t blen)
811{
812 static const struct {
813 u64 err;
814 const char *msg;
815 } errs[] = {
816 { ERR_MASK(SDmaGenMismatchErr),
817 "SDmaGenMismatch" },
818 { ERR_MASK(SDmaOutOfBoundErr),
819 "SDmaOutOfBound" },
820 { ERR_MASK(SDmaTailOutOfBoundErr),
821 "SDmaTailOutOfBound" },
822 { ERR_MASK(SDmaBaseErr),
823 "SDmaBase" },
824 { ERR_MASK(SDma1stDescErr),
825 "SDma1stDesc" },
826 { ERR_MASK(SDmaRpyTagErr),
827 "SDmaRpyTag" },
828 { ERR_MASK(SDmaDwEnErr),
829 "SDmaDwEn" },
830 { ERR_MASK(SDmaMissingDwErr),
831 "SDmaMissingDw" },
832 { ERR_MASK(SDmaUnexpDataErr),
833 "SDmaUnexpData" },
834 { ERR_MASK(SDmaDescAddrMisalignErr),
835 "SDmaDescAddrMisalign" },
836 { ERR_MASK(SendBufMisuseErr),
837 "SendBufMisuse" },
838 { ERR_MASK(SDmaDisabledErr),
839 "SDmaDisabled" },
840 };
841 int i;
842 size_t bidx = 0;
843
 /* editor's fix: start with an empty string in case no error bits match */
 buf[0] = '\0';

844 for (i = 0; i < ARRAY_SIZE(errs); i++) {
845 if (err & errs[i].err)
846 bidx += scnprintf(buf + bidx, blen - bidx,
847 "%s ", errs[i].msg);
848 }
849}
850
851/*
852 * This is called as part of link-down cleanup: disarm and flush
853 * all send buffers so that SMP packets can be sent.
854 */
855static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
856{
857 /* This will trigger the Abort interrupt */
858 sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
859 QIB_SENDCTRL_AVAIL_BLIP);
860 ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
861}
862
863static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
864{
865 /*
866 * Write SendDmaLenGen twice: first with the generation-count MSB
867 * clear, then with it set, which enables generation checking
868 * and loads the internal generation counter.
869 */
870 qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
871 qib_write_kreg(ppd->dd, kr_senddmalengen,
872 ppd->sdma_descq_cnt |
873 (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
874}
875
876static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
877{
878 qib_sdma_7220_setlengen(ppd);
879 qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
880 ppd->sdma_head_dma[0] = 0;
881}
882
883#define DISABLES_SDMA ( \
884 ERR_MASK(SDmaDisabledErr) | \
885 ERR_MASK(SDmaBaseErr) | \
886 ERR_MASK(SDmaTailOutOfBoundErr) | \
887 ERR_MASK(SDmaOutOfBoundErr) | \
888 ERR_MASK(SDma1stDescErr) | \
889 ERR_MASK(SDmaRpyTagErr) | \
890 ERR_MASK(SDmaGenMismatchErr) | \
891 ERR_MASK(SDmaDescAddrMisalignErr) | \
892 ERR_MASK(SDmaMissingDwErr) | \
893 ERR_MASK(SDmaDwEnErr))
894
895static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
896{
897 unsigned long flags;
898 struct qib_devdata *dd = ppd->dd;
899 char *msg;
900
901 errs &= QLOGIC_IB_E_SDMAERRS;
902
903 msg = dd->cspec->sdmamsgbuf;
904 qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf);
905 spin_lock_irqsave(&ppd->sdma_lock, flags);
906
907 if (errs & ERR_MASK(SendBufMisuseErr)) {
908 unsigned long sbuf[3];
909
910 sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
911 sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
912 sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
913
914 qib_dev_err(ppd->dd,
915 "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
916 ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
917 sbuf[0]);
918 }
919
920 if (errs & ERR_MASK(SDmaUnexpDataErr))
921 qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
922 ppd->port);
923
924 switch (ppd->sdma_state.current_state) {
925 case qib_sdma_state_s00_hw_down:
926 /* not expecting any interrupts */
927 break;
928
929 case qib_sdma_state_s10_hw_start_up_wait:
930 /* handled in intr path */
931 break;
932
933 case qib_sdma_state_s20_idle:
934 /* not expecting any interrupts */
935 break;
936
937 case qib_sdma_state_s30_sw_clean_up_wait:
938 /* not expecting any interrupts */
939 break;
940
941 case qib_sdma_state_s40_hw_clean_up_wait:
942 if (errs & ERR_MASK(SDmaDisabledErr))
943 __qib_sdma_process_event(ppd,
944 qib_sdma_event_e50_hw_cleaned);
945 break;
946
947 case qib_sdma_state_s50_hw_halt_wait:
948 /* handled in intr path */
949 break;
950
951 case qib_sdma_state_s99_running:
952 if (errs & DISABLES_SDMA)
953 __qib_sdma_process_event(ppd,
954 qib_sdma_event_e7220_err_halted);
955 break;
956 }
957
958 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
959}
960
961/*
962 * Decode the error status into strings, deciding whether to always
963 * print it or not depending on "normal packet errors" vs everything
964 * else. Return 1 for "real" errors, 0 if only packet errors were
965 * seen, so the caller can decide what to print with the string.
966 */
967static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
968 u64 err)
969{
970 int iserr = 1;
971
972 *buf = '\0';
973 if (err & QLOGIC_IB_E_PKTERRS) {
974 if (!(err & ~QLOGIC_IB_E_PKTERRS))
975 iserr = 0;
976 if ((err & ERR_MASK(RcvICRCErr)) &&
977 !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
978 strlcat(buf, "CRC ", blen);
979 if (!iserr)
980 goto done;
981 }
982 if (err & ERR_MASK(RcvHdrLenErr))
983 strlcat(buf, "rhdrlen ", blen);
984 if (err & ERR_MASK(RcvBadTidErr))
985 strlcat(buf, "rbadtid ", blen);
986 if (err & ERR_MASK(RcvBadVersionErr))
987 strlcat(buf, "rbadversion ", blen);
988 if (err & ERR_MASK(RcvHdrErr))
989 strlcat(buf, "rhdr ", blen);
990 if (err & ERR_MASK(SendSpecialTriggerErr))
991 strlcat(buf, "sendspecialtrigger ", blen);
992 if (err & ERR_MASK(RcvLongPktLenErr))
993 strlcat(buf, "rlongpktlen ", blen);
994 if (err & ERR_MASK(RcvMaxPktLenErr))
995 strlcat(buf, "rmaxpktlen ", blen);
996 if (err & ERR_MASK(RcvMinPktLenErr))
997 strlcat(buf, "rminpktlen ", blen);
998 if (err & ERR_MASK(SendMinPktLenErr))
999 strlcat(buf, "sminpktlen ", blen);
1000 if (err & ERR_MASK(RcvFormatErr))
1001 strlcat(buf, "rformaterr ", blen);
1002 if (err & ERR_MASK(RcvUnsupportedVLErr))
1003 strlcat(buf, "runsupvl ", blen);
1004 if (err & ERR_MASK(RcvUnexpectedCharErr))
1005 strlcat(buf, "runexpchar ", blen);
1006 if (err & ERR_MASK(RcvIBFlowErr))
1007 strlcat(buf, "ribflow ", blen);
1008 if (err & ERR_MASK(SendUnderRunErr))
1009 strlcat(buf, "sunderrun ", blen);
1010 if (err & ERR_MASK(SendPioArmLaunchErr))
1011 strlcat(buf, "spioarmlaunch ", blen);
1012 if (err & ERR_MASK(SendUnexpectedPktNumErr))
1013 strlcat(buf, "sunexperrpktnum ", blen);
1014 if (err & ERR_MASK(SendDroppedSmpPktErr))
1015 strlcat(buf, "sdroppedsmppkt ", blen);
1016 if (err & ERR_MASK(SendMaxPktLenErr))
1017 strlcat(buf, "smaxpktlen ", blen);
1018 if (err & ERR_MASK(SendUnsupportedVLErr))
1019 strlcat(buf, "sunsupVL ", blen);
1020 if (err & ERR_MASK(InvalidAddrErr))
1021 strlcat(buf, "invalidaddr ", blen);
1022 if (err & ERR_MASK(RcvEgrFullErr))
1023 strlcat(buf, "rcvegrfull ", blen);
1024 if (err & ERR_MASK(RcvHdrFullErr))
1025 strlcat(buf, "rcvhdrfull ", blen);
1026 if (err & ERR_MASK(IBStatusChanged))
1027 strlcat(buf, "ibcstatuschg ", blen);
1028 if (err & ERR_MASK(RcvIBLostLinkErr))
1029 strlcat(buf, "riblostlink ", blen);
1030 if (err & ERR_MASK(HardwareErr))
1031 strlcat(buf, "hardware ", blen);
1032 if (err & ERR_MASK(ResetNegated))
1033 strlcat(buf, "reset ", blen);
1034 if (err & QLOGIC_IB_E_SDMAERRS)
1035 qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
1036 if (err & ERR_MASK(InvalidEEPCmd))
1037 strlcat(buf, "invalideepromcmd ", blen);
1038done:
1039 return iserr;
1040}
1041
1042static void reenable_7220_chase(unsigned long opaque)
1043{
1044 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1045 ppd->cpspec->chase_timer.expires = 0;
1046 qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1047 QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1048}
1049
1050static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
1051{
1052 u8 ibclt;
1053 u64 tnow;
1054
1055 ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
1056
1057 /*
1058 * Detect and handle the state chase issue, where we can
1059 * get stuck if we are unlucky on timing on both sides of
1060 * the link. If we are, we disable, set a timer, and
1061 * then re-enable.
1062 */
1063 switch (ibclt) {
1064 case IB_7220_LT_STATE_CFGRCVFCFG:
1065 case IB_7220_LT_STATE_CFGWAITRMT:
1066 case IB_7220_LT_STATE_TXREVLANES:
1067 case IB_7220_LT_STATE_CFGENH:
1068 tnow = get_jiffies_64();
1069 if (ppd->cpspec->chase_end &&
1070 time_after64(tnow, ppd->cpspec->chase_end)) {
1071 ppd->cpspec->chase_end = 0;
1072 qib_set_ib_7220_lstate(ppd,
1073 QLOGIC_IB_IBCC_LINKCMD_DOWN,
1074 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1075 ppd->cpspec->chase_timer.expires = jiffies +
1076 QIB_CHASE_DIS_TIME;
1077 add_timer(&ppd->cpspec->chase_timer);
1078 } else if (!ppd->cpspec->chase_end)
1079 ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1080 break;
1081
1082 default:
1083 ppd->cpspec->chase_end = 0;
1084 break;
1085 }
1086}
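/*
 * Editor's timeline sketch: on first seeing a "chase" training state,
 * chase_end is set QIB_CHASE_TIME ahead; if the state is still being
 * chased past that deadline, the link is forced DOWN/DISABLE and
 * reenable_7220_chase() re-enables polling QIB_CHASE_DIS_TIME later
 * via chase_timer.
 */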
1087
1088static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
1089{
1090 char *msg;
1091 u64 ignore_this_time = 0;
1092 u64 iserr = 0;
1093 int log_idx;
1094 struct qib_pportdata *ppd = dd->pport;
1095 u64 mask;
1096
1097 /* don't report errors that are masked */
1098 errs &= dd->cspec->errormask;
1099 msg = dd->cspec->emsgbuf;
1100
1101 /* do these first, they are most important */
1102 if (errs & ERR_MASK(HardwareErr))
1103 qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1104 else
1105 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1106 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1107 qib_inc_eeprom_err(dd, log_idx, 1);
1108
1109 if (errs & QLOGIC_IB_E_SDMAERRS)
1110 sdma_7220_errors(ppd, errs);
1111
1112 if (errs & ~IB_E_BITSEXTANT)
1113 qib_dev_err(dd, "error interrupt with unknown errors "
1114 "%llx set\n", (unsigned long long)
1115 (errs & ~IB_E_BITSEXTANT));
1116
1117 if (errs & E_SUM_ERRS) {
1118 qib_disarm_7220_senderrbufs(ppd);
1119 if ((errs & E_SUM_LINK_PKTERRS) &&
1120 !(ppd->lflags & QIBL_LINKACTIVE)) {
1121 /*
1122 * This can happen when trying to bring the link
1123 * up, but the IB link changes state at the "wrong"
1124 * time. The IB logic then complains that the packet
1125 * isn't valid. We don't want to confuse people, so
1126 * we just don't print them, except at debug
1127 */
1128 ignore_this_time = errs & E_SUM_LINK_PKTERRS;
1129 }
1130 } else if ((errs & E_SUM_LINK_PKTERRS) &&
1131 !(ppd->lflags & QIBL_LINKACTIVE)) {
1132 /*
1133 * This can happen when SMA is trying to bring the link
1134 * up, but the IB link changes state at the "wrong" time.
1135 * The IB logic then complains that the packet isn't
1136 * valid. We don't want to confuse people, so we just
1137 * don't print them, except at debug
1138 */
1139 ignore_this_time = errs & E_SUM_LINK_PKTERRS;
1140 }
1141
1142 qib_write_kreg(dd, kr_errclear, errs);
1143
1144 errs &= ~ignore_this_time;
1145 if (!errs)
1146 goto done;
1147
1148 /*
1149 * The ones we mask off are handled specially below
1150 * or above. Also mask SDMADISABLED by default as it
1151 * is too chatty.
1152 */
1153 mask = ERR_MASK(IBStatusChanged) |
1154 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
1155 ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
1156
1157 qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
1158
1159 if (errs & E_SUM_PKTERRS)
1160 qib_stats.sps_rcverrs++;
1161 if (errs & E_SUM_ERRS)
1162 qib_stats.sps_txerrs++;
1163 iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
1164 ERR_MASK(SDmaDisabledErr));
1165
1166 if (errs & ERR_MASK(IBStatusChanged)) {
1167 u64 ibcs;
1168
1169 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
1170 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1171 handle_7220_chase(ppd, ibcs);
1172
1173 /* Update our picture of width and speed from chip */
1174 ppd->link_width_active =
1175 ((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
1176 IB_WIDTH_4X : IB_WIDTH_1X;
1177 ppd->link_speed_active =
1178 ((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
1179 QIB_IB_DDR : QIB_IB_SDR;
1180
1181 /*
1182 * Since going into a recovery state causes the link state
1183 * to go down and since recovery is transitory, it is better
1184 * if we "miss" ever seeing the link training state go into
1185 * recovery (i.e., ignore this transition for link state
1186 * special handling purposes) without updating lastibcstat.
1187 */
1188 if (qib_7220_phys_portstate(ibcs) !=
1189 IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
1190 qib_handle_e_ibstatuschanged(ppd, ibcs);
1191 }
1192
1193 if (errs & ERR_MASK(ResetNegated)) {
1194 qib_dev_err(dd, "Got reset, requires re-init "
1195 "(unload and reload driver)\n");
1196 dd->flags &= ~QIB_INITTED; /* needs re-init */
1197 /* mark as having had error */
1198 *dd->devstatusp |= QIB_STATUS_HWERROR;
1199 *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
1200 }
1201
1202 if (*msg && iserr)
1203 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1204
1205 if (ppd->state_wanted & ppd->lflags)
1206 wake_up_interruptible(&ppd->state_wait);
1207
1208 /*
1209 * If there were hdrq or egrfull errors, wake up any processes
1210 * waiting in poll. We used to try to check which contexts had
1211 * the overflow, but given the cost of that and the chip reads
1212 * to support it, it's better to just wake everybody up if we
1213 * get an overflow; waiters can poll again if it's not them.
1214 */
1215 if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1216 qib_handle_urcv(dd, ~0U);
1217 if (errs & ERR_MASK(RcvEgrFullErr))
1218 qib_stats.sps_buffull++;
1219 else
1220 qib_stats.sps_hdrfull++;
1221 }
1222done:
1223 return;
1224}
1225
1226/* enable or disable the chip's delivery of interrupts */
1227static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
1228{
1229 if (enable) {
1230 if (dd->flags & QIB_BADINTR)
1231 return;
1232 qib_write_kreg(dd, kr_intmask, ~0ULL);
1233 /* force re-interrupt of any pending interrupts. */
1234 qib_write_kreg(dd, kr_intclear, 0ULL);
1235 } else
1236 qib_write_kreg(dd, kr_intmask, 0ULL);
1237}
1238
1239/*
1240 * Try to cleanup as much as possible for anything that might have gone
1241 * wrong while in freeze mode, such as pio buffers being written by user
1242 * processes (causing armlaunch), send errors due to going into freeze mode,
1243 * etc., and try to avoid causing extra interrupts while doing so.
1244 * Forcibly update the in-memory pioavail register copies after cleanup
1245 * because the chip won't do it while in freeze mode (the register values
1246 * themselves are kept correct).
1247 * Make sure that we don't lose any important interrupts by using the chip
1248 * feature that says that writing 0 to a bit in *clear that is set in
1249 * *status will cause an interrupt to be generated again (if allowed by
1250 * the *mask value).
1251 * This is in chip-specific code because of all of the register accesses,
1252 * even though the details are similar on most chips.
1253 */
1254static void qib_7220_clear_freeze(struct qib_devdata *dd)
1255{
1256 /* disable error interrupts, to avoid confusion */
1257 qib_write_kreg(dd, kr_errmask, 0ULL);
1258
1259 /* also disable interrupts; errormask is sometimes overwritten */
1260 qib_7220_set_intr_state(dd, 0);
1261
1262 qib_cancel_sends(dd->pport);
1263
1264 /* clear the freeze, and be sure chip saw it */
1265 qib_write_kreg(dd, kr_control, dd->control);
1266 qib_read_kreg32(dd, kr_scratch);
1267
1268 /* force in-memory update now we are out of freeze */
1269 qib_force_pio_avail_update(dd);
1270
1271 /*
1272 * force new interrupt if any hwerr, error or interrupt bits are
1273 * still set, and clear "safe" send packet errors related to freeze
1274 * and cancelling sends. Re-enable error interrupts before possible
1275 * force of re-interrupt on pending interrupts.
1276 */
1277 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
1278 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
1279 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1280 qib_7220_set_intr_state(dd, 1);
1281}
1282
1283/**
1284 * qib_7220_handle_hwerrors - display hardware errors.
1285 * @dd: the qlogic_ib device
1286 * @msg: the output buffer
1287 * @msgl: the size of the output buffer
1288 *
1289 * Use the same msg buffer as handle_7220_errors() does for regular
1290 * errors, to avoid excessive stack use. Most hardware errors are
1291 * catastrophic, but for right now, we'll just print them and
1292 * continue.
1293 */
1294static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
1295 size_t msgl)
1296{
1297 u64 hwerrs;
1298 u32 bits, ctrl;
1299 int isfatal = 0;
1300 char *bitsmsg;
1301 int log_idx;
1302
1303 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
1304 if (!hwerrs)
1305 goto bail;
1306 if (hwerrs == ~0ULL) {
1307 qib_dev_err(dd, "Read of hardware error status failed "
1308 "(all bits set); ignoring\n");
1309 goto bail;
1310 }
1311 qib_stats.sps_hwerrs++;
1312
1313 /*
1314 * Always clear the error status register, except MEMBISTFAIL,
1315 * regardless of whether we continue or stop using the chip.
1316 * We want that set so we know it failed, even across driver reload.
1317 * We'll still ignore it in the hwerrmask. We do this partly for
1318 * diagnostics, but also for support.
1319 */
1320 qib_write_kreg(dd, kr_hwerrclear,
1321 hwerrs & ~HWE_MASK(PowerOnBISTFailed));
1322
1323 hwerrs &= dd->cspec->hwerrmask;
1324
1325 /* We log some errors to EEPROM, check if we have any of those. */
1326 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1327 if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
1328 qib_inc_eeprom_err(dd, log_idx, 1);
1329 if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
1330 RXE_PARITY))
1331 qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
1332 "(cleared)\n", (unsigned long long) hwerrs);
1333
1334 if (hwerrs & ~IB_HWE_BITSEXTANT)
1335 qib_dev_err(dd, "hwerror interrupt with unknown errors "
1336 "%llx set\n", (unsigned long long)
1337 (hwerrs & ~IB_HWE_BITSEXTANT));
1338
1339 if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
1340 qib_sd7220_clr_ibpar(dd);
1341
1342 ctrl = qib_read_kreg32(dd, kr_control);
1343 if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
1344 /*
1345 * Parity errors in send memory are recoverable by h/w;
1346 * just do housekeeping, exit freeze mode and continue.
1347 */
1348 if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
1349 TXEMEMPARITYERR_PIOPBC)) {
1350 qib_7220_txe_recover(dd);
1351 hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
1352 TXEMEMPARITYERR_PIOPBC);
1353 }
1354 if (hwerrs)
1355 isfatal = 1;
1356 else
1357 qib_7220_clear_freeze(dd);
1358 }
1359
1360 *msg = '\0';
1361
1362 if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
1363 isfatal = 1;
1364 strlcat(msg, "[Memory BIST test failed, "
1365 "InfiniPath hardware unusable]", msgl);
1366 /* ignore from now on, so disable until driver reloaded */
1367 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
1368 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1369 }
1370
1371 qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
1372 ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);
1373
1374 bitsmsg = dd->cspec->bitsmsgbuf;
1375 if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
1376 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
1377 bits = (u32) ((hwerrs >>
1378 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
1379 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
1380 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
1381 "[PCIe Mem Parity Errs %x] ", bits);
1382 strlcat(msg, bitsmsg, msgl);
1383 }
1384
1385#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
1386 QLOGIC_IB_HWE_COREPLL_RFSLIP)
1387
1388 if (hwerrs & _QIB_PLL_FAIL) {
1389 isfatal = 1;
1390 snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
1391 "[PLL failed (%llx), InfiniPath hardware unusable]",
1392 (unsigned long long) hwerrs & _QIB_PLL_FAIL);
1393 strlcat(msg, bitsmsg, msgl);
1394 /* ignore from now on, so disable until driver reloaded */
1395 dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
1396 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1397 }
1398
1399 if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
1400 /*
1401 * If it occurs, it is left masked since the external
1402 * interface is unused.
1403 */
1404 dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
1405 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1406 }
1407
1408 qib_dev_err(dd, "%s hardware error\n", msg);
1409
1410 if (isfatal && !dd->diag_client) {
1411 qib_dev_err(dd, "Fatal Hardware Error, no longer"
1412 " usable, SN %.16s\n", dd->serial);
1413 /*
1414 * For /sys status file and user programs to print; if no
1415 * trailing brace is copied, we'll know it was truncated.
1416 */
1417 if (dd->freezemsg)
1418 snprintf(dd->freezemsg, dd->freezelen,
1419 "{%s}", msg);
1420 qib_disable_after_error(dd);
1421 }
1422bail:;
1423}
1424
1425/**
1426 * qib_7220_init_hwerrors - enable hardware errors
1427 * @dd: the qlogic_ib device
1428 *
1429 * Now that we have finished initializing everything that might reasonably
1430 * cause a hardware error, and cleared those error bits as they occur,
1431 * we can enable hardware errors in the mask (potentially enabling
1432 * freeze mode), and enable hardware errors as errors (along with
1433 * everything else) in errormask.
1434 */
1435static void qib_7220_init_hwerrors(struct qib_devdata *dd)
1436{
1437 u64 val;
1438 u64 extsval;
1439
1440 extsval = qib_read_kreg64(dd, kr_extstatus);
1441
1442 if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
1443 QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
1444 qib_dev_err(dd, "MemBIST did not complete!\n");
1445 if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
1446 qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");
1447
1448 val = ~0ULL; /* default to all hwerrors becoming interrupts */
1449
1450 val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
1451 dd->cspec->hwerrmask = val;
1452
1453 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
1454 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
1455
1456 /* clear all */
1457 qib_write_kreg(dd, kr_errclear, ~0ULL);
1458 /* enable errors that are masked, at least this first time. */
1459 qib_write_kreg(dd, kr_errmask, ~0ULL);
1460 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
1461 /* clear any interrupts up to this point (ints still not enabled) */
1462 qib_write_kreg(dd, kr_intclear, ~0ULL);
1463}
1464
1465/*
1466 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
1467 * on chips that are count-based, rather than trigger-based. There is no
1468 * reference counting, but that's also fine, given the intended use.
1469 * Only chip-specific because it's all register accesses.
1470 */
1471static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
1472{
1473 if (enable) {
1474 qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
1475 dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
1476 } else
1477 dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
1478 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1479}
1480
1481/*
1482 * Formerly took parameter <which> in pre-shifted,
1483 * pre-merged form with LinkCmd and LinkInitCmd
1484 * together, and assumed that zero meant NOP.
1485 */
1486static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd,
1487 u16 linitcmd)
1488{
1489 u64 mod_wd;
1490 struct qib_devdata *dd = ppd->dd;
1491 unsigned long flags;
1492
1493 if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
1494 /*
1495 * If we are told to disable, note that so link-recovery
1496 * code does not attempt to bring us back up.
1497 */
1498 spin_lock_irqsave(&ppd->lflags_lock, flags);
1499 ppd->lflags |= QIBL_IB_LINK_DISABLED;
1500 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1501 } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
1502 /*
1503 * Any other linkinitcmd will lead to LINKDOWN and then
1504 * to INIT (if all is well), so clear flag to let
1505 * link-recovery code attempt to bring us back up.
1506 */
1507 spin_lock_irqsave(&ppd->lflags_lock, flags);
1508 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
1509 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1510 }
1511
1512 mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) |
1513 (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1514
1515 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
1516 /* write to chip to prevent back-to-back writes of ibc reg */
1517 qib_write_kreg(dd, kr_scratch, 0);
1518}
1519
1520/*
1521 * All detailed interaction with the SerDes has been moved to qib_sd7220.c
1522 *
1523 * The portion of IBA7220-specific bringup_serdes() that actually deals with
1524 * registers and memory within the SerDes itself is qib_sd7220_init().
1525 */
1526
1527/**
1528 * qib_7220_bringup_serdes - bring up the serdes
1529 * @ppd: physical port on the qlogic_ib device
1530 */
1531static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
1532{
1533 struct qib_devdata *dd = ppd->dd;
1534 u64 val, prev_val, guid, ibc;
1535 int ret = 0;
1536
1537 /* Put IBC in reset, sends disabled */
1538 dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1539 qib_write_kreg(dd, kr_control, 0ULL);
1540
1541 if (qib_compat_ddr_negotiate) {
1542 ppd->cpspec->ibdeltainprog = 1;
1543 ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
1544 ppd->cpspec->iblnkerrsnap =
1545 read_7220_creg32(dd, cr_iblinkerrrecov);
1546 }
1547
1548 /* flowcontrolwatermark is in units of KBytes */
1549 ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
1550 /*
1551 * How often flowctrl sent. More or less in usecs; balance against
1552 * watermark value, so that in theory senders always get a flow
1553 * control update in time to not let the IB link go idle.
1554 */
1555 ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
1556 /* max error tolerance */
1557 ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
1558 /* use "real" buffer space for */
1559 ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
1560 /* IB credit flow control. */
1561 ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
1562 /*
1563 * set initial max size pkt IBC will send, including ICRC; it's the
1564 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
1565 */
1566 ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
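/*
 * Illustrative arithmetic (hypothetical ibmaxlen, not a chip-documented
 * value): 8192 bytes >> 2 = 2048 dwords, + 1 (presumably the ICRC
 * dword) = 2049 written to the MaxPktLen field.
 */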
1567 ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
1568
1569 /* initially come up waiting for TS1, without sending anything. */
1570 val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
1571 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
1572 qib_write_kreg(dd, kr_ibcctrl, val);
1573
1574 if (!ppd->cpspec->ibcddrctrl) {
1575 /* not on re-init after reset */
1576 ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);
1577
1578 if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
1579 ppd->cpspec->ibcddrctrl |=
1580 IBA7220_IBC_SPEED_AUTONEG_MASK |
1581 IBA7220_IBC_IBTA_1_2_MASK;
1582 else
1583 ppd->cpspec->ibcddrctrl |=
1584 ppd->link_speed_enabled == QIB_IB_DDR ?
1585 IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
1586 if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
1587 (IB_WIDTH_1X | IB_WIDTH_4X))
1588 ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
1589 else
1590 ppd->cpspec->ibcddrctrl |=
1591 ppd->link_width_enabled == IB_WIDTH_4X ?
1592 IBA7220_IBC_WIDTH_4X_ONLY :
1593 IBA7220_IBC_WIDTH_1X_ONLY;
1594
1595 /* always enable these on driver reload, not sticky */
1596 ppd->cpspec->ibcddrctrl |=
1597 IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
1598 ppd->cpspec->ibcddrctrl |=
1599 IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
1600
1601 /* enable automatic lane reversal detection for receive */
1602 ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
1603 } else
1604 /* write to chip to prevent back-to-back writes of ibc reg */
1605 qib_write_kreg(dd, kr_scratch, 0);
1606
1607 qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
1608 qib_write_kreg(dd, kr_scratch, 0);
1609
1610 qib_write_kreg(dd, kr_ncmodectrl, 0ULL);
1611 qib_write_kreg(dd, kr_scratch, 0);
1612
1613 ret = qib_sd7220_init(dd);
1614
1615 val = qib_read_kreg64(dd, kr_xgxs_cfg);
1616 prev_val = val;
1617 val |= QLOGIC_IB_XGXS_FC_SAFE;
1618 if (val != prev_val) {
1619 qib_write_kreg(dd, kr_xgxs_cfg, val);
1620 qib_read_kreg32(dd, kr_scratch);
1621 }
1622 if (val & QLOGIC_IB_XGXS_RESET)
1623 val &= ~QLOGIC_IB_XGXS_RESET;
1624 if (val != prev_val)
1625 qib_write_kreg(dd, kr_xgxs_cfg, val);
1626
1627 /* first time through, set port guid */
1628 if (!ppd->guid)
1629 ppd->guid = dd->base_guid;
1630 guid = be64_to_cpu(ppd->guid);
1631
1632 qib_write_kreg(dd, kr_hrtbt_guid, guid);
1633 if (!ret) {
1634 dd->control |= QLOGIC_IB_C_LINKENABLE;
1635 qib_write_kreg(dd, kr_control, dd->control);
1636 } else
1637 /* write to chip to prevent back-to-back writes of ibc reg */
1638 qib_write_kreg(dd, kr_scratch, 0);
1639 return ret;
1640}
1641
1642/**
1643 * qib_7220_quiet_serdes - set serdes to txidle
1644 * @ppd: physical port of the qlogic_ib device
1645 * Called when driver is being unloaded
1646 */
1647static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
1648{
1649 u64 val;
1650 struct qib_devdata *dd = ppd->dd;
1651 unsigned long flags;
1652
1653 /* disable IBC */
1654 dd->control &= ~QLOGIC_IB_C_LINKENABLE;
1655 qib_write_kreg(dd, kr_control,
1656 dd->control | QLOGIC_IB_C_FREEZEMODE);
1657
1658 ppd->cpspec->chase_end = 0;
1659 if (ppd->cpspec->chase_timer.data) /* if initted */
1660 del_timer_sync(&ppd->cpspec->chase_timer);
1661
1662 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
1663 ppd->cpspec->ibdeltainprog) {
1664 u64 diagc;
1665
1666 /* enable counter writes */
1667 diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
1668 qib_write_kreg(dd, kr_hwdiagctrl,
1669 diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
1670
1671 if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
1672 val = read_7220_creg32(dd, cr_ibsymbolerr);
1673 if (ppd->cpspec->ibdeltainprog)
1674 val -= val - ppd->cpspec->ibsymsnap;
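/*
 * Note: arithmetically this reduces to val = ppd->cpspec->ibsymsnap;
 * the subtraction form parallels the delta adjustment below.
 */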
1675 val -= ppd->cpspec->ibsymdelta;
1676 write_7220_creg(dd, cr_ibsymbolerr, val);
1677 }
1678 if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
1679 val = read_7220_creg32(dd, cr_iblinkerrrecov);
1680 if (ppd->cpspec->ibdeltainprog)
1681 val -= val - ppd->cpspec->iblnkerrsnap;
1682 val -= ppd->cpspec->iblnkerrdelta;
1683 write_7220_creg(dd, cr_iblinkerrrecov, val);
1684 }
1685
1686 /* and disable counter writes */
1687 qib_write_kreg(dd, kr_hwdiagctrl, diagc);
1688 }
1689 qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1690
1691 spin_lock_irqsave(&ppd->lflags_lock, flags);
1692 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
1693 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
1694 wake_up(&ppd->cpspec->autoneg_wait);
1695 cancel_delayed_work(&ppd->cpspec->autoneg_work);
1696 flush_scheduled_work();
1697
1698 shutdown_7220_relock_poll(ppd->dd);
1699 val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
1700 val |= QLOGIC_IB_XGXS_RESET;
1701 qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
1702}
1703
1704/**
1705 * qib_setup_7220_setextled - set the state of the two external LEDs
1706 * @ppd: the qlogic_ib port
1707 * @on: whether the link is up or not
1708 *
1709 * The exact combo of LEDs if on is true is determined by looking
1710 * at the ibcstatus.
1711 *
1712 * These LEDs indicate the physical and logical state of IB link.
1713 * For this chip (at least with recommended board pinouts), LED1
1714 * is Yellow (logical state) and LED2 is Green (physical state),
1715 *
1716 * Note: We try to match the Mellanox HCA LED behavior as best
1717 * we can. Green indicates physical link state is OK (something is
1718 * plugged in, and we can train).
1719 * Amber indicates the link is logically up (ACTIVE).
1720 * Mellanox further blinks the amber LED to indicate data packet
1721 * activity, but we have no hardware support for that, so it would
1722 * require waking up every 10-20 msecs and checking the counters
1723 * on the chip, and then turning the LED off if appropriate. That's
1724 * visible overhead, so not something we will do.
1725 *
1726 */
1727static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
1728{
1729 struct qib_devdata *dd = ppd->dd;
1730 u64 extctl, ledblink = 0, val, lst, ltst;
1731 unsigned long flags;
1732
1733 /*
1734 * The diags use the LED to indicate diag info, so we leave
1735 * the external LED alone when the diags are running.
1736 */
1737 if (dd->diag_client)
1738 return;
1739
1740 if (ppd->led_override) {
1741 ltst = (ppd->led_override & QIB_LED_PHYS) ?
1742 IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED,
1743 lst = (ppd->led_override & QIB_LED_LOG) ?
1744 IB_PORT_ACTIVE : IB_PORT_DOWN;
1745 } else if (on) {
1746 val = qib_read_kreg64(dd, kr_ibcstatus);
1747 ltst = qib_7220_phys_portstate(val);
1748 lst = qib_7220_iblink_state(val);
1749 } else {
1750 ltst = 0;
1751 lst = 0;
1752 }
1753
1754 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
1755 extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
1756 SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
1757 if (ltst == IB_PHYSPORTSTATE_LINKUP) {
1758 extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
1759 /*
1760 * counts are in chip clock (4ns) periods.
1761 * This is 1/16 sec (66.6ms) on,
1762 * 3/16 sec (187.5 ms) off, with packets rcvd
1763 */
1764 ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT)
1765 | ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT);
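/*
 * Worked numbers for the shifts above: 66600 * 1000 / 4 =
 * 16,650,000 chip clocks (66.6 ms at 4 ns each) in the ON field;
 * 187500 * 1000 / 4 = 46,875,000 clocks (187.5 ms) in the OFF field.
 */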
1766 }
1767 if (lst == IB_PORT_ACTIVE)
1768 extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
1769 dd->cspec->extctrl = extctl;
1770 qib_write_kreg(dd, kr_extctrl, extctl);
1771 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
1772
1773 if (ledblink) /* blink the LED on packet receive */
1774 qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
1775}
1776
1777static void qib_7220_free_irq(struct qib_devdata *dd)
1778{
1779 if (dd->cspec->irq) {
1780 free_irq(dd->cspec->irq, dd);
1781 dd->cspec->irq = 0;
1782 }
1783 qib_nomsi(dd);
1784}
1785
1786/*
1787 * qib_setup_7220_cleanup - clean up any chip-specific stuff
1788 * @dd: the qlogic_ib device
1789 *
1790 * This is called during driver unload.
1791 *
1792 */
1793static void qib_setup_7220_cleanup(struct qib_devdata *dd)
1794{
1795 qib_7220_free_irq(dd);
1796 kfree(dd->cspec->cntrs);
1797 kfree(dd->cspec->portcntrs);
1798}
1799
1800/*
1801 * This is only called for SDmaInt.
1802 * SDmaDisabled is handled on the error path.
1803 */
1804static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat)
1805{
1806 unsigned long flags;
1807
1808 spin_lock_irqsave(&ppd->sdma_lock, flags);
1809
1810 switch (ppd->sdma_state.current_state) {
1811 case qib_sdma_state_s00_hw_down:
1812 break;
1813
1814 case qib_sdma_state_s10_hw_start_up_wait:
1815 __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
1816 break;
1817
1818 case qib_sdma_state_s20_idle:
1819 break;
1820
1821 case qib_sdma_state_s30_sw_clean_up_wait:
1822 break;
1823
1824 case qib_sdma_state_s40_hw_clean_up_wait:
1825 break;
1826
1827 case qib_sdma_state_s50_hw_halt_wait:
1828 __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1829 break;
1830
1831 case qib_sdma_state_s99_running:
1832 /* too chatty to print here */
1833 __qib_sdma_intr(ppd);
1834 break;
1835 }
1836 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1837}
1838
1839static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
1840{
1841 unsigned long flags;
1842
1843 spin_lock_irqsave(&dd->sendctrl_lock, flags);
1844 if (needint) {
1845 if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
1846 goto done;
1847 /*
1848 * blip the availupd off, next write will be on, so
1849 * we ensure an avail update, regardless of threshold or
1850 * buffers becoming free, whenever we want an interrupt
1851 */
1852 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
1853 ~SYM_MASK(SendCtrl, SendBufAvailUpd));
1854 qib_write_kreg(dd, kr_scratch, 0ULL);
1855 dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
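/*
 * Note: the shadow (dd->sendctrl) still has SendBufAvailUpd set,
 * so the common write below re-enables the update together with
 * the interrupt enable, completing the "blip".
 */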
1856 } else
1857 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
1858 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
1859 qib_write_kreg(dd, kr_scratch, 0ULL);
1860done:
1861 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
1862}
1863
1864/*
1865 * Handle errors and unusual events first, separate function
1866 * to improve cache hits for fast path interrupt handling.
1867 */
1868static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
1869{
1870 if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
1871 qib_dev_err(dd,
1872 "interrupt with unknown interrupts %Lx set\n",
1873 istat & ~QLOGIC_IB_I_BITSEXTANT);
1874
1875 if (istat & QLOGIC_IB_I_GPIO) {
1876 u32 gpiostatus;
1877
1878 /*
1879 * Boards for this chip currently don't use GPIO interrupts,
1880 * so clear by writing GPIOstatus to GPIOclear, and complain
1881 * to alert the developer. To avoid endless repeats, clear
1882 * the bits in the mask, since there is some kind of
1883 * programming error or chip problem.
1884 */
1885 gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
1886 /*
1887 * In theory, writing GPIOstatus to GPIOclear could
1888 * have a bad side-effect on some diagnostic that wanted
1889 * to poll for a status-change, but the various shadows
1890 * make that problematic at best. Diags will just suppress
1891 * all GPIO interrupts during such tests.
1892 */
1893 qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
1894
1895 if (gpiostatus) {
1896 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
1897 u32 gpio_irq = mask & gpiostatus;
1898
1899 /*
1900 * A bit set in status and (chip) Mask register
1901 * would cause an interrupt. Since we are not
1902 * expecting any, report it. Also check that the
1903 * chip reflects our shadow, report issues,
1904 * and refresh from the shadow.
1905 */
1906 /*
1907 * Clear any troublemakers, and update chip
1908 * from shadow
1909 */
1910 dd->cspec->gpio_mask &= ~gpio_irq;
1911 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
1912 }
1913 }
1914
1915 if (istat & QLOGIC_IB_I_ERROR) {
1916 u64 estat;
1917
1918 qib_stats.sps_errints++;
1919 estat = qib_read_kreg64(dd, kr_errstatus);
1920 if (!estat)
1921 qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
1922 "but no error bits set!\n", istat);
1923 else
1924 handle_7220_errors(dd, estat);
1925 }
1926}
1927
1928static irqreturn_t qib_7220intr(int irq, void *data)
1929{
1930 struct qib_devdata *dd = data;
1931 irqreturn_t ret;
1932 u64 istat;
1933 u64 ctxtrbits;
1934 u64 rmask;
1935 unsigned i;
1936
1937 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
1938 /*
1939 * This return value is not great, but we do not want the
1940 * interrupt core code to remove our interrupt handler
1941 * because we don't appear to be handling an interrupt
1942 * during a chip reset.
1943 */
1944 ret = IRQ_HANDLED;
1945 goto bail;
1946 }
1947
1948 istat = qib_read_kreg64(dd, kr_intstatus);
1949
1950 if (unlikely(!istat)) {
1951 ret = IRQ_NONE; /* not our interrupt, or already handled */
1952 goto bail;
1953 }
1954 if (unlikely(istat == -1)) {
1955 qib_bad_intrstatus(dd);
1956 /* don't know if it was our interrupt or not */
1957 ret = IRQ_NONE;
1958 goto bail;
1959 }
1960
1961 qib_stats.sps_ints++;
1962 if (dd->int_counter != (u32) -1)
1963 dd->int_counter++;
1964
1965 if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
1966 QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
1967 unlikely_7220_intr(dd, istat);
1968
1969 /*
1970 * Clear the interrupt bits we found set, relatively early, so we
1971 * "know" know the chip will have seen this by the time we process
1972 * the queue, and will re-interrupt if necessary. The processor
1973 * itself won't take the interrupt again until we return.
1974 */
1975 qib_write_kreg(dd, kr_intclear, istat);
1976
1977 /*
1978 * Handle kernel receive queues before checking for pio buffers
1979 * available since receives can overflow; piobuf waiters can afford
1980 * a few extra cycles, since they were waiting anyway.
1981 */
1982 ctxtrbits = istat &
1983 ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1984 (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
1985 if (ctxtrbits) {
1986 rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1987 (1ULL << QLOGIC_IB_I_RCVURG_SHIFT);
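/*
 * Scanning sketch: context i's RcvAvail and RcvUrg bits are both
 * selected by (rmask << i), so one pass over the kernel contexts
 * strips each pair as it is serviced.
 */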
1988 for (i = 0; i < dd->first_user_ctxt; i++) {
1989 if (ctxtrbits & rmask) {
1990 ctxtrbits &= ~rmask;
1991 qib_kreceive(dd->rcd[i], NULL, NULL);
1992 }
1993 rmask <<= 1;
1994 }
1995 if (ctxtrbits) {
1996 ctxtrbits =
1997 (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
1998 (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
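/*
 * In effect, both shifts move a context's bit down to position
 * <ctxt>, so the OR yields a plain per-context mask for
 * qib_handle_urcv().
 */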
1999 qib_handle_urcv(dd, ctxtrbits);
2000 }
2001 }
2002
2003 /* only call for SDmaInt */
2004 if (istat & QLOGIC_IB_I_SDMAINT)
2005 sdma_7220_intr(dd->pport, istat);
2006
2007 if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
2008 qib_ib_piobufavail(dd);
2009
2010 ret = IRQ_HANDLED;
2011bail:
2012 return ret;
2013}
2014
2015/*
2016 * Set up our chip-specific interrupt handler.
2017 * The interrupt type has already been setup, so
2018 * we just need to do the registration and error checking.
2019 * If we are using MSI interrupts, we may fall back to
2020 * INTx later, if the interrupt handler doesn't get called
2021 * within 1/2 second (see verify_interrupt()).
2022 */
2023static void qib_setup_7220_interrupt(struct qib_devdata *dd)
2024{
2025 if (!dd->cspec->irq)
2026 qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
2027 "work\n");
2028 else {
2029 int ret = request_irq(dd->cspec->irq, qib_7220intr,
2030 dd->msi_lo ? 0 : IRQF_SHARED,
2031 QIB_DRV_NAME, dd);
2032
2033 if (ret)
2034 qib_dev_err(dd, "Couldn't setup %s interrupt "
2035 "(irq=%d): %d\n", dd->msi_lo ?
2036 "MSI" : "INTx", dd->cspec->irq, ret);
2037 }
2038}
2039
2040/**
2041 * qib_7220_boardname - fill in the board name
2042 * @dd: the qlogic_ib device
2043 *
2044 * info is based on the board revision register
2045 */
2046static void qib_7220_boardname(struct qib_devdata *dd)
2047{
2048 char *n;
2049 u32 boardid, namelen;
2050
2051 boardid = SYM_FIELD(dd->revision, Revision,
2052 BoardID);
2053
2054 switch (boardid) {
2055 case 1:
2056 n = "InfiniPath_QLE7240";
2057 break;
2058 case 2:
2059 n = "InfiniPath_QLE7280";
2060 break;
2061 default:
2062 qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
2063 n = "Unknown_InfiniPath_7220";
2064 break;
2065 }
2066
2067 namelen = strlen(n) + 1;
2068 dd->boardname = kmalloc(namelen, GFP_KERNEL);
2069 if (!dd->boardname)
2070 qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
2071 else
2072 snprintf(dd->boardname, namelen, "%s", n);
2073
2074 if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
2075 qib_dev_err(dd, "Unsupported InfiniPath hardware "
2076 "revision %u.%u!\n",
2077 dd->majrev, dd->minrev);
2078
2079 snprintf(dd->boardversion, sizeof(dd->boardversion),
2080 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
2081 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
2082 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
2083 dd->majrev, dd->minrev,
2084 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
2085}
2086
2087/*
2088 * This routine sleeps, so it can only be called from user context, not
2089 * from interrupt context.
2090 */
2091static int qib_setup_7220_reset(struct qib_devdata *dd)
2092{
2093 u64 val;
2094 int i;
2095 int ret;
2096 u16 cmdval;
2097 u8 int_line, clinesz;
2098 unsigned long flags;
2099
2100 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
2101
2102 /* Use dev_err so it shows up in logs, etc. */
2103 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
2104
2105 /* no interrupts till re-initted */
2106 qib_7220_set_intr_state(dd, 0);
2107
2108 dd->pport->cpspec->ibdeltainprog = 0;
2109 dd->pport->cpspec->ibsymdelta = 0;
2110 dd->pport->cpspec->iblnkerrdelta = 0;
2111
2112 /*
2113 * Keep chip from being accessed until we are ready. Use
2114 * writeq() directly, to allow the write even though QIB_PRESENT
2115 * isn't set.
2116 */
2117 dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
2118 dd->int_counter = 0; /* so we check interrupts work again */
2119 val = dd->control | QLOGIC_IB_C_RESET;
2120 writeq(val, &dd->kregbase[kr_control]);
2121 mb(); /* prevent compiler reordering around actual reset */
2122
2123 for (i = 1; i <= 5; i++) {
2124 /*
2125 * Allow MBIST, etc. to complete; longer on each retry.
2126 * We sometimes get machine checks from bus timeout if no
2127 * response, so for now, make it *really* long.
2128 */
2129 msleep(1000 + (1 + i) * 2000);
2130
2131 qib_pcie_reenable(dd, cmdval, int_line, clinesz);
2132
2133 /*
2134 * Use readq directly, so we don't need to mark it as PRESENT
2135 * until we get a successful indication that all is well.
2136 */
2137 val = readq(&dd->kregbase[kr_revision]);
2138 if (val == dd->revision) {
2139 dd->flags |= QIB_PRESENT; /* it's back */
2140 ret = qib_reinit_intr(dd);
2141 goto bail;
2142 }
2143 }
2144 ret = 0; /* failed */
2145
2146bail:
2147 if (ret) {
2148 if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
2149 qib_dev_err(dd, "Reset failed to setup PCIe or "
2150 "interrupts; continuing anyway\n");
2151
2152 /* hold IBC in reset, no sends, etc till later */
2153 qib_write_kreg(dd, kr_control, 0ULL);
2154
2155 /* clear the reset error, init error/hwerror mask */
2156 qib_7220_init_hwerrors(dd);
2157
2158 /* do setup similar to speed or link-width changes */
2159 if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
2160 dd->cspec->presets_needed = 1;
2161 spin_lock_irqsave(&dd->pport->lflags_lock, flags);
2162 dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
2163 dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
2164 spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
2165 }
2166
2167 return ret;
2168}
2169
2170/**
2171 * qib_7220_put_tid - write a TID to the chip
2172 * @dd: the qlogic_ib device
2173 * @tidptr: pointer to the expected TID (in chip) to update
2174 * @type: 0 for eager, 1 for expected
2175 * @pa: physical address of in-memory buffer; tidinvalid if freeing
2176 */
2177static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
2178 u32 type, unsigned long pa)
2179{
2180 if (pa != dd->tidinvalid) {
2181 u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
2182
2183 /* paranoia checks */
2184 if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
2185 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
2186 pa);
2187 return;
2188 }
2189 if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
2190 qib_dev_err(dd, "Physical page address 0x%lx "
2191 "larger than supported\n", pa);
2192 return;
2193 }
2194
2195 if (type == RCVHQ_RCV_TYPE_EAGER)
2196 chippa |= dd->tidtemplate;
2197 else /* for now, always full 4KB page */
2198 chippa |= IBA7220_TID_SZ_4K;
2199 pa = chippa;
2200 }
2201 writeq(pa, tidptr);
2202 mmiowb();
2203}
2204
2205/**
2206 * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
2207 * @dd: the qlogic_ib device
2208 * @ctxt: the ctxt
2209 *
2210 * clear all TID entries for a ctxt, expected and eager.
2211 * Used from qib_close(). On this chip, TIDs are only 32 bits,
2212 * not 64, but they are still on 64 bit boundaries, so tidbase
2213 * is declared as u64 * for the pointer math, even though we write 32 bits
2214 */
2215static void qib_7220_clear_tids(struct qib_devdata *dd,
2216 struct qib_ctxtdata *rcd)
2217{
2218 u64 __iomem *tidbase;
2219 unsigned long tidinv;
2220 u32 ctxt;
2221 int i;
2222
2223 if (!dd->kregbase || !rcd)
2224 return;
2225
2226 ctxt = rcd->ctxt;
2227
2228 tidinv = dd->tidinvalid;
2229 tidbase = (u64 __iomem *)
2230 ((char __iomem *)(dd->kregbase) +
2231 dd->rcvtidbase +
2232 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
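/*
 * Addressing sketch: each context owns rcvtidcnt consecutive 64-bit
 * TID slots, so context N's expected-TID table begins rcvtidbase +
 * N * rcvtidcnt * sizeof(*tidbase) bytes into the register space.
 */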
2233
2234 for (i = 0; i < dd->rcvtidcnt; i++)
2235 qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
2236 tidinv);
2237
2238 tidbase = (u64 __iomem *)
2239 ((char __iomem *)(dd->kregbase) +
2240 dd->rcvegrbase +
2241 rcd->rcvegr_tid_base * sizeof(*tidbase));
2242
2243 for (i = 0; i < rcd->rcvegrcnt; i++)
2244 qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
2245 tidinv);
2246}
2247
2248/**
2249 * qib_7220_tidtemplate - setup constants for TID updates
2250 * @dd: the qlogic_ib device
2251 *
2252 * We set up values that we use a lot, to avoid recalculating each time.
2253 */
2254static void qib_7220_tidtemplate(struct qib_devdata *dd)
2255{
2256 if (dd->rcvegrbufsize == 2048)
2257 dd->tidtemplate = IBA7220_TID_SZ_2K;
2258 else if (dd->rcvegrbufsize == 4096)
2259 dd->tidtemplate = IBA7220_TID_SZ_4K;
2260 dd->tidinvalid = 0;
2261}
2262
2263/**
2264 * qib_init_7220_get_base_info - set chip-specific flags for user code
2265 * @rcd: the qlogic_ib ctxt
2266 * @kinfo: qib_base_info pointer
2267 *
2268 * We set the PCIE flag because the lower bandwidth on PCIe vs
2269 * HyperTransport can affect some user packet algorithms.
2270 */
2271static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
2272 struct qib_base_info *kinfo)
2273{
2274 kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
2275 QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;
2276
2277 if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
2278 kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
2279
2280 return 0;
2281}
2282
2283static struct qib_message_header *
2284qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
2285{
2286 u32 offset = qib_hdrget_offset(rhf_addr);
2287
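/*
 * Pointer sketch: rhf_addr - dd->rhf_offset is the base of the
 * rcvhdrq entry; adding the RHF's stored header offset (both in
 * 32-bit words) locates the packet's message header.
 */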
2288 return (struct qib_message_header *)
2289 (rhf_addr - dd->rhf_offset + offset);
2290}
2291
2292static void qib_7220_config_ctxts(struct qib_devdata *dd)
2293{
2294 unsigned long flags;
2295 u32 nchipctxts;
2296
2297 nchipctxts = qib_read_kreg32(dd, kr_portcnt);
2298 dd->cspec->numctxts = nchipctxts;
2299 if (qib_n_krcv_queues > 1) {
2300 dd->qpn_mask = 0x3f;
2301 dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
2302 if (dd->first_user_ctxt > nchipctxts)
2303 dd->first_user_ctxt = nchipctxts;
2304 } else
2305 dd->first_user_ctxt = dd->num_pports;
2306 dd->n_krcv_queues = dd->first_user_ctxt;
2307
2308 if (!qib_cfgctxts) {
2309 int nctxts = dd->first_user_ctxt + num_online_cpus();
2310
2311 if (nctxts <= 5)
2312 dd->ctxtcnt = 5;
2313 else if (nctxts <= 9)
2314 dd->ctxtcnt = 9;
2315 else if (nctxts <= nchipctxts)
2316 dd->ctxtcnt = nchipctxts;
2317 } else if (qib_cfgctxts <= nchipctxts)
2318 dd->ctxtcnt = qib_cfgctxts;
2319 if (!dd->ctxtcnt) /* none of the above, set to max */
2320 dd->ctxtcnt = nchipctxts;
2321
2322 /*
2323 * Chip can be configured for 5, 9, or 17 ctxts, and choice
2324 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
2325 * Lock to be paranoid about later motion, etc.
2326 */
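/*
 * Encoding used below (a sketch, from the branches that follow):
 * CTXTCFG 0 selects 5 ctxts (the default), 1 selects 9, 2 selects 17;
 * more contexts means fewer eager TIDs per context.
 */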
2327 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2328 if (dd->ctxtcnt > 9)
2329 dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
2330 else if (dd->ctxtcnt > 5)
2331 dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
2332 /* else configure for default 5 receive ctxts */
2333 if (dd->qpn_mask)
2334 dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
2335 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2336 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2337
2338 /* kr_rcvegrcnt changes based on the number of contexts enabled */
2339 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
2340 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
2341}
2342
2343static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which)
2344{
2345 int lsb, ret = 0;
2346 u64 maskr; /* right-justified mask */
2347
2348 switch (which) {
2349 case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
2350 ret = ppd->link_width_enabled;
2351 goto done;
2352
2353 case QIB_IB_CFG_LWID: /* Get currently active Link-width */
2354 ret = ppd->link_width_active;
2355 goto done;
2356
2357 case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
2358 ret = ppd->link_speed_enabled;
2359 goto done;
2360
2361 case QIB_IB_CFG_SPD: /* Get current Link spd */
2362 ret = ppd->link_speed_active;
2363 goto done;
2364
2365 case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
2366 lsb = IBA7220_IBC_RXPOL_SHIFT;
2367 maskr = IBA7220_IBC_RXPOL_MASK;
2368 break;
2369
2370 case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
2371 lsb = IBA7220_IBC_LREV_SHIFT;
2372 maskr = IBA7220_IBC_LREV_MASK;
2373 break;
2374
2375 case QIB_IB_CFG_LINKLATENCY:
2376 ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
2377 & IBA7220_DDRSTAT_LINKLAT_MASK;
2378 goto done;
2379
2380 case QIB_IB_CFG_OP_VLS:
2381 ret = ppd->vls_operational;
2382 goto done;
2383
2384 case QIB_IB_CFG_VL_HIGH_CAP:
2385 ret = 0;
2386 goto done;
2387
2388 case QIB_IB_CFG_VL_LOW_CAP:
2389 ret = 0;
2390 goto done;
2391
2392 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2393 ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2394 OverrunThreshold);
2395 goto done;
2396
2397 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2398 ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2399 PhyerrThreshold);
2400 goto done;
2401
2402 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2403 /* will only take effect when the link state changes */
2404 ret = (ppd->cpspec->ibcctrl &
2405 SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
2406 IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
2407 goto done;
2408
2409 case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
2410 lsb = IBA7220_IBC_HRTBT_SHIFT;
2411 maskr = IBA7220_IBC_HRTBT_MASK;
2412 break;
2413
2414 case QIB_IB_CFG_PMA_TICKS:
2415 /*
2416 * 0x00 = 10x link transfer rate, or 4 ns for 2.5 Gb/s.
2417 * Since the clock is always 250MHz, the value is 1 or 0.
2418 */
2419 ret = (ppd->link_speed_active == QIB_IB_DDR);
2420 goto done;
2421
2422 default:
2423 ret = -EINVAL;
2424 goto done;
2425 }
2426 ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr);
2427done:
2428 return ret;
2429}
2430
2431static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
2432{
2433 struct qib_devdata *dd = ppd->dd;
2434 u64 maskr; /* right-justified mask */
2435 int lsb, ret = 0, setforce = 0;
2436 u16 lcmd, licmd;
2437 unsigned long flags;
2438
2439 switch (which) {
2440 case QIB_IB_CFG_LIDLMC:
2441 /*
2442 * Set LID and LMC. Combined to avoid a possible hazard;
2443 * caller puts LMC in 16 MSbits, DLID in 16 LSbits of val.
2444 */
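/*
 * E.g. (hypothetical values): LID 0x10 with LMC 2 arrives here
 * as val = (2 << 16) | 0x10.
 */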
2445 lsb = IBA7220_IBC_DLIDLMC_SHIFT;
2446 maskr = IBA7220_IBC_DLIDLMC_MASK;
2447 break;
2448
2449 case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
2450 /*
2451 * As with speed, only write the actual register if
2452 * the link is currently down, otherwise takes effect
2453 * on next link change.
2454 */
2455 ppd->link_width_enabled = val;
2456 if (!(ppd->lflags & QIBL_LINKDOWN))
2457 goto bail;
2458 /*
2459 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
2460 * will get called because we want to update
2461 * link_width_active, and the change may not take
2462 * effect for some time (if we are in POLL), so this
2463 * flag will force the updown routine to be called
2464 * on the next ibstatuschange down interrupt, even
2465 * if it's not a down->up transition.
2466 */
2467 val--; /* convert from IB to chip */
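/*
 * Sketch of the conversion, assuming the IB enable encoding 1=1X,
 * 2=4X, 3=(1X|4X): val-- yields 0 (1X only), 1 (4X only), or
 * 2 (width autoneg) for the chip field.
 */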
2468 maskr = IBA7220_IBC_WIDTH_MASK;
2469 lsb = IBA7220_IBC_WIDTH_SHIFT;
2470 setforce = 1;
2471 spin_lock_irqsave(&ppd->lflags_lock, flags);
2472 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
2473 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2474 break;
2475
2476 case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
2477 /*
2478 * If we turn off IB1.2, need to preset SerDes defaults,
2479 * but not right now. Set a flag for the next time
2480 * we command the link down. As with width, only write the
2481 * actual register if the link is currently down, otherwise
2482 * takes effect on next link change. Since setting is being
2483 * explicitly requested (via MAD or sysfs), clear autoneg
2484 * failure status if speed autoneg is enabled.
2485 */
2486 ppd->link_speed_enabled = val;
2487 if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
2488 !(val & (val - 1)))
2489 dd->cspec->presets_needed = 1;
2490 if (!(ppd->lflags & QIBL_LINKDOWN))
2491 goto bail;
2492 /*
2493 * We set the QIBL_IB_FORCE_NOTIFY bit so updown
2494 * will get called because we want to update
2495 * link_speed_active, and the change may not take
2496 * effect for some time (if we are in POLL), so this
2497 * flag will force the updown routine to be called
2498 * on the next ibstatuschange down interrupt, even
2499 * if it's not a down->up transition.
2500 */
2501 if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
2502 val = IBA7220_IBC_SPEED_AUTONEG_MASK |
2503 IBA7220_IBC_IBTA_1_2_MASK;
2504 spin_lock_irqsave(&ppd->lflags_lock, flags);
2505 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
2506 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2507 } else
2508 val = val == QIB_IB_DDR ?
2509 IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
2510 maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
2511 IBA7220_IBC_IBTA_1_2_MASK;
2512 /* IBTA 1.2 mode + speed bits are contiguous */
2513 lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE);
2514 setforce = 1;
2515 break;
2516
2517 case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
2518 lsb = IBA7220_IBC_RXPOL_SHIFT;
2519 maskr = IBA7220_IBC_RXPOL_MASK;
2520 break;
2521
2522 case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
2523 lsb = IBA7220_IBC_LREV_SHIFT;
2524 maskr = IBA7220_IBC_LREV_MASK;
2525 break;
2526
2527 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
2528 maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2529 OverrunThreshold);
2530 if (maskr != val) {
2531 ppd->cpspec->ibcctrl &=
2532 ~SYM_MASK(IBCCtrl, OverrunThreshold);
2533 ppd->cpspec->ibcctrl |= (u64) val <<
2534 SYM_LSB(IBCCtrl, OverrunThreshold);
2535 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2536 qib_write_kreg(dd, kr_scratch, 0);
2537 }
2538 goto bail;
2539
2540 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
2541 maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
2542 PhyerrThreshold);
2543 if (maskr != val) {
2544 ppd->cpspec->ibcctrl &=
2545 ~SYM_MASK(IBCCtrl, PhyerrThreshold);
2546 ppd->cpspec->ibcctrl |= (u64) val <<
2547 SYM_LSB(IBCCtrl, PhyerrThreshold);
2548 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2549 qib_write_kreg(dd, kr_scratch, 0);
2550 }
2551 goto bail;
2552
2553 case QIB_IB_CFG_PKEYS: /* update pkeys */
2554 maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
2555 ((u64) ppd->pkeys[2] << 32) |
2556 ((u64) ppd->pkeys[3] << 48);
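/*
 * Resulting layout: pkeys[0] occupies bits 15:0, pkeys[1] bits
 * 31:16, pkeys[2] bits 47:32, pkeys[3] bits 63:48.
 */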
2557 qib_write_kreg(dd, kr_partitionkey, maskr);
2558 goto bail;
2559
2560 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
2561 /* will only take effect when the link state changes */
2562 if (val == IB_LINKINITCMD_POLL)
2563 ppd->cpspec->ibcctrl &=
2564 ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
2565 else /* SLEEP */
2566 ppd->cpspec->ibcctrl |=
2567 SYM_MASK(IBCCtrl, LinkDownDefaultState);
2568 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2569 qib_write_kreg(dd, kr_scratch, 0);
2570 goto bail;
2571
2572 case QIB_IB_CFG_MTU: /* update the MTU in IBC */
2573 /*
2574 * Update our housekeeping variables, and set IBC max
2575 * size, same as init code; max IBC is max we allow in
2576 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
2577 * Set even if it's unchanged; print a debug message only
2578 * on changes.
2579 */
2580 val = (ppd->ibmaxlen >> 2) + 1;
2581 ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
2582 ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
2583 qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2584 qib_write_kreg(dd, kr_scratch, 0);
2585 goto bail;
2586
2587 case QIB_IB_CFG_LSTATE: /* set the IB link state */
2588 switch (val & 0xffff0000) {
2589 case IB_LINKCMD_DOWN:
2590 lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
2591 if (!ppd->cpspec->ibdeltainprog &&
2592 qib_compat_ddr_negotiate) {
2593 ppd->cpspec->ibdeltainprog = 1;
2594 ppd->cpspec->ibsymsnap =
2595 read_7220_creg32(dd, cr_ibsymbolerr);
2596 ppd->cpspec->iblnkerrsnap =
2597 read_7220_creg32(dd, cr_iblinkerrrecov);
2598 }
2599 break;
2600
2601 case IB_LINKCMD_ARMED:
2602 lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
2603 break;
2604
2605 case IB_LINKCMD_ACTIVE:
2606 lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
2607 break;
2608
2609 default:
2610 ret = -EINVAL;
2611 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
2612 goto bail;
2613 }
2614 switch (val & 0xffff) {
2615 case IB_LINKINITCMD_NOP:
2616 licmd = 0;
2617 break;
2618
2619 case IB_LINKINITCMD_POLL:
2620 licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
2621 break;
2622
2623 case IB_LINKINITCMD_SLEEP:
2624 licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
2625 break;
2626
2627 case IB_LINKINITCMD_DISABLE:
2628 licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
2629 ppd->cpspec->chase_end = 0;
2630 /*
2631 * Stop state chase counter and timer, if running;
2632 * wait for pending timer, but don't clear .data (ppd)!
2633 */
2634 if (ppd->cpspec->chase_timer.expires) {
2635 del_timer_sync(&ppd->cpspec->chase_timer);
2636 ppd->cpspec->chase_timer.expires = 0;
2637 }
2638 break;
2639
2640 default:
2641 ret = -EINVAL;
2642 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
2643 val & 0xffff);
2644 goto bail;
2645 }
2646 qib_set_ib_7220_lstate(ppd, lcmd, licmd);
2647 goto bail;
2648
2649 case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
2650 if (val > IBA7220_IBC_HRTBT_MASK) {
2651 ret = -EINVAL;
2652 goto bail;
2653 }
2654 lsb = IBA7220_IBC_HRTBT_SHIFT;
2655 maskr = IBA7220_IBC_HRTBT_MASK;
2656 break;
2657
2658 default:
2659 ret = -EINVAL;
2660 goto bail;
2661 }
2662 ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
2663 ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
2664 qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
2665 qib_write_kreg(dd, kr_scratch, 0);
2666 if (setforce) {
2667 spin_lock_irqsave(&ppd->lflags_lock, flags);
2668 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
2669 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2670 }
2671bail:
2672 return ret;
2673}
2674
2675static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
2676{
2677 int ret = 0;
2678 u64 val, ddr;
2679
2680 if (!strncmp(what, "ibc", 3)) {
2681 ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
2682 val = 0; /* disable heart beat, so link will come up */
2683 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
2684 ppd->dd->unit, ppd->port);
2685 } else if (!strncmp(what, "off", 3)) {
2686 ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
2687 /* enable heart beat again */
2688 val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
2689 qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
2690 "(normal)\n", ppd->dd->unit, ppd->port);
2691 } else
2692 ret = -EINVAL;
2693 if (!ret) {
2694 qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
2695 ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
2696 << IBA7220_IBC_HRTBT_SHIFT);
2697 ppd->cpspec->ibcddrctrl = ddr | val;
2698 qib_write_kreg(ppd->dd, kr_ibcddrctrl,
2699 ppd->cpspec->ibcddrctrl);
2700 qib_write_kreg(ppd->dd, kr_scratch, 0);
2701 }
2702 return ret;
2703}
2704
2705static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
2706 u32 updegr, u32 egrhd)
2707{
2708 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
2709 if (updegr)
2710 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
2711}
2712
2713static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
2714{
2715 u32 head, tail;
2716
2717 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
2718 if (rcd->rcvhdrtail_kvaddr)
2719 tail = qib_get_rcvhdrtail(rcd);
2720 else
2721 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
2722 return head == tail;
2723}
2724
2725/*
2726 * Modify the RCVCTRL register in a chip-specific way. This
2727 * is a function because bit positions and (future) register
2728 * location are chip-specific, but the needed operations are
2729 * generic. <op> is a bit-mask because we often want to
2730 * do multiple modifications.
2731 */
2732static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
2733 int ctxt)
2734{
2735 struct qib_devdata *dd = ppd->dd;
2736 u64 mask, val;
2737 unsigned long flags;
2738
2739 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2740 if (op & QIB_RCVCTRL_TAILUPD_ENB)
2741 dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
2742 if (op & QIB_RCVCTRL_TAILUPD_DIS)
2743 dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
2744 if (op & QIB_RCVCTRL_PKEY_ENB)
2745 dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
2746 if (op & QIB_RCVCTRL_PKEY_DIS)
2747 dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
2748 if (ctxt < 0)
2749 mask = (1ULL << dd->ctxtcnt) - 1;
2750 else
2751 mask = (1ULL << ctxt);
2752 if (op & QIB_RCVCTRL_CTXT_ENB) {
2753 /* always done for specific ctxt */
2754 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
2755 if (!(dd->flags & QIB_NODMA_RTAIL))
2756 dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
2757 /* Write these registers before the context is enabled. */
2758 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
2759 dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
2760 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
2761 dd->rcd[ctxt]->rcvhdrq_phys);
2762 dd->rcd[ctxt]->seq_cnt = 1;
2763 }
2764 if (op & QIB_RCVCTRL_CTXT_DIS)
2765 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
2766 if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
2767 dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
2768 if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
2769 dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
2770 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
2771 if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
2772 /* arm rcv interrupt */
2773 val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
2774 dd->rhdrhead_intr_off;
2775 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2776 }
2777 if (op & QIB_RCVCTRL_CTXT_ENB) {
2778 /*
2779 * Init the context registers also; if we were
2780 * disabled, tail and head should both be zero
2781 * already from the enable, but since we don't
2782 * know, we have to do it explicitly.
2783 */
2784 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
2785 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
2786
2787 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
2788 dd->rcd[ctxt]->head = val;
2789 /* If kctxt, interrupt on next receive. */
2790 if (ctxt < dd->first_user_ctxt)
2791 val |= dd->rhdrhead_intr_off;
2792 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
2793 }
2794 if (op & QIB_RCVCTRL_CTXT_DIS) {
2795 if (ctxt >= 0) {
2796 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
2797 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
2798 } else {
2799 unsigned i;
2800
2801 for (i = 0; i < dd->cfgctxts; i++) {
2802 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
2803 i, 0);
2804 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
2805 }
2806 }
2807 }
2808 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2809}
2810
2811/*
2812 * Modify the SENDCTRL register in a chip-specific way. This
2813 * is a function because there may be multiple such registers with
2814 * slightly different layouts. To start, we assume the
2815 * "canonical" register layout of the first chips.
2816 * The chip requires no back-to-back sendctrl writes, so write
2817 * the scratch register after writing sendctrl.
2818 */
2819static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
2820{
2821 struct qib_devdata *dd = ppd->dd;
2822 u64 tmp_dd_sendctrl;
2823 unsigned long flags;
2824
2825 spin_lock_irqsave(&dd->sendctrl_lock, flags);
2826
2827 /* First the ones that are "sticky", saved in shadow */
2828 if (op & QIB_SENDCTRL_CLEAR)
2829 dd->sendctrl = 0;
2830 if (op & QIB_SENDCTRL_SEND_DIS)
2831 dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);
2832 else if (op & QIB_SENDCTRL_SEND_ENB) {
2833 dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);
2834 if (dd->flags & QIB_USE_SPCL_TRIG)
2835 dd->sendctrl |= SYM_MASK(SendCtrl,
2836 SSpecialTriggerEn);
2837 }
2838 if (op & QIB_SENDCTRL_AVAIL_DIS)
2839 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
2840 else if (op & QIB_SENDCTRL_AVAIL_ENB)
2841 dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
2842
2843 if (op & QIB_SENDCTRL_DISARM_ALL) {
2844 u32 i, last;
2845
2846 tmp_dd_sendctrl = dd->sendctrl;
2847 /*
2848 * disarm any that are not yet launched, disabling sends
2849 * and updates until done.
2850 */
2851 last = dd->piobcnt2k + dd->piobcnt4k;
2852 tmp_dd_sendctrl &=
2853 ~(SYM_MASK(SendCtrl, SPioEnable) |
2854 SYM_MASK(SendCtrl, SendBufAvailUpd));
2855 for (i = 0; i < last; i++) {
2856 qib_write_kreg(dd, kr_sendctrl,
2857 tmp_dd_sendctrl |
2858 SYM_MASK(SendCtrl, Disarm) | i);
2859 qib_write_kreg(dd, kr_scratch, 0);
2860 }
2861 }
2862
2863 tmp_dd_sendctrl = dd->sendctrl;
2864
2865 if (op & QIB_SENDCTRL_FLUSH)
2866 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
2867 if (op & QIB_SENDCTRL_DISARM)
2868 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
2869 ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) <<
2870 SYM_LSB(SendCtrl, DisarmPIOBuf));
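/*
 * The low bits of <op> (masked by DisarmPIOBuf_RMASK above) carry
 * the index of the PIO buffer to disarm, alongside the flag bits.
 */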
2871 if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
2872 (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
2873 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
2874
2875 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
2876 qib_write_kreg(dd, kr_scratch, 0);
2877
2878 if (op & QIB_SENDCTRL_AVAIL_BLIP) {
2879 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2880 qib_write_kreg(dd, kr_scratch, 0);
2881 }
2882
2883 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2884
2885 if (op & QIB_SENDCTRL_FLUSH) {
2886 u32 v;
2887 /*
2888 * ensure writes have hit the chip, then do a few
2889 * more reads, to allow DMA of pioavail registers
2890 * to occur, so in-memory copy is in sync with
2891 * the chip. Not always safe to sleep.
2892 */
2893 v = qib_read_kreg32(dd, kr_scratch);
2894 qib_write_kreg(dd, kr_scratch, v);
2895 v = qib_read_kreg32(dd, kr_scratch);
2896 qib_write_kreg(dd, kr_scratch, v);
2897 qib_read_kreg32(dd, kr_scratch);
2898 }
2899}
2900
2901/**
2902 * qib_portcntr_7220 - read a per-port counter
2903 * @ppd: the qlogic_ib port
2904 * @reg: the QIBPORTCNTR_* counter to snapshot
2905 */
2906static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
2907{
2908 u64 ret = 0ULL;
2909 struct qib_devdata *dd = ppd->dd;
2910 u16 creg;
2911 /* 0xffff for unimplemented or synthesized counters */
2912 static const u16 xlator[] = {
2913 [QIBPORTCNTR_PKTSEND] = cr_pktsend,
2914 [QIBPORTCNTR_WORDSEND] = cr_wordsend,
2915 [QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
2916 [QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
2917 [QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
2918 [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
2919 [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
2920 [QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
2921 [QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
2922 [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
2923 [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
2924 [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
2925 [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
2926 [QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
2927 [QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
2928 [QIBPORTCNTR_ERRICRC] = cr_erricrc,
2929 [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
2930 [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
2931 [QIBPORTCNTR_BADFORMAT] = cr_badformat,
2932 [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
2933 [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
2934 [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
2935 [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
2936 [QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
2937 [QIBPORTCNTR_ERRLINK] = cr_errlink,
2938 [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
2939 [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
2940 [QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
2941 [QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
2942 [QIBPORTCNTR_PSSTART] = cr_psstart,
2943 [QIBPORTCNTR_PSSTAT] = cr_psstat,
2944 [QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
2945 [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
2946 [QIBPORTCNTR_KHDROVFL] = 0xffff,
2947 };
2948
2949 if (reg >= ARRAY_SIZE(xlator)) {
2950 qib_devinfo(ppd->dd->pcidev,
2951 "Unimplemented portcounter %u\n", reg);
2952 goto done;
2953 }
2954 creg = xlator[reg];
2955
2956 if (reg == QIBPORTCNTR_KHDROVFL) {
2957 int i;
2958
2959 /* sum over all kernel contexts */
2960 for (i = 0; i < dd->first_user_ctxt; i++)
2961 ret += read_7220_creg32(dd, cr_portovfl + i);
2962 }
2963 if (creg == 0xffff)
2964 goto done;
2965
2966 /*
2967 * only fast incrementing counters are 64bit; use 32 bit reads to
2968 * avoid two independent reads when on opteron
2969 */
2970 if ((creg == cr_wordsend || creg == cr_wordrcv ||
2971 creg == cr_pktsend || creg == cr_pktrcv))
2972 ret = read_7220_creg(dd, creg);
2973 else
2974 ret = read_7220_creg32(dd, creg);
2975 if (creg == cr_ibsymbolerr) {
2976 if (dd->pport->cpspec->ibdeltainprog)
2977 ret -= ret - ppd->cpspec->ibsymsnap;
2978 ret -= dd->pport->cpspec->ibsymdelta;
2979 } else if (creg == cr_iblinkerrrecov) {
2980 if (dd->pport->cpspec->ibdeltainprog)
2981 ret -= ret - ppd->cpspec->iblnkerrsnap;
2982 ret -= dd->pport->cpspec->iblnkerrdelta;
2983 }
2984done:
2985 return ret;
2986}
2987
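/*
 * Editorial sketch (not driver code): the xlator[] table above uses C99
 * designated initializers to build a sparse enum-to-register map, with
 * 0xffff explicitly marking counters that have no hardware register.
 * A minimal standalone analogue, guarded so it is never compiled into
 * the driver (all names hypothetical; compiles if extracted):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

enum { CNT_PKTSEND, CNT_PKTRCV, CNT_SYNTH, CNT_MAX };

static const uint16_t xlat[CNT_MAX] = {
	[CNT_PKTSEND] = 0x10,	/* chip register offsets (made up) */
	[CNT_PKTRCV]  = 0x14,
	[CNT_SYNTH]   = 0xffff,	/* synthesized in software */
};

static uint16_t lookup(unsigned reg)
{
	if (reg >= CNT_MAX)
		return 0xffff;	/* out of range: treat as unimplemented */
	return xlat[reg];
}

int main(void)
{
	printf("%#x %#x\n", (unsigned)lookup(CNT_PKTRCV),
	       (unsigned)lookup(99));
	return 0;
}
#endif
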
2988/*
2989 * Device counter names (not port-specific), one line per stat,
2990 * single string. Used by utilities like ipathstats to print the stats
2991 * in a way which works for different versions of drivers, without changing
2992 * the utility. Names need to be 12 chars or less (w/o newline), for proper
2993 * display by utility.
2994 * Non-error counters are first.
2995 * Start of "error" counters is indicated by a leading "E " on the first
2996 * "error" counter, and doesn't count in label length.
2997 * The EgrOvfl list needs to be last so we can truncate it at the
2998 * configured context count for the device.
2999 * cntr7220indices contains the corresponding register indices.
3000 */
3001static const char cntr7220names[] =
3002 "Interrupts\n"
3003 "HostBusStall\n"
3004 "E RxTIDFull\n"
3005 "RxTIDInvalid\n"
3006 "Ctxt0EgrOvfl\n"
3007 "Ctxt1EgrOvfl\n"
3008 "Ctxt2EgrOvfl\n"
3009 "Ctxt3EgrOvfl\n"
3010 "Ctxt4EgrOvfl\n"
3011 "Ctxt5EgrOvfl\n"
3012 "Ctxt6EgrOvfl\n"
3013 "Ctxt7EgrOvfl\n"
3014 "Ctxt8EgrOvfl\n"
3015 "Ctxt9EgrOvfl\n"
3016 "Ctx10EgrOvfl\n"
3017 "Ctx11EgrOvfl\n"
3018 "Ctx12EgrOvfl\n"
3019 "Ctx13EgrOvfl\n"
3020 "Ctx14EgrOvfl\n"
3021 "Ctx15EgrOvfl\n"
3022 "Ctx16EgrOvfl\n";
3023
3024static const size_t cntr7220indices[] = {
3025 cr_lbint,
3026 cr_lbflowstall,
3027 cr_errtidfull,
3028 cr_errtidvalid,
3029 cr_portovfl + 0,
3030 cr_portovfl + 1,
3031 cr_portovfl + 2,
3032 cr_portovfl + 3,
3033 cr_portovfl + 4,
3034 cr_portovfl + 5,
3035 cr_portovfl + 6,
3036 cr_portovfl + 7,
3037 cr_portovfl + 8,
3038 cr_portovfl + 9,
3039 cr_portovfl + 10,
3040 cr_portovfl + 11,
3041 cr_portovfl + 12,
3042 cr_portovfl + 13,
3043 cr_portovfl + 14,
3044 cr_portovfl + 15,
3045 cr_portovfl + 16,
3046};
3047
3048/*
3049 * same as cntr7220names and cntr7220indices, but for port-specific counters.
3050 * portcntr7220indices is somewhat complicated by some registers needing
3051 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
3052 */
3053static const char portcntr7220names[] =
3054 "TxPkt\n"
3055 "TxFlowPkt\n"
3056 "TxWords\n"
3057 "RxPkt\n"
3058 "RxFlowPkt\n"
3059 "RxWords\n"
3060 "TxFlowStall\n"
3061 "TxDmaDesc\n" /* 7220 and 7322-only */
3062 "E RxDlidFltr\n" /* 7220 and 7322-only */
3063 "IBStatusChng\n"
3064 "IBLinkDown\n"
3065 "IBLnkRecov\n"
3066 "IBRxLinkErr\n"
3067 "IBSymbolErr\n"
3068 "RxLLIErr\n"
3069 "RxBadFormat\n"
3070 "RxBadLen\n"
3071 "RxBufOvrfl\n"
3072 "RxEBP\n"
3073 "RxFlowCtlErr\n"
3074 "RxICRCerr\n"
3075 "RxLPCRCerr\n"
3076 "RxVCRCerr\n"
3077 "RxInvalLen\n"
3078 "RxInvalPKey\n"
3079 "RxPktDropped\n"
3080 "TxBadLength\n"
3081 "TxDropped\n"
3082 "TxInvalLen\n"
3083 "TxUnderrun\n"
3084 "TxUnsupVL\n"
3085 "RxLclPhyErr\n" /* 7220 and 7322-only */
3086 "RxVL15Drop\n" /* 7220 and 7322-only */
3087 "RxVlErr\n" /* 7220 and 7322-only */
3088 "XcessBufOvfl\n" /* 7220 and 7322-only */
3089 ;
3090
3091#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
3092static const size_t portcntr7220indices[] = {
3093 QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
3094 cr_pktsendflow,
3095 QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
3096 QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
3097 cr_pktrcvflowctrl,
3098 QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
3099 QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
3100 cr_txsdmadesc,
3101 cr_rxdlidfltr,
3102 cr_ibstatuschange,
3103 QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
3104 QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
3105 QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
3106 QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
3107 QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
3108 QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
3109 QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
3110 QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
3111 QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
3112 cr_rcvflowctrl_err,
3113 QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
3114 QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
3115 QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
3116 QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
3117 QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
3118 QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
3119 cr_invalidslen,
3120 cr_senddropped,
3121 cr_errslen,
3122 cr_sendunderrun,
3123 cr_txunsupvl,
3124 QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
3125 QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
3126 QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
3127 QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
3128};
3129
3130/* do all the setup to make the counter reads efficient later */
3131static void init_7220_cntrnames(struct qib_devdata *dd)
3132{
3133 int i, j = 0;
3134 char *s;
3135
3136 for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
3137 i++) {
3138 /* we always have at least one counter before the egrovfl */
3139 if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
3140 j = 1;
3141 s = strchr(s + 1, '\n');
3142 if (s && j)
3143 j++;
3144 }
3145 dd->cspec->ncntrs = i;
3146 if (!s)
3147 /* full list; size is without terminating null */
3148 dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
3149 else
3150 dd->cspec->cntrnamelen = 1 + s - cntr7220names;
3151 dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
3152 * sizeof(u64), GFP_KERNEL);
3153 if (!dd->cspec->cntrs)
3154 qib_dev_err(dd, "Failed allocation for counters\n");
3155
3156 for (i = 0, s = (char *)portcntr7220names; s; i++)
3157 s = strchr(s + 1, '\n');
3158 dd->cspec->nportcntrs = i - 1;
3159 dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
3160 dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
3161 * sizeof(u64), GFP_KERNEL);
3162 if (!dd->cspec->portcntrs)
3163 qib_dev_err(dd, "Failed allocation for portcounters\n");
3164}
3165
3166static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
3167 u64 **cntrp)
3168{
3169 u32 ret;
3170
3171 if (!dd->cspec->cntrs) {
3172 ret = 0;
3173 goto done;
3174 }
3175
3176 if (namep) {
3177 *namep = (char *)cntr7220names;
3178 ret = dd->cspec->cntrnamelen;
3179 if (pos >= ret)
3180 ret = 0; /* final read after getting everything */
3181 } else {
3182 u64 *cntr = dd->cspec->cntrs;
3183 int i;
3184
3185 ret = dd->cspec->ncntrs * sizeof(u64);
3186 if (!cntr || pos >= ret) {
3187 /* everything read, or couldn't get memory */
3188 ret = 0;
3189 goto done;
3190 }
3191
3192 *cntrp = cntr;
3193 for (i = 0; i < dd->cspec->ncntrs; i++)
3194 *cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
3195 }
3196done:
3197 return ret;
3198}
3199
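/*
 * Editorial sketch (not driver code): qib_read_7220cntrs() above is a
 * two-pass interface -- a non-NULL namep returns the newline-separated
 * name blob and its length, while a NULL namep snapshots the values.
 * Standalone analogue, guarded so it is never compiled into the driver
 * (names and values hypothetical):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static const char names[] = "CntrA\nCntrB\n";
static uint64_t vals[2];

static uint32_t read_cntrs(const char **namep, uint64_t **cntrp)
{
	if (namep) {
		*namep = names;
		return sizeof(names) - 1;	/* length without the NUL */
	}
	vals[0] = 1;		/* a real driver reads hardware here */
	vals[1] = 2;
	*cntrp = vals;
	return sizeof(vals);
}

int main(void)
{
	const char *n;
	uint64_t *v;
	uint32_t nlen = read_cntrs(&n, NULL);	/* pass 1: names */
	uint32_t vlen = read_cntrs(NULL, &v);	/* pass 2: values */

	printf("%u name bytes, %u values\n", (unsigned)nlen,
	       (unsigned)(vlen / sizeof(uint64_t)));
	return 0;
}
#endif
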
3200static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
3201 char **namep, u64 **cntrp)
3202{
3203 u32 ret;
3204
3205 if (!dd->cspec->portcntrs) {
3206 ret = 0;
3207 goto done;
3208 }
3209 if (namep) {
3210 *namep = (char *)portcntr7220names;
3211 ret = dd->cspec->portcntrnamelen;
3212 if (pos >= ret)
3213 ret = 0; /* final read after getting everything */
3214 } else {
3215 u64 *cntr = dd->cspec->portcntrs;
3216 struct qib_pportdata *ppd = &dd->pport[port];
3217 int i;
3218
3219 ret = dd->cspec->nportcntrs * sizeof(u64);
3220 if (!cntr || pos >= ret) {
3221 /* everything read, or couldn't get memory */
3222 ret = 0;
3223 goto done;
3224 }
3225 *cntrp = cntr;
3226 for (i = 0; i < dd->cspec->nportcntrs; i++) {
3227 if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
3228 *cntr++ = qib_portcntr_7220(ppd,
3229 portcntr7220indices[i] &
3230 ~_PORT_VIRT_FLAG);
3231 else
3232 *cntr++ = read_7220_creg32(dd,
3233 portcntr7220indices[i]);
3234 }
3235 }
3236done:
3237 return ret;
3238}
3239
3240/**
3241 * qib_get_7220_faststats - get word counters from chip before they overflow
3242 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
3243 *
3244 * This needs more work; in particular, a decision on whether we really
3245 * need traffic_wds done the way it is.
3246 * Called from add_timer.
3247 */
3248static void qib_get_7220_faststats(unsigned long opaque)
3249{
3250 struct qib_devdata *dd = (struct qib_devdata *) opaque;
3251 struct qib_pportdata *ppd = dd->pport;
3252 unsigned long flags;
3253 u64 traffic_wds;
3254
3255 /*
3256 * don't access the chip while running diags, or memory diags can
3257 * fail
3258 */
3259 if (!(dd->flags & QIB_INITTED) || dd->diag_client)
3260 /* but re-arm the timer, for the diags case; harmless otherwise */
3261 goto done;
3262
3263 /*
3264 * We now try to maintain an activity timer, based on traffic
3265 * exceeding a threshold, so we need to check the word-counts
3266 * even if they are 64-bit.
3267 */
3268 traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
3269 qib_portcntr_7220(ppd, cr_wordrcv);
3270 spin_lock_irqsave(&dd->eep_st_lock, flags);
3271 traffic_wds -= dd->traffic_wds;
3272 dd->traffic_wds += traffic_wds;
3273 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
3274 atomic_add(5, &dd->active_time); /* S/B #define */
3275 spin_unlock_irqrestore(&dd->eep_st_lock, flags);
3276done:
3277 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
3278}
3279
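/*
 * Editorial sketch (not driver code): the activity accounting above works
 * on free-running totals -- read the running sum, subtract the previous
 * snapshot to get this interval's delta, then advance the snapshot.
 * In isolation (guarded; not compiled into the driver):
 */
#if 0
#include <stdint.h>

static uint64_t last_total;	/* analogous to dd->traffic_wds */

static uint64_t traffic_delta(uint64_t total_now)
{
	uint64_t delta = total_now - last_total; /* unsigned math wraps safely */

	last_total = total_now;
	return delta;		/* compare against the activity threshold */
}
#endif
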
3280/*
3281 * If we are using MSI, try to fallback to INTx.
3282 */
3283static int qib_7220_intr_fallback(struct qib_devdata *dd)
3284{
3285 if (!dd->msi_lo)
3286 return 0;
3287
3288 qib_devinfo(dd->pcidev, "MSI interrupt not detected,"
3289 " trying INTx interrupts\n");
3290 qib_7220_free_irq(dd);
3291 qib_enable_intx(dd->pcidev);
3292 /*
3293 * Some newer kernels require free_irq before disable_msi,
3294 * and irq can be changed during disable and INTx enable
3295 * and we need to therefore use the pcidev->irq value,
3296 * not our saved MSI value.
3297 */
3298 dd->cspec->irq = dd->pcidev->irq;
3299 qib_setup_7220_interrupt(dd);
3300 return 1;
3301}
3302
3303/*
3304 * Reset the XGXS (between serdes and IBC). Slightly less intrusive
3305 * than resetting the IBC or external link state, and useful in some
3306 * cases to cause some retraining. To do this right, we reset IBC
3307 * as well.
3308 */
3309static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
3310{
3311 u64 val, prev_val;
3312 struct qib_devdata *dd = ppd->dd;
3313
3314 prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
3315 val = prev_val | QLOGIC_IB_XGXS_RESET;
3316 prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
3317 qib_write_kreg(dd, kr_control,
3318 dd->control & ~QLOGIC_IB_C_LINKENABLE);
3319 qib_write_kreg(dd, kr_xgxs_cfg, val);
3320 qib_read_kreg32(dd, kr_scratch);
3321 qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
3322 qib_write_kreg(dd, kr_control, dd->control);
3323}
3324
3325/*
3326 * For this chip, we want to use the same buffer every time
3327 * when we are trying to bring the link up (they are always VL15
3328 * packets). At that link state the packet should always go out immediately
3329 * (or at least be discarded at the tx interface if the link is down).
3330 * If it doesn't, and the buffer isn't available, that means some other
3331 * sender has gotten ahead of us, and is preventing our packet from going
3332 * out. In that case, we flush all packets, and try again. If that still
3333 * fails, we fail the request, and hope things work the next time around.
3334 *
3335 * We don't need very complicated heuristics on whether the packet had
3336 * time to go out or not, since even at SDR 1X, it goes out in very short
3337 * time periods, covered by the chip reads done here and as part of the
3338 * flush.
3339 */
3340static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
3341{
3342 u32 __iomem *buf;
3343 u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
3344 int do_cleanup;
3345 unsigned long flags;
3346
3347 /*
3348 * always blip to get avail list updated, since it's almost
3349 * always needed, and is fairly cheap.
3350 */
3351 sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
3352 qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3353 buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3354 if (buf)
3355 goto done;
3356
3357 spin_lock_irqsave(&ppd->sdma_lock, flags);
3358 if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
3359 ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
3360 __qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
3361 do_cleanup = 0;
3362 } else {
3363 do_cleanup = 1;
3364 qib_7220_sdma_hw_clean_up(ppd);
3365 }
3366 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
3367
3368 if (do_cleanup) {
3369 qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
3370 buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
3371 }
3372done:
3373 return buf;
3374}
3375
3376/*
3377 * This code for non-IBTA-compliant IB speed negotiation is only known to
3378 * work for the SDR to DDR transition, and only between an HCA and a switch
3379 * with recent firmware. It is based on observed heuristics, rather than
3380 * actual knowledge of the non-compliant speed negotiation.
3381 * It has a number of hard-coded fields, since the hope is to rewrite this
3382 * when a spec is available on how the negotiation is intended to work.
3383 */
3384static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
3385 u32 dcnt, u32 *data)
3386{
3387 int i;
3388 u64 pbc;
3389 u32 __iomem *piobuf;
3390 u32 pnum;
3391 struct qib_devdata *dd = ppd->dd;
3392
3393 i = 0;
3394 pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
3395 pbc |= PBC_7220_VL15_SEND;
3396 while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
3397 if (i++ > 5)
3398 return;
3399 udelay(2);
3400 }
3401 sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
3402 writeq(pbc, piobuf);
3403 qib_flush_wc();
3404 qib_pio_copy(piobuf + 2, hdr, 7);
3405 qib_pio_copy(piobuf + 9, data, dcnt);
3406 if (dd->flags & QIB_USE_SPCL_TRIG) {
3407 u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
3408
3409 qib_flush_wc();
3410 __raw_writel(0xaebecede, piobuf + spcl_off);
3411 }
3412 qib_flush_wc();
3413 qib_sendbuf_done(dd, pnum);
3414}
3415
3416/*
3417 * _start packet gets sent twice at start, _done gets sent twice at end
3418 */
3419static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
3420{
3421 struct qib_devdata *dd = ppd->dd;
3422 static u32 swapped;
3423 u32 dw, i, hcnt, dcnt, *data;
3424 static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
3425 static u32 madpayload_start[0x40] = {
3426 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
3427 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
3428 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
3429 };
3430 static u32 madpayload_done[0x40] = {
3431 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
3432 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
3433 0x40000001, 0x1388, 0x15e, /* rest 0's */
3434 };
3435
3436 dcnt = ARRAY_SIZE(madpayload_start);
3437 hcnt = ARRAY_SIZE(hdr);
3438 if (!swapped) {
3439 /* for maintainability, do it at runtime */
3440 for (i = 0; i < hcnt; i++) {
3441 dw = (__force u32) cpu_to_be32(hdr[i]);
3442 hdr[i] = dw;
3443 }
3444 for (i = 0; i < dcnt; i++) {
3445 dw = (__force u32) cpu_to_be32(madpayload_start[i]);
3446 madpayload_start[i] = dw;
3447 dw = (__force u32) cpu_to_be32(madpayload_done[i]);
3448 madpayload_done[i] = dw;
3449 }
3450 swapped = 1;
3451 }
3452
3453 data = which ? madpayload_done : madpayload_start;
3454
3455 autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
3456 qib_read_kreg64(dd, kr_scratch);
3457 udelay(2);
3458 autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
3459 qib_read_kreg64(dd, kr_scratch);
3460 udelay(2);
3461}
3462
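/*
 * Editorial sketch (not driver code): the static `swapped` flag above
 * converts the natively-written MAD tables to wire (big-endian) order
 * exactly once, on first use, rather than hand-maintaining byte-swapped
 * constants.  Standalone analogue (guarded; like the original it relies
 * on the first call being serialized):
 */
#if 0
#include <stdint.h>
#include <arpa/inet.h>		/* htonl() */

static uint32_t tbl[3] = { 0x1810103, 0x1388, 0x15e };
static int swapped;

static void swap_once(void)
{
	unsigned i;

	if (swapped)
		return;
	for (i = 0; i < 3; i++)
		tbl[i] = htonl(tbl[i]);	/* host order -> big-endian, once */
	swapped = 1;
}
#endif
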
3463/*
3464 * Do the absolute minimum to cause an IB speed change, and make it
3465 * ready, but don't actually trigger the change. The caller will
3466 * do that when ready (if link is in Polling training state, it will
3467 * happen immediately, otherwise when link next goes down)
3468 *
3469 * This routine should only be used as part of the DDR autonegotiation
3470 * code for devices that are not compliant with IB 1.2 (or code that
3471 * fixes things up for same).
3472 *
3473 * When the link has gone down and autoneg is enabled, or when autoneg
3474 * has failed and we give up until next time, we set both speeds, and
3475 * then we want IBTA enabled as well as "use max enabled speed".
3476 */
3477static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
3478{
3479 ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
3480 IBA7220_IBC_IBTA_1_2_MASK);
3481
3482 if (speed == (QIB_IB_SDR | QIB_IB_DDR))
3483 ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
3484 IBA7220_IBC_IBTA_1_2_MASK;
3485 else
3486 ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
3487 IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
3488
3489 qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
3490 qib_write_kreg(ppd->dd, kr_scratch, 0);
3491}
3492
3493/*
3494 * This routine is only used when we are not talking to another
3495 * IB 1.2-compliant device that we think can do DDR.
3496 * (This includes all existing switch chips as of Oct 2007.)
3497 * 1.2-compliant devices go directly to DDR prior to reaching INIT
3498 */
3499static void try_7220_autoneg(struct qib_pportdata *ppd)
3500{
3501 unsigned long flags;
3502
3503 /*
3504 * Required for older non-IB1.2 DDR switches. Newer
3505 * non-IB-compliant switches don't need it, but so far,
3506 * aren't bothered by it either. "Magic constant"
3507 */
3508 qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
3509
3510 spin_lock_irqsave(&ppd->lflags_lock, flags);
3511 ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
3512 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3513 autoneg_7220_send(ppd, 0);
3514 set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
3515
3516 toggle_7220_rclkrls(ppd->dd);
3517 /* 2 msec is minimum length of a poll cycle */
3518 schedule_delayed_work(&ppd->cpspec->autoneg_work,
3519 msecs_to_jiffies(2));
3520}
3521
3522/*
3523 * Handle the empirically determined mechanism for auto-negotiation
3524 * of DDR speed with switches.
3525 */
3526static void autoneg_7220_work(struct work_struct *work)
3527{
3528 struct qib_pportdata *ppd;
3529 struct qib_devdata *dd;
3530 u64 startms;
3531 u32 i;
3532 unsigned long flags;
3533
3534 ppd = &container_of(work, struct qib_chippport_specific,
3535 autoneg_work.work)->pportdata;
3536 dd = ppd->dd;
3537
3538 startms = jiffies_to_msecs(jiffies);
3539
3540 /*
3541 * Busy wait for this first part; it should be at most a
3542 * few hundred usec, since we scheduled ourselves for 2msec.
3543 */
3544 for (i = 0; i < 25; i++) {
3545 if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
3546 == IB_7220_LT_STATE_POLLQUIET) {
3547 qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
3548 break;
3549 }
3550 udelay(100);
3551 }
3552
3553 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
3554 goto done; /* we got there early or told to stop */
3555
3556 /* we expect this to timeout */
3557 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
3558 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
3559 msecs_to_jiffies(90)))
3560 goto done;
3561
3562 toggle_7220_rclkrls(dd);
3563
3564 /* we expect this to timeout */
3565 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
3566 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
3567 msecs_to_jiffies(1700)))
3568 goto done;
3569
3570 set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
3571 toggle_7220_rclkrls(dd);
3572
3573 /*
3574 * Wait up to 250 msec for link to train and get to INIT at DDR;
3575 * this should terminate early.
3576 */
3577 wait_event_timeout(ppd->cpspec->autoneg_wait,
3578 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
3579 msecs_to_jiffies(250));
3580done:
3581 if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
3582 spin_lock_irqsave(&ppd->lflags_lock, flags);
3583 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
3584 if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
3585 ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
3586 dd->cspec->autoneg_tries = 0;
3587 }
3588 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3589 set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
3590 }
3591}
3592
3593static u32 qib_7220_iblink_state(u64 ibcs)
3594{
3595 u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
3596
3597 switch (state) {
3598 case IB_7220_L_STATE_INIT:
3599 state = IB_PORT_INIT;
3600 break;
3601 case IB_7220_L_STATE_ARM:
3602 state = IB_PORT_ARMED;
3603 break;
3604 case IB_7220_L_STATE_ACTIVE:
3605 /* fall through */
3606 case IB_7220_L_STATE_ACT_DEFER:
3607 state = IB_PORT_ACTIVE;
3608 break;
3609 default: /* fall through */
3610 case IB_7220_L_STATE_DOWN:
3611 state = IB_PORT_DOWN;
3612 break;
3613 }
3614 return state;
3615}
3616
3617/* returns the IBTA port state, rather than the IBC link training state */
3618static u8 qib_7220_phys_portstate(u64 ibcs)
3619{
3620 u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
3621 return qib_7220_physportstate[state];
3622}
3623
3624static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
3625{
3626 int ret = 0, symadj = 0;
3627 struct qib_devdata *dd = ppd->dd;
3628 unsigned long flags;
3629
3630 spin_lock_irqsave(&ppd->lflags_lock, flags);
3631 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
3632 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3633
3634 if (!ibup) {
3635 /*
3636 * When the link goes down we don't want AEQ running, so it
3637 * won't interfere with IBC training, etc., and we need
3638 * to go back to the static SerDes preset values.
3639 */
3640 if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
3641 QIBL_IB_AUTONEG_INPROG)))
3642 set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
3643 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
3644 qib_sd7220_presets(dd);
3645 qib_cancel_sends(ppd); /* initial disarm, etc. */
3646 spin_lock_irqsave(&ppd->sdma_lock, flags);
3647 if (__qib_sdma_running(ppd))
3648 __qib_sdma_process_event(ppd,
3649 qib_sdma_event_e70_go_idle);
3650 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
3651 }
3652 /* this might be better placed in qib_sd7220_presets() */
3653 set_7220_relock_poll(dd, ibup);
3654 } else {
3655 if (qib_compat_ddr_negotiate &&
3656 !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
3657 QIBL_IB_AUTONEG_INPROG)) &&
3658 ppd->link_speed_active == QIB_IB_SDR &&
3659 (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
3660 (QIB_IB_DDR | QIB_IB_SDR) &&
3661 dd->cspec->autoneg_tries < AUTONEG_TRIES) {
3662 /* we are SDR, and DDR auto-negotiation enabled */
3663 ++dd->cspec->autoneg_tries;
3664 if (!ppd->cpspec->ibdeltainprog) {
3665 ppd->cpspec->ibdeltainprog = 1;
3666 ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
3667 cr_ibsymbolerr);
3668 ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
3669 cr_iblinkerrrecov);
3670 }
3671 try_7220_autoneg(ppd);
3672 ret = 1; /* no other IB status change processing */
3673 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
3674 ppd->link_speed_active == QIB_IB_SDR) {
3675 autoneg_7220_send(ppd, 1);
3676 set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
3677 udelay(2);
3678 toggle_7220_rclkrls(dd);
3679 ret = 1; /* no other IB status change processing */
3680 } else {
3681 if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
3682 (ppd->link_speed_active & QIB_IB_DDR)) {
3683 spin_lock_irqsave(&ppd->lflags_lock, flags);
3684 ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
3685 QIBL_IB_AUTONEG_FAILED);
3686 spin_unlock_irqrestore(&ppd->lflags_lock,
3687 flags);
3688 dd->cspec->autoneg_tries = 0;
3689 /* re-enable SDR, for next link down */
3690 set_7220_ibspeed_fast(ppd,
3691 ppd->link_speed_enabled);
3692 wake_up(&ppd->cpspec->autoneg_wait);
3693 symadj = 1;
3694 } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
3695 /*
3696 * Clear autoneg failure flag, and do setup
3697 * so we'll try next time link goes down and
3698 * back to INIT (possibly connected to a
3699 * different device).
3700 */
3701 spin_lock_irqsave(&ppd->lflags_lock, flags);
3702 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3703 spin_unlock_irqrestore(&ppd->lflags_lock,
3704 flags);
3705 ppd->cpspec->ibcddrctrl |=
3706 IBA7220_IBC_IBTA_1_2_MASK;
3707 qib_write_kreg(dd, kr_ncmodectrl, 0);
3708 symadj = 1;
3709 }
3710 }
3711
3712 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
3713 symadj = 1;
3714
3715 if (!ret) {
3716 ppd->delay_mult = rate_to_delay
3717 [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
3718 [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];
3719
3720 set_7220_relock_poll(dd, ibup);
3721 spin_lock_irqsave(&ppd->sdma_lock, flags);
3722 /*
3723 * Unlike 7322, the 7220 needs this, due to lack of
3724 * interrupt in some cases when we have sdma active
3725 * when the link goes down.
3726 */
3727 if (ppd->sdma_state.current_state !=
3728 qib_sdma_state_s20_idle)
3729 __qib_sdma_process_event(ppd,
3730 qib_sdma_event_e00_go_hw_down);
3731 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
3732 }
3733 }
3734
3735 if (symadj) {
3736 if (ppd->cpspec->ibdeltainprog) {
3737 ppd->cpspec->ibdeltainprog = 0;
3738 ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
3739 cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
3740 ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
3741 cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
3742 }
3743 } else if (!ibup && qib_compat_ddr_negotiate &&
3744 !ppd->cpspec->ibdeltainprog &&
3745 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
3746 ppd->cpspec->ibdeltainprog = 1;
3747 ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
3748 cr_ibsymbolerr);
3749 ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
3750 cr_iblinkerrrecov);
3751 }
3752
3753 if (!ret)
3754 qib_setup_7220_setextled(ppd, ibup);
3755 return ret;
3756}
3757
3758/*
3759 * Does read/modify/write to appropriate registers to
3760 * set output and direction bits selected by mask.
3761 * these are in their canonical positions (e.g. lsb of
3762 * dir will end up in D48 of extctrl on existing chips).
3763 * returns contents of GP Inputs.
3764 */
3765static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
3766{
3767 u64 read_val, new_out;
3768 unsigned long flags;
3769
3770 if (mask) {
3771 /* some bits being written, lock access to GPIO */
3772 dir &= mask;
3773 out &= mask;
3774 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
3775 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
3776 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
3777 new_out = (dd->cspec->gpio_out & ~mask) | out;
3778
3779 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
3780 qib_write_kreg(dd, kr_gpio_out, new_out);
3781 dd->cspec->gpio_out = new_out;
3782 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
3783 }
3784 /*
3785 * It is unlikely that a read at this time would get valid
3786 * data on a pin whose direction line was set in the same
3787 * call to this function. We include the read here because
3788 * that allows us to potentially combine a change on one pin with
3789 * a read on another, and because the old code did something like
3790 * this.
3791 */
3792 read_val = qib_read_kreg64(dd, kr_extstatus);
3793 return SYM_FIELD(read_val, EXTStatus, GPIOIn);
3794}
3795
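/*
 * Editorial sketch (not driver code): gpio_7220_mod() above is a shadowed
 * read-modify-write -- only bits selected by `mask` change, the rest come
 * from the software shadow, and the spinlock keeps concurrent callers from
 * losing each other's updates.  The bit math in isolation (guarded; not
 * compiled into the driver):
 */
#if 0
#include <stdint.h>

static uint64_t shadow_out;	/* software copy of the output register */

static uint64_t gpio_mod(uint64_t out, uint64_t mask)
{
	/* caller holds the lock; untouched bits keep their shadow value */
	shadow_out = (shadow_out & ~mask) | (out & mask);
	return shadow_out;	/* value to write back to hardware */
}
#endif
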
3796/*
3797 * Read fundamental info we need to use the chip. These are
3798 * the registers that describe chip capabilities, and are
3799 * saved in shadow registers.
3800 */
3801static void get_7220_chip_params(struct qib_devdata *dd)
3802{
3803 u64 val;
3804 u32 piobufs;
3805 int mtu;
3806
3807 dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
3808
3809 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
3810 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
3811 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
3812 dd->palign = qib_read_kreg32(dd, kr_palign);
3813 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
3814 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
3815
3816 val = qib_read_kreg64(dd, kr_sendpiosize);
3817 dd->piosize2k = val & ~0U;
3818 dd->piosize4k = val >> 32;
3819
3820 mtu = ib_mtu_enum_to_int(qib_ibmtu);
3821 if (mtu == -1)
3822 mtu = QIB_DEFAULT_MTU;
3823 dd->pport->ibmtu = (u32)mtu;
3824
3825 val = qib_read_kreg64(dd, kr_sendpiobufcnt);
3826 dd->piobcnt2k = val & ~0U;
3827 dd->piobcnt4k = val >> 32;
3828 /* these may be adjusted in init_chip_wc_pat() */
3829 dd->pio2kbase = (u32 __iomem *)
3830 ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
3831 if (dd->piobcnt4k) {
3832 dd->pio4kbase = (u32 __iomem *)
3833 ((char __iomem *) dd->kregbase +
3834 (dd->piobufbase >> 32));
3835 /*
3836 * 4K buffers take 2 pages; we use roundup just to be
3837 * paranoid; we calculate it once here, rather than on
3838 * every buffer allocation
3839 */
3840 dd->align4k = ALIGN(dd->piosize4k, dd->palign);
3841 }
3842
3843 piobufs = dd->piobcnt4k + dd->piobcnt2k;
3844
3845 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
3846 (sizeof(u64) * BITS_PER_BYTE / 2);
3847}
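
/*
 * Editorial note on the pioavregs computation above: each 64-bit
 * pioavail register carries 2 status bits per send buffer, i.e. 32
 * buffers per register, so the expression is a round-up division.
 * With hypothetical counts piobcnt2k = 128 and piobcnt4k = 32,
 * piobufs = 160 and pioavregs = DIV_ROUND_UP(160, 32) = 5.
 */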
3848
3849/*
3850 * The chip base addresses in cspec and cpspec have to be set
3851 * after possible init_chip_wc_pat(), rather than in
3852 * get_7220_chip_params(), so split out as a separate function.
3853 */
3854static void set_7220_baseaddrs(struct qib_devdata *dd)
3855{
3856 u32 cregbase;
3857 /* init after possible re-map in init_chip_wc_pat() */
3858 cregbase = qib_read_kreg32(dd, kr_counterregbase);
3859 dd->cspec->cregbase = (u64 __iomem *)
3860 ((char __iomem *) dd->kregbase + cregbase);
3861
3862 dd->egrtidbase = (u64 __iomem *)
3863 ((char __iomem *) dd->kregbase + dd->rcvegrbase);
3864}
3865
3866
3867#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) | \
3868 SYM_MASK(SendCtrl, SPioEnable) | \
3869 SYM_MASK(SendCtrl, SSpecialTriggerEn) | \
3870 SYM_MASK(SendCtrl, SendBufAvailUpd) | \
3871 SYM_MASK(SendCtrl, AvailUpdThld) | \
3872 SYM_MASK(SendCtrl, SDmaEnable) | \
3873 SYM_MASK(SendCtrl, SDmaIntEnable) | \
3874 SYM_MASK(SendCtrl, SDmaHalt) | \
3875 SYM_MASK(SendCtrl, SDmaSingleDescriptor))
3876
3877static int sendctrl_hook(struct qib_devdata *dd,
3878 const struct diag_observer *op,
3879 u32 offs, u64 *data, u64 mask, int only_32)
3880{
3881 unsigned long flags;
3882 unsigned idx = offs / sizeof(u64);
3883 u64 local_data, all_bits;
3884
3885 if (idx != kr_sendctrl) {
3886 qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
3887 offs, only_32 ? "32" : "64");
3888 return 0;
3889 }
3890
3891 all_bits = ~0ULL;
3892 if (only_32)
3893 all_bits >>= 32;
3894 spin_lock_irqsave(&dd->sendctrl_lock, flags);
3895 if ((mask & all_bits) != all_bits) {
3896 /*
3897 * At least some mask bits are zero, so we need
3898 * to read. The judgement call is whether from
3899 * reg or shadow. First-cut: read reg, and complain
3900 * if any bits which should be shadowed are different
3901 * from their shadowed value.
3902 */
3903 if (only_32)
3904 local_data = (u64)qib_read_kreg32(dd, idx);
3905 else
3906 local_data = qib_read_kreg64(dd, idx);
3907 qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
3908 (u32)local_data, (u32)dd->sendctrl);
3909 if ((local_data & SENDCTRL_SHADOWED) !=
3910 (dd->sendctrl & SENDCTRL_SHADOWED))
3911 qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
3912 (u32)local_data, (u32) dd->sendctrl);
3913 *data = (local_data & ~mask) | (*data & mask);
3914 }
3915 if (mask) {
3916 /*
3917 * At least some mask bits are one, so we need
3918 * to write, but only shadow some bits.
3919 */
3920 u64 sval, tval; /* Shadowed, transient */
3921
3922 /*
3923 * New shadow val is bits we don't want to touch,
3924 * ORed with bits we do, that are intended for shadow.
3925 */
3926 sval = (dd->sendctrl & ~mask);
3927 sval |= *data & SENDCTRL_SHADOWED & mask;
3928 dd->sendctrl = sval;
3929 tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
3930 qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
3931 (u32)tval, (u32)sval);
3932 qib_write_kreg(dd, kr_sendctrl, tval);
3933 qib_write_kreg(dd, kr_scratch, 0ULL);
3934 }
3935 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
3936
3937 return only_32 ? 4 : 8;
3938}
3939
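/*
 * Editorial sketch (not driver code): the sval/tval split in
 * sendctrl_hook() above -- bits in SENDCTRL_SHADOWED are remembered in
 * the shadow (sval), while non-shadowed bits are applied to the chip
 * this once (tval) and then forgotten.  In isolation (guarded; mask
 * hypothetical):
 */
#if 0
#include <stdint.h>

#define SHADOWED	0x00ffULL	/* hypothetical shadowed-bit mask */

static uint64_t shadow;

static uint64_t apply(uint64_t data, uint64_t mask)
{
	uint64_t sval = (shadow & ~mask) | (data & SHADOWED & mask);
	uint64_t tval = sval | (data & ~SHADOWED & mask);

	shadow = sval;		/* remember only the shadowed bits */
	return tval;		/* transient value to write to hardware */
}
#endif
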
3940static const struct diag_observer sendctrl_observer = {
3941 sendctrl_hook, kr_sendctrl * sizeof(u64),
3942 kr_sendctrl * sizeof(u64)
3943};
3944
3945/*
3946 * write the final few registers that depend on some of the
3947 * init setup. Done late in init, just before bringing up
3948 * the serdes.
3949 */
3950static int qib_late_7220_initreg(struct qib_devdata *dd)
3951{
3952 int ret = 0;
3953 u64 val;
3954
3955 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
3956 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
3957 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
3958 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
3959 val = qib_read_kreg64(dd, kr_sendpioavailaddr);
3960 if (val != dd->pioavailregs_phys) {
3961 qib_dev_err(dd, "Catastrophic software error, "
3962 "SendPIOAvailAddr written as %lx, "
3963 "read back as %llx\n",
3964 (unsigned long) dd->pioavailregs_phys,
3965 (unsigned long long) val);
3966 ret = -EINVAL;
3967 }
3968 qib_register_observer(dd, &sendctrl_observer);
3969 return ret;
3970}
3971
3972static int qib_init_7220_variables(struct qib_devdata *dd)
3973{
3974 struct qib_chippport_specific *cpspec;
3975 struct qib_pportdata *ppd;
3976 int ret = 0;
3977 u32 sbufs, updthresh;
3978
3979 cpspec = (struct qib_chippport_specific *)(dd + 1);
3980 ppd = &cpspec->pportdata;
3981 dd->pport = ppd;
3982 dd->num_pports = 1;
3983
3984 dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
3985 ppd->cpspec = cpspec;
3986
3987 spin_lock_init(&dd->cspec->sdepb_lock);
3988 spin_lock_init(&dd->cspec->rcvmod_lock);
3989 spin_lock_init(&dd->cspec->gpio_lock);
3990
3991 /* we haven't yet set QIB_PRESENT, so use read directly */
3992 dd->revision = readq(&dd->kregbase[kr_revision]);
3993
3994 if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
3995 qib_dev_err(dd, "Revision register read failure, "
3996 "giving up initialization\n");
3997 ret = -ENODEV;
3998 goto bail;
3999 }
4000 dd->flags |= QIB_PRESENT; /* now register routines work */
4001
4002 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
4003 ChipRevMajor);
4004 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
4005 ChipRevMinor);
4006
4007 get_7220_chip_params(dd);
4008 qib_7220_boardname(dd);
4009
4010 /*
4011 * GPIO bits for TWSI data and clock,
4012 * used for serial EEPROM.
4013 */
4014 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
4015 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
4016 dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
4017
4018 dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
4019 QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE;
4020 dd->flags |= qib_special_trigger ?
4021 QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
4022
4023 /*
4024 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
4025 * 2 is Some Misc, 3 is reserved for future.
4026 */
4027 dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
4028
4029 dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
4030
4031 dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
4032
4033 init_waitqueue_head(&cpspec->autoneg_wait);
4034 INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
4035
4036 qib_init_pportdata(ppd, dd, 0, 1);
4037 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
4038 ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;
4039
4040 ppd->link_width_enabled = ppd->link_width_supported;
4041 ppd->link_speed_enabled = ppd->link_speed_supported;
4042 /*
4043 * Set the initial values to reasonable default, will be set
4044 * for real when link is up.
4045 */
4046 ppd->link_width_active = IB_WIDTH_4X;
4047 ppd->link_speed_active = QIB_IB_SDR;
4048 ppd->delay_mult = rate_to_delay[0][1];
4049 ppd->vls_supported = IB_VL_VL0;
4050 ppd->vls_operational = ppd->vls_supported;
4051
4052 if (!qib_mini_init)
4053 qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);
4054
4055 init_timer(&ppd->cpspec->chase_timer);
4056 ppd->cpspec->chase_timer.function = reenable_7220_chase;
4057 ppd->cpspec->chase_timer.data = (unsigned long)ppd;
4058
4059 qib_num_cfg_vls = 1; /* if any 7220's, only one VL */
4060
4061 dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
4062 dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
4063 dd->rhf_offset =
4064 dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
4065
4066 /* we always allocate at least 2048 bytes for eager buffers */
4067 ret = ib_mtu_enum_to_int(qib_ibmtu);
4068 dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
4069
4070 qib_7220_tidtemplate(dd);
4071
4072 /*
4073 * We can request a receive interrupt for 1 or
4074 * more packets from current offset. For now, we set this
4075 * up for a single packet.
4076 */
4077 dd->rhdrhead_intr_off = 1ULL << 32;
4078
4079 /* setup the stats timer; the add_timer is done at end of init */
4080 init_timer(&dd->stats_timer);
4081 dd->stats_timer.function = qib_get_7220_faststats;
4082 dd->stats_timer.data = (unsigned long) dd;
4083 dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;
4084
4085 /*
4086 * Control[4] has been added to change the arbitration within
4087 * the SDMA engine between favoring data fetches over descriptor
4088 * fetches. qib_sdma_fetch_arb==0 gives data fetches priority.
4089 */
4090 if (qib_sdma_fetch_arb)
4091 dd->control |= 1 << 4;
4092
4093 dd->ureg_align = 0x10000; /* 64KB alignment */
4094
4095 dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1;
4096 qib_7220_config_ctxts(dd);
4097 qib_set_ctxtcnt(dd); /* needed for PAT setup */
4098
4099 if (qib_wc_pat) {
4100 ret = init_chip_wc_pat(dd, 0);
4101 if (ret)
4102 goto bail;
4103 }
4104 set_7220_baseaddrs(dd); /* set chip access pointers now */
4105
4106 ret = 0;
4107 if (qib_mini_init)
4108 goto bail;
4109
4110 ret = qib_create_ctxts(dd);
4111 init_7220_cntrnames(dd);
4112
4113 /* use all of the 4KB buffers for the kernel SDMA, zero if !SDMA.
4114 * reserve the update threshold amount for other kernel use, such
4115 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
4116 * unless we aren't enabling SDMA, in which case we want to use
4117 * all the 4k bufs for the kernel.
4118 * if this was less than the update threshold, we could wait
4119 * a long time for an update. Coded this way because we
4120 * sometimes change the update threshold for various reasons,
4121 * and we want this to remain robust.
4122 */
4123 updthresh = 8U; /* update threshold */
4124 if (dd->flags & QIB_HAS_SEND_DMA) {
4125 dd->cspec->sdmabufcnt = dd->piobcnt4k;
4126 sbufs = updthresh > 3 ? updthresh : 3;
4127 } else {
4128 dd->cspec->sdmabufcnt = 0;
4129 sbufs = dd->piobcnt4k;
4130 }
4131
4132 dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
4133 dd->cspec->sdmabufcnt;
4134 dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
4135 dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
4136 dd->pbufsctxt = dd->lastctxt_piobuf /
4137 (dd->cfgctxts - dd->first_user_ctxt);
4138
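	/*
	 * Editorial note, with hypothetical numbers: if piobcnt2k = 128,
	 * piobcnt4k = 32, sdmabufcnt = 32 and sbufs = 8, then
	 * lastbuf_for_pio = 160 - 32 = 128, lastctxt_piobuf = 128 - 8 = 120,
	 * and lastbuf_for_pio then drops to 127 because the range is
	 * inclusive; with 15 user contexts, pbufsctxt = 120 / 15 = 8.
	 */
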
4139 /*
4140 * if we are at 16 user contexts, we will have 7 sbufs
4141 * per context, so drop the update threshold to match. We
4142 * want to update before we actually run out, at low pbufs/ctxt
4143 * so give ourselves some margin
4144 */
4145 if ((dd->pbufsctxt - 2) < updthresh)
4146 updthresh = dd->pbufsctxt - 2;
4147
4148 dd->cspec->updthresh_dflt = updthresh;
4149 dd->cspec->updthresh = updthresh;
4150
4151 /* before full enable, no interrupts, no locking needed */
4152 dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
4153 << SYM_LSB(SendCtrl, AvailUpdThld);
4154
4155 dd->psxmitwait_supported = 1;
4156 dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
4157bail:
4158 return ret;
4159}
4160
4161static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
4162 u32 *pbufnum)
4163{
4164 u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
4165 struct qib_devdata *dd = ppd->dd;
4166 u32 __iomem *buf;
4167
4168 if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
4169 !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
4170 buf = get_7220_link_buf(ppd, pbufnum);
4171 else {
4172 if ((plen + 1) > dd->piosize2kmax_dwords)
4173 first = dd->piobcnt2k;
4174 else
4175 first = 0;
4176 /* try 4k if all 2k busy, so same last for both sizes */
4177 last = dd->cspec->lastbuf_for_pio;
4178 buf = qib_getsendbuf_range(dd, pbufnum, first, last);
4179 }
4180 return buf;
4181}
4182
4183/* these 2 "counters" are really control registers, and are always RW */
4184static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
4185 u32 start)
4186{
4187 write_7220_creg(ppd->dd, cr_psinterval, intv);
4188 write_7220_creg(ppd->dd, cr_psstart, start);
4189}
4190
4191/*
4192 * NOTE: no real attempt is made to generalize the SDMA stuff.
4193 * At some point "soon" we will have a new, more generalized
4194 * sdma interface, and then we'll clean this up.
4195 */
4196
4197/* Must be called with sdma_lock held, or before init finished */
4198static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
4199{
4200 /* Commit writes to memory and advance the tail on the chip */
4201 wmb();
4202 ppd->sdma_descq_tail = tail;
4203 qib_write_kreg(ppd->dd, kr_senddmatail, tail);
4204}
4205
4206static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
4207{
4208}
4209
4210static struct sdma_set_state_action sdma_7220_action_table[] = {
4211 [qib_sdma_state_s00_hw_down] = {
4212 .op_enable = 0,
4213 .op_intenable = 0,
4214 .op_halt = 0,
4215 .go_s99_running_tofalse = 1,
4216 },
4217 [qib_sdma_state_s10_hw_start_up_wait] = {
4218 .op_enable = 1,
4219 .op_intenable = 1,
4220 .op_halt = 1,
4221 },
4222 [qib_sdma_state_s20_idle] = {
4223 .op_enable = 1,
4224 .op_intenable = 1,
4225 .op_halt = 1,
4226 },
4227 [qib_sdma_state_s30_sw_clean_up_wait] = {
4228 .op_enable = 0,
4229 .op_intenable = 1,
4230 .op_halt = 0,
4231 },
4232 [qib_sdma_state_s40_hw_clean_up_wait] = {
4233 .op_enable = 1,
4234 .op_intenable = 1,
4235 .op_halt = 1,
4236 },
4237 [qib_sdma_state_s50_hw_halt_wait] = {
4238 .op_enable = 1,
4239 .op_intenable = 1,
4240 .op_halt = 1,
4241 },
4242 [qib_sdma_state_s99_running] = {
4243 .op_enable = 1,
4244 .op_intenable = 1,
4245 .op_halt = 0,
4246 .go_s99_running_totrue = 1,
4247 },
4248};
4249
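/*
 * Editorial sketch (not driver code): the table above is a table-driven
 * state machine -- each state names the control settings to apply on
 * entry, keeping the transition code generic.  The pattern in isolation
 * (guarded; fields hypothetical):
 */
#if 0
struct action {
	unsigned op_enable:1;
	unsigned op_halt:1;
};

static const struct action table[] = {
	[0] = { .op_enable = 0, .op_halt = 0 },	/* e.g. hardware down */
	[1] = { .op_enable = 1, .op_halt = 1 },	/* e.g. idle */
};

/* on entering state s, apply table[s] to the engine's control register */
#endif
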
4250static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
4251{
4252 ppd->sdma_state.set_state_action = sdma_7220_action_table;
4253}
4254
4255static int init_sdma_7220_regs(struct qib_pportdata *ppd)
4256{
4257 struct qib_devdata *dd = ppd->dd;
4258 unsigned i, n;
4259 u64 senddmabufmask[3] = { 0 };
4260
4261 /* Set SendDmaBase */
4262 qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
4263 qib_sdma_7220_setlengen(ppd);
4264 qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
4265 /* Set SendDmaHeadAddr */
4266 qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);
4267
4268 /*
4269 * Reserve all the former "kernel" piobufs, using high number range
4270 * so we get as many 4K buffers as possible
4271 */
4272 n = dd->piobcnt2k + dd->piobcnt4k;
4273 i = n - dd->cspec->sdmabufcnt;
4274
4275 for (; i < n; ++i) {
4276 unsigned word = i / 64;
4277 unsigned bit = i & 63;
4278
4279 BUG_ON(word >= 3);
4280 senddmabufmask[word] |= 1ULL << bit;
4281 }
4282 qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
4283 qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
4284 qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);
4285
4286 ppd->sdma_state.first_sendbuf = i;
4287 ppd->sdma_state.last_sendbuf = n;
4288
4289 return 0;
4290}
4291
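/*
 * Editorial sketch (not driver code): the mask setup above marks buffers
 * [n - sdmabufcnt, n) in a 3 x 64-bit bitmap with word = i / 64 and
 * bit = i % 64.  In isolation (guarded; not compiled into the driver):
 */
#if 0
#include <stdint.h>

static void mark_range(uint64_t bitmap[3], unsigned first, unsigned last)
{
	unsigned i;

	for (i = first; i < last; i++)	/* caller ensures last <= 192 */
		bitmap[i / 64] |= 1ULL << (i % 64);
}
#endif
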
4292/* sdma_lock must be held */
4293static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
4294{
4295 struct qib_devdata *dd = ppd->dd;
4296 int sane;
4297 int use_dmahead;
4298 u16 swhead;
4299 u16 swtail;
4300 u16 cnt;
4301 u16 hwhead;
4302
4303 use_dmahead = __qib_sdma_running(ppd) &&
4304 (dd->flags & QIB_HAS_SDMA_TIMEOUT);
4305retry:
4306 hwhead = use_dmahead ?
4307 (u16)le64_to_cpu(*ppd->sdma_head_dma) :
4308 (u16)qib_read_kreg32(dd, kr_senddmahead);
4309
4310 swhead = ppd->sdma_descq_head;
4311 swtail = ppd->sdma_descq_tail;
4312 cnt = ppd->sdma_descq_cnt;
4313
4314 if (swhead < swtail) {
4315 /* not wrapped */
4316 sane = (hwhead >= swhead) & (hwhead <= swtail);
4317 } else if (swhead > swtail) {
4318 /* wrapped around */
4319 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
4320 (hwhead <= swtail);
4321 } else {
4322 /* empty */
4323 sane = (hwhead == swhead);
4324 }
4325
4326 if (unlikely(!sane)) {
4327 if (use_dmahead) {
4328 /* try one more time, directly from the register */
4329 use_dmahead = 0;
4330 goto retry;
4331 }
4332 /* assume no progress */
4333 hwhead = swhead;
4334 }
4335
4336 return hwhead;
4337}
4338
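/*
 * Editorial sketch (not driver code): the sanity test above checks that
 * the hardware head lies in the region between the software head and
 * tail of the circular descriptor queue, with the wrapped and empty
 * cases handled separately.  As a standalone predicate (guarded):
 */
#if 0
#include <stdint.h>

static int head_sane(uint16_t hw, uint16_t sw_head, uint16_t sw_tail,
		     uint16_t cnt)
{
	if (sw_head < sw_tail)			/* not wrapped */
		return hw >= sw_head && hw <= sw_tail;
	if (sw_head > sw_tail)			/* wrapped around */
		return (hw >= sw_head && hw < cnt) || hw <= sw_tail;
	return hw == sw_head;			/* empty */
}
#endif
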
4339static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
4340{
4341 u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);
4342
4343 return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
4344 (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
4345 (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
4346 !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
4347}
4348
4349/*
4350 * Compute the amount of delay before sending the next packet if the
4351 * port's send rate differs from the static rate set for the QP.
4352 * Since the delay affects this packet but the amount of the delay is
4353 * based on the length of the previous packet, use the last delay computed
4354 * and save the delay count for this packet to be used next time
4355 * we get here.
4356 */
4357static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
4358 u8 srate, u8 vl)
4359{
4360 u8 snd_mult = ppd->delay_mult;
4361 u8 rcv_mult = ib_rate_to_delay[srate];
4362 u32 ret = ppd->cpspec->last_delay_mult;
4363
4364 ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
4365 (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
4366
4367 /* Indicate VL15, if necessary */
4368 if (vl == 15)
4369 ret |= PBC_7220_VL15_SEND_CTRL;
4370 return ret;
4371}
4372
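/*
 * Editorial note, with hypothetical numbers: for plen = 64, snd_mult = 1
 * and rcv_mult = 4, the stored delay is (64 * (4 - 1) + 1) >> 1 = 96;
 * and, as the comment above explains, the value returned now is the
 * delay computed for the previous packet.
 */
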
4373static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
4374{
4375}
4376
4377static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
4378{
4379 if (!rcd->ctxt) {
4380 rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
4381 rcd->rcvegr_tid_base = 0;
4382 } else {
4383 rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
4384 rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
4385 (rcd->ctxt - 1) * rcd->rcvegrcnt;
4386 }
4387}
4388
4389static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
4390 u32 len, u32 which, struct qib_ctxtdata *rcd)
4391{
4392 int i;
4393 unsigned long flags;
4394
4395 switch (which) {
4396 case TXCHK_CHG_TYPE_KERN:
4397 /* see if we need to raise avail update threshold */
4398 spin_lock_irqsave(&dd->uctxt_lock, flags);
4399 for (i = dd->first_user_ctxt;
4400 dd->cspec->updthresh != dd->cspec->updthresh_dflt
4401 && i < dd->cfgctxts; i++)
4402 if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
4403 ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
4404 < dd->cspec->updthresh_dflt)
4405 break;
4406 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
4407 if (i == dd->cfgctxts) {
4408 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4409 dd->cspec->updthresh = dd->cspec->updthresh_dflt;
4410 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
4411 dd->sendctrl |= (dd->cspec->updthresh &
4412 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
4413 SYM_LSB(SendCtrl, AvailUpdThld);
4414 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4415 sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
4416 }
4417 break;
4418 case TXCHK_CHG_TYPE_USER:
4419 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4420 if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
4421 / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
4422 dd->cspec->updthresh = (rcd->piocnt /
4423 rcd->subctxt_cnt) - 1;
4424 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
4425 dd->sendctrl |= (dd->cspec->updthresh &
4426 SYM_RMASK(SendCtrl, AvailUpdThld))
4427 << SYM_LSB(SendCtrl, AvailUpdThld);
4428 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4429 sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
4430 } else
4431 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4432 break;
4433 }
4434}
4435
4436static void writescratch(struct qib_devdata *dd, u32 val)
4437{
4438 qib_write_kreg(dd, kr_scratch, val);
4439}
4440
4441#define VALID_TS_RD_REG_MASK 0xBF
4442/**
4443 * qib_7220_tempsense_rd - read register of temp sensor via TWSI
4444 * @dd: the qlogic_ib device
4445 * @regnum: register to read from
4446 *
4447 * returns reg contents (0..255) or < 0 for error
4448 */
4449static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
4450{
4451 int ret;
4452 u8 rdata;
4453
4454 if (regnum > 7) {
4455 ret = -EINVAL;
4456 goto bail;
4457 }
4458
4459 /* return a bogus value for (the one) register we do not have */
4460 if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
4461 ret = 0;
4462 goto bail;
4463 }
4464
4465 ret = mutex_lock_interruptible(&dd->eep_lock);
4466 if (ret)
4467 goto bail;
4468
4469 ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
4470 if (!ret)
4471 ret = rdata;
4472
4473 mutex_unlock(&dd->eep_lock);
4474
4475 /*
4476 * There are three possibilities here:
4477 * ret is actual value (0..255)
4478 * ret is -ENXIO or -EINVAL from twsi code or this file
4479 * ret is -EINTR from mutex_lock_interruptible.
4480 */
4481bail:
4482 return ret;
4483}
4484
4485/* Dummy function, as 7220 boards never disable EEPROM Write */
4486static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
4487{
4488 return 1;
4489}
4490
4491/**
4492 * qib_init_iba7220_funcs - set up the chip-specific function pointers
4493 * @pdev: the pci_dev for qlogic_ib device
4494 * @ent: pci_device_id struct for this dev
4495 *
4496 * This is global, and is called directly at init to set up the
4497 * chip-specific function pointers for later use.
4498 */
4499struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
4500 const struct pci_device_id *ent)
4501{
4502 struct qib_devdata *dd;
4503 int ret;
4504 u32 boardid, minwidth;
4505
4506 dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
4507 sizeof(struct qib_chippport_specific));
4508 if (IS_ERR(dd))
4509 goto bail;
4510
4511 dd->f_bringup_serdes = qib_7220_bringup_serdes;
4512 dd->f_cleanup = qib_setup_7220_cleanup;
4513 dd->f_clear_tids = qib_7220_clear_tids;
4514 dd->f_free_irq = qib_7220_free_irq;
4515 dd->f_get_base_info = qib_7220_get_base_info;
4516 dd->f_get_msgheader = qib_7220_get_msgheader;
4517 dd->f_getsendbuf = qib_7220_getsendbuf;
4518 dd->f_gpio_mod = gpio_7220_mod;
4519 dd->f_eeprom_wen = qib_7220_eeprom_wen;
4520 dd->f_hdrqempty = qib_7220_hdrqempty;
4521 dd->f_ib_updown = qib_7220_ib_updown;
4522 dd->f_init_ctxt = qib_7220_init_ctxt;
4523 dd->f_initvl15_bufs = qib_7220_initvl15_bufs;
4524 dd->f_intr_fallback = qib_7220_intr_fallback;
4525 dd->f_late_initreg = qib_late_7220_initreg;
4526 dd->f_setpbc_control = qib_7220_setpbc_control;
4527 dd->f_portcntr = qib_portcntr_7220;
4528 dd->f_put_tid = qib_7220_put_tid;
4529 dd->f_quiet_serdes = qib_7220_quiet_serdes;
4530 dd->f_rcvctrl = rcvctrl_7220_mod;
4531 dd->f_read_cntrs = qib_read_7220cntrs;
4532 dd->f_read_portcntrs = qib_read_7220portcntrs;
4533 dd->f_reset = qib_setup_7220_reset;
4534 dd->f_init_sdma_regs = init_sdma_7220_regs;
4535 dd->f_sdma_busy = qib_sdma_7220_busy;
4536 dd->f_sdma_gethead = qib_sdma_7220_gethead;
4537 dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl;
4538 dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
4539 dd->f_sdma_update_tail = qib_sdma_update_7220_tail;
4540 dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up;
4541 dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up;
4542 dd->f_sdma_init_early = qib_7220_sdma_init_early;
4543 dd->f_sendctrl = sendctrl_7220_mod;
4544 dd->f_set_armlaunch = qib_set_7220_armlaunch;
4545 dd->f_set_cntr_sample = qib_set_cntr_7220_sample;
4546 dd->f_iblink_state = qib_7220_iblink_state;
4547 dd->f_ibphys_portstate = qib_7220_phys_portstate;
4548 dd->f_get_ib_cfg = qib_7220_get_ib_cfg;
4549 dd->f_set_ib_cfg = qib_7220_set_ib_cfg;
4550 dd->f_set_ib_loopback = qib_7220_set_loopback;
4551 dd->f_set_intr_state = qib_7220_set_intr_state;
4552 dd->f_setextled = qib_setup_7220_setextled;
4553 dd->f_txchk_change = qib_7220_txchk_change;
4554 dd->f_update_usrhead = qib_update_7220_usrhead;
4555 dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr;
4556 dd->f_xgxs_reset = qib_7220_xgxs_reset;
4557 dd->f_writescratch = writescratch;
4558 dd->f_tempsense_rd = qib_7220_tempsense_rd;
4559 /*
4560 * Do remaining pcie setup and save pcie values in dd.
4561 * Any error printing is already done by the init code.
4562 * On return, we have the chip mapped, but chip registers
4563 * are not set up until start of qib_init_7220_variables.
4564 */
4565 ret = qib_pcie_ddinit(dd, pdev, ent);
4566 if (ret < 0)
4567 goto bail_free;
4568
4569 /* initialize chip-specific variables */
4570 ret = qib_init_7220_variables(dd);
4571 if (ret)
4572 goto bail_cleanup;
4573
4574 if (qib_mini_init)
4575 goto bail;
4576
4577 boardid = SYM_FIELD(dd->revision, Revision,
4578 BoardID);
4579 switch (boardid) {
4580 case 0:
4581 case 2:
4582 case 10:
4583 case 12:
4584 minwidth = 16; /* x16 capable boards */
4585 break;
4586 default:
4587 minwidth = 8; /* x8 capable boards */
4588 break;
4589 }
4590 if (qib_pcie_params(dd, minwidth, NULL, NULL))
4591 qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
4592 "continuing anyway\n");
4593
4594 /* save IRQ for possible later use */
4595 dd->cspec->irq = pdev->irq;
4596
4597 if (qib_read_kreg64(dd, kr_hwerrstatus) &
4598 QLOGIC_IB_HWE_SERDESPLLFAILED)
4599 qib_write_kreg(dd, kr_hwerrclear,
4600 QLOGIC_IB_HWE_SERDESPLLFAILED);
4601
4602 /* setup interrupt handler (interrupt type handled above) */
4603 qib_setup_7220_interrupt(dd);
4604 qib_7220_init_hwerrors(dd);
4605
4606 /* clear diagctrl register, in case diags were running and crashed */
4607 qib_write_kreg(dd, kr_hwdiagctrl, 0);
4608
4609 goto bail;
4610
4611bail_cleanup:
4612 qib_pcie_ddcleanup(dd);
4613bail_free:
4614 qib_free_devdata(dd);
4615 dd = ERR_PTR(ret);
4616bail:
4617 return dd;
4618}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
new file mode 100644
index 000000000000..2c24eab35b54
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -0,0 +1,8058 @@
1/*
2 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * This file contains all of the code that is specific to the
35 * InfiniPath 7322 chip
36 */
37
38#include <linux/interrupt.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/io.h>
42#include <linux/jiffies.h>
43#include <rdma/ib_verbs.h>
44#include <rdma/ib_smi.h>
45#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
46#include <linux/dca.h>
47#endif
48
49#include "qib.h"
50#include "qib_7322_regs.h"
51#include "qib_qsfp.h"
52
53#include "qib_mad.h"
54
55static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
56static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
57static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
58static irqreturn_t qib_7322intr(int irq, void *data);
59static irqreturn_t qib_7322bufavail(int irq, void *data);
60static irqreturn_t sdma_intr(int irq, void *data);
61static irqreturn_t sdma_idle_intr(int irq, void *data);
62static irqreturn_t sdma_progress_intr(int irq, void *data);
63static irqreturn_t sdma_cleanup_intr(int irq, void *data);
64static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
65 struct qib_ctxtdata *rcd);
66static u8 qib_7322_phys_portstate(u64);
67static u32 qib_7322_iblink_state(u64);
68static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
69 u16 linitcmd);
70static void force_h1(struct qib_pportdata *);
71static void adj_tx_serdes(struct qib_pportdata *);
72static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
73static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
74
75static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
76static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
77
78#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
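/*
 * e.g. BMASK(7, 4) == 0xf0 and BMASK(3, 0) == 0x0f: every bit from
 * msb down to lsb, inclusive.
 */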
79
80/* LE2 serdes values for different cases */
81#define LE2_DEFAULT 5
82#define LE2_5m 4
83#define LE2_QME 0
84
85/* Below is special-purpose, so only really works for the IB SerDes blocks. */
86#define IBSD(hw_pidx) (hw_pidx + 2)
87
88/* these are variables for documentation and experimentation purposes */
89static const unsigned rcv_int_timeout = 375;
90static const unsigned rcv_int_count = 16;
91static const unsigned sdma_idle_cnt = 64;
92
93/* Time to stop altering Rx Equalization parameters, after link up. */
94#define RXEQ_DISABLE_MSECS 2500
95
96/*
97 * Number of VLs we are configured to use (to allow for more
98 * credits per vl, etc.)
99 */
100ushort qib_num_cfg_vls = 2;
101module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
102MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
103
104static ushort qib_chase = 1;
105module_param_named(chase, qib_chase, ushort, S_IRUGO);
106MODULE_PARM_DESC(chase, "Enable state chase handling");
107
108static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
109module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
110MODULE_PARM_DESC(long_attenuation, \
111 "attenuation cutoff (dB) for long copper cable setup");
112
113static ushort qib_singleport;
114module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
115MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
116
117
118/*
119 * Setup QMH7342 receive and transmit parameters, necessary because
120 * each bay, Mez connector, and IB port need different tuning, beyond
121 * what the switch and HCA can do automatically.
122 * It's expected to be done by cat'ing files to the module's parameter file,
123 * rather than setting up as a module parameter.
124 * It's a "write-only" file, returns 0 when read back.
125 * The unit, port, bay (if given), and values MUST be done as a single write.
126 * The unit, port, and bay must precede the values to be effective.
127 */
128static int setup_qmh_params(const char *, struct kernel_param *);
129static unsigned dummy_qmh_params;
130module_param_call(qmh_serdes_setup, setup_qmh_params, param_get_uint,
131 &dummy_qmh_params, S_IWUSR | S_IRUGO);
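/*
 * Sketch of the intended usage (values purely illustrative; real
 * settings are bay- and board-specific, and the path assumes the
 * usual ib_qib module name):
 *
 *   echo "0 1 3 ..." > /sys/module/ib_qib/parameters/qmh_serdes_setup
 *
 * supplies unit 0, IB port 1, bay 3, then the serdes values, all in
 * the single write required above.
 */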
132
133/* similarly for QME7342, but it's simpler */
134static int setup_qme_params(const char *, struct kernel_param *);
135static unsigned dummy_qme_params;
136module_param_call(qme_serdes_setup, setup_qme_params, param_get_uint,
137 &dummy_qme_params, S_IWUSR | S_IRUGO);
138
139#define MAX_ATTEN_LEN 64 /* plenty for any real system */
140/* for read back, default index is ~5m copper cable */
141static char cable_atten_list[MAX_ATTEN_LEN] = "10";
142static struct kparam_string kp_cable_atten = {
143 .string = cable_atten_list,
144 .maxlen = MAX_ATTEN_LEN
145};
146static int setup_cable_atten(const char *, struct kernel_param *);
147module_param_call(cable_atten, setup_cable_atten, param_get_string,
148 &kp_cable_atten, S_IWUSR | S_IRUGO);
149MODULE_PARM_DESC(cable_atten, \
150 "cable attenuation indices for cables with invalid EEPROM");
151
152#define BOARD_QME7342 5
153#define BOARD_QMH7342 6
154#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
155 BOARD_QMH7342)
156#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
157 BOARD_QME7342)
158
159#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
160
161#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
162
163#define MASK_ACROSS(lsb, msb) \
164 (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
165
166#define SYM_RMASK(regname, fldname) ((u64) \
167 QIB_7322_##regname##_##fldname##_RMASK)
168
169#define SYM_MASK(regname, fldname) ((u64) \
170 QIB_7322_##regname##_##fldname##_RMASK << \
171 QIB_7322_##regname##_##fldname##_LSB)
172
173#define SYM_FIELD(value, regname, fldname) ((u64) \
174 (((value) >> SYM_LSB(regname, fldname)) & \
175 SYM_RMASK(regname, fldname)))
176
177/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
178#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
179 (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
180
181#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
182#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
183#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
184#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
185#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
186/* Below because most, but not all, fields of IntMask have that full suffix */
187#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
188
189
190#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
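/*
 * e.g. SYM_FIELD(dd->revision, Revision, BoardID) expands to a shift by
 * QIB_7322_Revision_BoardID_LSB and a mask with
 * QIB_7322_Revision_BoardID_RMASK, extracting the board ID that the
 * IS_QMH()/IS_QME() tests above compare against.
 */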
191
192/*
193 * the size bits give us 2^N, in KB units. 0 marks as invalid,
194 * and 7 is reserved. We currently use only 2KB and 4KB
195 */
196#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
197#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
198#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
199#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
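/*
 * e.g. a BufSize field of 1 encodes 2^1 KB (2KB) and 2 encodes 2^2 KB
 * (4KB), matching the two defines above; TID addresses are stored
 * shifted down by IBA7322_TID_PA_SHIFT, i.e. 2KB-aligned.
 */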
200
201#define SendIBSLIDAssignMask \
202 QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
203#define SendIBSLMCMask \
204 QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
205
206#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
207#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
208#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
209#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
210#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
211#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
212
213#define _QIB_GPIO_SDA_NUM 1
214#define _QIB_GPIO_SCL_NUM 0
215#define QIB_EEPROM_WEN_NUM 14
216#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
217
218/* HW counter clock is at 4nsec */
219#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
220
221/* full speed IB port 1 only */
222#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
223#define PORT_SPD_CAP_SHIFT 3
224
225/* full speed featuremask, both ports */
226#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
227
228/*
229 * This file contains almost all the chip-specific register information and
230 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
231 */
232
233/* Use defines to tie machine-generated names to lower-case names */
234#define kr_contextcnt KREG_IDX(ContextCnt)
235#define kr_control KREG_IDX(Control)
236#define kr_counterregbase KREG_IDX(CntrRegBase)
237#define kr_errclear KREG_IDX(ErrClear)
238#define kr_errmask KREG_IDX(ErrMask)
239#define kr_errstatus KREG_IDX(ErrStatus)
240#define kr_extctrl KREG_IDX(EXTCtrl)
241#define kr_extstatus KREG_IDX(EXTStatus)
242#define kr_gpio_clear KREG_IDX(GPIOClear)
243#define kr_gpio_mask KREG_IDX(GPIOMask)
244#define kr_gpio_out KREG_IDX(GPIOOut)
245#define kr_gpio_status KREG_IDX(GPIOStatus)
246#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
247#define kr_debugportval KREG_IDX(DebugPortValueReg)
248#define kr_fmask KREG_IDX(feature_mask)
249#define kr_act_fmask KREG_IDX(active_feature_mask)
250#define kr_hwerrclear KREG_IDX(HwErrClear)
251#define kr_hwerrmask KREG_IDX(HwErrMask)
252#define kr_hwerrstatus KREG_IDX(HwErrStatus)
253#define kr_intclear KREG_IDX(IntClear)
254#define kr_intmask KREG_IDX(IntMask)
255#define kr_intredirect KREG_IDX(IntRedirect0)
256#define kr_intstatus KREG_IDX(IntStatus)
257#define kr_pagealign KREG_IDX(PageAlign)
258#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
259#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
260#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
261#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
262#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
263#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
264#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
265#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
266#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
267#define kr_revision KREG_IDX(Revision)
268#define kr_scratch KREG_IDX(Scratch)
269#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
270#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
271#define kr_sendctrl KREG_IDX(SendCtrl)
272#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
273#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
274#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
275#define kr_sendpiobufbase KREG_IDX(SendBufBase)
276#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
277#define kr_sendpiosize KREG_IDX(SendBufSize)
278#define kr_sendregbase KREG_IDX(SendRegBase)
279#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
280#define kr_userregbase KREG_IDX(UserRegBase)
281#define kr_intgranted KREG_IDX(Int_Granted)
282#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
283#define kr_intblocked KREG_IDX(IntBlocked)
284#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
285
286/*
287 * per-port kernel registers. Access only with qib_read_kreg_port()
288 * or qib_write_kreg_port()
289 */
290#define krp_errclear KREG_IBPORT_IDX(ErrClear)
291#define krp_errmask KREG_IBPORT_IDX(ErrMask)
292#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
293#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
294#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
295#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
296#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
297#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
298#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
299#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
300#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
301#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
302#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
303#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
304#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
305#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
306#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
307#define krp_psstart KREG_IBPORT_IDX(PSStart)
308#define krp_psstat KREG_IBPORT_IDX(PSStat)
309#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
310#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
311#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
312#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
313#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
314#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
315#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
316#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
317#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
318#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
319#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
320#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
321#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
322#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
323#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
324#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
325#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
326#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
327#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
328#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
329#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
330#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
331#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
332#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
333#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
334#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
335#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
336#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
337#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
338#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
339#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
340
341/*
342 * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
343 * or qib_write_kreg_ctxt()
344 */
345#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
346#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
347
348/*
349 * TID Flow table, per context. Reduces
350 * number of hdrq updates to one per flow (or on errors).
351 * context 0 and 1 share same memory, but have distinct
352 * addresses. Since for now, we never use expected sends
353 * on kernel contexts, we don't worry about that (we initialize
354 * those entries for ctxt 0/1 on driver load twice, for example).
355 */
356#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
357#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
358
359/* these are the error bits in the tid flows, and are W1C */
360#define TIDFLOW_ERRBITS ( \
361 (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
362 SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
363 (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
364 SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
365
366/* Most (not all) counters are per-IBport.
367 * This requires that LBIntCnt be at offset 0 in the group.
368 */
369#define CREG_IDX(regname) \
370((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
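/*
 * e.g. crp_pktsend below becomes the u64-word offset of TxDataPktCnt_0
 * from LBIntCnt, usable directly as an index into cspec->cregbase
 * (see read_7322_creg() and friends further down).
 */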
371
372#define crp_badformat CREG_IDX(RxVersionErrCnt)
373#define crp_err_rlen CREG_IDX(RxLenErrCnt)
374#define crp_erricrc CREG_IDX(RxICRCErrCnt)
375#define crp_errlink CREG_IDX(RxLinkMalformCnt)
376#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
377#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
378#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
379#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
380#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
381#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
382#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
383#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
384#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
385#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
386#define crp_pktrcv CREG_IDX(RxDataPktCnt)
387#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
388#define crp_pktsend CREG_IDX(TxDataPktCnt)
389#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
390#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
391#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
392#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
393#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
394#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
395#define crp_rcvebp CREG_IDX(RxEBPCnt)
396#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
397#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
398#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
399#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
400#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
401#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
402#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
403#define crp_sendstall CREG_IDX(TxFlowStallCnt)
404#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
405#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
406#define crp_txlenerr CREG_IDX(TxLenErrCnt)
408#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
409#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
410#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
411#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
412#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
413#define crp_wordrcv CREG_IDX(RxDwordCnt)
414#define crp_wordsend CREG_IDX(TxDwordCnt)
415#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
416
417/* these are the (few) counters that are not port-specific */
418#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
419 QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
420#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
421#define cr_lbint CREG_DEVIDX(LBIntCnt)
422#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
423#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
424#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
425#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
426#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
427
428/* no chip register for # of IB ports supported, so define */
429#define NUM_IB_PORTS 2
430
431/* 1 VL15 buffer per hardware IB port, no register for this, so define */
432#define NUM_VL15_BUFS NUM_IB_PORTS
433
434/*
435 * context 0 and 1 are special, and there is no chip register that
436 * defines this value, so we have to define it here.
437 * These are all allocated to either 0 or 1 for single port
438 * hardware configuration, otherwise each gets half
439 */
440#define KCTXT0_EGRCNT 2048
441
442/* values for vl and port fields in PBC, 7322-specific */
443#define PBC_PORT_SEL_LSB 26
444#define PBC_PORT_SEL_RMASK 1
445#define PBC_VL_NUM_LSB 27
446#define PBC_VL_NUM_RMASK 7
447#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
448#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
449
450static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
451 [IB_RATE_2_5_GBPS] = 16,
452 [IB_RATE_5_GBPS] = 8,
453 [IB_RATE_10_GBPS] = 4,
454 [IB_RATE_20_GBPS] = 2,
455 [IB_RATE_30_GBPS] = 2,
456 [IB_RATE_40_GBPS] = 1
457};
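/*
 * The delays above scale inversely with rate, roughly 40/rate_in_Gbps:
 * 16 for 2.5 Gbps down to 1 for 40 Gbps, with 30 Gbps rounded up to 2.
 */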
458
459#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
460#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
461
462/* link training states, from IBC */
463#define IB_7322_LT_STATE_DISABLED 0x00
464#define IB_7322_LT_STATE_LINKUP 0x01
465#define IB_7322_LT_STATE_POLLACTIVE 0x02
466#define IB_7322_LT_STATE_POLLQUIET 0x03
467#define IB_7322_LT_STATE_SLEEPDELAY 0x04
468#define IB_7322_LT_STATE_SLEEPQUIET 0x05
469#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
470#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
471#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
472#define IB_7322_LT_STATE_CFGIDLE 0x0b
473#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
474#define IB_7322_LT_STATE_TXREVLANES 0x0d
475#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
476#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
477#define IB_7322_LT_STATE_CFGENH 0x10
478#define IB_7322_LT_STATE_CFGTEST 0x11
479
480/* link state machine states from IBC */
481#define IB_7322_L_STATE_DOWN 0x0
482#define IB_7322_L_STATE_INIT 0x1
483#define IB_7322_L_STATE_ARM 0x2
484#define IB_7322_L_STATE_ACTIVE 0x3
485#define IB_7322_L_STATE_ACT_DEFER 0x4
486
487static const u8 qib_7322_physportstate[0x20] = {
488 [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
489 [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
490 [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
491 [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
492 [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
493 [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
494 [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
495 [IB_7322_LT_STATE_CFGRCVFCFG] =
496 IB_PHYSPORTSTATE_CFG_TRAIN,
497 [IB_7322_LT_STATE_CFGWAITRMT] =
498 IB_PHYSPORTSTATE_CFG_TRAIN,
499 [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
500 [IB_7322_LT_STATE_RECOVERRETRAIN] =
501 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
502 [IB_7322_LT_STATE_RECOVERWAITRMT] =
503 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
504 [IB_7322_LT_STATE_RECOVERIDLE] =
505 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
506 [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
507 [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
508 [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
509 [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
510 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
511 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
512 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
513 [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
514};
515
516struct qib_chip_specific {
517 u64 __iomem *cregbase;
518 u64 *cntrs;
519 spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
520 spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
521 u64 main_int_mask; /* clear bits which have dedicated handlers */
522 u64 int_enable_mask; /* for per port interrupts in single port mode */
523 u64 errormask;
524 u64 hwerrmask;
525 u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
526 u64 gpio_mask; /* shadow the gpio mask register */
527 u64 extctrl; /* shadow the gpio output enable, etc... */
528 u32 ncntrs;
529 u32 nportcntrs;
530 u32 cntrnamelen;
531 u32 portcntrnamelen;
532 u32 numctxts;
533 u32 rcvegrcnt;
534 u32 updthresh; /* current AvailUpdThld */
535 u32 updthresh_dflt; /* default AvailUpdThld */
536 u32 r1;
537 int irq;
538 u32 num_msix_entries;
539 u32 sdmabufcnt;
540 u32 lastbuf_for_pio;
541 u32 stay_in_freeze;
542 u32 recovery_ports_initted;
543#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
544 u32 dca_ctrl;
545 int rhdr_cpu[18];
546 int sdma_cpu[2];
547 u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
548#endif
549 struct msix_entry *msix_entries;
550 void **msix_arg;
551 unsigned long *sendchkenable;
552 unsigned long *sendgrhchk;
553 unsigned long *sendibchk;
554 u32 rcvavail_timeout[18];
555 char emsgbuf[128]; /* for device error interrupt msg buffer */
556};
557
558/* Table of Tx emphasis entries in "human readable" form. */
559struct txdds_ent {
560 u8 amp;
561 u8 pre;
562 u8 main;
563 u8 post;
564};
565
566struct vendor_txdds_ent {
567 u8 oui[QSFP_VOUI_LEN];
568 u8 *partnum;
569 struct txdds_ent sdr;
570 struct txdds_ent ddr;
571 struct txdds_ent qdr;
572};
573
574static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
575
576#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
577#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
578
579#define H1_FORCE_VAL 8
580#define H1_FORCE_QME 1 /* may be overridden via setup_qme_params() */
581#define H1_FORCE_QMH 7 /* may be overridden via setup_qmh_params() */
582
583/* The static and dynamic registers are paired, and the pairs indexed by spd */
584#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
585 + ((spd) * 2))
586
587#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
588#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
589#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
590#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
591#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
592
593static const struct txdds_ent qmh_sdr_txdds = { 11, 0, 5, 6 };
594static const struct txdds_ent qmh_ddr_txdds = { 7, 0, 2, 8 };
595static const struct txdds_ent qmh_qdr_txdds = { 0, 1, 3, 10 };
596
597/* this is used for unknown mez cards also */
598static const struct txdds_ent qme_sdr_txdds = { 11, 0, 4, 4 };
599static const struct txdds_ent qme_ddr_txdds = { 7, 0, 2, 7 };
600static const struct txdds_ent qme_qdr_txdds = { 0, 1, 12, 11 };
601
602struct qib_chippport_specific {
603 u64 __iomem *kpregbase;
604 u64 __iomem *cpregbase;
605 u64 *portcntrs;
606 struct qib_pportdata *ppd;
607 wait_queue_head_t autoneg_wait;
608 struct delayed_work autoneg_work;
609 struct delayed_work ipg_work;
610 struct timer_list chase_timer;
611 /*
612 * these 5 fields are used to establish deltas for IB symbol
613 * errors and linkrecovery errors. They can be reported on
614 * some chips during link negotiation prior to INIT, and with
615 * DDR when faking DDR negotiations with non-IBTA switches.
616 * The chip counters are adjusted at driver unload if there is
617 * a non-zero delta.
618 */
619 u64 ibdeltainprog;
620 u64 ibsymdelta;
621 u64 ibsymsnap;
622 u64 iblnkerrdelta;
623 u64 iblnkerrsnap;
624 u64 iblnkdownsnap;
625 u64 iblnkdowndelta;
626 u64 ibmalfdelta;
627 u64 ibmalfsnap;
628 u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
629 u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
630 u64 qdr_dfe_time;
631 u64 chase_end;
632 u32 autoneg_tries;
633 u32 recovery_init;
634 u32 qdr_dfe_on;
635 u32 qdr_reforce;
636 /*
637 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
638 * entry zero is unused, to simplify indexing
639 */
640 u16 h1_val;
641 u8 amp[SERDES_CHANS];
642 u8 pre[SERDES_CHANS];
643 u8 mainv[SERDES_CHANS];
644 u8 post[SERDES_CHANS];
645 u8 no_eep; /* attenuation index to use if no qsfp info */
646 u8 ipg_tries;
647 u8 ibmalfusesnap;
648 struct qib_qsfp_data qsfp_data;
649 char epmsgbuf[192]; /* for port error interrupt msg buffer */
650};
651
652static struct {
653 const char *name;
654 irq_handler_t handler;
655 int lsb;
656 int port; /* 0 if not port-specific, else port # */
657} irq_table[] = {
658 { QIB_DRV_NAME, qib_7322intr, -1, 0 },
659 { QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
660 SYM_LSB(IntStatus, SendBufAvail), 0 },
661 { QIB_DRV_NAME " (sdma 0)", sdma_intr,
662 SYM_LSB(IntStatus, SDmaInt_0), 1 },
663 { QIB_DRV_NAME " (sdma 1)", sdma_intr,
664 SYM_LSB(IntStatus, SDmaInt_1), 2 },
665 { QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
666 SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
667 { QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
668 SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
669 { QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
670 SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
671 { QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
672 SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
673 { QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
674 SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
675 { QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
676 SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
677};
678
679#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
680static const struct dca_reg_map {
681 int shadow_inx;
682 int lsb;
683 u64 mask;
684 u16 regno;
685} dca_rcvhdr_reg_map[] = {
686 { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
687 ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
688 { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
689 ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
690 { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
691 ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
692 { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
693 ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
694 { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
695 ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
696 { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
697 ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
698 { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
699 ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
700 { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
701 ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
702 { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
703 ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
704 { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
705 ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
706 { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
707 ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
708 { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
709 ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
710 { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
711 ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
712 { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
713 ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
714 { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
715 ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
716 { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
717 ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
718 { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
719 ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
720 { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
721 ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
722};
723#endif
724
725/* ibcctrl bits */
726#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
727/* cycle through TS1/TS2 till OK */
728#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
729/* wait for TS1, then go on */
730#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
731#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
732
733#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
734#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
735#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
736
737#define BLOB_7322_IBCHG 0x101
738
739static inline void qib_write_kreg(const struct qib_devdata *dd,
740 const u32 regno, u64 value);
741static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
742static void write_7322_initregs(struct qib_devdata *);
743static void write_7322_init_portregs(struct qib_pportdata *);
744static void setup_7322_link_recovery(struct qib_pportdata *, u32);
745static void check_7322_rxe_status(struct qib_pportdata *);
746static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
747
748/**
749 * qib_read_ureg32 - read 32-bit virtualized per-context register
750 * @dd: device
751 * @regno: register number
752 * @ctxt: context number
753 *
754 * Return the contents of a register that is virtualized to be per context.
755 * Returns 0 on errors (not distinguishable from valid contents at
756 * runtime; we may add a separate error variable at some point).
757 */
758static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
759 enum qib_ureg regno, int ctxt)
760{
761 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
762 return 0;
763 return readl(regno + (u64 __iomem *)(
764 (dd->ureg_align * ctxt) + (dd->userbase ?
765 (char __iomem *)dd->userbase :
766 (char __iomem *)dd->kregbase + dd->uregbase)));
767}
768
769/**
770 * qib_read_ureg - read virtualized per-context register
771 * @dd: device
772 * @regno: register number
773 * @ctxt: context number
774 *
775 * Return the contents of a register that is virtualized to be per context.
776 * Returns 0 on errors (not distinguishable from valid contents at
777 * runtime; we may add a separate error variable at some point).
778 */
779static inline u64 qib_read_ureg(const struct qib_devdata *dd,
780 enum qib_ureg regno, int ctxt)
781{
783 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
784 return 0;
785 return readq(regno + (u64 __iomem *)(
786 (dd->ureg_align * ctxt) + (dd->userbase ?
787 (char __iomem *)dd->userbase :
788 (char __iomem *)dd->kregbase + dd->uregbase)));
789}
790
791/**
792 * qib_write_ureg - write virtualized per-context register
793 * @dd: device
794 * @regno: register number
795 * @value: value
796 * @ctxt: context
797 *
798 * Write the contents of a register that is virtualized to be per context.
799 */
800static inline void qib_write_ureg(const struct qib_devdata *dd,
801 enum qib_ureg regno, u64 value, int ctxt)
802{
803 u64 __iomem *ubase;
804 if (dd->userbase)
805 ubase = (u64 __iomem *)
806 ((char __iomem *) dd->userbase +
807 dd->ureg_align * ctxt);
808 else
809 ubase = (u64 __iomem *)
810 (dd->uregbase +
811 (char __iomem *) dd->kregbase +
812 dd->ureg_align * ctxt);
813
814 if (dd->kregbase && (dd->flags & QIB_PRESENT))
815 writeq(value, &ubase[regno]);
816}
817
818static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
819 const u32 regno)
820{
821 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
822 return -1;
823 return readl((u32 __iomem *) &dd->kregbase[regno]);
824}
825
826static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
827 const u32 regno)
828{
829 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
830 return -1;
831 return readq(&dd->kregbase[regno]);
832}
833
834static inline void qib_write_kreg(const struct qib_devdata *dd,
835 const u32 regno, u64 value)
836{
837 if (dd->kregbase && (dd->flags & QIB_PRESENT))
838 writeq(value, &dd->kregbase[regno]);
839}
840
841/*
842 * Not many sanity checks for the port-specific kernel register routines,
843 * since they are only used when it's known to be safe.
844 */
845static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
846 const u16 regno)
847{
848 if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
849 return 0ULL;
850 return readq(&ppd->cpspec->kpregbase[regno]);
851}
852
853static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
854 const u16 regno, u64 value)
855{
856 if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
857 (ppd->dd->flags & QIB_PRESENT))
858 writeq(value, &ppd->cpspec->kpregbase[regno]);
859}
860
861/**
862 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
863 * @dd: the qlogic_ib device
864 * @regno: the register number to write
865 * @ctxt: the context containing the register
866 * @value: the value to write
867 */
868static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
869 const u16 regno, unsigned ctxt,
870 u64 value)
871{
872 qib_write_kreg(dd, regno + ctxt, value);
873}
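/*
 * Sketch of typical use, relying on the per-context registers being
 * laid out as consecutive u64 slots (which the regno + ctxt arithmetic
 * above assumes); "hdrq_phys" is an illustrative placeholder:
 *
 *   qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, hdrq_phys);
 *
 * writes the RcvHdrAddr slot for context "ctxt".
 */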
874
875static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
876{
877 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
878 return 0;
879 return readq(&dd->cspec->cregbase[regno]);
882}
883
884static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
885{
886 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
887 return 0;
888 return readl(&dd->cspec->cregbase[regno]);
891}
892
893static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
894 u16 regno, u64 value)
895{
896 if (ppd->cpspec && ppd->cpspec->cpregbase &&
897 (ppd->dd->flags & QIB_PRESENT))
898 writeq(value, &ppd->cpspec->cpregbase[regno]);
899}
900
901static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
902 u16 regno)
903{
904 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
905 !(ppd->dd->flags & QIB_PRESENT))
906 return 0;
907 return readq(&ppd->cpspec->cpregbase[regno]);
908}
909
910static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
911 u16 regno)
912{
913 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
914 !(ppd->dd->flags & QIB_PRESENT))
915 return 0;
916 return readl(&ppd->cpspec->cpregbase[regno]);
917}
918
919/* bits in Control register */
920#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
921#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
922
923/* bits in general interrupt regs */
924#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
925#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
926#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
927#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
928#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
929#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
930#define QIB_I_C_ERROR INT_MASK(Err)
931
932#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
933#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
934#define QIB_I_GPIO INT_MASK(AssertGPIO)
935#define QIB_I_P_SDMAINT(pidx) \
936 (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
937 INT_MASK_P(SDmaProgress, pidx) | \
938 INT_MASK_PM(SDmaCleanupDone, pidx))
939
940/* Interrupt bits that are "per port" */
941#define QIB_I_P_BITSEXTANT(pidx) \
942 (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
943 INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
944 INT_MASK_P(SDmaProgress, pidx) | \
945 INT_MASK_PM(SDmaCleanupDone, pidx))
946
947/* Interrupt bits that are common to a device */
948/* currently unused: QIB_I_SPIOSENT */
949#define QIB_I_C_BITSEXTANT \
950 (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
951 QIB_I_SPIOSENT | \
952 QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
953
954#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
955 QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
956
957/*
958 * Error bits that are "per port".
959 */
960#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
961#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
962#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
963#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
964#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
965#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
966#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
967#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
968#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
969#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
970#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
971#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
972#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
973#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
974#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
975#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
976#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
977#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
978#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
979#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
980#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
981#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
982#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
983#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
984#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
985#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
986#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
987#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
988
989#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
990#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
991#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
992#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
993#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
994#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
995#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
996#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
997#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
998#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
999#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
1000
1001/* Error bits that are common to a device */
1002#define QIB_E_RESET ERR_MASK(ResetNegated)
1003#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
1004#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
1005
1006
1007/*
1008 * Per chip (rather than per-port) errors. Most either do
1009 * nothing but trigger a print (because they self-recover, or
1010 * always occur in tandem with other errors that handle the
1011 * issue), or indicate errors with no recovery; either way,
1012 * we want to know that they happened.
1013 */
1014#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
1015#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
1016#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
1017#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
1018#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
1019#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
1020#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
1021#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
1022
1023/* SDMA chip errors (not per port)
1024 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
1025 * the SDMAHALT error immediately, so we just print the dup error via the
1026 * E_AUTO mechanism. This is true of most of the per-port fatal errors
1027 * as well, but since this is port-independent, by definition, it's
1028 * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
1029 * packet send errors, and so are handled in the same manner as other
1030 * per-packet errors.
1031 */
1032#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
1033#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
1034#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
1035
1036/*
1037 * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
1038 * it is used to print "common" packet errors.
1039 */
1040#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
1041 QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
1042 QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
1043 QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1044 QIB_E_P_REBP)
1045
1046/* Error bits that are packet-related (Receive, per-port) */
1047#define QIB_E_P_RPKTERRS (\
1048 QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
1049 QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
1050 QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
1051 QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
1052 QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
1053 QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
1054
1055/*
1056 * Error bits that are Send-related (per port)
1057 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
1058 * All of these potentially need to have a buffer disarmed
1059 */
1060#define QIB_E_P_SPKTERRS (\
1061 QIB_E_P_SUNEXP_PKTNUM |\
1062 QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1063 QIB_E_P_SMAXPKTLEN |\
1064 QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1065 QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1066 QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1067
1068#define QIB_E_SPKTERRS ( \
1069 QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1070 ERR_MASK_N(SendUnsupportedVLErr) | \
1071 QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1072
1073#define QIB_E_P_SDMAERRS ( \
1074 QIB_E_P_SDMAHALT | \
1075 QIB_E_P_SDMADESCADDRMISALIGN | \
1076 QIB_E_P_SDMAUNEXPDATA | \
1077 QIB_E_P_SDMAMISSINGDW | \
1078 QIB_E_P_SDMADWEN | \
1079 QIB_E_P_SDMARPYTAG | \
1080 QIB_E_P_SDMA1STDESC | \
1081 QIB_E_P_SDMABASE | \
1082 QIB_E_P_SDMATAILOUTOFBOUND | \
1083 QIB_E_P_SDMAOUTOFBOUND | \
1084 QIB_E_P_SDMAGENMISMATCH)
1085
1086/*
1087 * This sets some bits more than once, but makes it more obvious which
1088 * bits are not handled under other categories, and the repeat definition
1089 * is not a problem.
1090 */
1091#define QIB_E_P_BITSEXTANT ( \
1092 QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1093 QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1094 QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1095 QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1096 )
1097
1098/*
1099 * These are errors that can occur when the link
1100 * changes state while a packet is being sent or received. This doesn't
1101 * cover things like EBP or VCRC that can be the result of the sender
1102 * having the link change state, so we receive a "known bad" packet.
1103 * All of these are "per port", so renamed:
1104 */
1105#define QIB_E_P_LINK_PKTERRS (\
1106 QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1107 QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1108 QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1109 QIB_E_P_RUNEXPCHAR)
1110
1111/*
1112 * This sets some bits more than once, but makes it more obvious which
1113 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1114 * and the repeat definition is not a problem.
1115 */
1116#define QIB_E_C_BITSEXTANT (\
1117 QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1118 QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1119 QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1120
1121/* Likewise Neuter E_SPKT_ERRS_IGNORE */
1122#define E_SPKT_ERRS_IGNORE 0
1123
1124#define QIB_EXTS_MEMBIST_DISABLED \
1125 SYM_MASK(EXTStatus, MemBISTDisabled)
1126#define QIB_EXTS_MEMBIST_ENDTEST \
1127 SYM_MASK(EXTStatus, MemBISTEndTest)
1128
1129#define QIB_E_SPIOARMLAUNCH \
1130 ERR_MASK(SendArmLaunchErr)
1131
1132#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1133#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1134
1135/*
1136 * IBTA_1_2 is set when multiple speeds are enabled (normal),
1137 * and also if forced QDR (only QDR enabled). It's enabled for the
1138 * forced QDR case so that scrambling will be enabled by the TS3
1139 * exchange, when supported by both sides of the link.
1140 */
1141#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1142#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1143#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1144#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1145#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1146#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1147 SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1148#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1149
1150#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1151#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1152
1153#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1154#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1155#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1156
1157#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1158#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1159#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1160 SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1161#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1162 SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1163#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1164
1165#define IBA7322_REDIRECT_VEC_PER_REG 12
1166
1167#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1168#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1169#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1170#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1171#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1172
1173#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1174
1175#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1176 .msg = #fldname }
1177#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1178 fldname##Mask##_##port), .msg = #fldname }
1179static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1180 HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1181 HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1182 HWE_AUTO(PCIESerdesPClkNotDetect),
1183 HWE_AUTO(PowerOnBISTFailed),
1184 HWE_AUTO(TempsenseTholdReached),
1185 HWE_AUTO(MemoryErr),
1186 HWE_AUTO(PCIeBusParityErr),
1187 HWE_AUTO(PcieCplTimeout),
1188 HWE_AUTO(PciePoisonedTLP),
1189 HWE_AUTO_P(SDmaMemReadErr, 1),
1190 HWE_AUTO_P(SDmaMemReadErr, 0),
1191 HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1192 HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1193 HWE_AUTO_P(statusValidNoEop, 1),
1194 HWE_AUTO_P(statusValidNoEop, 0),
1195 HWE_AUTO(LATriggered),
1196 { .mask = 0 }
1197};
1198
1199#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1200 .msg = #fldname }
1201#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1202 .msg = #fldname }
1203static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1204 E_AUTO(ResetNegated),
1205 E_AUTO(HardwareErr),
1206 E_AUTO(InvalidAddrErr),
1207 E_AUTO(SDmaVL15Err),
1208 E_AUTO(SBufVL15MisUseErr),
1209 E_AUTO(InvalidEEPCmd),
1210 E_AUTO(RcvContextShareErr),
1211 E_AUTO(SendVLMismatchErr),
1212 E_AUTO(SendArmLaunchErr),
1213 E_AUTO(SendSpecialTriggerErr),
1214 E_AUTO(SDmaWrongPortErr),
1215 E_AUTO(SDmaBufMaskDuplicateErr),
1216 E_AUTO(RcvHdrFullErr),
1217 E_AUTO(RcvEgrFullErr),
1218 { .mask = 0 }
1219};
1220
1221static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
1222 E_P_AUTO(IBStatusChanged),
1223 E_P_AUTO(SHeadersErr),
1224 E_P_AUTO(VL15BufMisuseErr),
1225 /*
1226 * SDmaHaltErr is not really an error, so make the message clearer.
1227 */
1228 {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
1229 E_P_AUTO(SDmaDescAddrMisalignErr),
1230 E_P_AUTO(SDmaUnexpDataErr),
1231 E_P_AUTO(SDmaMissingDwErr),
1232 E_P_AUTO(SDmaDwEnErr),
1233 E_P_AUTO(SDmaRpyTagErr),
1234 E_P_AUTO(SDma1stDescErr),
1235 E_P_AUTO(SDmaBaseErr),
1236 E_P_AUTO(SDmaTailOutOfBoundErr),
1237 E_P_AUTO(SDmaOutOfBoundErr),
1238 E_P_AUTO(SDmaGenMismatchErr),
1239 E_P_AUTO(SendBufMisuseErr),
1240 E_P_AUTO(SendUnsupportedVLErr),
1241 E_P_AUTO(SendUnexpectedPktNumErr),
1242 E_P_AUTO(SendDroppedDataPktErr),
1243 E_P_AUTO(SendDroppedSmpPktErr),
1244 E_P_AUTO(SendPktLenErr),
1245 E_P_AUTO(SendUnderRunErr),
1246 E_P_AUTO(SendMaxPktLenErr),
1247 E_P_AUTO(SendMinPktLenErr),
1248 E_P_AUTO(RcvIBLostLinkErr),
1249 E_P_AUTO(RcvHdrErr),
1250 E_P_AUTO(RcvHdrLenErr),
1251 E_P_AUTO(RcvBadTidErr),
1252 E_P_AUTO(RcvBadVersionErr),
1253 E_P_AUTO(RcvIBFlowErr),
1254 E_P_AUTO(RcvEBPErr),
1255 E_P_AUTO(RcvUnsupportedVLErr),
1256 E_P_AUTO(RcvUnexpectedCharErr),
1257 E_P_AUTO(RcvShortPktLenErr),
1258 E_P_AUTO(RcvLongPktLenErr),
1259 E_P_AUTO(RcvMaxPktLenErr),
1260 E_P_AUTO(RcvMinPktLenErr),
1261 E_P_AUTO(RcvICRCErr),
1262 E_P_AUTO(RcvVCRCErr),
1263 E_P_AUTO(RcvFormatErr),
1264 { .mask = 0 }
1265};
1266
1267/*
1268 * Below generates "auto-message" for interrupts not specific to any port or
1269 * context
1270 */
1271#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1272 .msg = #fldname }
1273/* Below generates "auto-message" for interrupts specific to a port */
1274#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1275 SYM_LSB(IntMask, fldname##Mask##_0), \
1276 SYM_LSB(IntMask, fldname##Mask##_1)), \
1277 .msg = #fldname "_P" }
1278/* For some reason, the SerDesTrimDone bits are reversed */
1279#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1280 SYM_LSB(IntMask, fldname##Mask##_1), \
1281 SYM_LSB(IntMask, fldname##Mask##_0)), \
1282 .msg = #fldname "_P" }
1283/*
1284 * Below generates "auto-message" for interrupts specific to a context,
1285 * with ctxt-number appended
1286 */
1287#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1288 SYM_LSB(IntMask, fldname##0IntMask), \
1289 SYM_LSB(IntMask, fldname##17IntMask)), \
1290 .msg = #fldname "_C"}
1291
1292static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
1293 INTR_AUTO_P(SDmaInt),
1294 INTR_AUTO_P(SDmaProgressInt),
1295 INTR_AUTO_P(SDmaIdleInt),
1296 INTR_AUTO_P(SDmaCleanupDone),
1297 INTR_AUTO_C(RcvUrg),
1298 INTR_AUTO_P(ErrInt),
1299 INTR_AUTO(ErrInt), /* non-port-specific errs */
1300 INTR_AUTO(AssertGPIOInt),
1301 INTR_AUTO_P(SendDoneInt),
1302 INTR_AUTO(SendBufAvailInt),
1303 INTR_AUTO_C(RcvAvail),
1304 { .mask = 0 }
1305};
1306
1307#define TXSYMPTOM_AUTO_P(fldname) \
1308 { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
1309static const struct qib_hwerror_msgs hdrchk_msgs[] = {
1310 TXSYMPTOM_AUTO_P(NonKeyPacket),
1311 TXSYMPTOM_AUTO_P(GRHFail),
1312 TXSYMPTOM_AUTO_P(PkeyFail),
1313 TXSYMPTOM_AUTO_P(QPFail),
1314 TXSYMPTOM_AUTO_P(SLIDFail),
1315 TXSYMPTOM_AUTO_P(RawIPV6),
1316 TXSYMPTOM_AUTO_P(PacketTooSmall),
1317 { .mask = 0 }
1318};
1319
1320#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1321
1322/*
1323 * Called when we might have an error that is specific to a particular
1324 * PIO buffer, and may need to cancel that buffer, so it can be re-used;
1325 * we don't need to force the update of pioavail
1326 */
1327static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1328{
1329 struct qib_devdata *dd = ppd->dd;
1330 u32 i;
1331 int any;
1332 u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1333 u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1334 unsigned long sbuf[4];
1335
1336 /*
1337 * It's possible that sendbuffererror could have bits set; might
1338 * have already done this as a result of hardware error handling.
1339 */
1340 any = 0;
1341 for (i = 0; i < regcnt; ++i) {
1342 sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1343 if (sbuf[i]) {
1344 any = 1;
1345 qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1346 }
1347 }
1348
1349 if (any)
1350 qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1351}
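/*
 * Note the write-back of the bits just read above: SendBufErr appears
 * to be write-1-to-clear, so the loop both records which buffers
 * errored and clears the error state before disarming them.
 */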
1352
1353/* No txe_recover yet, if ever */
1354
1355/* No decode_errors yet */
1356static void err_decode(char *msg, size_t len, u64 errs,
1357 const struct qib_hwerror_msgs *msp)
1358{
1359 u64 these, lmask;
1360 int took, multi, n = 0;
1361
1362 while (msp && msp->mask) {
1363 multi = (msp->mask & (msp->mask - 1));
1364 while (errs & msp->mask) {
1365 these = (errs & msp->mask);
1366 lmask = (these & (these - 1)) ^ these;
1367 if (len) {
1368 if (n++) {
1369 /* separate the strings */
1370 *msg++ = ',';
1371 len--;
1372 }
1373 took = scnprintf(msg, len, "%s", msp->msg);
1374 len -= took;
1375 msg += took;
1376 }
1377 errs &= ~lmask;
1378 if (len && multi) {
1379 /* More than one bit this mask */
1380 int idx = -1;
1381
1382 while (lmask & msp->mask) {
1383 ++idx;
1384 lmask >>= 1;
1385 }
1386 took = scnprintf(msg, len, "_%d", idx);
1387 len -= took;
1388 msg += took;
1389 }
1390 }
1391 ++msp;
1392 }
1393 /* If some bits are left, show in hex. */
1394 if (len && errs)
1395 snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1396 (unsigned long long) errs);
1397}
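/*
 * For example, if errs has both bits of a two-bit (per-port) mask set,
 * the decoded string has the form "Name_0,Name_1", where the suffix is
 * the bit's index within the mask; bits not matched by any table entry
 * are appended as ",MORE:<hex>".
 */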
1398
1399/* only called if r1 set */
1400static void flush_fifo(struct qib_pportdata *ppd)
1401{
1402 struct qib_devdata *dd = ppd->dd;
1403 u32 __iomem *piobuf;
1404 u32 bufn;
1405 u32 *hdr;
1406 u64 pbc;
1407 const unsigned hdrwords = 7;
1408 static struct qib_ib_header ibhdr = {
1409 .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1410 .lrh[1] = IB_LID_PERMISSIVE,
1411 .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1412 .lrh[3] = IB_LID_PERMISSIVE,
1413 .u.oth.bth[0] = cpu_to_be32(
1414 (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1415 .u.oth.bth[1] = cpu_to_be32(0),
1416 .u.oth.bth[2] = cpu_to_be32(0),
1417 .u.oth.u.ud.deth[0] = cpu_to_be32(0),
1418 .u.oth.u.ud.deth[1] = cpu_to_be32(0),
1419 };
1420
1421 /*
1422 * Send a dummy VL15 packet to flush the launch FIFO.
1423 * This will not actually be sent since the TxeBypassIbc bit is set.
1424 */
1425 pbc = PBC_7322_VL15_SEND |
1426 (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1427 (hdrwords + SIZE_OF_CRC);
1428 piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1429 if (!piobuf)
1430 return;
1431 writeq(pbc, piobuf);
1432 hdr = (u32 *) &ibhdr;
1433 if (dd->flags & QIB_PIO_FLUSH_WC) {
1434 qib_flush_wc();
1435 qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1436 qib_flush_wc();
1437 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1438 qib_flush_wc();
1439 } else
1440 qib_pio_copy(piobuf + 2, hdr, hdrwords);
1441 qib_sendbuf_done(dd, bufn);
1442}
1443
1444/*
1445 * This is called with interrupts disabled and sdma_lock held.
1446 */
1447static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1448{
1449 struct qib_devdata *dd = ppd->dd;
1450 u64 set_sendctrl = 0;
1451 u64 clr_sendctrl = 0;
1452
1453 if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1454 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1455 else
1456 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1457
1458 if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1459 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1460 else
1461 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1462
1463 if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1464 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1465 else
1466 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1467
1468 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1469 set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1470 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1471 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1472 else
1473 clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1474 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1475 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1476
1477 spin_lock(&dd->sendctrl_lock);
1478
1479 /* If we are draining everything, block sends first */
1480 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1481 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1482 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1483 qib_write_kreg(dd, kr_scratch, 0);
1484 }
1485
1486 ppd->p_sendctrl |= set_sendctrl;
1487 ppd->p_sendctrl &= ~clr_sendctrl;
1488
1489 if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1490 qib_write_kreg_port(ppd, krp_sendctrl,
1491 ppd->p_sendctrl |
1492 SYM_MASK(SendCtrl_0, SDmaCleanup));
1493 else
1494 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1495 qib_write_kreg(dd, kr_scratch, 0);
1496
1497 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1498 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1499 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1500 qib_write_kreg(dd, kr_scratch, 0);
1501 }
1502
1503 spin_unlock(&dd->sendctrl_lock);
1504
1505 if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1506 flush_fifo(ppd);
1507}
1508
1509static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1510{
1511 __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1512}
1513
1514static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1515{
1516 /*
1517 * Set SendDmaLenGen, first with the generation-count MSB clear
1518 * and then with it set: the two writes enable generation checking
1519 * and load the internal generation counter.
1520 */
1521 qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1522 qib_write_kreg_port(ppd, krp_senddmalengen,
1523 ppd->sdma_descq_cnt |
1524 (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1525}
1526
1527/*
1528 * Must be called with sdma_lock held, or before init finished.
1529 */
1530static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1531{
1532 /* Commit writes to memory and advance the tail on the chip */
1533 wmb();
1534 ppd->sdma_descq_tail = tail;
1535 qib_write_kreg_port(ppd, krp_senddmatail, tail);
1536}
1537
1538/*
1539 * This is called with interrupts disabled and sdma_lock held.
1540 */
1541static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1542{
1543 /*
1544 * Drain all FIFOs.
1545 * The hardware doesn't require this but we do it so that verbs
1546 * and user applications don't wait for link active to send stale
1547 * data.
1548 */
1549 sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1550
1551 qib_sdma_7322_setlengen(ppd);
1552 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1553 ppd->sdma_head_dma[0] = 0;
1554 qib_7322_sdma_sendctrl(ppd,
1555 ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1556}
1557
1558#define DISABLES_SDMA ( \
1559 QIB_E_P_SDMAHALT | \
1560 QIB_E_P_SDMADESCADDRMISALIGN | \
1561 QIB_E_P_SDMAMISSINGDW | \
1562 QIB_E_P_SDMADWEN | \
1563 QIB_E_P_SDMARPYTAG | \
1564 QIB_E_P_SDMA1STDESC | \
1565 QIB_E_P_SDMABASE | \
1566 QIB_E_P_SDMATAILOUTOFBOUND | \
1567 QIB_E_P_SDMAOUTOFBOUND | \
1568 QIB_E_P_SDMAGENMISMATCH)
1569
1570static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1571{
1572 unsigned long flags;
1573 struct qib_devdata *dd = ppd->dd;
1574
1575 errs &= QIB_E_P_SDMAERRS;
1576
1577 if (errs & QIB_E_P_SDMAUNEXPDATA)
1578 qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1579 ppd->port);
1580
1581 spin_lock_irqsave(&ppd->sdma_lock, flags);
1582
1583 switch (ppd->sdma_state.current_state) {
1584 case qib_sdma_state_s00_hw_down:
1585 break;
1586
1587 case qib_sdma_state_s10_hw_start_up_wait:
1588 if (errs & QIB_E_P_SDMAHALT)
1589 __qib_sdma_process_event(ppd,
1590 qib_sdma_event_e20_hw_started);
1591 break;
1592
1593 case qib_sdma_state_s20_idle:
1594 break;
1595
1596 case qib_sdma_state_s30_sw_clean_up_wait:
1597 break;
1598
1599 case qib_sdma_state_s40_hw_clean_up_wait:
1600 if (errs & QIB_E_P_SDMAHALT)
1601 __qib_sdma_process_event(ppd,
1602 qib_sdma_event_e50_hw_cleaned);
1603 break;
1604
1605 case qib_sdma_state_s50_hw_halt_wait:
1606 if (errs & QIB_E_P_SDMAHALT)
1607 __qib_sdma_process_event(ppd,
1608 qib_sdma_event_e60_hw_halted);
1609 break;
1610
1611 case qib_sdma_state_s99_running:
1612 __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1613 __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1614 break;
1615 }
1616
1617 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1618}
1619
1620/*
1621 * handle per-device errors (not per-port errors)
1622 */
1623static noinline void handle_7322_errors(struct qib_devdata *dd)
1624{
1625 char *msg;
1626 u64 iserr = 0;
1627 u64 errs;
1628 u64 mask;
1629 int log_idx;
1630
1631 qib_stats.sps_errints++;
1632 errs = qib_read_kreg64(dd, kr_errstatus);
1633 if (!errs) {
1634 qib_devinfo(dd->pcidev, "device error interrupt, "
1635 "but no error bits set!\n");
1636 goto done;
1637 }
1638
1639 /* don't report errors that are masked */
1640 errs &= dd->cspec->errormask;
1641 msg = dd->cspec->emsgbuf;
1642
1643 /* do these first, they are most important */
1644 if (errs & QIB_E_HARDWARE) {
1645 *msg = '\0';
1646 qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1647 } else
1648 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1649 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1650 qib_inc_eeprom_err(dd, log_idx, 1);
1651
1652 if (errs & QIB_E_SPKTERRS) {
1653 qib_disarm_7322_senderrbufs(dd->pport);
1654 qib_stats.sps_txerrs++;
1655 } else if (errs & QIB_E_INVALIDADDR)
1656 qib_stats.sps_txerrs++;
1657 else if (errs & QIB_E_ARMLAUNCH) {
1658 qib_stats.sps_txerrs++;
1659 qib_disarm_7322_senderrbufs(dd->pport);
1660 }
1661 qib_write_kreg(dd, kr_errclear, errs);
1662
1663 /*
1664 * The ones we mask off are handled specially below
1665 * or above. Also mask SDMADISABLED by default as it
1666 * is too chatty.
1667 */
1668 mask = QIB_E_HARDWARE;
1669 *msg = '\0';
1670
1671 err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
1672 qib_7322error_msgs);
1673
1674 /*
1675 * Getting reset is a tragedy for all ports. Mark the device
1676 * _and_ the ports as "offline" in a way meaningful to each.
1677 */
1678 if (errs & QIB_E_RESET) {
1679 int pidx;
1680
1681 qib_dev_err(dd, "Got reset, requires re-init "
1682 "(unload and reload driver)\n");
1683 dd->flags &= ~QIB_INITTED; /* needs re-init */
1684 /* mark as having had error */
1685 *dd->devstatusp |= QIB_STATUS_HWERROR;
1686 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1687 if (dd->pport[pidx].link_speed_supported)
1688 *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1689 }
1690
1691 if (*msg && iserr)
1692 qib_dev_err(dd, "%s error\n", msg);
1693
1694 /*
1695 * If there were hdrq or egrfull errors, wake up any processes
1696 * waiting in poll. We used to try to check which contexts had
1697 * the overflow, but given the cost of that and the chip reads
1698 * to support it, it's better to just wake everybody up if we
1699 * get an overflow; waiters can poll again if it's not them.
1700 */
1701 if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1702 qib_handle_urcv(dd, ~0U);
1703 if (errs & ERR_MASK(RcvEgrFullErr))
1704 qib_stats.sps_buffull++;
1705 else
1706 qib_stats.sps_hdrfull++;
1707 }
1708
1709done:
1710 return;
1711}
1712
1713static void reenable_chase(unsigned long opaque)
1714{
1715 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1716
1717 ppd->cpspec->chase_timer.expires = 0;
1718 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1719 QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1720}
1721
1722static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
1723{
1724 ppd->cpspec->chase_end = 0;
1725
1726 if (!qib_chase)
1727 return;
1728
1729 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1730 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1731 ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1732 add_timer(&ppd->cpspec->chase_timer);
1733}
1734
1735static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1736{
1737 u8 ibclt;
1738 u64 tnow;
1739
1740 ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1741
1742 /*
1743 * Detect and handle the state chase issue, where we can
1744 * get stuck if we are unlucky on timing on both sides of
1745 * the link. If we are, we disable, set a timer, and
1746 * then re-enable.
1747 */
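 /*
 * Concretely: if training sits in one of the CFG states below for
 * longer than QIB_CHASE_TIME, disable_chase() forces the link to
 * DISABLED; reenable_chase() then returns it to POLL after
 * QIB_CHASE_DIS_TIME via chase_timer.
 */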
1748 switch (ibclt) {
1749 case IB_7322_LT_STATE_CFGRCVFCFG:
1750 case IB_7322_LT_STATE_CFGWAITRMT:
1751 case IB_7322_LT_STATE_TXREVLANES:
1752 case IB_7322_LT_STATE_CFGENH:
1753 tnow = get_jiffies_64();
1754 if (ppd->cpspec->chase_end &&
1755 time_after64(tnow, ppd->cpspec->chase_end))
1756 disable_chase(ppd, tnow, ibclt);
1757 else if (!ppd->cpspec->chase_end)
1758 ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1759 break;
1760 default:
1761 ppd->cpspec->chase_end = 0;
1762 break;
1763 }
1764
1765 if (ibclt == IB_7322_LT_STATE_CFGTEST &&
1766 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1767 force_h1(ppd);
1768 ppd->cpspec->qdr_reforce = 1;
1769 } else if (ppd->cpspec->qdr_reforce &&
1770 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1771 (ibclt == IB_7322_LT_STATE_CFGENH ||
1772 ibclt == IB_7322_LT_STATE_CFGIDLE ||
1773 ibclt == IB_7322_LT_STATE_LINKUP))
1774 force_h1(ppd);
1775
1776 if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1777 ppd->link_speed_enabled == QIB_IB_QDR &&
1778 (ibclt == IB_7322_LT_STATE_CFGTEST ||
1779 ibclt == IB_7322_LT_STATE_CFGENH ||
1780 (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1781 ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1782 adj_tx_serdes(ppd);
1783
1784 if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
1785 ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1786 ppd->cpspec->qdr_dfe_on = 1;
1787 ppd->cpspec->qdr_dfe_time = 0;
1788 /* On link down, reenable QDR adaptation */
1789 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1790 ppd->dd->cspec->r1 ?
1791 QDR_STATIC_ADAPT_DOWN_R1 :
1792 QDR_STATIC_ADAPT_DOWN);
1793 }
1794}
1795
1796/*
1797 * This is per-pport error handling. It will likely get its own
1798 * MSIx interrupt (one for each port, although both ports share a
1799 * single handler).
1800 */
1801static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1802{
1803 char *msg;
1804 u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1805 struct qib_devdata *dd = ppd->dd;
1806
1807 /* do this as soon as possible */
1808 fmask = qib_read_kreg64(dd, kr_act_fmask);
1809 if (!fmask)
1810 check_7322_rxe_status(ppd);
1811
1812 errs = qib_read_kreg_port(ppd, krp_errstatus);
1813 if (!errs)
1814 qib_devinfo(dd->pcidev,
1815 "Port%d error interrupt, but no error bits set!\n",
1816 ppd->port);
1817 if (!fmask)
1818 errs &= ~QIB_E_P_IBSTATUSCHANGED;
1819 if (!errs)
1820 goto done;
1821
1822 msg = ppd->cpspec->epmsgbuf;
1823 *msg = '\0';
1824
1825 if (errs & ~QIB_E_P_BITSEXTANT) {
1826 err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1827 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1828 if (!*msg)
1829 snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
1830 "no others");
1831 qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
1832 " errors 0x%016Lx set (and %s)\n",
1833 (errs & ~QIB_E_P_BITSEXTANT), msg);
1834 *msg = '\0';
1835 }
1836
1837 if (errs & QIB_E_P_SHDR) {
1838 u64 symptom;
1839
1840 /* determine cause, then write to clear */
1841 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1842 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1843 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
1844 hdrchk_msgs);
1845 *msg = '\0';
1846 /* senderrbuf cleared in SPKTERRS below */
1847 }
1848
1849 if (errs & QIB_E_P_SPKTERRS) {
1850 if ((errs & QIB_E_P_LINK_PKTERRS) &&
1851 !(ppd->lflags & QIBL_LINKACTIVE)) {
1852 /*
1853 * This can happen when trying to bring the link
1854 * up, but the IB link changes state at the "wrong"
1855 * time. The IB logic then complains that the packet
1856 * isn't valid. We don't want to confuse people, so
1857 * we just don't print them, except at debug
1858 */
1859 err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1860 (errs & QIB_E_P_LINK_PKTERRS),
1861 qib_7322p_error_msgs);
1862 *msg = '\0';
1863 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1864 }
1865 qib_disarm_7322_senderrbufs(ppd);
1866 } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1867 !(ppd->lflags & QIBL_LINKACTIVE)) {
1868 /*
1869 * This can happen when SMA is trying to bring the link
1870 * up, but the IB link changes state at the "wrong" time.
1871 * The IB logic then complains that the packet isn't
1872 * valid. We don't want to confuse people, so we just
1873 * don't print them, except at debug
1874 */
1875 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
1876 qib_7322p_error_msgs);
1877 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1878 *msg = '\0';
1879 }
1880
1881 qib_write_kreg_port(ppd, krp_errclear, errs);
1882
1883 errs &= ~ignore_this_time;
1884 if (!errs)
1885 goto done;
1886
1887 if (errs & QIB_E_P_RPKTERRS)
1888 qib_stats.sps_rcverrs++;
1889 if (errs & QIB_E_P_SPKTERRS)
1890 qib_stats.sps_txerrs++;
1891
1892 iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1893
1894 if (errs & QIB_E_P_SDMAERRS)
1895 sdma_7322_p_errors(ppd, errs);
1896
1897 if (errs & QIB_E_P_IBSTATUSCHANGED) {
1898 u64 ibcs;
1899 u8 ltstate;
1900
1901 ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1902 ltstate = qib_7322_phys_portstate(ibcs);
1903
1904 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1905 handle_serdes_issues(ppd, ibcs);
1906 if (!(ppd->cpspec->ibcctrl_a &
1907 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1908 /*
1909 * We got our interrupt, so init code should be
1910 * happy and not try alternatives. Now squelch
1911 * other "chatter" from link-negotiation (pre Init)
1912 */
1913 ppd->cpspec->ibcctrl_a |=
1914 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1915 qib_write_kreg_port(ppd, krp_ibcctrl_a,
1916 ppd->cpspec->ibcctrl_a);
1917 }
1918
1919 /* Update our picture of width and speed from chip */
1920 ppd->link_width_active =
1921 (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1922 IB_WIDTH_4X : IB_WIDTH_1X;
1923 ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1924 LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1925 SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1926 QIB_IB_DDR : QIB_IB_SDR;
1927
1928 if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1929 IB_PHYSPORTSTATE_DISABLED)
1930 qib_set_ib_7322_lstate(ppd, 0,
1931 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1932 else
1933 /*
1934 * Since going into a recovery state causes the link
1935 * state to go down and since recovery is transitory,
1936 * it is better if we "miss" ever seeing the link
1937 * training state go into recovery (i.e., ignore this
1938 * transition for link state special handling purposes)
1939 * without updating lastibcstat.
1940 */
1941 if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1942 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1943 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1944 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1945 qib_handle_e_ibstatuschanged(ppd, ibcs);
1946 }
1947 if (*msg && iserr)
1948 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1949
1950 if (ppd->state_wanted & ppd->lflags)
1951 wake_up_interruptible(&ppd->state_wait);
1952done:
1953 return;
1954}
1955
1956/* enable/disable chip from delivering interrupts */
1957static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
1958{
1959 if (enable) {
1960 if (dd->flags & QIB_BADINTR)
1961 return;
1962 qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
1963 /* cause any pending enabled interrupts to be re-delivered */
1964 qib_write_kreg(dd, kr_intclear, 0ULL);
1965 if (dd->cspec->num_msix_entries) {
1966 /* and same for MSIx */
1967 u64 val = qib_read_kreg64(dd, kr_intgranted);
1968 if (val)
1969 qib_write_kreg(dd, kr_intgranted, val);
1970 }
1971 } else
1972 qib_write_kreg(dd, kr_intmask, 0ULL);
1973}
1974
1975/*
1976 * Try to cleanup as much as possible for anything that might have gone
1977 * wrong while in freeze mode, such as pio buffers being written by user
1978 * processes (causing armlaunch), send errors due to going into freeze mode,
1979 * etc., and try to avoid causing extra interrupts while doing so.
1980 * Forcibly update the in-memory pioavail register copies after cleanup
1981 * because the chip won't do it while in freeze mode (the register values
1982 * themselves are kept correct).
1983 * Make sure that we don't lose any important interrupts by using the chip
1984 * feature that says that writing 0 to a bit in *clear that is set in
1985 * *status will cause an interrupt to be generated again (if allowed by
1986 * the *mask value).
1987 * This is in chip-specific code because of all of the register accesses,
1988 * even though the details are similar on most chips.
1989 */
1990static void qib_7322_clear_freeze(struct qib_devdata *dd)
1991{
1992 int pidx;
1993
1994 /* disable error interrupts, to avoid confusion */
1995 qib_write_kreg(dd, kr_errmask, 0ULL);
1996
1997 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1998 if (dd->pport[pidx].link_speed_supported)
1999 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2000 0ULL);
2001
2002 /* also disable interrupts; errormask is sometimes overwritten */
2003 qib_7322_set_intr_state(dd, 0);
2004
2005 /* clear the freeze, and be sure chip saw it */
2006 qib_write_kreg(dd, kr_control, dd->control);
2007 qib_read_kreg32(dd, kr_scratch);
2008
2009 /*
2010 * Force new interrupt if any hwerr, error or interrupt bits are
2011 * still set, and clear "safe" send packet errors related to freeze
2012 * and cancelling sends. Re-enable error interrupts before possible
2013 * force of re-interrupt on pending interrupts.
2014 */
2015 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2016 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2017 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2018 /* We need to purge per-port errs and reset mask, too */
2019 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2020 if (!dd->pport[pidx].link_speed_supported)
2021 continue;
2022 qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
2023 qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
2024 }
2025 qib_7322_set_intr_state(dd, 1);
2026}
2027
2028/* no error handling to speak of */
2029/**
2030 * qib_7322_handle_hwerrors - display hardware errors.
2031 * @dd: the qlogic_ib device
2032 * @msg: the output buffer
2033 * @msgl: the size of the output buffer
2034 *
2035 * Most hardware errors are catastrophic, but for right now,
2036 * we'll print them and continue. We reuse the same message
2037 * buffer as qib_handle_errors() to avoid excessive stack
2038 * usage.
2039 */
2040static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2041 size_t msgl)
2042{
2043 u64 hwerrs;
2044 u32 ctrl;
2045 int isfatal = 0;
2046
2047 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2048 if (!hwerrs)
2049 goto bail;
2050 if (hwerrs == ~0ULL) {
2051 qib_dev_err(dd, "Read of hardware error status failed "
2052 "(all bits set); ignoring\n");
2053 goto bail;
2054 }
2055 qib_stats.sps_hwerrs++;
2056
2057 /* Always clear the error status register, except BIST fail */
2058 qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2059 ~HWE_MASK(PowerOnBISTFailed));
2060
2061 hwerrs &= dd->cspec->hwerrmask;
2062
2063 /* no EEPROM logging, yet */
2064
2065 if (hwerrs)
2066 qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
2067 "(cleared)\n", (unsigned long long) hwerrs);
2068
2069 ctrl = qib_read_kreg32(dd, kr_control);
2070 if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2071 /*
2072 * No recovery yet...
2073 */
2074 if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2075 dd->cspec->stay_in_freeze) {
2076 /*
2077 * If any bits are set that we aren't ignoring, make the
2078 * complaint only once, in case the error is stuck or recurring
2079 * and we get here multiple times.
2080 * Force the link down, so the switch knows, and the
2081 * LEDs are turned off.
2082 */
2083 if (dd->flags & QIB_INITTED)
2084 isfatal = 1;
2085 } else
2086 qib_7322_clear_freeze(dd);
2087 }
2088
2089 if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2090 isfatal = 1;
2091 strlcpy(msg, "[Memory BIST test failed, "
2092 "InfiniPath hardware unusable]", msgl);
2093 /* ignore from now on, so disable until driver reloaded */
2094 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2095 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2096 }
2097
2098 err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2099
2100 /* Ignore esoteric PLL failures et al. */
2101
2102 qib_dev_err(dd, "%s hardware error\n", msg);
2103
2104 if (isfatal && !dd->diag_client) {
2105 qib_dev_err(dd, "Fatal Hardware Error, no longer"
2106 " usable, SN %.16s\n", dd->serial);
2107 /*
2108 * for /sys status file and user programs to print; if no
2109 * trailing brace is copied, we'll know it was truncated.
2110 */
2111 if (dd->freezemsg)
2112 snprintf(dd->freezemsg, dd->freezelen,
2113 "{%s}", msg);
2114 qib_disable_after_error(dd);
2115 }
2116bail:;
2117}
2118
2119/**
2120 * qib_7322_init_hwerrors - enable hardware errors
2121 * @dd: the qlogic_ib device
2122 *
2123 * Now that we have finished initializing everything that might
2124 * reasonably cause a hardware error, and cleared those error bits
2125 * as they occurred, we can enable hardware errors in the mask
2126 * (potentially enabling freeze mode), and enable hardware errors
2127 * as errors (along with everything else) in errormask.
2128 */
2129static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2130{
2131 int pidx;
2132 u64 extsval;
2133
2134 extsval = qib_read_kreg64(dd, kr_extstatus);
2135 if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2136 QIB_EXTS_MEMBIST_ENDTEST)))
2137 qib_dev_err(dd, "MemBIST did not complete!\n");
2138
2139 /* never clear BIST failure, so reported on each driver load */
2140 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2141 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2142
2143 /* clear all */
2144 qib_write_kreg(dd, kr_errclear, ~0ULL);
2145 /* enable errors that are masked, at least this first time. */
2146 qib_write_kreg(dd, kr_errmask, ~0ULL);
2147 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2148 for (pidx = 0; pidx < dd->num_pports; ++pidx)
2149 if (dd->pport[pidx].link_speed_supported)
2150 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2151 ~0ULL);
2152}
2153
2154/*
2155 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
2156 * on chips that are count-based, rather than trigger-based. There is no
2157 * reference counting, but that's also fine, given the intended use.
2158 * Only chip-specific because it's all register accesses
2159 */
2160static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2161{
2162 if (enable) {
2163 qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2164 dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2165 } else
2166 dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2167 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2168}
2169
2170/*
2171 * Formerly took parameter <which> in pre-shifted,
2172 * pre-merged form with LinkCmd and LinkInitCmd
2173 * together, and assuming the zero was NOP.
2174 */
2175static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2176 u16 linitcmd)
2177{
2178 u64 mod_wd;
2179 struct qib_devdata *dd = ppd->dd;
2180 unsigned long flags;
2181
2182 if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2183 /*
2184 * If we are told to disable, note that so link-recovery
2185 * code does not attempt to bring us back up.
2186 * Also reset everything that we can, so we start
2187 * completely clean when re-enabled (before we
2188 * actually issue the disable to the IBC)
2189 */
2190 qib_7322_mini_pcs_reset(ppd);
2191 spin_lock_irqsave(&ppd->lflags_lock, flags);
2192 ppd->lflags |= QIBL_IB_LINK_DISABLED;
2193 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2194 } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2195 /*
2196 * Any other linkinitcmd will lead to LINKDOWN and then
2197 * to INIT (if all is well), so clear flag to let
2198 * link-recovery code attempt to bring us back up.
2199 */
2200 spin_lock_irqsave(&ppd->lflags_lock, flags);
2201 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2202 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2203 /*
2204 * Clear status change interrupt reduction so the
2205 * new state is seen.
2206 */
2207 ppd->cpspec->ibcctrl_a &=
2208 ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2209 }
2210
2211 mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2212 (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2213
2214 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2215 mod_wd);
2216 /* write to chip to prevent back-to-back writes of ibc reg */
2217 qib_write_kreg(dd, kr_scratch, 0);
2218
2219}
2220
2221/*
2222 * The total RCV buffer memory is 64KB, used for both ports, and is
2223 * in units of 64 bytes (same as IB flow control credit unit).
2224 * The consumedVL fields in the same registers are in 32-byte units!
2225 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2226 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2227 * in krp_rxcreditvl15, rather than 10.
2228 */
2229#define RCV_BUF_UNITSZ 64
2230#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
2231
2232static void set_vls(struct qib_pportdata *ppd)
2233{
2234 int i, numvls, totcred, cred_vl, vl0extra;
2235 struct qib_devdata *dd = ppd->dd;
2236 u64 val;
2237
2238 numvls = qib_num_vls(ppd->vls_operational);
2239
2240 /*
2241 * Set up per-VL credits. Below is kluge based on these assumptions:
2242 * 1) port is disabled at the time early_init is called.
2243 * 2) give VL15 9 credits, for two max-plausible packets.
2244 * 3) Give VL0-N the rest, with any rounding excess used for VL0
2245 */
2246 /* 2 VL15 packets @ 288 bytes each (including IB headers) */
2247 totcred = NUM_RCV_BUF_UNITS(dd);
2248 cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2249 totcred -= cred_vl;
2250 qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2251 cred_vl = totcred / numvls;
2252 vl0extra = totcred - cred_vl * numvls;
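 /*
 * Worked example (dual-port card, 4 operational VLs):
 * totcred = 65536 / (64 * 2) = 512 units, VL15 gets
 * (2 * 288 + 63) / 64 = 9, and the remaining 503 split as
 * 125 per VL with vl0extra = 3 going to VL0.
 */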
2253 qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2254 for (i = 1; i < numvls; i++)
2255 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2256 for (; i < 8; i++) /* no buffer space for other VLs */
2257 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2258
2259 /* Notify IBC that credits need to be recalculated */
2260 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2261 val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2262 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2263 qib_write_kreg(dd, kr_scratch, 0ULL);
2264 val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2265 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2266
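 /*
 * Read the per-VL credits back; presumably this also ensures the
 * writes above reach the chip before NumVLane is changed below.
 */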
2267 for (i = 0; i < numvls; i++)
2268 val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2269 val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2270
2271 /* Change the number of operational VLs */
2272 ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2273 ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2274 ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2275 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2276 qib_write_kreg(dd, kr_scratch, 0ULL);
2277}
2278
2279/*
2280 * The code that deals with actual SerDes is in serdes_7322_init().
2281 * Compared to the code for iba7220, it is minimal.
2282 */
2283static int serdes_7322_init(struct qib_pportdata *ppd);
2284
2285/**
2286 * qib_7322_bringup_serdes - bring up the serdes
2287 * @ppd: physical port on the qlogic_ib device
2288 */
2289static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2290{
2291 struct qib_devdata *dd = ppd->dd;
2292 u64 val, guid, ibc;
2293 unsigned long flags;
2294 int ret = 0;
2295
2296 /*
2297 * SerDes model not in Pd, but still need to
2298 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2299 * eventually.
2300 */
2301 /* Put IBC in reset, sends disabled (should be in reset already) */
2302 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2303 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2304 qib_write_kreg(dd, kr_scratch, 0ULL);
2305
2306 if (qib_compat_ddr_negotiate) {
2307 ppd->cpspec->ibdeltainprog = 1;
2308 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2309 crp_ibsymbolerr);
2310 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2311 crp_iblinkerrrecov);
2312 }
2313
2314 /* flowcontrolwatermark is in units of KBytes */
2315 ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2316 /*
2317 * Flow control is sent this often, even if no changes in
2318 * buffer space occur. Units are 128ns for this chip.
2319 * Set to 3 usec (24 * 128 ns = 3.072 usec).
2320 */
2321 ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2322 /* max error tolerance */
2323 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2324 /* IB credit flow control. */
2325 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2326 /*
2327 * set initial max size pkt IBC will send, including ICRC; it's the
2328 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2329 */
2330 ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2331 SYM_LSB(IBCCtrlA_0, MaxPktLen);
2332 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2333
2334 /* initially come up waiting for TS1, without sending anything. */
2335 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2336 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2337
2338 /*
2339 * Reset the PCS interface to the serdes (and also ibc, which is still
2340 * in reset from above). Writes new value of ibcctrl_a as last step.
2341 */
2342 qib_7322_mini_pcs_reset(ppd);
2343 qib_write_kreg(dd, kr_scratch, 0ULL);
2344
2345 if (!ppd->cpspec->ibcctrl_b) {
2346 unsigned lse = ppd->link_speed_enabled;
2347
2348 /*
2349 * Not on re-init after reset, establish shadow
2350 * and force initial config.
2351 */
2352 ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2353 krp_ibcctrl_b);
2354 ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2355 IBA7322_IBC_SPEED_DDR |
2356 IBA7322_IBC_SPEED_SDR |
2357 IBA7322_IBC_WIDTH_AUTONEG |
2358 SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2359 if (lse & (lse - 1)) /* Multiple speeds enabled */
2360 ppd->cpspec->ibcctrl_b |=
2361 (lse << IBA7322_IBC_SPEED_LSB) |
2362 IBA7322_IBC_IBTA_1_2_MASK |
2363 IBA7322_IBC_MAX_SPEED_MASK;
2364 else
2365 ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2366 IBA7322_IBC_SPEED_QDR |
2367 IBA7322_IBC_IBTA_1_2_MASK :
2368 (lse == QIB_IB_DDR) ?
2369 IBA7322_IBC_SPEED_DDR :
2370 IBA7322_IBC_SPEED_SDR;
2371 if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2372 (IB_WIDTH_1X | IB_WIDTH_4X))
2373 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2374 else
2375 ppd->cpspec->ibcctrl_b |=
2376 ppd->link_width_enabled == IB_WIDTH_4X ?
2377 IBA7322_IBC_WIDTH_4X_ONLY :
2378 IBA7322_IBC_WIDTH_1X_ONLY;
2379
2380 /* always enable these on driver reload, not sticky */
2381 ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2382 IBA7322_IBC_HRTBT_MASK);
2383 }
2384 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2385
2386 /* setup so we have more time at CFGTEST to change H1 */
2387 val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2388 val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2389 val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2390 qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2391
2392 serdes_7322_init(ppd);
2393
2394 guid = be64_to_cpu(ppd->guid);
2395 if (!guid) {
2396 if (dd->base_guid)
2397 guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2398 ppd->guid = cpu_to_be64(guid);
2399 }
2400
2401 qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2402 /* write to chip to prevent back-to-back writes of ibc reg */
2403 qib_write_kreg(dd, kr_scratch, 0);
2404
2405 /* Enable port */
2406 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2407 set_vls(ppd);
2408
2409 /* be paranoid against later code motion, etc. */
2410 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2411 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2412 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2413 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2414
2415 /* Also enable IBSTATUSCHG interrupt. */
2416 val = qib_read_kreg_port(ppd, krp_errmask);
2417 qib_write_kreg_port(ppd, krp_errmask,
2418 val | ERR_MASK_N(IBStatusChanged));
2419
2420 /* Always zero until we start messing with SerDes for real */
2421 return ret;
2422}
2423
2424/**
2425 * qib_7322_mini_quiet_serdes - set serdes to txidle
2426 * @ppd: physical port on the qlogic_ib device
2427 * Called when driver is being unloaded
2428 */
2429static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2430{
2431 u64 val;
2432 unsigned long flags;
2433
2434 qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2435
2436 spin_lock_irqsave(&ppd->lflags_lock, flags);
2437 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2438 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2439 wake_up(&ppd->cpspec->autoneg_wait);
2440 cancel_delayed_work(&ppd->cpspec->autoneg_work);
2441 if (ppd->dd->cspec->r1)
2442 cancel_delayed_work(&ppd->cpspec->ipg_work);
2443 flush_scheduled_work();
2444
2445 ppd->cpspec->chase_end = 0;
2446 if (ppd->cpspec->chase_timer.data) /* if initted */
2447 del_timer_sync(&ppd->cpspec->chase_timer);
2448
2449 /*
2450 * Despite the name, actually disables IBC as well. Do it when
2451 * we are as sure as possible that no more packets can be
2452 * received, following the down and the PCS reset.
2453 * The actual disabling happens in qib_7322_mini_pcs_reset(),
2454 * along with the PCS being reset.
2455 */
2456 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2457 qib_7322_mini_pcs_reset(ppd);
2458
2459 /*
2460 * Update the adjusted counters so the adjustment persists
2461 * across driver reload.
2462 */
2463 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2464 ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2465 struct qib_devdata *dd = ppd->dd;
2466 u64 diagc;
2467
2468 /* enable counter writes */
2469 diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2470 qib_write_kreg(dd, kr_hwdiagctrl,
2471 diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2472
2473 if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2474 val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2475 if (ppd->cpspec->ibdeltainprog)
2476 val -= val - ppd->cpspec->ibsymsnap;
2477 val -= ppd->cpspec->ibsymdelta;
2478 write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2479 }
2480 if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2481 val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2482 if (ppd->cpspec->ibdeltainprog)
2483 val -= val - ppd->cpspec->iblnkerrsnap;
2484 val -= ppd->cpspec->iblnkerrdelta;
2485 write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2486 }
2487 if (ppd->cpspec->iblnkdowndelta) {
2488 val = read_7322_creg32_port(ppd, crp_iblinkdown);
2489 val += ppd->cpspec->iblnkdowndelta;
2490 write_7322_creg_port(ppd, crp_iblinkdown, val);
2491 }
2492 /*
2493 * No need to save ibmalfdelta since IB perfcounters
2494 * are cleared on driver reload.
2495 */
2496
2497 /* and disable counter writes */
2498 qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2499 }
2500}
2501
2502/**
2503 * qib_setup_7322_setextled - set the state of the two external LEDs
2504 * @ppd: physical port on the qlogic_ib device
2505 * @on: whether the link is up or not
2506 *
2507 * The exact combo of LEDs if on is true is determined by looking
2508 * at the ibcstatus.
2509 *
2510 * These LEDs indicate the physical and logical state of IB link.
2511 * For this chip (at least with recommended board pinouts), LED1
2512 * is Yellow (logical state) and LED2 is Green (physical state),
2513 *
2514 * Note: We try to match the Mellanox HCA LED behavior as best
2515 * we can. Green indicates physical link state is OK (something is
2516 * plugged in, and we can train).
2517 * Amber indicates the link is logically up (ACTIVE).
2518 * Mellanox further blinks the amber LED to indicate data packet
2519 * activity, but we have no hardware support for that, so it would
2520 * require waking up every 10-20 msecs and checking the counters
2521 * on the chip, and then turning the LED off if appropriate. That's
2522 * visible overhead, so not something we will do.
2523 */
2524static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2525{
2526 struct qib_devdata *dd = ppd->dd;
2527 u64 extctl, ledblink = 0, val;
2528 unsigned long flags;
2529 int yel, grn;
2530
2531 /*
2532 * The diags use the LED to indicate diag info, so we leave
2533 * the external LED alone when the diags are running.
2534 */
2535 if (dd->diag_client)
2536 return;
2537
2538 /* Allow override of LED display, e.g. for locating the system in a rack */
2539 if (ppd->led_override) {
2540 grn = (ppd->led_override & QIB_LED_PHYS);
2541 yel = (ppd->led_override & QIB_LED_LOG);
2542 } else if (on) {
2543 val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2544 grn = qib_7322_phys_portstate(val) ==
2545 IB_PHYSPORTSTATE_LINKUP;
2546 yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2547 } else {
2548 grn = 0;
2549 yel = 0;
2550 }
2551
2552 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2553 extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2554 ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2555 if (grn) {
2556 extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2557 /*
2558 * Counts are in chip clock (4ns) periods.
2559 * This is 1/16 sec (66.6ms) on,
2560 * 3/16 sec (187.5 ms) off, with packets rcvd.
2561 */
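 /*
 * Checking the math: at 4 ns per count, 66600 * 1000 / 4 =
 * 16,650,000 counts ~= 66.6 ms on, and 187500 * 1000 / 4 =
 * 46,875,000 counts ~= 187.5 ms off.
 */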
2562 ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2563 ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2564 }
2565 if (yel)
2566 extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2567 dd->cspec->extctrl = extctl;
2568 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2569 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2570
2571 if (ledblink) /* blink the LED on packet receive */
2572 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2573}
2574
2575#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2576static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd)
2577{
2578 struct qib_devdata *dd = rcd->dd;
2579 struct qib_chip_specific *cspec = dd->cspec;
2580 int cpu = get_cpu();
2581
2582 if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2583 const struct dca_reg_map *rmp;
2584
2585 cspec->rhdr_cpu[rcd->ctxt] = cpu;
2586 rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2587 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2588 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2589 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2590 qib_write_kreg(dd, rmp->regno,
2591 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2592 cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2593 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2594 }
2595 put_cpu();
2596}
2597
2598static void qib_update_sdma_dca(struct qib_pportdata *ppd)
2599{
2600 struct qib_devdata *dd = ppd->dd;
2601 struct qib_chip_specific *cspec = dd->cspec;
2602 int cpu = get_cpu();
2603 unsigned pidx = ppd->port - 1;
2604
2605 if (cspec->sdma_cpu[pidx] != cpu) {
2606 cspec->sdma_cpu[pidx] = cpu;
2607 cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2608 SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2609 SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2610 cspec->dca_rcvhdr_ctrl[4] |=
2611 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2612 (ppd->hw_pidx ?
2613 SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2614 SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2615 qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2616 cspec->dca_rcvhdr_ctrl[4]);
2617 cspec->dca_ctrl |= ppd->hw_pidx ?
2618 SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2619 SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2620 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2621 }
2622 put_cpu();
2623}
2624
2625static void qib_setup_dca(struct qib_devdata *dd)
2626{
2627 struct qib_chip_specific *cspec = dd->cspec;
2628 int i;
2629
2630 for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2631 cspec->rhdr_cpu[i] = -1;
2632 for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2633 cspec->sdma_cpu[i] = -1;
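 /*
 * Set each RcvHdrq DCA transfer count to 1; the per-CPU tags are
 * filled in later, per context, by qib_update_rhdrq_dca().
 */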
2634 cspec->dca_rcvhdr_ctrl[0] =
2635 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2636 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2637 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2638 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2639 cspec->dca_rcvhdr_ctrl[1] =
2640 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2641 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2642 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2643 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2644 cspec->dca_rcvhdr_ctrl[2] =
2645 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2646 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2647 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2648 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2649 cspec->dca_rcvhdr_ctrl[3] =
2650 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2651 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2652 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2653 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2654 cspec->dca_rcvhdr_ctrl[4] =
2655 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2656 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2657 for (i = 0; i < ARRAY_SIZE(cspec->dca_rcvhdr_ctrl); i++)
2658 qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2659 cspec->dca_rcvhdr_ctrl[i]);
2660}
2661
2662#endif
2663
2664/*
2665 * Disable MSIx interrupt if enabled, call generic MSIx code
2666 * to cleanup, and clear pending MSIx interrupts.
2667 * Used for fallback to INTx, after reset, and when MSIx setup fails.
2668 */
2669static void qib_7322_nomsix(struct qib_devdata *dd)
2670{
2671 u64 intgranted;
2672 int n;
2673
2674 dd->cspec->main_int_mask = ~0ULL;
2675 n = dd->cspec->num_msix_entries;
2676 if (n) {
2677 int i;
2678
2679 dd->cspec->num_msix_entries = 0;
2680 for (i = 0; i < n; i++)
2681 free_irq(dd->cspec->msix_entries[i].vector,
2682 dd->cspec->msix_arg[i]);
2683 qib_nomsix(dd);
2684 }
2685 /* make sure no MSIx interrupts are left pending */
2686 intgranted = qib_read_kreg64(dd, kr_intgranted);
2687 if (intgranted)
2688 qib_write_kreg(dd, kr_intgranted, intgranted);
2689}
2690
2691static void qib_7322_free_irq(struct qib_devdata *dd)
2692{
2693 if (dd->cspec->irq) {
2694 free_irq(dd->cspec->irq, dd);
2695 dd->cspec->irq = 0;
2696 }
2697 qib_7322_nomsix(dd);
2698}
2699
2700static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2701{
2702 int i;
2703
2704#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2705 if (dd->flags & QIB_DCA_ENABLED) {
2706 dca_remove_requester(&dd->pcidev->dev);
2707 dd->flags &= ~QIB_DCA_ENABLED;
2708 dd->cspec->dca_ctrl = 0;
2709 qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2710 }
2711#endif
2712
2713 qib_7322_free_irq(dd);
2714 kfree(dd->cspec->cntrs);
2715 kfree(dd->cspec->sendchkenable);
2716 kfree(dd->cspec->sendgrhchk);
2717 kfree(dd->cspec->sendibchk);
2718 kfree(dd->cspec->msix_entries);
2719 kfree(dd->cspec->msix_arg);
2720 for (i = 0; i < dd->num_pports; i++) {
2721 unsigned long flags;
2722 u32 mask = QSFP_GPIO_MOD_PRS_N |
2723 (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2724
2725 kfree(dd->pport[i].cpspec->portcntrs);
2726 if (dd->flags & QIB_HAS_QSFP) {
2727 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2728 dd->cspec->gpio_mask &= ~mask;
2729 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2730 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2731 qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
2732 }
2733 if (dd->pport[i].ibport_data.smi_ah)
2734 ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
2735 }
2736}
2737
2738/* handle SDMA interrupts */
2739static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2740{
2741 struct qib_pportdata *ppd0 = &dd->pport[0];
2742 struct qib_pportdata *ppd1 = &dd->pport[1];
2743 u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2744 INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2745 u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2746 INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2747
2748 if (intr0)
2749 qib_sdma_intr(ppd0);
2750 if (intr1)
2751 qib_sdma_intr(ppd1);
2752
2753 if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2754 qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2755 if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2756 qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2757}
2758
2759/*
2760 * Set or clear the Send buffer available interrupt enable bit.
2761 */
2762static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2763{
2764 unsigned long flags;
2765
2766 spin_lock_irqsave(&dd->sendctrl_lock, flags);
2767 if (needint)
2768 dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2769 else
2770 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2771 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2772 qib_write_kreg(dd, kr_scratch, 0ULL);
2773 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2774}
2775
2776/*
2777 * Somehow got an interrupt with reserved bits set in interrupt status.
2778 * Print a message so we know it happened, then clear them.
2779 * Kept out of line to keep the mainline interrupt handler cache-friendly.
2780 */
2781static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2782{
2783 u64 kills;
2784
2785 kills = istat & ~QIB_I_BITSEXTANT;
2786 /* reserved bits have no decode-table entries; print the raw mask */
2787 qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx\n",
2788 (unsigned long long) kills);
2789 qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2790}
2791
2792/* keep mainline interrupt handler cache-friendly */
2793static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2794{
2795 u32 gpiostatus;
2796 int handled = 0;
2797 int pidx;
2798
2799 /*
2800 * Boards for this chip currently don't use GPIO interrupts,
2801 * so clear by writing GPIOstatus to GPIOclear, and complain
2802 * to the developer. To avoid endless repeats, clear
2803 * the bits in the mask, since there is some kind of
2804 * programming error or chip problem.
2805 */
2806 gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2807 /*
2808 * In theory, writing GPIOstatus to GPIOclear could
2809 * have a bad side-effect on some diagnostic that wanted
2810 * to poll for a status-change, but the various shadows
2811 * make that problematic at best. Diags will just suppress
2812 * all GPIO interrupts during such tests.
2813 */
2814 qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2815 /*
2816 * Check for QSFP MOD_PRS changes
2817 * only works for single port if IB1 != pidx1
2818 */
2819 for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2820 ++pidx) {
2821 struct qib_pportdata *ppd;
2822 struct qib_qsfp_data *qd;
2823 u32 mask;
2824 if (!dd->pport[pidx].link_speed_supported)
2825 continue;
2826 mask = QSFP_GPIO_MOD_PRS_N;
2827 ppd = dd->pport + pidx;
2828 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2829 if (gpiostatus & dd->cspec->gpio_mask & mask) {
2830 u64 pins;
2831 qd = &ppd->cpspec->qsfp_data;
2832 gpiostatus &= ~mask;
2833 pins = qib_read_kreg64(dd, kr_extstatus);
2834 pins >>= SYM_LSB(EXTStatus, GPIOIn);
2835 if (!(pins & mask)) {
2836 ++handled;
2837 qd->t_insert = get_jiffies_64();
2838 schedule_work(&qd->work);
2839 }
2840 }
2841 }
2842
2843 if (gpiostatus && !handled) {
2844 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
2845 u32 gpio_irq = mask & gpiostatus;
2846
2847 /*
2848 * Clear any troublemakers, and update chip from shadow
2849 */
2850 dd->cspec->gpio_mask &= ~gpio_irq;
2851 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2852 }
2853}
2854
2855/*
2856 * Handle errors and unusual events first, separate function
2857 * to improve cache hits for fast path interrupt handling.
2858 */
2859static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
2860{
2861 if (istat & ~QIB_I_BITSEXTANT)
2862 unknown_7322_ibits(dd, istat);
2863 if (istat & QIB_I_GPIO)
2864 unknown_7322_gpio_intr(dd);
2865 if (istat & QIB_I_C_ERROR)
2866 handle_7322_errors(dd);
2867 if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
2868 handle_7322_p_errors(dd->rcd[0]->ppd);
2869 if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
2870 handle_7322_p_errors(dd->rcd[1]->ppd);
2871}
2872
2873/*
2874 * Dynamically adjust the rcv int timeout for a context based on incoming
2875 * packet rate.
2876 */
2877static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
2878{
2879 struct qib_devdata *dd = rcd->dd;
2880 u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
2881
2882 /*
2883 * Dynamically adjust idle timeout on chip
2884 * based on number of packets processed.
2885 */
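 /*
 * E.g., with illustrative values rcv_int_count = 16 and
 * rcv_int_timeout = 375: a context seeing fewer than 16 packets
 * per interrupt halves its timeout (floor of 2) for lower
 * latency; a busier one doubles it (capped at 375) to coalesce.
 */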
2886 if (npkts < rcv_int_count && timeout > 2)
2887 timeout >>= 1;
2888 else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
2889 timeout = min(timeout << 1, rcv_int_timeout);
2890 else
2891 return;
2892
2893 dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
2894 qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
2895}
2896
2897/*
2898 * This is the main interrupt handler.
2899 * It will normally only be used for low frequency interrupts but may
2900 * have to handle all interrupts if INTx is enabled or fewer than normal
2901 * MSIx interrupts were allocated.
2902 * This routine should ignore the interrupt bits for any of the
2903 * dedicated MSIx handlers.
2904 */
2905static irqreturn_t qib_7322intr(int irq, void *data)
2906{
2907 struct qib_devdata *dd = data;
2908 irqreturn_t ret;
2909 u64 istat;
2910 u64 ctxtrbits;
2911 u64 rmask;
2912 unsigned i;
2913 u32 npkts;
2914
2915 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
2916 /*
2917 * This return value is not great, but we do not want the
2918 * interrupt core code to remove our interrupt handler
2919 * because we don't appear to be handling an interrupt
2920 * during a chip reset.
2921 */
2922 ret = IRQ_HANDLED;
2923 goto bail;
2924 }
2925
2926 istat = qib_read_kreg64(dd, kr_intstatus);
2927
2928 if (unlikely(istat == ~0ULL)) {
2929 qib_bad_intrstatus(dd);
2930 qib_dev_err(dd, "Interrupt status all f's, skipping\n");
2931 /* don't know if it was our interrupt or not */
2932 ret = IRQ_NONE;
2933 goto bail;
2934 }
2935
2936 istat &= dd->cspec->main_int_mask;
2937 if (unlikely(!istat)) {
2938 /* already handled, or shared and not us */
2939 ret = IRQ_NONE;
2940 goto bail;
2941 }
2942
2943 qib_stats.sps_ints++;
2944 if (dd->int_counter != (u32) -1)
2945 dd->int_counter++;
2946
2947 /* handle "errors" of various kinds first, device ahead of port */
2948 if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
2949 QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
2950 INT_MASK_P(Err, 1))))
2951 unlikely_7322_intr(dd, istat);
2952
2953 /*
2954 * Clear the interrupt bits we found set, relatively early, so we
2955 * "know" know the chip will have seen this by the time we process
2956 * the queue, and will re-interrupt if necessary. The processor
2957 * itself won't take the interrupt again until we return.
2958 */
2959 qib_write_kreg(dd, kr_intclear, istat);
2960
2961 /*
2962 * Handle kernel receive queues before checking for pio buffers
2963 * available since receives can overflow; piobuf waiters can afford
2964 * a few extra cycles, since they were waiting anyway.
2965 */
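 /*
 * Each context owns one bit in the RcvAvail field and one in the
 * RcvUrg field; rmask starts with both context-0 bits set and is
 * shifted left once per context as the kernel contexts are
 * scanned.
 */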
2966 ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
2967 if (ctxtrbits) {
2968 rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
2969 (1ULL << QIB_I_RCVURG_LSB);
2970 for (i = 0; i < dd->first_user_ctxt; i++) {
2971 if (ctxtrbits & rmask) {
2972 ctxtrbits &= ~rmask;
2973 if (dd->rcd[i]) {
2974 qib_kreceive(dd->rcd[i], NULL, &npkts);
2975 adjust_rcv_timeout(dd->rcd[i], npkts);
2976 }
2977 }
2978 rmask <<= 1;
2979 }
2980 if (ctxtrbits) {
2981 ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
2982 (ctxtrbits >> QIB_I_RCVURG_LSB);
2983 qib_handle_urcv(dd, ctxtrbits);
2984 }
2985 }
2986
2987 if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
2988 sdma_7322_intr(dd, istat);
2989
2990 if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
2991 qib_ib_piobufavail(dd);
2992
2993 ret = IRQ_HANDLED;
2994bail:
2995 return ret;
2996}
2997
2998/*
2999 * Dedicated receive packet available interrupt handler.
3000 */
3001static irqreturn_t qib_7322pintr(int irq, void *data)
3002{
3003 struct qib_ctxtdata *rcd = data;
3004 struct qib_devdata *dd = rcd->dd;
3005 u32 npkts;
3006
3007 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3008 /*
3009 * This return value is not great, but we do not want the
3010 * interrupt core code to remove our interrupt handler
3011 * because we don't appear to be handling an interrupt
3012 * during a chip reset.
3013 */
3014 return IRQ_HANDLED;
3015
3016 qib_stats.sps_ints++;
3017 if (dd->int_counter != (u32) -1)
3018 dd->int_counter++;
3019
3020#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3021 if (dd->flags & QIB_DCA_ENABLED)
3022 qib_update_rhdrq_dca(rcd);
3023#endif
3024
3025 /* Clear the interrupt bit we expect to be set. */
3026 qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3027 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3028
3029 qib_kreceive(rcd, NULL, &npkts);
3030 adjust_rcv_timeout(rcd, npkts);
3031
3032 return IRQ_HANDLED;
3033}
3034
3035/*
3036 * Dedicated Send buffer available interrupt handler.
3037 */
3038static irqreturn_t qib_7322bufavail(int irq, void *data)
3039{
3040 struct qib_devdata *dd = data;
3041
3042 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3043 /*
3044 * This return value is not great, but we do not want the
3045 * interrupt core code to remove our interrupt handler
3046 * because we don't appear to be handling an interrupt
3047 * during a chip reset.
3048 */
3049 return IRQ_HANDLED;
3050
3051 qib_stats.sps_ints++;
3052 if (dd->int_counter != (u32) -1)
3053 dd->int_counter++;
3054
3055 /* Clear the interrupt bit we expect to be set. */
3056 qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3057
3058 /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3059 if (dd->flags & QIB_INITTED)
3060 qib_ib_piobufavail(dd);
3061 else
3062 qib_wantpiobuf_7322_intr(dd, 0);
3063
3064 return IRQ_HANDLED;
3065}
3066
3067/*
3068 * Dedicated Send DMA interrupt handler.
3069 */
3070static irqreturn_t sdma_intr(int irq, void *data)
3071{
3072 struct qib_pportdata *ppd = data;
3073 struct qib_devdata *dd = ppd->dd;
3074
3075 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3076 /*
3077 * This return value is not great, but we do not want the
3078 * interrupt core code to remove our interrupt handler
3079 * because we don't appear to be handling an interrupt
3080 * during a chip reset.
3081 */
3082 return IRQ_HANDLED;
3083
3084 qib_stats.sps_ints++;
3085 if (dd->int_counter != (u32) -1)
3086 dd->int_counter++;
3087
3088#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3089 if (dd->flags & QIB_DCA_ENABLED)
3090 qib_update_sdma_dca(ppd);
3091#endif
3092
3093 /* Clear the interrupt bit we expect to be set. */
3094 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3095 INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3096 qib_sdma_intr(ppd);
3097
3098 return IRQ_HANDLED;
3099}
3100
3101/*
3102 * Dedicated Send DMA idle interrupt handler.
3103 */
3104static irqreturn_t sdma_idle_intr(int irq, void *data)
3105{
3106 struct qib_pportdata *ppd = data;
3107 struct qib_devdata *dd = ppd->dd;
3108
3109 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3110 /*
3111 * This return value is not great, but we do not want the
3112 * interrupt core code to remove our interrupt handler
3113 * because we don't appear to be handling an interrupt
3114 * during a chip reset.
3115 */
3116 return IRQ_HANDLED;
3117
3118 qib_stats.sps_ints++;
3119 if (dd->int_counter != (u32) -1)
3120 dd->int_counter++;
3121
3122#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3123 if (dd->flags & QIB_DCA_ENABLED)
3124 qib_update_sdma_dca(ppd);
3125#endif
3126
3127 /* Clear the interrupt bit we expect to be set. */
3128 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3129 INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3130 qib_sdma_intr(ppd);
3131
3132 return IRQ_HANDLED;
3133}
3134
3135/*
3136 * Dedicated Send DMA progress interrupt handler.
3137 */
3138static irqreturn_t sdma_progress_intr(int irq, void *data)
3139{
3140 struct qib_pportdata *ppd = data;
3141 struct qib_devdata *dd = ppd->dd;
3142
3143 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3144 /*
3145 * This return value is not great, but we do not want the
3146 * interrupt core code to remove our interrupt handler
3147 * because we don't appear to be handling an interrupt
3148 * during a chip reset.
3149 */
3150 return IRQ_HANDLED;
3151
3152 qib_stats.sps_ints++;
3153 if (dd->int_counter != (u32) -1)
3154 dd->int_counter++;
3155
3156#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3157 if (dd->flags & QIB_DCA_ENABLED)
3158 qib_update_sdma_dca(ppd);
3159#endif
3160
3161 /* Clear the interrupt bit we expect to be set. */
3162 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3163 INT_MASK_P(SDmaProgress, 1) :
3164 INT_MASK_P(SDmaProgress, 0));
3165 qib_sdma_intr(ppd);
3166
3167 return IRQ_HANDLED;
3168}
3169
3170/*
3171 * Dedicated Send DMA cleanup interrupt handler.
3172 */
3173static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3174{
3175 struct qib_pportdata *ppd = data;
3176 struct qib_devdata *dd = ppd->dd;
3177
3178 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3179 /*
3180 * This return value is not great, but we do not want the
3181 * interrupt core code to remove our interrupt handler
3182 * because we don't appear to be handling an interrupt
3183 * during a chip reset.
3184 */
3185 return IRQ_HANDLED;
3186
3187 qib_stats.sps_ints++;
3188 if (dd->int_counter != (u32) -1)
3189 dd->int_counter++;
3190
3191#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3192 if (dd->flags & QIB_DCA_ENABLED)
3193 qib_update_sdma_dca(ppd);
3194#endif
3195
3196 /* Clear the interrupt bit we expect to be set. */
3197 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3198 INT_MASK_PM(SDmaCleanupDone, 1) :
3199 INT_MASK_PM(SDmaCleanupDone, 0));
3200 qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3201
3202 return IRQ_HANDLED;
3203}
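
/*
 * Editorial sketch (not in the original driver): the four dedicated SDMA
 * handlers above share an identical prologue (the DCA update is omitted
 * here for brevity); it could be factored out as below.
 * sdma_intr_prologue() is a hypothetical name.
 */
static int sdma_intr_prologue(struct qib_pportdata *ppd, u64 clearbit)
{
	struct qib_devdata *dd = ppd->dd;

	/* Silently ack interrupts that arrive during a chip reset. */
	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
		return 0;

	qib_stats.sps_ints++;
	if (dd->int_counter != (u32) -1)
		dd->int_counter++;

	/* Clear the specific interrupt bit the caller expects to be set. */
	qib_write_kreg(dd, kr_intclear, clearbit);
	return 1;
}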
3204
3205/*
3206 * Set up our chip-specific interrupt handler.
3207 * The interrupt type has already been set up, so
3208 * we just need to do the registration and error checking.
3209 * If we are using MSIx interrupts, we may fall back to
3210 * INTx later, if the interrupt handler doesn't get called
3211 * within 1/2 second (see verify_interrupt()).
3212 */
3213static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3214{
3215 int ret, i, msixnum;
3216 u64 redirect[6];
3217 u64 mask;
3218
3219 if (!dd->num_pports)
3220 return;
3221
3222 if (clearpend) {
3223 /*
3224 * if not switching interrupt types, be sure interrupts are
3225 * disabled, and then clear anything pending at this point,
3226 * because we are starting clean.
3227 */
3228 qib_7322_set_intr_state(dd, 0);
3229
3230 /* clear the reset error, init error/hwerror mask */
3231 qib_7322_init_hwerrors(dd);
3232
3233 /* clear any interrupt bits that might be set */
3234 qib_write_kreg(dd, kr_intclear, ~0ULL);
3235
3236 /* make sure no pending MSIx intr, and clear diag reg */
3237 qib_write_kreg(dd, kr_intgranted, ~0ULL);
3238 qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3239 }
3240
3241 if (!dd->cspec->num_msix_entries) {
3242 /* Try to get INTx interrupt */
3243try_intx:
3244 if (!dd->pcidev->irq) {
3245 qib_dev_err(dd, "irq is 0, BIOS error? "
3246 "Interrupts won't work\n");
3247 goto bail;
3248 }
3249 ret = request_irq(dd->pcidev->irq, qib_7322intr,
3250 IRQF_SHARED, QIB_DRV_NAME, dd);
3251 if (ret) {
3252 qib_dev_err(dd, "Couldn't setup INTx "
3253 "interrupt (irq=%d): %d\n",
3254 dd->pcidev->irq, ret);
3255 goto bail;
3256 }
3257 dd->cspec->irq = dd->pcidev->irq;
3258 dd->cspec->main_int_mask = ~0ULL;
3259 goto bail;
3260 }
3261
3262 /* Try to get MSIx interrupts */
3263 memset(redirect, 0, sizeof redirect);
3264 mask = ~0ULL;
3265 msixnum = 0;
3266 for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3267 irq_handler_t handler;
3268 const char *name;
3269 void *arg;
3270 u64 val;
3271 int lsb, reg, sh;
3272
3273 if (i < ARRAY_SIZE(irq_table)) {
3274 if (irq_table[i].port) {
3275 /* skip if for a non-configured port */
3276 if (irq_table[i].port > dd->num_pports)
3277 continue;
3278 arg = dd->pport + irq_table[i].port - 1;
3279 } else
3280 arg = dd;
3281 lsb = irq_table[i].lsb;
3282 handler = irq_table[i].handler;
3283 name = irq_table[i].name;
3284 } else {
3285 unsigned ctxt;
3286
3287 ctxt = i - ARRAY_SIZE(irq_table);
3288 /* per krcvq context receive interrupt */
3289 arg = dd->rcd[ctxt];
3290 if (!arg)
3291 continue;
3292 lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3293 handler = qib_7322pintr;
3294 name = QIB_DRV_NAME " (kctx)";
3295 }
3296 ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
3297 handler, 0, name, arg);
3298 if (ret) {
3299 /*
3300 * Shouldn't happen since the enable said we could
3301 * have as many as we are trying to set up here.
3302 */
3303 qib_dev_err(dd, "Couldn't setup MSIx "
3304 "interrupt (vec=%d, irq=%d): %d\n", msixnum,
3305 dd->cspec->msix_entries[msixnum].vector,
3306 ret);
3307 qib_7322_nomsix(dd);
3308 goto try_intx;
3309 }
3310 dd->cspec->msix_arg[msixnum] = arg;
3311 if (lsb >= 0) {
3312 reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3313 sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3314 SYM_LSB(IntRedirect0, vec1);
3315 mask &= ~(1ULL << lsb);
3316 redirect[reg] |= ((u64) msixnum) << sh;
3317 }
3318 val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3319 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3320 msixnum++;
3321 }
3322 /* Initialize the vector mapping */
3323 for (i = 0; i < ARRAY_SIZE(redirect); i++)
3324 qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3325 dd->cspec->main_int_mask = mask;
3326bail:;
3327}
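
/*
 * Editorial sketch (not in the original driver): the redirect math from
 * the MSIx loop above, isolated. For an interrupt source at bit position
 * lsb, the vector number is packed into one of the six IntRedirect
 * registers and the source is removed from the mask the general
 * interrupt handler services. set_redirect_entry() is a hypothetical name.
 */
static void set_redirect_entry(u64 redirect[6], u64 *mask,
			       int lsb, int msixnum)
{
	int reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
	int sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
		SYM_LSB(IntRedirect0, vec1);

	*mask &= ~(1ULL << lsb);		/* general handler skips it */
	redirect[reg] |= ((u64) msixnum) << sh;	/* route to this MSIx vector */
}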
3328
3329/**
3330 * qib_7322_boardname - fill in the board name and note features
3331 * @dd: the qlogic_ib device
3332 *
3333 * info will be based on the board revision register
3334 */
3335static unsigned qib_7322_boardname(struct qib_devdata *dd)
3336{
3337 /* Will need enumeration of board-types here */
3338 char *n;
3339 u32 boardid, namelen;
3340 unsigned features = DUAL_PORT_CAP;
3341
3342 boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3343
3344 switch (boardid) {
3345 case 0:
3346 n = "InfiniPath_QLE7342_Emulation";
3347 break;
3348 case 1:
3349 n = "InfiniPath_QLE7340";
3350 dd->flags |= QIB_HAS_QSFP;
3351 features = PORT_SPD_CAP;
3352 break;
3353 case 2:
3354 n = "InfiniPath_QLE7342";
3355 dd->flags |= QIB_HAS_QSFP;
3356 break;
3357 case 3:
3358 n = "InfiniPath_QMI7342";
3359 break;
3360 case 4:
3361 n = "InfiniPath_Unsupported7342";
3362 qib_dev_err(dd, "Unsupported version of QMH7342\n");
3363 features = 0;
3364 break;
3365 case BOARD_QMH7342:
3366 n = "InfiniPath_QMH7342";
3367 features = 0x24;
3368 break;
3369 case BOARD_QME7342:
3370 n = "InfiniPath_QME7342";
3371 break;
3372 case 15:
3373 n = "InfiniPath_QLE7342_TEST";
3374 dd->flags |= QIB_HAS_QSFP;
3375 break;
3376 default:
3377 n = "InfiniPath_QLE73xy_UNKNOWN";
3378 qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3379 break;
3380 }
3381 dd->board_atten = 1; /* index into txdds_Xdr */
3382
3383 namelen = strlen(n) + 1;
3384 dd->boardname = kmalloc(namelen, GFP_KERNEL);
3385 if (!dd->boardname)
3386 qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
3387 else
3388 snprintf(dd->boardname, namelen, "%s", n);
3389
3390 snprintf(dd->boardversion, sizeof(dd->boardversion),
3391 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3392 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3393 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3394 dd->majrev, dd->minrev,
3395 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
3396
3397 if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3398 qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
3399 " by module parameter\n", dd->unit);
3400 features &= PORT_SPD_CAP;
3401 }
3402
3403 return features;
3404}
3405
3406/*
3407 * This routine sleeps, so it can only be called from user context, not
3408 * from interrupt context.
3409 */
3410static int qib_do_7322_reset(struct qib_devdata *dd)
3411{
3412 u64 val;
3413 u64 *msix_vecsave;
3414 int i, msix_entries, ret = 1;
3415 u16 cmdval;
3416 u8 int_line, clinesz;
3417 unsigned long flags;
3418
3419 /* Use dev_err so it shows up in logs, etc. */
3420 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3421
3422 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3423
3424 msix_entries = dd->cspec->num_msix_entries;
3425
3426 /* no interrupts until re-initialized */
3427 qib_7322_set_intr_state(dd, 0);
3428
3429 if (msix_entries) {
3430 qib_7322_nomsix(dd);
3431 /* can be up to 512 bytes, too big for stack */
3432 msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
3433 sizeof(u64), GFP_KERNEL);
3434 if (!msix_vecsave)
3435 qib_dev_err(dd, "No mem to save MSIx data\n");
3436 } else
3437 msix_vecsave = NULL;
3438
3439 /*
3440 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3441 * info that is set up by the BIOS, so we have to save and restore
3442 * it ourselves. There is some risk something could change it,
3443 * after we save it, but since we have disabled the MSIx, it
3444 * shouldn't be touched...
3445 */
3446 for (i = 0; i < msix_entries; i++) {
3447 u64 vecaddr, vecdata;
3448 vecaddr = qib_read_kreg64(dd, 2 * i +
3449 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3450 vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3451 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3452 if (msix_vecsave) {
3453 msix_vecsave[2 * i] = vecaddr;
3454 /* save it without the masked bit set */
3455 msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3456 }
3457 }
3458
3459 dd->pport->cpspec->ibdeltainprog = 0;
3460 dd->pport->cpspec->ibsymdelta = 0;
3461 dd->pport->cpspec->iblnkerrdelta = 0;
3462 dd->pport->cpspec->ibmalfdelta = 0;
3463 dd->int_counter = 0; /* so we check interrupts work again */
3464
3465 /*
3466 * Keep chip from being accessed until we are ready. Use
3467 * writeq() directly, to allow the write even though QIB_PRESENT
3468 * isn't set.
3469 */
3470 dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3471 dd->flags |= QIB_DOING_RESET;
3472 val = dd->control | QLOGIC_IB_C_RESET;
3473 writeq(val, &dd->kregbase[kr_control]);
3474
3475 for (i = 1; i <= 5; i++) {
3476 /*
3477 * Allow MBIST, etc. to complete; longer on each retry.
3478 * We sometimes get machine checks from bus timeout if no
3479 * response, so for now, make it *really* long.
3480 */
3481 msleep(1000 + (1 + i) * 3000);
3482
3483 qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3484
3485 /*
3486 * Use readq directly, so we don't need to mark it as PRESENT
3487 * until we get a successful indication that all is well.
3488 */
3489 val = readq(&dd->kregbase[kr_revision]);
3490 if (val == dd->revision)
3491 break;
3492 if (i == 5) {
3493 qib_dev_err(dd, "Failed to initialize after reset, "
3494 "unusable\n");
3495 ret = 0;
3496 goto bail;
3497 }
3498 }
3499
3500 dd->flags |= QIB_PRESENT; /* it's back */
3501
3502 if (msix_entries) {
3503 /* restore the MSIx vector address and data if saved above */
3504 for (i = 0; i < msix_entries; i++) {
3505 dd->cspec->msix_entries[i].entry = i;
3506 if (!msix_vecsave || !msix_vecsave[2 * i])
3507 continue;
3508 qib_write_kreg(dd, 2 * i +
3509 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3510 msix_vecsave[2 * i]);
3511 qib_write_kreg(dd, 1 + 2 * i +
3512 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3513 msix_vecsave[1 + 2 * i]);
3514 }
3515 }
3516
3517 /* initialize the remaining registers. */
3518 for (i = 0; i < dd->num_pports; ++i)
3519 write_7322_init_portregs(&dd->pport[i]);
3520 write_7322_initregs(dd);
3521
3522 if (qib_pcie_params(dd, dd->lbus_width,
3523 &dd->cspec->num_msix_entries,
3524 dd->cspec->msix_entries))
3525 qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
3526 "continuing anyway\n");
3527
3528 qib_setup_7322_interrupt(dd, 1);
3529
3530 for (i = 0; i < dd->num_pports; ++i) {
3531 struct qib_pportdata *ppd = &dd->pport[i];
3532
3533 spin_lock_irqsave(&ppd->lflags_lock, flags);
3534 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3535 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3536 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3537 }
3538
3539bail:
3540 dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3541 kfree(msix_vecsave);
3542 return ret;
3543}
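
/*
 * Editorial note on the reset path above: msix_vecsave[] interleaves the
 * MSIx table entries, so vector i occupies slots [2 * i] (address) and
 * [2 * i + 1] (data); the data word is saved with bit 32 (the per-vector
 * mask bit, 0x100000000ULL) cleared so the restored vector is unmasked.
 */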
3544
3545/**
3546 * qib_7322_put_tid - write a TID to the chip
3547 * @dd: the qlogic_ib device
3548 * @tidptr: pointer to the expected TID (in chip) to update
3549 * @type: RCVHQ_RCV_TYPE_EAGER or RCVHQ_RCV_TYPE_EXPECTED
3550 * @pa: physical address of in memory buffer; tidinvalid if freeing
3551 */
3552static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3553 u32 type, unsigned long pa)
3554{
3555 if (!(dd->flags & QIB_PRESENT))
3556 return;
3557 if (pa != dd->tidinvalid) {
3558 u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3559
3560 /* paranoia checks */
3561 if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3562 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3563 pa);
3564 return;
3565 }
3566 if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3567 qib_dev_err(dd, "Physical page address 0x%lx "
3568 "larger than supported\n", pa);
3569 return;
3570 }
3571
3572 if (type == RCVHQ_RCV_TYPE_EAGER)
3573 chippa |= dd->tidtemplate;
3574 else /* for now, always full 4KB page */
3575 chippa |= IBA7322_TID_SZ_4K;
3576 pa = chippa;
3577 }
3578 writeq(pa, tidptr);
3579 mmiowb();
3580}
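
/*
 * Editorial sketch (not in the original driver): the validity checks
 * qib_7322_put_tid() applies to a physical address, as a standalone
 * predicate. qib_7322_tid_pa_ok() is a hypothetical name.
 */
static int qib_7322_tid_pa_ok(unsigned long pa)
{
	unsigned long chippa = pa >> IBA7322_TID_PA_SHIFT;

	/* must be 2KB aligned and fit the chip's address field */
	return pa == (chippa << IBA7322_TID_PA_SHIFT) &&
	       chippa < (1UL << IBA7322_TID_SZ_SHIFT);
}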
3581
3582/**
3583 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3584 * @dd: the qlogic_ib device
3585 * @rcd: the context data
3586 *
3587 * clear all TID entries for a ctxt, expected and eager.
3588 * Used from qib_close().
3589 */
3590static void qib_7322_clear_tids(struct qib_devdata *dd,
3591 struct qib_ctxtdata *rcd)
3592{
3593 u64 __iomem *tidbase;
3594 unsigned long tidinv;
3595 u32 ctxt;
3596 int i;
3597
3598 if (!dd->kregbase || !rcd)
3599 return;
3600
3601 ctxt = rcd->ctxt;
3602
3603 tidinv = dd->tidinvalid;
3604 tidbase = (u64 __iomem *)
3605 ((char __iomem *) dd->kregbase +
3606 dd->rcvtidbase +
3607 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3608
3609 for (i = 0; i < dd->rcvtidcnt; i++)
3610 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3611 tidinv);
3612
3613 tidbase = (u64 __iomem *)
3614 ((char __iomem *) dd->kregbase +
3615 dd->rcvegrbase +
3616 rcd->rcvegr_tid_base * sizeof(*tidbase));
3617
3618 for (i = 0; i < rcd->rcvegrcnt; i++)
3619 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3620 tidinv);
3621}
3622
3623/**
3624 * qib_7322_tidtemplate - setup constants for TID updates
3625 * @dd: the qlogic_ib device
3626 *
3627 * We set up values that we use a lot, to avoid recalculating each time.
3628 */
3629static void qib_7322_tidtemplate(struct qib_devdata *dd)
3630{
3631 /*
3632 * For now, we always allocate 4KB buffers (at init) so we can
3633 * receive max size packets. We may want a module parameter to
3634 * specify 2KB or 4KB and/or make it per port instead of per device
3635 * for those who want to reduce memory footprint. Note that the
3636 * rcvhdrentsize size must be large enough to hold the largest
3637 * IB header (currently 96 bytes) that we expect to handle (plus of
3638 * course the 2 dwords of RHF).
3639 */
3640 if (dd->rcvegrbufsize == 2048)
3641 dd->tidtemplate = IBA7322_TID_SZ_2K;
3642 else if (dd->rcvegrbufsize == 4096)
3643 dd->tidtemplate = IBA7322_TID_SZ_4K;
3644 dd->tidinvalid = 0;
3645}
3646
3647/**
3648 * qib_7322_get_base_info - set chip-specific flags for user code
3649 * @rcd: the qlogic_ib ctxt
3650 * @kinfo: qib_base_info pointer
3651 *
3652 * We set the PCIE flag because the lower bandwidth on PCIe vs
3653 * HyperTransport can affect some user packet algorithms.
3654 */
3656static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3657 struct qib_base_info *kinfo)
3658{
3659 kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3660 QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3661 QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3662 if (rcd->dd->cspec->r1)
3663 kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3664 if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3665 kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3666
3667 return 0;
3668}
3669
3670static struct qib_message_header *
3671qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3672{
3673 u32 offset = qib_hdrget_offset(rhf_addr);
3674
3675 return (struct qib_message_header *)
3676 (rhf_addr - dd->rhf_offset + offset);
3677}
3678
3679/*
3680 * Configure number of contexts.
3681 */
3682static void qib_7322_config_ctxts(struct qib_devdata *dd)
3683{
3684 unsigned long flags;
3685 u32 nchipctxts;
3686
3687 nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3688 dd->cspec->numctxts = nchipctxts;
3689 if (qib_n_krcv_queues > 1 && dd->num_pports) {
3690 /*
3691 * Set the mask for which bits from the QPN are used
3692 * to select a context number.
3693 */
3694 dd->qpn_mask = 0x3f;
3695 dd->first_user_ctxt = NUM_IB_PORTS +
3696 (qib_n_krcv_queues - 1) * dd->num_pports;
3697 if (dd->first_user_ctxt > nchipctxts)
3698 dd->first_user_ctxt = nchipctxts;
3699 dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3700 } else {
3701 dd->first_user_ctxt = NUM_IB_PORTS;
3702 dd->n_krcv_queues = 1;
3703 }
3704
3705 if (!qib_cfgctxts) {
3706 int nctxts = dd->first_user_ctxt + num_online_cpus();
3707
3708 if (nctxts <= 6)
3709 dd->ctxtcnt = 6;
3710 else if (nctxts <= 10)
3711 dd->ctxtcnt = 10;
3712 else if (nctxts <= nchipctxts)
3713 dd->ctxtcnt = nchipctxts;
3714 } else if (qib_cfgctxts < dd->num_pports)
3715 dd->ctxtcnt = dd->num_pports;
3716 else if (qib_cfgctxts <= nchipctxts)
3717 dd->ctxtcnt = qib_cfgctxts;
3718 if (!dd->ctxtcnt) /* none of the above, set to max */
3719 dd->ctxtcnt = nchipctxts;
3720
3721 /*
3722 * Chip can be configured for 6, 10, or 18 ctxts, and choice
3723 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3724 * Lock to be paranoid about later motion, etc.
3725 */
3726 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3727 if (dd->ctxtcnt > 10)
3728 dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3729 else if (dd->ctxtcnt > 6)
3730 dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3731 /* else configure for default 6 receive ctxts */
3732
3733 /* The XRC opcode is 5. */
3734 dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3735
3736 /*
3737 * RcvCtrl *must* be written here so that the
3738 * chip understands how to change rcvegrcnt below.
3739 */
3740 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3741 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3742
3743 /* kr_rcvegrcnt changes based on the number of contexts enabled */
3744 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3745 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
3746 dd->num_pports > 1 ? 1024U : 2048U);
3747}
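
/*
 * Editorial worked example of the sizing above, assuming a dual-port
 * board whose chip reports 18 contexts: with qib_n_krcv_queues == 4 and
 * qib_cfgctxts == 0, first_user_ctxt = 2 + (4 - 1) * 2 = 8; on an 8-CPU
 * host nctxts = 8 + 8 = 16, so ctxtcnt becomes 18 (the next supported
 * step above 10) and ContextCfg is written as 2.
 */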
3748
3749static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3750{
3752 int lsb, ret = 0;
3753 u64 maskr; /* right-justified mask */
3754
3755 switch (which) {
3756
3757 case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3758 ret = ppd->link_width_enabled;
3759 goto done;
3760
3761 case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3762 ret = ppd->link_width_active;
3763 goto done;
3764
3765 case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3766 ret = ppd->link_speed_enabled;
3767 goto done;
3768
3769 case QIB_IB_CFG_SPD: /* Get current Link spd */
3770 ret = ppd->link_speed_active;
3771 goto done;
3772
3773 case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3774 lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3775 maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3776 break;
3777
3778 case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3779 lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3780 maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3781 break;
3782
3783 case QIB_IB_CFG_LINKLATENCY:
3784 ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
3785 SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
3786 goto done;
3787
3788 case QIB_IB_CFG_OP_VLS:
3789 ret = ppd->vls_operational;
3790 goto done;
3791
3792 case QIB_IB_CFG_VL_HIGH_CAP:
3793 ret = 16;
3794 goto done;
3795
3796 case QIB_IB_CFG_VL_LOW_CAP:
3797 ret = 16;
3798 goto done;
3799
3800 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3801 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3802 OverrunThreshold);
3803 goto done;
3804
3805 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3806 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3807 PhyerrThreshold);
3808 goto done;
3809
3810 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3811 /* will only take effect when the link state changes */
3812 ret = (ppd->cpspec->ibcctrl_a &
3813 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
3814 IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
3815 goto done;
3816
3817 case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
3818 lsb = IBA7322_IBC_HRTBT_LSB;
3819 maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
3820 break;
3821
3822 case QIB_IB_CFG_PMA_TICKS:
3823 /*
3824 * 0x00 = 10x link transfer rate, or 4 nsec for 2.5Gbs.
3825 * Since the clock is always 250MHz, the value is 3, 1 or 0.
3826 */
3827 if (ppd->link_speed_active == QIB_IB_QDR)
3828 ret = 3;
3829 else if (ppd->link_speed_active == QIB_IB_DDR)
3830 ret = 1;
3831 else
3832 ret = 0;
3833 goto done;
3834
3835 default:
3836 ret = -EINVAL;
3837 goto done;
3838 }
3839 ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
3840done:
3841 return ret;
3842}
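
/*
 * Editorial note: every case above that breaks out of the switch (rather
 * than jumping to done) is decoded by the final statement as a
 * right-justified field of the shadowed IBCCtrlB register:
 *
 *	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
 *
 * e.g. QIB_IB_CFG_HRTBT uses lsb = IBA7322_IBC_HRTBT_LSB and
 * maskr = IBA7322_IBC_HRTBT_RMASK.
 */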
3843
3844/*
3845 * The code below is again cribbed liberally from the older version;
3846 * do not lean heavily on it.
3847 */
3848#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
3849#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
3850 | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
3851
3852static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
3853{
3854 struct qib_devdata *dd = ppd->dd;
3855 u64 maskr; /* right-justified mask */
3856 int lsb, ret = 0;
3857 u16 lcmd, licmd;
3858 unsigned long flags;
3859
3860 switch (which) {
3861 case QIB_IB_CFG_LIDLMC:
3862 /*
3863 * Set LID and LMC. Combined to avoid a possible hazard;
3864 * the caller puts the LMC in the 16 MSbits, DLID in the 16 LSbits of val.
3865 */
3866 lsb = IBA7322_IBC_DLIDLMC_SHIFT;
3867 maskr = IBA7322_IBC_DLIDLMC_MASK;
3868 /*
3869 * For header-checking, the SLID in the packet will
3870 * be masked with SendIBSLMCMask, and compared
3871 * with SendIBSLIDAssignMask. Make sure we do not
3872 * set any bits not covered by the mask, or we get
3873 * false-positives.
3874 */
3875 qib_write_kreg_port(ppd, krp_sendslid,
3876 val & (val >> 16) & SendIBSLIDAssignMask);
3877 qib_write_kreg_port(ppd, krp_sendslidmask,
3878 (val >> 16) & SendIBSLMCMask);
3879 break;
3880
3881 case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
3882 ppd->link_width_enabled = val;
3883 /* convert IB value to chip register value */
3884 if (val == IB_WIDTH_1X)
3885 val = 0;
3886 else if (val == IB_WIDTH_4X)
3887 val = 1;
3888 else
3889 val = 3;
3890 maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
3891 lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
3892 break;
3893
3894 case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
3895 /*
3896 * As with width, only write the actual register if the
3897 * link is currently down; otherwise it takes effect on the next
3898 * link change. Since the setting is being explicitly requested
3899 * (via MAD or sysfs), clear autoneg failure status if speed
3900 * autoneg is enabled.
3901 */
3902 ppd->link_speed_enabled = val;
3903 val <<= IBA7322_IBC_SPEED_LSB;
3904 maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
3905 IBA7322_IBC_MAX_SPEED_MASK;
3906 if (val & (val - 1)) {
3907 /* Multiple speeds enabled */
3908 val |= IBA7322_IBC_IBTA_1_2_MASK |
3909 IBA7322_IBC_MAX_SPEED_MASK;
3910 spin_lock_irqsave(&ppd->lflags_lock, flags);
3911 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3912 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3913 } else if (val & IBA7322_IBC_SPEED_QDR)
3914 val |= IBA7322_IBC_IBTA_1_2_MASK;
3915 /* IBTA 1.2 mode + min/max + speed bits are contiguous */
3916 lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
3917 break;
3918
3919 case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
3920 lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3921 maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3922 break;
3923
3924 case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
3925 lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3926 maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3927 break;
3928
3929 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3930 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3931 OverrunThreshold);
3932 if (maskr != val) {
3933 ppd->cpspec->ibcctrl_a &=
3934 ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
3935 ppd->cpspec->ibcctrl_a |= (u64) val <<
3936 SYM_LSB(IBCCtrlA_0, OverrunThreshold);
3937 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3938 ppd->cpspec->ibcctrl_a);
3939 qib_write_kreg(dd, kr_scratch, 0ULL);
3940 }
3941 goto bail;
3942
3943 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3944 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3945 PhyerrThreshold);
3946 if (maskr != val) {
3947 ppd->cpspec->ibcctrl_a &=
3948 ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
3949 ppd->cpspec->ibcctrl_a |= (u64) val <<
3950 SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
3951 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3952 ppd->cpspec->ibcctrl_a);
3953 qib_write_kreg(dd, kr_scratch, 0ULL);
3954 }
3955 goto bail;
3956
3957 case QIB_IB_CFG_PKEYS: /* update pkeys */
3958 maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
3959 ((u64) ppd->pkeys[2] << 32) |
3960 ((u64) ppd->pkeys[3] << 48);
3961 qib_write_kreg_port(ppd, krp_partitionkey, maskr);
3962 goto bail;
3963
3964 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3965 /* will only take effect when the link state changes */
3966 if (val == IB_LINKINITCMD_POLL)
3967 ppd->cpspec->ibcctrl_a &=
3968 ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3969 else /* SLEEP */
3970 ppd->cpspec->ibcctrl_a |=
3971 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3972 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
3973 qib_write_kreg(dd, kr_scratch, 0ULL);
3974 goto bail;
3975
3976 case QIB_IB_CFG_MTU: /* update the MTU in IBC */
3977 /*
3978 * Update our housekeeping variables, and set IBC max
3979 * size, same as init code; max IBC is max we allow in
3980 * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
3981 * Set even if it's unchanged; print a debug message only
3982 * on changes.
3983 */
3984 val = (ppd->ibmaxlen >> 2) + 1;
3985 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
3986 ppd->cpspec->ibcctrl_a |= (u64)val <<
3987 SYM_LSB(IBCCtrlA_0, MaxPktLen);
3988 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3989 ppd->cpspec->ibcctrl_a);
3990 qib_write_kreg(dd, kr_scratch, 0ULL);
3991 goto bail;
3992
3993 case QIB_IB_CFG_LSTATE: /* set the IB link state */
3994 switch (val & 0xffff0000) {
3995 case IB_LINKCMD_DOWN:
3996 lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
3997 ppd->cpspec->ibmalfusesnap = 1;
3998 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
3999 crp_errlink);
4000 if (!ppd->cpspec->ibdeltainprog &&
4001 qib_compat_ddr_negotiate) {
4002 ppd->cpspec->ibdeltainprog = 1;
4003 ppd->cpspec->ibsymsnap =
4004 read_7322_creg32_port(ppd,
4005 crp_ibsymbolerr);
4006 ppd->cpspec->iblnkerrsnap =
4007 read_7322_creg32_port(ppd,
4008 crp_iblinkerrrecov);
4009 }
4010 break;
4011
4012 case IB_LINKCMD_ARMED:
4013 lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4014 if (ppd->cpspec->ibmalfusesnap) {
4015 ppd->cpspec->ibmalfusesnap = 0;
4016 ppd->cpspec->ibmalfdelta +=
4017 read_7322_creg32_port(ppd,
4018 crp_errlink) -
4019 ppd->cpspec->ibmalfsnap;
4020 }
4021 break;
4022
4023 case IB_LINKCMD_ACTIVE:
4024 lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4025 break;
4026
4027 default:
4028 ret = -EINVAL;
4029 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4030 goto bail;
4031 }
4032 switch (val & 0xffff) {
4033 case IB_LINKINITCMD_NOP:
4034 licmd = 0;
4035 break;
4036
4037 case IB_LINKINITCMD_POLL:
4038 licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4039 break;
4040
4041 case IB_LINKINITCMD_SLEEP:
4042 licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4043 break;
4044
4045 case IB_LINKINITCMD_DISABLE:
4046 licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4047 ppd->cpspec->chase_end = 0;
4048 /*
4049 * Stop the state chase counter and timer, if running;
4050 * wait for the pending timer, but don't clear .data (ppd)!
4051 */
4052 if (ppd->cpspec->chase_timer.expires) {
4053 del_timer_sync(&ppd->cpspec->chase_timer);
4054 ppd->cpspec->chase_timer.expires = 0;
4055 }
4056 break;
4057
4058 default:
4059 ret = -EINVAL;
4060 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4061 val & 0xffff);
4062 goto bail;
4063 }
4064 qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4065 goto bail;
4066
4067 case QIB_IB_CFG_OP_VLS:
4068 if (ppd->vls_operational != val) {
4069 ppd->vls_operational = val;
4070 set_vls(ppd);
4071 }
4072 goto bail;
4073
4074 case QIB_IB_CFG_VL_HIGH_LIMIT:
4075 qib_write_kreg_port(ppd, krp_highprio_limit, val);
4076 goto bail;
4077
4078 case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4079 if (val > 3) {
4080 ret = -EINVAL;
4081 goto bail;
4082 }
4083 lsb = IBA7322_IBC_HRTBT_LSB;
4084 maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4085 break;
4086
4087 case QIB_IB_CFG_PORT:
4088 /* val is the port number of the switch we are connected to. */
4089 if (ppd->dd->cspec->r1) {
4090 cancel_delayed_work(&ppd->cpspec->ipg_work);
4091 ppd->cpspec->ipg_tries = 0;
4092 }
4093 goto bail;
4094
4095 default:
4096 ret = -EINVAL;
4097 goto bail;
4098 }
4099 ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4100 ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4101 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4102 qib_write_kreg(dd, kr_scratch, 0);
4103bail:
4104 return ret;
4105}
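
/*
 * Editorial sketch (not in the original driver): the common tail of
 * qib_7322_set_ib_cfg() as a standalone read-modify-write helper;
 * rmw_ibcctrl_b() is a hypothetical name.
 */
static void rmw_ibcctrl_b(struct qib_pportdata *ppd, int lsb, u64 maskr,
			  u64 val)
{
	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);	  /* clear old field */
	ppd->cpspec->ibcctrl_b |= ((val & maskr) << lsb); /* insert new value */
	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
	qib_write_kreg(ppd->dd, kr_scratch, 0); /* flush, as done elsewhere */
}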
4106
4107static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4108{
4109 int ret = 0;
4110 u64 val, ctrlb;
4111
4112 /* only IBC loopback, may add serdes and xgxs loopbacks later */
4113 if (!strncmp(what, "ibc", 3)) {
4114 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4115 Loopback);
4116 val = 0; /* disable heart beat, so link will come up */
4117 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4118 ppd->dd->unit, ppd->port);
4119 } else if (!strncmp(what, "off", 3)) {
4120 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4121 Loopback);
4122 /* enable heart beat again */
4123 val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4124 qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
4125 "(normal)\n", ppd->dd->unit, ppd->port);
4126 } else
4127 ret = -EINVAL;
4128 if (!ret) {
4129 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4130 ppd->cpspec->ibcctrl_a);
4131 ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4132 << IBA7322_IBC_HRTBT_LSB);
4133 ppd->cpspec->ibcctrl_b = ctrlb | val;
4134 qib_write_kreg_port(ppd, krp_ibcctrl_b,
4135 ppd->cpspec->ibcctrl_b);
4136 qib_write_kreg(ppd->dd, kr_scratch, 0);
4137 }
4138 return ret;
4139}
4140
4141static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4142 struct ib_vl_weight_elem *vl)
4143{
4144 unsigned i;
4145
4146 for (i = 0; i < 16; i++, regno++, vl++) {
4147 u32 val = qib_read_kreg_port(ppd, regno);
4148
4149 vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4150 SYM_RMASK(LowPriority0_0, VirtualLane);
4151 vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4152 SYM_RMASK(LowPriority0_0, Weight);
4153 }
4154}
4155
4156static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4157 struct ib_vl_weight_elem *vl)
4158{
4159 unsigned i;
4160
4161 for (i = 0; i < 16; i++, regno++, vl++) {
4162 u64 val;
4163
4164 val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4165 SYM_LSB(LowPriority0_0, VirtualLane)) |
4166 ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4167 SYM_LSB(LowPriority0_0, Weight));
4168 qib_write_kreg_port(ppd, regno, val);
4169 }
4170 if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4171 struct qib_devdata *dd = ppd->dd;
4172 unsigned long flags;
4173
4174 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4175 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4176 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4177 qib_write_kreg(dd, kr_scratch, 0);
4178 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4179 }
4180}
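
/*
 * Editorial example of the packing above: entry VL 3 with weight 64
 * would be written as
 *
 *	((3 & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
 *		SYM_LSB(LowPriority0_0, VirtualLane)) |
 *	((64 & SYM_RMASK(LowPriority0_0, Weight)) <<
 *		SYM_LSB(LowPriority0_0, Weight))
 *
 * i.e. each of the 16 arbitration entries packs a VL number and a weight
 * into a single per-port register.
 */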
4181
4182static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4183{
4184 switch (which) {
4185 case QIB_IB_TBL_VL_HIGH_ARB:
4186 get_vl_weights(ppd, krp_highprio_0, t);
4187 break;
4188
4189 case QIB_IB_TBL_VL_LOW_ARB:
4190 get_vl_weights(ppd, krp_lowprio_0, t);
4191 break;
4192
4193 default:
4194 return -EINVAL;
4195 }
4196 return 0;
4197}
4198
4199static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4200{
4201 switch (which) {
4202 case QIB_IB_TBL_VL_HIGH_ARB:
4203 set_vl_weights(ppd, krp_highprio_0, t);
4204 break;
4205
4206 case QIB_IB_TBL_VL_LOW_ARB:
4207 set_vl_weights(ppd, krp_lowprio_0, t);
4208 break;
4209
4210 default:
4211 return -EINVAL;
4212 }
4213 return 0;
4214}
4215
4216static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4217 u32 updegr, u32 egrhd)
4218{
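	/*
	 * Editorial note: rcvhdrhead is deliberately written twice below;
	 * the duplication is present in the original driver source and
	 * appears to be a chip-access workaround, not a copy/paste error.
	 */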
4219 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4220 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4221 if (updegr)
4222 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4223}
4224
4225static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4226{
4227 u32 head, tail;
4228
4229 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4230 if (rcd->rcvhdrtail_kvaddr)
4231 tail = qib_get_rcvhdrtail(rcd);
4232 else
4233 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4234 return head == tail;
4235}
4236
4237#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4238 QIB_RCVCTRL_CTXT_DIS | \
4239 QIB_RCVCTRL_TIDFLOW_ENB | \
4240 QIB_RCVCTRL_TIDFLOW_DIS | \
4241 QIB_RCVCTRL_TAILUPD_ENB | \
4242 QIB_RCVCTRL_TAILUPD_DIS | \
4243 QIB_RCVCTRL_INTRAVAIL_ENB | \
4244 QIB_RCVCTRL_INTRAVAIL_DIS | \
4245 QIB_RCVCTRL_BP_ENB | \
4246 QIB_RCVCTRL_BP_DIS)
4247
4248#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4249 QIB_RCVCTRL_CTXT_DIS | \
4250 QIB_RCVCTRL_PKEY_DIS | \
4251 QIB_RCVCTRL_PKEY_ENB)
4252
4253/*
4254 * Modify the RCVCTRL register in chip-specific way. This
4255 * is a function because bit positions and (future) register
4256 * location is chip-specific, but the needed operations are
4257 * generic. <op> is a bit-mask because we often want to
4258 * do multiple modifications.
4259 */
4260static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4261 int ctxt)
4262{
4263 struct qib_devdata *dd = ppd->dd;
4264 struct qib_ctxtdata *rcd;
4265 u64 mask, val;
4266 unsigned long flags;
4267
4268 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4269
4270 if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4271 dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4272 if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4273 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4274 if (op & QIB_RCVCTRL_TAILUPD_ENB)
4275 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4276 if (op & QIB_RCVCTRL_TAILUPD_DIS)
4277 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4278 if (op & QIB_RCVCTRL_PKEY_ENB)
4279 ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4280 if (op & QIB_RCVCTRL_PKEY_DIS)
4281 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4282 if (ctxt < 0) {
4283 mask = (1ULL << dd->ctxtcnt) - 1;
4284 rcd = NULL;
4285 } else {
4286 mask = (1ULL << ctxt);
4287 rcd = dd->rcd[ctxt];
4288 }
4289 if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4290 ppd->p_rcvctrl |=
4291 (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4292 if (!(dd->flags & QIB_NODMA_RTAIL)) {
4293 op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4294 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4295 }
4296 /* Write these registers before the context is enabled. */
4297 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4298 rcd->rcvhdrqtailaddr_phys);
4299 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4300 rcd->rcvhdrq_phys);
4301 rcd->seq_cnt = 1;
4302#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
4303 if (dd->flags & QIB_DCA_ENABLED)
4304 qib_update_rhdrq_dca(rcd);
4305#endif
4306 }
4307 if (op & QIB_RCVCTRL_CTXT_DIS)
4308 ppd->p_rcvctrl &=
4309 ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4310 if (op & QIB_RCVCTRL_BP_ENB)
4311 dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4312 if (op & QIB_RCVCTRL_BP_DIS)
4313 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4314 if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4315 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4316 if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4317 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4318 /*
4319 * Decide which registers to write depending on the ops enabled.
4320 * Special case is "flush" (no bits set at all)
4321 * which needs to write both.
4322 */
4323 if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4324 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4325 if (op == 0 || (op & RCVCTRL_PORT_MODS))
4326 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4327 if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4328 /*
4329 * Init the context registers also; if we were
4330 * disabled, tail and head should both be zero
4331 * already from the enable, but since we don't
4332 * know, we have to do it explicitly.
4333 */
4334 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4335 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4336
4337 /* be sure enabling write seen; hd/tl should be 0 */
4338 (void) qib_read_kreg32(dd, kr_scratch);
4339 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4340 dd->rcd[ctxt]->head = val;
4341 /* If kctxt, interrupt on next receive. */
4342 if (ctxt < dd->first_user_ctxt)
4343 val |= dd->rhdrhead_intr_off;
4344 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4345 } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4346 dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4347 /* arm rcv interrupt */
4348 val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4349 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4350 }
4351 if (op & QIB_RCVCTRL_CTXT_DIS) {
4352 unsigned f;
4353
4354 /* Now that the context is disabled, clear these registers. */
4355 if (ctxt >= 0) {
4356 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4357 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4358 for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4359 qib_write_ureg(dd, ur_rcvflowtable + f,
4360 TIDFLOW_ERRBITS, ctxt);
4361 } else {
4362 unsigned i;
4363
4364 for (i = 0; i < dd->cfgctxts; i++) {
4365 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4366 i, 0);
4367 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4368 for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4369 qib_write_ureg(dd, ur_rcvflowtable + f,
4370 TIDFLOW_ERRBITS, i);
4371 }
4372 }
4373 }
4374 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4375}
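
/*
 * Editorial usage note: a negative ctxt applies the per-context bits to
 * all contexts (the mask covers every context), so a hypothetical
 *
 *	rcvctrl_7322_mod(ppd, QIB_RCVCTRL_INTRAVAIL_ENB, -1);
 *
 * arms the "interrupt on packet available" bit for every context, while
 * passing a context number restricts the change to that one context.
 */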
4376
4377/*
4378 * Modify the SENDCTRL register in chip-specific way. This
4379 * is a function where there are multiple such registers with
4380 * slightly different layouts.
4381 * The chip doesn't allow back-to-back sendctrl writes, so write
4382 * the scratch register after writing sendctrl.
4383 *
4384 * Which register is written depends on the operation.
4385 * Most operate on the common register, while
4386 * SEND_ENB and SEND_DIS operate on the per-port ones.
4387 * SEND_ENB is included in common because it can change SPCL_TRIG
4388 */
4389#define SENDCTRL_COMMON_MODS (\
4390 QIB_SENDCTRL_CLEAR | \
4391 QIB_SENDCTRL_AVAIL_DIS | \
4392 QIB_SENDCTRL_AVAIL_ENB | \
4393 QIB_SENDCTRL_AVAIL_BLIP | \
4394 QIB_SENDCTRL_DISARM | \
4395 QIB_SENDCTRL_DISARM_ALL | \
4396 QIB_SENDCTRL_SEND_ENB)
4397
4398#define SENDCTRL_PORT_MODS (\
4399 QIB_SENDCTRL_CLEAR | \
4400 QIB_SENDCTRL_SEND_ENB | \
4401 QIB_SENDCTRL_SEND_DIS | \
4402 QIB_SENDCTRL_FLUSH)
4403
4404static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4405{
4406 struct qib_devdata *dd = ppd->dd;
4407 u64 tmp_dd_sendctrl;
4408 unsigned long flags;
4409
4410 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4411
4412 /* First the dd ones that are "sticky", saved in shadow */
4413 if (op & QIB_SENDCTRL_CLEAR)
4414 dd->sendctrl = 0;
4415 if (op & QIB_SENDCTRL_AVAIL_DIS)
4416 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4417 else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4418 dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4419 if (dd->flags & QIB_USE_SPCL_TRIG)
4420 dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4421 }
4422
4423 /* Then the ppd ones that are "sticky", saved in shadow */
4424 if (op & QIB_SENDCTRL_SEND_DIS)
4425 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4426 else if (op & QIB_SENDCTRL_SEND_ENB)
4427 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4428
4429 if (op & QIB_SENDCTRL_DISARM_ALL) {
4430 u32 i, last;
4431
4432 tmp_dd_sendctrl = dd->sendctrl;
4433 last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4434 /*
4435 * Disarm any buffers that are not yet launched,
4436 * disabling updates until done.
4437 */
4438 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4439 for (i = 0; i < last; i++) {
4440 qib_write_kreg(dd, kr_sendctrl,
4441 tmp_dd_sendctrl |
4442 SYM_MASK(SendCtrl, Disarm) | i);
4443 qib_write_kreg(dd, kr_scratch, 0);
4444 }
4445 }
4446
4447 if (op & QIB_SENDCTRL_FLUSH) {
4448 u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4449
4450 /*
4451 * Now drain all the fifos. The Abort bit should never be
4452 * needed, so for now, at least, we don't use it.
4453 */
4454 tmp_ppd_sendctrl |=
4455 SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4456 SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4457 SYM_MASK(SendCtrl_0, TxeBypassIbc);
4458 qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4459 qib_write_kreg(dd, kr_scratch, 0);
4460 }
4461
4462 tmp_dd_sendctrl = dd->sendctrl;
4463
4464 if (op & QIB_SENDCTRL_DISARM)
4465 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4466 ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4467 SYM_LSB(SendCtrl, DisarmSendBuf));
4468 if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4469 (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4470 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4471
4472 if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4473 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4474 qib_write_kreg(dd, kr_scratch, 0);
4475 }
4476
4477 if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4478 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4479 qib_write_kreg(dd, kr_scratch, 0);
4480 }
4481
4482 if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4483 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4484 qib_write_kreg(dd, kr_scratch, 0);
4485 }
4486
4487 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4488
4489 if (op & QIB_SENDCTRL_FLUSH) {
4490 u32 v;
4491 /*
4492 * ensure writes have hit chip, then do a few
4493 * more reads, to allow DMA of pioavail registers
4494 * to occur, so in-memory copy is in sync with
4495 * the chip. Not always safe to sleep.
4496 */
4497 v = qib_read_kreg32(dd, kr_scratch);
4498 qib_write_kreg(dd, kr_scratch, v);
4499 v = qib_read_kreg32(dd, kr_scratch);
4500 qib_write_kreg(dd, kr_scratch, v);
4501 qib_read_kreg32(dd, kr_scratch);
4502 }
4503}
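
/*
 * Editorial usage note: op is a bitmask, so callers can combine
 * operations; e.g. a hypothetical quiesce/re-enable sequence could be
 *
 *	sendctrl_7322_mod(ppd, QIB_SENDCTRL_SEND_DIS | QIB_SENDCTRL_FLUSH);
 *	...
 *	sendctrl_7322_mod(ppd, QIB_SENDCTRL_SEND_ENB);
 *
 * and op == 0 is the special "flush" case that rewrites both the common
 * and the per-port shadow registers.
 */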
4504
4505#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4506#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4507#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4508
4509/**
4510 * qib_portcntr_7322 - read a per-port chip counter
4511 * @ppd: the qlogic_ib pport
4512 * @reg: the counter to read (not a chip offset)
4513 */
4514static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4515{
4516 struct qib_devdata *dd = ppd->dd;
4517 u64 ret = 0ULL;
4518 u16 creg;
4519 /* 0xffff for unimplemented or synthesized counters */
4520 static const u32 xlator[] = {
4521 [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4522 [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4523 [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4524 [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4525 [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4526 [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4527 [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4528 [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4529 [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4530 [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4531 [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4532 [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4533 [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
4534 [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4535 [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4536 [QIBPORTCNTR_ERRICRC] = crp_erricrc,
4537 [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4538 [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4539 [QIBPORTCNTR_BADFORMAT] = crp_badformat,
4540 [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4541 [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4542 [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4543 [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4544 [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4545 [QIBPORTCNTR_ERRLINK] = crp_errlink,
4546 [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4547 [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4548 [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4549 [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4550 [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4551 /*
4552 * the next 3 aren't really counters, but were implemented
4553 * as counters in older chips, so still get accessed as
4554 * though they were counters from this code.
4555 */
4556 [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4557 [QIBPORTCNTR_PSSTART] = krp_psstart,
4558 [QIBPORTCNTR_PSSTAT] = krp_psstat,
4559 /* pseudo-counter, summed for all ports */
4560 [QIBPORTCNTR_KHDROVFL] = 0xffff,
4561 };
4562
4563 if (reg >= ARRAY_SIZE(xlator)) {
4564 qib_devinfo(ppd->dd->pcidev,
4565 "Unimplemented portcounter %u\n", reg);
4566 goto done;
4567 }
4568 creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4569
4570 /* handle non-counters and special cases first */
4571 if (reg == QIBPORTCNTR_KHDROVFL) {
4572 int i;
4573
4574 /* sum over all kernel contexts (skip if mini_init) */
4575 for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4576 struct qib_ctxtdata *rcd = dd->rcd[i];
4577
4578 if (!rcd || rcd->ppd != ppd)
4579 continue;
4580 ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4581 }
4582 goto done;
4583 } else if (reg == QIBPORTCNTR_RXDROPPKT) {
4584 /*
4585 * Used as part of the synthesis of port_rcv_errors
4586 * in the verbs code for IBTA counters. Not needed for 7322,
4587 * because all the errors are already counted by other cntrs.
4588 */
4589 goto done;
4590 } else if (reg == QIBPORTCNTR_PSINTERVAL ||
4591 reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4592 /* were counters in older chips, now per-port kernel regs */
4593 ret = qib_read_kreg_port(ppd, creg);
4594 goto done;
4595 }
4596
4597 /*
4598 * Only fast increment counters are 64 bits; use 32 bit reads to
4599 * avoid two independent reads when on Opteron.
4600 */
4601 if (xlator[reg] & _PORT_64BIT_FLAG)
4602 ret = read_7322_creg_port(ppd, creg);
4603 else
4604 ret = read_7322_creg32_port(ppd, creg);
4605 if (creg == crp_ibsymbolerr) {
4606 if (ppd->cpspec->ibdeltainprog)
4607 ret -= ret - ppd->cpspec->ibsymsnap;
4608 ret -= ppd->cpspec->ibsymdelta;
4609 } else if (creg == crp_iblinkerrrecov) {
4610 if (ppd->cpspec->ibdeltainprog)
4611 ret -= ret - ppd->cpspec->iblnkerrsnap;
4612 ret -= ppd->cpspec->iblnkerrdelta;
4613 } else if (creg == crp_errlink)
4614 ret -= ppd->cpspec->ibmalfdelta;
4615 else if (creg == crp_iblinkdown)
4616 ret += ppd->cpspec->iblnkdowndelta;
4617done:
4618 return ret;
4619}
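
/*
 * Editorial note on the xlator[] encoding above: each entry holds the
 * chip counter index in the low 15 bits, optionally ORed with flags, so
 * crp_pktsend | _PORT_64BIT_FLAG means "read with the 64-bit accessor";
 * creg = xlator[reg] & _PORT_CNTR_IDXMASK recovers the bare index, and
 * 0xffff marks a counter that is synthesized or not implemented.
 */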
4620
4621/*
4622 * Device counter names (not port-specific), one line per stat,
4623 * single string. Used by utilities like ipathstats to print the stats
4624 * in a way which works for different versions of drivers, without changing
4625 * the utility. Names need to be 12 chars or less (w/o newline), for proper
4626 * display by utility.
4627 * Non-error counters are first.
4628 * Start of "error" counters is indicated by a leading "E " on the first
4629 * "error" counter, and doesn't count in label length.
4630 * The EgrOvfl list needs to be last so we truncate them at the configured
4631 * context count for the device.
4632 * cntr7322indices contains the corresponding register indices.
4633 */
4634static const char cntr7322names[] =
4635 "Interrupts\n"
4636 "HostBusStall\n"
4637 "E RxTIDFull\n"
4638 "RxTIDInvalid\n"
4639 "RxTIDFloDrop\n" /* 7322 only */
4640 "Ctxt0EgrOvfl\n"
4641 "Ctxt1EgrOvfl\n"
4642 "Ctxt2EgrOvfl\n"
4643 "Ctxt3EgrOvfl\n"
4644 "Ctxt4EgrOvfl\n"
4645 "Ctxt5EgrOvfl\n"
4646 "Ctxt6EgrOvfl\n"
4647 "Ctxt7EgrOvfl\n"
4648 "Ctxt8EgrOvfl\n"
4649 "Ctxt9EgrOvfl\n"
4650 "Ctx10EgrOvfl\n"
4651 "Ctx11EgrOvfl\n"
4652 "Ctx12EgrOvfl\n"
4653 "Ctx13EgrOvfl\n"
4654 "Ctx14EgrOvfl\n"
4655 "Ctx15EgrOvfl\n"
4656 "Ctx16EgrOvfl\n"
4657 "Ctx17EgrOvfl\n"
4658 ;
4659
4660static const u32 cntr7322indices[] = {
4661 cr_lbint | _PORT_64BIT_FLAG,
4662 cr_lbstall | _PORT_64BIT_FLAG,
4663 cr_tidfull,
4664 cr_tidinvalid,
4665 cr_rxtidflowdrop,
4666 cr_base_egrovfl + 0,
4667 cr_base_egrovfl + 1,
4668 cr_base_egrovfl + 2,
4669 cr_base_egrovfl + 3,
4670 cr_base_egrovfl + 4,
4671 cr_base_egrovfl + 5,
4672 cr_base_egrovfl + 6,
4673 cr_base_egrovfl + 7,
4674 cr_base_egrovfl + 8,
4675 cr_base_egrovfl + 9,
4676 cr_base_egrovfl + 10,
4677 cr_base_egrovfl + 11,
4678 cr_base_egrovfl + 12,
4679 cr_base_egrovfl + 13,
4680 cr_base_egrovfl + 14,
4681 cr_base_egrovfl + 15,
4682 cr_base_egrovfl + 16,
4683 cr_base_egrovfl + 17,
4684};
4685
4686/*
4687 * same as cntr7322names and cntr7322indices, but for port-specific counters.
4688 * portcntr7322indices is somewhat complicated by some registers needing
4689 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4690 */
4691static const char portcntr7322names[] =
4692 "TxPkt\n"
4693 "TxFlowPkt\n"
4694 "TxWords\n"
4695 "RxPkt\n"
4696 "RxFlowPkt\n"
4697 "RxWords\n"
4698 "TxFlowStall\n"
4699 "TxDmaDesc\n" /* 7220 and 7322-only */
4700 "E RxDlidFltr\n" /* 7220 and 7322-only */
4701 "IBStatusChng\n"
4702 "IBLinkDown\n"
4703 "IBLnkRecov\n"
4704 "IBRxLinkErr\n"
4705 "IBSymbolErr\n"
4706 "RxLLIErr\n"
4707 "RxBadFormat\n"
4708 "RxBadLen\n"
4709 "RxBufOvrfl\n"
4710 "RxEBP\n"
4711 "RxFlowCtlErr\n"
4712 "RxICRCerr\n"
4713 "RxLPCRCerr\n"
4714 "RxVCRCerr\n"
4715 "RxInvalLen\n"
4716 "RxInvalPKey\n"
4717 "RxPktDropped\n"
4718 "TxBadLength\n"
4719 "TxDropped\n"
4720 "TxInvalLen\n"
4721 "TxUnderrun\n"
4722 "TxUnsupVL\n"
4723 "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4724 "RxVL15Drop\n"
4725 "RxVlErr\n"
4726 "XcessBufOvfl\n"
4727 "RxQPBadCtxt\n" /* 7322-only from here down */
4728 "TXBadHeader\n"
4729 ;
4730
4731static const u32 portcntr7322indices[] = {
4732 QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4733 crp_pktsendflow,
4734 QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4735 QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4736 crp_pktrcvflowctrl,
4737 QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4738 QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4739 crp_txsdmadesc | _PORT_64BIT_FLAG,
4740 crp_rxdlidfltr,
4741 crp_ibstatuschange,
4742 QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4743 QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4744 QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4745 QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4746 QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4747 QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4748 QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4749 QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4750 QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4751 crp_rcvflowctrlviol,
4752 QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4753 QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4754 QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4755 QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4756 QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4757 QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4758 crp_txminmaxlenerr,
4759 crp_txdroppedpkt,
4760 crp_txlenerr,
4761 crp_txunderrun,
4762 crp_txunsupvl,
4763 QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4764 QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4765 QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4766 QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4767 crp_rxqpinvalidctxt,
4768 crp_txhdrerr,
4769};
4770
4771/* do all the setup to make the counter reads efficient later */
4772static void init_7322_cntrnames(struct qib_devdata *dd)
4773{
4774 int i, j = 0;
4775 char *s;
4776
4777 for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4778 i++) {
4779 /* we always have at least one counter before the egrovfl */
4780 if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
4781 j = 1;
4782 s = strchr(s + 1, '\n');
4783 if (s && j)
4784 j++;
4785 }
4786 dd->cspec->ncntrs = i;
4787 if (!s)
4788 /* full list; size is without terminating null */
4789 dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
4790 else
4791 dd->cspec->cntrnamelen = 1 + s - cntr7322names;
4792 dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
4793 * sizeof(u64), GFP_KERNEL);
4794 if (!dd->cspec->cntrs)
4795 qib_dev_err(dd, "Failed allocation for counters\n");
4796
4797 for (i = 0, s = (char *)portcntr7322names; s; i++)
4798 s = strchr(s + 1, '\n');
4799 dd->cspec->nportcntrs = i - 1;
4800 dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
4801 for (i = 0; i < dd->num_pports; ++i) {
4802 dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
4803 * sizeof(u64), GFP_KERNEL);
4804 if (!dd->pport[i].cpspec->portcntrs)
4805 qib_dev_err(dd, "Failed allocation for"
4806 " portcounters\n");
4807 }
4808}
4809
4810static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
4811 u64 **cntrp)
4812{
4813 u32 ret;
4814
4815 if (namep) {
4816 ret = dd->cspec->cntrnamelen;
4817 if (pos >= ret)
4818 ret = 0; /* final read after getting everything */
4819 else
4820 *namep = (char *) cntr7322names;
4821 } else {
4822 u64 *cntr = dd->cspec->cntrs;
4823 int i;
4824
4825 ret = dd->cspec->ncntrs * sizeof(u64);
4826 if (!cntr || pos >= ret) {
4827 /* everything read, or couldn't get memory */
4828 ret = 0;
4829 goto done;
4830 }
4831 *cntrp = cntr;
4832 for (i = 0; i < dd->cspec->ncntrs; i++)
4833 if (cntr7322indices[i] & _PORT_64BIT_FLAG)
4834 *cntr++ = read_7322_creg(dd,
4835 cntr7322indices[i] &
4836 _PORT_CNTR_IDXMASK);
4837 else
4838 *cntr++ = read_7322_creg32(dd,
4839 cntr7322indices[i]);
4840 }
4841done:
4842 return ret;
4843}
4844
4845static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
4846 char **namep, u64 **cntrp)
4847{
4848 u32 ret;
4849
4850 if (namep) {
4851 ret = dd->cspec->portcntrnamelen;
4852 if (pos >= ret)
4853 ret = 0; /* final read after getting everything */
4854 else
4855 *namep = (char *)portcntr7322names;
4856 } else {
4857 struct qib_pportdata *ppd = &dd->pport[port];
4858 u64 *cntr = ppd->cpspec->portcntrs;
4859 int i;
4860
4861 ret = dd->cspec->nportcntrs * sizeof(u64);
4862 if (!cntr || pos >= ret) {
4863 /* everything read, or couldn't get memory */
4864 ret = 0;
4865 goto done;
4866 }
4867 *cntrp = cntr;
4868 for (i = 0; i < dd->cspec->nportcntrs; i++) {
4869 if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
4870 *cntr++ = qib_portcntr_7322(ppd,
4871 portcntr7322indices[i] &
4872 _PORT_CNTR_IDXMASK);
4873 else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
4874 *cntr++ = read_7322_creg_port(ppd,
4875 portcntr7322indices[i] &
4876 _PORT_CNTR_IDXMASK);
4877 else
4878 *cntr++ = read_7322_creg32_port(ppd,
4879 portcntr7322indices[i]);
4880 }
4881 }
4882done:
4883 return ret;
4884}
4885
4886/**
4887 * qib_get_7322_faststats - get word counters from chip before they overflow
4888 * @opaque: contains a pointer to the qlogic_ib device qib_devdata
4889 *
4890 * VESTIGIAL: IBA7322 has no "small fast counters", so the only
4891 * real purpose of this function is to maintain the notion of
4892 * "active time", which in turn is only logged into the eeprom,
4893 * which we don't have yet for 7322-based boards.
4894 *
4895 * called from add_timer
4896 */
4897static void qib_get_7322_faststats(unsigned long opaque)
4898{
4899 struct qib_devdata *dd = (struct qib_devdata *) opaque;
4900 struct qib_pportdata *ppd;
4901 unsigned long flags;
4902 u64 traffic_wds;
4903 int pidx;
4904
4905 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
4906 ppd = dd->pport + pidx;
4907
4908 /*
4909 * If the port isn't enabled or isn't operational, or if
4910 * diags is running (which can cause memory diags to fail),
4911 * skip this port this time.
4912 */
4913 if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
4914 || dd->diag_client)
4915 continue;
4916
4917 /*
4918 * Maintain an activity timer, based on traffic
4919 * exceeding a threshold, so we need to check the word-counts
4920 * even if they are 64-bit.
4921 */
4922 traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
4923 qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
4924 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
4925 traffic_wds -= ppd->dd->traffic_wds;
4926 ppd->dd->traffic_wds += traffic_wds;
4927 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
4928 atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
4929 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
4930 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
4931 QIB_IB_QDR) &&
4932 (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
4933 QIBL_LINKACTIVE)) &&
4934 ppd->cpspec->qdr_dfe_time &&
4935 time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
4936 ppd->cpspec->qdr_dfe_on = 0;
4937
4938 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
4939 ppd->dd->cspec->r1 ?
4940 QDR_STATIC_ADAPT_INIT_R1 :
4941 QDR_STATIC_ADAPT_INIT);
4942 force_h1(ppd);
4943 }
4944 }
4945 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
4946}
4947
4948/*
4949 * If we were using MSIx, try to fall back to INTx.
4950 */
4951static int qib_7322_intr_fallback(struct qib_devdata *dd)
4952{
4953 if (!dd->cspec->num_msix_entries)
4954 return 0; /* already using INTx */
4955
4956 qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
4957 " trying INTx interrupts\n");
4958 qib_7322_nomsix(dd);
4959 qib_enable_intx(dd->pcidev);
4960 qib_setup_7322_interrupt(dd, 0);
4961 return 1;
4962}
4963
4964/*
4965 * Reset the XGXS (between serdes and IBC). Slightly less intrusive
4966 * than resetting the IBC or external link state, and useful in some
4967 * cases to cause some retraining. To do this right, we reset IBC
4968 * as well, then return to the previous state (which may still be in reset).
4969 * NOTE: some callers of this "know" this writes the current value
4970 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
4971 * check all callers.
4972 */
4973static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
4974{
4975 u64 val;
4976 struct qib_devdata *dd = ppd->dd;
4977 const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
4978 SYM_MASK(IBPCSConfig_0, xcv_treset) |
4979 SYM_MASK(IBPCSConfig_0, tx_rx_reset);
4980
4981 val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
4982 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4983 ppd->cpspec->ibcctrl_a &
4984 ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
4985
4986 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
4987 qib_read_kreg32(dd, kr_scratch);
4988 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
4989 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4990 qib_write_kreg(dd, kr_scratch, 0ULL);
4991}
4992
4993/*
4994 * This code for non-IBTA-compliant IB speed negotiation is only known to
4995 * work for the SDR to DDR transition, and only between an HCA and a switch
4996 * with recent firmware. It is based on observed heuristics, rather than
4997 * actual knowledge of the non-compliant speed negotiation.
4998 * It has a number of hard-coded fields, since the hope is to rewrite this
4999 * when a spec is available on how the negotiation is intended to work.
5000 */
5001static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5002 u32 dcnt, u32 *data)
5003{
5004 int i;
5005 u64 pbc;
5006 u32 __iomem *piobuf;
5007 u32 pnum, control, len;
5008 struct qib_devdata *dd = ppd->dd;
5009
5010 i = 0;
5011	len = 7 + dcnt + 1; /* 7-dword header, dcnt data dwords, 1 dword ICRC */
5012 control = qib_7322_setpbc_control(ppd, len, 0, 15);
5013 pbc = ((u64) control << 32) | len;
5014 while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5015 if (i++ > 15)
5016 return;
5017 udelay(2);
5018 }
5019 /* disable header check on this packet, since it can't be valid */
5020 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5021 writeq(pbc, piobuf);
5022 qib_flush_wc();
5023 qib_pio_copy(piobuf + 2, hdr, 7);
5024 qib_pio_copy(piobuf + 9, data, dcnt);
5025 if (dd->flags & QIB_USE_SPCL_TRIG) {
5026 u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5027
5028 qib_flush_wc();
5029 __raw_writel(0xaebecede, piobuf + spcl_off);
5030 }
5031 qib_flush_wc();
5032 qib_sendbuf_done(dd, pnum);
5033 /* and re-enable hdr check */
5034 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5035}
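
/*
 * PIO buffer layout assumed by autoneg_7322_sendpkt() above, as best
 * understood from the copies it performs: dwords 0-1 hold the PBC
 * (length in the low 32 bits, control word in the high 32 bits), the
 * 7-dword header starts at dword 2, payload at dword 9, and the
 * trailing ICRC dword is counted in "len" but not written by software.
 */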
5036
5037/*
5038 * _start packet gets sent twice at start, _done gets sent twice at end
5039 */
5040static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5041{
5042 struct qib_devdata *dd = ppd->dd;
5043 static u32 swapped;
5044 u32 dw, i, hcnt, dcnt, *data;
5045 static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5046 static u32 madpayload_start[0x40] = {
5047 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5048 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5049 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5050 };
5051 static u32 madpayload_done[0x40] = {
5052 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5053 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5054 0x40000001, 0x1388, 0x15e, /* rest 0's */
5055 };
5056
5057 dcnt = ARRAY_SIZE(madpayload_start);
5058 hcnt = ARRAY_SIZE(hdr);
5059 if (!swapped) {
5060 /* for maintainability, do it at runtime */
5061 for (i = 0; i < hcnt; i++) {
5062 dw = (__force u32) cpu_to_be32(hdr[i]);
5063 hdr[i] = dw;
5064 }
5065 for (i = 0; i < dcnt; i++) {
5066 dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5067 madpayload_start[i] = dw;
5068 dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5069 madpayload_done[i] = dw;
5070 }
5071 swapped = 1;
5072 }
5073
5074 data = which ? madpayload_done : madpayload_start;
5075
5076 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5077 qib_read_kreg64(dd, kr_scratch);
5078 udelay(2);
5079 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5080 qib_read_kreg64(dd, kr_scratch);
5081 udelay(2);
5082}
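
/*
 * Illustrative sketch, not driver code: the swap-once pattern used in
 * qib_autoneg_7322_send() above. Static tables are written in host
 * order for readability, then converted to wire (big-endian) order in
 * place on first use, guarded by a static flag. htonl() stands in for
 * the kernel's cpu_to_be32().
 */
#if 0
#include <stdint.h>
#include <arpa/inet.h>

static uint32_t table[4] = { 0x1810103, 0x1, 0x2c90000, 0x2c9 };

static void table_to_wire_order(void)
{
	static int swapped;
	unsigned i;

	if (swapped)
		return;
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		table[i] = htonl(table[i]);
	swapped = 1;
}
#endif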
5083
5084/*
5085 * Do the absolute minimum to cause an IB speed change, and make it
5086 * ready, but don't actually trigger the change. The caller will
5087 * do that when ready (if link is in Polling training state, it will
5088 * happen immediately, otherwise when link next goes down)
5089 *
5090 * This routine should only be used as part of the DDR autonegotiation
5091 * code for devices that are not compliant with IB 1.2 (or code that
5092 * fixes things up for same).
5093 *
5094 * When the link has gone down and autoneg is enabled, or autoneg has
5095 * failed and we give up until next time, we set both speeds, and
5096 * then we want IBTA enabled, as well as "use max enabled speed".
5097 */
5098static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5099{
5100 u64 newctrlb;
5101 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5102 IBA7322_IBC_IBTA_1_2_MASK |
5103 IBA7322_IBC_MAX_SPEED_MASK);
5104
5105 if (speed & (speed - 1)) /* multiple speeds */
5106 newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5107 IBA7322_IBC_IBTA_1_2_MASK |
5108 IBA7322_IBC_MAX_SPEED_MASK;
5109 else
5110 newctrlb |= speed == QIB_IB_QDR ?
5111 IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5112 ((speed == QIB_IB_DDR ?
5113 IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5114
5115 if (newctrlb == ppd->cpspec->ibcctrl_b)
5116 return;
5117
5118 ppd->cpspec->ibcctrl_b = newctrlb;
5119 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5120 qib_write_kreg(ppd->dd, kr_scratch, 0);
5121}
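
/*
 * A note on the test above: the speed values are one-hot bit flags
 * (hypothetically SDR=1, DDR=2, QDR=4), so "speed & (speed - 1)" is
 * the classic check for more than one bit set, i.e. multiple enabled
 * speeds. A minimal standalone demonstration:
 */
#if 0
#include <assert.h>

int main(void)
{
	enum { SDR = 1, DDR = 2, QDR = 4 };

	assert(!(SDR & (SDR - 1)));		 /* one speed: false */
	assert(!(QDR & (QDR - 1)));		 /* one speed: false */
	assert((SDR | DDR) & ((SDR | DDR) - 1)); /* two speeds: true */
	return 0;
}
#endif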
5122
5123/*
5124 * This routine is only used when we are not talking to another
5125 * IB 1.2-compliant device that we think can do DDR.
5126 * (This includes all existing switch chips as of Oct 2007.)
5127 * 1.2-compliant devices go directly to DDR prior to reaching INIT.
5128 */
5129static void try_7322_autoneg(struct qib_pportdata *ppd)
5130{
5131 unsigned long flags;
5132
5133 spin_lock_irqsave(&ppd->lflags_lock, flags);
5134 ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5135 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5136 qib_autoneg_7322_send(ppd, 0);
5137 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5138 qib_7322_mini_pcs_reset(ppd);
5139 /* 2 msec is minimum length of a poll cycle */
5140 schedule_delayed_work(&ppd->cpspec->autoneg_work,
5141 msecs_to_jiffies(2));
5142}
5143
5144/*
5145 * Handle the empirically determined mechanism for auto-negotiation
5146 * of DDR speed with switches.
5147 */
5148static void autoneg_7322_work(struct work_struct *work)
5149{
5150 struct qib_pportdata *ppd;
5151 struct qib_devdata *dd;
5152 u64 startms;
5153 u32 i;
5154 unsigned long flags;
5155
5156 ppd = container_of(work, struct qib_chippport_specific,
5157 autoneg_work.work)->ppd;
5158 dd = ppd->dd;
5159
5160 startms = jiffies_to_msecs(jiffies);
5161
5162	/*
5163	 * Busy-wait for this first part; it should be at most a
5164	 * few hundred usec, since we scheduled ourselves for 2 msec.
5165	 */
5166 for (i = 0; i < 25; i++) {
5167 if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5168 == IB_7322_LT_STATE_POLLQUIET) {
5169 qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5170 break;
5171 }
5172 udelay(100);
5173 }
5174
5175 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5176 goto done; /* we got there early or told to stop */
5177
5178	/* we expect this to time out */
5179 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5180 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5181 msecs_to_jiffies(90)))
5182 goto done;
5183 qib_7322_mini_pcs_reset(ppd);
5184
5185	/* we expect this to time out */
5186 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5187 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5188 msecs_to_jiffies(1700)))
5189 goto done;
5190 qib_7322_mini_pcs_reset(ppd);
5191
5192 set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5193
5194 /*
5195 * Wait up to 250 msec for link to train and get to INIT at DDR;
5196 * this should terminate early.
5197 */
5198 wait_event_timeout(ppd->cpspec->autoneg_wait,
5199 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5200 msecs_to_jiffies(250));
5201done:
5202 if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5203 spin_lock_irqsave(&ppd->lflags_lock, flags);
5204 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5205 if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5206 ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5207 ppd->cpspec->autoneg_tries = 0;
5208 }
5209 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5210 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5211 }
5212}
5213
5214/*
5215 * This routine is used to request IPG set in the QLogic switch.
5216 * Only called if r1.
5217 */
5218static void try_7322_ipg(struct qib_pportdata *ppd)
5219{
5220 struct qib_ibport *ibp = &ppd->ibport_data;
5221 struct ib_mad_send_buf *send_buf;
5222 struct ib_mad_agent *agent;
5223 struct ib_smp *smp;
5224 unsigned delay;
5225 int ret;
5226
5227 agent = ibp->send_agent;
5228 if (!agent)
5229 goto retry;
5230
5231 send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5232 IB_MGMT_MAD_DATA, GFP_ATOMIC);
5233 if (IS_ERR(send_buf))
5234 goto retry;
5235
5236 if (!ibp->smi_ah) {
5237 struct ib_ah_attr attr;
5238 struct ib_ah *ah;
5239
5240 memset(&attr, 0, sizeof attr);
5241 attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
5242 attr.port_num = ppd->port;
5243 ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
5244 if (IS_ERR(ah))
5245 ret = -EINVAL;
5246 else {
5247 send_buf->ah = ah;
5248 ibp->smi_ah = to_iah(ah);
5249 ret = 0;
5250 }
5251 } else {
5252 send_buf->ah = &ibp->smi_ah->ibah;
5253 ret = 0;
5254 }
5255
5256 smp = send_buf->mad;
5257 smp->base_version = IB_MGMT_BASE_VERSION;
5258 smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5259 smp->class_version = 1;
5260 smp->method = IB_MGMT_METHOD_SEND;
5261 smp->hop_cnt = 1;
5262 smp->attr_id = QIB_VENDOR_IPG;
5263 smp->attr_mod = 0;
5264
5265 if (!ret)
5266 ret = ib_post_send_mad(send_buf, NULL);
5267 if (ret)
5268 ib_free_send_mad(send_buf);
5269retry:
5270 delay = 2 << ppd->cpspec->ipg_tries;
5271 schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
5272}
5273
5274/*
5275 * Timeout handler for setting IPG.
5276 * Only called if r1.
5277 */
5278static void ipg_7322_work(struct work_struct *work)
5279{
5280 struct qib_pportdata *ppd;
5281
5282 ppd = container_of(work, struct qib_chippport_specific,
5283 ipg_work.work)->ppd;
5284 if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5285 && ++ppd->cpspec->ipg_tries <= 10)
5286 try_7322_ipg(ppd);
5287}
5288
5289static u32 qib_7322_iblink_state(u64 ibcs)
5290{
5291 u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5292
5293 switch (state) {
5294 case IB_7322_L_STATE_INIT:
5295 state = IB_PORT_INIT;
5296 break;
5297 case IB_7322_L_STATE_ARM:
5298 state = IB_PORT_ARMED;
5299 break;
5300 case IB_7322_L_STATE_ACTIVE:
5301 /* fall through */
5302 case IB_7322_L_STATE_ACT_DEFER:
5303 state = IB_PORT_ACTIVE;
5304 break;
5305 default: /* fall through */
5306 case IB_7322_L_STATE_DOWN:
5307 state = IB_PORT_DOWN;
5308 break;
5309 }
5310 return state;
5311}
5312
5313/* returns the IBTA port state, rather than the IBC link training state */
5314static u8 qib_7322_phys_portstate(u64 ibcs)
5315{
5316 u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5317 return qib_7322_physportstate[state];
5318}
5319
5320static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5321{
5322 int ret = 0, symadj = 0;
5323 unsigned long flags;
5324 int mult;
5325
5326 spin_lock_irqsave(&ppd->lflags_lock, flags);
5327 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5328 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5329
5330 /* Update our picture of width and speed from chip */
5331 if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5332 ppd->link_speed_active = QIB_IB_QDR;
5333 mult = 4;
5334 } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5335 ppd->link_speed_active = QIB_IB_DDR;
5336 mult = 2;
5337 } else {
5338 ppd->link_speed_active = QIB_IB_SDR;
5339 mult = 1;
5340 }
5341 if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5342 ppd->link_width_active = IB_WIDTH_4X;
5343 mult *= 4;
5344 } else
5345 ppd->link_width_active = IB_WIDTH_1X;
5346 ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5347
5348 if (!ibup) {
5349 u64 clr;
5350
5351 /* Link went down. */
5352 /* do IPG MAD again after linkdown, even if last time failed */
5353 ppd->cpspec->ipg_tries = 0;
5354 clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5355 (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5356 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5357 if (clr)
5358 qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5359 if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5360 QIBL_IB_AUTONEG_INPROG)))
5361 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5362 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5363 qib_cancel_sends(ppd);
5364 spin_lock_irqsave(&ppd->sdma_lock, flags);
5365 if (__qib_sdma_running(ppd))
5366 __qib_sdma_process_event(ppd,
5367 qib_sdma_event_e70_go_idle);
5368 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5369 }
5370 clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5371 if (clr == ppd->cpspec->iblnkdownsnap)
5372 ppd->cpspec->iblnkdowndelta++;
5373 } else {
5374 if (qib_compat_ddr_negotiate &&
5375 !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5376 QIBL_IB_AUTONEG_INPROG)) &&
5377 ppd->link_speed_active == QIB_IB_SDR &&
5378 (ppd->link_speed_enabled & QIB_IB_DDR)
5379 && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5380 /* we are SDR, and auto-negotiation enabled */
5381 ++ppd->cpspec->autoneg_tries;
5382 if (!ppd->cpspec->ibdeltainprog) {
5383 ppd->cpspec->ibdeltainprog = 1;
5384 ppd->cpspec->ibsymdelta +=
5385 read_7322_creg32_port(ppd,
5386 crp_ibsymbolerr) -
5387 ppd->cpspec->ibsymsnap;
5388 ppd->cpspec->iblnkerrdelta +=
5389 read_7322_creg32_port(ppd,
5390 crp_iblinkerrrecov) -
5391 ppd->cpspec->iblnkerrsnap;
5392 }
5393 try_7322_autoneg(ppd);
5394 ret = 1; /* no other IB status change processing */
5395 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5396 ppd->link_speed_active == QIB_IB_SDR) {
5397 qib_autoneg_7322_send(ppd, 1);
5398 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5399 qib_7322_mini_pcs_reset(ppd);
5400 udelay(2);
5401 ret = 1; /* no other IB status change processing */
5402 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5403 (ppd->link_speed_active & QIB_IB_DDR)) {
5404 spin_lock_irqsave(&ppd->lflags_lock, flags);
5405 ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5406 QIBL_IB_AUTONEG_FAILED);
5407 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5408 ppd->cpspec->autoneg_tries = 0;
5409 /* re-enable SDR, for next link down */
5410 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5411 wake_up(&ppd->cpspec->autoneg_wait);
5412 symadj = 1;
5413 } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5414 /*
5415 * Clear autoneg failure flag, and do setup
5416 * so we'll try next time link goes down and
5417 * back to INIT (possibly connected to a
5418 * different device).
5419 */
5420 spin_lock_irqsave(&ppd->lflags_lock, flags);
5421 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5422 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5423 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5424 symadj = 1;
5425 }
5426 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5427 symadj = 1;
5428 if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5429 try_7322_ipg(ppd);
5430 if (!ppd->cpspec->recovery_init)
5431 setup_7322_link_recovery(ppd, 0);
5432 ppd->cpspec->qdr_dfe_time = jiffies +
5433 msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5434 }
5435 ppd->cpspec->ibmalfusesnap = 0;
5436 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5437 crp_errlink);
5438 }
5439 if (symadj) {
5440 ppd->cpspec->iblnkdownsnap =
5441 read_7322_creg32_port(ppd, crp_iblinkdown);
5442 if (ppd->cpspec->ibdeltainprog) {
5443 ppd->cpspec->ibdeltainprog = 0;
5444 ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5445 crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5446 ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5447 crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5448 }
5449 } else if (!ibup && qib_compat_ddr_negotiate &&
5450 !ppd->cpspec->ibdeltainprog &&
5451 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5452 ppd->cpspec->ibdeltainprog = 1;
5453 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5454 crp_ibsymbolerr);
5455 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5456 crp_iblinkerrrecov);
5457 }
5458
5459 if (!ret)
5460 qib_setup_7322_setextled(ppd, ibup);
5461 return ret;
5462}
5463
5464/*
5465 * Does read/modify/write to appropriate registers to
5466 * set output and direction bits selected by mask.
5467 * These are in their canonical positions (e.g. the lsb of
5468 * dir will end up in D48 of extctrl on existing chips).
5469 * Returns the contents of the GP Inputs.
5470 */
5471static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5472{
5473 u64 read_val, new_out;
5474 unsigned long flags;
5475
5476 if (mask) {
5477 /* some bits being written, lock access to GPIO */
5478 dir &= mask;
5479 out &= mask;
5480 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5481 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5482 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5483 new_out = (dd->cspec->gpio_out & ~mask) | out;
5484
5485 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5486 qib_write_kreg(dd, kr_gpio_out, new_out);
5487 dd->cspec->gpio_out = new_out;
5488 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5489 }
5490 /*
5491 * It is unlikely that a read at this time would get valid
5492 * data on a pin whose direction line was set in the same
5493 * call to this function. We include the read here because
5494 * that allows us to potentially combine a change on one pin with
5495 * a read on another, and because the old code did something like
5496 * this.
5497 */
5498 read_val = qib_read_kreg64(dd, kr_extstatus);
5499 return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5500}
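
/*
 * Illustrative sketch, not driver code: the mask semantics of
 * gpio_7322_mod() above, on plain integers. Only bits set in "mask"
 * are touched; for those bits, "dir" selects the output-enable state
 * and "out" the driven value, while all other bits are preserved in
 * the shadow copies.
 */
#if 0
#include <stdint.h>

static uint32_t shadow_dir, shadow_out;	/* hypothetical shadows */

static void gpio_mod(uint32_t out, uint32_t dir, uint32_t mask)
{
	dir &= mask;
	out &= mask;
	shadow_dir = (shadow_dir & ~mask) | dir;
	shadow_out = (shadow_out & ~mask) | out;
	/* the real code then writes both shadows to chip registers */
}
#endif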
5501
5502/* Enable writes to config EEPROM, if possible. Returns previous state */
5503static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5504{
5505 int prev_wen;
5506 u32 mask;
5507
5508 mask = 1 << QIB_EEPROM_WEN_NUM;
5509 prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5510 gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5511
5512 return prev_wen & 1;
5513}
5514
5515/*
5516 * Read fundamental info we need to use the chip. These are
5517 * the registers that describe chip capabilities, and are
5518 * saved in shadow registers.
5519 */
5520static void get_7322_chip_params(struct qib_devdata *dd)
5521{
5522 u64 val;
5523 u32 piobufs;
5524 int mtu;
5525
5526 dd->palign = qib_read_kreg32(dd, kr_pagealign);
5527
5528 dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5529
5530 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5531 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5532 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5533 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5534 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5535
5536 val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5537 dd->piobcnt2k = val & ~0U;
5538 dd->piobcnt4k = val >> 32;
5539 val = qib_read_kreg64(dd, kr_sendpiosize);
5540 dd->piosize2k = val & ~0U;
5541 dd->piosize4k = val >> 32;
5542
5543 mtu = ib_mtu_enum_to_int(qib_ibmtu);
5544 if (mtu == -1)
5545 mtu = QIB_DEFAULT_MTU;
5546 dd->pport[0].ibmtu = (u32)mtu;
5547 dd->pport[1].ibmtu = (u32)mtu;
5548
5549 /* these may be adjusted in init_chip_wc_pat() */
5550 dd->pio2kbase = (u32 __iomem *)
5551 ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5552 dd->pio4kbase = (u32 __iomem *)
5553 ((char __iomem *) dd->kregbase +
5554 (dd->piobufbase >> 32));
5555 /*
5556 * 4K buffers take 2 pages; we use roundup just to be
5557 * paranoid; we calculate it once here, rather than on
5558	 * every buffer allocation.
5559 */
5560 dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5561
5562 piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5563
5564 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5565 (sizeof(u64) * BITS_PER_BYTE / 2);
5566}
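
/*
 * Worked example for the pioavregs computation above, with made-up
 * counts: each 64-bit pioavail register carries 2 status bits per send
 * buffer, so sizeof(u64) * BITS_PER_BYTE / 2 == 32 buffers per
 * register. With, say, piobcnt2k = 144, piobcnt4k = 32 and
 * NUM_VL15_BUFS = 14, piobufs = 190 and
 * pioavregs = ALIGN(190, 32) / 32 = 192 / 32 = 6.
 */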
5567
5568/*
5569 * The chip base addresses in cspec and cpspec have to be set
5570 * after possible init_chip_wc_pat(), rather than in
5571 * get_7322_chip_params(), so split out as separate function
5572 */
5573static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5574{
5575 u32 cregbase;
5576 cregbase = qib_read_kreg32(dd, kr_counterregbase);
5577
5578 dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5579 (char __iomem *)dd->kregbase);
5580
5581 dd->egrtidbase = (u64 __iomem *)
5582 ((char __iomem *) dd->kregbase + dd->rcvegrbase);
5583
5584 /* port registers are defined as relative to base of chip */
5585 dd->pport[0].cpspec->kpregbase =
5586 (u64 __iomem *)((char __iomem *)dd->kregbase);
5587 dd->pport[1].cpspec->kpregbase =
5588 (u64 __iomem *)(dd->palign +
5589 (char __iomem *)dd->kregbase);
5590 dd->pport[0].cpspec->cpregbase =
5591 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5592 kr_counterregbase) + (char __iomem *)dd->kregbase);
5593 dd->pport[1].cpspec->cpregbase =
5594 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5595 kr_counterregbase) + (char __iomem *)dd->kregbase);
5596}
5597
5598/*
5599 * This is a fairly special-purpose observer, so we only support
5600 * the port-specific parts of SendCtrl
5601 */
5602
5603#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
5604 SYM_MASK(SendCtrl_0, SDmaEnable) | \
5605 SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
5606 SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5607 SYM_MASK(SendCtrl_0, SDmaHalt) | \
5608 SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
5609 SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5610
5611static int sendctrl_hook(struct qib_devdata *dd,
5612 const struct diag_observer *op, u32 offs,
5613 u64 *data, u64 mask, int only_32)
5614{
5615 unsigned long flags;
5616 unsigned idx;
5617 unsigned pidx;
5618 struct qib_pportdata *ppd = NULL;
5619 u64 local_data, all_bits;
5620
5621 /*
5622 * The fixed correspondence between Physical ports and pports is
5623 * severed. We need to hunt for the ppd that corresponds
5624 * to the offset we got. And we have to do that without admitting
5625 * we know the stride, apparently.
5626 */
5627 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5628 u64 __iomem *psptr;
5629 u32 psoffs;
5630
5631 ppd = dd->pport + pidx;
5632 if (!ppd->cpspec->kpregbase)
5633 continue;
5634
5635 psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5636 psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5637 if (psoffs == offs)
5638 break;
5639 }
5640
5641 /* If pport is not being managed by driver, just avoid shadows. */
5642 if (pidx >= dd->num_pports)
5643 ppd = NULL;
5644
5645 /* In any case, "idx" is flat index in kreg space */
5646 idx = offs / sizeof(u64);
5647
5648 all_bits = ~0ULL;
5649 if (only_32)
5650 all_bits >>= 32;
5651
5652 spin_lock_irqsave(&dd->sendctrl_lock, flags);
5653 if (!ppd || (mask & all_bits) != all_bits) {
5654 /*
5655 * At least some mask bits are zero, so we need
5656 * to read. The judgement call is whether from
5657 * reg or shadow. First-cut: read reg, and complain
5658 * if any bits which should be shadowed are different
5659 * from their shadowed value.
5660 */
5661 if (only_32)
5662 local_data = (u64)qib_read_kreg32(dd, idx);
5663 else
5664 local_data = qib_read_kreg64(dd, idx);
5665 *data = (local_data & ~mask) | (*data & mask);
5666 }
5667 if (mask) {
5668 /*
5669 * At least some mask bits are one, so we need
5670 * to write, but only shadow some bits.
5671 */
5672 u64 sval, tval; /* Shadowed, transient */
5673
5674 /*
5675 * New shadow val is bits we don't want to touch,
5676 * ORed with bits we do, that are intended for shadow.
5677 */
5678 if (ppd) {
5679 sval = ppd->p_sendctrl & ~mask;
5680 sval |= *data & SENDCTRL_SHADOWED & mask;
5681 ppd->p_sendctrl = sval;
5682 } else
5683 sval = *data & SENDCTRL_SHADOWED & mask;
5684 tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5685 qib_write_kreg(dd, idx, tval);
5686		qib_write_kreg(dd, kr_scratch, 0ULL);
5687 }
5688 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5689 return only_32 ? 4 : 8;
5690}
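
/*
 * Illustrative sketch, not driver code: the observer read/write mask
 * contract implemented by sendctrl_hook() above, on plain integers.
 * Bits clear in "mask" are read back into *data; bits set in "mask"
 * are written, with only the shadow-worthy subset retained in the
 * software copy. SHADOWED is a hypothetical stand-in for
 * SENDCTRL_SHADOWED.
 */
#if 0
#include <stdint.h>

#define SHADOWED 0x0ff0ULL

static uint64_t hw_reg, shadow;

static void observe(uint64_t *data, uint64_t mask)
{
	if (mask != ~0ULL)	/* some bits are reads */
		*data = (hw_reg & ~mask) | (*data & mask);
	if (mask) {		/* some bits are writes */
		shadow = (shadow & ~mask) | (*data & SHADOWED & mask);
		hw_reg = shadow | (*data & ~SHADOWED & mask);
	}
}
#endif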
5691
5692static const struct diag_observer sendctrl_0_observer = {
5693 sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5694 KREG_IDX(SendCtrl_0) * sizeof(u64)
5695};
5696
5697static const struct diag_observer sendctrl_1_observer = {
5698 sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5699 KREG_IDX(SendCtrl_1) * sizeof(u64)
5700};
5701
5702static ushort sdma_fetch_prio = 8;
5703module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5704MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5705
5706/* Besides logging QSFP events, we set appropriate TxDDS values */
5707static void init_txdds_table(struct qib_pportdata *ppd, int override);
5708
5709static void qsfp_7322_event(struct work_struct *work)
5710{
5711 struct qib_qsfp_data *qd;
5712 struct qib_pportdata *ppd;
5713 u64 pwrup;
5714 int ret;
5715 u32 le2;
5716
5717 qd = container_of(work, struct qib_qsfp_data, work);
5718 ppd = qd->ppd;
5719 pwrup = qd->t_insert + msecs_to_jiffies(QSFP_PWR_LAG_MSEC);
5720
5721 /*
5722	 * Some QSFPs not only do not respond until the full power-up
5723 * time, but may behave badly if we try. So hold off responding
5724 * to insertion.
5725 */
5726 while (1) {
5727 u64 now = get_jiffies_64();
5728 if (time_after64(now, pwrup))
5729 break;
5730 msleep(1);
5731 }
5732 ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5733 /*
5734 * Need to change LE2 back to defaults if we couldn't
5735 * read the cable type (to handle cable swaps), so do this
5736 * even on failure to read cable information. We don't
5737 * get here for QME, so IS_QME check not needed here.
5738 */
5739 le2 = (!ret && qd->cache.atten[1] >= qib_long_atten &&
5740 !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ?
5741 LE2_5m : LE2_DEFAULT;
5742 ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5743 init_txdds_table(ppd, 0);
5744}
5745
5746/*
5747 * There is little we can do but complain to the user if QSFP
5748 * initialization fails.
5749 */
5750static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
5751{
5752 unsigned long flags;
5753 struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
5754 struct qib_devdata *dd = ppd->dd;
5755 u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
5756
5757 mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
5758 qd->ppd = ppd;
5759 qib_qsfp_init(qd, qsfp_7322_event);
5760 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5761 dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
5762 dd->cspec->gpio_mask |= mod_prs_bit;
5763 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5764 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
5765 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5766}
5767
5768/*
5769 * Called at device initialization time, and also if the cable_atten
5770 * module parameter is changed. This is used for cables that don't
5771 * have valid QSFP EEPROMs (not present, or attenuation is zero).
5772 * We initialize to the default, then if there is a specific
5773 * unit,port match, we use that.
5774 * String format is "default# unit#,port#=# ... u,p=#"; separators must
5775 * be a SPACE character. A newline terminates.
5776 * The last specific match is used (actually, all are used, but last
5777 * one is the one that winds up set); if none at all, fall back on default.
5778 */
5779static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5780{
5781 char *nxt, *str;
5782 int pidx, unit, port, deflt;
5783 unsigned long val;
5784 int any = 0;
5785
5786 str = cable_atten_list;
5787
5788 /* default number is validated in setup_cable_atten() */
5789 deflt = simple_strtoul(str, &nxt, 0);
5790 for (pidx = 0; pidx < dd->num_pports; ++pidx)
5791 dd->pport[pidx].cpspec->no_eep = deflt;
5792
5793 while (*nxt && nxt[1]) {
5794 str = ++nxt;
5795 unit = simple_strtoul(str, &nxt, 0);
5796 if (nxt == str || !*nxt || *nxt != ',') {
5797 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5798 ;
5799 continue;
5800 }
5801 str = ++nxt;
5802 port = simple_strtoul(str, &nxt, 0);
5803 if (nxt == str || *nxt != '=') {
5804 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5805 ;
5806 continue;
5807 }
5808 str = ++nxt;
5809 val = simple_strtoul(str, &nxt, 0);
5810 if (nxt == str) {
5811 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5812 ;
5813 continue;
5814 }
5815 if (val >= TXDDS_TABLE_SZ)
5816 continue;
5817 for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
5818 ++pidx) {
5819 if (dd->pport[pidx].port != port ||
5820 !dd->pport[pidx].link_speed_supported)
5821 continue;
5822 dd->pport[pidx].cpspec->no_eep = val;
5823 /* now change the IBC and serdes, overriding generic */
5824 init_txdds_table(&dd->pport[pidx], 1);
5825 any++;
5826 }
5827 if (*nxt == '\n')
5828 break; /* done */
5829 }
5830 if (change && !any) {
5831 /* no specific setting, use the default.
5832 * Change the IBC and serdes, but since it's
5833 * general, don't override specific settings.
5834 */
5835 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5836 if (!dd->pport[pidx].link_speed_supported)
5837 continue;
5838 init_txdds_table(&dd->pport[pidx], 0);
5839 }
5840 }
5841}
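
/*
 * Worked example (hypothetical values) for the format parsed above: a
 * cable_atten_list of
 *
 *	"3 0,1=7 0,2=9\n"
 *
 * sets the default attenuation entry to 3 for every port, then
 * overrides unit 0 port 1 with table entry 7 and unit 0 port 2 with
 * entry 9. A malformed field is skipped up to the next SPACE, so
 * "3 0;1=7" leaves all ports at the default of 3.
 */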
5842
5843/* handle the cable_atten parameter changing */
5844static int setup_cable_atten(const char *str, struct kernel_param *kp)
5845{
5846 struct qib_devdata *dd;
5847 unsigned long val;
5848 char *n;
5849 if (strlen(str) >= MAX_ATTEN_LEN) {
5850 printk(KERN_INFO QIB_DRV_NAME " cable_atten_values string "
5851 "too long\n");
5852 return -ENOSPC;
5853 }
5854 val = simple_strtoul(str, &n, 0);
5855 if (n == str || val >= TXDDS_TABLE_SZ) {
5856		printk(KERN_INFO QIB_DRV_NAME
5857		       " cable_atten_values must start with a number\n");
5858 return -EINVAL;
5859 }
5860 strcpy(cable_atten_list, str);
5861
5862 list_for_each_entry(dd, &qib_dev_list, list)
5863 set_no_qsfp_atten(dd, 1);
5864 return 0;
5865}
5866
5867/*
5868 * Write the final few registers that depend on some of the
5869 * init setup. Done late in init, just before bringing up
5870 * the serdes.
5871 */
5872static int qib_late_7322_initreg(struct qib_devdata *dd)
5873{
5874 int ret = 0, n;
5875 u64 val;
5876
5877 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
5878 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
5879 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
5880 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
5881 val = qib_read_kreg64(dd, kr_sendpioavailaddr);
5882 if (val != dd->pioavailregs_phys) {
5883 qib_dev_err(dd, "Catastrophic software error, "
5884 "SendPIOAvailAddr written as %lx, "
5885 "read back as %llx\n",
5886 (unsigned long) dd->pioavailregs_phys,
5887 (unsigned long long) val);
5888 ret = -EINVAL;
5889 }
5890
5891 n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
5892 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
5893	/* driver also sends get pkey, lid, etc., with checking, to catch bugs */
5894 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
5895
5896 qib_register_observer(dd, &sendctrl_0_observer);
5897 qib_register_observer(dd, &sendctrl_1_observer);
5898
5899 dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
5900 qib_write_kreg(dd, kr_control, dd->control);
5901 /*
5902 * Set SendDmaFetchPriority and init Tx params, including
5903 * QSFP handler on boards that have QSFP.
5904 * First set our default attenuation entry for cables that
5905 * don't have valid attenuation.
5906 */
5907 set_no_qsfp_atten(dd, 0);
5908 for (n = 0; n < dd->num_pports; ++n) {
5909 struct qib_pportdata *ppd = dd->pport + n;
5910
5911 qib_write_kreg_port(ppd, krp_senddmaprioritythld,
5912 sdma_fetch_prio & 0xf);
5913 /* Initialize qsfp if present on board. */
5914 if (dd->flags & QIB_HAS_QSFP)
5915 qib_init_7322_qsfp(ppd);
5916 }
5917 dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
5918 qib_write_kreg(dd, kr_control, dd->control);
5919
5920 return ret;
5921}
5922
5923/* per IB port errors. */
5924#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
5925 MASK_ACROSS(8, 15))
5926#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
5927#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
5928 MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
5929 MASK_ACROSS(0, 11))
5930
5931/*
5932 * Write the initialization per-port registers that need to be done at
5933 * driver load and after reset completes (i.e., that aren't done as part
5934 * of other init procedures called from qib_init.c).
5935 * Some of these should be redundant on reset, but play safe.
5936 */
5937static void write_7322_init_portregs(struct qib_pportdata *ppd)
5938{
5939 u64 val;
5940 int i;
5941
5942 if (!ppd->link_speed_supported) {
5943 /* no buffer credits for this port */
5944 for (i = 1; i < 8; i++)
5945 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
5946 qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
5947 qib_write_kreg(ppd->dd, kr_scratch, 0);
5948 return;
5949 }
5950
5951 /*
5952 * Set the number of supported virtual lanes in IBC,
5953 * for flow control packet handling on unsupported VLs
5954 */
5955 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
5956 val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
5957 val |= (u64)(ppd->vls_supported - 1) <<
5958 SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
5959 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
5960
5961 qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
5962
5963 /* enable tx header checking */
5964 qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
5965 IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
5966 IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
5967
5968 qib_write_kreg_port(ppd, krp_ncmodectrl,
5969 SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
5970
5971 /*
5972 * Unconditionally clear the bufmask bits. If SDMA is
5973 * enabled, we'll set them appropriately later.
5974 */
5975 qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
5976 qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
5977 qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
5978 if (ppd->dd->cspec->r1)
5979 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
5980}
5981
5982/*
5983 * Write the initialization per-device registers that need to be done at
5984 * driver load and after reset completes (i.e., that aren't done as part
5985 * of other init procedures called from qib_init.c). Also write per-port
5986 * registers that are affected by overall device config, such as QP mapping.
5987 * Some of these should be redundant on reset, but play safe.
5988 */
5989static void write_7322_initregs(struct qib_devdata *dd)
5990{
5991 struct qib_pportdata *ppd;
5992 int i, pidx;
5993 u64 val;
5994
5995 /* Set Multicast QPs received by port 2 to map to context one. */
5996 qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
5997
5998 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5999 unsigned n, regno;
6000 unsigned long flags;
6001
6002 if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
6003 continue;
6004
6005 ppd = &dd->pport[pidx];
6006
6007 /* be paranoid against later code motion, etc. */
6008 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6009 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6010 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6011
6012 /* Initialize QP to context mapping */
6013 regno = krp_rcvqpmaptable;
6014 val = 0;
6015 if (dd->num_pports > 1)
6016 n = dd->first_user_ctxt / dd->num_pports;
6017 else
6018 n = dd->first_user_ctxt - 1;
6019 for (i = 0; i < 32; ) {
6020 unsigned ctxt;
6021
6022 if (dd->num_pports > 1)
6023 ctxt = (i % n) * dd->num_pports + pidx;
6024 else if (i % n)
6025 ctxt = (i % n) + 1;
6026 else
6027 ctxt = ppd->hw_pidx;
6028 val |= ctxt << (5 * (i % 6));
6029 i++;
6030 if (i % 6 == 0) {
6031 qib_write_kreg_port(ppd, regno, val);
6032 val = 0;
6033 regno++;
6034 }
6035 }
6036 qib_write_kreg_port(ppd, regno, val);
6037 }
6038
6039 /*
6040	 * Set up interrupt mitigation for kernel contexts, but
6041 * not user contexts (user contexts use interrupts when
6042 * stalled waiting for any packet, so want those interrupts
6043 * right away).
6044 */
6045 for (i = 0; i < dd->first_user_ctxt; i++) {
6046 dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6047 qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6048 }
6049
6050 /*
6051	 * Initialize the rcvflow tables as disabled. Application code
6052	 * will set up each flow as it uses the flow.
6053 * Doesn't clear any of the error bits that might be set.
6054 */
6055 val = TIDFLOW_ERRBITS; /* these are W1C */
6056 for (i = 0; i < dd->ctxtcnt; i++) {
6057 int flow;
6058 for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6059 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6060 }
6061
6062	/*
6063	 * Dual-port cards init to dual-port recovery, single-port cards to
6064	 * the one port. Dual-port cards may later adjust to 1 port,
6065	 * and then back to dual port if both ports are connected.
6066	 */
6067 if (dd->num_pports)
6068 setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6069}
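
/*
 * Illustrative sketch, not driver code: the QP-to-context map loop in
 * write_7322_initregs() packs six 5-bit context numbers into each
 * mapping register. The same packing, written standalone against an
 * array instead of chip registers:
 */
#if 0
#include <stdint.h>

/* Pack 32 context numbers, six 5-bit fields per output word. */
static void pack_qpmap(const unsigned ctxt[32], uint64_t regs[6])
{
	uint64_t val = 0;
	unsigned i, regno = 0;

	for (i = 0; i < 32; ) {
		val |= (uint64_t)(ctxt[i] & 0x1f) << (5 * (i % 6));
		i++;
		if (i % 6 == 0) {
			regs[regno++] = val;
			val = 0;
		}
	}
	regs[regno] = val;	/* trailing partial word (32 % 6 == 2 entries) */
}
#endif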
6070
6071static int qib_init_7322_variables(struct qib_devdata *dd)
6072{
6073 struct qib_pportdata *ppd;
6074 unsigned features, pidx, sbufcnt;
6075 int ret, mtu;
6076 u32 sbufs, updthresh;
6077
6078 /* pport structs are contiguous, allocated after devdata */
6079 ppd = (struct qib_pportdata *)(dd + 1);
6080 dd->pport = ppd;
6081 ppd[0].dd = dd;
6082 ppd[1].dd = dd;
6083
6084 dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6085
6086 ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6087 ppd[1].cpspec = &ppd[0].cpspec[1];
6088 ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6089 ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6090
6091 spin_lock_init(&dd->cspec->rcvmod_lock);
6092 spin_lock_init(&dd->cspec->gpio_lock);
6093
6094 /* we haven't yet set QIB_PRESENT, so use read directly */
6095 dd->revision = readq(&dd->kregbase[kr_revision]);
6096
6097 if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6098 qib_dev_err(dd, "Revision register read failure, "
6099 "giving up initialization\n");
6100 ret = -ENODEV;
6101 goto bail;
6102 }
6103 dd->flags |= QIB_PRESENT; /* now register routines work */
6104
6105 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6106 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6107 dd->cspec->r1 = dd->minrev == 1;
6108
6109 get_7322_chip_params(dd);
6110 features = qib_7322_boardname(dd);
6111
6112 /* now that piobcnt2k and 4k set, we can allocate these */
6113 sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6114 NUM_VL15_BUFS + BITS_PER_LONG - 1;
6115 sbufcnt /= BITS_PER_LONG;
6116 dd->cspec->sendchkenable = kmalloc(sbufcnt *
6117 sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
6118 dd->cspec->sendgrhchk = kmalloc(sbufcnt *
6119 sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
6120 dd->cspec->sendibchk = kmalloc(sbufcnt *
6121 sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
6122 if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6123 !dd->cspec->sendibchk) {
6124 qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
6125 ret = -ENOMEM;
6126 goto bail;
6127 }
6128
6129 ppd = dd->pport;
6130
6131 /*
6132 * GPIO bits for TWSI data and clock,
6133 * used for serial EEPROM.
6134 */
6135 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6136 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6137 dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6138
6139 dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6140 QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6141 QIB_HAS_THRESH_UPDATE |
6142 (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6143 dd->flags |= qib_special_trigger ?
6144 QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6145
6146 /*
6147 * Setup initial values. These may change when PAT is enabled, but
6148 * we need these to do initial chip register accesses.
6149 */
6150 qib_7322_set_baseaddrs(dd);
6151
6152 mtu = ib_mtu_enum_to_int(qib_ibmtu);
6153 if (mtu == -1)
6154 mtu = QIB_DEFAULT_MTU;
6155
6156 dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6157 /* all hwerrors become interrupts, unless special purposed */
6158 dd->cspec->hwerrmask = ~0ULL;
6159 /* link_recovery setup causes these errors, so ignore them,
6160 * other than clearing them when they occur */
6161 dd->cspec->hwerrmask &=
6162 ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6163 SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6164 HWE_MASK(LATriggered));
6165
6166 for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6167 struct qib_chippport_specific *cp = ppd->cpspec;
6168 ppd->link_speed_supported = features & PORT_SPD_CAP;
6169 features >>= PORT_SPD_CAP_SHIFT;
6170 if (!ppd->link_speed_supported) {
6171 /* single port mode (7340, or configured) */
6172 dd->skip_kctxt_mask |= 1 << pidx;
6173 if (pidx == 0) {
6174 /* Make sure port is disabled. */
6175 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6176 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6177 ppd[0] = ppd[1];
6178 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6179 IBSerdesPClkNotDetectMask_0)
6180 | SYM_MASK(HwErrMask,
6181 SDmaMemReadErrMask_0));
6182 dd->cspec->int_enable_mask &= ~(
6183 SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6184 SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6185 SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6186 SYM_MASK(IntMask, SDmaIntMask_0) |
6187 SYM_MASK(IntMask, ErrIntMask_0) |
6188 SYM_MASK(IntMask, SendDoneIntMask_0));
6189 } else {
6190 /* Make sure port is disabled. */
6191 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6192 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6193 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6194 IBSerdesPClkNotDetectMask_1)
6195 | SYM_MASK(HwErrMask,
6196 SDmaMemReadErrMask_1));
6197 dd->cspec->int_enable_mask &= ~(
6198 SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6199 SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6200 SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6201 SYM_MASK(IntMask, SDmaIntMask_1) |
6202 SYM_MASK(IntMask, ErrIntMask_1) |
6203 SYM_MASK(IntMask, SendDoneIntMask_1));
6204 }
6205 continue;
6206 }
6207
6208 dd->num_pports++;
6209 qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6210
6211 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6212 ppd->link_width_enabled = IB_WIDTH_4X;
6213 ppd->link_speed_enabled = ppd->link_speed_supported;
6214 /*
6215 * Set the initial values to reasonable default, will be set
6216 * for real when link is up.
6217 */
6218 ppd->link_width_active = IB_WIDTH_4X;
6219 ppd->link_speed_active = QIB_IB_SDR;
6220 ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6221 switch (qib_num_cfg_vls) {
6222 case 1:
6223 ppd->vls_supported = IB_VL_VL0;
6224 break;
6225 case 2:
6226 ppd->vls_supported = IB_VL_VL0_1;
6227 break;
6228 default:
6229 qib_devinfo(dd->pcidev,
6230 "Invalid num_vls %u, using 4 VLs\n",
6231 qib_num_cfg_vls);
6232 qib_num_cfg_vls = 4;
6233 /* fall through */
6234 case 4:
6235 ppd->vls_supported = IB_VL_VL0_3;
6236 break;
6237 case 8:
6238 if (mtu <= 2048)
6239 ppd->vls_supported = IB_VL_VL0_7;
6240 else {
6241 qib_devinfo(dd->pcidev,
6242					    "Invalid num_vls %u for MTU %d, "
6243					    "using 4 VLs\n",
6244 qib_num_cfg_vls, mtu);
6245 ppd->vls_supported = IB_VL_VL0_3;
6246 qib_num_cfg_vls = 4;
6247 }
6248 break;
6249 }
6250 ppd->vls_operational = ppd->vls_supported;
6251
6252 init_waitqueue_head(&cp->autoneg_wait);
6253 INIT_DELAYED_WORK(&cp->autoneg_work,
6254 autoneg_7322_work);
6255 if (ppd->dd->cspec->r1)
6256 INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6257
6258 /*
6259 * For Mez and similar cards, no qsfp info, so do
6260 * the "cable info" setup here. Can be overridden
6261 * in adapter-specific routines.
6262 */
6263 if (!(ppd->dd->flags & QIB_HAS_QSFP)) {
6264 int i;
6265 const struct txdds_ent *txdds;
6266
6267 if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd))
6268 qib_devinfo(ppd->dd->pcidev, "IB%u:%u: "
6269 "Unknown mezzanine card type\n",
6270 ppd->dd->unit, ppd->port);
6271 txdds = IS_QMH(ppd->dd) ? &qmh_qdr_txdds :
6272 &qme_qdr_txdds;
6273
6274 /*
6275 * set values in case link comes up
6276 * before table is written to driver.
6277 */
6278 cp->h1_val = IS_QMH(ppd->dd) ? H1_FORCE_QMH :
6279 H1_FORCE_QME;
6280 for (i = 0; i < SERDES_CHANS; i++) {
6281 cp->amp[i] = txdds->amp;
6282 cp->pre[i] = txdds->pre;
6283 cp->mainv[i] = txdds->main;
6284 cp->post[i] = txdds->post;
6285 }
6286 } else
6287 cp->h1_val = H1_FORCE_VAL;
6288
6289 /* Avoid writes to chip for mini_init */
6290 if (!qib_mini_init)
6291 write_7322_init_portregs(ppd);
6292
6293 init_timer(&cp->chase_timer);
6294 cp->chase_timer.function = reenable_chase;
6295 cp->chase_timer.data = (unsigned long)ppd;
6296
6297 ppd++;
6298 }
6299
6300 dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
6301 dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
6302 dd->rhf_offset =
6303 dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6304
6305 /* we always allocate at least 2048 bytes for eager buffers */
6306 dd->rcvegrbufsize = max(mtu, 2048);
6307
6308 qib_7322_tidtemplate(dd);
6309
6310 /*
6311 * We can request a receive interrupt for 1 or
6312 * more packets from current offset.
6313 */
6314 dd->rhdrhead_intr_off =
6315 (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6316
6317	/* set up the stats timer; the add_timer is done at end of init */
6318 init_timer(&dd->stats_timer);
6319 dd->stats_timer.function = qib_get_7322_faststats;
6320 dd->stats_timer.data = (unsigned long) dd;
6321
6322 dd->ureg_align = 0x10000; /* 64KB alignment */
6323
6324 dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6325
6326 qib_7322_config_ctxts(dd);
6327 qib_set_ctxtcnt(dd);
6328
6329 if (qib_wc_pat) {
6330 ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k);
6331 if (ret)
6332 goto bail;
6333 }
6334 qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6335
6336 ret = 0;
6337 if (qib_mini_init)
6338 goto bail;
6339 if (!dd->num_pports) {
6340 qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6341 goto bail; /* no error, so can still figure out why err */
6342 }
6343
6344 write_7322_initregs(dd);
6345 ret = qib_create_ctxts(dd);
6346 init_7322_cntrnames(dd);
6347
6348 updthresh = 8U; /* update threshold */
6349
6350	/* Use all of the 4KB buffers for kernel SDMA, or zero if !SDMA.
6351	 * Reserve the update-threshold amount, or 3, whichever is greater,
6352	 * for other kernel use, such as sending SMI, MAD, and ACKs;
6353	 * unless we aren't enabling SDMA, in which case we want to use
6354	 * all the 4k bufs for the kernel.
6355	 * If this were less than the update threshold, we could wait
6356	 * a long time for an update. Coded this way because we
6357	 * sometimes change the update threshold for various reasons,
6358	 * and we want this to remain robust.
6359	 */
6360 if (dd->flags & QIB_HAS_SEND_DMA) {
6361 dd->cspec->sdmabufcnt = dd->piobcnt4k;
6362 sbufs = updthresh > 3 ? updthresh : 3;
6363 } else {
6364 dd->cspec->sdmabufcnt = 0;
6365 sbufs = dd->piobcnt4k;
6366 }
6367 dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6368 dd->cspec->sdmabufcnt;
6369 dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6370 dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6371 dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6372 dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6373
6374 /*
6375 * If we have 16 user contexts, we will have 7 sbufs
6376 * per context, so reduce the update threshold to match. We
6377 * want to update before we actually run out, at low pbufs/ctxt
6378 * so give ourselves some margin.
6379 */
6380 if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6381 updthresh = dd->pbufsctxt - 2;
6382 dd->cspec->updthresh_dflt = updthresh;
6383 dd->cspec->updthresh = updthresh;
6384
6385 /* before full enable, no interrupts, no locking needed */
6386 dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6387 << SYM_LSB(SendCtrl, AvailUpdThld)) |
6388 SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6389
6390 dd->psxmitwait_supported = 1;
6391 dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6392bail:
6393 if (!dd->ctxtcnt)
6394 dd->ctxtcnt = 1; /* for other initialization code */
6395
6396 return ret;
6397}
6398
6399static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6400 u32 *pbufnum)
6401{
6402 u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6403 struct qib_devdata *dd = ppd->dd;
6404
6405 /* last is same for 2k and 4k, because we use 4k if all 2k busy */
6406 if (pbc & PBC_7322_VL15_SEND) {
6407 first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6408 last = first;
6409 } else {
6410 if ((plen + 1) > dd->piosize2kmax_dwords)
6411 first = dd->piobcnt2k;
6412 else
6413 first = 0;
6414 last = dd->cspec->lastbuf_for_pio;
6415 }
6416 return qib_getsendbuf_range(dd, pbufnum, first, last);
6417}
6418
6419static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6420 u32 start)
6421{
6422 qib_write_kreg_port(ppd, krp_psinterval, intv);
6423 qib_write_kreg_port(ppd, krp_psstart, start);
6424}
6425
6426/*
6427 * Must be called with sdma_lock held, or before init finished.
6428 */
6429static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6430{
6431 qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6432}
6433
6434static struct sdma_set_state_action sdma_7322_action_table[] = {
6435 [qib_sdma_state_s00_hw_down] = {
6436 .go_s99_running_tofalse = 1,
6437 .op_enable = 0,
6438 .op_intenable = 0,
6439 .op_halt = 0,
6440 .op_drain = 0,
6441 },
6442 [qib_sdma_state_s10_hw_start_up_wait] = {
6443 .op_enable = 0,
6444 .op_intenable = 1,
6445 .op_halt = 1,
6446 .op_drain = 0,
6447 },
6448 [qib_sdma_state_s20_idle] = {
6449 .op_enable = 1,
6450 .op_intenable = 1,
6451 .op_halt = 1,
6452 .op_drain = 0,
6453 },
6454 [qib_sdma_state_s30_sw_clean_up_wait] = {
6455 .op_enable = 0,
6456 .op_intenable = 1,
6457 .op_halt = 1,
6458 .op_drain = 0,
6459 },
6460 [qib_sdma_state_s40_hw_clean_up_wait] = {
6461 .op_enable = 1,
6462 .op_intenable = 1,
6463 .op_halt = 1,
6464 .op_drain = 0,
6465 },
6466 [qib_sdma_state_s50_hw_halt_wait] = {
6467 .op_enable = 1,
6468 .op_intenable = 1,
6469 .op_halt = 1,
6470 .op_drain = 1,
6471 },
6472 [qib_sdma_state_s99_running] = {
6473 .op_enable = 1,
6474 .op_intenable = 1,
6475 .op_halt = 0,
6476 .op_drain = 0,
6477 .go_s99_running_totrue = 1,
6478 },
6479};
6480
6481static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6482{
6483 ppd->sdma_state.set_state_action = sdma_7322_action_table;
6484}
6485
6486static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6487{
6488 struct qib_devdata *dd = ppd->dd;
6489 unsigned lastbuf, erstbuf;
6490 u64 senddmabufmask[3] = { 0 };
6491 int n, ret = 0;
6492
6493 qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6494 qib_sdma_7322_setlengen(ppd);
6495 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6496 qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6497 qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6498 qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6499
6500 if (dd->num_pports)
6501 n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6502 else
6503 n = dd->cspec->sdmabufcnt; /* failsafe for init */
6504 erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6505 ((dd->num_pports == 1 || ppd->port == 2) ? n :
6506 dd->cspec->sdmabufcnt);
6507 lastbuf = erstbuf + n;
6508
6509 ppd->sdma_state.first_sendbuf = erstbuf;
6510 ppd->sdma_state.last_sendbuf = lastbuf;
6511 for (; erstbuf < lastbuf; ++erstbuf) {
6512 unsigned word = erstbuf / BITS_PER_LONG;
6513 unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6514
6515 BUG_ON(word >= 3);
6516 senddmabufmask[word] |= 1ULL << bit;
6517 }
6518 qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6519 qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6520 qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6521 return ret;
6522}
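
/*
 * Illustrative sketch, not driver code: the senddmabufmask construction
 * above is plain bitmap packing of buffer numbers into 64-bit words.
 * On a 64-bit build BITS_PER_LONG is 64, so the three mask registers
 * cover up to 192 buffers.
 */
#if 0
#include <stdint.h>

static void set_buf_range(uint64_t mask[3], unsigned first, unsigned last)
{
	unsigned buf;

	for (buf = first; buf < last; buf++)
		mask[buf / 64] |= 1ULL << (buf % 64);
}
#endif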
6523
6524/* sdma_lock must be held */
6525static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6526{
6527 struct qib_devdata *dd = ppd->dd;
6528 int sane;
6529 int use_dmahead;
6530 u16 swhead;
6531 u16 swtail;
6532 u16 cnt;
6533 u16 hwhead;
6534
6535 use_dmahead = __qib_sdma_running(ppd) &&
6536 (dd->flags & QIB_HAS_SDMA_TIMEOUT);
6537retry:
6538 hwhead = use_dmahead ?
6539 (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6540 (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6541
6542 swhead = ppd->sdma_descq_head;
6543 swtail = ppd->sdma_descq_tail;
6544 cnt = ppd->sdma_descq_cnt;
6545
6546 if (swhead < swtail)
6547 /* not wrapped */
6548		sane = (hwhead >= swhead) && (hwhead <= swtail);
6549 else if (swhead > swtail)
6550 /* wrapped around */
6551 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6552 (hwhead <= swtail);
6553 else
6554 /* empty */
6555 sane = (hwhead == swhead);
6556
6557 if (unlikely(!sane)) {
6558 if (use_dmahead) {
6559 /* try one more time, directly from the register */
6560 use_dmahead = 0;
6561 goto retry;
6562 }
6563 /* proceed as if no progress */
6564 hwhead = swhead;
6565 }
6566
6567 return hwhead;
6568}
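
/*
 * Illustrative sketch, not driver code: the head sanity check from
 * qib_sdma_7322_gethead() as a standalone predicate. In the circular
 * descriptor queue, a plausible hardware head must lie within the
 * software head..tail region, which may wrap past the end of the ring.
 */
#if 0
static int head_sane(unsigned hwhead, unsigned swhead, unsigned swtail,
		     unsigned cnt)
{
	if (swhead < swtail)		/* not wrapped */
		return hwhead >= swhead && hwhead <= swtail;
	if (swhead > swtail)		/* wrapped around */
		return (hwhead >= swhead && hwhead < cnt) ||
			hwhead <= swtail;
	return hwhead == swhead;	/* empty */
}
#endif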
6569
6570static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6571{
6572 u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6573
6574 return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6575 (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6576 !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6577 !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6578}
6579
6580/*
6581 * Compute the amount of delay before sending the next packet if the
6582 * port's send rate differs from the static rate set for the QP.
6583 * The delay affects the next packet and the amount of the delay is
6584 * based on the length of this packet.
6585 */
6586static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6587 u8 srate, u8 vl)
6588{
6589 u8 snd_mult = ppd->delay_mult;
6590 u8 rcv_mult = ib_rate_to_delay[srate];
6591 u32 ret;
6592
6593 ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6594
6595 /* Indicate VL15, else set the VL in the control word */
6596 if (vl == 15)
6597 ret |= PBC_7322_VL15_SEND_CTRL;
6598 else
6599 ret |= vl << PBC_VL_NUM_LSB;
6600 ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6601
6602 return ret;
6603}
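
/*
 * Worked example (made-up numbers) for the rate-delay rule above: the
 * delay is applied only when the receiver's static-rate multiplier
 * exceeds this port's own multiplier. With plen = 255 dwords,
 * snd_mult = 2 and rcv_mult = 4, the control word carries
 * ((255 + 1) >> 1) * 2 = 256; with rcv_mult <= snd_mult it carries 0.
 */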
6604
6605/*
6606 * Enable the per-port VL15 send buffers for use.
6607 * They follow the rest of the buffers, without a config parameter.
6608 * This was in initregs, but that is done before the shadow
6609 * is set up, and this has to be done after the shadow is
6610 * set up.
6611 */
6612static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
6613{
6614 unsigned vl15bufs;
6615
6616 vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
6617 qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
6618 TXCHK_CHG_TYPE_KERN, NULL);
6619}
6620
6621static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
6622{
6623 if (rcd->ctxt < NUM_IB_PORTS) {
6624 if (rcd->dd->num_pports > 1) {
6625 rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
6626 rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
6627 } else {
6628 rcd->rcvegrcnt = KCTXT0_EGRCNT;
6629 rcd->rcvegr_tid_base = 0;
6630 }
6631 } else {
6632 rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
6633 rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
6634 (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
6635 }
6636}
6637
6638#define QTXSLEEPS 5000
6639static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
6640 u32 len, u32 which, struct qib_ctxtdata *rcd)
6641{
6642 int i;
6643 const int last = start + len - 1;
6644 const int lastr = last / BITS_PER_LONG;
6645 u32 sleeps = 0;
6646 int wait = rcd != NULL;
6647 unsigned long flags;
6648
6649 while (wait) {
6650 unsigned long shadow;
6651 int cstart, previ = -1;
6652
6653 /*
6654 * when flipping from kernel to user, we can't change
6655 * the checking type if the buffer is allocated to the
6656 * driver. It's OK the other direction, because it's
6657		 * from close, and we have just disarmed all the
6658 * buffers. All the kernel to kernel changes are also
6659 * OK.
6660 */
6661 for (cstart = start; cstart <= last; cstart++) {
6662 i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6663 / BITS_PER_LONG;
6664 if (i != previ) {
6665 shadow = (unsigned long)
6666 le64_to_cpu(dd->pioavailregs_dma[i]);
6667 previ = i;
6668 }
6669 if (test_bit(((2 * cstart) +
6670 QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6671 % BITS_PER_LONG, &shadow))
6672 break;
6673 }
6674
6675 if (cstart > last)
6676 break;
6677
6678 if (sleeps == QTXSLEEPS)
6679 break;
6680 /* make sure we see an updated copy next time around */
6681 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6682 sleeps++;
6683 msleep(1);
6684 }
6685
6686 switch (which) {
6687 case TXCHK_CHG_TYPE_DIS1:
6688 /*
6689 * disable checking on a range; used by diags; just
6690 * one buffer, but still written generically
6691 */
6692 for (i = start; i <= last; i++)
6693 clear_bit(i, dd->cspec->sendchkenable);
6694 break;
6695
6696 case TXCHK_CHG_TYPE_ENAB1:
6697 /*
6698 * (re)enable checking on a range; used by diags; just
6699 * one buffer, but still written generically; read
6700 * scratch to be sure buffer actually triggered, not
6701 * just flushed from processor.
6702 */
6703 qib_read_kreg32(dd, kr_scratch);
6704 for (i = start; i <= last; i++)
6705 set_bit(i, dd->cspec->sendchkenable);
6706 break;
6707
6708 case TXCHK_CHG_TYPE_KERN:
6709 /* usable by kernel */
6710 for (i = start; i <= last; i++) {
6711 set_bit(i, dd->cspec->sendibchk);
6712 clear_bit(i, dd->cspec->sendgrhchk);
6713 }
6714 spin_lock_irqsave(&dd->uctxt_lock, flags);
6715 /* see if we need to raise avail update threshold */
6716 for (i = dd->first_user_ctxt;
6717 dd->cspec->updthresh != dd->cspec->updthresh_dflt
6718 && i < dd->cfgctxts; i++)
6719 if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
6720 ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
6721 < dd->cspec->updthresh_dflt)
6722 break;
6723 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
6724 if (i == dd->cfgctxts) {
6725 spin_lock_irqsave(&dd->sendctrl_lock, flags);
6726 dd->cspec->updthresh = dd->cspec->updthresh_dflt;
6727 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6728 dd->sendctrl |= (dd->cspec->updthresh &
6729 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
6730 SYM_LSB(SendCtrl, AvailUpdThld);
6731 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6732 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6733 }
6734 break;
6735
6736 case TXCHK_CHG_TYPE_USER:
6737 /* for user process */
6738 for (i = start; i <= last; i++) {
6739 clear_bit(i, dd->cspec->sendibchk);
6740 set_bit(i, dd->cspec->sendgrhchk);
6741 }
6742 spin_lock_irqsave(&dd->sendctrl_lock, flags);
6743 if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
6744 / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
6745 dd->cspec->updthresh = (rcd->piocnt /
6746 rcd->subctxt_cnt) - 1;
6747 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6748 dd->sendctrl |= (dd->cspec->updthresh &
6749 SYM_RMASK(SendCtrl, AvailUpdThld))
6750 << SYM_LSB(SendCtrl, AvailUpdThld);
6751 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6752 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6753 } else
6754 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6755 break;
6756
6757 default:
6758 break;
6759 }
6760
6761 for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
6762 qib_write_kreg(dd, kr_sendcheckmask + i,
6763 dd->cspec->sendchkenable[i]);
6764
6765 for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
6766 qib_write_kreg(dd, kr_sendgrhcheckmask + i,
6767 dd->cspec->sendgrhchk[i]);
6768 qib_write_kreg(dd, kr_sendibpktmask + i,
6769 dd->cspec->sendibchk[i]);
6770 }
6771
6772 /*
6773 * Be sure whatever we did was seen by the chip and acted upon,
6774 * before we return. Mostly important for which >= 2.
6775 */
6776 qib_read_kreg32(dd, kr_scratch);
6777}
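/*
 * Illustrative sketch, not driver code: the pioavail shadow above packs
 * two bits per send buffer, so the busy bit for buffer n sits at stream
 * bit (2 * n + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT); the wait loop splits
 * that into a word index and a bit position exactly like this helper.
 */
static inline void example_busy_bit_pos(u32 bufnum, u32 busy_shift,
					u32 *word, u32 *bit)
{
	u32 pos = 2 * bufnum + busy_shift;

	*word = pos / BITS_PER_LONG;	/* which shadow word */
	*bit = pos % BITS_PER_LONG;	/* bit within that word */
}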
6778
6779
6780/* useful for trigger analyzers, etc. */
6781static void writescratch(struct qib_devdata *dd, u32 val)
6782{
6783 qib_write_kreg(dd, kr_scratch, val);
6784}
6785
6786/* Dummy for now, use chip regs soon */
6787static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
6788{
6789 return -ENXIO;
6790}
6791
6792/**
6793 * qib_init_iba7322_funcs - set up the chip-specific function pointers
6794 * @dev: the pci_dev for qlogic_ib device
6795 * @ent: pci_device_id struct for this dev
6796 *
6797 * Also allocates, inits, and returns the devdata struct for this
6798 * device instance
6799 *
6800 * This is global, and is called directly at init to set up the
6801 * chip-specific function pointers for later use.
6802 */
6803struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
6804 const struct pci_device_id *ent)
6805{
6806 struct qib_devdata *dd;
6807 int ret, i;
6808 u32 tabsize, actual_cnt = 0;
6809
6810 dd = qib_alloc_devdata(pdev,
6811 NUM_IB_PORTS * sizeof(struct qib_pportdata) +
6812 sizeof(struct qib_chip_specific) +
6813 NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
6814 if (IS_ERR(dd))
6815 goto bail;
6816
6817 dd->f_bringup_serdes = qib_7322_bringup_serdes;
6818 dd->f_cleanup = qib_setup_7322_cleanup;
6819 dd->f_clear_tids = qib_7322_clear_tids;
6820 dd->f_free_irq = qib_7322_free_irq;
6821 dd->f_get_base_info = qib_7322_get_base_info;
6822 dd->f_get_msgheader = qib_7322_get_msgheader;
6823 dd->f_getsendbuf = qib_7322_getsendbuf;
6824 dd->f_gpio_mod = gpio_7322_mod;
6825 dd->f_eeprom_wen = qib_7322_eeprom_wen;
6826 dd->f_hdrqempty = qib_7322_hdrqempty;
6827 dd->f_ib_updown = qib_7322_ib_updown;
6828 dd->f_init_ctxt = qib_7322_init_ctxt;
6829 dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
6830 dd->f_intr_fallback = qib_7322_intr_fallback;
6831 dd->f_late_initreg = qib_late_7322_initreg;
6832 dd->f_setpbc_control = qib_7322_setpbc_control;
6833 dd->f_portcntr = qib_portcntr_7322;
6834 dd->f_put_tid = qib_7322_put_tid;
6835 dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
6836 dd->f_rcvctrl = rcvctrl_7322_mod;
6837 dd->f_read_cntrs = qib_read_7322cntrs;
6838 dd->f_read_portcntrs = qib_read_7322portcntrs;
6839 dd->f_reset = qib_do_7322_reset;
6840 dd->f_init_sdma_regs = init_sdma_7322_regs;
6841 dd->f_sdma_busy = qib_sdma_7322_busy;
6842 dd->f_sdma_gethead = qib_sdma_7322_gethead;
6843 dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
6844 dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
6845 dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
6846 dd->f_sendctrl = sendctrl_7322_mod;
6847 dd->f_set_armlaunch = qib_set_7322_armlaunch;
6848 dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
6849 dd->f_iblink_state = qib_7322_iblink_state;
6850 dd->f_ibphys_portstate = qib_7322_phys_portstate;
6851 dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
6852 dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
6853 dd->f_set_ib_loopback = qib_7322_set_loopback;
6854 dd->f_get_ib_table = qib_7322_get_ib_table;
6855 dd->f_set_ib_table = qib_7322_set_ib_table;
6856 dd->f_set_intr_state = qib_7322_set_intr_state;
6857 dd->f_setextled = qib_setup_7322_setextled;
6858 dd->f_txchk_change = qib_7322_txchk_change;
6859 dd->f_update_usrhead = qib_update_7322_usrhead;
6860 dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
6861 dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
6862 dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
6863 dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
6864 dd->f_sdma_init_early = qib_7322_sdma_init_early;
6865 dd->f_writescratch = writescratch;
6866 dd->f_tempsense_rd = qib_7322_tempsense_rd;
6867 /*
6868 * Do remaining PCIe setup and save PCIe values in dd.
6869 * Any error printing is already done by the init code.
6870 * On return, we have the chip mapped, but chip registers
6871 * are not set up until start of qib_init_7322_variables.
6872 */
6873 ret = qib_pcie_ddinit(dd, pdev, ent);
6874 if (ret < 0)
6875 goto bail_free;
6876
6877 /* initialize chip-specific variables */
6878 ret = qib_init_7322_variables(dd);
6879 if (ret)
6880 goto bail_cleanup;
6881
6882 if (qib_mini_init || !dd->num_pports)
6883 goto bail;
6884
6885 /*
6886 * Determine number of vectors we want; depends on port count
6887 * and number of configured kernel receive queues actually used.
6888 * Should also depend on whether sdma is enabled or not, but
6889 * that's such a rare testing case it's not worth worrying about.
6890 */
6891 tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
6892 for (i = 0; i < tabsize; i++)
6893 if ((i < ARRAY_SIZE(irq_table) &&
6894 irq_table[i].port <= dd->num_pports) ||
6895 (i >= ARRAY_SIZE(irq_table) &&
6896 dd->rcd[i - ARRAY_SIZE(irq_table)]))
6897 actual_cnt++;
6898 tabsize = actual_cnt;
6899 dd->cspec->msix_entries = kmalloc(tabsize *
6900 sizeof(struct msix_entry), GFP_KERNEL);
6901 dd->cspec->msix_arg = kmalloc(tabsize *
6902 sizeof(void *), GFP_KERNEL);
6903 if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
6904 qib_dev_err(dd, "No memory for MSIx table\n");
6905 tabsize = 0;
6906 }
6907 for (i = 0; i < tabsize; i++)
6908 dd->cspec->msix_entries[i].entry = i;
6909
6910 if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
6911 qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
6912 "continuing anyway\n");
6913 /* may be less than we wanted, if not enough available */
6914 dd->cspec->num_msix_entries = tabsize;
6915
6916 /* setup interrupt handler */
6917 qib_setup_7322_interrupt(dd, 1);
6918
6919 /* clear diagctrl register, in case diags were running and crashed */
6920 qib_write_kreg(dd, kr_hwdiagctrl, 0);
6921
6922#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
6923 ret = dca_add_requester(&pdev->dev);
6924 if (!ret) {
6925 dd->flags |= QIB_DCA_ENABLED;
6926 qib_setup_dca(dd);
6927 }
6928#endif
6929 goto bail;
6930
6931bail_cleanup:
6932 qib_pcie_ddcleanup(dd);
6933bail_free:
6934 qib_free_devdata(dd);
6935 dd = ERR_PTR(ret);
6936bail:
6937 return dd;
6938}
6939
6940/*
 6941 * Set the table entry at the specified index from the table specified.
 6942 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
 6943 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
 6944 * 'ridx' below addresses the correct entry, while its 4 LSBs select the
6945 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
6946 */
6947#define DDS_ENT_AMP_LSB 14
6948#define DDS_ENT_MAIN_LSB 9
6949#define DDS_ENT_POST_LSB 5
6950#define DDS_ENT_PRE_XTRA_LSB 3
6951#define DDS_ENT_PRE_LSB 0
6952
6953/*
6954 * Set one entry in the TxDDS table for spec'd port
6955 * ridx picks one of the entries, while tp points
6956 * to the appropriate table entry.
6957 */
6958static void set_txdds(struct qib_pportdata *ppd, int ridx,
6959 const struct txdds_ent *tp)
6960{
6961 struct qib_devdata *dd = ppd->dd;
6962 u32 pack_ent;
6963 int regidx;
6964
6965 /* Get correct offset in chip-space, and in source table */
6966 regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
6967 /*
6968 * We do not use qib_write_kreg_port() because it was intended
6969 * only for registers in the lower "port specific" pages.
6970 * So do index calculation by hand.
6971 */
6972 if (ppd->hw_pidx)
6973 regidx += (dd->palign / sizeof(u64));
6974
6975 pack_ent = tp->amp << DDS_ENT_AMP_LSB;
6976 pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
6977 pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
6978 pack_ent |= tp->post << DDS_ENT_POST_LSB;
6979 qib_write_kreg(dd, regidx, pack_ent);
6980 /* Prevent back-to-back writes by hitting scratch */
6981 qib_write_kreg(ppd->dd, kr_scratch, 0);
6982}
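/*
 * Illustrative sketch, not driver code: the packing performed inside
 * set_txdds(), factored out. For { amp 0, pre 1, main 7, post 15 } this
 * yields (0 << 14) | (7 << 9) | (15 << 5) | (1 << 0) = 0x0fe1.
 */
static inline u32 example_pack_dds(const struct txdds_ent *tp)
{
	return (tp->amp << DDS_ENT_AMP_LSB) |
	       (tp->main << DDS_ENT_MAIN_LSB) |
	       (tp->post << DDS_ENT_POST_LSB) |
	       (tp->pre << DDS_ENT_PRE_LSB);
}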
6983
6984static const struct vendor_txdds_ent vendor_txdds[] = {
6985 { /* Amphenol 1m 30awg NoEq */
6986 { 0x41, 0x50, 0x48 }, "584470002 ",
6987 { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
6988 },
6989 { /* Amphenol 3m 28awg NoEq */
6990 { 0x41, 0x50, 0x48 }, "584470004 ",
6991 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
6992 },
6993 { /* Finisar 3m OM2 Optical */
6994 { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
6995 { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
6996 },
6997 { /* Finisar 30m OM2 Optical */
6998 { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
6999 { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
7000 },
7001 { /* Finisar Default OM2 Optical */
7002 { 0x00, 0x90, 0x65 }, NULL,
7003 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
7004 },
7005 { /* Gore 1m 30awg NoEq */
7006 { 0x00, 0x21, 0x77 }, "QSN3300-1 ",
7007 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
7008 },
7009 { /* Gore 2m 30awg NoEq */
7010 { 0x00, 0x21, 0x77 }, "QSN3300-2 ",
7011 { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
7012 },
7013 { /* Gore 1m 28awg NoEq */
7014 { 0x00, 0x21, 0x77 }, "QSN3800-1 ",
7015 { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
7016 },
7017 { /* Gore 3m 28awg NoEq */
7018 { 0x00, 0x21, 0x77 }, "QSN3800-3 ",
7019 { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
7020 },
7021 { /* Gore 5m 24awg Eq */
7022 { 0x00, 0x21, 0x77 }, "QSN7000-5 ",
7023 { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
7024 },
7025 { /* Gore 7m 24awg Eq */
7026 { 0x00, 0x21, 0x77 }, "QSN7000-7 ",
7027 { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
7028 },
7029 { /* Gore 5m 26awg Eq */
7030 { 0x00, 0x21, 0x77 }, "QSN7600-5 ",
7031 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
7032 },
7033 { /* Gore 7m 26awg Eq */
7034 { 0x00, 0x21, 0x77 }, "QSN7600-7 ",
7035 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
7036 },
7037 { /* Intersil 12m 24awg Active */
7038 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7039 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
7040 },
7041 { /* Intersil 10m 28awg Active */
7042 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7043 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
7044 },
7045 { /* Intersil 7m 30awg Active */
7046 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7047 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
7048 },
7049 { /* Intersil 5m 32awg Active */
7050 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7051 { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
7052 },
7053 { /* Intersil Default Active */
7054 { 0x00, 0x30, 0xB4 }, NULL,
7055 { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
7056 },
7057 { /* Luxtera 20m Active Optical */
7058 { 0x00, 0x25, 0x63 }, NULL,
7059 { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
7060 },
7061 { /* Molex 1M Cu loopback */
7062 { 0x00, 0x09, 0x3A }, "74763-0025 ",
7063 { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
7064 },
7065 { /* Molex 2m 28awg NoEq */
7066 { 0x00, 0x09, 0x3A }, "74757-2201 ",
7067 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
7068 },
7069};
7070
7071static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7072 /* amp, pre, main, post */
7073 { 2, 2, 15, 6 }, /* Loopback */
7074 { 0, 0, 0, 1 }, /* 2 dB */
7075 { 0, 0, 0, 2 }, /* 3 dB */
7076 { 0, 0, 0, 3 }, /* 4 dB */
7077 { 0, 0, 0, 4 }, /* 5 dB */
7078 { 0, 0, 0, 5 }, /* 6 dB */
7079 { 0, 0, 0, 6 }, /* 7 dB */
7080 { 0, 0, 0, 7 }, /* 8 dB */
7081 { 0, 0, 0, 8 }, /* 9 dB */
7082 { 0, 0, 0, 9 }, /* 10 dB */
7083 { 0, 0, 0, 10 }, /* 11 dB */
7084 { 0, 0, 0, 11 }, /* 12 dB */
7085 { 0, 0, 0, 12 }, /* 13 dB */
7086 { 0, 0, 0, 13 }, /* 14 dB */
7087 { 0, 0, 0, 14 }, /* 15 dB */
7088 { 0, 0, 0, 15 }, /* 16 dB */
7089};
7090
7091static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7092 /* amp, pre, main, post */
7093 { 2, 2, 15, 6 }, /* Loopback */
7094 { 0, 0, 0, 8 }, /* 2 dB */
7095 { 0, 0, 0, 8 }, /* 3 dB */
7096 { 0, 0, 0, 9 }, /* 4 dB */
7097 { 0, 0, 0, 9 }, /* 5 dB */
7098 { 0, 0, 0, 10 }, /* 6 dB */
7099 { 0, 0, 0, 10 }, /* 7 dB */
7100 { 0, 0, 0, 11 }, /* 8 dB */
7101 { 0, 0, 0, 11 }, /* 9 dB */
7102 { 0, 0, 0, 12 }, /* 10 dB */
7103 { 0, 0, 0, 12 }, /* 11 dB */
7104 { 0, 0, 0, 13 }, /* 12 dB */
7105 { 0, 0, 0, 13 }, /* 13 dB */
7106 { 0, 0, 0, 14 }, /* 14 dB */
7107 { 0, 0, 0, 14 }, /* 15 dB */
7108 { 0, 0, 0, 15 }, /* 16 dB */
7109};
7110
7111static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7112 /* amp, pre, main, post */
7113 { 2, 2, 15, 6 }, /* Loopback */
7114 { 0, 1, 0, 7 }, /* 2 dB */
7115 { 0, 1, 0, 9 }, /* 3 dB */
7116 { 0, 1, 0, 11 }, /* 4 dB */
7117 { 0, 1, 0, 13 }, /* 5 dB */
7118 { 0, 1, 0, 15 }, /* 6 dB */
7119 { 0, 1, 3, 15 }, /* 7 dB */
7120 { 0, 1, 7, 15 }, /* 8 dB */
7121 { 0, 1, 7, 15 }, /* 9 dB */
7122 { 0, 1, 8, 15 }, /* 10 dB */
7123 { 0, 1, 9, 15 }, /* 11 dB */
7124 { 0, 1, 10, 15 }, /* 12 dB */
7125 { 0, 2, 6, 15 }, /* 13 dB */
7126 { 0, 2, 7, 15 }, /* 14 dB */
7127 { 0, 2, 8, 15 }, /* 15 dB */
7128 { 0, 2, 9, 15 }, /* 16 dB */
7129};
7130
7131static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7132 unsigned atten)
7133{
7134 /*
7135 * The attenuation table starts at 2dB for entry 1,
7136 * with entry 0 being the loopback entry.
7137 */
7138 if (atten <= 2)
7139 atten = 1;
7140 else if (atten > TXDDS_TABLE_SZ)
7141 atten = TXDDS_TABLE_SZ - 1;
7142 else
7143 atten--;
7144 return txdds + atten;
7145}
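/*
 * Worked example: with TXDDS_TABLE_SZ == 16, an advertised attenuation
 * of 11 dB maps to index 10 (the "11 dB" row of the tables above);
 * anything at or below 2 dB uses index 1, and anything above 16 dB is
 * clamped to index 15.
 */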
7146
7147/*
7148 * if override is set, the module parameter cable_atten has a value
7149 * for this specific port, so use it, rather than our normal mechanism.
7150 */
7151static void find_best_ent(struct qib_pportdata *ppd,
7152 const struct txdds_ent **sdr_dds,
7153 const struct txdds_ent **ddr_dds,
7154 const struct txdds_ent **qdr_dds, int override)
7155{
7156 struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7157 int idx;
7158
7159 /* Search table of known cables */
7160 for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7161 const struct vendor_txdds_ent *v = vendor_txdds + idx;
7162
7163 if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7164 (!v->partnum ||
7165 !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7166 *sdr_dds = &v->sdr;
7167 *ddr_dds = &v->ddr;
7168 *qdr_dds = &v->qdr;
7169 return;
7170 }
7171 }
7172
7173 /* Lookup serdes setting by cable type and attenuation */
7174 if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7175 *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7176 *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7177 *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7178 return;
7179 }
7180
7181 if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7182 qd->atten[1])) {
7183 *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7184 *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7185 *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7186 return;
7187 } else {
7188 /*
7189 * If we have no (or incomplete) data from the cable
7190 * EEPROM, or no QSFP, use the module parameter value
 7191 * to index into the attenuation table.
7192 */
7193 *sdr_dds = &txdds_sdr[ppd->cpspec->no_eep];
7194 *ddr_dds = &txdds_ddr[ppd->cpspec->no_eep];
7195 *qdr_dds = &txdds_qdr[ppd->cpspec->no_eep];
7196 }
7197}
7198
7199static void init_txdds_table(struct qib_pportdata *ppd, int override)
7200{
7201 const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7202 struct txdds_ent *dds;
7203 int idx;
7204 int single_ent = 0;
7205
7206 if (IS_QMH(ppd->dd)) {
7207 /* normally will be overridden, via setup_qmh() */
7208 sdr_dds = &qmh_sdr_txdds;
7209 ddr_dds = &qmh_ddr_txdds;
7210 qdr_dds = &qmh_qdr_txdds;
7211 single_ent = 1;
7212 } else if (IS_QME(ppd->dd)) {
7213 sdr_dds = &qme_sdr_txdds;
7214 ddr_dds = &qme_ddr_txdds;
7215 qdr_dds = &qme_qdr_txdds;
7216 single_ent = 1;
7217 } else
7218 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7219
7220 /* Fill in the first entry with the best entry found. */
7221 set_txdds(ppd, 0, sdr_dds);
7222 set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7223 set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7224
7225 /*
7226 * for our current speed, also write that value into the
7227 * tx serdes registers.
7228 */
7229 dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
7230 qdr_dds : (ppd->link_speed_active ==
7231 QIB_IB_DDR ? ddr_dds : sdr_dds));
7232 write_tx_serdes_param(ppd, dds);
7233
7234 /* Fill in the remaining entries with the default table values. */
7235 for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7236 set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7237 set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7238 single_ent ? ddr_dds : txdds_ddr + idx);
7239 set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7240 single_ent ? qdr_dds : txdds_qdr + idx);
7241 }
7242}
7243
7244#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7245#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7246#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7247#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7248#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7249#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7250#define AHB_TRANS_TRIES 10
7251
7252/*
 7253 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
 7254 * 5=subsystem, which is why most calls have "chan + (chan >> 1)"
 7255 * for the channel argument.
7256 */
7257static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7258 u32 data, u32 mask)
7259{
7260 u32 rd_data, wr_data, sz_mask;
7261 u64 trans, acc, prev_acc;
7262 u32 ret = 0xBAD0BAD;
7263 int tries;
7264
7265 prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7266 /* From this point on, make sure we return access */
7267 acc = (quad << 1) | 1;
7268 qib_write_kreg(dd, KR_AHB_ACC, acc);
7269
7270 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7271 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7272 if (trans & AHB_TRANS_RDY)
7273 break;
7274 }
7275 if (tries >= AHB_TRANS_TRIES) {
7276 qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7277 goto bail;
7278 }
7279
7280 /* If mask is not all 1s, we need to read, but different SerDes
7281 * entities have different sizes
7282 */
7283 sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7284 wr_data = data & mask & sz_mask;
7285 if ((~mask & sz_mask) != 0) {
7286 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7287 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7288
7289 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7290 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7291 if (trans & AHB_TRANS_RDY)
7292 break;
7293 }
7294 if (tries >= AHB_TRANS_TRIES) {
7295 qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7296 AHB_TRANS_TRIES);
7297 goto bail;
7298 }
7299 /* Re-read in case host split reads and read data first */
7300 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7301 rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7302 wr_data |= (rd_data & ~mask & sz_mask);
7303 }
7304
7305 /* If mask is not zero, we need to write. */
7306 if (mask & sz_mask) {
7307 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7308 trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7309 trans |= AHB_WR;
7310 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7311
7312 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7313 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7314 if (trans & AHB_TRANS_RDY)
7315 break;
7316 }
7317 if (tries >= AHB_TRANS_TRIES) {
7318 qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7319 AHB_TRANS_TRIES);
7320 goto bail;
7321 }
7322 }
7323 ret = wr_data;
7324bail:
7325 qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7326 return ret;
7327}
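/*
 * Illustrative sketch, not driver code: the read-modify-write merge
 * that ahb_mod() performs reduces to keeping the masked-in bits of the
 * new data and the masked-out bits of the old register contents, both
 * clipped to the entity's size.
 */
static inline u32 example_ahb_merge(u32 old, u32 data, u32 mask, u32 sz_mask)
{
	return (data & mask & sz_mask) | (old & ~mask & sz_mask);
}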
7328
7329static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7330 unsigned mask)
7331{
7332 struct qib_devdata *dd = ppd->dd;
7333 int chan;
7334 u32 rbc;
7335
7336 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7337 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7338 data, mask);
7339 rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7340 addr, 0, 0);
7341 }
7342}
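/*
 * Illustrative sketch, not driver code: chan + (chan >> 1), as used in
 * ibsd_wr_allchans() above, maps serdes channels 0..3 onto AHB slots
 * 0, 1, 3 and 4, skipping slot 2 (the PLL).
 */
static inline int example_chan_to_ahb_slot(int chan)
{
	return chan + (chan >> 1);	/* 0->0, 1->1, 2->3, 3->4 */
}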
7343
7344static int serdes_7322_init(struct qib_pportdata *ppd)
7345{
7346 u64 data;
7347 u32 le_val;
7348
7349 /*
7350 * Initialize the Tx DDS tables. Also done every QSFP event,
7351 * for adapters with QSFP
7352 */
7353 init_txdds_table(ppd, 0);
7354
7355 /* Patch some SerDes defaults to "Better for IB" */
7356 /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7357 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7358
7359 /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7360 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7361 /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7362 ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7363
7364 /* May be overridden in qsfp_7322_event */
7365 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7366 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7367
7368 /* enable LE1 adaptation for all but QME, which is disabled */
7369 le_val = IS_QME(ppd->dd) ? 0 : 1;
7370 ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7371
7372 /* Clear cmode-override, may be set from older driver */
7373 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7374
7375 /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7376 ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7377
7378 /* setup LoS params; these are subsystem, so chan == 5 */
7379 /* LoS filter threshold_count on, ch 0-3, set to 8 */
7380 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7381 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7382 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7383 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7384
7385 /* LoS filter threshold_count off, ch 0-3, set to 4 */
7386 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7387 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7388 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7389 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7390
7391 /* LoS filter select enabled */
7392 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7393
7394 /* LoS target data: SDR=4, DDR=2, QDR=1 */
7395 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7396 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7397 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7398
7399 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7400 qib_write_kreg_port(ppd, krp_serdesctrl, data |
7401 SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
7402
 7403 /* rxbistena; set 0 to avoid effects if it switches later */
7404 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7405
7406 /* Configure 4 DFE taps, and only they adapt */
7407 ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7408
7409 /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7410 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7411 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7412
7413 /*
7414 * Set receive adaptation mode. SDR and DDR adaptation are
7415 * always on, and QDR is initially enabled; later disabled.
7416 */
7417 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7418 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7419 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7420 ppd->dd->cspec->r1 ?
7421 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7422 ppd->cpspec->qdr_dfe_on = 1;
7423
 7424 /* FLoop LOS gate: PPM filter enabled */
7425 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7426
7427 /* rx offset center enabled */
7428 ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7429
7430 if (!ppd->dd->cspec->r1) {
7431 ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7432 ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7433 }
7434
7435 /* Set the frequency loop bandwidth to 15 */
7436 ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7437
7438 return 0;
7439}
7440
7441/* start adjust QMH serdes parameters */
7442
7443static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
7444{
7445 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7446 9, code << 9, 0x3f << 9);
7447}
7448
7449static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
7450 int enable, u32 tapenable)
7451{
7452 if (enable)
7453 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7454 1, 3 << 10, 0x1f << 10);
7455 else
7456 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7457 1, 0, 0x1f << 10);
7458}
7459
7460/* Set clock to 1, 0, 1, 0 */
7461static void clock_man(struct qib_pportdata *ppd, int chan)
7462{
7463 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7464 4, 0x4000, 0x4000);
7465 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7466 4, 0, 0x4000);
7467 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7468 4, 0x4000, 0x4000);
7469 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7470 4, 0, 0x4000);
7471}
7472
7473/*
7474 * write the current Tx serdes pre,post,main,amp settings into the serdes.
7475 * The caller must pass the settings appropriate for the current speed,
7476 * or not care if they are correct for the current speed.
7477 */
7478static void write_tx_serdes_param(struct qib_pportdata *ppd,
7479 struct txdds_ent *txdds)
7480{
7481 u64 deemph;
7482
7483 deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
7484 /* field names for amp, main, post, pre, respectively */
7485 deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
7486 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
7487 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
7488 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
7489 deemph |= 1ULL << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7490 tx_override_deemphasis_select);
7491 deemph |= txdds->amp << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7492 txampcntl_d2a);
7493 deemph |= txdds->main << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7494 txc0_ena);
7495 deemph |= txdds->post << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7496 txcp1_ena);
7497 deemph |= txdds->pre << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7498 txcn1_ena);
7499 qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
7500}
7501
7502/*
 7503 * set per-bay, per-channel parameters. For now, we always set tx
 7504 * parameters, and set them with the same value
 7505 * for all channels, using the channel 0 value. We may switch to
7506 * per-channel settings in the future, and that method only needs
7507 * to be done once.
7508 * Because this also writes the IBC txdds table with a single set
7509 * of values, it should be called only for cases where we want to completely
7510 * force a specific setting, typically only for mez cards.
7511 */
7512static void adj_tx_serdes(struct qib_pportdata *ppd)
7513{
7514 struct txdds_ent txdds;
7515 int i;
7516 u8 *amp, *pre, *mainv, *post;
7517
7518 /*
7519 * Because we use TX_DEEMPHASIS_OVERRIDE, we need to
7520 * always do tx side, just like H1, since it is cleared
7521 * by link down
7522 */
7523 amp = ppd->cpspec->amp;
7524 pre = ppd->cpspec->pre;
7525 mainv = ppd->cpspec->mainv;
7526 post = ppd->cpspec->post;
7527
7528 amp[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7529 txampcntl_d2a);
7530 mainv[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7531 txc0_ena);
7532 post[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7533 txcp1_ena);
7534 pre[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7535 txcn1_ena);
7536
7537 /*
7538 * Use the channel zero values, only, for now, for
7539 * all channels
7540 */
7541 txdds.amp = amp[0];
7542 txdds.pre = pre[0];
7543 txdds.main = mainv[0];
7544 txdds.post = post[0];
7545
7546 /* write the QDR table for IBC use, as backup for link down */
7547 for (i = 0; i < ARRAY_SIZE(txdds_qdr); ++i)
7548 set_txdds(ppd, i + 32, &txdds);
7549
7550 write_tx_serdes_param(ppd, &txdds);
7551}
7552
7553/* set QDR forced value for H1, if needed */
7554static void force_h1(struct qib_pportdata *ppd)
7555{
7556 int chan;
7557
7558 ppd->cpspec->qdr_reforce = 0;
7559 if (!ppd->dd->cspec->r1)
7560 return;
7561
7562 for (chan = 0; chan < SERDES_CHANS; chan++) {
7563 set_man_mode_h1(ppd, chan, 1, 0);
7564 set_man_code(ppd, chan, ppd->cpspec->h1_val);
7565 clock_man(ppd, chan);
7566 set_man_mode_h1(ppd, chan, 0, 0);
7567 }
7568}
7569
7570/*
7571 * Parse the parameters for the QMH7342, to get rx and tx serdes
7572 * settings for that Bay, for both possible mez connectors (PCIe bus)
7573 * and IB link (one link on mez1, two possible on mez2).
7574 *
7575 * Data is comma or white space separated.
7576 *
7577 * A set of data has 7 groups, rx and tx groups have SERDES_CHANS values,
7578 * one per IB lane (serdes channel).
7579 * The groups are Bay, bus# H1 rcv, and amp, pre, post, main Tx values (QDR).
7580 * The Bay # is used only for debugging currently.
7581 * H1 values are set whenever the link goes down, or is at cfg_test or
7582 * cfg_wait_enh. Tx values are programmed once, when this routine is called
 7583 * (and with default values at chip initialization). Values are in any base,
 7584 * strtoul style, and are separated by commas or any white space
7585 * (space, tab, newline).
7586 *
7587 * An example set might look like this (white space vs
7588 * comma used for human ease of reading)
7589 * The ordering is a set of Bay# Bus# H1, amp, pre, post, and main for mez1 IB1,
7590 * repeat for mez2 IB1, then mez2 IB2.
7591 *
7592 * B B H1:0 amp:0 pre:0 post: 0 main:0
7593 * a u H1: 1 amp: 1 pre: 1 post: 1 main: 1
7594 * y s H1: 2 amp: 2 pre: 2 post: 2 main: 2
7595 * H1: 4 amp: 3 pre: 3 post: 3 main: 3
7596 * 1 3 8,6,5,6 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3
7597 * 1 6 7,6,6,7 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3
7598 * 1 6 9,7,7,8 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3
7599 */
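/*
 * Hypothetical usage sketch: with the format above, loading the driver
 * with parameters for one bay might look like
 *
 *   modprobe ib_qib qmh_serdes_setup="1 3 8,6,5,6 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3 ..."
 *
 * continuing with the mez2 IB1 and mez2 IB2 sets. The parameter name
 * matches the error text in setup_qmh_params() below; the module name
 * and exact invocation are assumptions for illustration only.
 */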
7600#define N_QMH_FIELDS 22
7601static int setup_qmh_params(const char *str, struct kernel_param *kp)
7602{
7603 char *abuf, *v, *nv, *nvp;
7604 struct qib_devdata *dd;
7605 struct qib_pportdata *ppd;
7606 u32 mez, vlen, nf, port, bay;
7607 int ret = 0, found = 0;
7608
7609 vlen = strlen(str) + 1;
7610 abuf = kmalloc(vlen, GFP_KERNEL);
7611 if (!abuf) {
7612 printk(KERN_INFO QIB_DRV_NAME
7613 " Unable to allocate QMH param buffer; ignoring\n");
7614 return 0;
7615 }
7616 memcpy(abuf, str, vlen);
7617 v = abuf;
7618
7619 /* these 3 are because gcc can't know they are set before used */
7620 port = 1;
7621 mez = 1; /* used only for debugging */
7622 bay = 0; /* used only for debugging */
7623 ppd = NULL;
7624 for (nf = 0; (nv = strsep(&v, ", \t\n\r")) &&
7625 nf < (N_QMH_FIELDS * 3);) {
7626 u32 val;
7627
7628 if (!*nv)
7629 /* allow for multiple separators */
7630 continue;
7631
7632 val = simple_strtoul(nv, &nvp, 0);
7633 if (nv == nvp) {
7634 printk(KERN_INFO QIB_DRV_NAME
7635 " Bay%u, mez%u IB%u non-numeric value (%s) "
7636 "field #%u, ignoring rest\n", bay, mez,
7637 port, nv, nf % (N_QMH_FIELDS * 3));
7638 ret = -EINVAL;
7639 goto bail;
7640 }
7641 if (!(nf % N_QMH_FIELDS)) {
7642 ppd = NULL;
7643 bay = val;
7644 if (!bay || bay > 16) {
7645 printk(KERN_INFO QIB_DRV_NAME
7646 " Invalid bay # %u, field %u, "
7647 "ignoring rest\n", bay, nf);
7648 ret = -EINVAL;
7649 goto bail;
7650 }
7651 } else if ((nf % N_QMH_FIELDS) == 1) {
7652 u32 bus = val;
7653 if (nf == 1) {
7654 mez = 1;
7655 port = 1;
7656 } else if (nf == (N_QMH_FIELDS + 1)) {
7657 mez = 2;
7658 port = 1;
7659 } else {
7660 mez = 2;
7661 port = 2;
7662 }
7663 list_for_each_entry(dd, &qib_dev_list, list) {
7664 if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322
7665 || !IS_QMH(dd))
7666 continue; /* only for QMH cards */
7667 if (dd->pcidev->bus->number == bus) {
7668 found++;
7669 ppd = &dd->pport[port - 1];
7670 }
7671 }
7672 } else if (ppd) {
7673 u32 parm = (nf % N_QMH_FIELDS) - 2;
7674 if (parm < SERDES_CHANS && !(parm % SERDES_CHANS))
7675 ppd->cpspec->h1_val = val;
7676 else if (parm < (2 * SERDES_CHANS))
7677 ppd->cpspec->amp[parm % SERDES_CHANS] = val;
7678 else if (parm < (3 * SERDES_CHANS))
7679 ppd->cpspec->pre[parm % SERDES_CHANS] = val;
7680 else if (parm < (4 * SERDES_CHANS))
7681 ppd->cpspec->post[parm % SERDES_CHANS] = val;
7682 else {
7683 ppd->cpspec->mainv[parm % SERDES_CHANS] = val;
7684 /* At the end of a port, set params */
7685 if (parm == ((5 * SERDES_CHANS) - 1))
7686 adj_tx_serdes(ppd);
7687 }
7688 }
7689 nf++;
7690 }
7691 if (!found) {
7692 printk(KERN_ERR QIB_DRV_NAME
7693 ": No match found for qmh_serdes_setup parameter\n");
7694 ret = -EINVAL;
7695 }
7696bail:
7697 kfree(abuf);
7698 return ret;
7699}
7700
7701/*
7702 * Similarly for QME7342, but the format is simpler, values are the
7703 * same for all mez card positions in a blade (2 or 4 per blade), but
7704 * are different for some blades vs others, and we don't need to
7705 * specify different parameters for different serdes channels or different
7706 * IB ports.
7707 * Format is: h1 amp,pre,post,main
7708 * Alternate format (so ports can be different): Pport# h1 amp,pre,post,main
7709 */
7710#define N_QME_FIELDS 5
7711static int setup_qme_params(const char *str, struct kernel_param *kp)
7712{
7713 char *abuf, *v, *nv, *nvp;
7714 struct qib_devdata *dd;
7715 u32 vlen, nf, port = 0;
7716 u8 h1, tx[4]; /* amp, pre, post, main */
7717 int ret = -EINVAL;
7718 char *seplist;
7719
7720 vlen = strlen(str) + 1;
7721 abuf = kmalloc(vlen, GFP_KERNEL);
7722 if (!abuf) {
7723 printk(KERN_INFO QIB_DRV_NAME
7724 " Unable to allocate QME param buffer; ignoring\n");
7725 return 0;
7726 }
7727 strncpy(abuf, str, vlen);
7728
7729 v = abuf;
7730 seplist = " \t";
7731 h1 = H1_FORCE_QME; /* gcc can't figure out always set before used */
7732
7733 for (nf = 0; (nv = strsep(&v, seplist)); ) {
7734 u32 val;
7735
7736 if (!*nv)
7737 /* allow for multiple separators */
7738 continue;
7739
7740 if (!nf && *nv == 'P') {
7741 /* alternate format with port */
7742 val = simple_strtoul(++nv, &nvp, 0);
 7743 if (nv == nvp || val > NUM_IB_PORTS) {
7744 printk(KERN_INFO QIB_DRV_NAME
7745 " %s: non-numeric port value (%s) "
7746 "ignoring rest\n", __func__, nv);
7747 goto done;
7748 }
7749 port = val;
7750 continue; /* without incrementing nf */
7751 }
7752 val = simple_strtoul(nv, &nvp, 0);
7753 if (nv == nvp) {
7754 printk(KERN_INFO QIB_DRV_NAME
7755 " %s: non-numeric value (%s) "
7756 "field #%u, ignoring rest\n", __func__,
7757 nv, nf);
7758 goto done;
7759 }
7760 if (!nf) {
7761 h1 = val;
7762 seplist = ",";
7763 } else
7764 tx[nf - 1] = val;
7765 if (++nf == N_QME_FIELDS) {
7766 list_for_each_entry(dd, &qib_dev_list, list) {
7767 int pidx, i;
7768 if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322
7769 || !IS_QME(dd))
7770 continue; /* only for QME cards */
7771 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
7772 struct qib_pportdata *ppd;
7773 ppd = &dd->pport[pidx];
7774 if ((port && ppd->port != port) ||
7775 !ppd->link_speed_supported)
7776 continue;
7777 ppd->cpspec->h1_val = h1;
7778 for (i = 0; i < SERDES_CHANS; i++) {
7779 ppd->cpspec->amp[i] = tx[0];
7780 ppd->cpspec->pre[i] = tx[1];
7781 ppd->cpspec->post[i] = tx[2];
7782 ppd->cpspec->mainv[i] = tx[3];
7783 }
7784 adj_tx_serdes(ppd);
7785 }
7786 }
7787 ret = 0;
7788 goto done;
7789 }
7790 }
7791 printk(KERN_INFO QIB_DRV_NAME
7792 " %s: Only %u of %u fields provided, skipping\n",
7793 __func__, nf, N_QME_FIELDS);
7794done:
7795 kfree(abuf);
7796 return ret;
7797}
7798
7799#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
7800#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
7801
7802#define R_OPCODE_LSB 3
7803#define R_OP_NOP 0
7804#define R_OP_SHIFT 2
7805#define R_OP_UPDATE 3
7806#define R_TDI_LSB 2
7807#define R_TDO_LSB 1
7808#define R_RDY 1
7809
7810static int qib_r_grab(struct qib_devdata *dd)
7811{
7812 u64 val;
7813 val = SJA_EN;
7814 qib_write_kreg(dd, kr_r_access, val);
7815 qib_read_kreg32(dd, kr_scratch);
7816 return 0;
7817}
7818
7819/* qib_r_wait_for_rdy() not only waits for the ready bit, it
7820 * returns the current state of R_TDO
7821 */
7822static int qib_r_wait_for_rdy(struct qib_devdata *dd)
7823{
7824 u64 val;
7825 int timeout;
7826 for (timeout = 0; timeout < 100 ; ++timeout) {
7827 val = qib_read_kreg32(dd, kr_r_access);
7828 if (val & R_RDY)
7829 return (val >> R_TDO_LSB) & 1;
7830 }
7831 return -1;
7832}
7833
7834static int qib_r_shift(struct qib_devdata *dd, int bisten,
7835 int len, u8 *inp, u8 *outp)
7836{
7837 u64 valbase, val;
7838 int ret, pos;
7839
7840 valbase = SJA_EN | (bisten << BISTEN_LSB) |
7841 (R_OP_SHIFT << R_OPCODE_LSB);
7842 ret = qib_r_wait_for_rdy(dd);
7843 if (ret < 0)
7844 goto bail;
7845 for (pos = 0; pos < len; ++pos) {
7846 val = valbase;
7847 if (outp) {
7848 outp[pos >> 3] &= ~(1 << (pos & 7));
7849 outp[pos >> 3] |= (ret << (pos & 7));
7850 }
7851 if (inp) {
7852 int tdi = inp[pos >> 3] >> (pos & 7);
7853 val |= ((tdi & 1) << R_TDI_LSB);
7854 }
7855 qib_write_kreg(dd, kr_r_access, val);
7856 qib_read_kreg32(dd, kr_scratch);
7857 ret = qib_r_wait_for_rdy(dd);
7858 if (ret < 0)
7859 break;
7860 }
7861 /* Restore to NOP between operations. */
7862 val = SJA_EN | (bisten << BISTEN_LSB);
7863 qib_write_kreg(dd, kr_r_access, val);
7864 qib_read_kreg32(dd, kr_scratch);
7865 ret = qib_r_wait_for_rdy(dd);
7866
7867 if (ret >= 0)
7868 ret = pos;
7869bail:
7870 return ret;
7871}
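/*
 * Illustrative sketch, not driver code: qib_r_shift() streams bits
 * LSB-first through the byte arrays, so stream bit 'pos' lives at byte
 * (pos >> 3), bit (pos & 7). Writing one bit of such a stream:
 */
static inline void example_stream_set_bit(u8 *buf, int pos, int val)
{
	buf[pos >> 3] &= ~(1 << (pos & 7));		/* clear old bit */
	buf[pos >> 3] |= (val & 1) << (pos & 7);	/* set new value */
}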
7872
7873static int qib_r_update(struct qib_devdata *dd, int bisten)
7874{
7875 u64 val;
7876 int ret;
7877
7878 val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
7879 ret = qib_r_wait_for_rdy(dd);
7880 if (ret >= 0) {
7881 qib_write_kreg(dd, kr_r_access, val);
7882 qib_read_kreg32(dd, kr_scratch);
7883 }
7884 return ret;
7885}
7886
7887#define BISTEN_PORT_SEL 15
7888#define LEN_PORT_SEL 625
7889#define BISTEN_AT 17
7890#define LEN_AT 156
7891#define BISTEN_ETM 16
7892#define LEN_ETM 632
7893
7894#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
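/*
 * Worked example: BIT2BYTE(LEN_PORT_SEL) = BIT2BYTE(625) = 79 and
 * BIT2BYTE(LEN_ETM) = BIT2BYTE(632) = 79, matching the 79-byte arrays
 * below; BIT2BYTE(LEN_AT) = BIT2BYTE(156) = 20.
 */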
7895
7896/* these are common for all IB port use cases. */
7897static u8 reset_at[BIT2BYTE(LEN_AT)] = {
7898 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7899 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7900};
7901static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
7902 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7903 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7904 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
7905 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
7906 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
7907 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
7908 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7909 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
7910};
7911static u8 at[BIT2BYTE(LEN_AT)] = {
7912 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
7913 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
7914};
7915
7916/* used for IB1 or IB2, only one in use */
7917static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
7918 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7919 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7920 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7921 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
7922 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7923 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
7924 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
7925 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
7926};
7927
7928/* used when both IB1 and IB2 are in use */
7929static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
7930 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7931 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
7932 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7933 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
7934 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
7935 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
7936 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
7937 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
7938};
7939
7940/* used when only IB1 is in use */
7941static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
7942 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
7943 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
7944 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7945 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7946 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
7947 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7948 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7949 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
7950};
7951
7952/* used when only IB2 is in use */
7953static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
7954 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
7955 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
7956 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
7957 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
7958 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
7959 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
7960 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
7961 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
7962};
7963
7964/* used when both IB1 and IB2 are in use */
7965static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
7966 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
7967 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
7968 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7969 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
7970 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
7971 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
7972 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
7973 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
7974};
7975
7976/*
 7977 * Do setup to properly handle IB link recovery; if "both" is set, we
 7978 * are initializing to cover both ports; otherwise we are initializing
 7979 * to cover a single port card, or the port has reached INIT and we may
 7980 * need to switch coverage types.
7981 */
7982static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
7983{
7984 u8 *portsel, *etm;
7985 struct qib_devdata *dd = ppd->dd;
7986
7987 if (!ppd->dd->cspec->r1)
7988 return;
7989 if (!both) {
7990 dd->cspec->recovery_ports_initted++;
7991 ppd->cpspec->recovery_init = 1;
7992 }
7993 if (!both && dd->cspec->recovery_ports_initted == 1) {
7994 portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
7995 etm = atetm_1port;
7996 } else {
7997 portsel = portsel_2port;
7998 etm = atetm_2port;
7999 }
8000
8001 if (qib_r_grab(dd) < 0 ||
8002 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8003 qib_r_update(dd, BISTEN_ETM) < 0 ||
8004 qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8005 qib_r_update(dd, BISTEN_AT) < 0 ||
8006 qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8007 portsel, NULL) < 0 ||
8008 qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8009 qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8010 qib_r_update(dd, BISTEN_AT) < 0 ||
8011 qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8012 qib_r_update(dd, BISTEN_ETM) < 0)
8013 qib_dev_err(dd, "Failed IB link recovery setup\n");
8014}
8015
8016static void check_7322_rxe_status(struct qib_pportdata *ppd)
8017{
8018 struct qib_devdata *dd = ppd->dd;
8019 u64 fmask;
8020
8021 if (dd->cspec->recovery_ports_initted != 1)
8022 return; /* rest doesn't apply to dualport */
8023 qib_write_kreg(dd, kr_control, dd->control |
8024 SYM_MASK(Control, FreezeMode));
8025 (void)qib_read_kreg64(dd, kr_scratch);
8026 udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8027 fmask = qib_read_kreg64(dd, kr_act_fmask);
8028 if (!fmask) {
8029 /*
8030 * require a powercycle before we'll work again, and make
8031 * sure we get no more interrupts, and don't turn off
8032 * freeze.
8033 */
8034 ppd->dd->cspec->stay_in_freeze = 1;
8035 qib_7322_set_intr_state(ppd->dd, 0);
8036 qib_write_kreg(dd, kr_fmask, 0ULL);
8037 qib_dev_err(dd, "HCA unusable until powercycled\n");
8038 return; /* eventually reset */
8039 }
8040
8041 qib_write_kreg(ppd->dd, kr_hwerrclear,
8042 SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8043
8044 /* don't do the full clear_freeze(), not needed for this */
8045 qib_write_kreg(dd, kr_control, dd->control);
8046 qib_read_kreg32(dd, kr_scratch);
8047 /* take IBC out of reset */
8048 if (ppd->link_speed_supported) {
8049 ppd->cpspec->ibcctrl_a &=
8050 ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8051 qib_write_kreg_port(ppd, krp_ibcctrl_a,
8052 ppd->cpspec->ibcctrl_a);
8053 qib_read_kreg32(dd, kr_scratch);
8054 if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8055 qib_set_ib_7322_lstate(ppd, 0,
8056 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8057 }
8058}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
new file mode 100644
index 000000000000..c0139c07e97e
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -0,0 +1,1580 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/pci.h>
36#include <linux/netdevice.h>
37#include <linux/vmalloc.h>
38#include <linux/delay.h>
39#include <linux/idr.h>
40
41#include "qib.h"
42#include "qib_common.h"
43
44/*
 45 * min buffers we want to have per user context, after the driver's own
46 */
47#define QIB_MIN_USER_CTXT_BUFCNT 7
48
49#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
50#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
51#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)
52
53/*
54 * Number of ctxts we are configured to use (to allow for more pio
55 * buffers per ctxt, etc.) Zero means use chip value.
56 */
57ushort qib_cfgctxts;
58module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
59MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");
60
61/*
62 * If set, do not write to any regs if avoidable, hack to allow
63 * check for deranged default register values.
64 */
65ushort qib_mini_init;
66module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
67MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");
68
69unsigned qib_n_krcv_queues;
70module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
71MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
72
73/*
74 * qib_wc_pat parameter:
75 * 0 is WC via MTRR
76 * 1 is WC via PAT
 77 * If PAT initialization fails, code reverts to MTRR
78 */
79unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
80module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
81MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
82
83struct workqueue_struct *qib_wq;
84struct workqueue_struct *qib_cq_wq;
85
86static void verify_interrupt(unsigned long);
87
88static struct idr qib_unit_table;
89u32 qib_cpulist_count;
90unsigned long *qib_cpulist;
91
92/* set number of contexts we'll actually use */
93void qib_set_ctxtcnt(struct qib_devdata *dd)
94{
95 if (!qib_cfgctxts)
96 dd->cfgctxts = dd->ctxtcnt;
97 else if (qib_cfgctxts < dd->num_pports)
98 dd->cfgctxts = dd->ctxtcnt;
99 else if (qib_cfgctxts <= dd->ctxtcnt)
100 dd->cfgctxts = qib_cfgctxts;
101 else
102 dd->cfgctxts = dd->ctxtcnt;
103}
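/*
 * Illustrative note: the net effect above is a clamp -- a cfgctxts
 * module parameter of zero, below num_pports, or above the chip's
 * ctxtcnt falls back to ctxtcnt; anything else is used as given.
 */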
104
105/*
106 * Common code for creating the receive context array.
107 */
108int qib_create_ctxts(struct qib_devdata *dd)
109{
110 unsigned i;
111 int ret;
112
113 /*
114 * Allocate full ctxtcnt array, rather than just cfgctxts, because
115 * cleanup iterates across all possible ctxts.
116 */
117 dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
118 if (!dd->rcd) {
119 qib_dev_err(dd, "Unable to allocate ctxtdata array, "
120 "failing\n");
121 ret = -ENOMEM;
122 goto done;
123 }
124
125 /* create (one or more) kctxt */
126 for (i = 0; i < dd->first_user_ctxt; ++i) {
127 struct qib_pportdata *ppd;
128 struct qib_ctxtdata *rcd;
129
130 if (dd->skip_kctxt_mask & (1 << i))
131 continue;
132
133 ppd = dd->pport + (i % dd->num_pports);
134 rcd = qib_create_ctxtdata(ppd, i);
135 if (!rcd) {
136 qib_dev_err(dd, "Unable to allocate ctxtdata"
137 " for Kernel ctxt, failing\n");
138 ret = -ENOMEM;
139 goto done;
140 }
141 rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
142 rcd->seq_cnt = 1;
143 }
144 ret = 0;
145done:
146 return ret;
147}
148
149/*
150 * Common code for user and kernel context setup.
151 */
152struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
153{
154 struct qib_devdata *dd = ppd->dd;
155 struct qib_ctxtdata *rcd;
156
157 rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
158 if (rcd) {
159 INIT_LIST_HEAD(&rcd->qp_wait_list);
160 rcd->ppd = ppd;
161 rcd->dd = dd;
162 rcd->cnt = 1;
163 rcd->ctxt = ctxt;
164 dd->rcd[ctxt] = rcd;
165
166 dd->f_init_ctxt(rcd);
167
168 /*
169 * To avoid wasting a lot of memory, we allocate 32KB chunks
170 * of physically contiguous memory, advance through it until
171 * used up and then allocate more. Of course, we need
172 * memory to store those extra pointers, now. 32KB seems to
173 * be the most that is "safe" under memory pressure
174 * (creating large files and then copying them over
175 * NFS while doing lots of MPI jobs). The OOM killer can
 176 * get invoked, even though we say we can sleep, and this can
177 * cause significant system problems....
178 */
179 rcd->rcvegrbuf_size = 0x8000;
180 rcd->rcvegrbufs_perchunk =
181 rcd->rcvegrbuf_size / dd->rcvegrbufsize;
182 rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
183 rcd->rcvegrbufs_perchunk - 1) /
184 rcd->rcvegrbufs_perchunk;
185 }
186 return rcd;
187}
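/*
 * Illustrative note, not driver code: the chunk computation above is a
 * ceiling division, equivalent to the kernel's DIV_ROUND_UP():
 *
 *	rcd->rcvegrbuf_chunks =
 *		DIV_ROUND_UP(rcd->rcvegrcnt, rcd->rcvegrbufs_perchunk);
 */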
188
189/*
190 * Common code for initializing the physical port structure.
191 */
192void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
193 u8 hw_pidx, u8 port)
194{
195 ppd->dd = dd;
196 ppd->hw_pidx = hw_pidx;
197 ppd->port = port; /* IB port number, not index */
198
199 spin_lock_init(&ppd->sdma_lock);
200 spin_lock_init(&ppd->lflags_lock);
201 init_waitqueue_head(&ppd->state_wait);
202
203 init_timer(&ppd->symerr_clear_timer);
204 ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup;
205 ppd->symerr_clear_timer.data = (unsigned long)ppd;
206}
207
208static int init_pioavailregs(struct qib_devdata *dd)
209{
210 int ret, pidx;
211 u64 *status_page;
212
213 dd->pioavailregs_dma = dma_alloc_coherent(
214 &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
215 GFP_KERNEL);
216 if (!dd->pioavailregs_dma) {
217 qib_dev_err(dd, "failed to allocate PIOavail reg area "
218 "in memory\n");
219 ret = -ENOMEM;
220 goto done;
221 }
222
223 /*
224 * We really want L2 cache aligned, but for current CPUs of
225 * interest, they are the same.
226 */
227 status_page = (u64 *)
228 ((char *) dd->pioavailregs_dma +
229 ((2 * L1_CACHE_BYTES +
230 dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
231 /* device status comes first, for backwards compatibility */
232 dd->devstatusp = status_page;
233 *status_page++ = 0;
234 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
235 dd->pport[pidx].statusp = status_page;
236 *status_page++ = 0;
237 }
238
239 /*
240 * Setup buffer to hold freeze and other messages, accessible to
241 * apps, following statusp. This is per-unit, not per port.
242 */
243 dd->freezemsg = (char *) status_page;
244 *dd->freezemsg = 0;
245 /* length of msg buffer is "whatever is left" */
246 ret = (char *) status_page - (char *) dd->pioavailregs_dma;
247 dd->freezelen = PAGE_SIZE - ret;
248
249 ret = 0;
250
251done:
252 return ret;
253}
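/*
 * Illustrative note, not driver code: the conventional idiom for
 * rounding an offset up to a (power-of-two) cache line is
 *
 *	aligned = (off + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
 *
 * i.e. ALIGN(off, L1_CACHE_BYTES); the expression above is written
 * differently but still places the status page past the DMA'ed
 * pioavail copies.
 */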
254
255/**
256 * init_shadow_tids - allocate the shadow TID array
257 * @dd: the qlogic_ib device
258 *
259 * allocate the shadow TID array, so we can qib_munlock previous
260 * entries. It may make more sense to move the pageshadow to the
261 * ctxt data structure, so we only allocate memory for ctxts actually
 262 * in use, since we are at 8k per ctxt now.
263 * We don't want failures here to prevent use of the driver/chip,
264 * so no return value.
265 */
266static void init_shadow_tids(struct qib_devdata *dd)
267{
268 struct page **pages;
269 dma_addr_t *addrs;
270
271 pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
272 if (!pages) {
273 qib_dev_err(dd, "failed to allocate shadow page * "
274 "array, no expected sends!\n");
275 goto bail;
276 }
277
278 addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
279 if (!addrs) {
280 qib_dev_err(dd, "failed to allocate shadow dma handle "
281 "array, no expected sends!\n");
282 goto bail_free;
283 }
284
285 memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
286 memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
287
288 dd->pageshadow = pages;
289 dd->physshadow = addrs;
290 return;
291
292bail_free:
293 vfree(pages);
294bail:
295 dd->pageshadow = NULL;
296}
297
298/*
299 * Do initialization for device that is only needed on
300 * first detect, not on resets.
301 */
302static int loadtime_init(struct qib_devdata *dd)
303{
304 int ret = 0;
305
306 if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
307 QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
308 qib_dev_err(dd, "Driver only handles version %d, "
 309 "chip swversion is %d (%llx), failing\n",
310 QIB_CHIP_SWVERSION,
311 (int)(dd->revision >>
312 QLOGIC_IB_R_SOFTWARE_SHIFT) &
313 QLOGIC_IB_R_SOFTWARE_MASK,
314 (unsigned long long) dd->revision);
315 ret = -ENOSYS;
316 goto done;
317 }
318
319 if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
320 qib_devinfo(dd->pcidev, "%s", dd->boardversion);
321
322 spin_lock_init(&dd->pioavail_lock);
323 spin_lock_init(&dd->sendctrl_lock);
324 spin_lock_init(&dd->uctxt_lock);
325 spin_lock_init(&dd->qib_diag_trans_lock);
326 spin_lock_init(&dd->eep_st_lock);
327 mutex_init(&dd->eep_lock);
328
329 if (qib_mini_init)
330 goto done;
331
332 ret = init_pioavailregs(dd);
333 init_shadow_tids(dd);
334
335 qib_get_eeprom_info(dd);
336
 337 /* set up timer (don't start yet) to verify we get an interrupt */
338 init_timer(&dd->intrchk_timer);
339 dd->intrchk_timer.function = verify_interrupt;
340 dd->intrchk_timer.data = (unsigned long) dd;
341
342done:
343 return ret;
344}
345
346/**
347 * init_after_reset - re-initialize after a reset
348 * @dd: the qlogic_ib device
349 *
350 * sanity check at least some of the values after reset, and
 351 * ensure no receive or transmit (explicitly, in case reset
 352 * failed)
353 */
354static int init_after_reset(struct qib_devdata *dd)
355{
356 int i;
357
358 /*
359 * Ensure chip does no sends or receives, tail updates, or
360 * pioavail updates while we re-initialize. This is mostly
361 * for the driver data structures, not chip registers.
362 */
363 for (i = 0; i < dd->num_pports; ++i) {
364 /*
365 * ctxt == -1 means "all contexts". Only really safe for
366 * _dis_abling things, as here.
367 */
368 dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
369 QIB_RCVCTRL_INTRAVAIL_DIS |
370 QIB_RCVCTRL_TAILUPD_DIS, -1);
371 /* Redundant across ports for some, but no big deal. */
372 dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
373 QIB_SENDCTRL_AVAIL_DIS);
374 }
375
376 return 0;
377}
378
379static void enable_chip(struct qib_devdata *dd)
380{
381 u64 rcvmask;
382 int i;
383
384 /*
385 * Enable PIO send, and update of PIOavail regs to memory.
386 */
387 for (i = 0; i < dd->num_pports; ++i)
388 dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
389 QIB_SENDCTRL_AVAIL_ENB);
390 /*
391 * Enable kernel ctxts' receive and receive interrupt.
392 * Other ctxts done as user opens and inits them.
393 */
394 rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
395 rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
396 QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
397 for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
398 struct qib_ctxtdata *rcd = dd->rcd[i];
399
400 if (rcd)
401 dd->f_rcvctrl(rcd->ppd, rcvmask, i);
402 }
403}
404
405static void verify_interrupt(unsigned long opaque)
406{
407 struct qib_devdata *dd = (struct qib_devdata *) opaque;
408
409 if (!dd)
410 return; /* being torn down */
411
412 /*
 413 * If we don't have any interrupts, let the user know and
414 * don't bother checking again.
415 */
416 if (dd->int_counter == 0) {
417 if (!dd->f_intr_fallback(dd))
418 dev_err(&dd->pcidev->dev, "No interrupts detected, "
419 "not usable.\n");
420 else /* re-arm the timer to see if fallback works */
421 mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
422 }
423}
424
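/*
 * For reference only: on kernels with the timer_setup() API, this
 * function/data pairing would lose the unsigned-long casts. A minimal
 * sketch, assuming the same intrchk_timer member of struct qib_devdata:
 *
 *	static void verify_interrupt(struct timer_list *t)
 *	{
 *		struct qib_devdata *dd = from_timer(dd, t, intrchk_timer);
 *		...
 *	}
 *
 *	timer_setup(&dd->intrchk_timer, verify_interrupt, 0);
 */
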
425static void init_piobuf_state(struct qib_devdata *dd)
426{
427 int i, pidx;
428 u32 uctxts;
429
430 /*
431 * Ensure all buffers are free, and fifos empty. Buffers
432 * are common, so only do once for port 0.
433 *
434 * After enable and qib_chg_pioavailkernel so we can safely
435 * enable pioavail updates and PIOENABLE. After this, packets
436 * are ready and able to go out.
437 */
438 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
439 for (pidx = 0; pidx < dd->num_pports; ++pidx)
440 dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);
441
442 /*
 443 * If the sendbufs don't divide evenly, give one extra to each of
 444 * the lower numbered contexts. pbufsctxt and lastctxt_piobuf are
445 * calculated in chip-specific code because it may cause some
446 * chip-specific adjustments to be made.
447 */
448 uctxts = dd->cfgctxts - dd->first_user_ctxt;
449 dd->ctxts_extrabuf = dd->pbufsctxt ?
450 dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
451
452 /*
453 * Set up the shadow copies of the piobufavail registers,
454 * which we compare against the chip registers for now, and
455 * the in memory DMA'ed copies of the registers.
456 * By now pioavail updates to memory should have occurred, so
457 * copy them into our working/shadow registers; this is in
458 * case something went wrong with abort, but mostly to get the
459 * initial values of the generation bit correct.
460 */
461 for (i = 0; i < dd->pioavregs; i++) {
462 __le64 tmp;
463
464 tmp = dd->pioavailregs_dma[i];
465 /*
466 * Don't need to worry about pioavailkernel here
467 * because we will call qib_chg_pioavailkernel() later
468 * in initialization, to busy out buffers as needed.
469 */
470 dd->pioavailshadow[i] = le64_to_cpu(tmp);
471 }
472 while (i < ARRAY_SIZE(dd->pioavailshadow))
473 dd->pioavailshadow[i++] = 0; /* for debugging sanity */
474
475 /* after pioavailshadow is setup */
476 qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
477 TXCHK_CHG_TYPE_KERN, NULL);
478 dd->f_initvl15_bufs(dd);
479}
480
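/*
 * Worked example of the ctxts_extrabuf arithmetic above, using
 * hypothetical numbers: if lastctxt_piobuf leaves 130 buffers for
 * uctxts = 16 user contexts and pbufsctxt = 8, then
 * ctxts_extrabuf = 130 - (8 * 16) = 2, so the two lowest-numbered
 * user contexts each get one buffer beyond pbufsctxt.
 */
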
481/**
482 * qib_init - do the actual initialization sequence on the chip
483 * @dd: the qlogic_ib device
484 * @reinit: reinitializing, so don't allocate new memory
485 *
486 * Do the actual initialization sequence on the chip. This is done
487 * both from the init routine called from the PCI infrastructure, and
488 * when we reset the chip, or detect that it was reset internally,
489 * or it's administratively re-enabled.
490 *
491 * Memory allocation here and in called routines is only done in
492 * the first case (reinit == 0). We have to be careful, because even
493 * without memory allocation, we need to re-write all the chip registers
494 * TIDs, etc. after the reset or enable has completed.
495 */
496int qib_init(struct qib_devdata *dd, int reinit)
497{
498 int ret = 0, pidx, lastfail = 0;
499 u32 portok = 0;
500 unsigned i;
501 struct qib_ctxtdata *rcd;
502 struct qib_pportdata *ppd;
503 unsigned long flags;
504
505 /* Set linkstate to unknown, so we can watch for a transition. */
506 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
507 ppd = dd->pport + pidx;
508 spin_lock_irqsave(&ppd->lflags_lock, flags);
509 ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
510 QIBL_LINKDOWN | QIBL_LINKINIT |
511 QIBL_LINKV);
512 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
513 }
514
515 if (reinit)
516 ret = init_after_reset(dd);
517 else
518 ret = loadtime_init(dd);
519 if (ret)
520 goto done;
521
522 /* Bypass most chip-init, to get to device creation */
523 if (qib_mini_init)
524 return 0;
525
526 ret = dd->f_late_initreg(dd);
527 if (ret)
528 goto done;
529
530 /* dd->rcd can be NULL if early init failed */
531 for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
532 /*
533 * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
534 * re-init, the simplest way to handle this is to free
535 * existing, and re-allocate.
536 * Need to re-create rest of ctxt 0 ctxtdata as well.
537 */
538 rcd = dd->rcd[i];
539 if (!rcd)
540 continue;
541
542 lastfail = qib_create_rcvhdrq(dd, rcd);
543 if (!lastfail)
544 lastfail = qib_setup_eagerbufs(rcd);
545 if (lastfail) {
546 qib_dev_err(dd, "failed to allocate kernel ctxt's "
547 "rcvhdrq and/or egr bufs\n");
548 continue;
549 }
550 }
551
552 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
553 int mtu;
554 if (lastfail)
555 ret = lastfail;
556 ppd = dd->pport + pidx;
557 mtu = ib_mtu_enum_to_int(qib_ibmtu);
558 if (mtu == -1) {
559 mtu = QIB_DEFAULT_MTU;
560 qib_ibmtu = 0; /* don't leave invalid value */
561 }
562 /* set max we can ever have for this driver load */
563 ppd->init_ibmaxlen = min(mtu > 2048 ?
564 dd->piosize4k : dd->piosize2k,
565 dd->rcvegrbufsize +
566 (dd->rcvhdrentsize << 2));
567 /*
568 * Have to initialize ibmaxlen, but this will normally
569 * change immediately in qib_set_mtu().
570 */
571 ppd->ibmaxlen = ppd->init_ibmaxlen;
572 qib_set_mtu(ppd, mtu);
573
574 spin_lock_irqsave(&ppd->lflags_lock, flags);
575 ppd->lflags |= QIBL_IB_LINK_DISABLED;
576 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
577
578 lastfail = dd->f_bringup_serdes(ppd);
579 if (lastfail) {
580 qib_devinfo(dd->pcidev,
581 "Failed to bringup IB port %u\n", ppd->port);
582 lastfail = -ENETDOWN;
583 continue;
584 }
585
586 /* let link come up, and enable IBC */
587 spin_lock_irqsave(&ppd->lflags_lock, flags);
588 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
589 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
590 portok++;
591 }
592
593 if (!portok) {
594 /* none of the ports initialized */
595 if (!ret && lastfail)
596 ret = lastfail;
597 else if (!ret)
598 ret = -ENETDOWN;
599 /* but continue on, so we can debug cause */
600 }
601
602 enable_chip(dd);
603
604 init_piobuf_state(dd);
605
606done:
607 if (!ret) {
608 /* chip is OK for user apps; mark it as initialized */
609 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
610 ppd = dd->pport + pidx;
611 /*
612 * Set status even if port serdes is not initialized
613 * so that diags will work.
614 */
615 *ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
616 QIB_STATUS_INITTED;
617 if (!ppd->link_speed_enabled)
618 continue;
619 if (dd->flags & QIB_HAS_SEND_DMA)
620 ret = qib_setup_sdma(ppd);
621 init_timer(&ppd->hol_timer);
622 ppd->hol_timer.function = qib_hol_event;
623 ppd->hol_timer.data = (unsigned long)ppd;
624 ppd->hol_state = QIB_HOL_UP;
625 }
626
627 /* now we can enable all interrupts from the chip */
628 dd->f_set_intr_state(dd, 1);
629
630 /*
 631 * Set up to verify we get an interrupt, and fall back
 632 * to an alternate if necessary and possible.
633 */
634 mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
635 /* start stats retrieval timer */
636 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
637 }
638
639 /* if ret is non-zero, we probably should do some cleanup here... */
640 return ret;
641}
642
643/*
644 * These next two routines are placeholders in case we don't have per-arch
645 * code for controlling write combining. If explicit control of write
646 * combining is not available, performance will probably be awful.
647 */
648
649int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
650{
651 return -EOPNOTSUPP;
652}
653
654void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
655{
656}
657
658static inline struct qib_devdata *__qib_lookup(int unit)
659{
660 return idr_find(&qib_unit_table, unit);
661}
662
663struct qib_devdata *qib_lookup(int unit)
664{
665 struct qib_devdata *dd;
666 unsigned long flags;
667
668 spin_lock_irqsave(&qib_devs_lock, flags);
669 dd = __qib_lookup(unit);
670 spin_unlock_irqrestore(&qib_devs_lock, flags);
671
672 return dd;
673}
674
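/*
 * A hypothetical caller-side sketch for qib_lookup(), e.g. diag or
 * sysfs code that is handed a unit number from user space (the flag
 * and field names are the ones this driver already uses):
 *
 *	struct qib_devdata *dd = qib_lookup(unit);
 *
 *	if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase)
 *		return -ENODEV;
 *
 * dd stays valid afterward, since devdata is only freed at unload.
 */
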
675/*
676 * Stop the timers during unit shutdown, or after an error late
677 * in initialization.
678 */
679static void qib_stop_timers(struct qib_devdata *dd)
680{
681 struct qib_pportdata *ppd;
682 int pidx;
683
684 if (dd->stats_timer.data) {
685 del_timer_sync(&dd->stats_timer);
686 dd->stats_timer.data = 0;
687 }
688 if (dd->intrchk_timer.data) {
689 del_timer_sync(&dd->intrchk_timer);
690 dd->intrchk_timer.data = 0;
691 }
692 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
693 ppd = dd->pport + pidx;
694 if (ppd->hol_timer.data)
695 del_timer_sync(&ppd->hol_timer);
696 if (ppd->led_override_timer.data) {
697 del_timer_sync(&ppd->led_override_timer);
698 atomic_set(&ppd->led_override_timer_active, 0);
699 }
700 if (ppd->symerr_clear_timer.data)
701 del_timer_sync(&ppd->symerr_clear_timer);
702 }
703}
704
705/**
706 * qib_shutdown_device - shut down a device
707 * @dd: the qlogic_ib device
708 *
709 * This is called to make the device quiet when we are about to
710 * unload the driver, and also when the device is administratively
711 * disabled. It does not free any data structures.
 712 * Everything it does has to be set up again by qib_init(dd, 1).
713 */
714static void qib_shutdown_device(struct qib_devdata *dd)
715{
716 struct qib_pportdata *ppd;
717 unsigned pidx;
718
719 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
720 ppd = dd->pport + pidx;
721
722 spin_lock_irq(&ppd->lflags_lock);
723 ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
724 QIBL_LINKARMED | QIBL_LINKACTIVE |
725 QIBL_LINKV);
726 spin_unlock_irq(&ppd->lflags_lock);
727 *ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
728 }
729 dd->flags &= ~QIB_INITTED;
730
731 /* mask interrupts, but not errors */
732 dd->f_set_intr_state(dd, 0);
733
734 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
735 ppd = dd->pport + pidx;
736 dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
737 QIB_RCVCTRL_CTXT_DIS |
738 QIB_RCVCTRL_INTRAVAIL_DIS |
739 QIB_RCVCTRL_PKEY_ENB, -1);
740 /*
741 * Gracefully stop all sends allowing any in progress to
742 * trickle out first.
743 */
744 dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
745 }
746
747 /*
748 * Enough for anything that's going to trickle out to have actually
749 * done so.
750 */
751 udelay(20);
752
753 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
754 ppd = dd->pport + pidx;
755 dd->f_setextled(ppd, 0); /* make sure LEDs are off */
756
757 if (dd->flags & QIB_HAS_SEND_DMA)
758 qib_teardown_sdma(ppd);
759
760 dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
761 QIB_SENDCTRL_SEND_DIS);
762 /*
763 * Clear SerdesEnable.
764 * We can't count on interrupts since we are stopping.
765 */
766 dd->f_quiet_serdes(ppd);
767 }
768
769 qib_update_eeprom_log(dd);
770}
771
772/**
773 * qib_free_ctxtdata - free a context's allocated data
774 * @dd: the qlogic_ib device
775 * @rcd: the ctxtdata structure
776 *
777 * free up any allocated data for a context
778 * This should not touch anything that would affect a simultaneous
779 * re-allocation of context data, because it is called after qib_mutex
780 * is released (and can be called from reinit as well).
781 * It should never change any chip state, or global driver state.
782 */
783void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
784{
785 if (!rcd)
786 return;
787
788 if (rcd->rcvhdrq) {
789 dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
790 rcd->rcvhdrq, rcd->rcvhdrq_phys);
791 rcd->rcvhdrq = NULL;
792 if (rcd->rcvhdrtail_kvaddr) {
793 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
794 rcd->rcvhdrtail_kvaddr,
795 rcd->rcvhdrqtailaddr_phys);
796 rcd->rcvhdrtail_kvaddr = NULL;
797 }
798 }
799 if (rcd->rcvegrbuf) {
800 unsigned e;
801
802 for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
803 void *base = rcd->rcvegrbuf[e];
804 size_t size = rcd->rcvegrbuf_size;
805
806 dma_free_coherent(&dd->pcidev->dev, size,
807 base, rcd->rcvegrbuf_phys[e]);
808 }
809 kfree(rcd->rcvegrbuf);
810 rcd->rcvegrbuf = NULL;
811 kfree(rcd->rcvegrbuf_phys);
812 rcd->rcvegrbuf_phys = NULL;
813 rcd->rcvegrbuf_chunks = 0;
814 }
815
816 kfree(rcd->tid_pg_list);
817 vfree(rcd->user_event_mask);
818 vfree(rcd->subctxt_uregbase);
819 vfree(rcd->subctxt_rcvegrbuf);
820 vfree(rcd->subctxt_rcvhdr_base);
821 kfree(rcd);
822}
823
824/*
825 * Perform a PIO buffer bandwidth write test, to verify proper system
826 * configuration. Even when all the setup calls work, occasionally
827 * BIOS or other issues can prevent write combining from working, or
828 * can cause other bandwidth problems to the chip.
829 *
830 * This test simply writes the same buffer over and over again, and
831 * measures close to the peak bandwidth to the chip (not testing
832 * data bandwidth to the wire). On chips that use an address-based
833 * trigger to send packets to the wire, this is easy. On chips that
834 * use a count to trigger, we want to make sure that the packet doesn't
835 * go out on the wire, or trigger flow control checks.
836 */
837static void qib_verify_pioperf(struct qib_devdata *dd)
838{
839 u32 pbnum, cnt, lcnt;
840 u32 __iomem *piobuf;
841 u32 *addr;
842 u64 msecs, emsecs;
843
844 piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
845 if (!piobuf) {
846 qib_devinfo(dd->pcidev,
847 "No PIObufs for checking perf, skipping\n");
848 return;
849 }
850
851 /*
852 * Enough to give us a reasonable test, less than piobuf size, and
853 * likely multiple of store buffer length.
854 */
855 cnt = 1024;
856
857 addr = vmalloc(cnt);
858 if (!addr) {
859 qib_devinfo(dd->pcidev,
860 "Couldn't get memory for checking PIO perf,"
861 " skipping\n");
862 goto done;
863 }
864
865 preempt_disable(); /* we want reasonably accurate elapsed time */
866 msecs = 1 + jiffies_to_msecs(jiffies);
867 for (lcnt = 0; lcnt < 10000U; lcnt++) {
868 /* wait until we cross msec boundary */
869 if (jiffies_to_msecs(jiffies) >= msecs)
870 break;
871 udelay(1);
872 }
873
874 dd->f_set_armlaunch(dd, 0);
875
876 /*
877 * length 0, no dwords actually sent
878 */
879 writeq(0, piobuf);
880 qib_flush_wc();
881
882 /*
883 * This is only roughly accurate, since even with preempt we
884 * still take interrupts that could take a while. Running for
885 * >= 5 msec seems to get us "close enough" to accurate values.
886 */
887 msecs = jiffies_to_msecs(jiffies);
888 for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
889 qib_pio_copy(piobuf + 64, addr, cnt >> 2);
890 emsecs = jiffies_to_msecs(jiffies) - msecs;
891 }
892
893 /* 1 GiB/sec, slightly over IB SDR line rate */
894 if (lcnt < (emsecs * 1024U))
895 qib_dev_err(dd,
896 "Performance problem: bandwidth to PIO buffers is "
897 "only %u MiB/sec\n",
898 lcnt / (u32) emsecs);
899
900 preempt_enable();
901
902 vfree(addr);
903
904done:
905 /* disarm piobuf, so it's available again */
906 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
907 qib_sendbuf_done(dd, pbnum);
908 dd->f_set_armlaunch(dd, 1);
909}
910
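/*
 * The arithmetic behind the check above: each loop iteration copies
 * cnt = 1024 bytes, so lcnt iterations in emsecs milliseconds move
 * lcnt * 1024 bytes in emsecs / 1000 seconds, i.e. roughly
 * lcnt / emsecs MiB/sec (1 KiB per msec is about 0.98 MiB/sec).
 * Requiring lcnt >= emsecs * 1024 therefore demands about
 * 1024 MiB/sec, the "1 GiB/sec, slightly over IB SDR line rate"
 * threshold.
 */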
911
912void qib_free_devdata(struct qib_devdata *dd)
913{
914 unsigned long flags;
915
916 spin_lock_irqsave(&qib_devs_lock, flags);
917 idr_remove(&qib_unit_table, dd->unit);
918 list_del(&dd->list);
919 spin_unlock_irqrestore(&qib_devs_lock, flags);
920
921 ib_dealloc_device(&dd->verbs_dev.ibdev);
922}
923
924/*
925 * Allocate our primary per-unit data structure. Must be done via verbs
926 * allocator, because the verbs cleanup process both does cleanup and
927 * free of the data structure.
928 * "extra" is for chip-specific data.
929 *
930 * Use the idr mechanism to get a unit number for this unit.
931 */
932struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
933{
934 unsigned long flags;
935 struct qib_devdata *dd;
936 int ret;
937
938 if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
939 dd = ERR_PTR(-ENOMEM);
940 goto bail;
941 }
942
943 dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
944 if (!dd) {
945 dd = ERR_PTR(-ENOMEM);
946 goto bail;
947 }
948
949 spin_lock_irqsave(&qib_devs_lock, flags);
950 ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
951 if (ret >= 0)
952 list_add(&dd->list, &qib_dev_list);
953 spin_unlock_irqrestore(&qib_devs_lock, flags);
954
955 if (ret < 0) {
956 qib_early_err(&pdev->dev,
957 "Could not allocate unit ID: error %d\n", -ret);
958 ib_dealloc_device(&dd->verbs_dev.ibdev);
959 dd = ERR_PTR(ret);
960 goto bail;
961 }
962
963 if (!qib_cpulist_count) {
964 u32 count = num_online_cpus();
965 qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
966 sizeof(long), GFP_KERNEL);
967 if (qib_cpulist)
968 qib_cpulist_count = count;
969 else
970 qib_early_err(&pdev->dev, "Could not alloc cpulist "
971 "info, cpu affinity might be wrong\n");
972 }
973
974bail:
975 return dd;
976}
977
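/*
 * The split above is the protocol the legacy idr API requires:
 * idr_pre_get(GFP_KERNEL) may sleep, so it runs before the spinlock
 * is taken, and idr_get_new() then consumes the preallocated node
 * atomically under qib_devs_lock. A generic sketch of the same
 * pattern (names are illustrative):
 *
 *	if (!idr_pre_get(&some_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock_irqsave(&some_lock, flags);
 *	ret = idr_get_new(&some_idr, ptr, &id);
 *	spin_unlock_irqrestore(&some_lock, flags);
 */
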
978/*
979 * Called from freeze mode handlers, and from PCI error
980 * reporting code. Should be paranoid about state of
981 * system and data structures.
982 */
983void qib_disable_after_error(struct qib_devdata *dd)
984{
985 if (dd->flags & QIB_INITTED) {
986 u32 pidx;
987
988 dd->flags &= ~QIB_INITTED;
989 if (dd->pport)
990 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
991 struct qib_pportdata *ppd;
992
993 ppd = dd->pport + pidx;
994 if (dd->flags & QIB_PRESENT) {
995 qib_set_linkstate(ppd,
996 QIB_IB_LINKDOWN_DISABLE);
997 dd->f_setextled(ppd, 0);
998 }
999 *ppd->statusp &= ~QIB_STATUS_IB_READY;
1000 }
1001 }
1002
1003 /*
1004 * Mark as having had an error for driver, and also
1005 * for /sys and status word mapped to user programs.
1006 * This marks unit as not usable, until reset.
1007 */
1008 if (dd->devstatusp)
1009 *dd->devstatusp |= QIB_STATUS_HWERROR;
1010}
1011
1012static void __devexit qib_remove_one(struct pci_dev *);
1013static int __devinit qib_init_one(struct pci_dev *,
1014 const struct pci_device_id *);
1015
1016#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
1017#define PFX QIB_DRV_NAME ": "
1018
1019static const struct pci_device_id qib_pci_tbl[] = {
1020 { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
1021 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
1022 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
1023 { 0, }
1024};
1025
1026MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
1027
1028struct pci_driver qib_driver = {
1029 .name = QIB_DRV_NAME,
1030 .probe = qib_init_one,
1031 .remove = __devexit_p(qib_remove_one),
1032 .id_table = qib_pci_tbl,
1033 .err_handler = &qib_pci_err_handler,
1034};
1035
1036/*
1037 * Do all the generic driver unit- and chip-independent memory
1038 * allocation and initialization.
1039 */
1040static int __init qlogic_ib_init(void)
1041{
1042 int ret;
1043
1044 ret = qib_dev_init();
1045 if (ret)
1046 goto bail;
1047
1048 /*
1049 * We create our own workqueue mainly because we want to be
1050 * able to flush it when devices are being removed. We can't
1051 * use schedule_work()/flush_scheduled_work() because both
1052 * unregister_netdev() and linkwatch_event take the rtnl lock,
1053 * so flush_scheduled_work() can deadlock during device
1054 * removal.
1055 */
1056 qib_wq = create_workqueue("qib");
1057 if (!qib_wq) {
1058 ret = -ENOMEM;
1059 goto bail_dev;
1060 }
1061
1062 qib_cq_wq = create_workqueue("qib_cq");
1063 if (!qib_cq_wq) {
1064 ret = -ENOMEM;
1065 goto bail_wq;
1066 }
1067
1068 /*
1069 * These must be called before the driver is registered with
1070 * the PCI subsystem.
1071 */
1072 idr_init(&qib_unit_table);
1073 if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
1074 printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n");
1075 ret = -ENOMEM;
1076 goto bail_cq_wq;
1077 }
1078
1079 ret = pci_register_driver(&qib_driver);
1080 if (ret < 0) {
1081 printk(KERN_ERR QIB_DRV_NAME
1082 ": Unable to register driver: error %d\n", -ret);
1083 goto bail_unit;
1084 }
1085
1086 /* not fatal if it doesn't work */
1087 if (qib_init_qibfs())
 1088 printk(KERN_ERR QIB_DRV_NAME ": Unable to register qibfs\n");
1089 goto bail; /* all OK */
1090
1091bail_unit:
1092 idr_destroy(&qib_unit_table);
1093bail_cq_wq:
1094 destroy_workqueue(qib_cq_wq);
1095bail_wq:
1096 destroy_workqueue(qib_wq);
1097bail_dev:
1098 qib_dev_cleanup();
1099bail:
1100 return ret;
1101}
1102
1103module_init(qlogic_ib_init);
1104
1105/*
1106 * Do the non-unit driver cleanup, memory free, etc. at unload.
1107 */
1108static void __exit qlogic_ib_cleanup(void)
1109{
1110 int ret;
1111
1112 ret = qib_exit_qibfs();
1113 if (ret)
1114 printk(KERN_ERR QIB_DRV_NAME ": "
1115 "Unable to cleanup counter filesystem: "
1116 "error %d\n", -ret);
1117
1118 pci_unregister_driver(&qib_driver);
1119
1120 destroy_workqueue(qib_wq);
1121 destroy_workqueue(qib_cq_wq);
1122
1123 qib_cpulist_count = 0;
1124 kfree(qib_cpulist);
1125
1126 idr_destroy(&qib_unit_table);
1127 qib_dev_cleanup();
1128}
1129
1130module_exit(qlogic_ib_cleanup);
1131
1132/* this can only be called after a successful initialization */
1133static void cleanup_device_data(struct qib_devdata *dd)
1134{
1135 int ctxt;
1136 int pidx;
1137 struct qib_ctxtdata **tmp;
1138 unsigned long flags;
1139
1140 /* users can't do anything more with chip */
1141 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1142 if (dd->pport[pidx].statusp)
1143 *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
1144
1145 if (!qib_wc_pat)
1146 qib_disable_wc(dd);
1147
1148 if (dd->pioavailregs_dma) {
1149 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
1150 (void *) dd->pioavailregs_dma,
1151 dd->pioavailregs_phys);
1152 dd->pioavailregs_dma = NULL;
1153 }
1154
1155 if (dd->pageshadow) {
1156 struct page **tmpp = dd->pageshadow;
1157 dma_addr_t *tmpd = dd->physshadow;
1158 int i, cnt = 0;
1159
1160 for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
1161 int ctxt_tidbase = ctxt * dd->rcvtidcnt;
1162 int maxtid = ctxt_tidbase + dd->rcvtidcnt;
1163
1164 for (i = ctxt_tidbase; i < maxtid; i++) {
1165 if (!tmpp[i])
1166 continue;
1167 pci_unmap_page(dd->pcidev, tmpd[i],
1168 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1169 qib_release_user_pages(&tmpp[i], 1);
1170 tmpp[i] = NULL;
1171 cnt++;
1172 }
1173 }
1174
1175 tmpp = dd->pageshadow;
1176 dd->pageshadow = NULL;
1177 vfree(tmpp);
1178 }
1179
1180 /*
1181 * Free any resources still in use (usually just kernel contexts)
 1182 * at unload; we iterate up to ctxtcnt, because that's what we allocate.
1183 * We acquire lock to be really paranoid that rcd isn't being
1184 * accessed from some interrupt-related code (that should not happen,
1185 * but best to be sure).
1186 */
1187 spin_lock_irqsave(&dd->uctxt_lock, flags);
1188 tmp = dd->rcd;
1189 dd->rcd = NULL;
1190 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1191 for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
1192 struct qib_ctxtdata *rcd = tmp[ctxt];
1193
1194 tmp[ctxt] = NULL; /* debugging paranoia */
1195 qib_free_ctxtdata(dd, rcd);
1196 }
1197 kfree(tmp);
1198 kfree(dd->boardname);
1199}
1200
1201/*
1202 * Clean up on unit shutdown, or error during unit load after
1203 * successful initialization.
1204 */
1205static void qib_postinit_cleanup(struct qib_devdata *dd)
1206{
1207 /*
1208 * Clean up chip-specific stuff.
1209 * We check for NULL here, because it's outside
1210 * the kregbase check, and we need to call it
1211 * after the free_irq. Thus it's possible that
1212 * the function pointers were never initialized.
1213 */
1214 if (dd->f_cleanup)
1215 dd->f_cleanup(dd);
1216
1217 qib_pcie_ddcleanup(dd);
1218
1219 cleanup_device_data(dd);
1220
1221 qib_free_devdata(dd);
1222}
1223
1224static int __devinit qib_init_one(struct pci_dev *pdev,
1225 const struct pci_device_id *ent)
1226{
1227 int ret, j, pidx, initfail;
1228 struct qib_devdata *dd = NULL;
1229
1230 ret = qib_pcie_init(pdev, ent);
1231 if (ret)
1232 goto bail;
1233
1234 /*
 1235 * Do device-specific initialization, function table setup, dd
1236 * allocation, etc.
1237 */
1238 switch (ent->device) {
1239 case PCI_DEVICE_ID_QLOGIC_IB_6120:
1240 dd = qib_init_iba6120_funcs(pdev, ent);
1241 break;
1242
1243 case PCI_DEVICE_ID_QLOGIC_IB_7220:
1244 dd = qib_init_iba7220_funcs(pdev, ent);
1245 break;
1246
1247 case PCI_DEVICE_ID_QLOGIC_IB_7322:
1248 dd = qib_init_iba7322_funcs(pdev, ent);
1249 break;
1250
1251 default:
1252 qib_early_err(&pdev->dev, "Failing on unknown QLogic "
1253 "deviceid 0x%x\n", ent->device);
1254 ret = -ENODEV;
1255 }
1256
1257 if (IS_ERR(dd))
1258 ret = PTR_ERR(dd);
1259 if (ret)
1260 goto bail; /* error already printed */
1261
1262 /* do the generic initialization */
1263 initfail = qib_init(dd, 0);
1264
1265 ret = qib_register_ib_device(dd);
1266
1267 /*
 1268 * Now ready for use. This should be cleared whenever we
1269 * detect a reset, or initiate one. If earlier failure,
1270 * we still create devices, so diags, etc. can be used
1271 * to determine cause of problem.
1272 */
1273 if (!qib_mini_init && !initfail && !ret)
1274 dd->flags |= QIB_INITTED;
1275
1276 j = qib_device_create(dd);
1277 if (j)
1278 qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
1279 j = qibfs_add(dd);
1280 if (j)
1281 qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
1282 -j);
1283
1284 if (qib_mini_init || initfail || ret) {
1285 qib_stop_timers(dd);
1286 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1287 dd->f_quiet_serdes(dd->pport + pidx);
1288 if (initfail)
1289 ret = initfail;
1290 goto bail;
1291 }
1292
1293 if (!qib_wc_pat) {
1294 ret = qib_enable_wc(dd);
1295 if (ret) {
1296 qib_dev_err(dd, "Write combining not enabled "
1297 "(err %d): performance may be poor\n",
1298 -ret);
1299 ret = 0;
1300 }
1301 }
1302
1303 qib_verify_pioperf(dd);
1304bail:
1305 return ret;
1306}
1307
1308static void __devexit qib_remove_one(struct pci_dev *pdev)
1309{
1310 struct qib_devdata *dd = pci_get_drvdata(pdev);
1311 int ret;
1312
1313 /* unregister from IB core */
1314 qib_unregister_ib_device(dd);
1315
1316 /*
1317 * Disable the IB link, disable interrupts on the device,
1318 * clear dma engines, etc.
1319 */
1320 if (!qib_mini_init)
1321 qib_shutdown_device(dd);
1322
1323 qib_stop_timers(dd);
1324
1325 /* wait until all of our (qsfp) schedule_work() calls complete */
1326 flush_scheduled_work();
1327
1328 ret = qibfs_remove(dd);
1329 if (ret)
1330 qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
1331 -ret);
1332
1333 qib_device_remove(dd);
1334
1335 qib_postinit_cleanup(dd);
1336}
1337
1338/**
1339 * qib_create_rcvhdrq - create a receive header queue
1340 * @dd: the qlogic_ib device
1341 * @rcd: the context data
1342 *
1343 * This must be contiguous memory (from an i/o perspective), and must be
1344 * DMA'able (which means for some systems, it will go through an IOMMU,
1345 * or be forced into a low address range).
1346 */
1347int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
1348{
1349 unsigned amt;
1350
1351 if (!rcd->rcvhdrq) {
1352 dma_addr_t phys_hdrqtail;
1353 gfp_t gfp_flags;
1354
1355 amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
1356 sizeof(u32), PAGE_SIZE);
1357 gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
1358 GFP_USER : GFP_KERNEL;
1359 rcd->rcvhdrq = dma_alloc_coherent(
1360 &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
1361 gfp_flags | __GFP_COMP);
1362
1363 if (!rcd->rcvhdrq) {
1364 qib_dev_err(dd, "attempt to allocate %d bytes "
1365 "for ctxt %u rcvhdrq failed\n",
1366 amt, rcd->ctxt);
1367 goto bail;
1368 }
1369
1370 if (rcd->ctxt >= dd->first_user_ctxt) {
1371 rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
1372 if (!rcd->user_event_mask)
1373 goto bail_free_hdrq;
1374 }
1375
1376 if (!(dd->flags & QIB_NODMA_RTAIL)) {
1377 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
1378 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
1379 gfp_flags);
1380 if (!rcd->rcvhdrtail_kvaddr)
1381 goto bail_free;
1382 rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
1383 }
1384
1385 rcd->rcvhdrq_size = amt;
1386 }
1387
1388 /* clear for security and sanity on each use */
1389 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
1390 if (rcd->rcvhdrtail_kvaddr)
1391 memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
1392 return 0;
1393
1394bail_free:
1395 qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u "
1396 "rcvhdrqtailaddr failed\n", rcd->ctxt);
1397 vfree(rcd->user_event_mask);
1398 rcd->user_event_mask = NULL;
1399bail_free_hdrq:
1400 dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
1401 rcd->rcvhdrq_phys);
1402 rcd->rcvhdrq = NULL;
1403bail:
1404 return -ENOMEM;
1405}
1406
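/*
 * Sizing example for the rcvhdrq above, with hypothetical values:
 * rcvhdrcnt = 1000 entries of rcvhdrentsize = 32 dwords gives
 * 1000 * 32 * 4 = 128,000 bytes, which ALIGN() rounds up to 32 pages
 * (131,072 bytes) for the coherent allocation. GFP_USER is chosen for
 * user contexts because the queue is allocated on behalf of, and
 * mapped into, a user process.
 */
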
1407/**
 1408 * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts
 1409 * @rcd: the context we are setting up.
 1410 *
 1411 * Allocate the eager TID buffers and program them into the chip.
 1412 * They are no longer completely contiguous; we do multiple allocation
 1413 * calls. Otherwise we get the OOM code involved by asking for too
 1414 * much per call, with disastrous results on some kernels.
1415 */
1416int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
1417{
1418 struct qib_devdata *dd = rcd->dd;
1419 unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
1420 size_t size;
1421 gfp_t gfp_flags;
1422
1423 /*
1424 * GFP_USER, but without GFP_FS, so buffer cache can be
1425 * coalesced (we hope); otherwise, even at order 4,
 1426 * heavy filesystem activity can make these fail. We also
 1427 * request compound pages (__GFP_COMP).
1428 */
1429 gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
1430
1431 egrcnt = rcd->rcvegrcnt;
1432 egroff = rcd->rcvegr_tid_base;
1433 egrsize = dd->rcvegrbufsize;
1434
1435 chunk = rcd->rcvegrbuf_chunks;
1436 egrperchunk = rcd->rcvegrbufs_perchunk;
1437 size = rcd->rcvegrbuf_size;
1438 if (!rcd->rcvegrbuf) {
1439 rcd->rcvegrbuf =
1440 kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]),
1441 GFP_KERNEL);
1442 if (!rcd->rcvegrbuf)
1443 goto bail;
1444 }
1445 if (!rcd->rcvegrbuf_phys) {
1446 rcd->rcvegrbuf_phys =
1447 kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
1448 GFP_KERNEL);
1449 if (!rcd->rcvegrbuf_phys)
1450 goto bail_rcvegrbuf;
1451 }
1452 for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
1453 if (rcd->rcvegrbuf[e])
1454 continue;
1455 rcd->rcvegrbuf[e] =
1456 dma_alloc_coherent(&dd->pcidev->dev, size,
1457 &rcd->rcvegrbuf_phys[e],
1458 gfp_flags);
1459 if (!rcd->rcvegrbuf[e])
1460 goto bail_rcvegrbuf_phys;
1461 }
1462
1463 rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];
1464
1465 for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
1466 dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
1467 unsigned i;
1468
1469 for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
1470 dd->f_put_tid(dd, e + egroff +
1471 (u64 __iomem *)
1472 ((char __iomem *)
1473 dd->kregbase +
1474 dd->rcvegrbase),
1475 RCVHQ_RCV_TYPE_EAGER, pa);
1476 pa += egrsize;
1477 }
1478 cond_resched(); /* don't hog the cpu */
1479 }
1480
1481 return 0;
1482
1483bail_rcvegrbuf_phys:
1484 for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
1485 dma_free_coherent(&dd->pcidev->dev, size,
1486 rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
1487 kfree(rcd->rcvegrbuf_phys);
1488 rcd->rcvegrbuf_phys = NULL;
1489bail_rcvegrbuf:
1490 kfree(rcd->rcvegrbuf);
1491 rcd->rcvegrbuf = NULL;
1492bail:
1493 return -ENOMEM;
1494}
1495
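/*
 * Worked example of the chunking above, with hypothetical numbers:
 * 512 eager buffers of 4 KiB is 2 MiB total; carved into 64 KiB
 * chunks, that is egrperchunk = 16 and 32 dma_alloc_coherent() calls
 * of order 4 each, instead of a single order-9 allocation that
 * fragmented memory would routinely fail.
 */
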
1496int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
1497{
1498 u64 __iomem *qib_kregbase = NULL;
1499 void __iomem *qib_piobase = NULL;
1500 u64 __iomem *qib_userbase = NULL;
1501 u64 qib_kreglen;
1502 u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
1503 u64 qib_pio4koffset = dd->piobufbase >> 32;
1504 u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
1505 u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
1506 u64 qib_physaddr = dd->physaddr;
1507 u64 qib_piolen;
1508 u64 qib_userlen = 0;
1509
1510 /*
1511 * Free the old mapping because the kernel will try to reuse the
1512 * old mapping and not create a new mapping with the
1513 * write combining attribute.
1514 */
1515 iounmap(dd->kregbase);
1516 dd->kregbase = NULL;
1517
1518 /*
1519 * Assumes chip address space looks like:
1520 * - kregs + sregs + cregs + uregs (in any order)
1521 * - piobufs (2K and 4K bufs in either order)
1522 * or:
1523 * - kregs + sregs + cregs (in any order)
1524 * - piobufs (2K and 4K bufs in either order)
1525 * - uregs
1526 */
1527 if (dd->piobcnt4k == 0) {
1528 qib_kreglen = qib_pio2koffset;
1529 qib_piolen = qib_pio2klen;
1530 } else if (qib_pio2koffset < qib_pio4koffset) {
1531 qib_kreglen = qib_pio2koffset;
1532 qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
1533 } else {
1534 qib_kreglen = qib_pio4koffset;
1535 qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
1536 }
1537 qib_piolen += vl15buflen;
1538 /* Map just the configured ports (not all hw ports) */
1539 if (dd->uregbase > qib_kreglen)
1540 qib_userlen = dd->ureg_align * dd->cfgctxts;
1541
1542 /* Sanity checks passed, now create the new mappings */
1543 qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
1544 if (!qib_kregbase)
1545 goto bail;
1546
1547 qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
1548 if (!qib_piobase)
1549 goto bail_kregbase;
1550
1551 if (qib_userlen) {
1552 qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
1553 qib_userlen);
1554 if (!qib_userbase)
1555 goto bail_piobase;
1556 }
1557
1558 dd->kregbase = qib_kregbase;
1559 dd->kregend = (u64 __iomem *)
1560 ((char __iomem *) qib_kregbase + qib_kreglen);
1561 dd->piobase = qib_piobase;
1562 dd->pio2kbase = (void __iomem *)
1563 (((char __iomem *) dd->piobase) +
1564 qib_pio2koffset - qib_kreglen);
1565 if (dd->piobcnt4k)
1566 dd->pio4kbase = (void __iomem *)
1567 (((char __iomem *) dd->piobase) +
1568 qib_pio4koffset - qib_kreglen);
1569 if (qib_userlen)
1570 /* ureg will now be accessed relative to dd->userbase */
1571 dd->userbase = qib_userbase;
1572 return 0;
1573
1574bail_piobase:
1575 iounmap(qib_piobase);
1576bail_kregbase:
1577 iounmap(qib_kregbase);
1578bail:
1579 return -ENOMEM;
1580}
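
/*
 * Worked example of the split above, with hypothetical offsets: 2K
 * buffers at qib_pio2koffset = 0x100000 and 4K buffers above them at
 * qib_pio4koffset = 0x200000 with qib_pio4klen = 0x80000 gives
 * qib_kreglen = 0x100000 (everything below the first PIO buffer stays
 * uncached) and qib_piolen = 0x200000 + 0x80000 - 0x100000 = 0x180000
 * mapped write-combining, plus vl15buflen.
 */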
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
new file mode 100644
index 000000000000..54a40828a106
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -0,0 +1,236 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/pci.h>
36#include <linux/delay.h>
37
38#include "qib.h"
39#include "qib_common.h"
40
41/**
42 * qib_format_hwmsg - format a single hwerror message
 43 * @msg: message buffer
 44 * @msgl: length of message buffer
 45 * @hwmsg: message to add to message buffer
46 */
47static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
48{
49 strlcat(msg, "[", msgl);
50 strlcat(msg, hwmsg, msgl);
51 strlcat(msg, "]", msgl);
52}
53
54/**
55 * qib_format_hwerrors - format hardware error messages for display
 56 * @hwerrs: hardware errors bit vector
 57 * @hwerrmsgs: hardware error descriptions
 58 * @nhwerrmsgs: number of hwerrmsgs
 59 * @msg: message buffer
 60 * @msgl: message buffer length
61 */
62void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
63 size_t nhwerrmsgs, char *msg, size_t msgl)
64{
65 int i;
66
67 for (i = 0; i < nhwerrmsgs; i++)
68 if (hwerrs & hwerrmsgs[i].mask)
69 qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
70}
71
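/*
 * Hypothetical usage sketch (the table contents are illustrative, not
 * from this driver); msg must start NUL-terminated since the
 * formatting is done with strlcat():
 *
 *	static const struct qib_hwerror_msgs sketch_msgs[] = {
 *		{ .mask = 1ULL << 0, .msg = "PCIe poisoned TLP" },
 *		{ .mask = 1ULL << 1, .msg = "memory parity" },
 *	};
 *	char msg[128] = "";
 *
 *	qib_format_hwerrors(hwerrs, sketch_msgs, ARRAY_SIZE(sketch_msgs),
 *			    msg, sizeof(msg));
 *
 * which would leave msg as "[PCIe poisoned TLP][memory parity]" when
 * both mask bits are set in hwerrs.
 */
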
72static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
73{
74 struct ib_event event;
75 struct qib_devdata *dd = ppd->dd;
76
77 event.device = &dd->verbs_dev.ibdev;
78 event.element.port_num = ppd->port;
79 event.event = ev;
80 ib_dispatch_event(&event);
81}
82
83void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
84{
85 struct qib_devdata *dd = ppd->dd;
86 unsigned long flags;
87 u32 lstate;
88 u8 ltstate;
89 enum ib_event_type ev = 0;
90
91 lstate = dd->f_iblink_state(ibcs); /* linkstate */
92 ltstate = dd->f_ibphys_portstate(ibcs);
93
94 /*
95 * If linkstate transitions into INIT from any of the various down
96 * states, or if it transitions from any of the up (INIT or better)
97 * states into any of the down states (except link recovery), then
98 * call the chip-specific code to take appropriate actions.
99 */
100 if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
101 ltstate == IB_PHYSPORTSTATE_LINKUP) {
102 /* transitioned to UP */
103 if (dd->f_ib_updown(ppd, 1, ibcs))
104 goto skip_ibchange; /* chip-code handled */
105 } else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
106 QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
107 if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
108 ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
109 dd->f_ib_updown(ppd, 0, ibcs))
110 goto skip_ibchange; /* chip-code handled */
111 qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
112 }
113
114 if (lstate != IB_PORT_DOWN) {
115 /* lstate is INIT, ARMED, or ACTIVE */
116 if (lstate != IB_PORT_ACTIVE) {
117 *ppd->statusp &= ~QIB_STATUS_IB_READY;
118 if (ppd->lflags & QIBL_LINKACTIVE)
119 ev = IB_EVENT_PORT_ERR;
120 spin_lock_irqsave(&ppd->lflags_lock, flags);
121 if (lstate == IB_PORT_ARMED) {
122 ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
123 ppd->lflags &= ~(QIBL_LINKINIT |
124 QIBL_LINKDOWN | QIBL_LINKACTIVE);
125 } else {
126 ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
127 ppd->lflags &= ~(QIBL_LINKARMED |
128 QIBL_LINKDOWN | QIBL_LINKACTIVE);
129 }
130 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
131 /* start a 75msec timer to clear symbol errors */
 132 mod_timer(&ppd->symerr_clear_timer,
 133 jiffies + msecs_to_jiffies(75));
134 } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
 135 /* active, but not active deferred */
136 qib_hol_up(ppd); /* useful only for 6120 now */
137 *ppd->statusp |=
138 QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
139 qib_clear_symerror_on_linkup((unsigned long)ppd);
140 spin_lock_irqsave(&ppd->lflags_lock, flags);
141 ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
142 ppd->lflags &= ~(QIBL_LINKINIT |
143 QIBL_LINKDOWN | QIBL_LINKARMED);
144 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
145 if (dd->flags & QIB_HAS_SEND_DMA)
146 qib_sdma_process_event(ppd,
147 qib_sdma_event_e30_go_running);
148 ev = IB_EVENT_PORT_ACTIVE;
149 dd->f_setextled(ppd, 1);
150 }
151 } else { /* down */
152 if (ppd->lflags & QIBL_LINKACTIVE)
153 ev = IB_EVENT_PORT_ERR;
154 spin_lock_irqsave(&ppd->lflags_lock, flags);
155 ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
156 ppd->lflags &= ~(QIBL_LINKINIT |
157 QIBL_LINKACTIVE | QIBL_LINKARMED);
158 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
159 *ppd->statusp &= ~QIB_STATUS_IB_READY;
160 }
161
162skip_ibchange:
163 ppd->lastibcstat = ibcs;
164 if (ev)
165 signal_ib_event(ppd, ev);
166 return;
167}
168
169void qib_clear_symerror_on_linkup(unsigned long opaque)
170{
171 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
172
173 if (ppd->lflags & QIBL_LINKACTIVE)
174 return;
175
176 ppd->ibport_data.z_symbol_error_counter =
177 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
178}
179
180/*
181 * Handle receive interrupts for user ctxts; this means a user
182 * process was waiting for a packet to arrive, and didn't want
183 * to poll.
184 */
185void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
186{
187 struct qib_ctxtdata *rcd;
188 unsigned long flags;
189 int i;
190
191 spin_lock_irqsave(&dd->uctxt_lock, flags);
192 for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
193 if (!(ctxtr & (1ULL << i)))
194 continue;
195 rcd = dd->rcd[i];
196 if (!rcd || !rcd->cnt)
197 continue;
198
199 if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
200 wake_up_interruptible(&rcd->wait);
201 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
202 rcd->ctxt);
203 } else if (test_and_clear_bit(QIB_CTXT_WAITING_URG,
204 &rcd->flag)) {
205 rcd->urgent++;
206 wake_up_interruptible(&rcd->wait);
207 }
208 }
209 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
210}
211
212void qib_bad_intrstatus(struct qib_devdata *dd)
213{
214 static int allbits;
215
216 /* separate routine, for better optimization of qib_intr() */
217
218 /*
219 * We print the message and disable interrupts, in hope of
220 * having a better chance of debugging the problem.
221 */
 222 qib_dev_err(dd, "Read of chip interrupt status failed,"
223 " disabling interrupts\n");
224 if (allbits++) {
225 /* disable interrupt delivery, something is very wrong */
226 if (allbits == 2)
227 dd->f_set_intr_state(dd, 0);
228 if (allbits == 3) {
229 qib_dev_err(dd, "2nd bad interrupt status, "
230 "unregistering interrupts\n");
231 dd->flags |= QIB_BADINTR;
232 dd->flags &= ~QIB_INITTED;
233 dd->f_free_irq(dd);
234 }
235 }
236}
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
new file mode 100644
index 000000000000..4b80eb153d57
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -0,0 +1,328 @@
1/*
2 * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include "qib.h"
35
36/**
37 * qib_alloc_lkey - allocate an lkey
38 * @rkt: lkey table in which to allocate the lkey
39 * @mr: memory region that this lkey protects
40 *
41 * Returns 1 if successful, otherwise returns 0.
42 */
43
44int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
45{
46 unsigned long flags;
47 u32 r;
48 u32 n;
49 int ret;
50
51 spin_lock_irqsave(&rkt->lock, flags);
52
53 /* Find the next available LKEY */
54 r = rkt->next;
55 n = r;
56 for (;;) {
57 if (rkt->table[r] == NULL)
58 break;
59 r = (r + 1) & (rkt->max - 1);
60 if (r == n) {
61 spin_unlock_irqrestore(&rkt->lock, flags);
62 ret = 0;
63 goto bail;
64 }
65 }
66 rkt->next = (r + 1) & (rkt->max - 1);
67 /*
68 * Make sure lkey is never zero which is reserved to indicate an
69 * unrestricted LKEY.
70 */
71 rkt->gen++;
72 mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
73 ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
74 << 8);
75 if (mr->lkey == 0) {
76 mr->lkey |= 1 << 8;
77 rkt->gen++;
78 }
79 rkt->table[r] = mr;
80 spin_unlock_irqrestore(&rkt->lock, flags);
81
82 ret = 1;
83
84bail:
85 return ret;
86}
87
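/*
 * Worked example of the encoding above, assuming a hypothetical
 * ib_qib_lkey_table_size of 16: the table index r lands in bits 31:16,
 * the low 24 - 16 = 8 bits of rkt->gen land in bits 15:8, and bits 7:0
 * stay zero, so r = 5 with gen = 3 yields lkey = 0x00050300. The
 * generation bits make a recycled slot hand out a different lkey, so a
 * stale key fails the mr->lkey comparison in qib_lkey_ok().
 */
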
88/**
89 * qib_free_lkey - free an lkey
 90 * @dev: qib device owning the lkey table
 91 * @mr: memory region whose lkey is to be freed
92 */
93int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
94{
95 unsigned long flags;
96 u32 lkey = mr->lkey;
97 u32 r;
98 int ret;
99
100 spin_lock_irqsave(&dev->lk_table.lock, flags);
101 if (lkey == 0) {
102 if (dev->dma_mr && dev->dma_mr == mr) {
103 ret = atomic_read(&dev->dma_mr->refcount);
104 if (!ret)
105 dev->dma_mr = NULL;
106 } else
107 ret = 0;
108 } else {
109 r = lkey >> (32 - ib_qib_lkey_table_size);
110 ret = atomic_read(&dev->lk_table.table[r]->refcount);
111 if (!ret)
112 dev->lk_table.table[r] = NULL;
113 }
114 spin_unlock_irqrestore(&dev->lk_table.lock, flags);
115
116 if (ret)
117 ret = -EBUSY;
118 return ret;
119}
120
121/**
122 * qib_lkey_ok - check IB SGE for validity and initialize
123 * @rkt: table containing lkey to check SGE against
124 * @isge: outgoing internal SGE
125 * @sge: SGE to check
126 * @acc: access flags
127 *
128 * Return 1 if valid and successful, otherwise returns 0.
129 *
130 * Check the IB SGE for validity and initialize our internal version
131 * of it.
132 */
133int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
134 struct qib_sge *isge, struct ib_sge *sge, int acc)
135{
136 struct qib_mregion *mr;
137 unsigned n, m;
138 size_t off;
139 int ret = 0;
140 unsigned long flags;
141
142 /*
143 * We use LKEY == zero for kernel virtual addresses
144 * (see qib_get_dma_mr and qib_dma.c).
145 */
146 spin_lock_irqsave(&rkt->lock, flags);
147 if (sge->lkey == 0) {
148 struct qib_ibdev *dev = to_idev(pd->ibpd.device);
149
150 if (pd->user)
151 goto bail;
152 if (!dev->dma_mr)
153 goto bail;
154 atomic_inc(&dev->dma_mr->refcount);
155 isge->mr = dev->dma_mr;
156 isge->vaddr = (void *) sge->addr;
157 isge->length = sge->length;
158 isge->sge_length = sge->length;
159 isge->m = 0;
160 isge->n = 0;
161 goto ok;
162 }
163 mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
164 if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
165 mr->pd != &pd->ibpd))
166 goto bail;
167
168 off = sge->addr - mr->user_base;
169 if (unlikely(sge->addr < mr->user_base ||
170 off + sge->length > mr->length ||
171 (mr->access_flags & acc) != acc))
172 goto bail;
173
174 off += mr->offset;
175 m = 0;
176 n = 0;
177 while (off >= mr->map[m]->segs[n].length) {
178 off -= mr->map[m]->segs[n].length;
179 n++;
180 if (n >= QIB_SEGSZ) {
181 m++;
182 n = 0;
183 }
184 }
185 atomic_inc(&mr->refcount);
186 isge->mr = mr;
187 isge->vaddr = mr->map[m]->segs[n].vaddr + off;
188 isge->length = mr->map[m]->segs[n].length - off;
189 isge->sge_length = sge->length;
190 isge->m = m;
191 isge->n = n;
192ok:
193 ret = 1;
194bail:
195 spin_unlock_irqrestore(&rkt->lock, flags);
196 return ret;
197}
198
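/*
 * The (m, n) walk above resolves a byte offset into the two-level
 * segment array: m indexes the map chunk, n the segment within it,
 * rolling into the next chunk whenever n reaches QIB_SEGSZ. E.g. with
 * uniform 4 KiB segments and off = 9000, the loop subtracts 4096
 * twice, leaving n = 2 and a residual off of 808 bytes into the third
 * segment.
 */
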
199/**
200 * qib_rkey_ok - check the IB virtual address, length, and RKEY
 201 * @qp: queue pair the request came in on
 202 * @sge: SGE to initialize from the rkey lookup
203 * @len: length of data
204 * @vaddr: virtual address to place data
205 * @rkey: rkey to check
206 * @acc: access flags
207 *
208 * Return 1 if successful, otherwise 0.
209 */
210int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
211 u32 len, u64 vaddr, u32 rkey, int acc)
212{
213 struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
214 struct qib_mregion *mr;
215 unsigned n, m;
216 size_t off;
217 int ret = 0;
218 unsigned long flags;
219
220 /*
221 * We use RKEY == zero for kernel virtual addresses
222 * (see qib_get_dma_mr and qib_dma.c).
223 */
224 spin_lock_irqsave(&rkt->lock, flags);
225 if (rkey == 0) {
226 struct qib_pd *pd = to_ipd(qp->ibqp.pd);
227 struct qib_ibdev *dev = to_idev(pd->ibpd.device);
228
229 if (pd->user)
230 goto bail;
231 if (!dev->dma_mr)
232 goto bail;
233 atomic_inc(&dev->dma_mr->refcount);
234 sge->mr = dev->dma_mr;
235 sge->vaddr = (void *) vaddr;
236 sge->length = len;
237 sge->sge_length = len;
238 sge->m = 0;
239 sge->n = 0;
240 goto ok;
241 }
242
243 mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
244 if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
245 goto bail;
246
247 off = vaddr - mr->iova;
248 if (unlikely(vaddr < mr->iova || off + len > mr->length ||
249 (mr->access_flags & acc) == 0))
250 goto bail;
251
252 off += mr->offset;
253 m = 0;
254 n = 0;
255 while (off >= mr->map[m]->segs[n].length) {
256 off -= mr->map[m]->segs[n].length;
257 n++;
258 if (n >= QIB_SEGSZ) {
259 m++;
260 n = 0;
261 }
262 }
263 atomic_inc(&mr->refcount);
264 sge->mr = mr;
265 sge->vaddr = mr->map[m]->segs[n].vaddr + off;
266 sge->length = mr->map[m]->segs[n].length - off;
267 sge->sge_length = len;
268 sge->m = m;
269 sge->n = n;
270ok:
271 ret = 1;
272bail:
273 spin_unlock_irqrestore(&rkt->lock, flags);
274 return ret;
275}
276
277/*
 278 * Initialize the memory region specified by the work request.
279 */
280int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
281{
282 struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
283 struct qib_pd *pd = to_ipd(qp->ibqp.pd);
284 struct qib_mregion *mr;
285 u32 rkey = wr->wr.fast_reg.rkey;
286 unsigned i, n, m;
287 int ret = -EINVAL;
288 unsigned long flags;
289 u64 *page_list;
290 size_t ps;
291
292 spin_lock_irqsave(&rkt->lock, flags);
293 if (pd->user || rkey == 0)
294 goto bail;
295
296 mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
297 if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
298 goto bail;
299
300 if (wr->wr.fast_reg.page_list_len > mr->max_segs)
301 goto bail;
302
303 ps = 1UL << wr->wr.fast_reg.page_shift;
304 if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
305 goto bail;
306
307 mr->user_base = wr->wr.fast_reg.iova_start;
308 mr->iova = wr->wr.fast_reg.iova_start;
309 mr->lkey = rkey;
310 mr->length = wr->wr.fast_reg.length;
311 mr->access_flags = wr->wr.fast_reg.access_flags;
312 page_list = wr->wr.fast_reg.page_list->page_list;
313 m = 0;
314 n = 0;
315 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
316 mr->map[m]->segs[n].vaddr = (void *) page_list[i];
317 mr->map[m]->segs[n].length = ps;
318 if (++n == QIB_SEGSZ) {
319 m++;
320 n = 0;
321 }
322 }
323
324 ret = 0;
325bail:
326 spin_unlock_irqrestore(&rkt->lock, flags);
327 return ret;
328}
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
new file mode 100644
index 000000000000..94b0d1f3a8f0
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -0,0 +1,2173 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <rdma/ib_smi.h>
36
37#include "qib.h"
38#include "qib_mad.h"
39
40static int reply(struct ib_smp *smp)
41{
42 /*
43 * The verbs framework will handle the directed/LID route
44 * packet changes.
45 */
46 smp->method = IB_MGMT_METHOD_GET_RESP;
47 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
48 smp->status |= IB_SMP_DIRECTION;
49 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
50}
51
52static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
53{
54 struct ib_mad_send_buf *send_buf;
55 struct ib_mad_agent *agent;
56 struct ib_smp *smp;
57 int ret;
58 unsigned long flags;
59 unsigned long timeout;
60
61 agent = ibp->send_agent;
62 if (!agent)
63 return;
64
65 /* o14-3.2.1 */
66 if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
67 return;
68
69 /* o14-2 */
70 if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
71 return;
72
73 send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
74 IB_MGMT_MAD_DATA, GFP_ATOMIC);
75 if (IS_ERR(send_buf))
76 return;
77
78 smp = send_buf->mad;
79 smp->base_version = IB_MGMT_BASE_VERSION;
80 smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
81 smp->class_version = 1;
82 smp->method = IB_MGMT_METHOD_TRAP;
83 ibp->tid++;
84 smp->tid = cpu_to_be64(ibp->tid);
85 smp->attr_id = IB_SMP_ATTR_NOTICE;
86 /* o14-1: smp->mkey = 0; */
87 memcpy(smp->data, data, len);
88
89 spin_lock_irqsave(&ibp->lock, flags);
90 if (!ibp->sm_ah) {
91 if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
92 struct ib_ah *ah;
93 struct ib_ah_attr attr;
94
95 memset(&attr, 0, sizeof attr);
96 attr.dlid = ibp->sm_lid;
97 attr.port_num = ppd_from_ibp(ibp)->port;
98 ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
99 if (IS_ERR(ah))
100 ret = -EINVAL;
101 else {
102 send_buf->ah = ah;
103 ibp->sm_ah = to_iah(ah);
104 ret = 0;
105 }
106 } else
107 ret = -EINVAL;
108 } else {
109 send_buf->ah = &ibp->sm_ah->ibah;
110 ret = 0;
111 }
112 spin_unlock_irqrestore(&ibp->lock, flags);
113
114 if (!ret)
115 ret = ib_post_send_mad(send_buf, NULL);
116 if (!ret) {
117 /* 4.096 usec. */
118 timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
119 ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
120 } else {
121 ib_free_send_mad(send_buf);
122 ibp->trap_timeout = 0;
123 }
124}
125
126/*
127 * Send a bad [PQ]_Key trap (ch. 14.3.8).
128 */
129void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
130 u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
131{
132 struct ib_mad_notice_attr data;
133
134 if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
135 ibp->pkey_violations++;
136 else
137 ibp->qkey_violations++;
138 ibp->n_pkt_drops++;
139
140 /* Send violation trap */
141 data.generic_type = IB_NOTICE_TYPE_SECURITY;
142 data.prod_type_msb = 0;
143 data.prod_type_lsb = IB_NOTICE_PROD_CA;
144 data.trap_num = trap_num;
145 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
146 data.toggle_count = 0;
147 memset(&data.details, 0, sizeof data.details);
148 data.details.ntc_257_258.lid1 = lid1;
149 data.details.ntc_257_258.lid2 = lid2;
150 data.details.ntc_257_258.key = cpu_to_be32(key);
151 data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
152 data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
153
154 qib_send_trap(ibp, &data, sizeof data);
155}
156
157/*
158 * Send a bad M_Key trap (ch. 14.3.9).
159 */
160static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
161{
162 struct ib_mad_notice_attr data;
163
164 /* Send violation trap */
165 data.generic_type = IB_NOTICE_TYPE_SECURITY;
166 data.prod_type_msb = 0;
167 data.prod_type_lsb = IB_NOTICE_PROD_CA;
168 data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
169 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
170 data.toggle_count = 0;
171 memset(&data.details, 0, sizeof data.details);
172 data.details.ntc_256.lid = data.issuer_lid;
173 data.details.ntc_256.method = smp->method;
174 data.details.ntc_256.attr_id = smp->attr_id;
175 data.details.ntc_256.attr_mod = smp->attr_mod;
176 data.details.ntc_256.mkey = smp->mkey;
177 if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
178 u8 hop_cnt;
179
180 data.details.ntc_256.dr_slid = smp->dr_slid;
181 data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
182 hop_cnt = smp->hop_cnt;
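 /* Flag and clamp return paths longer than the notice can hold. */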
183 if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
184 data.details.ntc_256.dr_trunc_hop |=
185 IB_NOTICE_TRAP_DR_TRUNC;
186 hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
187 }
188 data.details.ntc_256.dr_trunc_hop |= hop_cnt;
189 memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
190 hop_cnt);
191 }
192
193 qib_send_trap(ibp, &data, sizeof data);
194}
195
196/*
197 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
198 */
199void qib_cap_mask_chg(struct qib_ibport *ibp)
200{
201 struct ib_mad_notice_attr data;
202
203 data.generic_type = IB_NOTICE_TYPE_INFO;
204 data.prod_type_msb = 0;
205 data.prod_type_lsb = IB_NOTICE_PROD_CA;
206 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
207 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
208 data.toggle_count = 0;
209 memset(&data.details, 0, sizeof data.details);
210 data.details.ntc_144.lid = data.issuer_lid;
211 data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
212
213 qib_send_trap(ibp, &data, sizeof data);
214}
215
216/*
217 * Send a System Image GUID Changed trap (ch. 14.3.12).
218 */
219void qib_sys_guid_chg(struct qib_ibport *ibp)
220{
221 struct ib_mad_notice_attr data;
222
223 data.generic_type = IB_NOTICE_TYPE_INFO;
224 data.prod_type_msb = 0;
225 data.prod_type_lsb = IB_NOTICE_PROD_CA;
226 data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
227 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
228 data.toggle_count = 0;
229 memset(&data.details, 0, sizeof data.details);
230 data.details.ntc_145.lid = data.issuer_lid;
231 data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
232
233 qib_send_trap(ibp, &data, sizeof data);
234}
235
236/*
237 * Send a Node Description Changed trap (ch. 14.3.13).
238 */
239void qib_node_desc_chg(struct qib_ibport *ibp)
240{
241 struct ib_mad_notice_attr data;
242
243 data.generic_type = IB_NOTICE_TYPE_INFO;
244 data.prod_type_msb = 0;
245 data.prod_type_lsb = IB_NOTICE_PROD_CA;
246 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
247 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
248 data.toggle_count = 0;
249 memset(&data.details, 0, sizeof data.details);
250 data.details.ntc_144.lid = data.issuer_lid;
251 data.details.ntc_144.local_changes = 1;
252 data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
253
254 qib_send_trap(ibp, &data, sizeof data);
255}
256
257static int subn_get_nodedescription(struct ib_smp *smp,
258 struct ib_device *ibdev)
259{
260 if (smp->attr_mod)
261 smp->status |= IB_SMP_INVALID_FIELD;
262
263 memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
264
265 return reply(smp);
266}
267
268static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
269 u8 port)
270{
271 struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
272 struct qib_devdata *dd = dd_from_ibdev(ibdev);
273 u32 majrev, minrev;
274 unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
275
276 /* GUID 0 is illegal */
277 if (smp->attr_mod || pidx >= dd->num_pports ||
278 dd->pport[pidx].guid == 0)
279 smp->status |= IB_SMP_INVALID_FIELD;
280 else
281 nip->port_guid = dd->pport[pidx].guid;
282
283 nip->base_version = 1;
284 nip->class_version = 1;
285 nip->node_type = 1; /* channel adapter */
286 nip->num_ports = ibdev->phys_port_cnt;
287 /* This is already in network order */
288 nip->sys_guid = ib_qib_sys_image_guid;
289 nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
290 nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
291 nip->device_id = cpu_to_be16(dd->deviceid);
292 majrev = dd->majrev;
293 minrev = dd->minrev;
294 nip->revision = cpu_to_be32((majrev << 16) | minrev);
295 nip->local_port_num = port;
296 /* vendor_id is the fixed QLogic OUI, not dd->vendorid (PCI ID) */
297 nip->vendor_id[0] = QIB_SRC_OUI_1;
298 nip->vendor_id[1] = QIB_SRC_OUI_2;
299 nip->vendor_id[2] = QIB_SRC_OUI_3;
300
301 return reply(smp);
302}
303
304static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
305 u8 port)
306{
307 struct qib_devdata *dd = dd_from_ibdev(ibdev);
308 u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
309 __be64 *p = (__be64 *) smp->data;
310 unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
311
312 /* 32 blocks, each holding 8 64-bit GUIDs */
313
314 memset(smp->data, 0, sizeof(smp->data));
315
316 if (startgx == 0 && pidx < dd->num_pports) {
317 struct qib_pportdata *ppd = dd->pport + pidx;
318 struct qib_ibport *ibp = &ppd->ibport_data;
319 __be64 g = ppd->guid;
320 unsigned i;
321
322 /* GUID 0 is illegal */
323 if (g == 0)
324 smp->status |= IB_SMP_INVALID_FIELD;
325 else {
326 /* The first is a copy of the read-only HW GUID. */
327 p[0] = g;
328 for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
329 p[i] = ibp->guids[i - 1];
330 }
331 } else
332 smp->status |= IB_SMP_INVALID_FIELD;
333
334 return reply(smp);
335}
336
337static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
338{
339 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
340}
341
342static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
343{
344 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
345}
346
347static int get_overrunthreshold(struct qib_pportdata *ppd)
348{
349 return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
350}
351
352/**
353 * set_overrunthreshold - set the overrun threshold
354 * @ppd: the physical port data
355 * @n: the new threshold
356 *
357 * Note that this will only take effect when the link state changes.
358 */
359static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
360{
361 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
362 (u32)n);
363 return 0;
364}
365
366static int get_phyerrthreshold(struct qib_pportdata *ppd)
367{
368 return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
369}
370
371/**
372 * set_phyerrthreshold - set the physical error threshold
373 * @ppd: the physical port data
374 * @n: the new threshold
375 *
376 * Note that this will only take effect when the link state changes.
377 */
378static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
379{
380 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
381 (u32)n);
382 return 0;
383}
384
385/**
386 * get_linkdowndefaultstate - get the default linkdown state
387 * @ppd: the physical port data
388 *
389 * Returns zero if the default is POLL, 1 if the default is SLEEP.
390 */
391static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
392{
393 return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
394 IB_LINKINITCMD_SLEEP;
395}
396
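/**
 * check_mkey - validate the M_Key of an incoming SMP
 * @ibp: the IB port the SMP arrived on
 * @smp: the SMP to check
 * @mad_flags: MAD layer flags (IB_MAD_IGNORE_MKEY skips the check)
 *
 * Returns 0 if the SMP may be processed further, or a "consumed"
 * result code if it failed the check and a BadMKey trap was sent.
 */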
397static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
398{
399 int ret = 0;
400
401 /* Is the mkey in the process of expiring? */
402 if (ibp->mkey_lease_timeout &&
403 time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
404 /* Clear timeout and mkey protection field. */
405 ibp->mkey_lease_timeout = 0;
406 ibp->mkeyprot = 0;
407 }
408
409 /* M_Key checking depends on Portinfo:M_Key_protect_bits */
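 /*
 * Protect levels, with a nonzero M_Key, as implemented here:
 * 0 = SubnGet() returns the M_Key to anyone; 1 = SubnGet()
 * succeeds but the M_Key reads as zero; 2 or 3 = SubnGet()
 * itself fails the check. SubnSet() and SubnTrapRepress()
 * are always protected.
 */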
410 if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
411 ibp->mkey != smp->mkey &&
412 (smp->method == IB_MGMT_METHOD_SET ||
413 smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
414 (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
415 if (ibp->mkey_violations != 0xFFFF)
416 ++ibp->mkey_violations;
417 if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
418 ibp->mkey_lease_timeout = jiffies +
419 ibp->mkey_lease_period * HZ;
420 /* Generate a trap notice. */
421 qib_bad_mkey(ibp, smp);
422 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
423 } else if (ibp->mkey_lease_timeout)
424 ibp->mkey_lease_timeout = 0;
425
426 return ret;
427}
428
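/**
 * subn_get_portinfo - get port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Get PortInfo (see ch. 14.2.5.6); a nonzero attr_mod selects
 * another port on the same device.
 */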
429static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
430 u8 port)
431{
432 struct qib_devdata *dd;
433 struct qib_pportdata *ppd;
434 struct qib_ibport *ibp;
435 struct ib_port_info *pip = (struct ib_port_info *)smp->data;
436 u16 lid;
437 u8 mtu;
438 int ret;
439 u32 state;
440 u32 port_num = be32_to_cpu(smp->attr_mod);
441
442 if (port_num == 0)
443 port_num = port;
444 else {
445 if (port_num > ibdev->phys_port_cnt) {
446 smp->status |= IB_SMP_INVALID_FIELD;
447 ret = reply(smp);
448 goto bail;
449 }
450 if (port_num != port) {
451 ibp = to_iport(ibdev, port_num);
452 ret = check_mkey(ibp, smp, 0);
453 if (ret)
454 goto bail;
455 }
456 }
457
458 dd = dd_from_ibdev(ibdev);
459 /* IB numbers ports from 1, hdw from 0 */
460 ppd = dd->pport + (port_num - 1);
461 ibp = &ppd->ibport_data;
462
463 /* Clear all fields. Only set the non-zero fields. */
464 memset(smp->data, 0, sizeof(smp->data));
465
466 /* Only return the mkey if the protection field allows it. */
467 if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey ||
468 ibp->mkeyprot == 0)
469 pip->mkey = ibp->mkey;
470 pip->gid_prefix = ibp->gid_prefix;
471 lid = ppd->lid;
472 pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
473 pip->sm_lid = cpu_to_be16(ibp->sm_lid);
474 pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
475 /* pip->diag_code; */
476 pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
477 pip->local_port_num = port;
478 pip->link_width_enabled = ppd->link_width_enabled;
479 pip->link_width_supported = ppd->link_width_supported;
480 pip->link_width_active = ppd->link_width_active;
481 state = dd->f_iblink_state(ppd->lastibcstat);
482 pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;
483
484 pip->portphysstate_linkdown =
485 (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
486 (get_linkdowndefaultstate(ppd) ? 1 : 2);
487 pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
488 pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
489 ppd->link_speed_enabled;
490 switch (ppd->ibmtu) {
491 default: /* something is wrong; fall through */
492 case 4096:
493 mtu = IB_MTU_4096;
494 break;
495 case 2048:
496 mtu = IB_MTU_2048;
497 break;
498 case 1024:
499 mtu = IB_MTU_1024;
500 break;
501 case 512:
502 mtu = IB_MTU_512;
503 break;
504 case 256:
505 mtu = IB_MTU_256;
506 break;
507 }
508 pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
509 pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */
510 pip->vl_high_limit = ibp->vl_high_limit;
511 pip->vl_arb_high_cap =
512 dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
513 pip->vl_arb_low_cap =
514 dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
515 /* InitTypeReply = 0 */
516 pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
517 /* HCAs ignore VLStallCount and HOQLife */
518 /* pip->vlstallcnt_hoqlife; */
519 pip->operationalvl_pei_peo_fpi_fpo =
520 dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
521 pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
522 /* P_KeyViolations are counted by hardware. */
523 pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
524 pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
525 /* Only the hardware GUID is supported for now */
526 pip->guid_cap = QIB_GUIDS_PER_PORT;
527 pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
528 /* 4.096 usec * 2^3 == 32.768 usec response time (a guess) */
529 pip->resv_resptimevalue = 3;
530 pip->localphyerrors_overrunerrors =
531 (get_phyerrthreshold(ppd) << 4) |
532 get_overrunthreshold(ppd);
533 /* pip->max_credit_hint; */
534 if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
535 u32 v;
536
537 v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
538 pip->link_roundtrip_latency[0] = v >> 16;
539 pip->link_roundtrip_latency[1] = v >> 8;
540 pip->link_roundtrip_latency[2] = v;
541 }
542
543 ret = reply(smp);
544
545bail:
546 return ret;
547}
548
549/**
550 * get_pkeys - return the PKEY table
551 * @dd: the qlogic_ib device
552 * @port: the IB port number
553 * @pkeys: the pkey table is placed here
554 */
555static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
556{
557 struct qib_pportdata *ppd = dd->pport + port - 1;
558 /*
559 * Always a kernel context, so no locking is needed.
560 * If we get here with ppd set up, there is no need
561 * to check that rcd is valid.
562 */
563 struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];
564
565 memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));
566
567 return 0;
568}
569
570static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
571 u8 port)
572{
573 u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
574 u16 *p = (u16 *) smp->data;
575 __be16 *q = (__be16 *) smp->data;
576
577 /* 64 blocks of 32 16-bit P_Key entries */
578
579 memset(smp->data, 0, sizeof(smp->data));
580 if (startpx == 0) {
581 struct qib_devdata *dd = dd_from_ibdev(ibdev);
582 unsigned i, n = qib_get_npkeys(dd);
583
584 get_pkeys(dd, port, p);
585
586 for (i = 0; i < n; i++)
587 q[i] = cpu_to_be16(p[i]);
588 } else
589 smp->status |= IB_SMP_INVALID_FIELD;
590
591 return reply(smp);
592}
593
594static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
595 u8 port)
596{
597 struct qib_devdata *dd = dd_from_ibdev(ibdev);
598 u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
599 __be64 *p = (__be64 *) smp->data;
600 unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
601
602 /* 32 blocks, each holding 8 64-bit GUIDs */
603
604 if (startgx == 0 && pidx < dd->num_pports) {
605 struct qib_pportdata *ppd = dd->pport + pidx;
606 struct qib_ibport *ibp = &ppd->ibport_data;
607 unsigned i;
608
609 /* The first entry is read-only. */
610 for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
611 ibp->guids[i - 1] = p[i];
612 } else
613 smp->status |= IB_SMP_INVALID_FIELD;
614
615 /* Reply with the updated table; entry 0 is always the read-only HW GUID. */
616 return subn_get_guidinfo(smp, ibdev, port);
617}
618
619/**
620 * subn_set_portinfo - set port information
621 * @smp: the incoming SM packet
622 * @ibdev: the infiniband device
623 * @port: the port on the device
624 *
625 * Set Portinfo (see ch. 14.2.5.6).
626 */
627static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
628 u8 port)
629{
630 struct ib_port_info *pip = (struct ib_port_info *)smp->data;
631 struct ib_event event;
632 struct qib_devdata *dd;
633 struct qib_pportdata *ppd;
634 struct qib_ibport *ibp;
635 char clientrereg = 0;
636 unsigned long flags;
637 u16 lid, smlid;
638 u8 lwe;
639 u8 lse;
640 u8 state;
641 u8 vls;
642 u8 msl;
643 u16 lstate;
644 int ret, ore, mtu;
645 u32 port_num = be32_to_cpu(smp->attr_mod);
646
647 if (port_num == 0)
648 port_num = port;
649 else {
650 if (port_num > ibdev->phys_port_cnt)
651 goto err;
652 /* Port attributes can only be set on the receiving port */
653 if (port_num != port)
654 goto get_only;
655 }
656
657 dd = dd_from_ibdev(ibdev);
658 /* IB numbers ports from 1, hdw from 0 */
659 ppd = dd->pport + (port_num - 1);
660 ibp = &ppd->ibport_data;
661 event.device = ibdev;
662 event.element.port_num = port;
663
664 ibp->mkey = pip->mkey;
665 ibp->gid_prefix = pip->gid_prefix;
666 ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
667
668 lid = be16_to_cpu(pip->lid);
669 /* Must be a valid unicast LID address. */
670 if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
671 goto err;
672 if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
673 if (ppd->lid != lid)
674 qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
675 if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
676 qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
677 qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
678 event.event = IB_EVENT_LID_CHANGE;
679 ib_dispatch_event(&event);
680 }
681
682 smlid = be16_to_cpu(pip->sm_lid);
683 msl = pip->neighbormtu_mastersmsl & 0xF;
684 /* Must be a valid unicast LID address. */
685 if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
686 goto err;
687 if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
688 spin_lock_irqsave(&ibp->lock, flags);
689 if (ibp->sm_ah) {
690 if (smlid != ibp->sm_lid)
691 ibp->sm_ah->attr.dlid = smlid;
692 if (msl != ibp->sm_sl)
693 ibp->sm_ah->attr.sl = msl;
694 }
695 spin_unlock_irqrestore(&ibp->lock, flags);
696 if (smlid != ibp->sm_lid)
697 ibp->sm_lid = smlid;
698 if (msl != ibp->sm_sl)
699 ibp->sm_sl = msl;
700 event.event = IB_EVENT_SM_CHANGE;
701 ib_dispatch_event(&event);
702 }
703
704 /* Allow 1x or 4x to be set (see 14.2.6.6). */
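 /*
 * LinkWidthEnabled is a bitmask (bit 0 = 1x, bit 1 = 4x); 0 leaves
 * it unchanged and 255 means "enable all supported widths".
 * Undefined bits (values >= 16) or unsupported widths are rejected.
 */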
705 lwe = pip->link_width_enabled;
706 if (lwe) {
707 if (lwe == 0xFF)
708 lwe = ppd->link_width_supported;
709 else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
710 goto err;
711 set_link_width_enabled(ppd, lwe);
712 }
713
714 lse = pip->linkspeedactive_enabled & 0xF;
715 if (lse) {
716 /*
717 * The IB 1.2 spec. only allows link speed values
718 * 1, 3, 5, 7, 15. 1.2.1 extended to allow specific
719 * speeds.
720 */
721 if (lse == 15)
722 lse = ppd->link_speed_supported;
723 else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
724 goto err;
725 set_link_speed_enabled(ppd, lse);
726 }
727
728 /* Set link down default state. */
729 switch (pip->portphysstate_linkdown & 0xF) {
730 case 0: /* NOP */
731 break;
732 case 1: /* SLEEP */
733 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
734 IB_LINKINITCMD_SLEEP);
735 break;
736 case 2: /* POLL */
737 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
738 IB_LINKINITCMD_POLL);
739 break;
740 default:
741 goto err;
742 }
743
744 ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
745 ibp->vl_high_limit = pip->vl_high_limit;
746 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
747 ibp->vl_high_limit);
748
749 mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
750 if (mtu == -1)
751 goto err;
752 qib_set_mtu(ppd, mtu);
753
754 /* Set operational VLs */
755 vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
756 if (vls) {
757 if (vls > ppd->vls_supported)
758 goto err;
759 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
760 }
761
762 if (pip->mkey_violations == 0)
763 ibp->mkey_violations = 0;
764
765 if (pip->pkey_violations == 0)
766 ibp->pkey_violations = 0;
767
768 if (pip->qkey_violations == 0)
769 ibp->qkey_violations = 0;
770
771 ore = pip->localphyerrors_overrunerrors;
772 if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
773 goto err;
774
775 if (set_overrunthreshold(ppd, (ore & 0xF)))
776 goto err;
777
778 ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
779
780 if (pip->clientrereg_resv_subnetto & 0x80) {
781 clientrereg = 1;
782 event.event = IB_EVENT_CLIENT_REREGISTER;
783 ib_dispatch_event(&event);
784 }
785
786 /*
787 * Do the port state change now that the other link parameters
788 * have been set.
789 * Changing the port physical state only makes sense if the link
790 * is down or is being set to down.
791 */
792 state = pip->linkspeed_portstate & 0xF;
793 lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
794 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
795 goto err;
796
797 /*
798 * Only state changes of DOWN, ARM, and ACTIVE are valid
799 * and must be in the correct state to take effect (see 7.2.6).
800 */
801 switch (state) {
802 case IB_PORT_NOP:
803 if (lstate == 0)
804 break;
805 /* FALLTHROUGH */
806 case IB_PORT_DOWN:
807 if (lstate == 0)
808 lstate = QIB_IB_LINKDOWN_ONLY;
809 else if (lstate == 1)
810 lstate = QIB_IB_LINKDOWN_SLEEP;
811 else if (lstate == 2)
812 lstate = QIB_IB_LINKDOWN;
813 else if (lstate == 3)
814 lstate = QIB_IB_LINKDOWN_DISABLE;
815 else
816 goto err;
817 spin_lock_irqsave(&ppd->lflags_lock, flags);
818 ppd->lflags &= ~QIBL_LINKV;
819 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
820 qib_set_linkstate(ppd, lstate);
821 /*
822 * Don't send a reply if the response would be sent
823 * through the disabled port.
824 */
825 if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
826 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
827 goto done;
828 }
829 qib_wait_linkstate(ppd, QIBL_LINKV, 10);
830 break;
831 case IB_PORT_ARMED:
832 qib_set_linkstate(ppd, QIB_IB_LINKARM);
833 break;
834 case IB_PORT_ACTIVE:
835 qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
836 break;
837 default:
838 /* XXX We have already partially updated our state! */
839 goto err;
840 }
841
842 ret = subn_get_portinfo(smp, ibdev, port);
843
844 if (clientrereg)
845 pip->clientrereg_resv_subnetto |= 0x80;
846
847 goto done;
848
849err:
850 smp->status |= IB_SMP_INVALID_FIELD;
851get_only:
852 ret = subn_get_portinfo(smp, ibdev, port);
853done:
854 return ret;
855}
856
857/**
858 * rm_pkey - decrement the reference count for the given PKEY
859 * @ppd: the physical port data
860 * @key: the PKEY value (including the membership bit)
861 *
862 * Return true if this was the last reference and the hardware table entry
863 * needs to be changed.
864 */
865static int rm_pkey(struct qib_pportdata *ppd, u16 key)
866{
867 int i;
868 int ret;
869
870 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
871 if (ppd->pkeys[i] != key)
872 continue;
873 if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
874 ppd->pkeys[i] = 0;
875 ret = 1;
876 goto bail;
877 }
878 break;
879 }
880
881 ret = 0;
882
883bail:
884 return ret;
885}
886
887/**
888 * add_pkey - add the given PKEY to the hardware table
889 * @ppd: the physical port data
890 * @key: the PKEY
891 *
892 * Return an error code if unable to add the entry, zero if no change,
893 * or 1 if the hardware PKEY register needs to be updated.
894 */
895static int add_pkey(struct qib_pportdata *ppd, u16 key)
896{
897 int i;
898 u16 lkey = key & 0x7FFF;
899 int any = 0;
900 int ret;
901
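 /* The default P_Key (0xFFFF/0x7FFF) is treated as always present. */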
902 if (lkey == 0x7FFF) {
903 ret = 0;
904 goto bail;
905 }
906
907 /* Look for an empty slot or a matching PKEY. */
908 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
909 if (!ppd->pkeys[i]) {
910 any++;
911 continue;
912 }
913 /* If it matches exactly, try to increment the ref count */
914 if (ppd->pkeys[i] == key) {
915 if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
916 ret = 0;
917 goto bail;
918 }
919 /* Lost the race. Look for an empty slot below. */
920 atomic_dec(&ppd->pkeyrefs[i]);
921 any++;
922 }
923 /*
924 * It makes no sense to have both the limited and unlimited
925 * PKEY set at the same time since the unlimited one will
926 * disable the limited one.
927 */
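 /* e.g. 0x8001 (unlimited) and 0x0001 (limited) share lkey 0x0001. */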
928 if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
929 ret = -EEXIST;
930 goto bail;
931 }
932 }
933 if (!any) {
934 ret = -EBUSY;
935 goto bail;
936 }
937 for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
938 if (!ppd->pkeys[i] &&
939 atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
940 /* for qibstats, etc. */
941 ppd->pkeys[i] = key;
942 ret = 1;
943 goto bail;
944 }
945 }
946 ret = -EBUSY;
947
948bail:
949 return ret;
950}
951
952/**
953 * set_pkeys - set the PKEY table for ctxt 0
954 * @dd: the qlogic_ib device
955 * @port: the IB port number
956 * @pkeys: the PKEY table
957 */
958static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
959{
960 struct qib_pportdata *ppd;
961 struct qib_ctxtdata *rcd;
962 int i;
963 int changed = 0;
964
965 /*
966 * IB ports one/two always map to contexts zero/one; these are
967 * always kernel contexts, so no locking is needed. If we get
968 * here with ppd set up, there is no need to check that rcd
969 * is valid.
970 */
971 ppd = dd->pport + (port - 1);
972 rcd = dd->rcd[ppd->hw_pidx];
973
974 for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
975 u16 key = pkeys[i];
976 u16 okey = rcd->pkeys[i];
977
978 if (key == okey)
979 continue;
980 /*
981 * The value of this PKEY table entry is changing.
982 * Remove the old entry in the hardware's array of PKEYs.
983 */
984 if (okey & 0x7FFF)
985 changed |= rm_pkey(ppd, okey);
986 if (key & 0x7FFF) {
987 int ret = add_pkey(ppd, key);
988
989 if (ret < 0)
990 key = 0;
991 else
992 changed |= ret;
993 }
994 rcd->pkeys[i] = key;
995 }
996 if (changed) {
997 struct ib_event event;
998
999 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
1000
1001 event.event = IB_EVENT_PKEY_CHANGE;
1002 event.device = &dd->verbs_dev.ibdev;
1003 event.element.port_num = 1;
1004 ib_dispatch_event(&event);
1005 }
1006 return 0;
1007}
1008
1009static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
1010 u8 port)
1011{
1012 u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
1013 __be16 *p = (__be16 *) smp->data;
1014 u16 *q = (u16 *) smp->data;
1015 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1016 unsigned i, n = qib_get_npkeys(dd);
1017
1018 for (i = 0; i < n; i++)
1019 q[i] = be16_to_cpu(p[i]);
1020
1021 if (startpx != 0 || set_pkeys(dd, port, q) != 0)
1022 smp->status |= IB_SMP_INVALID_FIELD;
1023
1024 return subn_get_pkeytable(smp, ibdev, port);
1025}
1026
1027static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
1028 u8 port)
1029{
1030 struct qib_ibport *ibp = to_iport(ibdev, port);
1031 u8 *p = (u8 *) smp->data;
1032 unsigned i;
1033
1034 memset(smp->data, 0, sizeof(smp->data));
1035
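 /* Each byte packs two 4-bit VL entries, even SLs in the high nibble. */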
1036 if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
1037 smp->status |= IB_SMP_UNSUP_METHOD;
1038 else
1039 for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
1040 *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];
1041
1042 return reply(smp);
1043}
1044
1045static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
1046 u8 port)
1047{
1048 struct qib_ibport *ibp = to_iport(ibdev, port);
1049 u8 *p = (u8 *) smp->data;
1050 unsigned i;
1051
1052 if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
1053 smp->status |= IB_SMP_UNSUP_METHOD;
1054 return reply(smp);
1055 }
1056
1057 for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
1058 ibp->sl_to_vl[i] = *p >> 4;
1059 ibp->sl_to_vl[i + 1] = *p & 0xF;
1060 }
1061 qib_set_uevent_bits(ppd_from_ibp(ibp),
1062 _QIB_EVENT_SL2VL_CHANGE_BIT);
1063
1064 return subn_get_sl_to_vl(smp, ibdev, port);
1065}
1066
1067static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
1068 u8 port)
1069{
1070 unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
1071 struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1072
1073 memset(smp->data, 0, sizeof(smp->data));
1074
1075 if (ppd->vls_supported == IB_VL_VL0)
1076 smp->status |= IB_SMP_UNSUP_METHOD;
1077 else if (which == IB_VLARB_LOWPRI_0_31)
1078 (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
1079 smp->data);
1080 else if (which == IB_VLARB_HIGHPRI_0_31)
1081 (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
1082 smp->data);
1083 else
1084 smp->status |= IB_SMP_INVALID_FIELD;
1085
1086 return reply(smp);
1087}
1088
1089static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
1090 u8 port)
1091{
1092 unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
1093 struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
1094
1095 if (ppd->vls_supported == IB_VL_VL0)
1096 smp->status |= IB_SMP_UNSUP_METHOD;
1097 else if (which == IB_VLARB_LOWPRI_0_31)
1098 (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
1099 smp->data);
1100 else if (which == IB_VLARB_HIGHPRI_0_31)
1101 (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
1102 smp->data);
1103 else
1104 smp->status |= IB_SMP_INVALID_FIELD;
1105
1106 return subn_get_vl_arb(smp, ibdev, port);
1107}
1108
1109static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
1110 u8 port)
1111{
1112 /*
1113 * For now, we only send the trap once so no need to process this.
1114 * o13-6, o13-7,
1115 * o14-3.a4 The SMA shall not send any message in response to a valid
1116 * SubnTrapRepress() message.
1117 */
1118 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
1119}
1120
1121static int pma_get_classportinfo(struct ib_perf *pmp,
1122 struct ib_device *ibdev)
1123{
1124 struct ib_pma_classportinfo *p =
1125 (struct ib_pma_classportinfo *)pmp->data;
1126 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1127
1128 memset(pmp->data, 0, sizeof(pmp->data));
1129
1130 if (pmp->attr_mod != 0)
1131 pmp->status |= IB_SMP_INVALID_FIELD;
1132
1133 /* Note that AllPortSelect is not valid */
1134 p->base_version = 1;
1135 p->class_version = 1;
1136 p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
1137 /*
1138 * Set the most significant bit of CM2 to indicate support for
1139 * congestion statistics
1140 */
1141 p->reserved[0] = dd->psxmitwait_supported << 7;
1142 /*
1143 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
1144 */
1145 p->resp_time_value = 18;
1146
1147 return reply((struct ib_smp *) pmp);
1148}
1149
1150static int pma_get_portsamplescontrol(struct ib_perf *pmp,
1151 struct ib_device *ibdev, u8 port)
1152{
1153 struct ib_pma_portsamplescontrol *p =
1154 (struct ib_pma_portsamplescontrol *)pmp->data;
1155 struct qib_ibdev *dev = to_idev(ibdev);
1156 struct qib_devdata *dd = dd_from_dev(dev);
1157 struct qib_ibport *ibp = to_iport(ibdev, port);
1158 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1159 unsigned long flags;
1160 u8 port_select = p->port_select;
1161
1162 memset(pmp->data, 0, sizeof(pmp->data));
1163
1164 p->port_select = port_select;
1165 if (pmp->attr_mod != 0 || port_select != port) {
1166 pmp->status |= IB_SMP_INVALID_FIELD;
1167 goto bail;
1168 }
1169 spin_lock_irqsave(&ibp->lock, flags);
1170 p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
1171 p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1172 p->counter_width = 4; /* 32 bit counters */
1173 p->counter_mask0_9 = COUNTER_MASK0_9;
1174 p->sample_start = cpu_to_be32(ibp->pma_sample_start);
1175 p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
1176 p->tag = cpu_to_be16(ibp->pma_tag);
1177 p->counter_select[0] = ibp->pma_counter_select[0];
1178 p->counter_select[1] = ibp->pma_counter_select[1];
1179 p->counter_select[2] = ibp->pma_counter_select[2];
1180 p->counter_select[3] = ibp->pma_counter_select[3];
1181 p->counter_select[4] = ibp->pma_counter_select[4];
1182 spin_unlock_irqrestore(&ibp->lock, flags);
1183
1184bail:
1185 return reply((struct ib_smp *) pmp);
1186}
1187
1188static int pma_set_portsamplescontrol(struct ib_perf *pmp,
1189 struct ib_device *ibdev, u8 port)
1190{
1191 struct ib_pma_portsamplescontrol *p =
1192 (struct ib_pma_portsamplescontrol *)pmp->data;
1193 struct qib_ibdev *dev = to_idev(ibdev);
1194 struct qib_devdata *dd = dd_from_dev(dev);
1195 struct qib_ibport *ibp = to_iport(ibdev, port);
1196 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1197 unsigned long flags;
1198 u8 status, xmit_flags;
1199 int ret;
1200
1201 if (pmp->attr_mod != 0 || p->port_select != port) {
1202 pmp->status |= IB_SMP_INVALID_FIELD;
1203 ret = reply((struct ib_smp *) pmp);
1204 goto bail;
1205 }
1206
1207 spin_lock_irqsave(&ibp->lock, flags);
1208
1209 /* Port Sampling code owns the PS* HW counters */
1210 xmit_flags = ppd->cong_stats.flags;
1211 ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
1212 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1213 if (status == IB_PMA_SAMPLE_STATUS_DONE ||
1214 (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
1215 xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
1216 ibp->pma_sample_start = be32_to_cpu(p->sample_start);
1217 ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
1218 ibp->pma_tag = be16_to_cpu(p->tag);
1219 ibp->pma_counter_select[0] = p->counter_select[0];
1220 ibp->pma_counter_select[1] = p->counter_select[1];
1221 ibp->pma_counter_select[2] = p->counter_select[2];
1222 ibp->pma_counter_select[3] = p->counter_select[3];
1223 ibp->pma_counter_select[4] = p->counter_select[4];
1224 dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
1225 ibp->pma_sample_start);
1226 }
1227 spin_unlock_irqrestore(&ibp->lock, flags);
1228
1229 ret = pma_get_portsamplescontrol(pmp, ibdev, port);
1230
1231bail:
1232 return ret;
1233}
1234
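/*
 * Read the PS* sampling hardware counter selected by a PMA
 * counter_select value; unknown selectors read as zero.
 */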
1235static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
1236 __be16 sel)
1237{
1238 u64 ret;
1239
1240 switch (sel) {
1241 case IB_PMA_PORT_XMIT_DATA:
1242 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
1243 break;
1244 case IB_PMA_PORT_RCV_DATA:
1245 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
1246 break;
1247 case IB_PMA_PORT_XMIT_PKTS:
1248 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
1249 break;
1250 case IB_PMA_PORT_RCV_PKTS:
1251 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
1252 break;
1253 case IB_PMA_PORT_XMIT_WAIT:
1254 ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
1255 break;
1256 default:
1257 ret = 0;
1258 }
1259
1260 return ret;
1261}
1262
1263/* This function assumes that the xmit_wait lock is already held */
1264static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
1265{
1266 u32 delta;
1267
1268 delta = get_counter(&ppd->ibport_data, ppd,
1269 IB_PMA_PORT_XMIT_WAIT);
1270 return ppd->cong_stats.counter + delta;
1271}
1272
1273static void cache_hw_sample_counters(struct qib_pportdata *ppd)
1274{
1275 struct qib_ibport *ibp = &ppd->ibport_data;
1276
1277 ppd->cong_stats.counter_cache.psxmitdata =
1278 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
1279 ppd->cong_stats.counter_cache.psrcvdata =
1280 get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
1281 ppd->cong_stats.counter_cache.psxmitpkts =
1282 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
1283 ppd->cong_stats.counter_cache.psrcvpkts =
1284 get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
1285 ppd->cong_stats.counter_cache.psxmitwait =
1286 get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
1287}
1288
1289static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
1290 __be16 sel)
1291{
1292 u64 ret;
1293
1294 switch (sel) {
1295 case IB_PMA_PORT_XMIT_DATA:
1296 ret = ppd->cong_stats.counter_cache.psxmitdata;
1297 break;
1298 case IB_PMA_PORT_RCV_DATA:
1299 ret = ppd->cong_stats.counter_cache.psrcvdata;
1300 break;
1301 case IB_PMA_PORT_XMIT_PKTS:
1302 ret = ppd->cong_stats.counter_cache.psxmitpkts;
1303 break;
1304 case IB_PMA_PORT_RCV_PKTS:
1305 ret = ppd->cong_stats.counter_cache.psrcvpkts;
1306 break;
1307 case IB_PMA_PORT_XMIT_WAIT:
1308 ret = ppd->cong_stats.counter_cache.psxmitwait;
1309 break;
1310 default:
1311 ret = 0;
1312 }
1313
1314 return ret;
1315}
1316
1317static int pma_get_portsamplesresult(struct ib_perf *pmp,
1318 struct ib_device *ibdev, u8 port)
1319{
1320 struct ib_pma_portsamplesresult *p =
1321 (struct ib_pma_portsamplesresult *)pmp->data;
1322 struct qib_ibdev *dev = to_idev(ibdev);
1323 struct qib_devdata *dd = dd_from_dev(dev);
1324 struct qib_ibport *ibp = to_iport(ibdev, port);
1325 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1326 unsigned long flags;
1327 u8 status;
1328 int i;
1329
1330 memset(pmp->data, 0, sizeof(pmp->data));
1331 spin_lock_irqsave(&ibp->lock, flags);
1332 p->tag = cpu_to_be16(ibp->pma_tag);
1333 if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
1334 p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
1335 else {
1336 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1337 p->sample_status = cpu_to_be16(status);
1338 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
1339 cache_hw_sample_counters(ppd);
1340 ppd->cong_stats.counter =
1341 xmit_wait_get_value_delta(ppd);
1342 dd->f_set_cntr_sample(ppd,
1343 QIB_CONG_TIMER_PSINTERVAL, 0);
1344 ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
1345 }
1346 }
1347 for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
1348 p->counter[i] = cpu_to_be32(
1349 get_cache_hw_sample_counters(
1350 ppd, ibp->pma_counter_select[i]));
1351 spin_unlock_irqrestore(&ibp->lock, flags);
1352
1353 return reply((struct ib_smp *) pmp);
1354}
1355
1356static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
1357 struct ib_device *ibdev, u8 port)
1358{
1359 struct ib_pma_portsamplesresult_ext *p =
1360 (struct ib_pma_portsamplesresult_ext *)pmp->data;
1361 struct qib_ibdev *dev = to_idev(ibdev);
1362 struct qib_devdata *dd = dd_from_dev(dev);
1363 struct qib_ibport *ibp = to_iport(ibdev, port);
1364 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1365 unsigned long flags;
1366 u8 status;
1367 int i;
1368
1369 /* Port Sampling code owns the PS* HW counters */
1370 memset(pmp->data, 0, sizeof(pmp->data));
1371 spin_lock_irqsave(&ibp->lock, flags);
1372 p->tag = cpu_to_be16(ibp->pma_tag);
1373 if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
1374 p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
1375 else {
1376 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
1377 p->sample_status = cpu_to_be16(status);
1378 /* 64 bits */
1379 p->extended_width = cpu_to_be32(0x80000000);
1380 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
1381 cache_hw_sample_counters(ppd);
1382 ppd->cong_stats.counter =
1383 xmit_wait_get_value_delta(ppd);
1384 dd->f_set_cntr_sample(ppd,
1385 QIB_CONG_TIMER_PSINTERVAL, 0);
1386 ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
1387 }
1388 }
1389 for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
1390 p->counter[i] = cpu_to_be64(
1391 get_cache_hw_sample_counters(
1392 ppd, ibp->pma_counter_select[i]));
1393 spin_unlock_irqrestore(&ibp->lock, flags);
1394
1395 return reply((struct ib_smp *) pmp);
1396}
1397
1398static int pma_get_portcounters(struct ib_perf *pmp,
1399 struct ib_device *ibdev, u8 port)
1400{
1401 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1402 pmp->data;
1403 struct qib_ibport *ibp = to_iport(ibdev, port);
1404 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1405 struct qib_verbs_counters cntrs;
1406 u8 port_select = p->port_select;
1407
1408 qib_get_counters(ppd, &cntrs);
1409
1410 /* Adjust counters for any resets done. */
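 /*
 * The hardware cannot clear its counters, so a PortCounters Set()
 * snapshots the current values into the z_* fields and every Get()
 * subtracts those snapshots (see pma_set_portcounters() below).
 */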
1411 cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
1412 cntrs.link_error_recovery_counter -=
1413 ibp->z_link_error_recovery_counter;
1414 cntrs.link_downed_counter -= ibp->z_link_downed_counter;
1415 cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
1416 cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
1417 cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
1418 cntrs.port_xmit_data -= ibp->z_port_xmit_data;
1419 cntrs.port_rcv_data -= ibp->z_port_rcv_data;
1420 cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
1421 cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
1422 cntrs.local_link_integrity_errors -=
1423 ibp->z_local_link_integrity_errors;
1424 cntrs.excessive_buffer_overrun_errors -=
1425 ibp->z_excessive_buffer_overrun_errors;
1426 cntrs.vl15_dropped -= ibp->z_vl15_dropped;
1427 cntrs.vl15_dropped += ibp->n_vl15_dropped;
1428
1429 memset(pmp->data, 0, sizeof(pmp->data));
1430
1431 p->port_select = port_select;
1432 if (pmp->attr_mod != 0 || port_select != port)
1433 pmp->status |= IB_SMP_INVALID_FIELD;
1434
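 /*
 * PMA counters saturate at their maximum value instead of
 * wrapping, hence the clamping below.
 */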
1435 if (cntrs.symbol_error_counter > 0xFFFFUL)
1436 p->symbol_error_counter = cpu_to_be16(0xFFFF);
1437 else
1438 p->symbol_error_counter =
1439 cpu_to_be16((u16)cntrs.symbol_error_counter);
1440 if (cntrs.link_error_recovery_counter > 0xFFUL)
1441 p->link_error_recovery_counter = 0xFF;
1442 else
1443 p->link_error_recovery_counter =
1444 (u8)cntrs.link_error_recovery_counter;
1445 if (cntrs.link_downed_counter > 0xFFUL)
1446 p->link_downed_counter = 0xFF;
1447 else
1448 p->link_downed_counter = (u8)cntrs.link_downed_counter;
1449 if (cntrs.port_rcv_errors > 0xFFFFUL)
1450 p->port_rcv_errors = cpu_to_be16(0xFFFF);
1451 else
1452 p->port_rcv_errors =
1453 cpu_to_be16((u16) cntrs.port_rcv_errors);
1454 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1455 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
1456 else
1457 p->port_rcv_remphys_errors =
1458 cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
1459 if (cntrs.port_xmit_discards > 0xFFFFUL)
1460 p->port_xmit_discards = cpu_to_be16(0xFFFF);
1461 else
1462 p->port_xmit_discards =
1463 cpu_to_be16((u16)cntrs.port_xmit_discards);
1464 if (cntrs.local_link_integrity_errors > 0xFUL)
1465 cntrs.local_link_integrity_errors = 0xFUL;
1466 if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1467 cntrs.excessive_buffer_overrun_errors = 0xFUL;
1468 p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
1469 cntrs.excessive_buffer_overrun_errors;
1470 if (cntrs.vl15_dropped > 0xFFFFUL)
1471 p->vl15_dropped = cpu_to_be16(0xFFFF);
1472 else
1473 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
1474 if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
1475 p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
1476 else
1477 p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
1478 if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
1479 p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
1480 else
1481 p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
1482 if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
1483 p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
1484 else
1485 p->port_xmit_packets =
1486 cpu_to_be32((u32)cntrs.port_xmit_packets);
1487 if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
1488 p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
1489 else
1490 p->port_rcv_packets =
1491 cpu_to_be32((u32) cntrs.port_rcv_packets);
1492
1493 return reply((struct ib_smp *) pmp);
1494}
1495
1496static int pma_get_portcounters_cong(struct ib_perf *pmp,
1497 struct ib_device *ibdev, u8 port)
1498{
1499 /* Congestion PMA counters start at offset 24 (pmp->reserved), not 64 */
1500 struct ib_pma_portcounters_cong *p =
1501 (struct ib_pma_portcounters_cong *)pmp->reserved;
1502 struct qib_verbs_counters cntrs;
1503 struct qib_ibport *ibp = to_iport(ibdev, port);
1504 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1505 struct qib_devdata *dd = dd_from_ppd(ppd);
1506 u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
1507 u64 xmit_wait_counter;
1508 unsigned long flags;
1509
1510 /*
1511 * This check is performed only in the GET method because the
1512 * SET method ends up calling this anyway.
1513 */
1514 if (!dd->psxmitwait_supported)
1515 pmp->status |= IB_SMP_UNSUP_METH_ATTR;
1516 if (port_select != port)
1517 pmp->status |= IB_SMP_INVALID_FIELD;
1518
1519 qib_get_counters(ppd, &cntrs);
1520 spin_lock_irqsave(&ppd->ibport_data.lock, flags);
1521 xmit_wait_counter = xmit_wait_get_value_delta(ppd);
1522 spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
1523
1524 /* Adjust counters for any resets done. */
1525 cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
1526 cntrs.link_error_recovery_counter -=
1527 ibp->z_link_error_recovery_counter;
1528 cntrs.link_downed_counter -= ibp->z_link_downed_counter;
1529 cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
1530 cntrs.port_rcv_remphys_errors -=
1531 ibp->z_port_rcv_remphys_errors;
1532 cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
1533 cntrs.local_link_integrity_errors -=
1534 ibp->z_local_link_integrity_errors;
1535 cntrs.excessive_buffer_overrun_errors -=
1536 ibp->z_excessive_buffer_overrun_errors;
1537 cntrs.vl15_dropped -= ibp->z_vl15_dropped;
1538 cntrs.vl15_dropped += ibp->n_vl15_dropped;
1539 cntrs.port_xmit_data -= ibp->z_port_xmit_data;
1540 cntrs.port_rcv_data -= ibp->z_port_rcv_data;
1541 cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
1542 cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
1543
1544 memset(pmp->reserved, 0, sizeof(pmp->reserved) +
1545 sizeof(pmp->data));
1546
1547 /*
1548 * The top 3 bits of port_check_rate select the unit (picoseconds
1549 * here); the remaining 13 bits hold the check rate itself.
1550 */
1551 p->port_check_rate =
1552 cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
1553 (dd->psxmitwait_check_rate &
1554 ~(QIB_XMIT_RATE_PICO << 13)));
1555 p->port_adr_events = cpu_to_be64(0);
1556 p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
1557 p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
1558 p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
1559 p->port_xmit_packets =
1560 cpu_to_be64(cntrs.port_xmit_packets);
1561 p->port_rcv_packets =
1562 cpu_to_be64(cntrs.port_rcv_packets);
1563 if (cntrs.symbol_error_counter > 0xFFFFUL)
1564 p->symbol_error_counter = cpu_to_be16(0xFFFF);
1565 else
1566 p->symbol_error_counter =
1567 cpu_to_be16(
1568 (u16)cntrs.symbol_error_counter);
1569 if (cntrs.link_error_recovery_counter > 0xFFUL)
1570 p->link_error_recovery_counter = 0xFF;
1571 else
1572 p->link_error_recovery_counter =
1573 (u8)cntrs.link_error_recovery_counter;
1574 if (cntrs.link_downed_counter > 0xFFUL)
1575 p->link_downed_counter = 0xFF;
1576 else
1577 p->link_downed_counter =
1578 (u8)cntrs.link_downed_counter;
1579 if (cntrs.port_rcv_errors > 0xFFFFUL)
1580 p->port_rcv_errors = cpu_to_be16(0xFFFF);
1581 else
1582 p->port_rcv_errors =
1583 cpu_to_be16((u16) cntrs.port_rcv_errors);
1584 if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
1585 p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
1586 else
1587 p->port_rcv_remphys_errors =
1588 cpu_to_be16(
1589 (u16)cntrs.port_rcv_remphys_errors);
1590 if (cntrs.port_xmit_discards > 0xFFFFUL)
1591 p->port_xmit_discards = cpu_to_be16(0xFFFF);
1592 else
1593 p->port_xmit_discards =
1594 cpu_to_be16((u16)cntrs.port_xmit_discards);
1595 if (cntrs.local_link_integrity_errors > 0xFUL)
1596 cntrs.local_link_integrity_errors = 0xFUL;
1597 if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
1598 cntrs.excessive_buffer_overrun_errors = 0xFUL;
1599 p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
1600 cntrs.excessive_buffer_overrun_errors;
1601 if (cntrs.vl15_dropped > 0xFFFFUL)
1602 p->vl15_dropped = cpu_to_be16(0xFFFF);
1603 else
1604 p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
1605
1606 return reply((struct ib_smp *)pmp);
1607}
1608
1609static int pma_get_portcounters_ext(struct ib_perf *pmp,
1610 struct ib_device *ibdev, u8 port)
1611{
1612 struct ib_pma_portcounters_ext *p =
1613 (struct ib_pma_portcounters_ext *)pmp->data;
1614 struct qib_ibport *ibp = to_iport(ibdev, port);
1615 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1616 u64 swords, rwords, spkts, rpkts, xwait;
1617 u8 port_select = p->port_select;
1618
1619 memset(pmp->data, 0, sizeof(pmp->data));
1620
1621 p->port_select = port_select;
1622 if (pmp->attr_mod != 0 || port_select != port) {
1623 pmp->status |= IB_SMP_INVALID_FIELD;
1624 goto bail;
1625 }
1626
1627 qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
1628
1629 /* Adjust counters for any resets done. */
1630 swords -= ibp->z_port_xmit_data;
1631 rwords -= ibp->z_port_rcv_data;
1632 spkts -= ibp->z_port_xmit_packets;
1633 rpkts -= ibp->z_port_rcv_packets;
1634
1635 p->port_xmit_data = cpu_to_be64(swords);
1636 p->port_rcv_data = cpu_to_be64(rwords);
1637 p->port_xmit_packets = cpu_to_be64(spkts);
1638 p->port_rcv_packets = cpu_to_be64(rpkts);
1639 p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
1640 p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
1641 p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
1642 p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);
1643
1644bail:
1645 return reply((struct ib_smp *) pmp);
1646}
1647
1648static int pma_set_portcounters(struct ib_perf *pmp,
1649 struct ib_device *ibdev, u8 port)
1650{
1651 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1652 pmp->data;
1653 struct qib_ibport *ibp = to_iport(ibdev, port);
1654 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1655 struct qib_verbs_counters cntrs;
1656
1657 /*
1658 * Since the HW doesn't support clearing counters, we save the
1659 * current count and subtract it from future responses.
1660 */
1661 qib_get_counters(ppd, &cntrs);
1662
1663 if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
1664 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1665
1666 if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
1667 ibp->z_link_error_recovery_counter =
1668 cntrs.link_error_recovery_counter;
1669
1670 if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
1671 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1672
1673 if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
1674 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1675
1676 if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
1677 ibp->z_port_rcv_remphys_errors =
1678 cntrs.port_rcv_remphys_errors;
1679
1680 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
1681 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1682
1683 if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
1684 ibp->z_local_link_integrity_errors =
1685 cntrs.local_link_integrity_errors;
1686
1687 if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
1688 ibp->z_excessive_buffer_overrun_errors =
1689 cntrs.excessive_buffer_overrun_errors;
1690
1691 if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
1692 ibp->n_vl15_dropped = 0;
1693 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1694 }
1695
1696 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
1697 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1698
1699 if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
1700 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1701
1702 if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
1703 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1704
1705 if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
1706 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1707
1708 return pma_get_portcounters(pmp, ibdev, port);
1709}
1710
1711static int pma_set_portcounters_cong(struct ib_perf *pmp,
1712 struct ib_device *ibdev, u8 port)
1713{
1714 struct qib_ibport *ibp = to_iport(ibdev, port);
1715 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1716 struct qib_devdata *dd = dd_from_ppd(ppd);
1717 struct qib_verbs_counters cntrs;
1718 u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
1719 int ret = 0;
1720 unsigned long flags;
1721
1722 qib_get_counters(ppd, &cntrs);
1723 /* Get counter values before we save them */
1724 ret = pma_get_portcounters_cong(pmp, ibdev, port);
1725
1726 if (counter_select & IB_PMA_SEL_CONG_XMIT) {
1727 spin_lock_irqsave(&ppd->ibport_data.lock, flags);
1728 ppd->cong_stats.counter = 0;
1729 dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
1730 0x0);
1731 spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
1732 }
1733 if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
1734 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1735 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1736 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1737 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1738 }
1739 if (counter_select & IB_PMA_SEL_CONG_ALL) {
1740 ibp->z_symbol_error_counter =
1741 cntrs.symbol_error_counter;
1742 ibp->z_link_error_recovery_counter =
1743 cntrs.link_error_recovery_counter;
1744 ibp->z_link_downed_counter =
1745 cntrs.link_downed_counter;
1746 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1747 ibp->z_port_rcv_remphys_errors =
1748 cntrs.port_rcv_remphys_errors;
1749 ibp->z_port_xmit_discards =
1750 cntrs.port_xmit_discards;
1751 ibp->z_local_link_integrity_errors =
1752 cntrs.local_link_integrity_errors;
1753 ibp->z_excessive_buffer_overrun_errors =
1754 cntrs.excessive_buffer_overrun_errors;
1755 ibp->n_vl15_dropped = 0;
1756 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1757 }
1758
1759 return ret;
1760}
1761
1762static int pma_set_portcounters_ext(struct ib_perf *pmp,
1763 struct ib_device *ibdev, u8 port)
1764{
1765 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
1766 pmp->data;
1767 struct qib_ibport *ibp = to_iport(ibdev, port);
1768 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1769 u64 swords, rwords, spkts, rpkts, xwait;
1770
1771 qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
1772
1773 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
1774 ibp->z_port_xmit_data = swords;
1775
1776 if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
1777 ibp->z_port_rcv_data = rwords;
1778
1779 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
1780 ibp->z_port_xmit_packets = spkts;
1781
1782 if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
1783 ibp->z_port_rcv_packets = rpkts;
1784
1785 if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
1786 ibp->n_unicast_xmit = 0;
1787
1788 if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
1789 ibp->n_unicast_rcv = 0;
1790
1791 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
1792 ibp->n_multicast_xmit = 0;
1793
1794 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
1795 ibp->n_multicast_rcv = 0;
1796
1797 return pma_get_portcounters_ext(pmp, ibdev, port);
1798}
1799
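/**
 * process_subn - process an incoming subnet management packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: flags passed down from the MAD layer
 * @port: the port number this packet came in on
 * @in_mad: the incoming MAD
 * @out_mad: the MAD to send in reply
 */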
1800static int process_subn(struct ib_device *ibdev, int mad_flags,
1801 u8 port, struct ib_mad *in_mad,
1802 struct ib_mad *out_mad)
1803{
1804 struct ib_smp *smp = (struct ib_smp *)out_mad;
1805 struct qib_ibport *ibp = to_iport(ibdev, port);
1806 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1807 int ret;
1808
1809 *out_mad = *in_mad;
1810 if (smp->class_version != 1) {
1811 smp->status |= IB_SMP_UNSUP_VERSION;
1812 ret = reply(smp);
1813 goto bail;
1814 }
1815
1816 ret = check_mkey(ibp, smp, mad_flags);
1817 if (ret) {
1818 u32 port_num = be32_to_cpu(smp->attr_mod);
1819
1820 /*
1821 * For a get/set PortInfo aimed at another port, that port's
1822 * M_Key is only checked by subn_*_portinfo() when the check
1823 * passes on the receiving port. Check the target port here
1824 * too, so its violation counter is incremented when the
1825 * M_Key fails to match on *both* ports.
1826 */
1827 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
1828 (smp->method == IB_MGMT_METHOD_GET ||
1829 smp->method == IB_MGMT_METHOD_SET) &&
1830 port_num && port_num <= ibdev->phys_port_cnt &&
1831 port != port_num)
1832 (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
1833 goto bail;
1834 }
1835
1836 switch (smp->method) {
1837 case IB_MGMT_METHOD_GET:
1838 switch (smp->attr_id) {
1839 case IB_SMP_ATTR_NODE_DESC:
1840 ret = subn_get_nodedescription(smp, ibdev);
1841 goto bail;
1842 case IB_SMP_ATTR_NODE_INFO:
1843 ret = subn_get_nodeinfo(smp, ibdev, port);
1844 goto bail;
1845 case IB_SMP_ATTR_GUID_INFO:
1846 ret = subn_get_guidinfo(smp, ibdev, port);
1847 goto bail;
1848 case IB_SMP_ATTR_PORT_INFO:
1849 ret = subn_get_portinfo(smp, ibdev, port);
1850 goto bail;
1851 case IB_SMP_ATTR_PKEY_TABLE:
1852 ret = subn_get_pkeytable(smp, ibdev, port);
1853 goto bail;
1854 case IB_SMP_ATTR_SL_TO_VL_TABLE:
1855 ret = subn_get_sl_to_vl(smp, ibdev, port);
1856 goto bail;
1857 case IB_SMP_ATTR_VL_ARB_TABLE:
1858 ret = subn_get_vl_arb(smp, ibdev, port);
1859 goto bail;
1860 case IB_SMP_ATTR_SM_INFO:
1861 if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
1862 ret = IB_MAD_RESULT_SUCCESS |
1863 IB_MAD_RESULT_CONSUMED;
1864 goto bail;
1865 }
1866 if (ibp->port_cap_flags & IB_PORT_SM) {
1867 ret = IB_MAD_RESULT_SUCCESS;
1868 goto bail;
1869 }
1870 /* FALLTHROUGH */
1871 default:
1872 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1873 ret = reply(smp);
1874 goto bail;
1875 }
1876
1877 case IB_MGMT_METHOD_SET:
1878 switch (smp->attr_id) {
1879 case IB_SMP_ATTR_GUID_INFO:
1880 ret = subn_set_guidinfo(smp, ibdev, port);
1881 goto bail;
1882 case IB_SMP_ATTR_PORT_INFO:
1883 ret = subn_set_portinfo(smp, ibdev, port);
1884 goto bail;
1885 case IB_SMP_ATTR_PKEY_TABLE:
1886 ret = subn_set_pkeytable(smp, ibdev, port);
1887 goto bail;
1888 case IB_SMP_ATTR_SL_TO_VL_TABLE:
1889 ret = subn_set_sl_to_vl(smp, ibdev, port);
1890 goto bail;
1891 case IB_SMP_ATTR_VL_ARB_TABLE:
1892 ret = subn_set_vl_arb(smp, ibdev, port);
1893 goto bail;
1894 case IB_SMP_ATTR_SM_INFO:
1895 if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
1896 ret = IB_MAD_RESULT_SUCCESS |
1897 IB_MAD_RESULT_CONSUMED;
1898 goto bail;
1899 }
1900 if (ibp->port_cap_flags & IB_PORT_SM) {
1901 ret = IB_MAD_RESULT_SUCCESS;
1902 goto bail;
1903 }
1904 /* FALLTHROUGH */
1905 default:
1906 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1907 ret = reply(smp);
1908 goto bail;
1909 }
1910
1911 case IB_MGMT_METHOD_TRAP_REPRESS:
1912 if (smp->attr_id == IB_SMP_ATTR_NOTICE)
1913 ret = subn_trap_repress(smp, ibdev, port);
1914 else {
1915 smp->status |= IB_SMP_UNSUP_METH_ATTR;
1916 ret = reply(smp);
1917 }
1918 goto bail;
1919
1920 case IB_MGMT_METHOD_TRAP:
1921 case IB_MGMT_METHOD_REPORT:
1922 case IB_MGMT_METHOD_REPORT_RESP:
1923 case IB_MGMT_METHOD_GET_RESP:
1924 /*
1925 * The ib_mad module will call us to process responses
1926 * before checking for other consumers.
1927 * Just tell the caller to process it normally.
1928 */
1929 ret = IB_MAD_RESULT_SUCCESS;
1930 goto bail;
1931
1932 case IB_MGMT_METHOD_SEND:
1933 if (ib_get_smp_direction(smp) &&
1934 smp->attr_id == QIB_VENDOR_IPG) {
1935 ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
1936 smp->data[0]);
1937 ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
1938 } else
1939 ret = IB_MAD_RESULT_SUCCESS;
1940 goto bail;
1941
1942 default:
1943 smp->status |= IB_SMP_UNSUP_METHOD;
1944 ret = reply(smp);
1945 }
1946
1947bail:
1948 return ret;
1949}
1950
1951static int process_perf(struct ib_device *ibdev, u8 port,
1952 struct ib_mad *in_mad,
1953 struct ib_mad *out_mad)
1954{
1955 struct ib_perf *pmp = (struct ib_perf *)out_mad;
1956 int ret;
1957
1958 *out_mad = *in_mad;
1959 if (pmp->class_version != 1) {
1960 pmp->status |= IB_SMP_UNSUP_VERSION;
1961 ret = reply((struct ib_smp *) pmp);
1962 goto bail;
1963 }
1964
1965 switch (pmp->method) {
1966 case IB_MGMT_METHOD_GET:
1967 switch (pmp->attr_id) {
1968 case IB_PMA_CLASS_PORT_INFO:
1969 ret = pma_get_classportinfo(pmp, ibdev);
1970 goto bail;
1971 case IB_PMA_PORT_SAMPLES_CONTROL:
1972 ret = pma_get_portsamplescontrol(pmp, ibdev, port);
1973 goto bail;
1974 case IB_PMA_PORT_SAMPLES_RESULT:
1975 ret = pma_get_portsamplesresult(pmp, ibdev, port);
1976 goto bail;
1977 case IB_PMA_PORT_SAMPLES_RESULT_EXT:
1978 ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
1979 goto bail;
1980 case IB_PMA_PORT_COUNTERS:
1981 ret = pma_get_portcounters(pmp, ibdev, port);
1982 goto bail;
1983 case IB_PMA_PORT_COUNTERS_EXT:
1984 ret = pma_get_portcounters_ext(pmp, ibdev, port);
1985 goto bail;
1986 case IB_PMA_PORT_COUNTERS_CONG:
1987 ret = pma_get_portcounters_cong(pmp, ibdev, port);
1988 goto bail;
1989 default:
1990 pmp->status |= IB_SMP_UNSUP_METH_ATTR;
1991 ret = reply((struct ib_smp *) pmp);
1992 goto bail;
1993 }
1994
1995 case IB_MGMT_METHOD_SET:
1996 switch (pmp->attr_id) {
1997 case IB_PMA_PORT_SAMPLES_CONTROL:
1998 ret = pma_set_portsamplescontrol(pmp, ibdev, port);
1999 goto bail;
2000 case IB_PMA_PORT_COUNTERS:
2001 ret = pma_set_portcounters(pmp, ibdev, port);
2002 goto bail;
2003 case IB_PMA_PORT_COUNTERS_EXT:
2004 ret = pma_set_portcounters_ext(pmp, ibdev, port);
2005 goto bail;
2006 case IB_PMA_PORT_COUNTERS_CONG:
2007 ret = pma_set_portcounters_cong(pmp, ibdev, port);
2008 goto bail;
2009 default:
2010 pmp->status |= IB_SMP_UNSUP_METH_ATTR;
2011 ret = reply((struct ib_smp *) pmp);
2012 goto bail;
2013 }
2014
2015 case IB_MGMT_METHOD_TRAP:
2016 case IB_MGMT_METHOD_GET_RESP:
2017 /*
2018 * The ib_mad module will call us to process responses
2019 * before checking for other consumers.
2020 * Just tell the caller to process it normally.
2021 */
2022 ret = IB_MAD_RESULT_SUCCESS;
2023 goto bail;
2024
2025 default:
2026 pmp->status |= IB_SMP_UNSUP_METHOD;
2027 ret = reply((struct ib_smp *) pmp);
2028 }
2029
2030bail:
2031 return ret;
2032}
2033
2034/**
2035 * qib_process_mad - process an incoming MAD packet
2036 * @ibdev: the infiniband device this packet came in on
2037 * @mad_flags: MAD flags
2038 * @port: the port number this packet came in on
2039 * @in_wc: the work completion entry for this packet
2040 * @in_grh: the global route header for this packet
2041 * @in_mad: the incoming MAD
2042 * @out_mad: any outgoing MAD reply
2043 *
2044 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
2045 * interested in processing.
2046 *
2047 * Note that the verbs framework has already done the MAD sanity checks,
2048 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
2049 * MADs.
2050 *
2051 * This is called by the ib_mad module.
2052 */
2053int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
2054 struct ib_wc *in_wc, struct ib_grh *in_grh,
2055 struct ib_mad *in_mad, struct ib_mad *out_mad)
2056{
2057 int ret;
2058
2059 switch (in_mad->mad_hdr.mgmt_class) {
2060 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
2061 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
2062 ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
2063 goto bail;
2064
2065 case IB_MGMT_CLASS_PERF_MGMT:
2066 ret = process_perf(ibdev, port, in_mad, out_mad);
2067 goto bail;
2068
2069 default:
2070 ret = IB_MAD_RESULT_SUCCESS;
2071 }
2072
2073bail:
2074 return ret;
2075}
2076
2077static void send_handler(struct ib_mad_agent *agent,
2078 struct ib_mad_send_wc *mad_send_wc)
2079{
2080 ib_free_send_mad(mad_send_wc->send_buf);
2081}
2082
2083static void xmit_wait_timer_func(unsigned long opaque)
2084{
2085 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
2086 struct qib_devdata *dd = dd_from_ppd(ppd);
2087 unsigned long flags;
2088 u8 status;
2089
2090 spin_lock_irqsave(&ppd->ibport_data.lock, flags);
2091 if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
2092 status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
2093 if (status == IB_PMA_SAMPLE_STATUS_DONE) {
2094 /* save counter cache */
2095 cache_hw_sample_counters(ppd);
2096 ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
2097 } else
2098 goto done;
2099 }
2100 ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
2101 dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
2102done:
2103 spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
2104 mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
2105}
2106
2107int qib_create_agents(struct qib_ibdev *dev)
2108{
2109 struct qib_devdata *dd = dd_from_dev(dev);
2110 struct ib_mad_agent *agent;
2111 struct qib_ibport *ibp;
2112 int p;
2113 int ret;
2114
2115 for (p = 0; p < dd->num_pports; p++) {
2116 ibp = &dd->pport[p].ibport_data;
2117 agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
2118 NULL, 0, send_handler,
2119 NULL, NULL);
2120 if (IS_ERR(agent)) {
2121 ret = PTR_ERR(agent);
2122 goto err;
2123 }
2124
2125 /* Initialize xmit_wait structure */
2126 dd->pport[p].cong_stats.counter = 0;
2127 init_timer(&dd->pport[p].cong_stats.timer);
2128 dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
2129 dd->pport[p].cong_stats.timer.data =
2130 (unsigned long)(&dd->pport[p]);
2131 dd->pport[p].cong_stats.timer.expires = 0;
2132 add_timer(&dd->pport[p].cong_stats.timer);
2133
2134 ibp->send_agent = agent;
2135 }
2136
2137 return 0;
2138
2139err:
2140 for (p = 0; p < dd->num_pports; p++) {
2141 ibp = &dd->pport[p].ibport_data;
2142 if (ibp->send_agent) {
2143 agent = ibp->send_agent;
2144 ibp->send_agent = NULL;
2145 ib_unregister_mad_agent(agent);
2146 }
2147 }
2148
2149 return ret;
2150}
2151
2152void qib_free_agents(struct qib_ibdev *dev)
2153{
2154 struct qib_devdata *dd = dd_from_dev(dev);
2155 struct ib_mad_agent *agent;
2156 struct qib_ibport *ibp;
2157 int p;
2158
2159 for (p = 0; p < dd->num_pports; p++) {
2160 ibp = &dd->pport[p].ibport_data;
2161 if (ibp->send_agent) {
2162 agent = ibp->send_agent;
2163 ibp->send_agent = NULL;
2164 ib_unregister_mad_agent(agent);
2165 }
2166 if (ibp->sm_ah) {
2167 ib_destroy_ah(&ibp->sm_ah->ibah);
2168 ibp->sm_ah = NULL;
2169 }
2170 if (dd->pport[p].cong_stats.timer.data)
2171 del_timer_sync(&dd->pport[p].cong_stats.timer);
2172 }
2173}
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
new file mode 100644
index 000000000000..147aff9117d7
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -0,0 +1,373 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
36#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
37#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
38#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
39
40struct ib_node_info {
41 u8 base_version;
42 u8 class_version;
43 u8 node_type;
44 u8 num_ports;
45 __be64 sys_guid;
46 __be64 node_guid;
47 __be64 port_guid;
48 __be16 partition_cap;
49 __be16 device_id;
50 __be32 revision;
51 u8 local_port_num;
52 u8 vendor_id[3];
53} __attribute__ ((packed));
54
55struct ib_mad_notice_attr {
56 u8 generic_type;
57 u8 prod_type_msb;
58 __be16 prod_type_lsb;
59 __be16 trap_num;
60 __be16 issuer_lid;
61 __be16 toggle_count;
62
63 union {
64 struct {
65 u8 details[54];
66 } raw_data;
67
68 struct {
69 __be16 reserved;
70 __be16 lid; /* where violation happened */
71 u8 port_num; /* where violation happened */
72 } __attribute__ ((packed)) ntc_129_131;
73
74 struct {
75 __be16 reserved;
76 __be16 lid; /* LID where change occurred */
77 u8 reserved2;
78 u8 local_changes; /* low bit - local changes */
79 __be32 new_cap_mask; /* new capability mask */
80 u8 reserved3;
81 u8 change_flags; /* low 3 bits only */
82 } __attribute__ ((packed)) ntc_144;
83
84 struct {
85 __be16 reserved;
86 __be16 lid; /* lid where sys guid changed */
87 __be16 reserved2;
88 __be64 new_sys_guid;
89 } __attribute__ ((packed)) ntc_145;
90
91 struct {
92 __be16 reserved;
93 __be16 lid;
94 __be16 dr_slid;
95 u8 method;
96 u8 reserved2;
97 __be16 attr_id;
98 __be32 attr_mod;
99 __be64 mkey;
100 u8 reserved3;
101 u8 dr_trunc_hop;
102 u8 dr_rtn_path[30];
103 } __attribute__ ((packed)) ntc_256;
104
105 struct {
106 __be16 reserved;
107 __be16 lid1;
108 __be16 lid2;
109 __be32 key;
110 __be32 sl_qp1; /* SL: high 4 bits */
111 __be32 qp2; /* high 8 bits reserved */
112 union ib_gid gid1;
113 union ib_gid gid2;
114 } __attribute__ ((packed)) ntc_257_258;
115
116 } details;
117};
118
119/*
120 * Generic trap/notice types
121 */
122#define IB_NOTICE_TYPE_FATAL 0x80
123#define IB_NOTICE_TYPE_URGENT 0x81
124#define IB_NOTICE_TYPE_SECURITY 0x82
125#define IB_NOTICE_TYPE_SM 0x83
126#define IB_NOTICE_TYPE_INFO 0x84
127
128/*
129 * Generic trap/notice producers
130 */
131#define IB_NOTICE_PROD_CA cpu_to_be16(1)
132#define IB_NOTICE_PROD_SWITCH cpu_to_be16(2)
133#define IB_NOTICE_PROD_ROUTER cpu_to_be16(3)
134#define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4)
135
136/*
137 * Generic trap/notice numbers
138 */
139#define IB_NOTICE_TRAP_LLI_THRESH cpu_to_be16(129)
140#define IB_NOTICE_TRAP_EBO_THRESH cpu_to_be16(130)
141#define IB_NOTICE_TRAP_FLOW_UPDATE cpu_to_be16(131)
142#define IB_NOTICE_TRAP_CAP_MASK_CHG cpu_to_be16(144)
143#define IB_NOTICE_TRAP_SYS_GUID_CHG cpu_to_be16(145)
144#define IB_NOTICE_TRAP_BAD_MKEY cpu_to_be16(256)
145#define IB_NOTICE_TRAP_BAD_PKEY cpu_to_be16(257)
146#define IB_NOTICE_TRAP_BAD_QKEY cpu_to_be16(258)
147
148/*
149 * Repress trap/notice flags
150 */
151#define IB_NOTICE_REPRESS_LLI_THRESH (1 << 0)
152#define IB_NOTICE_REPRESS_EBO_THRESH (1 << 1)
153#define IB_NOTICE_REPRESS_FLOW_UPDATE (1 << 2)
154#define IB_NOTICE_REPRESS_CAP_MASK_CHG (1 << 3)
155#define IB_NOTICE_REPRESS_SYS_GUID_CHG (1 << 4)
156#define IB_NOTICE_REPRESS_BAD_MKEY (1 << 5)
157#define IB_NOTICE_REPRESS_BAD_PKEY (1 << 6)
158#define IB_NOTICE_REPRESS_BAD_QKEY (1 << 7)
159
160/*
161 * Generic trap/notice other local changes flags (trap 144).
162 */
163#define IB_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */
164#define IB_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */
165#define IB_NOTICE_TRAP_NODE_DESC_CHG 0x01
166
167/*
168 * Generic trap/notice M_Key violation flags in dr_trunc_hop (trap 256).
169 */
170#define IB_NOTICE_TRAP_DR_NOTICE 0x80
171#define IB_NOTICE_TRAP_DR_TRUNC 0x40
172
173struct ib_vl_weight_elem {
174 u8 vl; /* Only low 4 bits, upper 4 bits reserved */
175 u8 weight;
176};
177
178#define IB_VLARB_LOWPRI_0_31 1
179#define IB_VLARB_LOWPRI_32_63 2
180#define IB_VLARB_HIGHPRI_0_31 3
181#define IB_VLARB_HIGHPRI_32_63 4
182
183/*
184 * PMA class portinfo capability mask bits
185 */
186#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8)
187#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9)
188#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12)
189
190#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
191#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
192#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
193#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
194#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
195#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
196#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
197
198struct ib_perf {
199 u8 base_version;
200 u8 mgmt_class;
201 u8 class_version;
202 u8 method;
203 __be16 status;
204 __be16 unused;
205 __be64 tid;
206 __be16 attr_id;
207 __be16 resv;
208 __be32 attr_mod;
209 u8 reserved[40];
210 u8 data[192];
211} __attribute__ ((packed));
212
213struct ib_pma_classportinfo {
214 u8 base_version;
215 u8 class_version;
216 __be16 cap_mask;
217 u8 reserved[3];
218 u8 resp_time_value; /* only lower 5 bits */
219 union ib_gid redirect_gid;
220 __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
221 __be16 redirect_lid;
222 __be16 redirect_pkey;
223 __be32 redirect_qp; /* only lower 24 bits */
224 __be32 redirect_qkey;
225 union ib_gid trap_gid;
226 __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
227 __be16 trap_lid;
228 __be16 trap_pkey;
229 __be32 trap_hl_qp; /* 8, 24 bits respectively */
230 __be32 trap_qkey;
231} __attribute__ ((packed));
232
233struct ib_pma_portsamplescontrol {
234 u8 opcode;
235 u8 port_select;
236 u8 tick;
237 u8 counter_width; /* only lower 3 bits */
238 __be32 counter_mask0_9; /* 2, 10 * 3, bits */
239 __be16 counter_mask10_14; /* 1, 5 * 3, bits */
240 u8 sample_mechanisms;
241 u8 sample_status; /* only lower 2 bits */
242 __be64 option_mask;
243 __be64 vendor_mask;
244 __be32 sample_start;
245 __be32 sample_interval;
246 __be16 tag;
247 __be16 counter_select[15];
248} __attribute__ ((packed));
249
250struct ib_pma_portsamplesresult {
251 __be16 tag;
252 __be16 sample_status; /* only lower 2 bits */
253 __be32 counter[15];
254} __attribute__ ((packed));
255
256struct ib_pma_portsamplesresult_ext {
257 __be16 tag;
258 __be16 sample_status; /* only lower 2 bits */
259 __be32 extended_width; /* only upper 2 bits */
260 __be64 counter[15];
261} __attribute__ ((packed));
262
263struct ib_pma_portcounters {
264 u8 reserved;
265 u8 port_select;
266 __be16 counter_select;
267 __be16 symbol_error_counter;
268 u8 link_error_recovery_counter;
269 u8 link_downed_counter;
270 __be16 port_rcv_errors;
271 __be16 port_rcv_remphys_errors;
272 __be16 port_rcv_switch_relay_errors;
273 __be16 port_xmit_discards;
274 u8 port_xmit_constraint_errors;
275 u8 port_rcv_constraint_errors;
276 u8 reserved1;
277 u8 lli_ebor_errors; /* 4, 4, bits */
278 __be16 reserved2;
279 __be16 vl15_dropped;
280 __be32 port_xmit_data;
281 __be32 port_rcv_data;
282 __be32 port_xmit_packets;
283 __be32 port_rcv_packets;
284} __attribute__ ((packed));
285
286struct ib_pma_portcounters_cong {
287 u8 reserved;
288 u8 reserved1;
289 __be16 port_check_rate;
290 __be16 symbol_error_counter;
291 u8 link_error_recovery_counter;
292 u8 link_downed_counter;
293 __be16 port_rcv_errors;
294 __be16 port_rcv_remphys_errors;
295 __be16 port_rcv_switch_relay_errors;
296 __be16 port_xmit_discards;
297 u8 port_xmit_constraint_errors;
298 u8 port_rcv_constraint_errors;
299 u8 reserved2;
300 u8 lli_ebor_errors; /* 4, 4, bits */
301 __be16 reserved3;
302 __be16 vl15_dropped;
303 __be64 port_xmit_data;
304 __be64 port_rcv_data;
305 __be64 port_xmit_packets;
306 __be64 port_rcv_packets;
307 __be64 port_xmit_wait;
308 __be64 port_adr_events;
309} __attribute__ ((packed));
310
311#define IB_PMA_CONG_HW_CONTROL_TIMER 0x00
312#define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01
313
314#define QIB_XMIT_RATE_UNSUPPORTED 0x0
315#define QIB_XMIT_RATE_PICO 0x7
316/* number of 4nsec cycles equaling 2secs */
317#define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC
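/*
 * Editorial note (illustrative arithmetic, not in the original source):
 * 0x1DCD64EC == 499,999,980 decimal, and 499,999,980 cycles * 4 ns is
 * 1.99999992 s, i.e. the "2secs" the comment above refers to.
 */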
318
319#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
320#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
321#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
322#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
323#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
324#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
325#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
326#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
327#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
328#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
329#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
330#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
331#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
332
333#define IB_PMA_SEL_CONG_ALL 0x01
334#define IB_PMA_SEL_CONG_PORT_DATA 0x02
335#define IB_PMA_SEL_CONG_XMIT 0x04
336#define IB_PMA_SEL_CONG_ROUTING 0x08
337
338struct ib_pma_portcounters_ext {
339 u8 reserved;
340 u8 port_select;
341 __be16 counter_select;
342 __be32 reserved1;
343 __be64 port_xmit_data;
344 __be64 port_rcv_data;
345 __be64 port_xmit_packets;
346 __be64 port_rcv_packets;
347 __be64 port_unicast_xmit_packets;
348 __be64 port_unicast_rcv_packets;
349 __be64 port_multicast_xmit_packets;
350 __be64 port_multicast_rcv_packets;
351} __attribute__ ((packed));
352
353#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
354#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
355#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
356#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
357#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
358#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
359#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
360#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
361
362/*
363 * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
364 * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
365 * We support 5 counters which only count the mandatory quantities.
366 */
367#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
368#define COUNTER_MASK0_9 \
369 cpu_to_be32(COUNTER_MASK(1, 0) | \
370 COUNTER_MASK(1, 1) | \
371 COUNTER_MASK(1, 2) | \
372 COUNTER_MASK(1, 3) | \
373 COUNTER_MASK(1, 4))
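/*
 * Editorial worked example (derived from the macros above, not in the
 * original source): COUNTER_MASK(q, n) places the 3-bit capability
 * code q for counter n at bits [29 - 3n : 27 - 3n], so with q == 1 for
 * the five mandatory counters:
 *
 *	COUNTER_MASK(1, 0) == 1 << 27	(0x08000000)
 *	COUNTER_MASK(1, 1) == 1 << 24	(0x01000000)
 *	COUNTER_MASK(1, 2) == 1 << 21	(0x00200000)
 *	COUNTER_MASK(1, 3) == 1 << 18	(0x00040000)
 *	COUNTER_MASK(1, 4) == 1 << 15	(0x00008000)
 *
 * and therefore COUNTER_MASK0_9 == cpu_to_be32(0x09248000).
 */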
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
new file mode 100644
index 000000000000..8b73a11d571c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mmap.c
@@ -0,0 +1,174 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <linux/slab.h>
35#include <linux/vmalloc.h>
36#include <linux/mm.h>
37#include <linux/errno.h>
38#include <asm/pgtable.h>
39
40#include "qib_verbs.h"
41
42/**
43 * qib_release_mmap_info - free mmap info structure
44 * @ref: a pointer to the kref within struct qib_mmap_info
45 */
46void qib_release_mmap_info(struct kref *ref)
47{
48 struct qib_mmap_info *ip =
49 container_of(ref, struct qib_mmap_info, ref);
50 struct qib_ibdev *dev = to_idev(ip->context->device);
51
52 spin_lock_irq(&dev->pending_lock);
53 list_del(&ip->pending_mmaps);
54 spin_unlock_irq(&dev->pending_lock);
55
56 vfree(ip->obj);
57 kfree(ip);
58}
59
60/*
61 * open and close keep track of how many times the CQ is mapped,
62 * to avoid releasing it.
63 */
64static void qib_vma_open(struct vm_area_struct *vma)
65{
66 struct qib_mmap_info *ip = vma->vm_private_data;
67
68 kref_get(&ip->ref);
69}
70
71static void qib_vma_close(struct vm_area_struct *vma)
72{
73 struct qib_mmap_info *ip = vma->vm_private_data;
74
75 kref_put(&ip->ref, qib_release_mmap_info);
76}
77
78static struct vm_operations_struct qib_vm_ops = {
79 .open = qib_vma_open,
80 .close = qib_vma_close,
81};
82
83/**
84 * qib_mmap - create a new mmap region
85 * @context: the IB user context of the process making the mmap() call
86 * @vma: the VMA to be initialized
87 * Return zero if the mmap is OK. Otherwise, return an errno.
88 */
89int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
90{
91 struct qib_ibdev *dev = to_idev(context->device);
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
93 unsigned long size = vma->vm_end - vma->vm_start;
94 struct qib_mmap_info *ip, *pp;
95 int ret = -EINVAL;
96
97 /*
98 * Search the device's list of objects waiting for a mmap call.
99 * Normally, this list is very short since a call to create a
100 * CQ, QP, or SRQ is soon followed by a call to mmap().
101 */
102 spin_lock_irq(&dev->pending_lock);
103 list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
104 pending_mmaps) {
105 /* Only the creator is allowed to mmap the object */
106 if (context != ip->context || (__u64) offset != ip->offset)
107 continue;
108 /* Don't allow a mmap larger than the object. */
109 if (size > ip->size)
110 break;
111
112 list_del_init(&ip->pending_mmaps);
113 spin_unlock_irq(&dev->pending_lock);
114
115 ret = remap_vmalloc_range(vma, ip->obj, 0);
116 if (ret)
117 goto done;
118 vma->vm_ops = &qib_vm_ops;
119 vma->vm_private_data = ip;
120 qib_vma_open(vma);
121 goto done;
122 }
123 spin_unlock_irq(&dev->pending_lock);
124done:
125 return ret;
126}
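/*
 * Editorial sketch (assumed caller pattern, not part of this file):
 * object-creation code is expected to pair with qib_mmap() roughly as
 *
 *	ip = qib_create_mmap_info(dev, sz, context, obj);
 *	...
 *	spin_lock_irq(&dev->pending_lock);
 *	list_add(&ip->pending_mmaps, &dev->pending_mmaps);
 *	spin_unlock_irq(&dev->pending_lock);
 *
 * after which userspace mmap()s at the returned ip->offset and the
 * search loop above finds the entry on dev->pending_mmaps. The names
 * sz and obj here are hypothetical.
 */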
127
128/*
129 * Allocate information for qib_mmap
130 */
131struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
132 u32 size,
133 struct ib_ucontext *context,
134 void *obj) {
135 struct qib_mmap_info *ip;
136
137 ip = kmalloc(sizeof *ip, GFP_KERNEL);
138 if (!ip)
139 goto bail;
140
141 size = PAGE_ALIGN(size);
142
143 spin_lock_irq(&dev->mmap_offset_lock);
144 if (dev->mmap_offset == 0)
145 dev->mmap_offset = PAGE_SIZE;
146 ip->offset = dev->mmap_offset;
147 dev->mmap_offset += size;
148 spin_unlock_irq(&dev->mmap_offset_lock);
149
150 INIT_LIST_HEAD(&ip->pending_mmaps);
151 ip->size = size;
152 ip->context = context;
153 ip->obj = obj;
154 kref_init(&ip->ref);
155
156bail:
157 return ip;
158}
159
160void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
161 u32 size, void *obj)
162{
163 size = PAGE_ALIGN(size);
164
165 spin_lock_irq(&dev->mmap_offset_lock);
166 if (dev->mmap_offset == 0)
167 dev->mmap_offset = PAGE_SIZE;
168 ip->offset = dev->mmap_offset;
169 dev->mmap_offset += size;
170 spin_unlock_irq(&dev->mmap_offset_lock);
171
172 ip->size = size;
173 ip->obj = obj;
174}
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
new file mode 100644
index 000000000000..5f95f0f6385d
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -0,0 +1,503 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <rdma/ib_umem.h>
35#include <rdma/ib_smi.h>
36
37#include "qib.h"
38
39/* Fast memory region */
40struct qib_fmr {
41 struct ib_fmr ibfmr;
42 u8 page_shift;
43 struct qib_mregion mr; /* must be last */
44};
45
46static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
47{
48 return container_of(ibfmr, struct qib_fmr, ibfmr);
49}
50
51/**
52 * qib_get_dma_mr - get a DMA memory region
53 * @pd: protection domain for this memory region
54 * @acc: access flags
55 *
56 * Returns the memory region on success, otherwise returns an errno.
57 * Note that all DMA addresses should be created via the
58 * struct ib_dma_mapping_ops functions (see qib_dma.c).
59 */
60struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
61{
62 struct qib_ibdev *dev = to_idev(pd->device);
63 struct qib_mr *mr;
64 struct ib_mr *ret;
65 unsigned long flags;
66
67 if (to_ipd(pd)->user) {
68 ret = ERR_PTR(-EPERM);
69 goto bail;
70 }
71
72 mr = kzalloc(sizeof *mr, GFP_KERNEL);
73 if (!mr) {
74 ret = ERR_PTR(-ENOMEM);
75 goto bail;
76 }
77
78 mr->mr.access_flags = acc;
79 atomic_set(&mr->mr.refcount, 0);
80
81 spin_lock_irqsave(&dev->lk_table.lock, flags);
82 if (!dev->dma_mr)
83 dev->dma_mr = &mr->mr;
84 spin_unlock_irqrestore(&dev->lk_table.lock, flags);
85
86 ret = &mr->ibmr;
87
88bail:
89 return ret;
90}
91
92static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
93{
94 struct qib_mr *mr;
95 int m, i = 0;
96
97 /* Allocate struct plus pointers to first level page tables. */
98 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
99 mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
100 if (!mr)
101 goto done;
102
103 /* Allocate first level page tables. */
104 for (; i < m; i++) {
105 mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
106 if (!mr->mr.map[i])
107 goto bail;
108 }
109 mr->mr.mapsz = m;
110 mr->mr.max_segs = count;
111
112 /*
113 * ib_reg_phys_mr() will initialize mr->ibmr except for
114 * lkey and rkey.
115 */
116 if (!qib_alloc_lkey(lk_table, &mr->mr))
117 goto bail;
118 mr->ibmr.lkey = mr->mr.lkey;
119 mr->ibmr.rkey = mr->mr.lkey;
120
121 atomic_set(&mr->mr.refcount, 0);
122 goto done;
123
124bail:
125 while (i)
126 kfree(mr->mr.map[--i]);
127 kfree(mr);
128 mr = NULL;
129
130done:
131 return mr;
132}
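/*
 * Editorial note (illustrative, not in the original source): segment i
 * of a region built by alloc_mr() lives at
 * mr->mr.map[i / QIB_SEGSZ]->segs[i % QIB_SEGSZ]; the (m, n) index
 * pairs in the registration loops below walk exactly that layout,
 * bumping m and resetting n whenever n reaches QIB_SEGSZ.
 */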
133
134/**
135 * qib_reg_phys_mr - register a physical memory region
136 * @pd: protection domain for this memory region
137 * @buffer_list: pointer to the list of physical buffers to register
138 * @num_phys_buf: the number of physical buffers to register
139 * @iova_start: the starting address passed over IB which maps to this MR
140 *
141 * Returns the memory region on success, otherwise returns an errno.
142 */
143struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
144 struct ib_phys_buf *buffer_list,
145 int num_phys_buf, int acc, u64 *iova_start)
146{
147 struct qib_mr *mr;
148 int n, m, i;
149 struct ib_mr *ret;
150
151 mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
152 if (mr == NULL) {
153 ret = ERR_PTR(-ENOMEM);
154 goto bail;
155 }
156
157 mr->mr.pd = pd;
158 mr->mr.user_base = *iova_start;
159 mr->mr.iova = *iova_start;
160 mr->mr.length = 0;
161 mr->mr.offset = 0;
162 mr->mr.access_flags = acc;
163 mr->umem = NULL;
164
165 m = 0;
166 n = 0;
167 for (i = 0; i < num_phys_buf; i++) {
168 mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
169 mr->mr.map[m]->segs[n].length = buffer_list[i].size;
170 mr->mr.length += buffer_list[i].size;
171 n++;
172 if (n == QIB_SEGSZ) {
173 m++;
174 n = 0;
175 }
176 }
177
178 ret = &mr->ibmr;
179
180bail:
181 return ret;
182}
183
184/**
185 * qib_reg_user_mr - register a userspace memory region
186 * @pd: protection domain for this memory region
187 * @start: starting userspace address
188 * @length: length of region to register
189 * @virt_addr: virtual address to use (from HCA's point of view)
190 * @mr_access_flags: access flags for this memory region
191 * @udata: unused by the QLogic_IB driver
192 *
193 * Returns the memory region on success, otherwise returns an errno.
194 */
195struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
196 u64 virt_addr, int mr_access_flags,
197 struct ib_udata *udata)
198{
199 struct qib_mr *mr;
200 struct ib_umem *umem;
201 struct ib_umem_chunk *chunk;
202 int n, m, i;
203 struct ib_mr *ret;
204
205 if (length == 0) {
206 ret = ERR_PTR(-EINVAL);
207 goto bail;
208 }
209
210 umem = ib_umem_get(pd->uobject->context, start, length,
211 mr_access_flags, 0);
212 if (IS_ERR(umem))
213 return (void *) umem;
214
215 n = 0;
216 list_for_each_entry(chunk, &umem->chunk_list, list)
217 n += chunk->nents;
218
219 mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
220 if (!mr) {
221 ret = ERR_PTR(-ENOMEM);
222 ib_umem_release(umem);
223 goto bail;
224 }
225
226 mr->mr.pd = pd;
227 mr->mr.user_base = start;
228 mr->mr.iova = virt_addr;
229 mr->mr.length = length;
230 mr->mr.offset = umem->offset;
231 mr->mr.access_flags = mr_access_flags;
232 mr->umem = umem;
233
234 m = 0;
235 n = 0;
236 list_for_each_entry(chunk, &umem->chunk_list, list) {
237 for (i = 0; i < chunk->nents; i++) {
238 void *vaddr;
239
240 vaddr = page_address(sg_page(&chunk->page_list[i]));
241 if (!vaddr) {
242 ret = ERR_PTR(-EINVAL);
243 goto bail;
244 }
245 mr->mr.map[m]->segs[n].vaddr = vaddr;
246 mr->mr.map[m]->segs[n].length = umem->page_size;
247 n++;
248 if (n == QIB_SEGSZ) {
249 m++;
250 n = 0;
251 }
252 }
253 }
254 ret = &mr->ibmr;
255
256bail:
257 return ret;
258}
259
260/**
261 * qib_dereg_mr - unregister and free a memory region
262 * @ibmr: the memory region to free
263 *
264 * Returns 0 on success.
265 *
266 * Note that this is called to free MRs created by qib_get_dma_mr()
267 * or qib_reg_user_mr().
268 */
269int qib_dereg_mr(struct ib_mr *ibmr)
270{
271 struct qib_mr *mr = to_imr(ibmr);
272 struct qib_ibdev *dev = to_idev(ibmr->device);
273 int ret;
274 int i;
275
276 ret = qib_free_lkey(dev, &mr->mr);
277 if (ret)
278 return ret;
279
280 i = mr->mr.mapsz;
281 while (i)
282 kfree(mr->mr.map[--i]);
283 if (mr->umem)
284 ib_umem_release(mr->umem);
285 kfree(mr);
286 return 0;
287}
288
289/*
290 * Allocate a memory region usable with the
291 * IB_WR_FAST_REG_MR send work request.
292 *
293 * Return the memory region on success, otherwise return an errno.
294 */
295struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
296{
297 struct qib_mr *mr;
298
299 mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
300 if (mr == NULL)
301 return ERR_PTR(-ENOMEM);
302
303 mr->mr.pd = pd;
304 mr->mr.user_base = 0;
305 mr->mr.iova = 0;
306 mr->mr.length = 0;
307 mr->mr.offset = 0;
308 mr->mr.access_flags = 0;
309 mr->umem = NULL;
310
311 return &mr->ibmr;
312}
313
314struct ib_fast_reg_page_list *
315qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
316{
317 unsigned size = page_list_len * sizeof(u64);
318 struct ib_fast_reg_page_list *pl;
319
320 if (size > PAGE_SIZE)
321 return ERR_PTR(-EINVAL);
322
323 pl = kmalloc(sizeof *pl, GFP_KERNEL);
324 if (!pl)
325 return ERR_PTR(-ENOMEM);
326
327 pl->page_list = kmalloc(size, GFP_KERNEL);
328 if (!pl->page_list)
329 goto err_free;
330
331 return pl;
332
333err_free:
334 kfree(pl);
335 return ERR_PTR(-ENOMEM);
336}
337
338void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
339{
340 kfree(pl->page_list);
341 kfree(pl);
342}
343
344/**
345 * qib_alloc_fmr - allocate a fast memory region
346 * @pd: the protection domain for this memory region
347 * @mr_access_flags: access flags for this memory region
348 * @fmr_attr: fast memory region attributes
349 *
350 * Returns the memory region on success, otherwise returns an errno.
351 */
352struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
353 struct ib_fmr_attr *fmr_attr)
354{
355 struct qib_fmr *fmr;
356 int m, i = 0;
357 struct ib_fmr *ret;
358
359 /* Allocate struct plus pointers to first level page tables. */
360 m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
361 fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
362 if (!fmr)
363 goto bail;
364
365 /* Allocate first level page tables. */
366 for (; i < m; i++) {
367 fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
368 GFP_KERNEL);
369 if (!fmr->mr.map[i])
370 goto bail;
371 }
372 fmr->mr.mapsz = m;
373
374 /*
375 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
376 * rkey.
377 */
378 if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
379 goto bail;
380 fmr->ibfmr.rkey = fmr->mr.lkey;
381 fmr->ibfmr.lkey = fmr->mr.lkey;
382 /*
383 * Resources are allocated but no valid mapping (RKEY can't be
384 * used).
385 */
386 fmr->mr.pd = pd;
387 fmr->mr.user_base = 0;
388 fmr->mr.iova = 0;
389 fmr->mr.length = 0;
390 fmr->mr.offset = 0;
391 fmr->mr.access_flags = mr_access_flags;
392 fmr->mr.max_segs = fmr_attr->max_pages;
393 fmr->page_shift = fmr_attr->page_shift;
394
395 atomic_set(&fmr->mr.refcount, 0);
396 ret = &fmr->ibfmr;
397 goto done;
398
399bail:
400 while (i)
401 kfree(fmr->mr.map[--i]);
402 kfree(fmr);
403 ret = ERR_PTR(-ENOMEM);
404
405done:
406 return ret;
407}
408
409/**
410 * qib_map_phys_fmr - set up a fast memory region
411 * @ibfmr: the fast memory region to set up
412 * @page_list: the list of pages to associate with the fast memory region
413 * @list_len: the number of pages to associate with the fast memory region
414 * @iova: the virtual address of the start of the fast memory region
415 *
416 * This may be called from interrupt context.
417 */
418
419int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
420 int list_len, u64 iova)
421{
422 struct qib_fmr *fmr = to_ifmr(ibfmr);
423 struct qib_lkey_table *rkt;
424 unsigned long flags;
425 int m, n, i;
426 u32 ps;
427 int ret;
428
429 if (atomic_read(&fmr->mr.refcount))
430 return -EBUSY;
431
432 if (list_len > fmr->mr.max_segs) {
433 ret = -EINVAL;
434 goto bail;
435 }
436 rkt = &to_idev(ibfmr->device)->lk_table;
437 spin_lock_irqsave(&rkt->lock, flags);
438 fmr->mr.user_base = iova;
439 fmr->mr.iova = iova;
440 ps = 1 << fmr->page_shift;
441 fmr->mr.length = list_len * ps;
442 m = 0;
443 n = 0;
444 for (i = 0; i < list_len; i++) {
445 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
446 fmr->mr.map[m]->segs[n].length = ps;
447 if (++n == QIB_SEGSZ) {
448 m++;
449 n = 0;
450 }
451 }
452 spin_unlock_irqrestore(&rkt->lock, flags);
453 ret = 0;
454
455bail:
456 return ret;
457}
458
459/**
460 * qib_unmap_fmr - unmap fast memory regions
461 * @fmr_list: the list of fast memory regions to unmap
462 *
463 * Returns 0 on success.
464 */
465int qib_unmap_fmr(struct list_head *fmr_list)
466{
467 struct qib_fmr *fmr;
468 struct qib_lkey_table *rkt;
469 unsigned long flags;
470
471 list_for_each_entry(fmr, fmr_list, ibfmr.list) {
472 rkt = &to_idev(fmr->ibfmr.device)->lk_table;
473 spin_lock_irqsave(&rkt->lock, flags);
474 fmr->mr.user_base = 0;
475 fmr->mr.iova = 0;
476 fmr->mr.length = 0;
477 spin_unlock_irqrestore(&rkt->lock, flags);
478 }
479 return 0;
480}
481
482/**
483 * qib_dealloc_fmr - deallocate a fast memory region
484 * @ibfmr: the fast memory region to deallocate
485 *
486 * Returns 0 on success.
487 */
488int qib_dealloc_fmr(struct ib_fmr *ibfmr)
489{
490 struct qib_fmr *fmr = to_ifmr(ibfmr);
491 int ret;
492 int i;
493
494 ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
495 if (ret)
496 return ret;
497
498 i = fmr->mr.mapsz;
499 while (i)
500 kfree(fmr->mr.map[--i]);
501 kfree(fmr);
502 return 0;
503}
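/*
 * Editorial sketch (assumed consumer flow, not part of this file): the
 * FMR entry points above are cycled by upper layers roughly as
 *
 *	fmr = qib_alloc_fmr(pd, access, &attr);	  <- resources, no mapping
 *	qib_map_phys_fmr(fmr, pages, n, iova);	  <- rkey becomes usable
 *	... post work requests referencing the rkey ...
 *	qib_unmap_fmr(&fmr_list);		  <- mapping invalidated
 *	qib_dealloc_fmr(fmr);
 *
 * where pd, access, attr, pages, n, iova and fmr_list are
 * caller-supplied; the names are hypothetical.
 */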
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
new file mode 100644
index 000000000000..c926bf4541df
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -0,0 +1,738 @@
1/*
2 * Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/pci.h>
34#include <linux/io.h>
35#include <linux/delay.h>
36#include <linux/vmalloc.h>
37#include <linux/aer.h>
38
39#include "qib.h"
40
41/*
42 * This file contains PCIe utility routines that are common to the
43 * various QLogic InfiniPath adapters
44 */
45
46/*
47 * Code to adjust PCIe capabilities.
48 * To minimize the change footprint, we call it
49 * from qib_pcie_params, which every chip-specific
50 * file calls, even though this violates some
51 * expectations of harmlessness.
52 */
53static int qib_tune_pcie_caps(struct qib_devdata *);
54static int qib_tune_pcie_coalesce(struct qib_devdata *);
55
56/*
57 * Do all the common PCIe setup and initialization.
58 * devdata is not yet allocated, and is not allocated until after this
59 * routine returns success. Therefore qib_dev_err() can't be used for error
60 * printing.
61 */
62int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
63{
64 int ret;
65
66 ret = pci_enable_device(pdev);
67 if (ret) {
68 /*
69 * This can happen (in theory) iff:
70 * We did a chip reset, and then failed to reprogram the
71 * BAR, or the chip reset due to an internal error. We then
72 * unloaded the driver and reloaded it.
73 *
74 * Both reset cases set the BAR back to initial state. For
75 * the latter case, the AER sticky error bit at offset 0x718
76 * should be set, but the Linux kernel doesn't yet know
77 * about that, it appears. If the original BAR was retained
78 * in the kernel data structures, this may be OK.
79 */
80 qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
81 -ret);
82 goto done;
83 }
84
85 ret = pci_request_regions(pdev, QIB_DRV_NAME);
86 if (ret) {
87 qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
88 goto bail;
89 }
90
91 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
92 if (ret) {
93 /*
94 * If the 64 bit setup fails, try 32 bit. Some systems
95 * do not set up 64 bit maps when only 2GB or less of
96 * memory is installed.
97 */
98 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
99 if (ret) {
100 qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
101 goto bail;
102 }
103 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
104 } else
105 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
106 if (ret)
107 qib_early_err(&pdev->dev,
108 "Unable to set DMA consistent mask: %d\n", ret);
109
110 pci_set_master(pdev);
111 ret = pci_enable_pcie_error_reporting(pdev);
112 if (ret)
113 qib_early_err(&pdev->dev,
114 "Unable to enable pcie error reporting: %d\n",
115 ret);
116 goto done;
117
118bail:
119 pci_disable_device(pdev);
120 pci_release_regions(pdev);
121done:
122 return ret;
123}
124
125/*
126 * Do remaining PCIe setup, once dd is allocated, and save away
127 * fields required to re-initialize after a chip reset, or for
128 * various other purposes
129 */
130int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
131 const struct pci_device_id *ent)
132{
133 unsigned long len;
134 resource_size_t addr;
135
136 dd->pcidev = pdev;
137 pci_set_drvdata(pdev, dd);
138
139 addr = pci_resource_start(pdev, 0);
140 len = pci_resource_len(pdev, 0);
141
142#if defined(__powerpc__)
143 /* There isn't a generic way to specify writethrough mappings */
144 dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
145#else
146 dd->kregbase = ioremap_nocache(addr, len);
147#endif
148
149 if (!dd->kregbase)
150 return -ENOMEM;
151
152 dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
153 dd->physaddr = addr; /* used for io_remap, etc. */
154
155 /*
156 * Save BARs to rewrite after device reset. Save all 64 bits of
157 * BAR, just in case.
158 */
159 dd->pcibar0 = addr;
160 dd->pcibar1 = addr >> 32;
161 dd->deviceid = ent->device; /* save for later use */
162 dd->vendorid = ent->vendor;
163
164 return 0;
165}
166
167/*
168 * Do PCIe cleanup, after chip-specific cleanup, etc. Just prior
169 * to releasing the dd memory.
170 * void because the core pcie cleanup calls all return void
171 */
172void qib_pcie_ddcleanup(struct qib_devdata *dd)
173{
174 u64 __iomem *base = (void __iomem *) dd->kregbase;
175
176 dd->kregbase = NULL;
177 iounmap(base);
178 if (dd->piobase)
179 iounmap(dd->piobase);
180 if (dd->userbase)
181 iounmap(dd->userbase);
182
183 pci_disable_device(dd->pcidev);
184 pci_release_regions(dd->pcidev);
185
186 pci_set_drvdata(dd->pcidev, NULL);
187}
188
189static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
190 struct msix_entry *msix_entry)
191{
192 int ret;
193 u32 tabsize = 0;
194 u16 msix_flags;
195
196 pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
197 tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
198 if (tabsize > *msixcnt)
199 tabsize = *msixcnt;
200 ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
201 if (ret > 0) {
202 tabsize = ret;
203 ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
204 }
205 if (ret) {
206 qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
207 "falling back to INTx\n", tabsize, ret);
208 tabsize = 0;
209 }
210 *msixcnt = tabsize;
211
212 if (ret)
213 qib_enable_intx(dd->pcidev);
214
215}
216
217/**
218 * We save the msi lo and hi values, so we can restore them after
219 * chip reset (the kernel PCI infrastructure doesn't yet handle that
220 * correctly).
221 */
222static int qib_msi_setup(struct qib_devdata *dd, int pos)
223{
224 struct pci_dev *pdev = dd->pcidev;
225 u16 control;
226 int ret;
227
228 ret = pci_enable_msi(pdev);
229 if (ret)
230 qib_dev_err(dd, "pci_enable_msi failed: %d, "
231 "interrupts may not work\n", ret);
232 /* continue even if it fails, we may still be OK... */
233
234 pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
235 &dd->msi_lo);
236 pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
237 &dd->msi_hi);
238 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
239 /* now save the data (vector) info */
240 pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT)
241 ? 12 : 8),
242 &dd->msi_data);
243 return ret;
244}
245
246int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
247 struct msix_entry *entry)
248{
249 u16 linkstat, speed;
250 int pos = 0, pose, ret = 1;
251
252 pose = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
253 if (!pose) {
254 qib_dev_err(dd, "Can't find PCI Express capability!\n");
255 /* set up something... */
256 dd->lbus_width = 1;
257 dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
258 goto bail;
259 }
260
261 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
262 if (nent && *nent && pos) {
263 qib_msix_setup(dd, pos, nent, entry);
264 ret = 0; /* did it, either MSIx or INTx */
265 } else {
266 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
267 if (pos)
268 ret = qib_msi_setup(dd, pos);
269 else
270 qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
271 }
272 if (!pos)
273 qib_enable_intx(dd->pcidev);
274
275 pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat);
276 /*
277 * speed is bits 0-3, linkwidth is bits 4-8
278 * no defines for them in headers
279 */
280 speed = linkstat & 0xf;
281 linkstat >>= 4;
282 linkstat &= 0x1f;
283 dd->lbus_width = linkstat;
284
285 switch (speed) {
286 case 1:
287 dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
288 break;
289 case 2:
290 dd->lbus_speed = 5000; /* Gen2, 5GHz */
291 break;
292 default: /* not defined, assume gen1 */
293 dd->lbus_speed = 2500;
294 break;
295 }
296
297 /*
298 * Check against expected pcie width and complain if "wrong"
299 * on first initialization, not afterwards (i.e., reset).
300 */
301 if (minw && linkstat < minw)
302 qib_dev_err(dd,
303 "PCIe width %u (x%u HCA), performance reduced\n",
304 linkstat, minw);
305
306 qib_tune_pcie_caps(dd);
307
308 qib_tune_pcie_coalesce(dd);
309
310bail:
311 /* fill in string, even on errors */
312 snprintf(dd->lbus_info, sizeof(dd->lbus_info),
313 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
314 return ret;
315}
316
317/*
318 * Setup pcie interrupt stuff again after a reset. I'd like to just call
319 * pci_enable_msi() again for msi, but when I do that,
320 * the MSI enable bit doesn't get set in the command word, and
321 * we switch to a different interrupt vector, which is confusing,
322 * so I instead just do it all inline. Perhaps somehow can tie this
323 * into the PCIe hotplug support at some point
324 */
325int qib_reinit_intr(struct qib_devdata *dd)
326{
327 int pos;
328 u16 control;
329 int ret = 0;
330
331 /* If we aren't using MSI, don't restore it */
332 if (!dd->msi_lo)
333 goto bail;
334
335 pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
336 if (!pos) {
337 qib_dev_err(dd, "Can't find MSI capability, "
338 "can't restore MSI settings\n");
339 ret = 0;
340 /* nothing special for MSIx, just MSI */
341 goto bail;
342 }
343 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
344 dd->msi_lo);
345 pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
346 dd->msi_hi);
347 pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
348 if (!(control & PCI_MSI_FLAGS_ENABLE)) {
349 control |= PCI_MSI_FLAGS_ENABLE;
350 pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
351 control);
352 }
353 /* now rewrite the data (vector) info */
354 pci_write_config_word(dd->pcidev, pos +
355 ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
356 dd->msi_data);
357 ret = 1;
358bail:
359 if (!ret && (dd->flags & QIB_HAS_INTX)) {
360 qib_enable_intx(dd->pcidev);
361 ret = 1;
362 }
363
364 /* and now set the pci master bit again */
365 pci_set_master(dd->pcidev);
366
367 return ret;
368}
369
370/*
371 * Disable msi interrupt if enabled, and clear msi_lo.
372 * This is used primarily for the fallback to INTx, but
373 * is also used in reinit after reset, and during cleanup.
374 */
375void qib_nomsi(struct qib_devdata *dd)
376{
377 dd->msi_lo = 0;
378 pci_disable_msi(dd->pcidev);
379}
380
381/*
382 * Same as qib_nomsi, but for MSIx.
383 */
384void qib_nomsix(struct qib_devdata *dd)
385{
386 pci_disable_msix(dd->pcidev);
387}
388
389/*
390 * Similar to pci_intx(pdev, 1), except that we make sure
391 * msi(x) is off.
392 */
393void qib_enable_intx(struct pci_dev *pdev)
394{
395 u16 cw, new;
396 int pos;
397
398 /* first, turn on INTx */
399 pci_read_config_word(pdev, PCI_COMMAND, &cw);
400 new = cw & ~PCI_COMMAND_INTX_DISABLE;
401 if (new != cw)
402 pci_write_config_word(pdev, PCI_COMMAND, new);
403
404 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
405 if (pos) {
406 /* then turn off MSI */
407 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
408 new = cw & ~PCI_MSI_FLAGS_ENABLE;
409 if (new != cw)
410 pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
411 }
412 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
413 if (pos) {
414 /* then turn off MSIx */
415 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
416 new = cw & ~PCI_MSIX_FLAGS_ENABLE;
417 if (new != cw)
418 pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new);
419 }
420}
421
422/*
423 * These two routines are helper routines for the device reset code
424 * to move all the pcie code out of the chip-specific driver code.
425 */
426void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
427{
428 pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
429 pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
430 pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
431}
432
433void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
434{
435 int r;
436 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
437 dd->pcibar0);
438 if (r)
439 qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
440 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
441 dd->pcibar1);
442 if (r)
443 qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
444 /* now re-enable memory access, and restore cosmetic settings */
445 pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
446 pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
447 pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
448 r = pci_enable_device(dd->pcidev);
449 if (r)
450 qib_dev_err(dd, "pci_enable_device failed after "
451 "reset: %d\n", r);
452}
453
454/* code to adjust PCIe capabilities. */
455
456static int fld2val(int wd, int mask)
457{
458 int lsbmask;
459
460 if (!mask)
461 return 0;
462 wd &= mask;
463 lsbmask = mask ^ (mask & (mask - 1));
464 wd /= lsbmask;
465 return wd;
466}
467
468static int val2fld(int wd, int mask)
469{
470 int lsbmask;
471
472 if (!mask)
473 return 0;
474 lsbmask = mask ^ (mask & (mask - 1));
475 wd *= lsbmask;
476 return wd;
477}
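/*
 * Editorial worked example (assumes PCI_EXP_DEVCTL_PAYLOAD == 0x00e0,
 * i.e. bits 7:5, as in the kernel PCI headers): for a Device Control
 * value of 0x00a0,
 *
 *	lsbmask == 0x00e0 ^ (0x00e0 & 0x00df) == 0x0020
 *	fld2val(0x00a0, PCI_EXP_DEVCTL_PAYLOAD) == 0x00a0 / 0x0020 == 5
 *	val2fld(5, PCI_EXP_DEVCTL_PAYLOAD)	== 5 * 0x0020 == 0x00a0
 *
 * and payload code 5 encodes a 4096-byte Max_Payload_Size.
 */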
478
479static int qib_pcie_coalesce;
480module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
481MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
482
483/*
484 * Enable PCIe completion and data coalescing, on Intel 5x00 and 7300
485 * chipsets. This is known to be unsafe for some revisions of some
486 * of these chipsets, with some BIOS settings, and enabling it on those
487 * systems may result in the system crashing, and/or data corruption.
488 */
489static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
490{
491 int r;
492 struct pci_dev *parent;
493 int ppos;
494 u16 devid;
495 u32 mask, bits, val;
496
497 if (!qib_pcie_coalesce)
498 return 0;
499
500 /* Find out supported and configured values for parent (root) */
501 parent = dd->pcidev->bus->self;
502 if (parent->bus->parent) {
503 qib_devinfo(dd->pcidev, "Parent not root\n");
504 return 1;
505 }
506 ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
507 if (!ppos)
508 return 1;
509 if (parent->vendor != 0x8086)
510 return 1;
511
512 /*
513 * - bit 12: Max_rdcmp_Imt_EN: need to set to 1
514 * - bit 11: COALESCE_FORCE: need to set to 0
515 * - bit 10: COALESCE_EN: need to set to 1
516 * (but with limitations on some chipsets)
517 *
518 * On the Intel 5000, 5100, and 7300 chipsets, there is also:
519 * - bits 25:24: COALESCE_MODE, need to set to 0
520 */
521 devid = parent->device;
522 if (devid >= 0x25e2 && devid <= 0x25fa) {
523 u8 rev;
524
525 /* 5000 P/V/X/Z */
526 pci_read_config_byte(parent, PCI_REVISION_ID, &rev);
527 if (rev <= 0xb2)
528 bits = 1U << 10;
529 else
530 bits = 7U << 10;
531 mask = (3U << 24) | (7U << 10);
532 } else if (devid >= 0x65e2 && devid <= 0x65fa) {
533 /* 5100 */
534 bits = 1U << 10;
535 mask = (3U << 24) | (7U << 10);
536 } else if (devid >= 0x4021 && devid <= 0x402e) {
537 /* 5400 */
538 bits = 7U << 10;
539 mask = 7U << 10;
540 } else if (devid >= 0x3604 && devid <= 0x360a) {
541 /* 7300 */
542 bits = 7U << 10;
543 mask = (3U << 24) | (7U << 10);
544 } else {
545 /* not one of the chipsets that we know about */
546 return 1;
547 }
548 pci_read_config_dword(parent, 0x48, &val);
549 val &= ~mask;
550 val |= bits;
551 r = pci_write_config_dword(parent, 0x48, val);
552 return 0;
553}
554
555/*
556 * BIOS may not set PCIe bus-utilization parameters for best performance.
557 * Check and optionally adjust them to maximize our throughput.
558 */
559static int qib_pcie_caps;
560module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
561MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (4lsb), ReadReq (D4..7)");
562
563static int qib_tune_pcie_caps(struct qib_devdata *dd)
564{
565 int ret = 1; /* Assume the worst */
566 struct pci_dev *parent;
567 int ppos, epos;
568 u16 pcaps, pctl, ecaps, ectl;
569 int rc_sup, ep_sup;
570 int rc_cur, ep_cur;
571
572 /* Find out supported and configured values for parent (root) */
573 parent = dd->pcidev->bus->self;
574 if (parent->bus->parent) {
575 qib_devinfo(dd->pcidev, "Parent not root\n");
576 goto bail;
577 }
578 ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
579 if (ppos) {
580 pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
581 pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
582 } else
583 goto bail;
584 /* Find out supported and configured values for endpoint (us) */
585 epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
586 if (epos) {
587 pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
588 pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
589 } else
590 goto bail;
591 ret = 0;
592 /* Find max payload supported by root, endpoint */
593 rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD);
594 ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD);
595 if (rc_sup > ep_sup)
596 rc_sup = ep_sup;
597
598 rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD);
599 ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD);
600
601 /* If the supported value exceeds the module param limit, clamp it */
602 if (rc_sup > (qib_pcie_caps & 7))
603 rc_sup = qib_pcie_caps & 7;
604 /* If current is below min(allowed, supported), bump root payload */
605 if (rc_sup > rc_cur) {
606 rc_cur = rc_sup;
607 pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) |
608 val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD);
609 pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
610 }
611 /* If less than (allowed, supported), bump endpoint payload */
612 if (rc_sup > ep_cur) {
613 ep_cur = rc_sup;
614 ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) |
615 val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD);
616 pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
617 }
618
619 /*
620 * Now the Read Request size.
621 * No field for max supported, but PCIe spec limits it to 4096,
622 * which is code '5' (log2(4096) - 7)
623 */
624 rc_sup = 5;
625 if (rc_sup > ((qib_pcie_caps >> 4) & 7))
626 rc_sup = (qib_pcie_caps >> 4) & 7;
627 rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ);
628 ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ);
629
630 if (rc_sup > rc_cur) {
631 rc_cur = rc_sup;
632 pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
633 val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
634 pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
635 }
636 if (rc_sup > ep_cur) {
637 ep_cur = rc_sup;
638 ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
639 val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
640 pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
641 }
642bail:
643 return ret;
644}
645/* End of PCIe capability tuning */
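The pcie_caps module parameter above packs two 3-bit size codes: Max Payload in the 4 lsbs and Read Request in D4..7, each code being log2(bytes) - 7, as the comment before the ReadReq handling notes. A minimal editorial sketch of the decoding; the helper name is illustrative, not part of the driver:

	/* Sketch only: decode one 3-bit PCIe size code.  Code 0 is 128
	 * bytes and each step doubles it, so code 5 is the 4096-byte
	 * ceiling that the Read Request logic above clamps to. */
	static unsigned int pcie_code_to_bytes(unsigned int code)
	{
		return 128U << code;
	}

For example, pcie_caps = 0x51 would request a 4096-byte ReadReq (D4..7 = 5) and a 256-byte Max Payload (4 lsb = 1).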
646
647/*
648 * From here through qib_pci_err_handler definition is invoked via
649 * PCI error infrastructure, registered via pci
650 */
651static pci_ers_result_t
652qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
653{
654 struct qib_devdata *dd = pci_get_drvdata(pdev);
655 pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
656
657 switch (state) {
658 case pci_channel_io_normal:
659 qib_devinfo(pdev, "State Normal, ignoring\n");
660 break;
661
662 case pci_channel_io_frozen:
663 qib_devinfo(pdev, "State Frozen, requesting reset\n");
664 pci_disable_device(pdev);
665 ret = PCI_ERS_RESULT_NEED_RESET;
666 break;
667
668 case pci_channel_io_perm_failure:
669 qib_devinfo(pdev, "State Permanent Failure, disabling\n");
670 if (dd) {
671 /* no more register accesses! */
672 dd->flags &= ~QIB_PRESENT;
673 qib_disable_after_error(dd);
674 }
675 /* else early, or other problem */
676 ret = PCI_ERS_RESULT_DISCONNECT;
677 break;
678
679 default: /* shouldn't happen */
680 qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
681 state);
682 break;
683 }
684 return ret;
685}
686
687static pci_ers_result_t
688qib_pci_mmio_enabled(struct pci_dev *pdev)
689{
690 u64 words = 0U;
691 struct qib_devdata *dd = pci_get_drvdata(pdev);
692 pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
693
694 if (dd && dd->pport) {
695 words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
696 if (words == ~0ULL)
697 ret = PCI_ERS_RESULT_NEED_RESET;
698 }
699 qib_devinfo(pdev, "QIB mmio_enabled function called, "
700 "read wordscntr %Lx, returning %d\n", words, ret);
701 return ret;
702}
703
704static pci_ers_result_t
705qib_pci_slot_reset(struct pci_dev *pdev)
706{
707	qib_devinfo(pdev, "QIB slot_reset function called, ignored\n");
708 return PCI_ERS_RESULT_CAN_RECOVER;
709}
710
711static pci_ers_result_t
712qib_pci_link_reset(struct pci_dev *pdev)
713{
714 qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
715 return PCI_ERS_RESULT_CAN_RECOVER;
716}
717
718static void
719qib_pci_resume(struct pci_dev *pdev)
720{
721 struct qib_devdata *dd = pci_get_drvdata(pdev);
722 qib_devinfo(pdev, "QIB resume function called\n");
723 pci_cleanup_aer_uncorrect_error_status(pdev);
724 /*
725 * Running jobs will fail, since it's asynchronous
726 * unlike sysfs-requested reset. Better than
727 * doing nothing.
728 */
729 qib_init(dd, 1); /* same as re-init after reset */
730}
731
732struct pci_error_handlers qib_pci_err_handler = {
733 .error_detected = qib_pci_error_detected,
734 .mmio_enabled = qib_pci_mmio_enabled,
735 .link_reset = qib_pci_link_reset,
736 .slot_reset = qib_pci_slot_reset,
737 .resume = qib_pci_resume,
738};
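This table takes effect once it is referenced from the driver's struct pci_driver; a minimal sketch of that wiring, with the registration itself assumed to live elsewhere in this series (the sketch omits the probe/remove plumbing a real driver needs):

	static struct pci_driver qib_driver_sketch = {
		.name        = "ib_qib",
		.err_handler = &qib_pci_err_handler,
		/* .id_table, .probe and .remove omitted from this sketch;
		 * they live in the driver's init file, not here. */
	};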
diff --git a/drivers/infiniband/hw/qib/qib_pio_copy.c b/drivers/infiniband/hw/qib/qib_pio_copy.c
new file mode 100644
index 000000000000..10b8c444dd31
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_pio_copy.c
@@ -0,0 +1,64 @@
1/*
2 * Copyright (c) 2009 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "qib.h"
34
35/**
36 * qib_pio_copy - copy data to MMIO space, in multiples of 32-bits
37 * @to: destination, in MMIO space (must be 64-bit aligned)
38 * @from: source (must be 64-bit aligned)
39 * @count: number of 32-bit quantities to copy
40 *
41 * Copy data from kernel space to MMIO space, in multiples of 32 bits at a
42 * time. Order of access is not guaranteed, nor is a memory barrier
43 * performed afterwards.
44 */
45void qib_pio_copy(void __iomem *to, const void *from, size_t count)
46{
47#ifdef CONFIG_64BIT
48 u64 __iomem *dst = to;
49 const u64 *src = from;
50 const u64 *end = src + (count >> 1);
51
52 while (src < end)
53 __raw_writeq(*src++, dst++);
54 if (count & 1)
55 __raw_writel(*(const u32 *)src, dst);
56#else
57 u32 __iomem *dst = to;
58 const u32 *src = from;
59 const u32 *end = src + count;
60
61 while (src < end)
62 __raw_writel(*src++, dst++);
63#endif
64}
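A hypothetical call site, to make the contract concrete: count is in 32-bit words, both pointers must be 64-bit aligned, and any ordering or write-combining flush is the caller's job, since the copy itself guarantees neither. 'piobuf' and the flush call are assumptions for illustration, not defined in this file:

	u32 hdr[4] = { 0x12345678, 0x9abcdef0, 0xfeedface, 0x0badcafe };

	qib_pio_copy(piobuf, hdr, 4);	/* 4 dwords -> two 64-bit stores on CONFIG_64BIT */
	qib_flush_wc();			/* caller-supplied barrier/flush (assumed helper) */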
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
new file mode 100644
index 000000000000..e0f65e39076b
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -0,0 +1,1255 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/err.h>
36#include <linux/vmalloc.h>
37
38#include "qib.h"
39
40#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
41#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
42
43static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
44 struct qpn_map *map, unsigned off)
45{
46 return (map - qpt->map) * BITS_PER_PAGE + off;
47}
48
49static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
50 struct qpn_map *map, unsigned off,
51 unsigned r)
52{
53 if (qpt->mask) {
54 off++;
55 if ((off & qpt->mask) >> 1 != r)
56 off = ((off & qpt->mask) ?
57 (off | qpt->mask) + 1 : off) | (r << 1);
58 } else
59 off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
60 return off;
61}
62
63/*
64 * Convert the AETH credit code into the number of credits.
65 */
66static u32 credit_table[31] = {
67 0, /* 0 */
68 1, /* 1 */
69 2, /* 2 */
70 3, /* 3 */
71 4, /* 4 */
72 6, /* 5 */
73 8, /* 6 */
74 12, /* 7 */
75 16, /* 8 */
76 24, /* 9 */
77 32, /* A */
78 48, /* B */
79 64, /* C */
80 96, /* D */
81 128, /* E */
82 192, /* F */
83 256, /* 10 */
84 384, /* 11 */
85 512, /* 12 */
86 768, /* 13 */
87 1024, /* 14 */
88 1536, /* 15 */
89 2048, /* 16 */
90 3072, /* 17 */
91 4096, /* 18 */
92 6144, /* 19 */
93 8192, /* 1A */
94 12288, /* 1B */
95 16384, /* 1C */
96 24576, /* 1D */
97 32768 /* 1E */
98};
99
100static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
101{
102 unsigned long page = get_zeroed_page(GFP_KERNEL);
103
104 /*
105 * Free the page if someone raced with us installing it.
106 */
107
108 spin_lock(&qpt->lock);
109 if (map->page)
110 free_page(page);
111 else
112 map->page = (void *)page;
113 spin_unlock(&qpt->lock);
114}
115
116/*
117 * Allocate the next available QPN or
118 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
119 */
120static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
121 enum ib_qp_type type, u8 port)
122{
123 u32 i, offset, max_scan, qpn;
124 struct qpn_map *map;
125 u32 ret;
126 int r;
127
128 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
129 unsigned n;
130
131 ret = type == IB_QPT_GSI;
132 n = 1 << (ret + 2 * (port - 1));
133 spin_lock(&qpt->lock);
134 if (qpt->flags & n)
135 ret = -EINVAL;
136 else
137 qpt->flags |= n;
138 spin_unlock(&qpt->lock);
139 goto bail;
140 }
141
142 r = smp_processor_id();
143 if (r >= dd->n_krcv_queues)
144 r %= dd->n_krcv_queues;
145 qpn = qpt->last + 1;
146 if (qpn >= QPN_MAX)
147 qpn = 2;
148 if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
149 qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
150 (r << 1);
151 offset = qpn & BITS_PER_PAGE_MASK;
152 map = &qpt->map[qpn / BITS_PER_PAGE];
153 max_scan = qpt->nmaps - !offset;
154 for (i = 0;;) {
155 if (unlikely(!map->page)) {
156 get_map_page(qpt, map);
157 if (unlikely(!map->page))
158 break;
159 }
160 do {
161 if (!test_and_set_bit(offset, map->page)) {
162 qpt->last = qpn;
163 ret = qpn;
164 goto bail;
165 }
166 offset = find_next_offset(qpt, map, offset, r);
167 qpn = mk_qpn(qpt, map, offset);
168 /*
169 * This test differs from alloc_pidmap().
170 * If find_next_offset() does find a zero
171 * bit, we don't need to check for QPN
172 * wrapping around past our starting QPN.
173 * We just need to be sure we don't loop
174 * forever.
175 */
176 } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
177 /*
178 * In order to keep the number of pages allocated to a
179	 * minimum, we scan all the existing pages before increasing
180 * the size of the bitmap table.
181 */
182 if (++i > max_scan) {
183 if (qpt->nmaps == QPNMAP_ENTRIES)
184 break;
185 map = &qpt->map[qpt->nmaps++];
186 offset = qpt->mask ? (r << 1) : 0;
187 } else if (map < &qpt->map[qpt->nmaps]) {
188 ++map;
189 offset = qpt->mask ? (r << 1) : 0;
190 } else {
191 map = &qpt->map[0];
192 offset = qpt->mask ? (r << 1) : 2;
193 }
194 qpn = mk_qpn(qpt, map, offset);
195 }
196
197 ret = -ENOMEM;
198
199bail:
200 return ret;
201}
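To make the bitmap arithmetic concrete: with 4 KiB pages, BITS_PER_PAGE is 4096 * 8 = 32768, so mk_qpn() for map index 2 and offset 100 names QPN 2 * 32768 + 100 = 65636. A self-contained restatement of that mapping; the 4 KiB page size is an assumption here, since the kernel value is architecture-dependent:

	/* Sketch of mk_qpn()'s arithmetic assuming PAGE_SIZE == 4096:
	 * each qpn_map page tracks 32768 QPN allocation bits. */
	static unsigned example_qpn(unsigned map_index, unsigned offset)
	{
		return map_index * 32768 + offset;	/* (2, 100) -> 65636 */
	}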
202
203static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
204{
205 struct qpn_map *map;
206
207 map = qpt->map + qpn / BITS_PER_PAGE;
208 if (map->page)
209 clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
210}
211
212/*
213 * Put the QP into the hash table.
214 * The hash table holds a reference to the QP.
215 */
216static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
217{
218 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
219 unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
220 unsigned long flags;
221
222 spin_lock_irqsave(&dev->qpt_lock, flags);
223
224 if (qp->ibqp.qp_num == 0)
225 ibp->qp0 = qp;
226 else if (qp->ibqp.qp_num == 1)
227 ibp->qp1 = qp;
228 else {
229 qp->next = dev->qp_table[n];
230 dev->qp_table[n] = qp;
231 }
232 atomic_inc(&qp->refcount);
233
234 spin_unlock_irqrestore(&dev->qpt_lock, flags);
235}
236
237/*
238 * Remove the QP from the table so it can't be found asynchronously by
239 * the receive interrupt routine.
240 */
241static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
242{
243 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
244 struct qib_qp *q, **qpp;
245 unsigned long flags;
246
247 qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
248
249 spin_lock_irqsave(&dev->qpt_lock, flags);
250
251 if (ibp->qp0 == qp) {
252 ibp->qp0 = NULL;
253 atomic_dec(&qp->refcount);
254 } else if (ibp->qp1 == qp) {
255 ibp->qp1 = NULL;
256 atomic_dec(&qp->refcount);
257 } else
258 for (; (q = *qpp) != NULL; qpp = &q->next)
259 if (q == qp) {
260 *qpp = qp->next;
261 qp->next = NULL;
262 atomic_dec(&qp->refcount);
263 break;
264 }
265
266 spin_unlock_irqrestore(&dev->qpt_lock, flags);
267}
268
269/**
270 * qib_free_all_qps - check for QPs still in use
271 * @qpt: the QP table to empty
272 *
273 * There should not be any QPs still in use.
274 * Free memory for table.
275 */
276unsigned qib_free_all_qps(struct qib_devdata *dd)
277{
278 struct qib_ibdev *dev = &dd->verbs_dev;
279 unsigned long flags;
280 struct qib_qp *qp;
281 unsigned n, qp_inuse = 0;
282
283 for (n = 0; n < dd->num_pports; n++) {
284 struct qib_ibport *ibp = &dd->pport[n].ibport_data;
285
286 if (!qib_mcast_tree_empty(ibp))
287 qp_inuse++;
288 if (ibp->qp0)
289 qp_inuse++;
290 if (ibp->qp1)
291 qp_inuse++;
292 }
293
294 spin_lock_irqsave(&dev->qpt_lock, flags);
295 for (n = 0; n < dev->qp_table_size; n++) {
296 qp = dev->qp_table[n];
297 dev->qp_table[n] = NULL;
298
299 for (; qp; qp = qp->next)
300 qp_inuse++;
301 }
302 spin_unlock_irqrestore(&dev->qpt_lock, flags);
303
304 return qp_inuse;
305}
306
307/**
308 * qib_lookup_qpn - return the QP with the given QPN
309 * @qpt: the QP table
310 * @qpn: the QP number to look up
311 *
312 * The caller is responsible for decrementing the QP reference count
313 * when done.
314 */
315struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
316{
317 struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
318 unsigned long flags;
319 struct qib_qp *qp;
320
321 spin_lock_irqsave(&dev->qpt_lock, flags);
322
323 if (qpn == 0)
324 qp = ibp->qp0;
325 else if (qpn == 1)
326 qp = ibp->qp1;
327 else
328 for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
329 qp = qp->next)
330 if (qp->ibqp.qp_num == qpn)
331 break;
332 if (qp)
333 atomic_inc(&qp->refcount);
334
335 spin_unlock_irqrestore(&dev->qpt_lock, flags);
336 return qp;
337}
338
339/**
340 * qib_reset_qp - initialize the QP state to the reset state
341 * @qp: the QP to reset
342 * @type: the QP type
343 */
344static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
345{
346 qp->remote_qpn = 0;
347 qp->qkey = 0;
348 qp->qp_access_flags = 0;
349 atomic_set(&qp->s_dma_busy, 0);
350 qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
351 qp->s_hdrwords = 0;
352 qp->s_wqe = NULL;
353 qp->s_draining = 0;
354 qp->s_next_psn = 0;
355 qp->s_last_psn = 0;
356 qp->s_sending_psn = 0;
357 qp->s_sending_hpsn = 0;
358 qp->s_psn = 0;
359 qp->r_psn = 0;
360 qp->r_msn = 0;
361 if (type == IB_QPT_RC) {
362 qp->s_state = IB_OPCODE_RC_SEND_LAST;
363 qp->r_state = IB_OPCODE_RC_SEND_LAST;
364 } else {
365 qp->s_state = IB_OPCODE_UC_SEND_LAST;
366 qp->r_state = IB_OPCODE_UC_SEND_LAST;
367 }
368 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
369 qp->r_nak_state = 0;
370 qp->r_aflags = 0;
371 qp->r_flags = 0;
372 qp->s_head = 0;
373 qp->s_tail = 0;
374 qp->s_cur = 0;
375 qp->s_acked = 0;
376 qp->s_last = 0;
377 qp->s_ssn = 1;
378 qp->s_lsn = 0;
379 qp->s_mig_state = IB_MIG_MIGRATED;
380 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
381 qp->r_head_ack_queue = 0;
382 qp->s_tail_ack_queue = 0;
383 qp->s_num_rd_atomic = 0;
384 if (qp->r_rq.wq) {
385 qp->r_rq.wq->head = 0;
386 qp->r_rq.wq->tail = 0;
387 }
388 qp->r_sge.num_sge = 0;
389}
390
391static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
392{
393 unsigned n;
394
395 if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
396 while (qp->s_rdma_read_sge.num_sge) {
397 atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
398 if (--qp->s_rdma_read_sge.num_sge)
399 qp->s_rdma_read_sge.sge =
400 *qp->s_rdma_read_sge.sg_list++;
401 }
402
403 while (qp->r_sge.num_sge) {
404 atomic_dec(&qp->r_sge.sge.mr->refcount);
405 if (--qp->r_sge.num_sge)
406 qp->r_sge.sge = *qp->r_sge.sg_list++;
407 }
408
409 if (clr_sends) {
410 while (qp->s_last != qp->s_head) {
411 struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
412 unsigned i;
413
414 for (i = 0; i < wqe->wr.num_sge; i++) {
415 struct qib_sge *sge = &wqe->sg_list[i];
416
417 atomic_dec(&sge->mr->refcount);
418 }
419 if (qp->ibqp.qp_type == IB_QPT_UD ||
420 qp->ibqp.qp_type == IB_QPT_SMI ||
421 qp->ibqp.qp_type == IB_QPT_GSI)
422 atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
423 if (++qp->s_last >= qp->s_size)
424 qp->s_last = 0;
425 }
426 if (qp->s_rdma_mr) {
427 atomic_dec(&qp->s_rdma_mr->refcount);
428 qp->s_rdma_mr = NULL;
429 }
430 }
431
432 if (qp->ibqp.qp_type != IB_QPT_RC)
433 return;
434
435 for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
436 struct qib_ack_entry *e = &qp->s_ack_queue[n];
437
438 if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
439 e->rdma_sge.mr) {
440 atomic_dec(&e->rdma_sge.mr->refcount);
441 e->rdma_sge.mr = NULL;
442 }
443 }
444}
445
446/**
447 * qib_error_qp - put a QP into the error state
448 * @qp: the QP to put into the error state
449 * @err: the receive completion error to signal if a RWQE is active
450 *
451 * Flushes both send and receive work queues.
452 * Returns true if last WQE event should be generated.
453 * The QP s_lock should be held and interrupts disabled.
454 * If we are already in error state, just return.
455 */
456int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
457{
458 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
459 struct ib_wc wc;
460 int ret = 0;
461
462 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
463 goto bail;
464
465 qp->state = IB_QPS_ERR;
466
467 if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
468 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
469 del_timer(&qp->s_timer);
470 }
471 spin_lock(&dev->pending_lock);
472 if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
473 qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
474 list_del_init(&qp->iowait);
475 }
476 spin_unlock(&dev->pending_lock);
477
478 if (!(qp->s_flags & QIB_S_BUSY)) {
479 qp->s_hdrwords = 0;
480 if (qp->s_rdma_mr) {
481 atomic_dec(&qp->s_rdma_mr->refcount);
482 qp->s_rdma_mr = NULL;
483 }
484 if (qp->s_tx) {
485 qib_put_txreq(qp->s_tx);
486 qp->s_tx = NULL;
487 }
488 }
489
490 /* Schedule the sending tasklet to drain the send work queue. */
491 if (qp->s_last != qp->s_head)
492 qib_schedule_send(qp);
493
494 clear_mr_refs(qp, 0);
495
496 memset(&wc, 0, sizeof(wc));
497 wc.qp = &qp->ibqp;
498 wc.opcode = IB_WC_RECV;
499
500 if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
501 wc.wr_id = qp->r_wr_id;
502 wc.status = err;
503 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
504 }
505 wc.status = IB_WC_WR_FLUSH_ERR;
506
507 if (qp->r_rq.wq) {
508 struct qib_rwq *wq;
509 u32 head;
510 u32 tail;
511
512 spin_lock(&qp->r_rq.lock);
513
514 /* sanity check pointers before trusting them */
515 wq = qp->r_rq.wq;
516 head = wq->head;
517 if (head >= qp->r_rq.size)
518 head = 0;
519 tail = wq->tail;
520 if (tail >= qp->r_rq.size)
521 tail = 0;
522 while (tail != head) {
523 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
524 if (++tail >= qp->r_rq.size)
525 tail = 0;
526 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
527 }
528 wq->tail = tail;
529
530 spin_unlock(&qp->r_rq.lock);
531 } else if (qp->ibqp.event_handler)
532 ret = 1;
533
534bail:
535 return ret;
536}
537
538/**
539 * qib_modify_qp - modify the attributes of a queue pair
540	 * @ibqp: the queue pair whose attributes we're modifying
541 * @attr: the new attributes
542 * @attr_mask: the mask of attributes to modify
543 * @udata: user data for libibverbs.so
544 *
545 * Returns 0 on success, otherwise returns an errno.
546 */
547int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
548 int attr_mask, struct ib_udata *udata)
549{
550 struct qib_ibdev *dev = to_idev(ibqp->device);
551 struct qib_qp *qp = to_iqp(ibqp);
552 enum ib_qp_state cur_state, new_state;
553 struct ib_event ev;
554 int lastwqe = 0;
555 int mig = 0;
556 int ret;
557 u32 pmtu = 0; /* for gcc warning only */
558
559 spin_lock_irq(&qp->r_lock);
560 spin_lock(&qp->s_lock);
561
562 cur_state = attr_mask & IB_QP_CUR_STATE ?
563 attr->cur_qp_state : qp->state;
564 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
565
566 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
567 attr_mask))
568 goto inval;
569
570 if (attr_mask & IB_QP_AV) {
571 if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
572 goto inval;
573 if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
574 goto inval;
575 }
576
577 if (attr_mask & IB_QP_ALT_PATH) {
578 if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
579 goto inval;
580 if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
581 goto inval;
582 if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
583 goto inval;
584 }
585
586 if (attr_mask & IB_QP_PKEY_INDEX)
587 if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
588 goto inval;
589
590 if (attr_mask & IB_QP_MIN_RNR_TIMER)
591 if (attr->min_rnr_timer > 31)
592 goto inval;
593
594 if (attr_mask & IB_QP_PORT)
595 if (qp->ibqp.qp_type == IB_QPT_SMI ||
596 qp->ibqp.qp_type == IB_QPT_GSI ||
597 attr->port_num == 0 ||
598 attr->port_num > ibqp->device->phys_port_cnt)
599 goto inval;
600
601 if (attr_mask & IB_QP_DEST_QPN)
602 if (attr->dest_qp_num > QIB_QPN_MASK)
603 goto inval;
604
605 if (attr_mask & IB_QP_RETRY_CNT)
606 if (attr->retry_cnt > 7)
607 goto inval;
608
609 if (attr_mask & IB_QP_RNR_RETRY)
610 if (attr->rnr_retry > 7)
611 goto inval;
612
613 /*
614	 * Don't allow invalid path_mtu values. It is OK to set it
615	 * greater than the active mtu (or even the max_cap, if we
616	 * have tuned that to a small mtu). We'll set qp->path_mtu
617	 * to the lesser of the requested attribute mtu and the
618	 * active mtu, for packetizing messages.
619 * Note that the QP port has to be set in INIT and MTU in RTR.
620 */
621 if (attr_mask & IB_QP_PATH_MTU) {
622 struct qib_devdata *dd = dd_from_dev(dev);
623 int mtu, pidx = qp->port_num - 1;
624
625 mtu = ib_mtu_enum_to_int(attr->path_mtu);
626 if (mtu == -1)
627 goto inval;
628 if (mtu > dd->pport[pidx].ibmtu) {
629 switch (dd->pport[pidx].ibmtu) {
630 case 4096:
631 pmtu = IB_MTU_4096;
632 break;
633 case 2048:
634 pmtu = IB_MTU_2048;
635 break;
636 case 1024:
637 pmtu = IB_MTU_1024;
638 break;
639 case 512:
640 pmtu = IB_MTU_512;
641 break;
642 case 256:
643 pmtu = IB_MTU_256;
644 break;
645 default:
646 pmtu = IB_MTU_2048;
647 }
648 } else
649 pmtu = attr->path_mtu;
650 }
651
652 if (attr_mask & IB_QP_PATH_MIG_STATE) {
653 if (attr->path_mig_state == IB_MIG_REARM) {
654 if (qp->s_mig_state == IB_MIG_ARMED)
655 goto inval;
656 if (new_state != IB_QPS_RTS)
657 goto inval;
658 } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
659 if (qp->s_mig_state == IB_MIG_REARM)
660 goto inval;
661 if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
662 goto inval;
663 if (qp->s_mig_state == IB_MIG_ARMED)
664 mig = 1;
665 } else
666 goto inval;
667 }
668
669 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
670 if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
671 goto inval;
672
673 switch (new_state) {
674 case IB_QPS_RESET:
675 if (qp->state != IB_QPS_RESET) {
676 qp->state = IB_QPS_RESET;
677 spin_lock(&dev->pending_lock);
678 if (!list_empty(&qp->iowait))
679 list_del_init(&qp->iowait);
680 spin_unlock(&dev->pending_lock);
681 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
682 spin_unlock(&qp->s_lock);
683 spin_unlock_irq(&qp->r_lock);
684 /* Stop the sending work queue and retry timer */
685 cancel_work_sync(&qp->s_work);
686 del_timer_sync(&qp->s_timer);
687 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
688 if (qp->s_tx) {
689 qib_put_txreq(qp->s_tx);
690 qp->s_tx = NULL;
691 }
692 remove_qp(dev, qp);
693 wait_event(qp->wait, !atomic_read(&qp->refcount));
694 spin_lock_irq(&qp->r_lock);
695 spin_lock(&qp->s_lock);
696 clear_mr_refs(qp, 1);
697 qib_reset_qp(qp, ibqp->qp_type);
698 }
699 break;
700
701 case IB_QPS_RTR:
702 /* Allow event to retrigger if QP set to RTR more than once */
703 qp->r_flags &= ~QIB_R_COMM_EST;
704 qp->state = new_state;
705 break;
706
707 case IB_QPS_SQD:
708 qp->s_draining = qp->s_last != qp->s_cur;
709 qp->state = new_state;
710 break;
711
712 case IB_QPS_SQE:
713 if (qp->ibqp.qp_type == IB_QPT_RC)
714 goto inval;
715 qp->state = new_state;
716 break;
717
718 case IB_QPS_ERR:
719 lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
720 break;
721
722 default:
723 qp->state = new_state;
724 break;
725 }
726
727 if (attr_mask & IB_QP_PKEY_INDEX)
728 qp->s_pkey_index = attr->pkey_index;
729
730 if (attr_mask & IB_QP_PORT)
731 qp->port_num = attr->port_num;
732
733 if (attr_mask & IB_QP_DEST_QPN)
734 qp->remote_qpn = attr->dest_qp_num;
735
736 if (attr_mask & IB_QP_SQ_PSN) {
737 qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
738 qp->s_psn = qp->s_next_psn;
739 qp->s_sending_psn = qp->s_next_psn;
740 qp->s_last_psn = qp->s_next_psn - 1;
741 qp->s_sending_hpsn = qp->s_last_psn;
742 }
743
744 if (attr_mask & IB_QP_RQ_PSN)
745 qp->r_psn = attr->rq_psn & QIB_PSN_MASK;
746
747 if (attr_mask & IB_QP_ACCESS_FLAGS)
748 qp->qp_access_flags = attr->qp_access_flags;
749
750 if (attr_mask & IB_QP_AV) {
751 qp->remote_ah_attr = attr->ah_attr;
752 qp->s_srate = attr->ah_attr.static_rate;
753 }
754
755 if (attr_mask & IB_QP_ALT_PATH) {
756 qp->alt_ah_attr = attr->alt_ah_attr;
757 qp->s_alt_pkey_index = attr->alt_pkey_index;
758 }
759
760 if (attr_mask & IB_QP_PATH_MIG_STATE) {
761 qp->s_mig_state = attr->path_mig_state;
762 if (mig) {
763 qp->remote_ah_attr = qp->alt_ah_attr;
764 qp->port_num = qp->alt_ah_attr.port_num;
765 qp->s_pkey_index = qp->s_alt_pkey_index;
766 }
767 }
768
769 if (attr_mask & IB_QP_PATH_MTU)
770 qp->path_mtu = pmtu;
771
772 if (attr_mask & IB_QP_RETRY_CNT) {
773 qp->s_retry_cnt = attr->retry_cnt;
774 qp->s_retry = attr->retry_cnt;
775 }
776
777 if (attr_mask & IB_QP_RNR_RETRY) {
778 qp->s_rnr_retry_cnt = attr->rnr_retry;
779 qp->s_rnr_retry = attr->rnr_retry;
780 }
781
782 if (attr_mask & IB_QP_MIN_RNR_TIMER)
783 qp->r_min_rnr_timer = attr->min_rnr_timer;
784
785 if (attr_mask & IB_QP_TIMEOUT)
786 qp->timeout = attr->timeout;
787
788 if (attr_mask & IB_QP_QKEY)
789 qp->qkey = attr->qkey;
790
791 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
792 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
793
794 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
795 qp->s_max_rd_atomic = attr->max_rd_atomic;
796
797 spin_unlock(&qp->s_lock);
798 spin_unlock_irq(&qp->r_lock);
799
800 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
801 insert_qp(dev, qp);
802
803 if (lastwqe) {
804 ev.device = qp->ibqp.device;
805 ev.element.qp = &qp->ibqp;
806 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
807 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
808 }
809 if (mig) {
810 ev.device = qp->ibqp.device;
811 ev.element.qp = &qp->ibqp;
812 ev.event = IB_EVENT_PATH_MIG;
813 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
814 }
815 ret = 0;
816 goto bail;
817
818inval:
819 spin_unlock(&qp->s_lock);
820 spin_unlock_irq(&qp->r_lock);
821 ret = -EINVAL;
822
823bail:
824 return ret;
825}
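For orientation, a hypothetical verbs caller exercising the RTR transition above for a UC QP; the values (dlid 7, destination QPN 42) and the attribute mask shown are illustrative of the usual RESET -> INIT -> RTR sequence, not taken from this patch:

	struct ib_qp_attr attr = {
		.qp_state    = IB_QPS_RTR,
		.path_mtu    = IB_MTU_2048,	/* validated against the port ibmtu above */
		.dest_qp_num = 42,		/* illustrative remote QPN */
		.rq_psn      = 0,
		.ah_attr     = { .dlid = 7, .port_num = 1 },	/* illustrative */
	};
	int err = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_AV |
			       IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN);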
826
827int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
828 int attr_mask, struct ib_qp_init_attr *init_attr)
829{
830 struct qib_qp *qp = to_iqp(ibqp);
831
832 attr->qp_state = qp->state;
833 attr->cur_qp_state = attr->qp_state;
834 attr->path_mtu = qp->path_mtu;
835 attr->path_mig_state = qp->s_mig_state;
836 attr->qkey = qp->qkey;
837 attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
838 attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
839 attr->dest_qp_num = qp->remote_qpn;
840 attr->qp_access_flags = qp->qp_access_flags;
841 attr->cap.max_send_wr = qp->s_size - 1;
842 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
843 attr->cap.max_send_sge = qp->s_max_sge;
844 attr->cap.max_recv_sge = qp->r_rq.max_sge;
845 attr->cap.max_inline_data = 0;
846 attr->ah_attr = qp->remote_ah_attr;
847 attr->alt_ah_attr = qp->alt_ah_attr;
848 attr->pkey_index = qp->s_pkey_index;
849 attr->alt_pkey_index = qp->s_alt_pkey_index;
850 attr->en_sqd_async_notify = 0;
851 attr->sq_draining = qp->s_draining;
852 attr->max_rd_atomic = qp->s_max_rd_atomic;
853 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
854 attr->min_rnr_timer = qp->r_min_rnr_timer;
855 attr->port_num = qp->port_num;
856 attr->timeout = qp->timeout;
857 attr->retry_cnt = qp->s_retry_cnt;
858 attr->rnr_retry = qp->s_rnr_retry_cnt;
859 attr->alt_port_num = qp->alt_ah_attr.port_num;
860 attr->alt_timeout = qp->alt_timeout;
861
862 init_attr->event_handler = qp->ibqp.event_handler;
863 init_attr->qp_context = qp->ibqp.qp_context;
864 init_attr->send_cq = qp->ibqp.send_cq;
865 init_attr->recv_cq = qp->ibqp.recv_cq;
866 init_attr->srq = qp->ibqp.srq;
867 init_attr->cap = attr->cap;
868 if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
869 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
870 else
871 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
872 init_attr->qp_type = qp->ibqp.qp_type;
873 init_attr->port_num = qp->port_num;
874 return 0;
875}
876
877/**
878 * qib_compute_aeth - compute the AETH (syndrome + MSN)
879 * @qp: the queue pair to compute the AETH for
880 *
881 * Returns the AETH.
882 */
883__be32 qib_compute_aeth(struct qib_qp *qp)
884{
885 u32 aeth = qp->r_msn & QIB_MSN_MASK;
886
887 if (qp->ibqp.srq) {
888 /*
889 * Shared receive queues don't generate credits.
890 * Set the credit field to the invalid value.
891 */
892 aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
893 } else {
894 u32 min, max, x;
895 u32 credits;
896 struct qib_rwq *wq = qp->r_rq.wq;
897 u32 head;
898 u32 tail;
899
900 /* sanity check pointers before trusting them */
901 head = wq->head;
902 if (head >= qp->r_rq.size)
903 head = 0;
904 tail = wq->tail;
905 if (tail >= qp->r_rq.size)
906 tail = 0;
907 /*
908 * Compute the number of credits available (RWQEs).
909 * XXX Not holding the r_rq.lock here so there is a small
910 * chance that the pair of reads are not atomic.
911 */
912 credits = head - tail;
913 if ((int)credits < 0)
914 credits += qp->r_rq.size;
915 /*
916 * Binary search the credit table to find the code to
917 * use.
918 */
919 min = 0;
920 max = 31;
921 for (;;) {
922 x = (min + max) / 2;
923 if (credit_table[x] == credits)
924 break;
925 if (credit_table[x] > credits)
926 max = x;
927 else if (min == x)
928 break;
929 else
930 min = x;
931 }
932 aeth |= x << QIB_AETH_CREDIT_SHIFT;
933 }
934 return cpu_to_be32(aeth);
935}
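The binary search above converges on the largest credit_table entry that does not exceed the actual RWQE count; e.g. 100 free RWQEs encode as code 0xD (96 credits). An equivalent linear formulation, offered only as an editorial sketch of the semantics, not a replacement:

	/* Sketch: pick the largest credit code whose table value does
	 * not exceed the real count; same result as the search above. */
	static u32 credits_to_code(u32 credits)
	{
		u32 x = 30;

		while (x && credit_table[x] > credits)
			x--;
		return x;	/* credits_to_code(100) == 0xD (96) */
	}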
936
937/**
938 * qib_create_qp - create a queue pair for a device
939	 * @ibpd: the protection domain whose device we create the queue pair for
940 * @init_attr: the attributes of the queue pair
941 * @udata: user data for libibverbs.so
942 *
943 * Returns the queue pair on success, otherwise returns an errno.
944 *
945 * Called by the ib_create_qp() core verbs function.
946 */
947struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
948 struct ib_qp_init_attr *init_attr,
949 struct ib_udata *udata)
950{
951 struct qib_qp *qp;
952 int err;
953 struct qib_swqe *swq = NULL;
954 struct qib_ibdev *dev;
955 struct qib_devdata *dd;
956 size_t sz;
957 size_t sg_list_sz;
958 struct ib_qp *ret;
959
960 if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
961 init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
962 ret = ERR_PTR(-EINVAL);
963 goto bail;
964 }
965
966 /* Check receive queue parameters if no SRQ is specified. */
967 if (!init_attr->srq) {
968 if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
969 init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
970 ret = ERR_PTR(-EINVAL);
971 goto bail;
972 }
973 if (init_attr->cap.max_send_sge +
974 init_attr->cap.max_send_wr +
975 init_attr->cap.max_recv_sge +
976 init_attr->cap.max_recv_wr == 0) {
977 ret = ERR_PTR(-EINVAL);
978 goto bail;
979 }
980 }
981
982 switch (init_attr->qp_type) {
983 case IB_QPT_SMI:
984 case IB_QPT_GSI:
985 if (init_attr->port_num == 0 ||
986 init_attr->port_num > ibpd->device->phys_port_cnt) {
987 ret = ERR_PTR(-EINVAL);
988 goto bail;
989 }
990 case IB_QPT_UC:
991 case IB_QPT_RC:
992 case IB_QPT_UD:
993 sz = sizeof(struct qib_sge) *
994 init_attr->cap.max_send_sge +
995 sizeof(struct qib_swqe);
996 swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
997 if (swq == NULL) {
998 ret = ERR_PTR(-ENOMEM);
999 goto bail;
1000 }
1001 sz = sizeof(*qp);
1002 sg_list_sz = 0;
1003 if (init_attr->srq) {
1004 struct qib_srq *srq = to_isrq(init_attr->srq);
1005
1006 if (srq->rq.max_sge > 1)
1007 sg_list_sz = sizeof(*qp->r_sg_list) *
1008 (srq->rq.max_sge - 1);
1009 } else if (init_attr->cap.max_recv_sge > 1)
1010 sg_list_sz = sizeof(*qp->r_sg_list) *
1011 (init_attr->cap.max_recv_sge - 1);
1012 qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
1013 if (!qp) {
1014 ret = ERR_PTR(-ENOMEM);
1015 goto bail_swq;
1016 }
1017 if (init_attr->srq)
1018 sz = 0;
1019 else {
1020 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
1021 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
1022 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
1023 sizeof(struct qib_rwqe);
1024 qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
1025 qp->r_rq.size * sz);
1026 if (!qp->r_rq.wq) {
1027 ret = ERR_PTR(-ENOMEM);
1028 goto bail_qp;
1029 }
1030 }
1031
1032 /*
1033 * ib_create_qp() will initialize qp->ibqp
1034 * except for qp->ibqp.qp_num.
1035 */
1036 spin_lock_init(&qp->r_lock);
1037 spin_lock_init(&qp->s_lock);
1038 spin_lock_init(&qp->r_rq.lock);
1039 atomic_set(&qp->refcount, 0);
1040 init_waitqueue_head(&qp->wait);
1041 init_waitqueue_head(&qp->wait_dma);
1042 init_timer(&qp->s_timer);
1043 qp->s_timer.data = (unsigned long)qp;
1044 INIT_WORK(&qp->s_work, qib_do_send);
1045 INIT_LIST_HEAD(&qp->iowait);
1046 INIT_LIST_HEAD(&qp->rspwait);
1047 qp->state = IB_QPS_RESET;
1048 qp->s_wq = swq;
1049 qp->s_size = init_attr->cap.max_send_wr + 1;
1050 qp->s_max_sge = init_attr->cap.max_send_sge;
1051 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
1052 qp->s_flags = QIB_S_SIGNAL_REQ_WR;
1053 dev = to_idev(ibpd->device);
1054 dd = dd_from_dev(dev);
1055 err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
1056 init_attr->port_num);
1057 if (err < 0) {
1058 ret = ERR_PTR(err);
1059 vfree(qp->r_rq.wq);
1060 goto bail_qp;
1061 }
1062 qp->ibqp.qp_num = err;
1063 qp->port_num = init_attr->port_num;
1064 qp->processor_id = smp_processor_id();
1065 qib_reset_qp(qp, init_attr->qp_type);
1066 break;
1067
1068 default:
1069 /* Don't support raw QPs */
1070 ret = ERR_PTR(-ENOSYS);
1071 goto bail;
1072 }
1073
1074 init_attr->cap.max_inline_data = 0;
1075
1076 /*
1077 * Return the address of the RWQ as the offset to mmap.
1078 * See qib_mmap() for details.
1079 */
1080 if (udata && udata->outlen >= sizeof(__u64)) {
1081 if (!qp->r_rq.wq) {
1082 __u64 offset = 0;
1083
1084 err = ib_copy_to_udata(udata, &offset,
1085 sizeof(offset));
1086 if (err) {
1087 ret = ERR_PTR(err);
1088 goto bail_ip;
1089 }
1090 } else {
1091 u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
1092
1093 qp->ip = qib_create_mmap_info(dev, s,
1094 ibpd->uobject->context,
1095 qp->r_rq.wq);
1096 if (!qp->ip) {
1097 ret = ERR_PTR(-ENOMEM);
1098 goto bail_ip;
1099 }
1100
1101 err = ib_copy_to_udata(udata, &(qp->ip->offset),
1102 sizeof(qp->ip->offset));
1103 if (err) {
1104 ret = ERR_PTR(err);
1105 goto bail_ip;
1106 }
1107 }
1108 }
1109
1110 spin_lock(&dev->n_qps_lock);
1111 if (dev->n_qps_allocated == ib_qib_max_qps) {
1112 spin_unlock(&dev->n_qps_lock);
1113 ret = ERR_PTR(-ENOMEM);
1114 goto bail_ip;
1115 }
1116
1117 dev->n_qps_allocated++;
1118 spin_unlock(&dev->n_qps_lock);
1119
1120 if (qp->ip) {
1121 spin_lock_irq(&dev->pending_lock);
1122 list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
1123 spin_unlock_irq(&dev->pending_lock);
1124 }
1125
1126 ret = &qp->ibqp;
1127 goto bail;
1128
1129bail_ip:
1130 if (qp->ip)
1131 kref_put(&qp->ip->ref, qib_release_mmap_info);
1132 else
1133 vfree(qp->r_rq.wq);
1134 free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
1135bail_qp:
1136 kfree(qp);
1137bail_swq:
1138 vfree(swq);
1139bail:
1140 return ret;
1141}
1142
1143/**
1144 * qib_destroy_qp - destroy a queue pair
1145 * @ibqp: the queue pair to destroy
1146 *
1147 * Returns 0 on success.
1148 *
1149 * Note that this can be called while the QP is actively sending or
1150 * receiving!
1151 */
1152int qib_destroy_qp(struct ib_qp *ibqp)
1153{
1154 struct qib_qp *qp = to_iqp(ibqp);
1155 struct qib_ibdev *dev = to_idev(ibqp->device);
1156
1157 /* Make sure HW and driver activity is stopped. */
1158 spin_lock_irq(&qp->s_lock);
1159 if (qp->state != IB_QPS_RESET) {
1160 qp->state = IB_QPS_RESET;
1161 spin_lock(&dev->pending_lock);
1162 if (!list_empty(&qp->iowait))
1163 list_del_init(&qp->iowait);
1164 spin_unlock(&dev->pending_lock);
1165 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
1166 spin_unlock_irq(&qp->s_lock);
1167 cancel_work_sync(&qp->s_work);
1168 del_timer_sync(&qp->s_timer);
1169 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
1170 if (qp->s_tx) {
1171 qib_put_txreq(qp->s_tx);
1172 qp->s_tx = NULL;
1173 }
1174 remove_qp(dev, qp);
1175 wait_event(qp->wait, !atomic_read(&qp->refcount));
1176 clear_mr_refs(qp, 1);
1177 } else
1178 spin_unlock_irq(&qp->s_lock);
1179
1180 /* all user's cleaned up, mark it available */
1181 free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
1182 spin_lock(&dev->n_qps_lock);
1183 dev->n_qps_allocated--;
1184 spin_unlock(&dev->n_qps_lock);
1185
1186 if (qp->ip)
1187 kref_put(&qp->ip->ref, qib_release_mmap_info);
1188 else
1189 vfree(qp->r_rq.wq);
1190 vfree(qp->s_wq);
1191 kfree(qp);
1192 return 0;
1193}
1194
1195/**
1196 * qib_init_qpn_table - initialize the QP number table for a device
1197 * @qpt: the QPN table
1198 */
1199void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
1200{
1201 spin_lock_init(&qpt->lock);
1202 qpt->last = 1; /* start with QPN 2 */
1203 qpt->nmaps = 1;
1204 qpt->mask = dd->qpn_mask;
1205}
1206
1207/**
1208 * qib_free_qpn_table - free the QP number table for a device
1209 * @qpt: the QPN table
1210 */
1211void qib_free_qpn_table(struct qib_qpn_table *qpt)
1212{
1213 int i;
1214
1215 for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
1216 if (qpt->map[i].page)
1217 free_page((unsigned long) qpt->map[i].page);
1218}
1219
1220/**
1221 * qib_get_credit - flush the send work queue of a QP
1222	 * @qp: the qp whose send work queue to flush
1223 * @aeth: the Acknowledge Extended Transport Header
1224 *
1225 * The QP s_lock should be held.
1226 */
1227void qib_get_credit(struct qib_qp *qp, u32 aeth)
1228{
1229 u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
1230
1231 /*
1232 * If the credit is invalid, we can send
1233 * as many packets as we like. Otherwise, we have to
1234 * honor the credit field.
1235 */
1236 if (credit == QIB_AETH_CREDIT_INVAL) {
1237 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
1238 qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
1239 if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
1240 qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
1241 qib_schedule_send(qp);
1242 }
1243 }
1244 } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
1245 /* Compute new LSN (i.e., MSN + credit) */
1246 credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
1247 if (qib_cmp24(credit, qp->s_lsn) > 0) {
1248 qp->s_lsn = credit;
1249 if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
1250 qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
1251 qib_schedule_send(qp);
1252 }
1253 }
1254 }
1255}
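As a worked example of the update above: an AETH carrying credit code 0x10 grants credit_table[0x10] = 256 RWQEs, so the new s_lsn becomes (MSN + 256) & QIB_MSN_MASK, and the sender may keep issuing requests until s_ssn passes that limit, at which point QIB_S_WAIT_SSN_CREDIT stalls it again.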
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
new file mode 100644
index 000000000000..35b3604b691d
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -0,0 +1,564 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/delay.h>
35#include <linux/pci.h>
36#include <linux/vmalloc.h>
37
38#include "qib.h"
39#include "qib_qsfp.h"
40
41/*
42 * QSFP support for ib_qib driver, using "Two Wire Serial Interface" driver
43 * in qib_twsi.c
44 */
45#define QSFP_MAX_RETRY 4
46
47static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
48{
49 struct qib_devdata *dd = ppd->dd;
50 u32 out, mask;
51 int ret, cnt, pass = 0;
52 int stuck = 0;
53 u8 *buff = bp;
54
55 ret = mutex_lock_interruptible(&dd->eep_lock);
56 if (ret)
57 goto no_unlock;
58
59 if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
60 ret = -ENXIO;
61 goto bail;
62 }
63
64 /*
65 * We presume, if we are called at all, that this board has
66 * QSFP. This is on the same i2c chain as the legacy parts,
67 * but only responds if the module is selected via GPIO pins.
68 * Further, there are very long setup and hold requirements
69 * on MODSEL.
70 */
71 mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
72 out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
73 if (ppd->hw_pidx) {
74 mask <<= QSFP_GPIO_PORT2_SHIFT;
75 out <<= QSFP_GPIO_PORT2_SHIFT;
76 }
77
78 dd->f_gpio_mod(dd, out, mask, mask);
79
80 /*
81 * Module could take up to 2 Msec to respond to MOD_SEL, and there
82 * is no way to tell if it is ready, so we must wait.
83 */
84 msleep(2);
85
86 /* Make sure TWSI bus is in sane state. */
87 ret = qib_twsi_reset(dd);
88 if (ret) {
89 qib_dev_porterr(dd, ppd->port,
90 "QSFP interface Reset for read failed\n");
91 ret = -EIO;
92 stuck = 1;
93 goto deselect;
94 }
95
96 /* All QSFP modules are at A0 */
97
98 cnt = 0;
99 while (cnt < len) {
100 unsigned in_page;
101 int wlen = len - cnt;
102 in_page = addr % QSFP_PAGESIZE;
103 if ((in_page + wlen) > QSFP_PAGESIZE)
104 wlen = QSFP_PAGESIZE - in_page;
105 ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
106		/* Some QSFPs fail the first try; retry as a workaround */
107 if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY)
108 continue;
109 if (ret) {
110 /* qib_twsi_blk_rd() 1 for error, else 0 */
111 ret = -EIO;
112 goto deselect;
113 }
114 addr += wlen;
115 cnt += wlen;
116 }
117 ret = cnt;
118
119deselect:
120 /*
121 * Module could take up to 10 uSec after transfer before
122 * ready to respond to MOD_SEL negation, and there is no way
123 * to tell if it is ready, so we must wait.
124 */
125 udelay(10);
126	/* set QSFP MODSEL, RST, LP all high */
127 dd->f_gpio_mod(dd, mask, mask, mask);
128
129 /*
130 * Module could take up to 2 Msec to respond to MOD_SEL
131	 * going away, and there is no way to tell if it is ready,
132	 * so we must wait.
133 */
134 if (stuck)
135 qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");
136
137 if (pass >= QSFP_MAX_RETRY && ret)
138 qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
139 else if (pass)
140 qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
141
142 msleep(2);
143
144bail:
145 mutex_unlock(&dd->eep_lock);
146
147no_unlock:
148 return ret;
149}
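The read loop splits transfers at QSFP_PAGESIZE (128-byte) boundaries, so a 16-byte read starting at address 120 becomes two TWSI transfers of 8 bytes each. The chunking rule, restated as an editorial sketch:

	/* Sketch of the split used above: size of the first chunk such
	 * that no single TWSI transfer crosses a 128-byte page. */
	static int first_chunk(int addr, int len)
	{
		int in_page = addr % QSFP_PAGESIZE;

		return (in_page + len > QSFP_PAGESIZE) ?
			QSFP_PAGESIZE - in_page : len;	/* (120, 16) -> 8 */
	}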
150
151/*
152 * qsfp_write
153 * We do not ordinarily write the QSFP, but this is needed to select
154 * the page on non-flat QSFPs, and possibly later unusual cases
155 */
156static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
157 int len)
158{
159 struct qib_devdata *dd = ppd->dd;
160 u32 out, mask;
161 int ret, cnt;
162 u8 *buff = bp;
163
164 ret = mutex_lock_interruptible(&dd->eep_lock);
165 if (ret)
166 goto no_unlock;
167
168 if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
169 ret = -ENXIO;
170 goto bail;
171 }
172
173 /*
174 * We presume, if we are called at all, that this board has
175 * QSFP. This is on the same i2c chain as the legacy parts,
176 * but only responds if the module is selected via GPIO pins.
177 * Further, there are very long setup and hold requirements
178 * on MODSEL.
179 */
180 mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
181 out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
182 if (ppd->hw_pidx) {
183 mask <<= QSFP_GPIO_PORT2_SHIFT;
184 out <<= QSFP_GPIO_PORT2_SHIFT;
185 }
186 dd->f_gpio_mod(dd, out, mask, mask);
187
188 /*
189 * Module could take up to 2 Msec to respond to MOD_SEL,
190 * and there is no way to tell if it is ready, so we must wait.
191 */
192 msleep(2);
193
194 /* Make sure TWSI bus is in sane state. */
195 ret = qib_twsi_reset(dd);
196 if (ret) {
197 qib_dev_porterr(dd, ppd->port,
198 "QSFP interface Reset for write failed\n");
199 ret = -EIO;
200 goto deselect;
201 }
202
203 /* All QSFP modules are at A0 */
204
205 cnt = 0;
206 while (cnt < len) {
207 unsigned in_page;
208 int wlen = len - cnt;
209 in_page = addr % QSFP_PAGESIZE;
210 if ((in_page + wlen) > QSFP_PAGESIZE)
211 wlen = QSFP_PAGESIZE - in_page;
212 ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen);
213 if (ret) {
214 /* qib_twsi_blk_wr() 1 for error, else 0 */
215 ret = -EIO;
216 goto deselect;
217 }
218 addr += wlen;
219 cnt += wlen;
220 }
221 ret = cnt;
222
223deselect:
224 /*
225 * Module could take up to 10 uSec after transfer before
226 * ready to respond to MOD_SEL negation, and there is no way
227 * to tell if it is ready, so we must wait.
228 */
229 udelay(10);
230 /* set QSFP MODSEL, RST, LP high */
231 dd->f_gpio_mod(dd, mask, mask, mask);
232 /*
233 * Module could take up to 2 Msec to respond to MOD_SEL
234	 * going away, and there is no way to tell if it is ready,
235	 * so we must wait.
236 */
237 msleep(2);
238
239bail:
240 mutex_unlock(&dd->eep_lock);
241
242no_unlock:
243 return ret;
244}
245
246/*
247 * For validation, we want to check the checksums, even of the
248 * fields we do not otherwise use. This function reads the bytes from
249 * <first> to <next-1> and returns the 8 LSBs of the sum, or < 0 on error
250 */
251static int qsfp_cks(struct qib_pportdata *ppd, int first, int next)
252{
253 int ret;
254 u16 cks;
255 u8 bval;
256
257 cks = 0;
258 while (first < next) {
259 ret = qsfp_read(ppd, first, &bval, 1);
260 if (ret < 0)
261 goto bail;
262 cks += bval;
263 ++first;
264 }
265 ret = cks & 0xFF;
266bail:
267 return ret;
268
269}
270
271int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
272{
273 int ret;
274 int idx;
275 u16 cks;
276 u32 mask;
277 u8 peek[4];
278
279 /* ensure sane contents on invalid reads, for cable swaps */
280 memset(cp, 0, sizeof(*cp));
281
282 mask = QSFP_GPIO_MOD_PRS_N;
283 if (ppd->hw_pidx)
284 mask <<= QSFP_GPIO_PORT2_SHIFT;
285
286 ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
287 if (ret & mask) {
288 ret = -ENODEV;
289 goto bail;
290 }
291
292 ret = qsfp_read(ppd, 0, peek, 3);
293 if (ret < 0)
294 goto bail;
295 if ((peek[0] & 0xFE) != 0x0C)
296 qib_dev_porterr(ppd->dd, ppd->port,
297 "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
298
299 if ((peek[2] & 2) == 0) {
300 /*
301 * If cable is paged, rather than "flat memory", we need to
302		 * set the page to zero, even if it already appears to be zero.
303 */
304 u8 poke = 0;
305 ret = qib_qsfp_write(ppd, 127, &poke, 1);
306 udelay(50);
307 if (ret != 1) {
308 qib_dev_porterr(ppd->dd, ppd->port,
309 "Failed QSFP Page set\n");
310 goto bail;
311 }
312 }
313
314 ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1);
315 if (ret < 0)
316 goto bail;
317 if ((cp->id & 0xFE) != 0x0C)
318 qib_dev_porterr(ppd->dd, ppd->port,
319 "QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id);
320 cks = cp->id;
321
322 ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1);
323 if (ret < 0)
324 goto bail;
325 cks += cp->pwr;
326
327 ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS);
328 if (ret < 0)
329 goto bail;
330 cks += ret;
331
332 ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1);
333 if (ret < 0)
334 goto bail;
335 cks += cp->len;
336
337 ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1);
338 if (ret < 0)
339 goto bail;
340 cks += cp->tech;
341
342 ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN);
343 if (ret < 0)
344 goto bail;
345 for (idx = 0; idx < QSFP_VEND_LEN; ++idx)
346 cks += cp->vendor[idx];
347
348 ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1);
349 if (ret < 0)
350 goto bail;
351 cks += cp->xt_xcv;
352
353 ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN);
354 if (ret < 0)
355 goto bail;
356 for (idx = 0; idx < QSFP_VOUI_LEN; ++idx)
357 cks += cp->oui[idx];
358
359 ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN);
360 if (ret < 0)
361 goto bail;
362 for (idx = 0; idx < QSFP_PN_LEN; ++idx)
363 cks += cp->partnum[idx];
364
365 ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN);
366 if (ret < 0)
367 goto bail;
368 for (idx = 0; idx < QSFP_REV_LEN; ++idx)
369 cks += cp->rev[idx];
370
371 ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN);
372 if (ret < 0)
373 goto bail;
374 for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx)
375 cks += cp->atten[idx];
376
377 ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS);
378 if (ret < 0)
379 goto bail;
380 cks += ret;
381
382 cks &= 0xFF;
383 ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1);
384 if (ret < 0)
385 goto bail;
386 if (cks != cp->cks1)
387 qib_dev_porterr(ppd->dd, ppd->port,
388 "QSFP cks1 is %02X, computed %02X\n", cp->cks1,
389 cks);
390
391 /* Second checksum covers 192 to (serial, date, lot) */
392 ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS);
393 if (ret < 0)
394 goto bail;
395 cks = ret;
396
397 ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN);
398 if (ret < 0)
399 goto bail;
400 for (idx = 0; idx < QSFP_SN_LEN; ++idx)
401 cks += cp->serial[idx];
402
403 ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN);
404 if (ret < 0)
405 goto bail;
406 for (idx = 0; idx < QSFP_DATE_LEN; ++idx)
407 cks += cp->date[idx];
408
409 ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN);
410 if (ret < 0)
411 goto bail;
412 for (idx = 0; idx < QSFP_LOT_LEN; ++idx)
413 cks += cp->lot[idx];
414
415 ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS);
416 if (ret < 0)
417 goto bail;
418 cks += ret;
419
420 ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1);
421 if (ret < 0)
422 goto bail;
423 cks &= 0xFF;
424 if (cks != cp->cks2)
425 qib_dev_porterr(ppd->dd, ppd->port,
426 "QSFP cks2 is %02X, computed %02X\n", cp->cks2,
427 cks);
428 return 0;
429
430bail:
431 cp->id = 0;
432 return ret;
433}
434
435const char * const qib_qsfp_devtech[16] = {
436 "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
437 "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
438 "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
439 "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
440};
441
442#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
443#define QSFP_DEFAULT_HDR_CNT 224
444
445static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
446
447/*
448 * Initialize structures that control access to QSFP. Called once per port
449 * on cards that support QSFP.
450 */
451void qib_qsfp_init(struct qib_qsfp_data *qd,
452 void (*fevent)(struct work_struct *))
453{
454 u32 mask, highs;
455 int pins;
456
457 struct qib_devdata *dd = qd->ppd->dd;
458
459 /* Initialize work struct for later QSFP events */
460 INIT_WORK(&qd->work, fevent);
461
462 /*
463 * Later, we may want more validation. For now, just set up pins and
464 * blip reset. If module is present, call qib_refresh_qsfp_cache(),
465 * to do further init.
466 */
467 mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
468 highs = mask - QSFP_GPIO_MOD_RST_N;
469 if (qd->ppd->hw_pidx) {
470 mask <<= QSFP_GPIO_PORT2_SHIFT;
471 highs <<= QSFP_GPIO_PORT2_SHIFT;
472 }
473 dd->f_gpio_mod(dd, highs, mask, mask);
474 udelay(20); /* Generous RST dwell */
475
476 dd->f_gpio_mod(dd, mask, mask, mask);
477 /* Spec says module can take up to two seconds! */
478 mask = QSFP_GPIO_MOD_PRS_N;
479 if (qd->ppd->hw_pidx)
480 mask <<= QSFP_GPIO_PORT2_SHIFT;
481
482 /* Do not try to wait here. Better to let event handle it */
483 pins = dd->f_gpio_mod(dd, 0, 0, 0);
484 if (pins & mask)
485 goto bail;
486 /* We see a module, but it may be unwise to look yet. Just schedule */
487 qd->t_insert = get_jiffies_64();
488 schedule_work(&qd->work);
489bail:
490 return;
491}
492
493void qib_qsfp_deinit(struct qib_qsfp_data *qd)
494{
495 /*
496	 * There is nothing to do here for now. Our
497	 * work is scheduled with schedule_work(), and
498	 * flush_scheduled_work() from remove_one will
499	 * block until all work set up with schedule_work()
500	 * completes.
501 */
502}
503
504int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
505{
506 struct qib_qsfp_cache cd;
507 u8 bin_buff[QSFP_DUMP_CHUNK];
508 char lenstr[6];
509 int sofar, ret;
510 int bidx = 0;
511
512 sofar = 0;
513 ret = qib_refresh_qsfp_cache(ppd, &cd);
514 if (ret < 0)
515 goto bail;
516
517 lenstr[0] = ' ';
518 lenstr[1] = '\0';
519 if (QSFP_IS_CU(cd.tech))
520 sprintf(lenstr, "%dM ", cd.len);
521
522 sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes +
523 (QSFP_PWR(cd.pwr) * 4));
524
525 sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr,
526 qib_qsfp_devtech[cd.tech >> 4]);
527
528 sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
529 QSFP_VEND_LEN, cd.vendor);
530
531 sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
532 QSFP_OUI(cd.oui));
533
534 sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
535 QSFP_PN_LEN, cd.partnum);
536 sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
537 QSFP_REV_LEN, cd.rev);
538 if (QSFP_IS_CU(cd.tech))
539 sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n",
540 QSFP_ATTEN_SDR(cd.atten),
541 QSFP_ATTEN_DDR(cd.atten));
542 sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
543 QSFP_SN_LEN, cd.serial);
544 sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
545 QSFP_DATE_LEN, cd.date);
546 sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
547			   QSFP_LOT_LEN, cd.lot);
548
549 while (bidx < QSFP_DEFAULT_HDR_CNT) {
550 int iidx;
551 ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
552 if (ret < 0)
553 goto bail;
554 for (iidx = 0; iidx < ret; ++iidx) {
555 sofar += scnprintf(buf + sofar, len-sofar, " %02X",
556 bin_buff[iidx]);
557 }
558 sofar += scnprintf(buf + sofar, len - sofar, "\n");
559 bidx += QSFP_DUMP_CHUNK;
560 }
561 ret = sofar;
562bail:
563 return ret;
564}
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
new file mode 100644
index 000000000000..19b527bafd57
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -0,0 +1,184 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33/* QSFP support common definitions, for ib_qib driver */
34
35#define QSFP_DEV 0xA0
36#define QSFP_PWR_LAG_MSEC 2000
37
38/*
39 * Below are masks for various QSFP signals, for Port 1.
40 * Port2 equivalents are shifted by QSFP_GPIO_PORT2_SHIFT.
41 * _N means asserted low
42 */
43#define QSFP_GPIO_MOD_SEL_N (4)
44#define QSFP_GPIO_MOD_PRS_N (8)
45#define QSFP_GPIO_INT_N (0x10)
46#define QSFP_GPIO_MOD_RST_N (0x20)
47#define QSFP_GPIO_LP_MODE (0x40)
48#define QSFP_GPIO_PORT2_SHIFT 5
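/*
 * Illustrative example (follows directly from the masks above): a
 * Port 2 signal is the Port 1 bit shifted by QSFP_GPIO_PORT2_SHIFT,
 * e.g. for the reset line:
 *
 *	QSFP_GPIO_MOD_RST_N                          == 0x020 (Port 1)
 *	QSFP_GPIO_MOD_RST_N << QSFP_GPIO_PORT2_SHIFT == 0x400 (Port 2)
 */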
49
50#define QSFP_PAGESIZE 128
51/* Defined fields that QLogic requires of qualified cables */
52/* Byte 0 is Identifier, not checked */
53/* Byte 1 is reserved "status MSB" */
54/* Byte 2 is "status LSB" We only care that D2 "Flat Mem" is set. */
55/*
56 * Rest of first 128 not used, although 127 is reserved for page select
57 * if module is not "Flat memory".
58 */
59/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
60#define QSFP_MOD_ID_OFFS 128
61/*
62 * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class
63 * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
64 */
65#define QSFP_MOD_PWR_OFFS 129
66/* Byte 130 is Connector type. Not QLogic req'd */
67/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
68/* Byte 139 is encoding. Code 0x01 is 8b10b. Not QLogic req'd */
69/* Byte 140 is nominal bit-rate, in units of 100Mbits/sec. Not QLogic req'd */
70/* Byte 141 is Extended Rate Select. Not QLogic req'd */
71/* Bytes 142..145 are lengths for various fiber types. Not QLogic req'd */
72/* Byte 146 is length for Copper. Units of 1 meter */
73#define QSFP_MOD_LEN_OFFS 146
74/*
75 * Byte 147 is Device technology. D0..3 not QLogic req'd
76 * D4..7 select from 15 choices, translated by table:
77 */
78#define QSFP_MOD_TECH_OFFS 147
79extern const char *const qib_qsfp_devtech[16];
80/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
81#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
82/* Attenuation should be valid for copper other than full/near Eq */
83#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
84/* Length is only valid if technology is "copper" */
85#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
86#define QSFP_TECH_1490 9
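/*
 * Worked example for the lookup macros above: each hex constant is a
 * 16-bit membership bitmap indexed by the upper nibble of the tech
 * byte (147). For a hypothetical tech byte of 0xA0 (nibble 0xA):
 *
 *	QSFP_IS_CU(0xA0)     == (0xED00 >> 10) & 1 == 1 (copper)
 *	QSFP_HAS_ATTEN(0xA0) == (0x4D00 >> 10) & 1 == 1 (atten valid)
 *	QSFP_IS_ACTIVE(0xA0) == (0xA2FF >> 10) & 1 == 0 (not active EQ)
 */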
87
88#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
89 oui[2])
90#define QSFP_OUI_AMPHENOL 0x415048
91#define QSFP_OUI_FINISAR 0x009065
92#define QSFP_OUI_GORE 0x002177
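/*
 * Example of the OUI packing above: for oui[] == { 0x00, 0x21, 0x77 },
 * QSFP_OUI(oui) == (0x00 << 16) | (0x21 << 8) | 0x77 == 0x002177,
 * which matches QSFP_OUI_GORE.
 */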
93
94/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
95#define QSFP_VEND_OFFS 148
96#define QSFP_VEND_LEN 16
97/* Byte 164 is IB Extended transceiver codes. Bits D0..3 are SDR,DDR,QDR,EDR */
98#define QSFP_IBXCV_OFFS 164
99/* Bytes 165..167 are Vendor OUI number */
100#define QSFP_VOUI_OFFS 165
101#define QSFP_VOUI_LEN 3
102/* Bytes 168..183 are Vendor Part Number, string */
103#define QSFP_PN_OFFS 168
104#define QSFP_PN_LEN 16
105/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
106#define QSFP_REV_OFFS 184
107#define QSFP_REV_LEN 2
108/*
109 * Bytes 186,187 are Wavelength, if Optical. Not QLogic req'd
110 * If copper, they are attenuation in dB:
111 * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
112 */
113#define QSFP_ATTEN_OFFS 186
114#define QSFP_ATTEN_LEN 2
115/* Bytes 188,189 are Wavelength tolerance, not QLogic req'd */
116/* Byte 190 is Max Case Temp. Not QLogic req'd */
117/* Byte 191 is LSB of sum of bytes 128..190. Not QLogic req'd */
118#define QSFP_CC_OFFS 191
119/* Bytes 192..195 are Options implemented in qsfp. Not QLogic req'd */
120/* Bytes 196..211 are Serial Number, String */
121#define QSFP_SN_OFFS 196
122#define QSFP_SN_LEN 16
123/* Bytes 212..219 are date-code YYMMDD (MM==1 for Jan) */
124#define QSFP_DATE_OFFS 212
125#define QSFP_DATE_LEN 6
126/* Bytes 218,219 are optional lot-code, string */
127#define QSFP_LOT_OFFS 218
128#define QSFP_LOT_LEN 2
129/* Bytes 220, 221 indicate monitoring options, Not QLogic req'd */
130/* Byte 223 is LSB of sum of bytes 192..222 */
131#define QSFP_CC_EXT_OFFS 223
132
133/*
134 * struct qib_qsfp_data encapsulates state of QSFP device for one port.
135 * It will be part of port-chip-specific data if a board supports QSFP.
136 *
137 * Since multiple board-types use QSFP, and their pport_data structs
138 * differ (in the chip-specific section), we need a pointer to its head.
139 *
140 * Avoiding premature optimization, we will have one work_struct per port,
141 * and let the (increasingly inaccurately named) eep_lock arbitrate
142 * access to common resources.
143 *
144 */
145
146/*
147 * Hold the parts of the onboard EEPROM that we care about, so we aren't
148 * constantly bit-banging the interface.
149 */
150struct qib_qsfp_cache {
151 u8 id; /* must be 0x0C or 0x0D; 0 indicates invalid EEPROM read */
152 u8 pwr; /* in D6,7 */
153 u8 len; /* in meters, Cu only */
154 u8 tech;
155 char vendor[QSFP_VEND_LEN];
156 u8 xt_xcv; /* Ext. transceiver codes, 4 lsbs are IB speed supported */
157 u8 oui[QSFP_VOUI_LEN];
158 u8 partnum[QSFP_PN_LEN];
159 u8 rev[QSFP_REV_LEN];
160 u8 atten[QSFP_ATTEN_LEN];
161 u8 cks1; /* Checksum of bytes 128..190 */
162 u8 serial[QSFP_SN_LEN];
163 u8 date[QSFP_DATE_LEN];
164 u8 lot[QSFP_LOT_LEN];
165 u8 cks2; /* Checksum of bytes 192..222 */
166};
167
168#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
169#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
170#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
171
172struct qib_qsfp_data {
173 /* Helps to find our way */
174 struct qib_pportdata *ppd;
175 struct work_struct work;
176 struct qib_qsfp_cache cache;
177 u64 t_insert;
178};
179
180extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
181 struct qib_qsfp_cache *cp);
182extern void qib_qsfp_init(struct qib_qsfp_data *qd,
183 void (*fevent)(struct work_struct *));
184extern void qib_qsfp_deinit(struct qib_qsfp_data *qd);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
new file mode 100644
index 000000000000..40c0a373719c
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -0,0 +1,2288 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/io.h>
35
36#include "qib.h"
37
38/* cut down ridiculously long IB macro names */
39#define OP(x) IB_OPCODE_RC_##x
40
41static void rc_timeout(unsigned long arg);
42
43static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
44 u32 psn, u32 pmtu)
45{
46 u32 len;
47
48 len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
49 ss->sge = wqe->sg_list[0];
50 ss->sg_list = wqe->sg_list + 1;
51 ss->num_sge = wqe->wr.num_sge;
52 ss->total_len = wqe->length;
53 qib_skip_sge(ss, len, 0);
54 return wqe->length - len;
55}
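/*
 * Illustrative example (hypothetical values): resuming a request that
 * began at PSN 0x100 when the restart point is PSN 0x103, with a
 * 2048-byte path MTU, skips the three packets already delivered:
 *
 *	len = ((0x103 - 0x100) & QIB_PSN_MASK) * 2048 = 6144 bytes
 */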
56
57static void start_timer(struct qib_qp *qp)
58{
59 qp->s_flags |= QIB_S_TIMER;
60 qp->s_timer.function = rc_timeout;
61 /* 4.096 usec. * (1 << qp->timeout) */
62 qp->s_timer.expires = jiffies +
63 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / 1000UL);
64 add_timer(&qp->s_timer);
65}
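/*
 * Worked example of the formula above (hypothetical timeout value):
 * for qp->timeout == 14,
 *
 *	(4096 * (1 << 14)) / 1000 = 67108 usec, roughly 67 msec
 *
 * i.e. the IB local ACK timeout of 4.096 usec * 2^timeout.
 */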
66
67/**
68 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
69 * @dev: the device for this QP
70 * @qp: a pointer to the QP
71 * @ohdr: a pointer to the IB header being constructed
72 * @pmtu: the path MTU
73 *
74 * Return 1 if constructed; otherwise, return 0.
75 * Note that we are in the responder's side of the QP context.
76 * Note the QP s_lock must be held.
77 */
78static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
79 struct qib_other_headers *ohdr, u32 pmtu)
80{
81 struct qib_ack_entry *e;
82 u32 hwords;
83 u32 len;
84 u32 bth0;
85 u32 bth2;
86
87 /* Don't send an ACK if we aren't supposed to. */
88 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
89 goto bail;
90
91 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
92 hwords = 5;
93
94 switch (qp->s_ack_state) {
95 case OP(RDMA_READ_RESPONSE_LAST):
96 case OP(RDMA_READ_RESPONSE_ONLY):
97 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
98 if (e->rdma_sge.mr) {
99 atomic_dec(&e->rdma_sge.mr->refcount);
100 e->rdma_sge.mr = NULL;
101 }
102 /* FALLTHROUGH */
103 case OP(ATOMIC_ACKNOWLEDGE):
104 /*
105 * We can increment the tail pointer now that the last
106 * response has been sent instead of only being
107 * constructed.
108 */
109 if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
110 qp->s_tail_ack_queue = 0;
111 /* FALLTHROUGH */
112 case OP(SEND_ONLY):
113 case OP(ACKNOWLEDGE):
114 /* Check for no next entry in the queue. */
115 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
116 if (qp->s_flags & QIB_S_ACK_PENDING)
117 goto normal;
118 goto bail;
119 }
120
121 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
122 if (e->opcode == OP(RDMA_READ_REQUEST)) {
123 /*
124 * If an RDMA read response is being resent and
125 * we haven't seen the duplicate request yet,
126 * then stop sending the remaining responses the
127 * responder has seen until the requester resends it.
128 */
129 len = e->rdma_sge.sge_length;
130 if (len && !e->rdma_sge.mr) {
131 qp->s_tail_ack_queue = qp->r_head_ack_queue;
132 goto bail;
133 }
134 /* Copy SGE state in case we need to resend */
135 qp->s_rdma_mr = e->rdma_sge.mr;
136 if (qp->s_rdma_mr)
137 atomic_inc(&qp->s_rdma_mr->refcount);
138 qp->s_ack_rdma_sge.sge = e->rdma_sge;
139 qp->s_ack_rdma_sge.num_sge = 1;
140 qp->s_cur_sge = &qp->s_ack_rdma_sge;
141 if (len > pmtu) {
142 len = pmtu;
143 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
144 } else {
145 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
146 e->sent = 1;
147 }
148 ohdr->u.aeth = qib_compute_aeth(qp);
149 hwords++;
150 qp->s_ack_rdma_psn = e->psn;
151 bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
152 } else {
153 /* COMPARE_SWAP or FETCH_ADD */
154 qp->s_cur_sge = NULL;
155 len = 0;
156 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
157 ohdr->u.at.aeth = qib_compute_aeth(qp);
158 ohdr->u.at.atomic_ack_eth[0] =
159 cpu_to_be32(e->atomic_data >> 32);
160 ohdr->u.at.atomic_ack_eth[1] =
161 cpu_to_be32(e->atomic_data);
162 hwords += sizeof(ohdr->u.at) / sizeof(u32);
163 bth2 = e->psn & QIB_PSN_MASK;
164 e->sent = 1;
165 }
166 bth0 = qp->s_ack_state << 24;
167 break;
168
169 case OP(RDMA_READ_RESPONSE_FIRST):
170 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
171 /* FALLTHROUGH */
172 case OP(RDMA_READ_RESPONSE_MIDDLE):
173 qp->s_cur_sge = &qp->s_ack_rdma_sge;
174 qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
175 if (qp->s_rdma_mr)
176 atomic_inc(&qp->s_rdma_mr->refcount);
177 len = qp->s_ack_rdma_sge.sge.sge_length;
178 if (len > pmtu)
179 len = pmtu;
180 else {
181 ohdr->u.aeth = qib_compute_aeth(qp);
182 hwords++;
183 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
184 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
185 e->sent = 1;
186 }
187 bth0 = qp->s_ack_state << 24;
188 bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
189 break;
190
191 default:
192normal:
193 /*
194 * Send a regular ACK.
195 * Set the s_ack_state so we wait until after sending
196 * the ACK before setting s_ack_state to ACKNOWLEDGE
197 * (see above).
198 */
199 qp->s_ack_state = OP(SEND_ONLY);
200 qp->s_flags &= ~QIB_S_ACK_PENDING;
201 qp->s_cur_sge = NULL;
202 if (qp->s_nak_state)
203 ohdr->u.aeth =
204 cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
205 (qp->s_nak_state <<
206 QIB_AETH_CREDIT_SHIFT));
207 else
208 ohdr->u.aeth = qib_compute_aeth(qp);
209 hwords++;
210 len = 0;
211 bth0 = OP(ACKNOWLEDGE) << 24;
212 bth2 = qp->s_ack_psn & QIB_PSN_MASK;
213 }
214 qp->s_rdma_ack_cnt++;
215 qp->s_hdrwords = hwords;
216 qp->s_cur_size = len;
217 qib_make_ruc_header(qp, ohdr, bth0, bth2);
218 return 1;
219
220bail:
221 qp->s_ack_state = OP(ACKNOWLEDGE);
222 qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
223 return 0;
224}
225
226/**
227 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
228 * @qp: a pointer to the QP
229 *
230 * Return 1 if constructed; otherwise, return 0.
231 */
232int qib_make_rc_req(struct qib_qp *qp)
233{
234 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
235 struct qib_other_headers *ohdr;
236 struct qib_sge_state *ss;
237 struct qib_swqe *wqe;
238 u32 hwords;
239 u32 len;
240 u32 bth0;
241 u32 bth2;
242 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
243 char newreq;
244 unsigned long flags;
245 int ret = 0;
246 int delta;
247
248 ohdr = &qp->s_hdr.u.oth;
249 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
250 ohdr = &qp->s_hdr.u.l.oth;
251
252 /*
253 * The lock is needed to synchronize between the sending tasklet,
254 * the receive interrupt handler, and timeout resends.
255 */
256 spin_lock_irqsave(&qp->s_lock, flags);
257
258 /* Sending responses has higher priority than sending requests. */
259 if ((qp->s_flags & QIB_S_RESP_PENDING) &&
260 qib_make_rc_ack(dev, qp, ohdr, pmtu))
261 goto done;
262
263 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
264 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
265 goto bail;
266 /* We are in the error state, flush the work request. */
267 if (qp->s_last == qp->s_head)
268 goto bail;
269 /* If DMAs are in progress, we can't flush immediately. */
270 if (atomic_read(&qp->s_dma_busy)) {
271 qp->s_flags |= QIB_S_WAIT_DMA;
272 goto bail;
273 }
274 wqe = get_swqe_ptr(qp, qp->s_last);
275 while (qp->s_last != qp->s_acked) {
276 qib_send_complete(qp, wqe, IB_WC_SUCCESS);
277 if (++qp->s_last >= qp->s_size)
278 qp->s_last = 0;
279 wqe = get_swqe_ptr(qp, qp->s_last);
280 }
281 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
282 goto done;
283 }
284
285 if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))
286 goto bail;
287
288 if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
289 if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
290 qp->s_flags |= QIB_S_WAIT_PSN;
291 goto bail;
292 }
293 qp->s_sending_psn = qp->s_psn;
294 qp->s_sending_hpsn = qp->s_psn - 1;
295 }
296
297 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
298 hwords = 5;
299 bth0 = 0;
300
301 /* Send a request. */
302 wqe = get_swqe_ptr(qp, qp->s_cur);
303 switch (qp->s_state) {
304 default:
305 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK))
306 goto bail;
307 /*
308 * Resend an old request or start a new one.
309 *
310 * We keep track of the current SWQE so that
311 * we don't reset the "furthest progress" state
312 * if we need to back up.
313 */
314 newreq = 0;
315 if (qp->s_cur == qp->s_tail) {
316 /* Check if send work queue is empty. */
317 if (qp->s_tail == qp->s_head)
318 goto bail;
319 /*
320 * If a fence is requested, wait for previous
321 * RDMA read and atomic operations to finish.
322 */
323 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
324 qp->s_num_rd_atomic) {
325 qp->s_flags |= QIB_S_WAIT_FENCE;
326 goto bail;
327 }
328 wqe->psn = qp->s_next_psn;
329 newreq = 1;
330 }
331 /*
332 * Note that we have to be careful not to modify the
333 * original work request since we may need to resend
334 * it.
335 */
336 len = wqe->length;
337 ss = &qp->s_sge;
338 bth2 = qp->s_psn & QIB_PSN_MASK;
339 switch (wqe->wr.opcode) {
340 case IB_WR_SEND:
341 case IB_WR_SEND_WITH_IMM:
342 /* If no credit, return. */
343 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
344 qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
345 qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
346 goto bail;
347 }
348 wqe->lpsn = wqe->psn;
349 if (len > pmtu) {
350 wqe->lpsn += (len - 1) / pmtu;
351 qp->s_state = OP(SEND_FIRST);
352 len = pmtu;
353 break;
354 }
355 if (wqe->wr.opcode == IB_WR_SEND)
356 qp->s_state = OP(SEND_ONLY);
357 else {
358 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
359 /* Immediate data comes after the BTH */
360 ohdr->u.imm_data = wqe->wr.ex.imm_data;
361 hwords += 1;
362 }
363 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
364 bth0 |= IB_BTH_SOLICITED;
365 bth2 |= IB_BTH_REQ_ACK;
366 if (++qp->s_cur == qp->s_size)
367 qp->s_cur = 0;
368 break;
369
370 case IB_WR_RDMA_WRITE:
371 if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
372 qp->s_lsn++;
373 /* FALLTHROUGH */
374 case IB_WR_RDMA_WRITE_WITH_IMM:
375 /* If no credit, return. */
376 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
377 qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
378 qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
379 goto bail;
380 }
381 ohdr->u.rc.reth.vaddr =
382 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
383 ohdr->u.rc.reth.rkey =
384 cpu_to_be32(wqe->wr.wr.rdma.rkey);
385 ohdr->u.rc.reth.length = cpu_to_be32(len);
386 hwords += sizeof(struct ib_reth) / sizeof(u32);
387 wqe->lpsn = wqe->psn;
388 if (len > pmtu) {
389 wqe->lpsn += (len - 1) / pmtu;
390 qp->s_state = OP(RDMA_WRITE_FIRST);
391 len = pmtu;
392 break;
393 }
394 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
395 qp->s_state = OP(RDMA_WRITE_ONLY);
396 else {
397 qp->s_state =
398 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
399 /* Immediate data comes after RETH */
400 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
401 hwords += 1;
402 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
403 bth0 |= IB_BTH_SOLICITED;
404 }
405 bth2 |= IB_BTH_REQ_ACK;
406 if (++qp->s_cur == qp->s_size)
407 qp->s_cur = 0;
408 break;
409
410 case IB_WR_RDMA_READ:
411 /*
412 * Don't allow more operations to be started
413 * than the QP limits allow.
414 */
415 if (newreq) {
416 if (qp->s_num_rd_atomic >=
417 qp->s_max_rd_atomic) {
418 qp->s_flags |= QIB_S_WAIT_RDMAR;
419 goto bail;
420 }
421 qp->s_num_rd_atomic++;
422 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
423 qp->s_lsn++;
424 /*
425 * Adjust s_next_psn to count the
426 * expected number of responses.
427 */
428 if (len > pmtu)
429 qp->s_next_psn += (len - 1) / pmtu;
430 wqe->lpsn = qp->s_next_psn++;
431 }
432 ohdr->u.rc.reth.vaddr =
433 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
434 ohdr->u.rc.reth.rkey =
435 cpu_to_be32(wqe->wr.wr.rdma.rkey);
436 ohdr->u.rc.reth.length = cpu_to_be32(len);
437 qp->s_state = OP(RDMA_READ_REQUEST);
438 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
439 ss = NULL;
440 len = 0;
441 bth2 |= IB_BTH_REQ_ACK;
442 if (++qp->s_cur == qp->s_size)
443 qp->s_cur = 0;
444 break;
445
446 case IB_WR_ATOMIC_CMP_AND_SWP:
447 case IB_WR_ATOMIC_FETCH_AND_ADD:
448 /*
449 * Don't allow more operations to be started
450 * than the QP limits allow.
451 */
452 if (newreq) {
453 if (qp->s_num_rd_atomic >=
454 qp->s_max_rd_atomic) {
455 qp->s_flags |= QIB_S_WAIT_RDMAR;
456 goto bail;
457 }
458 qp->s_num_rd_atomic++;
459 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
460 qp->s_lsn++;
461 wqe->lpsn = wqe->psn;
462 }
463 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
464 qp->s_state = OP(COMPARE_SWAP);
465 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
466 wqe->wr.wr.atomic.swap);
467 ohdr->u.atomic_eth.compare_data = cpu_to_be64(
468 wqe->wr.wr.atomic.compare_add);
469 } else {
470 qp->s_state = OP(FETCH_ADD);
471 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
472 wqe->wr.wr.atomic.compare_add);
473 ohdr->u.atomic_eth.compare_data = 0;
474 }
475 ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
476 wqe->wr.wr.atomic.remote_addr >> 32);
477 ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
478 wqe->wr.wr.atomic.remote_addr);
479 ohdr->u.atomic_eth.rkey = cpu_to_be32(
480 wqe->wr.wr.atomic.rkey);
481 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
482 ss = NULL;
483 len = 0;
484 bth2 |= IB_BTH_REQ_ACK;
485 if (++qp->s_cur == qp->s_size)
486 qp->s_cur = 0;
487 break;
488
489 default:
490 goto bail;
491 }
492 qp->s_sge.sge = wqe->sg_list[0];
493 qp->s_sge.sg_list = wqe->sg_list + 1;
494 qp->s_sge.num_sge = wqe->wr.num_sge;
495 qp->s_sge.total_len = wqe->length;
496 qp->s_len = wqe->length;
497 if (newreq) {
498 qp->s_tail++;
499 if (qp->s_tail >= qp->s_size)
500 qp->s_tail = 0;
501 }
502 if (wqe->wr.opcode == IB_WR_RDMA_READ)
503 qp->s_psn = wqe->lpsn + 1;
504 else {
505 qp->s_psn++;
506 if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
507 qp->s_next_psn = qp->s_psn;
508 }
509 break;
510
511 case OP(RDMA_READ_RESPONSE_FIRST):
512 /*
513 * qp->s_state is normally set to the opcode of the
514 * last packet constructed for new requests and therefore
515 * is never set to RDMA read response.
516 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
517 * thread to indicate a SEND needs to be restarted from an
518 * earlier PSN without interfering with the sending thread.
519 * See qib_restart_rc().
520 */
521 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
522 /* FALLTHROUGH */
523 case OP(SEND_FIRST):
524 qp->s_state = OP(SEND_MIDDLE);
525 /* FALLTHROUGH */
526 case OP(SEND_MIDDLE):
527 bth2 = qp->s_psn++ & QIB_PSN_MASK;
528 if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
529 qp->s_next_psn = qp->s_psn;
530 ss = &qp->s_sge;
531 len = qp->s_len;
532 if (len > pmtu) {
533 len = pmtu;
534 break;
535 }
536 if (wqe->wr.opcode == IB_WR_SEND)
537 qp->s_state = OP(SEND_LAST);
538 else {
539 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
540 /* Immediate data comes after the BTH */
541 ohdr->u.imm_data = wqe->wr.ex.imm_data;
542 hwords += 1;
543 }
544 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
545 bth0 |= IB_BTH_SOLICITED;
546 bth2 |= IB_BTH_REQ_ACK;
547 qp->s_cur++;
548 if (qp->s_cur >= qp->s_size)
549 qp->s_cur = 0;
550 break;
551
552 case OP(RDMA_READ_RESPONSE_LAST):
553 /*
554 * qp->s_state is normally set to the opcode of the
555 * last packet constructed for new requests and therefore
556 * is never set to RDMA read response.
557 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
558 * thread to indicate an RDMA write needs to be restarted from
559 * an earlier PSN without interfering with the sending thread.
560 * See qib_restart_rc().
561 */
562 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
563 /* FALLTHROUGH */
564 case OP(RDMA_WRITE_FIRST):
565 qp->s_state = OP(RDMA_WRITE_MIDDLE);
566 /* FALLTHROUGH */
567 case OP(RDMA_WRITE_MIDDLE):
568 bth2 = qp->s_psn++ & QIB_PSN_MASK;
569 if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
570 qp->s_next_psn = qp->s_psn;
571 ss = &qp->s_sge;
572 len = qp->s_len;
573 if (len > pmtu) {
574 len = pmtu;
575 break;
576 }
577 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
578 qp->s_state = OP(RDMA_WRITE_LAST);
579 else {
580 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
581 /* Immediate data comes after the BTH */
582 ohdr->u.imm_data = wqe->wr.ex.imm_data;
583 hwords += 1;
584 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
585 bth0 |= IB_BTH_SOLICITED;
586 }
587 bth2 |= IB_BTH_REQ_ACK;
588 qp->s_cur++;
589 if (qp->s_cur >= qp->s_size)
590 qp->s_cur = 0;
591 break;
592
593 case OP(RDMA_READ_RESPONSE_MIDDLE):
594 /*
595 * qp->s_state is normally set to the opcode of the
596 * last packet constructed for new requests and therefore
597 * is never set to RDMA read response.
598 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
599 * thread to indicate an RDMA read needs to be restarted from
600 * an earlier PSN without interfering with the sending thread.
601 * See qib_restart_rc().
602 */
603 len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
604 ohdr->u.rc.reth.vaddr =
605 cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
606 ohdr->u.rc.reth.rkey =
607 cpu_to_be32(wqe->wr.wr.rdma.rkey);
608 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
609 qp->s_state = OP(RDMA_READ_REQUEST);
610 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
611 bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
612 qp->s_psn = wqe->lpsn + 1;
613 ss = NULL;
614 len = 0;
615 qp->s_cur++;
616 if (qp->s_cur == qp->s_size)
617 qp->s_cur = 0;
618 break;
619 }
620 qp->s_sending_hpsn = bth2;
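	/* Sign-extend the 24-bit PSN difference; delta may be negative */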
621 delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
622 if (delta && delta % QIB_PSN_CREDIT == 0)
623 bth2 |= IB_BTH_REQ_ACK;
624 if (qp->s_flags & QIB_S_SEND_ONE) {
625 qp->s_flags &= ~QIB_S_SEND_ONE;
626 qp->s_flags |= QIB_S_WAIT_ACK;
627 bth2 |= IB_BTH_REQ_ACK;
628 }
629 qp->s_len -= len;
630 qp->s_hdrwords = hwords;
631 qp->s_cur_sge = ss;
632 qp->s_cur_size = len;
633 qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
634done:
635 ret = 1;
636 goto unlock;
637
638bail:
639 qp->s_flags &= ~QIB_S_BUSY;
640unlock:
641 spin_unlock_irqrestore(&qp->s_lock, flags);
642 return ret;
643}
644
645/**
646 * qib_send_rc_ack - Construct an ACK packet and send it
647 * @qp: a pointer to the QP
648 *
649 * This is called from qib_rc_rcv() and qib_kreceive().
650 * Note that RDMA reads and atomics are handled in the
651 * send side QP state and tasklet.
652 */
653void qib_send_rc_ack(struct qib_qp *qp)
654{
655 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
656 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
657 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
658 u64 pbc;
659 u16 lrh0;
660 u32 bth0;
661 u32 hwords;
662 u32 pbufn;
663 u32 __iomem *piobuf;
664 struct qib_ib_header hdr;
665 struct qib_other_headers *ohdr;
666 u32 control;
667 unsigned long flags;
668
669 spin_lock_irqsave(&qp->s_lock, flags);
670
671 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
672 goto unlock;
673
674 /* Don't send ACK or NAK if an RDMA read or atomic is pending. */
675 if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
676 goto queue_ack;
677
678 /* Construct the header with s_lock held so APM doesn't change it. */
679 ohdr = &hdr.u.oth;
680 lrh0 = QIB_LRH_BTH;
681 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
682 hwords = 6;
683 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
684 hwords += qib_make_grh(ibp, &hdr.u.l.grh,
685 &qp->remote_ah_attr.grh, hwords, 0);
686 ohdr = &hdr.u.l.oth;
687 lrh0 = QIB_LRH_GRH;
688 }
689 /* read pkey_index w/o lock (it's atomic) */
690 bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
691 if (qp->s_mig_state == IB_MIG_MIGRATED)
692 bth0 |= IB_BTH_MIG_REQ;
693 if (qp->r_nak_state)
694 ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
695 (qp->r_nak_state <<
696 QIB_AETH_CREDIT_SHIFT));
697 else
698 ohdr->u.aeth = qib_compute_aeth(qp);
699 lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
700 qp->remote_ah_attr.sl << 4;
701 hdr.lrh[0] = cpu_to_be16(lrh0);
702 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
703 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
704 hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
705 ohdr->bth[0] = cpu_to_be32(bth0);
706 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
707 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
708
709 spin_unlock_irqrestore(&qp->s_lock, flags);
710
711 /* Don't try to send ACKs if the link isn't ACTIVE */
712 if (!(ppd->lflags & QIBL_LINKACTIVE))
713 goto done;
714
715 control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
716 qp->s_srate, lrh0 >> 12);
717 /* length is + 1 for the control dword */
718 pbc = ((u64) control << 32) | (hwords + 1);
719
720 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
721 if (!piobuf) {
722 /*
723 * We are out of PIO buffers at the moment.
724 * Pass responsibility for sending the ACK to the
725 * send tasklet so that when a PIO buffer becomes
726 * available, the ACK is sent ahead of other outgoing
727 * packets.
728 */
729 spin_lock_irqsave(&qp->s_lock, flags);
730 goto queue_ack;
731 }
732
733 /*
734 * Write the pbc.
735 * We have to flush after the PBC for correctness
736 * on some CPUs or the WC buffer can be written out of order.
737 */
738 writeq(pbc, piobuf);
739
740 if (dd->flags & QIB_PIO_FLUSH_WC) {
741 u32 *hdrp = (u32 *) &hdr;
742
743 qib_flush_wc();
744 qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
745 qib_flush_wc();
746 __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
747 } else
748 qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);
749
750 if (dd->flags & QIB_USE_SPCL_TRIG) {
751 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
752
753 qib_flush_wc();
754 __raw_writel(0xaebecede, piobuf + spcl_off);
755 }
756
757 qib_flush_wc();
758 qib_sendbuf_done(dd, pbufn);
759
760 ibp->n_unicast_xmit++;
761 goto done;
762
763queue_ack:
764 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
765 ibp->n_rc_qacks++;
766 qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
767 qp->s_nak_state = qp->r_nak_state;
768 qp->s_ack_psn = qp->r_ack_psn;
769
770 /* Schedule the send tasklet. */
771 qib_schedule_send(qp);
772 }
773unlock:
774 spin_unlock_irqrestore(&qp->s_lock, flags);
775done:
776 return;
777}
778
779/**
780 * reset_psn - reset the QP state to send starting from PSN
781 * @qp: the QP
782 * @psn: the packet sequence number to restart at
783 *
784 * This is called from qib_rc_rcv() to process an incoming RC ACK
785 * for the given QP.
786 * Called at interrupt level with the QP s_lock held.
787 */
788static void reset_psn(struct qib_qp *qp, u32 psn)
789{
790 u32 n = qp->s_acked;
791 struct qib_swqe *wqe = get_swqe_ptr(qp, n);
792 u32 opcode;
793
794 qp->s_cur = n;
795
796 /*
797 * If we are starting the request from the beginning,
798 * let the normal send code handle initialization.
799 */
800 if (qib_cmp24(psn, wqe->psn) <= 0) {
801 qp->s_state = OP(SEND_LAST);
802 goto done;
803 }
804
805 /* Find the work request opcode corresponding to the given PSN. */
806 opcode = wqe->wr.opcode;
807 for (;;) {
808 int diff;
809
810 if (++n == qp->s_size)
811 n = 0;
812 if (n == qp->s_tail)
813 break;
814 wqe = get_swqe_ptr(qp, n);
815 diff = qib_cmp24(psn, wqe->psn);
816 if (diff < 0)
817 break;
818 qp->s_cur = n;
819 /*
820 * If we are starting the request from the beginning,
821 * let the normal send code handle initialization.
822 */
823 if (diff == 0) {
824 qp->s_state = OP(SEND_LAST);
825 goto done;
826 }
827 opcode = wqe->wr.opcode;
828 }
829
830 /*
831 * Set the state to restart in the middle of a request.
832 * Don't change the s_sge, s_cur_sge, or s_cur_size.
833 * See qib_make_rc_req().
834 */
835 switch (opcode) {
836 case IB_WR_SEND:
837 case IB_WR_SEND_WITH_IMM:
838 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
839 break;
840
841 case IB_WR_RDMA_WRITE:
842 case IB_WR_RDMA_WRITE_WITH_IMM:
843 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
844 break;
845
846 case IB_WR_RDMA_READ:
847 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
848 break;
849
850 default:
851 /*
852 * This case shouldn't happen since there is
853 * only one PSN per req.
854 */
855 qp->s_state = OP(SEND_LAST);
856 }
857done:
858 qp->s_psn = psn;
859 /*
860 * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer
861 * asynchronously before the send tasklet can get scheduled.
862 * Doing it in qib_make_rc_req() is too late.
863 */
864 if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
865 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
866 qp->s_flags |= QIB_S_WAIT_PSN;
867}
868
869/*
870 * Back up requester to resend the last un-ACKed request.
871 * The QP s_lock should be held and interrupts disabled.
872 */
873static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
874{
875 struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
876 struct qib_ibport *ibp;
877
878 if (qp->s_retry == 0) {
879 if (qp->s_mig_state == IB_MIG_ARMED) {
880 qib_migrate_qp(qp);
881 qp->s_retry = qp->s_retry_cnt;
882 } else if (qp->s_last == qp->s_acked) {
883 qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
884 qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
885 return;
886 } else /* XXX need to handle delayed completion */
887 return;
888 } else
889 qp->s_retry--;
890
891 ibp = to_iport(qp->ibqp.device, qp->port_num);
892 if (wqe->wr.opcode == IB_WR_RDMA_READ)
893 ibp->n_rc_resends++;
894 else
895 ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
896
897 qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
898 QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
899 QIB_S_WAIT_ACK);
900 if (wait)
901 qp->s_flags |= QIB_S_SEND_ONE;
902 reset_psn(qp, psn);
903}
904
905/*
906 * This is called from s_timer for missing responses.
907 */
908static void rc_timeout(unsigned long arg)
909{
910 struct qib_qp *qp = (struct qib_qp *)arg;
911 struct qib_ibport *ibp;
912 unsigned long flags;
913
914 spin_lock_irqsave(&qp->s_lock, flags);
915 if (qp->s_flags & QIB_S_TIMER) {
916 ibp = to_iport(qp->ibqp.device, qp->port_num);
917 ibp->n_rc_timeouts++;
918 qp->s_flags &= ~QIB_S_TIMER;
919 del_timer(&qp->s_timer);
920 qib_restart_rc(qp, qp->s_last_psn + 1, 1);
921 qib_schedule_send(qp);
922 }
923 spin_unlock_irqrestore(&qp->s_lock, flags);
924}
925
926/*
927 * This is called from s_timer for RNR timeouts.
928 */
929void qib_rc_rnr_retry(unsigned long arg)
930{
931 struct qib_qp *qp = (struct qib_qp *)arg;
932 unsigned long flags;
933
934 spin_lock_irqsave(&qp->s_lock, flags);
935 if (qp->s_flags & QIB_S_WAIT_RNR) {
936 qp->s_flags &= ~QIB_S_WAIT_RNR;
937 del_timer(&qp->s_timer);
938 qib_schedule_send(qp);
939 }
940 spin_unlock_irqrestore(&qp->s_lock, flags);
941}
942
943/*
944 * Set qp->s_sending_psn to the next PSN after the given one.
945 * This would be psn+1 except when RDMA reads are present.
946 */
947static void reset_sending_psn(struct qib_qp *qp, u32 psn)
948{
949 struct qib_swqe *wqe;
950 u32 n = qp->s_last;
951
952 /* Find the work request corresponding to the given PSN. */
953 for (;;) {
954 wqe = get_swqe_ptr(qp, n);
955 if (qib_cmp24(psn, wqe->lpsn) <= 0) {
956 if (wqe->wr.opcode == IB_WR_RDMA_READ)
957 qp->s_sending_psn = wqe->lpsn + 1;
958 else
959 qp->s_sending_psn = psn + 1;
960 break;
961 }
962 if (++n == qp->s_size)
963 n = 0;
964 if (n == qp->s_tail)
965 break;
966 }
967}
968
969/*
970 * This should be called with the QP s_lock held and interrupts disabled.
971 */
972void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
973{
974 struct qib_other_headers *ohdr;
975 struct qib_swqe *wqe;
976 struct ib_wc wc;
977 unsigned i;
978 u32 opcode;
979 u32 psn;
980
981 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
982 return;
983
984 /* Find out where the BTH is */
985 if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
986 ohdr = &hdr->u.oth;
987 else
988 ohdr = &hdr->u.l.oth;
989
990 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
991 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
992 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
993 WARN_ON(!qp->s_rdma_ack_cnt);
994 qp->s_rdma_ack_cnt--;
995 return;
996 }
997
998 psn = be32_to_cpu(ohdr->bth[2]);
999 reset_sending_psn(qp, psn);
1000
1001 /*
1002 * Start timer after a packet requesting an ACK has been sent and
1003 * there are still requests that haven't been acked.
1004 */
1005 if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
1006 !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
1007 start_timer(qp);
1008
1009 while (qp->s_last != qp->s_acked) {
1010 wqe = get_swqe_ptr(qp, qp->s_last);
1011 if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
1012 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
1013 break;
1014 for (i = 0; i < wqe->wr.num_sge; i++) {
1015 struct qib_sge *sge = &wqe->sg_list[i];
1016
1017 atomic_dec(&sge->mr->refcount);
1018 }
1019 /* Post a send completion queue entry if requested. */
1020 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1021 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1022 memset(&wc, 0, sizeof wc);
1023 wc.wr_id = wqe->wr.wr_id;
1024 wc.status = IB_WC_SUCCESS;
1025 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
1026 wc.byte_len = wqe->length;
1027 wc.qp = &qp->ibqp;
1028 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
1029 }
1030 if (++qp->s_last >= qp->s_size)
1031 qp->s_last = 0;
1032 }
1033 /*
1034 * If we were waiting for sends to complete before resending,
1035 * and they are now complete, restart sending.
1036 */
1037 if (qp->s_flags & QIB_S_WAIT_PSN &&
1038 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1039 qp->s_flags &= ~QIB_S_WAIT_PSN;
1040 qp->s_sending_psn = qp->s_psn;
1041 qp->s_sending_hpsn = qp->s_psn - 1;
1042 qib_schedule_send(qp);
1043 }
1044}
1045
1046static inline void update_last_psn(struct qib_qp *qp, u32 psn)
1047{
1048 qp->s_last_psn = psn;
1049}
1050
1051/*
1052 * Generate a SWQE completion.
1053 * This is similar to qib_send_complete but has to check to be sure
1054 * that the SGEs are not being referenced if the SWQE is being resent.
1055 */
1056static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
1057 struct qib_swqe *wqe,
1058 struct qib_ibport *ibp)
1059{
1060 struct ib_wc wc;
1061 unsigned i;
1062
1063 /*
1064 * Don't decrement refcount and don't generate a
1065 * completion if the SWQE is being resent until the send
1066 * is finished.
1067 */
1068 if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
1069 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1070 for (i = 0; i < wqe->wr.num_sge; i++) {
1071 struct qib_sge *sge = &wqe->sg_list[i];
1072
1073 atomic_dec(&sge->mr->refcount);
1074 }
1075 /* Post a send completion queue entry if requested. */
1076 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1077 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1078 memset(&wc, 0, sizeof wc);
1079 wc.wr_id = wqe->wr.wr_id;
1080 wc.status = IB_WC_SUCCESS;
1081 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
1082 wc.byte_len = wqe->length;
1083 wc.qp = &qp->ibqp;
1084 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
1085 }
1086 if (++qp->s_last >= qp->s_size)
1087 qp->s_last = 0;
1088 } else
1089 ibp->n_rc_delayed_comp++;
1090
1091 qp->s_retry = qp->s_retry_cnt;
1092 update_last_psn(qp, wqe->lpsn);
1093
1094 /*
1095 * If we are completing a request which is in the process of
1096 * being resent, we can stop resending it since we know the
1097 * responder has already seen it.
1098 */
1099 if (qp->s_acked == qp->s_cur) {
1100 if (++qp->s_cur >= qp->s_size)
1101 qp->s_cur = 0;
1102 qp->s_acked = qp->s_cur;
1103 wqe = get_swqe_ptr(qp, qp->s_cur);
1104 if (qp->s_acked != qp->s_tail) {
1105 qp->s_state = OP(SEND_LAST);
1106 qp->s_psn = wqe->psn;
1107 }
1108 } else {
1109 if (++qp->s_acked >= qp->s_size)
1110 qp->s_acked = 0;
1111 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
1112 qp->s_draining = 0;
1113 wqe = get_swqe_ptr(qp, qp->s_acked);
1114 }
1115 return wqe;
1116}
1117
1118/**
1119 * do_rc_ack - process an incoming RC ACK
1120 * @qp: the QP the ACK came in on
1121 * @psn: the packet sequence number of the ACK
1122 * @opcode: the opcode of the request that resulted in the ACK
1123 *
1124 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
1125 * for the given QP.
1126 * Called at interrupt level with the QP s_lock held.
1127 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
1128 */
1129static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
1130 u64 val, struct qib_ctxtdata *rcd)
1131{
1132 struct qib_ibport *ibp;
1133 enum ib_wc_status status;
1134 struct qib_swqe *wqe;
1135 int ret = 0;
1136 u32 ack_psn;
1137 int diff;
1138
1139 /* Remove QP from retry timer */
1140 if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
1141 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
1142 del_timer(&qp->s_timer);
1143 }
1144
1145 /*
1146 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
1147 * requests and implicitly NAK RDMA read and atomic requests issued
1148 * before the NAK'ed request. The MSN won't include the NAK'ed
1149 * request but will include any ACK'ed requests.
1150 */
1151 ack_psn = psn;
1152 if (aeth >> 29)
1153 ack_psn--;
1154 wqe = get_swqe_ptr(qp, qp->s_acked);
1155 ibp = to_iport(qp->ibqp.device, qp->port_num);
1156
1157 /*
1158 * The MSN might be for a later WQE than the PSN indicates so
1159 * only complete WQEs that the PSN finishes.
1160 */
1161 while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
1162 /*
1163 * RDMA_READ_RESPONSE_ONLY is a special case since
1164 * we want to generate completion events for everything
1165 * before the RDMA read, copy the data, then generate
1166 * the completion for the read.
1167 */
1168 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
1169 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
1170 diff == 0) {
1171 ret = 1;
1172 goto bail;
1173 }
1174 /*
1175 * If this request is an RDMA read or atomic, and the ACK is
1176 * for a later operation, this ACK NAKs the RDMA read or
1177 * atomic. In other words, only an RDMA_READ_LAST or ONLY
1178 * can ACK an RDMA read, and likewise for atomic ops. Note
1179 * that the NAK case can only happen if relaxed ordering is
1180 * used and requests are sent after an RDMA read or atomic
1181 * is sent but before the response is received.
1182 */
1183 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
1184 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
1185 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1186 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
1187 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
1188 /* Retry this request. */
1189 if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
1190 qp->r_flags |= QIB_R_RDMAR_SEQ;
1191 qib_restart_rc(qp, qp->s_last_psn + 1, 0);
1192 if (list_empty(&qp->rspwait)) {
1193 qp->r_flags |= QIB_R_RSP_SEND;
1194 atomic_inc(&qp->refcount);
1195 list_add_tail(&qp->rspwait,
1196 &rcd->qp_wait_list);
1197 }
1198 }
1199 /*
1200 * No need to process the ACK/NAK since we are
1201 * restarting an earlier request.
1202 */
1203 goto bail;
1204 }
1205 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1206 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1207 u64 *vaddr = wqe->sg_list[0].vaddr;
1208 *vaddr = val;
1209 }
1210 if (qp->s_num_rd_atomic &&
1211 (wqe->wr.opcode == IB_WR_RDMA_READ ||
1212 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1213 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
1214 qp->s_num_rd_atomic--;
1215 /* Restart sending task if fence is complete */
1216 if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
1217 !qp->s_num_rd_atomic) {
1218 qp->s_flags &= ~(QIB_S_WAIT_FENCE |
1219 QIB_S_WAIT_ACK);
1220 qib_schedule_send(qp);
1221 } else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
1222 qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
1223 QIB_S_WAIT_ACK);
1224 qib_schedule_send(qp);
1225 }
1226 }
1227 wqe = do_rc_completion(qp, wqe, ibp);
1228 if (qp->s_acked == qp->s_tail)
1229 break;
1230 }
1231
1232 switch (aeth >> 29) {
1233 case 0: /* ACK */
1234 ibp->n_rc_acks++;
1235 if (qp->s_acked != qp->s_tail) {
1236 /*
1237 * We are expecting more ACKs so
1238 * reset the retransmit timer.
1239 */
1240 start_timer(qp);
1241 /*
1242 * We can stop resending the earlier packets and
1243 * continue with the next packet the receiver wants.
1244 */
1245 if (qib_cmp24(qp->s_psn, psn) <= 0)
1246 reset_psn(qp, psn + 1);
1247 } else if (qib_cmp24(qp->s_psn, psn) <= 0) {
1248 qp->s_state = OP(SEND_LAST);
1249 qp->s_psn = psn + 1;
1250 }
1251 if (qp->s_flags & QIB_S_WAIT_ACK) {
1252 qp->s_flags &= ~QIB_S_WAIT_ACK;
1253 qib_schedule_send(qp);
1254 }
1255 qib_get_credit(qp, aeth);
1256 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1257 qp->s_retry = qp->s_retry_cnt;
1258 update_last_psn(qp, psn);
1259 ret = 1;
1260 goto bail;
1261
1262 case 1: /* RNR NAK */
1263 ibp->n_rnr_naks++;
1264 if (qp->s_acked == qp->s_tail)
1265 goto bail;
1266 if (qp->s_flags & QIB_S_WAIT_RNR)
1267 goto bail;
1268 if (qp->s_rnr_retry == 0) {
1269 status = IB_WC_RNR_RETRY_EXC_ERR;
1270 goto class_b;
1271 }
1272 if (qp->s_rnr_retry_cnt < 7)
1273 qp->s_rnr_retry--;
1274
1275 /* The last valid PSN is the previous PSN. */
1276 update_last_psn(qp, psn - 1);
1277
1278 ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
1279
1280 reset_psn(qp, psn);
1281
1282 qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
1283 qp->s_flags |= QIB_S_WAIT_RNR;
1284 qp->s_timer.function = qib_rc_rnr_retry;
1285 qp->s_timer.expires = jiffies + usecs_to_jiffies(
1286 ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
1287 QIB_AETH_CREDIT_MASK]);
1288 add_timer(&qp->s_timer);
1289 goto bail;
1290
1291 case 3: /* NAK */
1292 if (qp->s_acked == qp->s_tail)
1293 goto bail;
1294 /* The last valid PSN is the previous PSN. */
1295 update_last_psn(qp, psn - 1);
1296 switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
1297 QIB_AETH_CREDIT_MASK) {
1298 case 0: /* PSN sequence error */
1299 ibp->n_seq_naks++;
1300 /*
1301 * Back up to the responder's expected PSN.
1302 * Note that we might get a NAK in the middle of an
1303 * RDMA READ response which terminates the RDMA
1304 * READ.
1305 */
1306 qib_restart_rc(qp, psn, 0);
1307 qib_schedule_send(qp);
1308 break;
1309
1310 case 1: /* Invalid Request */
1311 status = IB_WC_REM_INV_REQ_ERR;
1312 ibp->n_other_naks++;
1313 goto class_b;
1314
1315 case 2: /* Remote Access Error */
1316 status = IB_WC_REM_ACCESS_ERR;
1317 ibp->n_other_naks++;
1318 goto class_b;
1319
1320 case 3: /* Remote Operation Error */
1321 status = IB_WC_REM_OP_ERR;
1322 ibp->n_other_naks++;
1323class_b:
1324 if (qp->s_last == qp->s_acked) {
1325 qib_send_complete(qp, wqe, status);
1326 qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1327 }
1328 break;
1329
1330 default:
1331 /* Ignore other reserved NAK error codes */
1332 goto reserved;
1333 }
1334 qp->s_retry = qp->s_retry_cnt;
1335 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1336 goto bail;
1337
1338 default: /* 2: reserved */
1339reserved:
1340 /* Ignore reserved NAK codes. */
1341 goto bail;
1342 }
1343
1344bail:
1345 return ret;
1346}
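/*
 * For orientation, a sketch of the AETH decoding used in do_rc_ack()
 * above (layout inferred from the shifts in this file; the CREDIT and
 * MSN masks come from qib.h):
 *
 *	aeth >> 29                        class: 0 ACK, 1 RNR NAK,
 *	                                         2 reserved, 3 NAK
 *	(aeth >> QIB_AETH_CREDIT_SHIFT)
 *	        & QIB_AETH_CREDIT_MASK    credit count or NAK code
 *	aeth & QIB_MSN_MASK               message sequence number
 */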
1347
1348/*
1349 * We have seen an out of sequence RDMA read middle or last packet.
1350 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
1351 */
1352static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
1353 struct qib_ctxtdata *rcd)
1354{
1355 struct qib_swqe *wqe;
1356
1357 /* Remove QP from retry timer */
1358 if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
1359 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
1360 del_timer(&qp->s_timer);
1361 }
1362
1363 wqe = get_swqe_ptr(qp, qp->s_acked);
1364
1365 while (qib_cmp24(psn, wqe->lpsn) > 0) {
1366 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1367 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1368 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
1369 break;
1370 wqe = do_rc_completion(qp, wqe, ibp);
1371 }
1372
1373 ibp->n_rdma_seq++;
1374 qp->r_flags |= QIB_R_RDMAR_SEQ;
1375 qib_restart_rc(qp, qp->s_last_psn + 1, 0);
1376 if (list_empty(&qp->rspwait)) {
1377 qp->r_flags |= QIB_R_RSP_SEND;
1378 atomic_inc(&qp->refcount);
1379 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1380 }
1381}
1382
1383/**
1384 * qib_rc_rcv_resp - process an incoming RC response packet
1385 * @ibp: the port this packet came in on
1386 * @ohdr: the other headers for this packet
1387 * @data: the packet data
1388 * @tlen: the packet length
1389 * @qp: the QP for this packet
1390 * @opcode: the opcode for this packet
1391 * @psn: the packet sequence number for this packet
1392 * @hdrsize: the header length
1393 * @pmtu: the path MTU
1394 *
1395 * This is called from qib_rc_rcv() to process an incoming RC response
1396 * packet for the given QP.
1397 * Called at interrupt level.
1398 */
1399static void qib_rc_rcv_resp(struct qib_ibport *ibp,
1400 struct qib_other_headers *ohdr,
1401 void *data, u32 tlen,
1402 struct qib_qp *qp,
1403 u32 opcode,
1404 u32 psn, u32 hdrsize, u32 pmtu,
1405 struct qib_ctxtdata *rcd)
1406{
1407 struct qib_swqe *wqe;
1408 enum ib_wc_status status;
1409 unsigned long flags;
1410 int diff;
1411 u32 pad;
1412 u32 aeth;
1413 u64 val;
1414
1415 spin_lock_irqsave(&qp->s_lock, flags);
1416
1417 /* Double check we can process this now that we hold the s_lock. */
1418 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
1419 goto ack_done;
1420
1421 /* Ignore invalid responses. */
1422 if (qib_cmp24(psn, qp->s_next_psn) >= 0)
1423 goto ack_done;
1424
1425 /* Ignore duplicate responses. */
1426 diff = qib_cmp24(psn, qp->s_last_psn);
1427 if (unlikely(diff <= 0)) {
1428 /* Update credits for "ghost" ACKs */
1429 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1430 aeth = be32_to_cpu(ohdr->u.aeth);
1431 if ((aeth >> 29) == 0)
1432 qib_get_credit(qp, aeth);
1433 }
1434 goto ack_done;
1435 }
1436
1437 /*
1438 * Skip everything other than the PSN we expect, if we are waiting
1439 * for a reply to a restarted RDMA read or atomic op.
1440 */
1441 if (qp->r_flags & QIB_R_RDMAR_SEQ) {
1442 if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
1443 goto ack_done;
1444 qp->r_flags &= ~QIB_R_RDMAR_SEQ;
1445 }
1446
1447 if (unlikely(qp->s_acked == qp->s_tail))
1448 goto ack_done;
1449 wqe = get_swqe_ptr(qp, qp->s_acked);
1450 status = IB_WC_SUCCESS;
1451
1452 switch (opcode) {
1453 case OP(ACKNOWLEDGE):
1454 case OP(ATOMIC_ACKNOWLEDGE):
1455 case OP(RDMA_READ_RESPONSE_FIRST):
1456 aeth = be32_to_cpu(ohdr->u.aeth);
1457 if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
1458 __be32 *p = ohdr->u.at.atomic_ack_eth;
1459
1460 val = ((u64) be32_to_cpu(p[0]) << 32) |
1461 be32_to_cpu(p[1]);
1462 } else
1463 val = 0;
1464 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
1465 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1466 goto ack_done;
1467 hdrsize += 4;
1468 wqe = get_swqe_ptr(qp, qp->s_acked);
1469 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1470 goto ack_op_err;
1471 /*
1472 * If this is a response to a resent RDMA read, we
1473 * have to be careful to copy the data to the right
1474 * location.
1475 */
1476 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1477 wqe, psn, pmtu);
1478 goto read_middle;
1479
1480 case OP(RDMA_READ_RESPONSE_MIDDLE):
1481 /* no AETH, no ACK */
1482 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1483 goto ack_seq_err;
1484 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1485 goto ack_op_err;
1486read_middle:
1487 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1488 goto ack_len_err;
1489 if (unlikely(pmtu >= qp->s_rdma_read_len))
1490 goto ack_len_err;
1491
1492 /*
1493 * We got a response so update the timeout.
1494 * 4.096 usec. * (1 << qp->timeout)
1495 */
1496 qp->s_flags |= QIB_S_TIMER;
1497 mod_timer(&qp->s_timer, jiffies +
1498 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1499 1000UL));
1500 if (qp->s_flags & QIB_S_WAIT_ACK) {
1501 qp->s_flags &= ~QIB_S_WAIT_ACK;
1502 qib_schedule_send(qp);
1503 }
1504
1505 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
1506 qp->s_retry = qp->s_retry_cnt;
1507
1508 /*
1509 * Update the RDMA receive state but do the copy w/o
1510 * holding the locks and blocking interrupts.
1511 */
1512 qp->s_rdma_read_len -= pmtu;
1513 update_last_psn(qp, psn);
1514 spin_unlock_irqrestore(&qp->s_lock, flags);
1515 qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
1516 goto bail;
1517
1518 case OP(RDMA_READ_RESPONSE_ONLY):
1519 aeth = be32_to_cpu(ohdr->u.aeth);
1520 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
1521 goto ack_done;
1522 /* Get the number of bytes the message was padded by. */
1523 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1524 /*
1525 * Check that the data size is >= 0 && <= pmtu.
1526 * Remember to account for the AETH header (4) and
1527 * ICRC (4).
1528 */
1529 if (unlikely(tlen < (hdrsize + pad + 8)))
1530 goto ack_len_err;
1531 /*
1532 * If this is a response to a resent RDMA read, we
1533 * have to be careful to copy the data to the right
1534 * location.
1535 */
1536 wqe = get_swqe_ptr(qp, qp->s_acked);
1537 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1538 wqe, psn, pmtu);
1539 goto read_last;
1540
1541 case OP(RDMA_READ_RESPONSE_LAST):
1542 /* ACKs READ req. */
1543 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1544 goto ack_seq_err;
1545 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1546 goto ack_op_err;
1547 /* Get the number of bytes the message was padded by. */
1548 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1549 /*
1550 * Check that the data size is >= 1 && <= pmtu.
1551 * Remember to account for the AETH header (4) and
1552 * ICRC (4).
1553 */
1554 if (unlikely(tlen <= (hdrsize + pad + 8)))
1555 goto ack_len_err;
1556read_last:
1557 tlen -= hdrsize + pad + 8;
1558 if (unlikely(tlen != qp->s_rdma_read_len))
1559 goto ack_len_err;
1560 aeth = be32_to_cpu(ohdr->u.aeth);
1561 qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
1562 WARN_ON(qp->s_rdma_read_sge.num_sge);
1563 (void) do_rc_ack(qp, aeth, psn,
1564 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
1565 goto ack_done;
1566 }
1567
1568ack_op_err:
1569 status = IB_WC_LOC_QP_OP_ERR;
1570 goto ack_err;
1571
1572ack_seq_err:
1573 rdma_seq_err(qp, ibp, psn, rcd);
1574 goto ack_done;
1575
1576ack_len_err:
1577 status = IB_WC_LOC_LEN_ERR;
1578ack_err:
1579 if (qp->s_last == qp->s_acked) {
1580 qib_send_complete(qp, wqe, status);
1581 qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1582 }
1583ack_done:
1584 spin_unlock_irqrestore(&qp->s_lock, flags);
1585bail:
1586 return;
1587}
1588
1589/**
1590 * qib_rc_rcv_error - process an incoming duplicate or error RC packet
1591 * @ohdr: the other headers for this packet
1592 * @data: the packet data
1593 * @qp: the QP for this packet
1594 * @opcode: the opcode for this packet
1595 * @psn: the packet sequence number for this packet
1596 * @diff: the difference between the PSN and the expected PSN
1597 *
1598 * This is called from qib_rc_rcv() to process an unexpected
1599 * incoming RC packet for the given QP.
1600 * Called at interrupt level.
1601 * Return 1 if no more processing is needed; otherwise return 0 to
1602 * schedule a response to be sent.
1603 */
1604static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
1605 void *data,
1606 struct qib_qp *qp,
1607 u32 opcode,
1608 u32 psn,
1609 int diff,
1610 struct qib_ctxtdata *rcd)
1611{
1612 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1613 struct qib_ack_entry *e;
1614 unsigned long flags;
1615 u8 i, prev;
1616 int old_req;
1617
1618 if (diff > 0) {
1619 /*
1620 * Packet sequence error.
1621 * A NAK will ACK earlier sends and RDMA writes.
1622 * Don't queue the NAK if we already sent one.
1623 */
1624 if (!qp->r_nak_state) {
1625 ibp->n_rc_seqnak++;
1626 qp->r_nak_state = IB_NAK_PSN_ERROR;
1627 /* Use the expected PSN. */
1628 qp->r_ack_psn = qp->r_psn;
1629 /*
1630 * Wait to send the sequence NAK until all packets
1631 * in the receive queue have been processed.
1632 * Otherwise, we end up propagating congestion.
1633 */
1634 if (list_empty(&qp->rspwait)) {
1635 qp->r_flags |= QIB_R_RSP_NAK;
1636 atomic_inc(&qp->refcount);
1637 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1638 }
1639 }
1640 goto done;
1641 }
1642
1643 /*
1644 * Handle a duplicate request. Don't re-execute SEND, RDMA
1645 * write or atomic op. Don't NAK errors, just silently drop
1646 * the duplicate request. Note that r_sge, r_len, and
1647 * r_rcv_len may be in use so don't modify them.
1648 *
1649 * We are supposed to ACK the earliest duplicate PSN but we
1650 * can coalesce an outstanding duplicate ACK. We have to
1651 * send the earliest so that RDMA reads can be restarted at
1652 * the requester's expected PSN.
1653 *
1654 * First, find where this duplicate PSN falls within the
1655 * ACKs previously sent.
1656 * old_req is true if there is an older response that is scheduled
1657 * to be sent before sending this one.
1658 */
1659 e = NULL;
1660 old_req = 1;
1661 ibp->n_rc_dupreq++;
1662
1663 spin_lock_irqsave(&qp->s_lock, flags);
1664 /* Double check we can process this now that we hold the s_lock. */
1665 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
1666 goto unlock_done;
1667
1668 for (i = qp->r_head_ack_queue; ; i = prev) {
1669 if (i == qp->s_tail_ack_queue)
1670 old_req = 0;
1671 if (i)
1672 prev = i - 1;
1673 else
1674 prev = QIB_MAX_RDMA_ATOMIC;
1675 if (prev == qp->r_head_ack_queue) {
1676 e = NULL;
1677 break;
1678 }
1679 e = &qp->s_ack_queue[prev];
1680 if (!e->opcode) {
1681 e = NULL;
1682 break;
1683 }
1684 if (qib_cmp24(psn, e->psn) >= 0) {
1685 if (prev == qp->s_tail_ack_queue &&
1686 qib_cmp24(psn, e->lpsn) <= 0)
1687 old_req = 0;
1688 break;
1689 }
1690 }
1691 switch (opcode) {
1692 case OP(RDMA_READ_REQUEST): {
1693 struct ib_reth *reth;
1694 u32 offset;
1695 u32 len;
1696
1697 /*
1698 * If we didn't find the RDMA read request in the ack queue,
1699 * we can ignore this request.
1700 */
1701 if (!e || e->opcode != OP(RDMA_READ_REQUEST))
1702 goto unlock_done;
1703 /* RETH comes after BTH */
1704 reth = &ohdr->u.rc.reth;
1705 /*
1706 * Address range must be a subset of the original
1707 * request and start on pmtu boundaries.
1708 * We reuse the old ack_queue slot since the requester
1709 * should not back up and request an earlier PSN for the
1710 * same request.
1711 */
1712 offset = ((psn - e->psn) & QIB_PSN_MASK) *
1713 ib_mtu_enum_to_int(qp->path_mtu);
1714 len = be32_to_cpu(reth->length);
1715 if (unlikely(offset + len != e->rdma_sge.sge_length))
1716 goto unlock_done;
1717 if (e->rdma_sge.mr) {
1718 atomic_dec(&e->rdma_sge.mr->refcount);
1719 e->rdma_sge.mr = NULL;
1720 }
1721 if (len != 0) {
1722 u32 rkey = be32_to_cpu(reth->rkey);
1723 u64 vaddr = be64_to_cpu(reth->vaddr);
1724 int ok;
1725
1726 ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
1727 IB_ACCESS_REMOTE_READ);
1728 if (unlikely(!ok))
1729 goto unlock_done;
1730 } else {
1731 e->rdma_sge.vaddr = NULL;
1732 e->rdma_sge.length = 0;
1733 e->rdma_sge.sge_length = 0;
1734 }
1735 e->psn = psn;
1736 if (old_req)
1737 goto unlock_done;
1738 qp->s_tail_ack_queue = prev;
1739 break;
1740 }
1741
1742 case OP(COMPARE_SWAP):
1743 case OP(FETCH_ADD): {
1744 /*
1745 * If we didn't find the atomic request in the ack queue
1746 * or the send tasklet is already backed up to send an
1747 * earlier entry, we can ignore this request.
1748 */
1749 if (!e || e->opcode != (u8) opcode || old_req)
1750 goto unlock_done;
1751 qp->s_tail_ack_queue = prev;
1752 break;
1753 }
1754
1755 default:
1756 /*
1757 * Ignore this operation if it doesn't request an ACK
1758 * or an earlier RDMA read or atomic is going to be resent.
1759 */
1760 if (!(psn & IB_BTH_REQ_ACK) || old_req)
1761 goto unlock_done;
1762 /*
1763 * Resend the most recent ACK if this request is
1764 * after all the previous RDMA reads and atomics.
1765 */
1766 if (i == qp->r_head_ack_queue) {
1767 spin_unlock_irqrestore(&qp->s_lock, flags);
1768 qp->r_nak_state = 0;
1769 qp->r_ack_psn = qp->r_psn - 1;
1770 goto send_ack;
1771 }
1772 /*
1773 * Try to send a simple ACK to work around a Mellanox bug
1774 * which doesn't accept a RDMA read response or atomic
1775 * response as an ACK for earlier SENDs or RDMA writes.
1776 */
1777 if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
1778 spin_unlock_irqrestore(&qp->s_lock, flags);
1779 qp->r_nak_state = 0;
1780 qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
1781 goto send_ack;
1782 }
1783 /*
1784 * Resend the RDMA read or atomic op which
1785 * ACKs this duplicate request.
1786 */
1787 qp->s_tail_ack_queue = i;
1788 break;
1789 }
1790 qp->s_ack_state = OP(ACKNOWLEDGE);
1791 qp->s_flags |= QIB_S_RESP_PENDING;
1792 qp->r_nak_state = 0;
1793 qib_schedule_send(qp);
1794
1795unlock_done:
1796 spin_unlock_irqrestore(&qp->s_lock, flags);
1797done:
1798 return 1;
1799
1800send_ack:
1801 return 0;
1802}
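/*
 * Illustrative sketch (not part of the driver source): the PSN checks
 * above rely on a signed, circular 24-bit compare such as qib_cmp24().
 * One way to implement those semantics, assuming PSNs occupy the low
 * 24 bits of a u32, is to sign-extend the 24-bit difference; the
 * helper name here is hypothetical.
 */
static inline int example_cmp24(u32 a, u32 b)
{
	int d = (int)((a - b) & 0xFFFFFF);	/* difference mod 2^24 */

	if (d & 0x800000)			/* sign-extend from bit 23 */
		d -= 0x1000000;
	return d;	/* <0: a before b, 0: equal, >0: a after b */
}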
1803
1804void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
1805{
1806 unsigned long flags;
1807 int lastwqe;
1808
1809 spin_lock_irqsave(&qp->s_lock, flags);
1810 lastwqe = qib_error_qp(qp, err);
1811 spin_unlock_irqrestore(&qp->s_lock, flags);
1812
1813 if (lastwqe) {
1814 struct ib_event ev;
1815
1816 ev.device = qp->ibqp.device;
1817 ev.element.qp = &qp->ibqp;
1818 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1819 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1820 }
1821}
1822
1823static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
1824{
1825 unsigned next;
1826
1827 next = n + 1;
1828 if (next > QIB_MAX_RDMA_ATOMIC)
1829 next = 0;
1830 qp->s_tail_ack_queue = next;
1831 qp->s_ack_state = OP(ACKNOWLEDGE);
1832}
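/*
 * Illustrative note (not part of the driver source): s_ack_queue is a
 * small ring of QIB_MAX_RDMA_ATOMIC + 1 slots; the extra slot lets a
 * full queue (next head == tail) be distinguished from an empty one
 * (head == tail), which is why qib_rc_rcv() below compares the wrap
 * point with "> not >=".  A hypothetical wrap helper:
 */
static inline unsigned example_ack_queue_next(unsigned n)
{
	return (n < QIB_MAX_RDMA_ATOMIC) ? n + 1 : 0;
}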
1833
1834/**
1835 * qib_rc_rcv - process an incoming RC packet
1836 * @rcd: the context pointer
1837 * @hdr: the header of this packet
1838 * @has_grh: true if the header has a GRH
1839 * @data: the packet data
1840 * @tlen: the packet length
1841 * @qp: the QP for this packet
1842 *
1843 * This is called from qib_qp_rcv() to process an incoming RC packet
1844 * for the given QP.
1845 * Called at interrupt level.
1846 */
1847void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
1848 int has_grh, void *data, u32 tlen, struct qib_qp *qp)
1849{
1850 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
1851 struct qib_other_headers *ohdr;
1852 u32 opcode;
1853 u32 hdrsize;
1854 u32 psn;
1855 u32 pad;
1856 struct ib_wc wc;
1857 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
1858 int diff;
1859 struct ib_reth *reth;
1860 unsigned long flags;
1861 int ret;
1862
1863 /* Check for GRH */
1864 if (!has_grh) {
1865 ohdr = &hdr->u.oth;
1866 hdrsize = 8 + 12; /* LRH + BTH */
1867 } else {
1868 ohdr = &hdr->u.l.oth;
1869 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
1870 }
1871
1872 opcode = be32_to_cpu(ohdr->bth[0]);
1873 spin_lock_irqsave(&qp->s_lock, flags);
1874 if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
1875 goto sunlock;
1876 spin_unlock_irqrestore(&qp->s_lock, flags);
1877
1878 psn = be32_to_cpu(ohdr->bth[2]);
1879 opcode >>= 24;
1880
1881 /* Prevent simultaneous processing after APM on different CPUs */
1882 spin_lock(&qp->r_lock);
1883
1884 /*
1885 * Process responses (ACKs) before anything else. Note that the
1886 * packet sequence number will be for something in the send work
1887 * queue rather than the expected receive packet sequence number.
1888 * In other words, this QP is the requester.
1889 */
1890 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1891 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1892 qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
1893 hdrsize, pmtu, rcd);
1894 goto runlock;
1895 }
1896
1897 /* Compute 24 bits worth of difference. */
1898 diff = qib_cmp24(psn, qp->r_psn);
1899 if (unlikely(diff)) {
1900 if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
1901 goto runlock;
1902 goto send_ack;
1903 }
1904
1905 /* Check for opcode sequence errors. */
1906 switch (qp->r_state) {
1907 case OP(SEND_FIRST):
1908 case OP(SEND_MIDDLE):
1909 if (opcode == OP(SEND_MIDDLE) ||
1910 opcode == OP(SEND_LAST) ||
1911 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1912 break;
1913 goto nack_inv;
1914
1915 case OP(RDMA_WRITE_FIRST):
1916 case OP(RDMA_WRITE_MIDDLE):
1917 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1918 opcode == OP(RDMA_WRITE_LAST) ||
1919 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1920 break;
1921 goto nack_inv;
1922
1923 default:
1924 if (opcode == OP(SEND_MIDDLE) ||
1925 opcode == OP(SEND_LAST) ||
1926 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1927 opcode == OP(RDMA_WRITE_MIDDLE) ||
1928 opcode == OP(RDMA_WRITE_LAST) ||
1929 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1930 goto nack_inv;
1931 /*
1932 * Note that it is up to the requester to not send a new
1933 * RDMA read or atomic operation before receiving an ACK
1934 * for the previous operation.
1935 */
1936 break;
1937 }
1938
1939 memset(&wc, 0, sizeof wc);
1940
1941 if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
1942 qp->r_flags |= QIB_R_COMM_EST;
1943 if (qp->ibqp.event_handler) {
1944 struct ib_event ev;
1945
1946 ev.device = qp->ibqp.device;
1947 ev.element.qp = &qp->ibqp;
1948 ev.event = IB_EVENT_COMM_EST;
1949 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1950 }
1951 }
1952
1953 /* OK, process the packet. */
1954 switch (opcode) {
1955 case OP(SEND_FIRST):
1956 ret = qib_get_rwqe(qp, 0);
1957 if (ret < 0)
1958 goto nack_op_err;
1959 if (!ret)
1960 goto rnr_nak;
1961 qp->r_rcv_len = 0;
1962 /* FALLTHROUGH */
1963 case OP(SEND_MIDDLE):
1964 case OP(RDMA_WRITE_MIDDLE):
1965send_middle:
1966 /* Check for invalid length PMTU or posted rwqe len. */
1967 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1968 goto nack_inv;
1969 qp->r_rcv_len += pmtu;
1970 if (unlikely(qp->r_rcv_len > qp->r_len))
1971 goto nack_inv;
1972 qib_copy_sge(&qp->r_sge, data, pmtu, 1);
1973 break;
1974
1975 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1976 /* consume RWQE */
1977 ret = qib_get_rwqe(qp, 1);
1978 if (ret < 0)
1979 goto nack_op_err;
1980 if (!ret)
1981 goto rnr_nak;
1982 goto send_last_imm;
1983
1984 case OP(SEND_ONLY):
1985 case OP(SEND_ONLY_WITH_IMMEDIATE):
1986 ret = qib_get_rwqe(qp, 0);
1987 if (ret < 0)
1988 goto nack_op_err;
1989 if (!ret)
1990 goto rnr_nak;
1991 qp->r_rcv_len = 0;
1992 if (opcode == OP(SEND_ONLY))
1993 goto send_last;
1994 /* FALLTHROUGH */
1995 case OP(SEND_LAST_WITH_IMMEDIATE):
1996send_last_imm:
1997 wc.ex.imm_data = ohdr->u.imm_data;
1998 hdrsize += 4;
1999 wc.wc_flags = IB_WC_WITH_IMM;
2000 /* FALLTHROUGH */
2001 case OP(SEND_LAST):
2002 case OP(RDMA_WRITE_LAST):
2003send_last:
2004 /* Get the number of bytes the message was padded by. */
2005 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
2006 /* Check for invalid length. */
2007 /* XXX LAST len should be >= 1 */
2008 if (unlikely(tlen < (hdrsize + pad + 4)))
2009 goto nack_inv;
2010 /* Don't count the CRC. */
2011 tlen -= (hdrsize + pad + 4);
2012 wc.byte_len = tlen + qp->r_rcv_len;
2013 if (unlikely(wc.byte_len > qp->r_len))
2014 goto nack_inv;
2015 qib_copy_sge(&qp->r_sge, data, tlen, 1);
2016 while (qp->r_sge.num_sge) {
2017 atomic_dec(&qp->r_sge.sge.mr->refcount);
2018 if (--qp->r_sge.num_sge)
2019 qp->r_sge.sge = *qp->r_sge.sg_list++;
2020 }
2021 qp->r_msn++;
2022 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
2023 break;
2024 wc.wr_id = qp->r_wr_id;
2025 wc.status = IB_WC_SUCCESS;
2026 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
2027 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
2028 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
2029 else
2030 wc.opcode = IB_WC_RECV;
2031 wc.qp = &qp->ibqp;
2032 wc.src_qp = qp->remote_qpn;
2033 wc.slid = qp->remote_ah_attr.dlid;
2034 wc.sl = qp->remote_ah_attr.sl;
2035 /* Signal completion event if the solicited bit is set. */
2036 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
2037 (ohdr->bth[0] &
2038 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
2039 break;
2040
2041 case OP(RDMA_WRITE_FIRST):
2042 case OP(RDMA_WRITE_ONLY):
2043 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
2044 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2045 goto nack_inv;
2046 /* consume RWQE */
2047 reth = &ohdr->u.rc.reth;
2048 hdrsize += sizeof(*reth);
2049 qp->r_len = be32_to_cpu(reth->length);
2050 qp->r_rcv_len = 0;
2051 qp->r_sge.sg_list = NULL;
2052 if (qp->r_len != 0) {
2053 u32 rkey = be32_to_cpu(reth->rkey);
2054 u64 vaddr = be64_to_cpu(reth->vaddr);
2055 int ok;
2056
2057 /* Check rkey & NAK */
2058 ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
2059 rkey, IB_ACCESS_REMOTE_WRITE);
2060 if (unlikely(!ok))
2061 goto nack_acc;
2062 qp->r_sge.num_sge = 1;
2063 } else {
2064 qp->r_sge.num_sge = 0;
2065 qp->r_sge.sge.mr = NULL;
2066 qp->r_sge.sge.vaddr = NULL;
2067 qp->r_sge.sge.length = 0;
2068 qp->r_sge.sge.sge_length = 0;
2069 }
2070 if (opcode == OP(RDMA_WRITE_FIRST))
2071 goto send_middle;
2072 else if (opcode == OP(RDMA_WRITE_ONLY))
2073 goto send_last;
2074 ret = qib_get_rwqe(qp, 1);
2075 if (ret < 0)
2076 goto nack_op_err;
2077 if (!ret)
2078 goto rnr_nak;
2079 goto send_last_imm;
2080
2081 case OP(RDMA_READ_REQUEST): {
2082 struct qib_ack_entry *e;
2083 u32 len;
2084 u8 next;
2085
2086 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2087 goto nack_inv;
2088 next = qp->r_head_ack_queue + 1;
2089 /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
2090 if (next > QIB_MAX_RDMA_ATOMIC)
2091 next = 0;
2092 spin_lock_irqsave(&qp->s_lock, flags);
2093 /* Double check we can process this while holding the s_lock. */
2094 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
2095 goto srunlock;
2096 if (unlikely(next == qp->s_tail_ack_queue)) {
2097 if (!qp->s_ack_queue[next].sent)
2098 goto nack_inv_unlck;
2099 qib_update_ack_queue(qp, next);
2100 }
2101 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2102 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
2103 atomic_dec(&e->rdma_sge.mr->refcount);
2104 e->rdma_sge.mr = NULL;
2105 }
2106 reth = &ohdr->u.rc.reth;
2107 len = be32_to_cpu(reth->length);
2108 if (len) {
2109 u32 rkey = be32_to_cpu(reth->rkey);
2110 u64 vaddr = be64_to_cpu(reth->vaddr);
2111 int ok;
2112
2113 /* Check rkey & NAK */
2114 ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr,
2115 rkey, IB_ACCESS_REMOTE_READ);
2116 if (unlikely(!ok))
2117 goto nack_acc_unlck;
2118 /*
2119 * Update the next expected PSN. We add 1 later
2120 * below, so only add the remainder here.
2121 */
2122 if (len > pmtu)
2123 qp->r_psn += (len - 1) / pmtu;
2124 } else {
2125 e->rdma_sge.mr = NULL;
2126 e->rdma_sge.vaddr = NULL;
2127 e->rdma_sge.length = 0;
2128 e->rdma_sge.sge_length = 0;
2129 }
2130 e->opcode = opcode;
2131 e->sent = 0;
2132 e->psn = psn;
2133 e->lpsn = qp->r_psn;
2134 /*
2135 * We need to increment the MSN here instead of when we
2136 * finish sending the result since a duplicate request would
2137 * increment it more than once.
2138 */
2139 qp->r_msn++;
2140 qp->r_psn++;
2141 qp->r_state = opcode;
2142 qp->r_nak_state = 0;
2143 qp->r_head_ack_queue = next;
2144
2145 /* Schedule the send tasklet. */
2146 qp->s_flags |= QIB_S_RESP_PENDING;
2147 qib_schedule_send(qp);
2148
2149 goto srunlock;
2150 }
2151
2152 case OP(COMPARE_SWAP):
2153 case OP(FETCH_ADD): {
2154 struct ib_atomic_eth *ateth;
2155 struct qib_ack_entry *e;
2156 u64 vaddr;
2157 atomic64_t *maddr;
2158 u64 sdata;
2159 u32 rkey;
2160 u8 next;
2161
2162 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2163 goto nack_inv;
2164 next = qp->r_head_ack_queue + 1;
2165 if (next > QIB_MAX_RDMA_ATOMIC)
2166 next = 0;
2167 spin_lock_irqsave(&qp->s_lock, flags);
2168 /* Double check we can process this while holding the s_lock. */
2169 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
2170 goto srunlock;
2171 if (unlikely(next == qp->s_tail_ack_queue)) {
2172 if (!qp->s_ack_queue[next].sent)
2173 goto nack_inv_unlck;
2174 qib_update_ack_queue(qp, next);
2175 }
2176 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2177 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
2178 atomic_dec(&e->rdma_sge.mr->refcount);
2179 e->rdma_sge.mr = NULL;
2180 }
2181 ateth = &ohdr->u.atomic_eth;
2182 vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
2183 be32_to_cpu(ateth->vaddr[1]);
2184 if (unlikely(vaddr & (sizeof(u64) - 1)))
2185 goto nack_inv_unlck;
2186 rkey = be32_to_cpu(ateth->rkey);
2187 /* Check rkey & NAK */
2188 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
2189 vaddr, rkey,
2190 IB_ACCESS_REMOTE_ATOMIC)))
2191 goto nack_acc_unlck;
2192 /* Perform atomic OP and save result. */
2193 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
2194 sdata = be64_to_cpu(ateth->swap_data);
2195 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
2196 (u64) atomic64_add_return(sdata, maddr) - sdata :
2197 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
2198 be64_to_cpu(ateth->compare_data),
2199 sdata);
2200 atomic_dec(&qp->r_sge.sge.mr->refcount);
2201 qp->r_sge.num_sge = 0;
2202 e->opcode = opcode;
2203 e->sent = 0;
2204 e->psn = psn;
2205 e->lpsn = psn;
2206 qp->r_msn++;
2207 qp->r_psn++;
2208 qp->r_state = opcode;
2209 qp->r_nak_state = 0;
2210 qp->r_head_ack_queue = next;
2211
2212 /* Schedule the send tasklet. */
2213 qp->s_flags |= QIB_S_RESP_PENDING;
2214 qib_schedule_send(qp);
2215
2216 goto srunlock;
2217 }
2218
2219 default:
2220 /* NAK unknown opcodes. */
2221 goto nack_inv;
2222 }
2223 qp->r_psn++;
2224 qp->r_state = opcode;
2225 qp->r_ack_psn = psn;
2226 qp->r_nak_state = 0;
2227 /* Send an ACK if requested or required. */
2228 if (psn & (1 << 31))
2229 goto send_ack;
2230 goto runlock;
2231
2232rnr_nak:
2233 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
2234 qp->r_ack_psn = qp->r_psn;
2235 /* Queue RNR NAK for later */
2236 if (list_empty(&qp->rspwait)) {
2237 qp->r_flags |= QIB_R_RSP_NAK;
2238 atomic_inc(&qp->refcount);
2239 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2240 }
2241 goto runlock;
2242
2243nack_op_err:
2244 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2245 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2246 qp->r_ack_psn = qp->r_psn;
2247 /* Queue NAK for later */
2248 if (list_empty(&qp->rspwait)) {
2249 qp->r_flags |= QIB_R_RSP_NAK;
2250 atomic_inc(&qp->refcount);
2251 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2252 }
2253 goto runlock;
2254
2255nack_inv_unlck:
2256 spin_unlock_irqrestore(&qp->s_lock, flags);
2257nack_inv:
2258 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2259 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2260 qp->r_ack_psn = qp->r_psn;
2261 /* Queue NAK for later */
2262 if (list_empty(&qp->rspwait)) {
2263 qp->r_flags |= QIB_R_RSP_NAK;
2264 atomic_inc(&qp->refcount);
2265 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2266 }
2267 goto runlock;
2268
2269nack_acc_unlck:
2270 spin_unlock_irqrestore(&qp->s_lock, flags);
2271nack_acc:
2272 qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
2273 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2274 qp->r_ack_psn = qp->r_psn;
2275send_ack:
2276 qib_send_rc_ack(qp);
2277runlock:
2278 spin_unlock(&qp->r_lock);
2279 return;
2280
2281srunlock:
2282 spin_unlock_irqrestore(&qp->s_lock, flags);
2283 spin_unlock(&qp->r_lock);
2284 return;
2285
2286sunlock:
2287 spin_unlock_irqrestore(&qp->s_lock, flags);
2288}
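/*
 * Illustrative sketch (not part of the driver source): the BTH field
 * extraction used throughout qib_rc_rcv() above.  Per the IBTA Base
 * Transport Header layout, the opcode is the top byte of bth[0], the
 * pad count sits in bits 21:20, and bth[2] carries the 24-bit PSN.
 * Helper names are hypothetical; the values are host byte order after
 * be32_to_cpu().
 */
static inline u32 example_bth_opcode(u32 bth0)
{
	return bth0 >> 24;		/* transport opcode */
}

static inline u32 example_bth_pad(u32 bth0)
{
	return (bth0 >> 20) & 3;	/* 0..3 pad bytes before the ICRC */
}

static inline u32 example_bth_psn(u32 bth2)
{
	return bth2 & 0xFFFFFF;		/* 24-bit packet sequence number */
}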
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
new file mode 100644
index 000000000000..eb78d9367f06
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -0,0 +1,817 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/spinlock.h>
35
36#include "qib.h"
37#include "qib_mad.h"
38
39/*
40 * Convert the AETH RNR timeout code into the number of microseconds.
41 */
42const u32 ib_qib_rnr_table[32] = {
43 655360, /* 00: 655.36 */
44 10, /* 01: .01 */
45 20, /* 02: .02 */
46 30, /* 03: .03 */
47 40, /* 04: .04 */
48 60, /* 05: .06 */
49 80, /* 06: .08 */
50 120, /* 07: .12 */
51 160, /* 08: .16 */
52 240, /* 09: .24 */
53 320, /* 0A: .32 */
54 480, /* 0B: .48 */
55 640, /* 0C: .64 */
56 960, /* 0D: .96 */
57 1280, /* 0E: 1.28 */
58 1920, /* 0F: 1.92 */
59 2560, /* 10: 2.56 */
60 3840, /* 11: 3.84 */
61 5120, /* 12: 5.12 */
62 7680, /* 13: 7.68 */
63 10240, /* 14: 10.24 */
64 15360, /* 15: 15.36 */
65 20480, /* 16: 20.48 */
66 30720, /* 17: 30.72 */
67 40960, /* 18: 40.96 */
68 61440, /* 19: 61.44 */
69 81920, /* 1A: 81.92 */
70 122880, /* 1B: 122.88 */
71 163840, /* 1C: 163.84 */
72 245760, /* 1D: 245.76 */
73 327680, /* 1E: 327.68 */
74 491520 /* 1F: 491.52 */
75};
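/*
 * Illustrative usage sketch (not part of the driver source): the
 * 5-bit RNR NAK code from the AETH indexes this table to obtain a
 * delay in microseconds, which the driver converts to jiffies when
 * arming the RNR retry timer (see the rnr_nak path in
 * qib_ruc_loopback() below).  The helper name is hypothetical.
 */
static inline unsigned long example_rnr_timeout_jiffies(u8 rnr_code)
{
	return usecs_to_jiffies(ib_qib_rnr_table[rnr_code & 0x1F]);
}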
76
77/*
78 * Validate a RWQE and fill in the SGE state.
79 * Return 1 if OK.
80 */
81static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
82{
83 int i, j, ret;
84 struct ib_wc wc;
85 struct qib_lkey_table *rkt;
86 struct qib_pd *pd;
87 struct qib_sge_state *ss;
88
89 rkt = &to_idev(qp->ibqp.device)->lk_table;
90 pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
91 ss = &qp->r_sge;
92 ss->sg_list = qp->r_sg_list;
93 qp->r_len = 0;
94 for (i = j = 0; i < wqe->num_sge; i++) {
95 if (wqe->sg_list[i].length == 0)
96 continue;
97 /* Check LKEY */
98 if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
99 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
100 goto bad_lkey;
101 qp->r_len += wqe->sg_list[i].length;
102 j++;
103 }
104 ss->num_sge = j;
105 ss->total_len = qp->r_len;
106 ret = 1;
107 goto bail;
108
109bad_lkey:
110 while (j) {
111 struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
112
113 atomic_dec(&sge->mr->refcount);
114 }
115 ss->num_sge = 0;
116 memset(&wc, 0, sizeof(wc));
117 wc.wr_id = wqe->wr_id;
118 wc.status = IB_WC_LOC_PROT_ERR;
119 wc.opcode = IB_WC_RECV;
120 wc.qp = &qp->ibqp;
121 /* Signal solicited completion event. */
122 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
123 ret = 0;
124bail:
125 return ret;
126}
127
128/**
129 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
130 * @qp: the QP
131 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
132 *
133 * Return -1 if there is a local error, 0 if no RWQE is available,
134 * otherwise return 1.
135 *
136 * Can be called from interrupt level.
137 */
138int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
139{
140 unsigned long flags;
141 struct qib_rq *rq;
142 struct qib_rwq *wq;
143 struct qib_srq *srq;
144 struct qib_rwqe *wqe;
145 void (*handler)(struct ib_event *, void *);
146 u32 tail;
147 int ret;
148
149 if (qp->ibqp.srq) {
150 srq = to_isrq(qp->ibqp.srq);
151 handler = srq->ibsrq.event_handler;
152 rq = &srq->rq;
153 } else {
154 srq = NULL;
155 handler = NULL;
156 rq = &qp->r_rq;
157 }
158
159 spin_lock_irqsave(&rq->lock, flags);
160 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
161 ret = 0;
162 goto unlock;
163 }
164
165 wq = rq->wq;
166 tail = wq->tail;
167 /* Validate tail before using it since it is user writable. */
168 if (tail >= rq->size)
169 tail = 0;
170 if (unlikely(tail == wq->head)) {
171 ret = 0;
172 goto unlock;
173 }
174 /* Make sure entry is read after head index is read. */
175 smp_rmb();
176 wqe = get_rwqe_ptr(rq, tail);
177 /*
178 * Even though we update the tail index in memory, the verbs
179 * consumer is not supposed to post more entries until a
180 * completion is generated.
181 */
182 if (++tail >= rq->size)
183 tail = 0;
184 wq->tail = tail;
185 if (!wr_id_only && !qib_init_sge(qp, wqe)) {
186 ret = -1;
187 goto unlock;
188 }
189 qp->r_wr_id = wqe->wr_id;
190
191 ret = 1;
192 set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
193 if (handler) {
194 u32 n;
195
196 /*
197 * Validate head pointer value and compute
198 * the number of remaining WQEs.
199 */
200 n = wq->head;
201 if (n >= rq->size)
202 n = 0;
203 if (n < tail)
204 n += rq->size - tail;
205 else
206 n -= tail;
207 if (n < srq->limit) {
208 struct ib_event ev;
209
210 srq->limit = 0;
211 spin_unlock_irqrestore(&rq->lock, flags);
212 ev.device = qp->ibqp.device;
213 ev.element.srq = qp->ibqp.srq;
214 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
215 handler(&ev, srq->ibsrq.srq_context);
216 goto bail;
217 }
218 }
219unlock:
220 spin_unlock_irqrestore(&rq->lock, flags);
221bail:
222 return ret;
223}
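/*
 * Illustrative sketch (not part of the driver source) of the ring
 * discipline qib_get_rwqe() follows above: the tail index lives in
 * memory user space can write, so it is clamped before use, the queue
 * is empty when tail catches up with head, and smp_rmb() keeps the
 * entry read from being reordered before the head read.  The struct
 * and helper names are hypothetical.
 */
struct example_rwq {
	u32 head;	/* producer (post_recv) index */
	u32 tail;	/* consumer index */
	u32 size;
};

static int example_rwq_consume(struct example_rwq *wq, u32 *entry_idx)
{
	u32 tail = wq->tail;

	if (tail >= wq->size)		/* untrusted user index: clamp */
		tail = 0;
	if (tail == wq->head)		/* ring empty */
		return 0;
	smp_rmb();			/* order entry read after head read */
	*entry_idx = tail;		/* caller copies the WQE at tail */
	if (++tail >= wq->size)
		tail = 0;
	wq->tail = tail;
	return 1;
}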
224
225/*
226 * Switch to alternate path.
227 * The QP s_lock should be held and interrupts disabled.
228 */
229void qib_migrate_qp(struct qib_qp *qp)
230{
231 struct ib_event ev;
232
233 qp->s_mig_state = IB_MIG_MIGRATED;
234 qp->remote_ah_attr = qp->alt_ah_attr;
235 qp->port_num = qp->alt_ah_attr.port_num;
236 qp->s_pkey_index = qp->s_alt_pkey_index;
237
238 ev.device = qp->ibqp.device;
239 ev.element.qp = &qp->ibqp;
240 ev.event = IB_EVENT_PATH_MIG;
241 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
242}
243
244static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
245{
246 if (!index) {
247 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
248
249 return ppd->guid;
250 } else
251 return ibp->guids[index - 1];
252}
253
254static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
255{
256 return (gid->global.interface_id == id &&
257 (gid->global.subnet_prefix == gid_prefix ||
258 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
259}
260
261/*
262 * Validate an incoming packet's header fields against the QP's path state.
263 * This should be called with the QP s_lock held.
264 */
265int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
266 int has_grh, struct qib_qp *qp, u32 bth0)
267{
268 __be64 guid;
269
270 if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
271 if (!has_grh) {
272 if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
273 goto err;
274 } else {
275 if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
276 goto err;
277 guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
278 if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
279 goto err;
280 if (!gid_ok(&hdr->u.l.grh.sgid,
281 qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
282 qp->alt_ah_attr.grh.dgid.global.interface_id))
283 goto err;
284 }
285 if (!qib_pkey_ok((u16)bth0,
286 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
287 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
288 (u16)bth0,
289 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
290 0, qp->ibqp.qp_num,
291 hdr->lrh[3], hdr->lrh[1]);
292 goto err;
293 }
294 /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
295 if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
296 ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
297 goto err;
298 qib_migrate_qp(qp);
299 } else {
300 if (!has_grh) {
301 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
302 goto err;
303 } else {
304 if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
305 goto err;
306 guid = get_sguid(ibp,
307 qp->remote_ah_attr.grh.sgid_index);
308 if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
309 goto err;
310 if (!gid_ok(&hdr->u.l.grh.sgid,
311 qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
312 qp->remote_ah_attr.grh.dgid.global.interface_id))
313 goto err;
314 }
315 if (!qib_pkey_ok((u16)bth0,
316 qib_get_pkey(ibp, qp->s_pkey_index))) {
317 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
318 (u16)bth0,
319 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
320 0, qp->ibqp.qp_num,
321 hdr->lrh[3], hdr->lrh[1]);
322 goto err;
323 }
324 /* Validate the SLID. See Ch. 9.6.1.5 */
325 if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
326 ppd_from_ibp(ibp)->port != qp->port_num)
327 goto err;
328 if (qp->s_mig_state == IB_MIG_REARM &&
329 !(bth0 & IB_BTH_MIG_REQ))
330 qp->s_mig_state = IB_MIG_ARMED;
331 }
332
333 return 0;
334
335err:
336 return 1;
337}
338
339/**
340 * qib_ruc_loopback - handle UC and RC loopback requests
341 * @sqp: the sending QP
342 *
343 * This is called from qib_do_send() to
344 * forward a WQE addressed to the same HCA.
345 * Note that although we are single threaded due to the tasklet, we still
346 * have to protect against post_send(). We don't have to worry about
347 * receive interrupts since this is a connected protocol and all packets
348 * will pass through here.
349 */
350static void qib_ruc_loopback(struct qib_qp *sqp)
351{
352 struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
353 struct qib_qp *qp;
354 struct qib_swqe *wqe;
355 struct qib_sge *sge;
356 unsigned long flags;
357 struct ib_wc wc;
358 u64 sdata;
359 atomic64_t *maddr;
360 enum ib_wc_status send_status;
361 int release;
362 int ret;
363
364 /*
365 * Note that we check the responder QP state after
366 * checking the requester's state.
367 */
368 qp = qib_lookup_qpn(ibp, sqp->remote_qpn);
369
370 spin_lock_irqsave(&sqp->s_lock, flags);
371
372 /* Return if we are already busy processing a work request. */
373 if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
374 !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
375 goto unlock;
376
377 sqp->s_flags |= QIB_S_BUSY;
378
379again:
380 if (sqp->s_last == sqp->s_head)
381 goto clr_busy;
382 wqe = get_swqe_ptr(sqp, sqp->s_last);
383
384 /* Return if it is not OK to start a new work request. */
385 if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
386 if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
387 goto clr_busy;
388 /* We are in the error state, flush the work request. */
389 send_status = IB_WC_WR_FLUSH_ERR;
390 goto flush_send;
391 }
392
393 /*
394 * We can rely on the entry not changing without the s_lock
395 * being held until we update s_last.
396 * We increment s_cur to indicate s_last is in progress.
397 */
398 if (sqp->s_last == sqp->s_cur) {
399 if (++sqp->s_cur >= sqp->s_size)
400 sqp->s_cur = 0;
401 }
402 spin_unlock_irqrestore(&sqp->s_lock, flags);
403
404 if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
405 qp->ibqp.qp_type != sqp->ibqp.qp_type) {
406 ibp->n_pkt_drops++;
407 /*
408 * For RC, the requester would timeout and retry so
409 * shortcut the timeouts and just signal too many retries.
410 */
411 if (sqp->ibqp.qp_type == IB_QPT_RC)
412 send_status = IB_WC_RETRY_EXC_ERR;
413 else
414 send_status = IB_WC_SUCCESS;
415 goto serr;
416 }
417
418 memset(&wc, 0, sizeof wc);
419 send_status = IB_WC_SUCCESS;
420
421 release = 1;
422 sqp->s_sge.sge = wqe->sg_list[0];
423 sqp->s_sge.sg_list = wqe->sg_list + 1;
424 sqp->s_sge.num_sge = wqe->wr.num_sge;
425 sqp->s_len = wqe->length;
426 switch (wqe->wr.opcode) {
427 case IB_WR_SEND_WITH_IMM:
428 wc.wc_flags = IB_WC_WITH_IMM;
429 wc.ex.imm_data = wqe->wr.ex.imm_data;
430 /* FALLTHROUGH */
431 case IB_WR_SEND:
432 ret = qib_get_rwqe(qp, 0);
433 if (ret < 0)
434 goto op_err;
435 if (!ret)
436 goto rnr_nak;
437 break;
438
439 case IB_WR_RDMA_WRITE_WITH_IMM:
440 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
441 goto inv_err;
442 wc.wc_flags = IB_WC_WITH_IMM;
443 wc.ex.imm_data = wqe->wr.ex.imm_data;
444 ret = qib_get_rwqe(qp, 1);
445 if (ret < 0)
446 goto op_err;
447 if (!ret)
448 goto rnr_nak;
449 /* FALLTHROUGH */
450 case IB_WR_RDMA_WRITE:
451 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
452 goto inv_err;
453 if (wqe->length == 0)
454 break;
455 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
456 wqe->wr.wr.rdma.remote_addr,
457 wqe->wr.wr.rdma.rkey,
458 IB_ACCESS_REMOTE_WRITE)))
459 goto acc_err;
460 qp->r_sge.sg_list = NULL;
461 qp->r_sge.num_sge = 1;
462 qp->r_sge.total_len = wqe->length;
463 break;
464
465 case IB_WR_RDMA_READ:
466 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
467 goto inv_err;
468 if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
469 wqe->wr.wr.rdma.remote_addr,
470 wqe->wr.wr.rdma.rkey,
471 IB_ACCESS_REMOTE_READ)))
472 goto acc_err;
473 release = 0;
474 sqp->s_sge.sg_list = NULL;
475 sqp->s_sge.num_sge = 1;
476 qp->r_sge.sge = wqe->sg_list[0];
477 qp->r_sge.sg_list = wqe->sg_list + 1;
478 qp->r_sge.num_sge = wqe->wr.num_sge;
479 qp->r_sge.total_len = wqe->length;
480 break;
481
482 case IB_WR_ATOMIC_CMP_AND_SWP:
483 case IB_WR_ATOMIC_FETCH_AND_ADD:
484 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
485 goto inv_err;
486 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
487 wqe->wr.wr.atomic.remote_addr,
488 wqe->wr.wr.atomic.rkey,
489 IB_ACCESS_REMOTE_ATOMIC)))
490 goto acc_err;
491 /* Perform atomic OP and save result. */
492 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
493 sdata = wqe->wr.wr.atomic.compare_add;
494 *(u64 *) sqp->s_sge.sge.vaddr =
495 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
496 (u64) atomic64_add_return(sdata, maddr) - sdata :
497 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
498 sdata, wqe->wr.wr.atomic.swap);
499 atomic_dec(&qp->r_sge.sge.mr->refcount);
500 qp->r_sge.num_sge = 0;
501 goto send_comp;
502
503 default:
504 send_status = IB_WC_LOC_QP_OP_ERR;
505 goto serr;
506 }
507
508 sge = &sqp->s_sge.sge;
509 while (sqp->s_len) {
510 u32 len = sqp->s_len;
511
512 if (len > sge->length)
513 len = sge->length;
514 if (len > sge->sge_length)
515 len = sge->sge_length;
516 BUG_ON(len == 0);
517 qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
518 sge->vaddr += len;
519 sge->length -= len;
520 sge->sge_length -= len;
521 if (sge->sge_length == 0) {
522 if (!release)
523 atomic_dec(&sge->mr->refcount);
524 if (--sqp->s_sge.num_sge)
525 *sge = *sqp->s_sge.sg_list++;
526 } else if (sge->length == 0 && sge->mr->lkey) {
527 if (++sge->n >= QIB_SEGSZ) {
528 if (++sge->m >= sge->mr->mapsz)
529 break;
530 sge->n = 0;
531 }
532 sge->vaddr =
533 sge->mr->map[sge->m]->segs[sge->n].vaddr;
534 sge->length =
535 sge->mr->map[sge->m]->segs[sge->n].length;
536 }
537 sqp->s_len -= len;
538 }
539 if (release)
540 while (qp->r_sge.num_sge) {
541 atomic_dec(&qp->r_sge.sge.mr->refcount);
542 if (--qp->r_sge.num_sge)
543 qp->r_sge.sge = *qp->r_sge.sg_list++;
544 }
545
546 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
547 goto send_comp;
548
549 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
550 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
551 else
552 wc.opcode = IB_WC_RECV;
553 wc.wr_id = qp->r_wr_id;
554 wc.status = IB_WC_SUCCESS;
555 wc.byte_len = wqe->length;
556 wc.qp = &qp->ibqp;
557 wc.src_qp = qp->remote_qpn;
558 wc.slid = qp->remote_ah_attr.dlid;
559 wc.sl = qp->remote_ah_attr.sl;
560 wc.port_num = 1;
561 /* Signal completion event if the solicited bit is set. */
562 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
563 wqe->wr.send_flags & IB_SEND_SOLICITED);
564
565send_comp:
566 spin_lock_irqsave(&sqp->s_lock, flags);
567 ibp->n_loop_pkts++;
568flush_send:
569 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
570 qib_send_complete(sqp, wqe, send_status);
571 goto again;
572
573rnr_nak:
574 /* Handle RNR NAK */
575 if (qp->ibqp.qp_type == IB_QPT_UC)
576 goto send_comp;
577 ibp->n_rnr_naks++;
578 /*
579 * Note: we don't need the s_lock held since the BUSY flag
580 * makes this single threaded.
581 */
582 if (sqp->s_rnr_retry == 0) {
583 send_status = IB_WC_RNR_RETRY_EXC_ERR;
584 goto serr;
585 }
586 if (sqp->s_rnr_retry_cnt < 7)
587 sqp->s_rnr_retry--;
588 spin_lock_irqsave(&sqp->s_lock, flags);
589 if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
590 goto clr_busy;
591 sqp->s_flags |= QIB_S_WAIT_RNR;
592 sqp->s_timer.function = qib_rc_rnr_retry;
593 sqp->s_timer.expires = jiffies +
594 usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
595 add_timer(&sqp->s_timer);
596 goto clr_busy;
597
598op_err:
599 send_status = IB_WC_REM_OP_ERR;
600 wc.status = IB_WC_LOC_QP_OP_ERR;
601 goto err;
602
603inv_err:
604 send_status = IB_WC_REM_INV_REQ_ERR;
605 wc.status = IB_WC_LOC_QP_OP_ERR;
606 goto err;
607
608acc_err:
609 send_status = IB_WC_REM_ACCESS_ERR;
610 wc.status = IB_WC_LOC_PROT_ERR;
611err:
612 /* responder goes to error state */
613 qib_rc_error(qp, wc.status);
614
615serr:
616 spin_lock_irqsave(&sqp->s_lock, flags);
617 qib_send_complete(sqp, wqe, send_status);
618 if (sqp->ibqp.qp_type == IB_QPT_RC) {
619 int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
620
621 sqp->s_flags &= ~QIB_S_BUSY;
622 spin_unlock_irqrestore(&sqp->s_lock, flags);
623 if (lastwqe) {
624 struct ib_event ev;
625
626 ev.device = sqp->ibqp.device;
627 ev.element.qp = &sqp->ibqp;
628 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
629 sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
630 }
631 goto done;
632 }
633clr_busy:
634 sqp->s_flags &= ~QIB_S_BUSY;
635unlock:
636 spin_unlock_irqrestore(&sqp->s_lock, flags);
637done:
638 if (qp && atomic_dec_and_test(&qp->refcount))
639 wake_up(&qp->wait);
640}
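/*
 * Illustrative sketch (not part of the driver source) of the
 * atomic-op pattern used above and in the COMPARE_SWAP/FETCH_ADD path
 * of qib_rc_rcv(): fetch-and-add recovers the pre-add value by
 * subtracting the addend from atomic64_add_return(), while
 * compare-and-swap maps directly onto cmpxchg().  The helper name is
 * hypothetical.
 */
static inline u64 example_atomic_op(int is_fetch_add, u64 *vaddr,
				    u64 compare, u64 swap_or_add)
{
	return is_fetch_add ?
		(u64) atomic64_add_return(swap_or_add,
					  (atomic64_t *) vaddr) - swap_or_add :
		(u64) cmpxchg(vaddr, compare, swap_or_add);
}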
641
642/**
643 * qib_make_grh - construct a GRH header
644 * @ibp: a pointer to the IB port
645 * @hdr: a pointer to the GRH header being constructed
646 * @grh: the global route address to send to
647 * @hwords: the number of 32 bit words of header being sent
648 * @nwords: the number of 32 bit words of data being sent
649 *
650 * Return the size of the header in 32 bit words.
651 */
652u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
653 struct ib_global_route *grh, u32 hwords, u32 nwords)
654{
655 hdr->version_tclass_flow =
656 cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
657 (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
658 (grh->flow_label << IB_GRH_FLOW_SHIFT));
659 hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
660 /* next_hdr is defined by C8-7 in ch. 8.4.1 */
661 hdr->next_hdr = IB_GRH_NEXT_HDR;
662 hdr->hop_limit = grh->hop_limit;
663 /* The SGID is 32-bit aligned. */
664 hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
665 hdr->sgid.global.interface_id = grh->sgid_index ?
666 ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
667 hdr->dgid = grh->dgid;
668
669 /* GRH header size in 32-bit words. */
670 return sizeof(struct ib_grh) / sizeof(u32);
671}
672
673void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
674 u32 bth0, u32 bth2)
675{
676 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
677 u16 lrh0;
678 u32 nwords;
679 u32 extra_bytes;
680
681 /* Construct the header. */
682 extra_bytes = -qp->s_cur_size & 3;
683 nwords = (qp->s_cur_size + extra_bytes) >> 2;
684 lrh0 = QIB_LRH_BTH;
685 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
686 qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
687 &qp->remote_ah_attr.grh,
688 qp->s_hdrwords, nwords);
689 lrh0 = QIB_LRH_GRH;
690 }
691 lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
692 qp->remote_ah_attr.sl << 4;
693 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
694 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
695 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
696 qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
697 qp->remote_ah_attr.src_path_bits);
698 bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
699 bth0 |= extra_bytes << 20;
700 if (qp->s_mig_state == IB_MIG_MIGRATED)
701 bth0 |= IB_BTH_MIG_REQ;
702 ohdr->bth[0] = cpu_to_be32(bth0);
703 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
704 ohdr->bth[2] = cpu_to_be32(bth2);
705}
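/*
 * Illustrative worked example (not part of the driver source): in
 * qib_make_ruc_header() above, "-qp->s_cur_size & 3" computes the
 * 0..3 pad bytes needed to round the payload up to a 4-byte boundary,
 * and the padded size shifted right by 2 is the dword count carried
 * in nwords.  Helper names are hypothetical.
 */
static inline u32 example_pad_bytes(u32 size)
{
	return -size & 3;	/* 5 -> 3, 6 -> 2, 7 -> 1, 8 -> 0 */
}

static inline u32 example_payload_dwords(u32 size)
{
	return (size + example_pad_bytes(size)) >> 2;
}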
706
707/**
708 * qib_do_send - perform a send on a QP
709 * @work: contains a pointer to the QP
710 *
711 * Process entries in the send work queue until credit or queue is
712 * exhausted. Only allow one CPU to send a packet per QP (tasklet).
713 * Otherwise, two threads could send packets out of order.
714 */
715void qib_do_send(struct work_struct *work)
716{
717 struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
718 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
719 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
720 int (*make_req)(struct qib_qp *qp);
721 unsigned long flags;
722
723 if ((qp->ibqp.qp_type == IB_QPT_RC ||
724 qp->ibqp.qp_type == IB_QPT_UC) &&
725 (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
726 qib_ruc_loopback(qp);
727 return;
728 }
729
730 if (qp->ibqp.qp_type == IB_QPT_RC)
731 make_req = qib_make_rc_req;
732 else if (qp->ibqp.qp_type == IB_QPT_UC)
733 make_req = qib_make_uc_req;
734 else
735 make_req = qib_make_ud_req;
736
737 spin_lock_irqsave(&qp->s_lock, flags);
738
739 /* Return if we are already busy processing a work request. */
740 if (!qib_send_ok(qp)) {
741 spin_unlock_irqrestore(&qp->s_lock, flags);
742 return;
743 }
744
745 qp->s_flags |= QIB_S_BUSY;
746
747 spin_unlock_irqrestore(&qp->s_lock, flags);
748
749 do {
750 /* Check for a constructed packet to be sent. */
751 if (qp->s_hdrwords != 0) {
752 /*
753 * If the packet cannot be sent now, return and
754 * the send tasklet will be woken up later.
755 */
756 if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
757 qp->s_cur_sge, qp->s_cur_size))
758 break;
759 /* Record that s_hdr is empty. */
760 qp->s_hdrwords = 0;
761 }
762 } while (make_req(qp));
763}
764
765/*
766 * This should be called with s_lock held.
767 */
768void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
769 enum ib_wc_status status)
770{
771 u32 old_last, last;
772 unsigned i;
773
774 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
775 return;
776
777 for (i = 0; i < wqe->wr.num_sge; i++) {
778 struct qib_sge *sge = &wqe->sg_list[i];
779
780 atomic_dec(&sge->mr->refcount);
781 }
782 if (qp->ibqp.qp_type == IB_QPT_UD ||
783 qp->ibqp.qp_type == IB_QPT_SMI ||
784 qp->ibqp.qp_type == IB_QPT_GSI)
785 atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
786
787 /* See ch. 11.2.4.1 and 10.7.3.1 */
788 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
789 (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
790 status != IB_WC_SUCCESS) {
791 struct ib_wc wc;
792
793 memset(&wc, 0, sizeof wc);
794 wc.wr_id = wqe->wr.wr_id;
795 wc.status = status;
796 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
797 wc.qp = &qp->ibqp;
798 if (status == IB_WC_SUCCESS)
799 wc.byte_len = wqe->length;
800 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
801 status != IB_WC_SUCCESS);
802 }
803
804 last = qp->s_last;
805 old_last = last;
806 if (++last >= qp->s_size)
807 last = 0;
808 qp->s_last = last;
809 if (qp->s_acked == old_last)
810 qp->s_acked = last;
811 if (qp->s_cur == old_last)
812 qp->s_cur = last;
813 if (qp->s_tail == old_last)
814 qp->s_tail = last;
815 if (qp->state == IB_QPS_SQD && last == qp->s_cur)
816 qp->s_draining = 0;
817}
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index 2a68d9f624dd..0aeed0e74cb6 100644
--- a/drivers/infiniband/hw/ipath/ipath_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. 3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
@@ -32,22 +32,40 @@
32 */ 32 */
33/* 33/*
34 * This file contains all of the code that is specific to the SerDes 34 * This file contains all of the code that is specific to the SerDes
35 * on the InfiniPath 7220 chip. 35 * on the QLogic_IB 7220 chip.
36 */ 36 */
37 37
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/delay.h> 39#include <linux/delay.h>
40 40
41#include "ipath_kernel.h" 41#include "qib.h"
42#include "ipath_registers.h" 42#include "qib_7220.h"
43#include "ipath_7220.h" 43
44/*
45 * Same as in qib_iba7220.c, but just the registers needed here.
46 * Could move whole set to qib_7220.h, but decided better to keep
47 * local.
48 */
49#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
50#define kr_hwerrclear KREG_IDX(HwErrClear)
51#define kr_hwerrmask KREG_IDX(HwErrMask)
52#define kr_hwerrstatus KREG_IDX(HwErrStatus)
53#define kr_ibcstatus KREG_IDX(IBCStatus)
54#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
55#define kr_scratch KREG_IDX(Scratch)
56#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
57/* these are used only here, not in qib_iba7220.c */
58#define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl)
59#define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg)
60#define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg)
61#define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl)
62#define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0)
44 63
45/* 64/*
46 * The IBSerDesMappTable is a memory that holds values to be stored in 65 * The IBSerDesMappTable is a memory that holds values to be stored in
47 * various SerDes registers by IBC. It is not part of the normal kregs 66 * various SerDes registers by IBC.
48 * map and is used in exactly one place, hence the #define below.
49 */ 67 */
50#define KR_IBSerDesMappTable (0x94000 / (sizeof(uint64_t))) 68#define kr_serdes_maptable KREG_IDX(IBSerDesMappTable)
51 69
52/* 70/*
53 * Below used for sdnum parameter, selecting one of the two sections 71 * Below used for sdnum parameter, selecting one of the two sections
@@ -71,42 +89,37 @@
71#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8)) 89#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
72 90
73/* Forward declarations. */ 91/* Forward declarations. */
74static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, 92static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
75 u32 data, u32 mask); 93 u32 data, u32 mask);
76static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val, 94static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
77 int mask); 95 int mask);
78static int ipath_sd_trimdone_poll(struct ipath_devdata *dd); 96static int qib_sd_trimdone_poll(struct qib_devdata *dd);
79static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd, 97static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
80 const char *where); 98static int qib_sd_setvals(struct qib_devdata *dd);
81static int ipath_sd_setvals(struct ipath_devdata *dd); 99static int qib_sd_early(struct qib_devdata *dd);
82static int ipath_sd_early(struct ipath_devdata *dd); 100static int qib_sd_dactrim(struct qib_devdata *dd);
83static int ipath_sd_dactrim(struct ipath_devdata *dd); 101static int qib_internal_presets(struct qib_devdata *dd);
84/* Set the registers that IBC may muck with to their default "preset" values */
85int ipath_sd7220_presets(struct ipath_devdata *dd);
86static int ipath_internal_presets(struct ipath_devdata *dd);
87/* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */ 102/* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
88static int ipath_sd_trimself(struct ipath_devdata *dd, int val); 103static int qib_sd_trimself(struct qib_devdata *dd, int val);
89static int epb_access(struct ipath_devdata *dd, int sdnum, int claim); 104static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
90
91void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup);
92 105
93/* 106/*
94 * Below keeps track of whether the "once per power-on" initialization has 107 * Below keeps track of whether the "once per power-on" initialization has
95 * been done, because uC code Version 1.32.17 or higher allows the uC to 108 * been done, because uC code Version 1.32.17 or higher allows the uC to
96 * be reset at will, and Automatic Equalization may require it. So the 109 * be reset at will, and Automatic Equalization may require it. So the
97 * state of the reset "pin", as reflected in was_reset parameter to 110 * state of the reset "pin", is no longer valid. Instead, we check for the
98 * ipath_sd7220_init() is no longer valid. Instead, we check for the
99 * actual uC code having been loaded. 111 * actual uC code having been loaded.
100 */ 112 */
101static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd) 113static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd)
102{ 114{
103 if (!dd->serdes_first_init_done && (ipath_sd7220_ib_vfy(dd) > 0)) 115 struct qib_devdata *dd = ppd->dd;
104 dd->serdes_first_init_done = 1; 116 if (!dd->cspec->serdes_first_init_done && (qib_sd7220_ib_vfy(dd) > 0))
105 return dd->serdes_first_init_done; 117 dd->cspec->serdes_first_init_done = 1;
118 return dd->cspec->serdes_first_init_done;
106} 119}
107 120
108/* repeat #define for local use. "Real" #define is in ipath_iba7220.c */ 121/* repeat #define for local use. "Real" #define is in qib_iba7220.c */
109#define INFINIPATH_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL 122#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
110#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF)) 123#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
111#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF)) 124#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
112#define UC_PAR_CLR_D 8 125#define UC_PAR_CLR_D 8
@@ -114,25 +127,25 @@ static int ipath_ibsd_ucode_loaded(struct ipath_devdata *dd)
114#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS) 127#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
115#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27) 128#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
116 129
117void ipath_sd7220_clr_ibpar(struct ipath_devdata *dd) 130void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
118{ 131{
119 int ret; 132 int ret;
120 133
121 /* clear, then re-enable parity errs */ 134 /* clear, then re-enable parity errs */
122 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 135 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
123 UC_PAR_CLR_D, UC_PAR_CLR_M); 136 UC_PAR_CLR_D, UC_PAR_CLR_M);
124 if (ret < 0) { 137 if (ret < 0) {
125 ipath_dev_err(dd, "Failed clearing IBSerDes Parity err\n"); 138 qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
126 goto bail; 139 goto bail;
127 } 140 }
128 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0, 141 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
129 UC_PAR_CLR_M); 142 UC_PAR_CLR_M);
130 143
131 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); 144 qib_read_kreg32(dd, kr_scratch);
132 udelay(4); 145 udelay(4);
133 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 146 qib_write_kreg(dd, kr_hwerrclear,
134 INFINIPATH_HWE_IB_UC_MEMORYPARITYERR); 147 QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
135 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); 148 qib_read_kreg32(dd, kr_scratch);
136bail: 149bail:
137 return; 150 return;
138} 151}
@@ -146,7 +159,7 @@ bail:
146#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS) 159#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
147#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS) 160#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
148 161
149static int ipath_resync_ibepb(struct ipath_devdata *dd) 162static int qib_resync_ibepb(struct qib_devdata *dd)
150{ 163{
151 int ret, pat, tries, chn; 164 int ret, pat, tries, chn;
152 u32 loc; 165 u32 loc;
@@ -155,43 +168,42 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd)
155 chn = 0; 168 chn = 0;
156 for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) { 169 for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
157 loc = IB_PGUDP(chn); 170 loc = IB_PGUDP(chn);
158 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); 171 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
159 if (ret < 0) { 172 if (ret < 0) {
160 ipath_dev_err(dd, "Failed read in resync\n"); 173 qib_dev_err(dd, "Failed read in resync\n");
161 continue; 174 continue;
162 } 175 }
163 if (ret != 0xF0 && ret != 0x55 && tries == 0) 176 if (ret != 0xF0 && ret != 0x55 && tries == 0)
164 ipath_dev_err(dd, "unexpected pattern in resync\n"); 177 qib_dev_err(dd, "unexpected pattern in resync\n");
165 pat = ret ^ 0xA5; /* alternate F0 and 55 */ 178 pat = ret ^ 0xA5; /* alternate F0 and 55 */
166 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF); 179 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
167 if (ret < 0) { 180 if (ret < 0) {
168 ipath_dev_err(dd, "Failed write in resync\n"); 181 qib_dev_err(dd, "Failed write in resync\n");
169 continue; 182 continue;
170 } 183 }
171 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); 184 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
172 if (ret < 0) { 185 if (ret < 0) {
173 ipath_dev_err(dd, "Failed re-read in resync\n"); 186 qib_dev_err(dd, "Failed re-read in resync\n");
174 continue; 187 continue;
175 } 188 }
176 if (ret != pat) { 189 if (ret != pat) {
177 ipath_dev_err(dd, "Failed compare1 in resync\n"); 190 qib_dev_err(dd, "Failed compare1 in resync\n");
178 continue; 191 continue;
179 } 192 }
180 loc = IB_CMUDONE(chn); 193 loc = IB_CMUDONE(chn);
181 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0); 194 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
182 if (ret < 0) { 195 if (ret < 0) {
183 ipath_dev_err(dd, "Failed CMUDONE rd in resync\n"); 196 qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
184 continue; 197 continue;
185 } 198 }
186 if ((ret & 0x70) != ((chn << 4) | 0x40)) { 199 if ((ret & 0x70) != ((chn << 4) | 0x40)) {
187 ipath_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n", 200 qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
188 ret, chn); 201 ret, chn);
189 continue; 202 continue;
190 } 203 }
191 if (++chn == 4) 204 if (++chn == 4)
192 break; /* Success */ 205 break; /* Success */
193 } 206 }
194 ipath_cdbg(VERBOSE, "Resync in %d tries\n", tries);
195 return (ret > 0) ? 0 : ret; 207 return (ret > 0) ? 0 : ret;
196} 208}
197 209
@@ -199,32 +211,32 @@ static int ipath_resync_ibepb(struct ipath_devdata *dd)
199 * Localize the stuff that should be done to change IB uC reset 211 * Localize the stuff that should be done to change IB uC reset
200 * returns <0 for errors. 212 * returns <0 for errors.
201 */ 213 */
202static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst) 214static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
203{ 215{
204 u64 rst_val; 216 u64 rst_val;
205 int ret = 0; 217 int ret = 0;
206 unsigned long flags; 218 unsigned long flags;
207 219
208 rst_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl); 220 rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
209 if (assert_rst) { 221 if (assert_rst) {
210 /* 222 /*
211 * Vendor recommends "interrupting" uC before reset, to 223 * Vendor recommends "interrupting" uC before reset, to
212 * minimize possible glitches. 224 * minimize possible glitches.
213 */ 225 */
214 spin_lock_irqsave(&dd->ipath_sdepb_lock, flags); 226 spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
215 epb_access(dd, IB_7220_SERDES, 1); 227 epb_access(dd, IB_7220_SERDES, 1);
216 rst_val |= 1ULL; 228 rst_val |= 1ULL;
217 /* Squelch possible parity error from _asserting_ reset */ 229 /* Squelch possible parity error from _asserting_ reset */
218 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 230 qib_write_kreg(dd, kr_hwerrmask,
219 dd->ipath_hwerrmask & 231 dd->cspec->hwerrmask &
220 ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR); 232 ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
221 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val); 233 qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
222 /* flush write, delay to ensure it took effect */ 234 /* flush write, delay to ensure it took effect */
223 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); 235 qib_read_kreg32(dd, kr_scratch);
224 udelay(2); 236 udelay(2);
225 /* once it's reset, can remove interrupt */ 237 /* once it's reset, can remove interrupt */
226 epb_access(dd, IB_7220_SERDES, -1); 238 epb_access(dd, IB_7220_SERDES, -1);
227 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); 239 spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
228 } else { 240 } else {
229 /* 241 /*
230 * Before we de-assert reset, we need to deal with 242 * Before we de-assert reset, we need to deal with
@@ -235,46 +247,46 @@ static int ipath_ibsd_reset(struct ipath_devdata *dd, int assert_rst)
235 */ 247 */
236 u64 val; 248 u64 val;
237 rst_val &= ~(1ULL); 249 rst_val &= ~(1ULL);
238 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 250 qib_write_kreg(dd, kr_hwerrmask,
239 dd->ipath_hwerrmask & 251 dd->cspec->hwerrmask &
240 ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR); 252 ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
241 253
242 ret = ipath_resync_ibepb(dd); 254 ret = qib_resync_ibepb(dd);
243 if (ret < 0) 255 if (ret < 0)
244 ipath_dev_err(dd, "unable to re-sync IB EPB\n"); 256 qib_dev_err(dd, "unable to re-sync IB EPB\n");
245 257
246 /* set uC control regs to suppress parity errs */ 258 /* set uC control regs to suppress parity errs */
247 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1); 259 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
248 if (ret < 0) 260 if (ret < 0)
249 goto bail; 261 goto bail;
250 /* IB uC code past Version 1.32.17 allow suppression of wdog */ 262 /* IB uC code past Version 1.32.17 allow suppression of wdog */
251 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 263 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
252 0x80); 264 0x80);
253 if (ret < 0) { 265 if (ret < 0) {
254 ipath_dev_err(dd, "Failed to set WDOG disable\n"); 266 qib_dev_err(dd, "Failed to set WDOG disable\n");
255 goto bail; 267 goto bail;
256 } 268 }
257 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, rst_val); 269 qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
258 /* flush write, delay for startup */ 270 /* flush write, delay for startup */
259 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); 271 qib_read_kreg32(dd, kr_scratch);
260 udelay(1); 272 udelay(1);
261 /* clear, then re-enable parity errs */ 273 /* clear, then re-enable parity errs */
262 ipath_sd7220_clr_ibpar(dd); 274 qib_sd7220_clr_ibpar(dd);
263 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus); 275 val = qib_read_kreg64(dd, kr_hwerrstatus);
264 if (val & INFINIPATH_HWE_IB_UC_MEMORYPARITYERR) { 276 if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) {
265 ipath_dev_err(dd, "IBUC Parity still set after RST\n"); 277 qib_dev_err(dd, "IBUC Parity still set after RST\n");
266 dd->ipath_hwerrmask &= 278 dd->cspec->hwerrmask &=
267 ~INFINIPATH_HWE_IB_UC_MEMORYPARITYERR; 279 ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
268 } 280 }
269 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 281 qib_write_kreg(dd, kr_hwerrmask,
270 dd->ipath_hwerrmask); 282 dd->cspec->hwerrmask);
271 } 283 }
272 284
273bail: 285bail:
274 return ret; 286 return ret;
275} 287}
276 288
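The reset path above relies on a save/mask/restore idiom: squelch the uC memory-parity error in hwerrmask before touching reset, flush the write with a scratch-register read, and rewrite the saved mask once the transition is done. A minimal userspace sketch of that shape (the bit position is a placeholder, not the chip's real QLOGIC_IB_HWE_* value):

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit position; the real hardware-error bit differs. */
#define HWE_UC_MEMORYPARITYERR (1ULL << 30)

int main(void)
{
        uint64_t hwerrmask = ~0ULL;     /* saved copy: all errors enabled */

        /* Squelch the parity error while the uC is put through reset */
        uint64_t during = hwerrmask & ~HWE_UC_MEMORYPARITYERR;

        printf("during reset: %#llx\n", (unsigned long long)during);
        /* ...assert/de-assert reset, clear any latched status... */
        printf("after reset:  %#llx\n", (unsigned long long)hwerrmask);
        return 0;
}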
277static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd, 289static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
278 const char *where) 290 const char *where)
279{ 291{
280 int ret, chn, baduns; 292 int ret, chn, baduns;
@@ -286,69 +298,71 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
286 /* give time for reset to settle out in EPB */ 298 /* give time for reset to settle out in EPB */
287 udelay(2); 299 udelay(2);
288 300
289 ret = ipath_resync_ibepb(dd); 301 ret = qib_resync_ibepb(dd);
290 if (ret < 0) 302 if (ret < 0)
291 ipath_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where); 303 qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
292 304
293 /* Do "sacrificial read" to get EPB in sane state after reset */ 305 /* Do "sacrificial read" to get EPB in sane state after reset */
294 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0); 306 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
295 if (ret < 0) 307 if (ret < 0)
296 ipath_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where); 308 qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
297 309
298 /* Check/show "summary" Trim-done bit in IBCStatus */ 310 /* Check/show "summary" Trim-done bit in IBCStatus */
299 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus); 311 val = qib_read_kreg64(dd, kr_ibcstatus);
300 if (val & (1ULL << 11)) 312 if (!(val & (1ULL << 11)))
301 ipath_cdbg(VERBOSE, "IBCS TRIMDONE set (%s)\n", where); 313 qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
302 else 314 /*
303 ipath_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where); 315 * Do "dummy read/mod/wr" to get EPB in sane state after reset
304 316 * The default value for MPREG6 is 0.
317 */
305 udelay(2); 318 udelay(2);
306 319
307 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80); 320 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
308 if (ret < 0) 321 if (ret < 0)
309 ipath_dev_err(dd, "Failed Dummy RMW, (%s)\n", where); 322 qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
310 udelay(10); 323 udelay(10);
311 324
312 baduns = 0; 325 baduns = 0;
313 326
314 for (chn = 3; chn >= 0; --chn) { 327 for (chn = 3; chn >= 0; --chn) {
315 /* Read CTRL reg for each channel to check TRIMDONE */ 328 /* Read CTRL reg for each channel to check TRIMDONE */
316 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, 329 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
317 IB_CTRL2(chn), 0, 0); 330 IB_CTRL2(chn), 0, 0);
318 if (ret < 0) 331 if (ret < 0)
319 ipath_dev_err(dd, "Failed checking TRIMDONE, chn %d" 332 qib_dev_err(dd, "Failed checking TRIMDONE, chn %d"
320 " (%s)\n", chn, where); 333 " (%s)\n", chn, where);
321 334
322 if (!(ret & 0x10)) { 335 if (!(ret & 0x10)) {
323 int probe; 336 int probe;
337
324 baduns |= (1 << chn); 338 baduns |= (1 << chn);
325 ipath_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)." 339 qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
326 " (%s)\n", chn, ret, where); 340 " (%s)\n", chn, ret, where);
327 probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, 341 probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
328 IB_PGUDP(0), 0, 0); 342 IB_PGUDP(0), 0, 0);
329 ipath_dev_err(dd, "probe is %d (%02X)\n", 343 qib_dev_err(dd, "probe is %d (%02X)\n",
330 probe, probe); 344 probe, probe);
331 probe = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, 345 probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
332 IB_CTRL2(chn), 0, 0); 346 IB_CTRL2(chn), 0, 0);
333 ipath_dev_err(dd, "re-read: %d (%02X)\n", 347 qib_dev_err(dd, "re-read: %d (%02X)\n",
334 probe, probe); 348 probe, probe);
335 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, 349 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
336 IB_CTRL2(chn), 0x10, 0x10); 350 IB_CTRL2(chn), 0x10, 0x10);
337 if (ret < 0) 351 if (ret < 0)
338 ipath_dev_err(dd, 352 qib_dev_err(dd,
339 "Err on TRIMDONE rewrite1\n"); 353 "Err on TRIMDONE rewrite1\n");
340 } 354 }
341 } 355 }
342 for (chn = 3; chn >= 0; --chn) { 356 for (chn = 3; chn >= 0; --chn) {
343 /* Read CTRL reg for each channel to check TRIMDONE */ 357 /* Read CTRL reg for each channel to check TRIMDONE */
344 if (baduns & (1 << chn)) { 358 if (baduns & (1 << chn)) {
345 ipath_dev_err(dd, 359 qib_dev_err(dd,
346	 "Resetting TRIMDONE on chn %d (%s)\n", 360	 "Resetting TRIMDONE on chn %d (%s)\n",
347 chn, where); 361 chn, where);
348 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, 362 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
349 IB_CTRL2(chn), 0x10, 0x10); 363 IB_CTRL2(chn), 0x10, 0x10);
350 if (ret < 0) 364 if (ret < 0)
351 ipath_dev_err(dd, "Failed re-setting " 365 qib_dev_err(dd, "Failed re-setting "
352 "TRIMDONE, chn %d (%s)\n", 366 "TRIMDONE, chn %d (%s)\n",
353 chn, where); 367 chn, where);
354 } 368 }
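The monitor above uses a two-pass pattern: the first channel loop only records failures in the baduns bitmask, and a second loop rewrites TRIMDONE on exactly those channels, keeping the diagnostic reads separate from the corrective writes. A self-contained sketch of the bookkeeping, with a hypothetical chan_trimdone() probe standing in for the EPB read:

#include <stdio.h>

/* Hypothetical probe: nonzero if TRIMDONE reads back set on a channel */
static int chan_trimdone(int chn)
{
        return chn != 2;        /* pretend channel 2 lost its bit */
}

int main(void)
{
        int chn, baduns = 0;

        /* Pass 1: record failing channels in a bitmask */
        for (chn = 3; chn >= 0; --chn)
                if (!chan_trimdone(chn))
                        baduns |= 1 << chn;

        /* Pass 2: re-set TRIMDONE only where it was lost */
        for (chn = 3; chn >= 0; --chn)
                if (baduns & (1 << chn))
                        printf("re-setting TRIMDONE on chn %d\n", chn);
        return 0;
}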
@@ -361,96 +375,86 @@ static void ipath_sd_trimdone_monitor(struct ipath_devdata *dd,
361 * Post IB uC code version 1.32.17, was_reset being 1 is not really 375 * Post IB uC code version 1.32.17, was_reset being 1 is not really
362 * informative, so we double-check. 376 * informative, so we double-check.
363 */ 377 */
364int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset) 378int qib_sd7220_init(struct qib_devdata *dd)
365{ 379{
366 int ret = 1; /* default to failure */ 380 int ret = 1; /* default to failure */
367 int first_reset; 381 int first_reset, was_reset;
368 int val_stat;
369 382
383 /* SERDES MPU reset recorded in D0 */
384 was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
370 if (!was_reset) { 385 if (!was_reset) {
371 /* entered with reset not asserted, we need to do it */ 386 /* entered with reset not asserted, we need to do it */
372 ipath_ibsd_reset(dd, 1); 387 qib_ibsd_reset(dd, 1);
373 ipath_sd_trimdone_monitor(dd, "Driver-reload"); 388 qib_sd_trimdone_monitor(dd, "Driver-reload");
374 } 389 }
375
376 /* Substitute our deduced value for was_reset */ 390 /* Substitute our deduced value for was_reset */
377 ret = ipath_ibsd_ucode_loaded(dd); 391 ret = qib_ibsd_ucode_loaded(dd->pport);
378 if (ret < 0) { 392 if (ret < 0)
379 ret = 1; 393 goto bail;
380 goto done;
381 }
382 first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
383 394
395 first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
384 /* 396 /*
385 * Alter some regs per vendor latest doc, reset-defaults 397 * Alter some regs per vendor latest doc, reset-defaults
386 * are not right for IB. 398 * are not right for IB.
387 */ 399 */
388 ret = ipath_sd_early(dd); 400 ret = qib_sd_early(dd);
389 if (ret < 0) { 401 if (ret < 0) {
390 ipath_dev_err(dd, "Failed to set IB SERDES early defaults\n"); 402 qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
391 ret = 1; 403 goto bail;
392 goto done;
393 } 404 }
394
395 /* 405 /*
396 * Set DAC manual trim IB. 406 * Set DAC manual trim IB.
397 * We only do this once after chip has been reset (usually 407 * We only do this once after chip has been reset (usually
398 * same as once per system boot). 408 * same as once per system boot).
399 */ 409 */
400 if (first_reset) { 410 if (first_reset) {
401 ret = ipath_sd_dactrim(dd); 411 ret = qib_sd_dactrim(dd);
402 if (ret < 0) { 412 if (ret < 0) {
403 ipath_dev_err(dd, "Failed IB SERDES DAC trim\n"); 413 qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
404 ret = 1; 414 goto bail;
405 goto done;
406 } 415 }
407 } 416 }
408
409 /* 417 /*
410 * Set various registers (DDS and RXEQ) that will be 418 * Set various registers (DDS and RXEQ) that will be
411 * controlled by IBC (in 1.2 mode) to reasonable preset values 419 * controlled by IBC (in 1.2 mode) to reasonable preset values
412 * Calling the "internal" version avoids the "check for needed" 420 * Calling the "internal" version avoids the "check for needed"
413 * and "trimdone monitor" that might be counter-productive. 421 * and "trimdone monitor" that might be counter-productive.
414 */ 422 */
415 ret = ipath_internal_presets(dd); 423 ret = qib_internal_presets(dd);
416 if (ret < 0) { 424 if (ret < 0) {
417 ipath_dev_err(dd, "Failed to set IB SERDES presets\n"); 425 qib_dev_err(dd, "Failed to set IB SERDES presets\n");
418 ret = 1; 426 goto bail;
419 goto done;
420 } 427 }
421 ret = ipath_sd_trimself(dd, 0x80); 428 ret = qib_sd_trimself(dd, 0x80);
422 if (ret < 0) { 429 if (ret < 0) {
423 ipath_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n"); 430 qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
424 ret = 1; 431 goto bail;
425 goto done;
426 } 432 }
427 433
428 /* Load image, then try to verify */ 434 /* Load image, then try to verify */
429 ret = 0; /* Assume success */ 435 ret = 0; /* Assume success */
430 if (first_reset) { 436 if (first_reset) {
431 int vfy; 437 int vfy;
432 int trim_done; 438 int trim_done;
433 ipath_dbg("SerDes uC was reset, reloading PRAM\n"); 439
434 ret = ipath_sd7220_ib_load(dd); 440 ret = qib_sd7220_ib_load(dd);
435 if (ret < 0) { 441 if (ret < 0) {
436 ipath_dev_err(dd, "Failed to load IB SERDES image\n"); 442 qib_dev_err(dd, "Failed to load IB SERDES image\n");
437 ret = 1; 443 goto bail;
438 goto done; 444 } else {
439 } 445 /* Loaded image, try to verify */
446 vfy = qib_sd7220_ib_vfy(dd);
447 if (vfy != ret) {
448 qib_dev_err(dd, "SERDES PRAM VFY failed\n");
449 goto bail;
450 } /* end if verified */
451 } /* end if loaded */
440 452
441 /* Loaded image, try to verify */
442 vfy = ipath_sd7220_ib_vfy(dd);
443 if (vfy != ret) {
444 ipath_dev_err(dd, "SERDES PRAM VFY failed\n");
445 ret = 1;
446 goto done;
447 }
448 /* 453 /*
449 * Loaded and verified. Almost good... 454 * Loaded and verified. Almost good...
450 * hold "success" in ret 455 * hold "success" in ret
451 */ 456 */
452 ret = 0; 457 ret = 0;
453
454 /* 458 /*
455 * Prev steps all worked, continue bringup 459 * Prev steps all worked, continue bringup
456 * De-assert RESET to uC, only in first reset, to allow 460 * De-assert RESET to uC, only in first reset, to allow
@@ -461,45 +465,47 @@ int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset)
461 */ 465 */
462 ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38); 466 ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
463 if (ret < 0) { 467 if (ret < 0) {
464 ipath_dev_err(dd, "Failed clearing START_EQ1\n"); 468 qib_dev_err(dd, "Failed clearing START_EQ1\n");
465 ret = 1; 469 goto bail;
466 goto done;
467 } 470 }
468 471
469 ipath_ibsd_reset(dd, 0); 472 qib_ibsd_reset(dd, 0);
470 /* 473 /*
471 * If this is not the first reset, trimdone should be set 474 * If this is not the first reset, trimdone should be set
472	 * already. 475	 * already. We may need to verify this.
473 */ 476 */
474 trim_done = ipath_sd_trimdone_poll(dd); 477 trim_done = qib_sd_trimdone_poll(dd);
475 /* 478 /*
476 * Whether or not trimdone succeeded, we need to put the 479 * Whether or not trimdone succeeded, we need to put the
477 * uC back into reset to avoid a possible fight with the 480 * uC back into reset to avoid a possible fight with the
478 * IBC state-machine. 481 * IBC state-machine.
479 */ 482 */
480 ipath_ibsd_reset(dd, 1); 483 qib_ibsd_reset(dd, 1);
481 484
482 if (!trim_done) { 485 if (!trim_done) {
483 ipath_dev_err(dd, "No TRIMDONE seen\n"); 486 qib_dev_err(dd, "No TRIMDONE seen\n");
484 ret = 1; 487 goto bail;
485 goto done;
486 } 488 }
487 489 /*
488 ipath_sd_trimdone_monitor(dd, "First-reset"); 490 * DEBUG: check each time we reset if trimdone bits have
491 * gotten cleared, and re-set them.
492 */
493 qib_sd_trimdone_monitor(dd, "First-reset");
489 /* Remember so we do not re-do the load, dactrim, etc. */ 494 /* Remember so we do not re-do the load, dactrim, etc. */
490 dd->serdes_first_init_done = 1; 495 dd->cspec->serdes_first_init_done = 1;
491 } 496 }
492 /* 497 /*
493	 * Setup for channel training and load values for 498	 * Setup for channel training and load values for
494 * RxEq and DDS in tables used by IBC in IB1.2 mode 499 * RxEq and DDS in tables used by IBC in IB1.2 mode
495 */ 500 */
496 501 ret = 0;
497 val_stat = ipath_sd_setvals(dd); 502 if (qib_sd_setvals(dd) >= 0)
498 if (val_stat < 0) 503 goto done;
499 ret = 1; 504bail:
505 ret = 1;
500done: 506done:
501 /* start relock timer regardless, but start at 1 second */ 507 /* start relock timer regardless, but start at 1 second */
502 ipath_set_relock_poll(dd, -1); 508 set_7220_relock_poll(dd, -1);
503 return ret; 509 return ret;
504} 510}
505 511
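One structural point of the conversion above: qib_sd7220_init() replaces the repeated "ret = 1; goto done;" pairs with a single bail: label that maps any negative step result to the generic failure code, while done: still restarts the relock timer on every path. A compilable sketch of that error-handling shape, with stand-in step functions:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -1; }  /* pretend this step fails */

int init_sequence(void)
{
        int ret;

        ret = step_a();
        if (ret < 0)
                goto bail;
        ret = step_b();
        if (ret < 0)
                goto bail;
        ret = 0;                /* all steps succeeded */
        goto done;
bail:
        ret = 1;                /* one place maps any error to "failed" */
done:
        /* cleanup that must run on both paths goes here */
        return ret;
}

int main(void)
{
        printf("init_sequence() = %d\n", init_sequence());
        return 0;
}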
@@ -517,7 +523,7 @@ done:
517 * the "claim" parameter is >0 to claim, <0 to release, 0 to query. 523 * the "claim" parameter is >0 to claim, <0 to release, 0 to query.
518 * Returns <0 for errors, >0 if we had ownership, else 0. 524 * Returns <0 for errors, >0 if we had ownership, else 0.
519 */ 525 */
520static int epb_access(struct ipath_devdata *dd, int sdnum, int claim) 526static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
521{ 527{
522 u16 acc; 528 u16 acc;
523 u64 accval; 529 u64 accval;
@@ -525,28 +531,30 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
525 u64 oct_sel = 0; 531 u64 oct_sel = 0;
526 532
527 switch (sdnum) { 533 switch (sdnum) {
528 case IB_7220_SERDES : 534 case IB_7220_SERDES:
529 /* 535 /*
530	 * The IB SERDES "ownership" is fairly simple: a single 536	 * The IB SERDES "ownership" is fairly simple: a single
531	 * request/grant pair. 537	 * request/grant pair.
532 */ 538 */
533 acc = dd->ipath_kregs->kr_ib_epbacc; 539 acc = kr_ibsd_epb_access_ctrl;
534 break; 540 break;
535 case PCIE_SERDES0 : 541
536 case PCIE_SERDES1 : 542 case PCIE_SERDES0:
543 case PCIE_SERDES1:
537 /* PCIe SERDES has two "octants", need to select which */ 544 /* PCIe SERDES has two "octants", need to select which */
538 acc = dd->ipath_kregs->kr_pcie_epbacc; 545 acc = kr_pciesd_epb_access_ctrl;
539 oct_sel = (2 << (sdnum - PCIE_SERDES0)); 546 oct_sel = (2 << (sdnum - PCIE_SERDES0));
540 break; 547 break;
541 default : 548
549 default:
542 return 0; 550 return 0;
543 } 551 }
544 552
545 /* Make sure any outstanding transaction was seen */ 553 /* Make sure any outstanding transaction was seen */
546 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); 554 qib_read_kreg32(dd, kr_scratch);
547 udelay(15); 555 udelay(15);
548 556
549 accval = ipath_read_kreg32(dd, acc); 557 accval = qib_read_kreg32(dd, acc);
550 558
551 owned = !!(accval & EPB_ACC_GNT); 559 owned = !!(accval & EPB_ACC_GNT);
552 if (claim < 0) { 560 if (claim < 0) {
@@ -557,22 +565,22 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
557 * Both should be clear 565 * Both should be clear
558 */ 566 */
559 u64 newval = 0; 567 u64 newval = 0;
560 ipath_write_kreg(dd, acc, newval); 568 qib_write_kreg(dd, acc, newval);
561 /* First read after write is not trustworthy */ 569 /* First read after write is not trustworthy */
562 pollval = ipath_read_kreg32(dd, acc); 570 pollval = qib_read_kreg32(dd, acc);
563 udelay(5); 571 udelay(5);
564 pollval = ipath_read_kreg32(dd, acc); 572 pollval = qib_read_kreg32(dd, acc);
565 if (pollval & EPB_ACC_GNT) 573 if (pollval & EPB_ACC_GNT)
566 owned = -1; 574 owned = -1;
567 } else if (claim > 0) { 575 } else if (claim > 0) {
568 /* Need to claim */ 576 /* Need to claim */
569 u64 pollval; 577 u64 pollval;
570 u64 newval = EPB_ACC_REQ | oct_sel; 578 u64 newval = EPB_ACC_REQ | oct_sel;
571 ipath_write_kreg(dd, acc, newval); 579 qib_write_kreg(dd, acc, newval);
572 /* First read after write is not trustworthy */ 580 /* First read after write is not trustworthy */
573 pollval = ipath_read_kreg32(dd, acc); 581 pollval = qib_read_kreg32(dd, acc);
574 udelay(5); 582 udelay(5);
575 pollval = ipath_read_kreg32(dd, acc); 583 pollval = qib_read_kreg32(dd, acc);
576 if (!(pollval & EPB_ACC_GNT)) 584 if (!(pollval & EPB_ACC_GNT))
577 owned = -1; 585 owned = -1;
578 } 586 }
@@ -582,18 +590,17 @@ static int epb_access(struct ipath_devdata *dd, int sdnum, int claim)
582/* 590/*
583 * Lemma to deal with race condition of write..read to epb regs 591 * Lemma to deal with race condition of write..read to epb regs
584 */ 592 */
585static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp) 593static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
586{ 594{
587 int tries; 595 int tries;
588 u64 transval; 596 u64 transval;
589 597
590 598 qib_write_kreg(dd, reg, i_val);
591 ipath_write_kreg(dd, reg, i_val);
592 /* Throw away first read, as RDY bit may be stale */ 599 /* Throw away first read, as RDY bit may be stale */
593 transval = ipath_read_kreg64(dd, reg); 600 transval = qib_read_kreg64(dd, reg);
594 601
595 for (tries = EPB_TRANS_TRIES; tries; --tries) { 602 for (tries = EPB_TRANS_TRIES; tries; --tries) {
596 transval = ipath_read_kreg32(dd, reg); 603 transval = qib_read_kreg32(dd, reg);
597 if (transval & EPB_TRANS_RDY) 604 if (transval & EPB_TRANS_RDY)
598 break; 605 break;
599 udelay(5); 606 udelay(5);
@@ -606,21 +613,20 @@ static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
606} 613}
607 614
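epb_trans() deliberately throws away the first read after the write (the RDY bit may still reflect the previous transaction) and then polls a bounded number of times; by convention the leftover tries count is the result, so zero means timeout. A runnable sketch of that convention against a fake register:

#include <stdio.h>

#define TRANS_TRIES 5
#define TRANS_RDY   0x1

static int fake_reg_reads;      /* RDY appears on the third read */
static unsigned read_reg(void)
{
        return ++fake_reg_reads >= 3 ? TRANS_RDY : 0;
}

static int poll_rdy(void)
{
        int tries;

        (void)read_reg();       /* throw away first read: RDY may be stale */
        for (tries = TRANS_TRIES; tries; --tries) {
                if (read_reg() & TRANS_RDY)
                        break;
                /* udelay(5) would go here in the driver */
        }
        return tries;           /* 0 => timed out, >0 => ready */
}

int main(void)
{
        printf("tries left: %d\n", poll_rdy());
        return 0;
}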
608/** 615/**
609 * 616 * qib_sd7220_reg_mod - modify SERDES register
610 * ipath_sd7220_reg_mod - modify SERDES register 617 * @dd: the qlogic_ib device
611 * @dd: the infinipath device
612 * @sdnum: which SERDES to access 618 * @sdnum: which SERDES to access
613 * @loc: location - channel, element, register, as packed by EPB_LOC() macro. 619 * @loc: location - channel, element, register, as packed by EPB_LOC() macro.
614 * @wd: Write Data - value to set in register 620 * @wd: Write Data - value to set in register
615 * @mask: ones where data should be spliced into reg. 621 * @mask: ones where data should be spliced into reg.
616 * 622 *
617	 * Basic register read/modify/write, with un-needed accesses elided. That is, 623	 * Basic register read/modify/write, with un-needed accesses elided. That is,
618 * a mask of zero will prevent write, while a mask of 0xFF will prevent read. 624 * a mask of zero will prevent write, while a mask of 0xFF will prevent read.
619 * returns current (presumed, if a write was done) contents of selected 625 * returns current (presumed, if a write was done) contents of selected
620 * register, or <0 if errors. 626 * register, or <0 if errors.
621 */ 627 */
622static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc, 628static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
623 u32 wd, u32 mask) 629 u32 wd, u32 mask)
624{ 630{
625 u16 trans; 631 u16 trans;
626 u64 transval; 632 u64 transval;
@@ -629,14 +635,16 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
629 unsigned long flags; 635 unsigned long flags;
630 636
631 switch (sdnum) { 637 switch (sdnum) {
632 case IB_7220_SERDES : 638 case IB_7220_SERDES:
633 trans = dd->ipath_kregs->kr_ib_epbtrans; 639 trans = kr_ibsd_epb_transaction_reg;
634 break; 640 break;
635 case PCIE_SERDES0 : 641
636 case PCIE_SERDES1 : 642 case PCIE_SERDES0:
637 trans = dd->ipath_kregs->kr_pcie_epbtrans; 643 case PCIE_SERDES1:
644 trans = kr_pciesd_epb_transaction_reg;
638 break; 645 break;
639 default : 646
647 default:
640 return -1; 648 return -1;
641 } 649 }
642 650
@@ -644,23 +652,23 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
644 * All access is locked in software (vs other host threads) and 652 * All access is locked in software (vs other host threads) and
645 * hardware (vs uC access). 653 * hardware (vs uC access).
646 */ 654 */
647 spin_lock_irqsave(&dd->ipath_sdepb_lock, flags); 655 spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
648 656
649 owned = epb_access(dd, sdnum, 1); 657 owned = epb_access(dd, sdnum, 1);
650 if (owned < 0) { 658 if (owned < 0) {
651 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); 659 spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
652 return -1; 660 return -1;
653 } 661 }
654 ret = 0; 662 ret = 0;
655 for (tries = EPB_TRANS_TRIES; tries; --tries) { 663 for (tries = EPB_TRANS_TRIES; tries; --tries) {
656 transval = ipath_read_kreg32(dd, trans); 664 transval = qib_read_kreg32(dd, trans);
657 if (transval & EPB_TRANS_RDY) 665 if (transval & EPB_TRANS_RDY)
658 break; 666 break;
659 udelay(5); 667 udelay(5);
660 } 668 }
661 669
662 if (tries > 0) { 670 if (tries > 0) {
663 tries = 1; /* to make read-skip work */ 671 tries = 1; /* to make read-skip work */
664 if (mask != 0xFF) { 672 if (mask != 0xFF) {
665 /* 673 /*
666 * Not a pure write, so need to read. 674 * Not a pure write, so need to read.
@@ -688,7 +696,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
688 else 696 else
689 ret = transval & EPB_DATA_MASK; 697 ret = transval & EPB_DATA_MASK;
690 698
691 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); 699 spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
692 if (tries <= 0) 700 if (tries <= 0)
693 ret = -1; 701 ret = -1;
694 return ret; 702 return ret;
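The mask behavior documented for qib_sd7220_reg_mod() reduces to one splice expression, the same one used later in ibsd_mod_allchnls(): bits selected by the mask come from the new write data, everything else keeps the current register contents. For example:

#include <stdio.h>

int main(void)
{
        unsigned old = 0xA5, wd = 0xFF, mask = 0x0F;
        /* Same splice as: val = (ret & ~mask) | (val & mask); */
        unsigned val = (old & ~mask) | (wd & mask);

        printf("%#x\n", val);   /* 0xaf: low nibble replaced, high kept */
        return 0;
}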
@@ -707,7 +715,7 @@ static int ipath_sd7220_reg_mod(struct ipath_devdata *dd, int sdnum, u32 loc,
707#define EPB_RAMDATA EPB_LOC(6, 0, 5) 715#define EPB_RAMDATA EPB_LOC(6, 0, 5)
708 716
709/* Transfer data to/from uC Program RAM of IB or PCIe SerDes */ 717
710static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc, 718static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
711 u8 *buf, int cnt, int rd_notwr) 719 u8 *buf, int cnt, int rd_notwr)
712{ 720{
713 u16 trans; 721 u16 trans;
@@ -723,29 +731,28 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
723 731
724 /* Pick appropriate transaction reg and "Chip select" for this serdes */ 732 /* Pick appropriate transaction reg and "Chip select" for this serdes */
725 switch (sdnum) { 733 switch (sdnum) {
726 case IB_7220_SERDES : 734 case IB_7220_SERDES:
727 csbit = 1ULL << EPB_IB_UC_CS_SHF; 735 csbit = 1ULL << EPB_IB_UC_CS_SHF;
728 trans = dd->ipath_kregs->kr_ib_epbtrans; 736 trans = kr_ibsd_epb_transaction_reg;
729 break; 737 break;
730 case PCIE_SERDES0 : 738
731 case PCIE_SERDES1 : 739 case PCIE_SERDES0:
740 case PCIE_SERDES1:
732 /* PCIe SERDES has uC "chip select" in different bit, too */ 741 /* PCIe SERDES has uC "chip select" in different bit, too */
733 csbit = 1ULL << EPB_PCIE_UC_CS_SHF; 742 csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
734 trans = dd->ipath_kregs->kr_pcie_epbtrans; 743 trans = kr_pciesd_epb_transaction_reg;
735 break; 744 break;
736 default : 745
746 default:
737 return -1; 747 return -1;
738 } 748 }
739 749
740 op = rd_notwr ? "Rd" : "Wr"; 750 op = rd_notwr ? "Rd" : "Wr";
741 spin_lock_irqsave(&dd->ipath_sdepb_lock, flags); 751 spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
742 752
743 owned = epb_access(dd, sdnum, 1); 753 owned = epb_access(dd, sdnum, 1);
744 if (owned < 0) { 754 if (owned < 0) {
745 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); 755 spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
746 ipath_dbg("Could not get %s access to %s EPB: %X, loc %X\n",
747 op, (sdnum == IB_7220_SERDES) ? "IB" : "PCIe",
748 owned, loc);
749 return -1; 756 return -1;
750 } 757 }
751 758
@@ -758,16 +765,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
758 */ 765 */
759 addr = loc & 0x1FFF; 766 addr = loc & 0x1FFF;
760 for (tries = EPB_TRANS_TRIES; tries; --tries) { 767 for (tries = EPB_TRANS_TRIES; tries; --tries) {
761 transval = ipath_read_kreg32(dd, trans); 768 transval = qib_read_kreg32(dd, trans);
762 if (transval & EPB_TRANS_RDY) 769 if (transval & EPB_TRANS_RDY)
763 break; 770 break;
764 udelay(5); 771 udelay(5);
765 } 772 }
766 773
767 sofar = 0; 774 sofar = 0;
768 if (tries <= 0) 775 if (tries > 0) {
769 ipath_dbg("No initial RDY on EPB access request\n");
770 else {
771 /* 776 /*
772 * Every "memory" access is doubly-indirect. 777 * Every "memory" access is doubly-indirect.
773 * We set two bytes of address, then read/write 778 * We set two bytes of address, then read/write
@@ -778,8 +783,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
778 transval = csbit | EPB_UC_CTL | 783 transval = csbit | EPB_UC_CTL |
779 (rd_notwr ? EPB_ROM_R : EPB_ROM_W); 784 (rd_notwr ? EPB_ROM_R : EPB_ROM_W);
780 tries = epb_trans(dd, trans, transval, &transval); 785 tries = epb_trans(dd, trans, transval, &transval);
781 if (tries <= 0)
782 ipath_dbg("No EPB response to uC %s cmd\n", op);
783 while (tries > 0 && sofar < cnt) { 786 while (tries > 0 && sofar < cnt) {
784 if (!sofar) { 787 if (!sofar) {
785 /* Only set address at start of chunk */ 788 /* Only set address at start of chunk */
@@ -787,18 +790,14 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
787 transval = csbit | EPB_MADDRH | addrbyte; 790 transval = csbit | EPB_MADDRH | addrbyte;
788 tries = epb_trans(dd, trans, transval, 791 tries = epb_trans(dd, trans, transval,
789 &transval); 792 &transval);
790 if (tries <= 0) { 793 if (tries <= 0)
791 ipath_dbg("No EPB response ADDRH\n");
792 break; 794 break;
793 }
794 addrbyte = (addr + sofar) & 0xFF; 795 addrbyte = (addr + sofar) & 0xFF;
795 transval = csbit | EPB_MADDRL | addrbyte; 796 transval = csbit | EPB_MADDRL | addrbyte;
796 tries = epb_trans(dd, trans, transval, 797 tries = epb_trans(dd, trans, transval,
797 &transval); 798 &transval);
798 if (tries <= 0) { 799 if (tries <= 0)
799 ipath_dbg("No EPB response ADDRL\n");
800 break; 800 break;
801 }
802 } 801 }
803 802
804 if (rd_notwr) 803 if (rd_notwr)
@@ -806,10 +805,8 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
806 else 805 else
807 transval = csbit | EPB_ROMDATA | buf[sofar]; 806 transval = csbit | EPB_ROMDATA | buf[sofar];
808 tries = epb_trans(dd, trans, transval, &transval); 807 tries = epb_trans(dd, trans, transval, &transval);
809 if (tries <= 0) { 808 if (tries <= 0)
810 ipath_dbg("No EPB response DATA\n");
811 break; 809 break;
812 }
813 if (rd_notwr) 810 if (rd_notwr)
814 buf[sofar] = transval & EPB_DATA_MASK; 811 buf[sofar] = transval & EPB_DATA_MASK;
815 ++sofar; 812 ++sofar;
@@ -817,8 +814,6 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
817 /* Finally, clear control-bit for Read or Write */ 814 /* Finally, clear control-bit for Read or Write */
818 transval = csbit | EPB_UC_CTL; 815 transval = csbit | EPB_UC_CTL;
819 tries = epb_trans(dd, trans, transval, &transval); 816 tries = epb_trans(dd, trans, transval, &transval);
820 if (tries <= 0)
821 ipath_dbg("No EPB response to drop of uC %s cmd\n", op);
822 } 817 }
823 818
824 ret = sofar; 819 ret = sofar;
@@ -826,18 +821,16 @@ static int ipath_sd7220_ram_xfer(struct ipath_devdata *dd, int sdnum, u32 loc,
826 if (epb_access(dd, sdnum, -1) < 0) 821 if (epb_access(dd, sdnum, -1) < 0)
827 ret = -1; 822 ret = -1;
828 823
829 spin_unlock_irqrestore(&dd->ipath_sdepb_lock, flags); 824 spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
830 if (tries <= 0) { 825 if (tries <= 0)
831 ipath_dbg("SERDES PRAM %s failed after %d bytes\n", op, sofar);
832 ret = -1; 826 ret = -1;
833 }
834 return ret; 827 return ret;
835} 828}
836 829
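The doubly-indirect protocol in qib_sd7220_ram_xfer() programs the RAM address one byte at a time (MADDRH, then MADDRL) before streaming data bytes, and only does so at the start of each chunk. The address split itself is ordinary shift-and-mask arithmetic, assuming the high byte is formed the same way as the low-byte line visible above:

#include <stdio.h>

int main(void)
{
        unsigned addr = 0x1234 & 0x1FFF;        /* EPB RAM addresses are 13 bits */
        unsigned sofar = 0;                     /* offset within the chunk */
        unsigned hi = ((addr + sofar) >> 8) & 0xFF;     /* goes to EPB_MADDRH */
        unsigned lo = (addr + sofar) & 0xFF;            /* goes to EPB_MADDRL */

        printf("MADDRH=%#04x MADDRL=%#04x\n", hi, lo);  /* 0x12, 0x34 */
        return 0;
}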
837#define PROG_CHUNK 64 830#define PROG_CHUNK 64
838 831
839int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, 832int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
840 u8 *img, int len, int offset) 833 u8 *img, int len, int offset)
841{ 834{
842 int cnt, sofar, req; 835 int cnt, sofar, req;
843 836
@@ -846,7 +839,7 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
846 req = len - sofar; 839 req = len - sofar;
847 if (req > PROG_CHUNK) 840 if (req > PROG_CHUNK)
848 req = PROG_CHUNK; 841 req = PROG_CHUNK;
849 cnt = ipath_sd7220_ram_xfer(dd, sdnum, offset + sofar, 842 cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
850 img + sofar, req, 0); 843 img + sofar, req, 0);
851 if (cnt < req) { 844 if (cnt < req) {
852 sofar = -1; 845 sofar = -1;
@@ -860,8 +853,8 @@ int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum,
860#define VFY_CHUNK 64 853#define VFY_CHUNK 64
861#define SD_PRAM_ERROR_LIMIT 42 854#define SD_PRAM_ERROR_LIMIT 42
862 855
863int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, 856int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
864 const u8 *img, int len, int offset) 857 const u8 *img, int len, int offset)
865{ 858{
866 int cnt, sofar, req, idx, errors; 859 int cnt, sofar, req, idx, errors;
867 unsigned char readback[VFY_CHUNK]; 860 unsigned char readback[VFY_CHUNK];
@@ -872,7 +865,7 @@ int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
872 req = len - sofar; 865 req = len - sofar;
873 if (req > VFY_CHUNK) 866 if (req > VFY_CHUNK)
874 req = VFY_CHUNK; 867 req = VFY_CHUNK;
875 cnt = ipath_sd7220_ram_xfer(dd, sdnum, sofar + offset, 868 cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
876 readback, req, 1); 869 readback, req, 1);
877 if (cnt < req) { 870 if (cnt < req) {
878 /* failed in read itself */ 871 /* failed in read itself */
@@ -888,11 +881,13 @@ int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum,
888 return errors ? -errors : sofar; 881 return errors ? -errors : sofar;
889} 882}
890 883
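qib_sd7220_prog_ld() and qib_sd7220_prog_vfy() share the same chunked-transfer skeleton: request at most PROG_CHUNK/VFY_CHUNK (64) bytes per call, clamp the final request to what remains, and treat a short transfer as failure. A sketch with a trivial stand-in for the RAM-transfer call:

#include <stdio.h>

#define CHUNK 64

/* Stand-in for qib_sd7220_ram_xfer(): pretend every byte transfers */
static int xfer(int offset, int req) { (void)offset; return req; }

int main(void)
{
        int len = 150, sofar = 0;

        while (sofar < len) {
                int req = len - sofar;

                if (req > CHUNK)
                        req = CHUNK;    /* clamp the tail chunk */
                if (xfer(sofar, req) < req) {
                        sofar = -1;     /* partial transfer: report failure */
                        break;
                }
                sofar += req;
        }
        printf("transferred: %d\n", sofar);     /* 150 */
        return 0;
}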
891/* IRQ not set up at this point in init, so we poll. */ 884/*
885 * IRQ not set up at this point in init, so we poll.
886 */
892#define IB_SERDES_TRIM_DONE (1ULL << 11) 887#define IB_SERDES_TRIM_DONE (1ULL << 11)
893#define TRIM_TMO (30) 888#define TRIM_TMO (30)
894 889
895static int ipath_sd_trimdone_poll(struct ipath_devdata *dd) 890static int qib_sd_trimdone_poll(struct qib_devdata *dd)
896{ 891{
897 int trim_tmo, ret; 892 int trim_tmo, ret;
898 uint64_t val; 893 uint64_t val;
@@ -903,16 +898,15 @@ static int ipath_sd_trimdone_poll(struct ipath_devdata *dd)
903 */ 898 */
904 ret = 0; 899 ret = 0;
905 for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) { 900 for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
906 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus); 901 val = qib_read_kreg64(dd, kr_ibcstatus);
907 if (val & IB_SERDES_TRIM_DONE) { 902 if (val & IB_SERDES_TRIM_DONE) {
908 ipath_cdbg(VERBOSE, "TRIMDONE after %d\n", trim_tmo);
909 ret = 1; 903 ret = 1;
910 break; 904 break;
911 } 905 }
912 msleep(10); 906 msleep(10);
913 } 907 }
914 if (trim_tmo >= TRIM_TMO) { 908 if (trim_tmo >= TRIM_TMO) {
915 ipath_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo); 909 qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
916 ret = 0; 910 ret = 0;
917 } 911 }
918 return ret; 912 return ret;
@@ -964,8 +958,7 @@ static struct dds_init {
964}; 958};
965 959
966/* 960/*
967 * Next, values related to Receive Equalization. 961 * Now the RXEQ section of the table.
968 * In comments, FDR (Full) is IB DDR, HDR (Half) is IB SDR
969 */ 962 */
970/* Hardware packs an element number and register address thus: */ 963/* Hardware packs an element number and register address thus: */
971#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4)) 964#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
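RXEQ_INIT_RDESC() packs the 4-bit element number into the low nibble and the register address into the bits above it; set_rxeq_vals() later unpacks with & 0xF and >> 4. A quick check of the round trip:

#include <stdio.h>

#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))

int main(void)
{
        unsigned rdesc = RXEQ_INIT_RDESC(7, 0x27);

        printf("rdesc=%#x elt=%u addr=%#x\n",
               rdesc, rdesc & 0xF, rdesc >> 4); /* 0x277, 7, 0x27 */
        return 0;
}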
@@ -981,23 +974,23 @@ static struct dds_init {
981#define RXEQ_SDR_ZCNT 23 974#define RXEQ_SDR_ZCNT 23
982 975
983static struct rxeq_init { 976static struct rxeq_init {
984 u16 rdesc; /* in form used in SerDesDDSRXEQ */ 977 u16 rdesc; /* in form used in SerDesDDSRXEQ */
985 u8 rdata[4]; 978 u8 rdata[4];
986} rxeq_init_vals[] = { 979} rxeq_init_vals[] = {
987 /* Set Rcv Eq. to Preset node */ 980 /* Set Rcv Eq. to Preset node */
988 RXEQ_VAL_ALL(7, 0x27, 0x10), 981 RXEQ_VAL_ALL(7, 0x27, 0x10),
989 /* Set DFELTHFDR/HDR thresholds */ 982 /* Set DFELTHFDR/HDR thresholds */
990 RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR */ 983 RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */
991 RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */ 984 RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
992	 /* Set TLTHFDR/HDR threshold */ 985	 /* Set TLTHFDR/HDR threshold */
993 RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR */ 986 RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */
994 RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR */ 987 RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was 0, 1, 2, 3 */
995 /* Set Preamp setting 2 (ZFR/ZCNT) */ 988 /* Set Preamp setting 2 (ZFR/ZCNT) */
996 RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR */ 989 RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */
997 RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR */ 990 RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */
998 /* Set Preamp DC gain and Setting 1 (GFR/GHR) */ 991 /* Set Preamp DC gain and Setting 1 (GFR/GHR) */
999 RXEQ_VAL(7, 0x1E, 0x10, 0x10, 0x10, 0x10), /* FDR */ 992 RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */
1000 RXEQ_VAL(7, 0x1F, 0x10, 0x10, 0x10, 0x10), /* HDR */ 993 RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */
1001 /* Toggle RELOCK (in VCDL_CTRL0) to lock to data */ 994 /* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
1002 RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */ 995 RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
1003 RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */ 996 RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
@@ -1007,27 +1000,27 @@ static struct rxeq_init {
1007#define DDS_ROWS (16) 1000#define DDS_ROWS (16)
1008#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals) 1001#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
1009 1002
1010static int ipath_sd_setvals(struct ipath_devdata *dd) 1003static int qib_sd_setvals(struct qib_devdata *dd)
1011{ 1004{
1012 int idx, midx; 1005 int idx, midx;
1013 int min_idx; /* Minimum index for this portion of table */ 1006 int min_idx; /* Minimum index for this portion of table */
1014 uint32_t dds_reg_map; 1007 uint32_t dds_reg_map;
1015 u64 __iomem *taddr, *iaddr; 1008 u64 __iomem *taddr, *iaddr;
1016 uint64_t data; 1009 uint64_t data;
1017 uint64_t sdctl; 1010 uint64_t sdctl;
1018 1011
1019 taddr = dd->ipath_kregbase + KR_IBSerDesMappTable; 1012 taddr = dd->kregbase + kr_serdes_maptable;
1020 iaddr = dd->ipath_kregbase + dd->ipath_kregs->kr_ib_ddsrxeq; 1013 iaddr = dd->kregbase + kr_serdes_ddsrxeq0;
1021 1014
1022 /* 1015 /*
1023 * Init the DDS section of the table. 1016 * Init the DDS section of the table.
1024 * Each "row" of the table provokes NUM_DDS_REG writes, to the 1017 * Each "row" of the table provokes NUM_DDS_REG writes, to the
1025 * registers indicated in DDS_REG_MAP. 1018 * registers indicated in DDS_REG_MAP.
1026 */ 1019 */
1027 sdctl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibserdesctrl); 1020 sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
1028 sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8); 1021 sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
1029 sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13); 1022 sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
1030 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibserdesctrl, sdctl); 1023 qib_write_kreg(dd, kr_ibserdesctrl, sdctl);
1031 1024
1032 /* 1025 /*
1033 * Iterate down table within loop for each register to store. 1026 * Iterate down table within loop for each register to store.
@@ -1037,21 +1030,21 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
1037 data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT; 1030 data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
1038 writeq(data, iaddr + idx); 1031 writeq(data, iaddr + idx);
1039 mmiowb(); 1032 mmiowb();
1040 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); 1033 qib_read_kreg32(dd, kr_scratch);
1041 dds_reg_map >>= 4; 1034 dds_reg_map >>= 4;
1042 for (midx = 0; midx < DDS_ROWS; ++midx) { 1035 for (midx = 0; midx < DDS_ROWS; ++midx) {
1043 u64 __iomem *daddr = taddr + ((midx << 4) + idx); 1036 u64 __iomem *daddr = taddr + ((midx << 4) + idx);
1044 data = dds_init_vals[midx].reg_vals[idx]; 1037 data = dds_init_vals[midx].reg_vals[idx];
1045 writeq(data, daddr); 1038 writeq(data, daddr);
1046 mmiowb(); 1039 mmiowb();
1047 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); 1040 qib_read_kreg32(dd, kr_scratch);
1048 } /* End inner for (vals for this reg, each row) */ 1041 } /* End inner for (vals for this reg, each row) */
1049 } /* end outer for (regs to be stored) */ 1042 } /* end outer for (regs to be stored) */
1050 1043
1051 /* 1044 /*
1052 * Init the RXEQ section of the table. As explained above the table 1045 * Init the RXEQ section of the table.
1053 * rxeq_init_vals[], this runs in a different order, as the pattern 1046 * This runs in a different order, as the pattern of
1054 * of register references is more complex, but there are only 1047 * register references is more complex, but there are only
1055 * four "data" values per register. 1048 * four "data" values per register.
1056 */ 1049 */
1057 min_idx = idx; /* RXEQ indices pick up where DDS left off */ 1050 min_idx = idx; /* RXEQ indices pick up where DDS left off */
@@ -1066,13 +1059,13 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
1066 /* Store the next RXEQ register address */ 1059 /* Store the next RXEQ register address */
1067 writeq(rxeq_init_vals[idx].rdesc, iaddr + didx); 1060 writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
1068 mmiowb(); 1061 mmiowb();
1069 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); 1062 qib_read_kreg32(dd, kr_scratch);
1070 /* Iterate through RXEQ values */ 1063 /* Iterate through RXEQ values */
1071 for (vidx = 0; vidx < 4; vidx++) { 1064 for (vidx = 0; vidx < 4; vidx++) {
1072 data = rxeq_init_vals[idx].rdata[vidx]; 1065 data = rxeq_init_vals[idx].rdata[vidx];
1073 writeq(data, taddr + (vidx << 6) + idx); 1066 writeq(data, taddr + (vidx << 6) + idx);
1074 mmiowb(); 1067 mmiowb();
1075 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); 1068 qib_read_kreg32(dd, kr_scratch);
1076 } 1069 }
1077 } /* end outer for (Reg-writes for RXEQ) */ 1070 } /* end outer for (Reg-writes for RXEQ) */
1078 return 0; 1071 return 0;
@@ -1085,33 +1078,18 @@ static int ipath_sd_setvals(struct ipath_devdata *dd)
1085#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8) 1078#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
1086#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28) 1079#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
1087 1080
1088static int ibsd_sto_noisy(struct ipath_devdata *dd, int loc, int val, int mask)
1089{
1090 int ret = -1;
1091 int sloc; /* shifted loc, for messages */
1092
1093 loc |= (1U << EPB_IB_QUAD0_CS_SHF);
1094 sloc = loc >> EPB_ADDR_SHF;
1095
1096 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, mask);
1097 if (ret < 0)
1098 ipath_dev_err(dd, "Write failed: elt %d,"
1099 " addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
1100 (sloc & 0xF), (sloc >> 9) & 0x3f, (sloc >> 4) & 7,
1101 val & 0xFF, mask & 0xFF);
1102 return ret;
1103}
1104
1105/* 1081/*
1106 * Repeat a "store" across all channels of the IB SerDes. 1082 * Repeat a "store" across all channels of the IB SerDes.
1107 * Although nominally it inherits the "read value" of the last 1083 * Although nominally it inherits the "read value" of the last
1108 * channel it modified, the only really useful return is <0 for 1084 * channel it modified, the only really useful return is <0 for
1109 * failure, >= 0 for success. The parameter 'loc' is assumed to 1085 * failure, >= 0 for success. The parameter 'loc' is assumed to
1110 * be the location for the channel-0 copy of the register to 1086 * be the location in some channel of the register to be modified
1111 * be modified. 1087 * The caller can specify use of the "gang write" option of EPB,
1088 * in which case we use the specified channel data for any fields
1089	 * not explicitly written.
1112 */ 1090 */
1113static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val, 1091static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
1114 int mask) 1092 int mask)
1115{ 1093{
1116 int ret = -1; 1094 int ret = -1;
1117 int chnl; 1095 int chnl;
@@ -1126,24 +1104,27 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
1126 loc |= (1U << EPB_IB_QUAD0_CS_SHF); 1104 loc |= (1U << EPB_IB_QUAD0_CS_SHF);
1127 chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7; 1105 chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
1128 if (mask != 0xFF) { 1106 if (mask != 0xFF) {
1129 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, 1107 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
1130 loc & ~EPB_GLOBAL_WR, 0, 0); 1108 loc & ~EPB_GLOBAL_WR, 0, 0);
1131 if (ret < 0) { 1109 if (ret < 0) {
1132 int sloc = loc >> EPB_ADDR_SHF; 1110 int sloc = loc >> EPB_ADDR_SHF;
1133 ipath_dev_err(dd, "pre-read failed: elt %d," 1111
1134 " addr 0x%X, chnl %d\n", (sloc & 0xF), 1112 qib_dev_err(dd, "pre-read failed: elt %d,"
1135 (sloc >> 9) & 0x3f, chnl); 1113 " addr 0x%X, chnl %d\n",
1114 (sloc & 0xF),
1115 (sloc >> 9) & 0x3f, chnl);
1136 return ret; 1116 return ret;
1137 } 1117 }
1138 val = (ret & ~mask) | (val & mask); 1118 val = (ret & ~mask) | (val & mask);
1139 } 1119 }
1140 loc &= ~(7 << (4+EPB_ADDR_SHF)); 1120 loc &= ~(7 << (4+EPB_ADDR_SHF));
1141 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF); 1121 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
1142 if (ret < 0) { 1122 if (ret < 0) {
1143 int sloc = loc >> EPB_ADDR_SHF; 1123 int sloc = loc >> EPB_ADDR_SHF;
1144 ipath_dev_err(dd, "Global WR failed: elt %d," 1124
1145 " addr 0x%X, val %02X\n", 1125 qib_dev_err(dd, "Global WR failed: elt %d,"
1146 (sloc & 0xF), (sloc >> 9) & 0x3f, val); 1126 " addr 0x%X, val %02X\n",
1127 (sloc & 0xF), (sloc >> 9) & 0x3f, val);
1147 } 1128 }
1148 return ret; 1129 return ret;
1149 } 1130 }
@@ -1151,16 +1132,17 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
1151 loc &= ~(7 << (4+EPB_ADDR_SHF)); 1132 loc &= ~(7 << (4+EPB_ADDR_SHF));
1152 loc |= (1U << EPB_IB_QUAD0_CS_SHF); 1133 loc |= (1U << EPB_IB_QUAD0_CS_SHF);
1153 for (chnl = 0; chnl < 4; ++chnl) { 1134 for (chnl = 0; chnl < 4; ++chnl) {
1154 int cloc; 1135 int cloc = loc | (chnl << (4+EPB_ADDR_SHF));
1155 cloc = loc | (chnl << (4+EPB_ADDR_SHF)); 1136
1156 ret = ipath_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask); 1137 ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
1157 if (ret < 0) { 1138 if (ret < 0) {
1158 int sloc = loc >> EPB_ADDR_SHF; 1139 int sloc = loc >> EPB_ADDR_SHF;
1159 ipath_dev_err(dd, "Write failed: elt %d," 1140
1160 " addr 0x%X, chnl %d, val 0x%02X," 1141 qib_dev_err(dd, "Write failed: elt %d,"
1161 " mask 0x%02X\n", 1142 " addr 0x%X, chnl %d, val 0x%02X,"
1162 (sloc & 0xF), (sloc >> 9) & 0x3f, chnl, 1143 " mask 0x%02X\n",
1163 val & 0xFF, mask & 0xFF); 1144 (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
1145 val & 0xFF, mask & 0xFF);
1164 break; 1146 break;
1165 } 1147 }
1166 } 1148 }
@@ -1171,7 +1153,7 @@ static int ibsd_mod_allchnls(struct ipath_devdata *dd, int loc, int val,
1171 * Set the Tx values normally modified by IBC in IB1.2 mode to default 1153 * Set the Tx values normally modified by IBC in IB1.2 mode to default
1172 * values, as gotten from first row of init table. 1154 * values, as gotten from first row of init table.
1173 */ 1155 */
1174static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi) 1156static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)
1175{ 1157{
1176 int ret; 1158 int ret;
1177 int idx, reg, data; 1159 int idx, reg, data;
@@ -1194,7 +1176,7 @@ static int set_dds_vals(struct ipath_devdata *dd, struct dds_init *ddi)
1194 * Set the Rx values normally modified by IBC in IB1.2 mode to default 1176 * Set the Rx values normally modified by IBC in IB1.2 mode to default
1195 * values, as gotten from selected column of init table. 1177 * values, as gotten from selected column of init table.
1196 */ 1178 */
1197static int set_rxeq_vals(struct ipath_devdata *dd, int vsel) 1179static int set_rxeq_vals(struct qib_devdata *dd, int vsel)
1198{ 1180{
1199 int ret; 1181 int ret;
1200 int ridx; 1182 int ridx;
@@ -1202,6 +1184,7 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
1202 1184
1203 for (ridx = 0; ridx < cnt; ++ridx) { 1185 for (ridx = 0; ridx < cnt; ++ridx) {
1204 int elt, reg, val, loc; 1186 int elt, reg, val, loc;
1187
1205 elt = rxeq_init_vals[ridx].rdesc & 0xF; 1188 elt = rxeq_init_vals[ridx].rdesc & 0xF;
1206 reg = rxeq_init_vals[ridx].rdesc >> 4; 1189 reg = rxeq_init_vals[ridx].rdesc >> 4;
1207 loc = EPB_LOC(0, elt, reg); 1190 loc = EPB_LOC(0, elt, reg);
@@ -1217,83 +1200,66 @@ static int set_rxeq_vals(struct ipath_devdata *dd, int vsel)
1217/* 1200/*
1218	 * Set the default values (row 0) for DDR Driver De-emphasis. 1201	 * Set the default values (row 0) for DDR Driver De-emphasis.
1219	 * We do this initially and whenever we turn off IB-1.2. 1202	 * We do this initially and whenever we turn off IB-1.2.
1203 *
1220 * The "default" values for Rx equalization are also stored to 1204 * The "default" values for Rx equalization are also stored to
1221 * SerDes registers. Formerly (and still default), we used set 2. 1205 * SerDes registers. Formerly (and still default), we used set 2.
1222 * For experimenting with cables and link-partners, we allow changing 1206 * For experimenting with cables and link-partners, we allow changing
1223 * that via a module parameter. 1207 * that via a module parameter.
1224 */ 1208 */
1225static unsigned ipath_rxeq_set = 2; 1209static unsigned qib_rxeq_set = 2;
1226module_param_named(rxeq_default_set, ipath_rxeq_set, uint, 1210module_param_named(rxeq_default_set, qib_rxeq_set, uint,
1227 S_IWUSR | S_IRUGO); 1211 S_IWUSR | S_IRUGO);
1228MODULE_PARM_DESC(rxeq_default_set, 1212MODULE_PARM_DESC(rxeq_default_set,
1229 "Which set [0..3] of Rx Equalization values is default"); 1213 "Which set [0..3] of Rx Equalization values is default");
1230 1214
1231static int ipath_internal_presets(struct ipath_devdata *dd) 1215static int qib_internal_presets(struct qib_devdata *dd)
1232{ 1216{
1233 int ret = 0; 1217 int ret = 0;
1234 1218
1235 ret = set_dds_vals(dd, dds_init_vals + DDS_3M); 1219 ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
1236 1220
1237 if (ret < 0) 1221 if (ret < 0)
1238 ipath_dev_err(dd, "Failed to set default DDS values\n"); 1222 qib_dev_err(dd, "Failed to set default DDS values\n");
1239 ret = set_rxeq_vals(dd, ipath_rxeq_set & 3); 1223 ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
1240 if (ret < 0) 1224 if (ret < 0)
1241 ipath_dev_err(dd, "Failed to set default RXEQ values\n"); 1225 qib_dev_err(dd, "Failed to set default RXEQ values\n");
1242 return ret; 1226 return ret;
1243} 1227}
1244 1228
1245int ipath_sd7220_presets(struct ipath_devdata *dd) 1229int qib_sd7220_presets(struct qib_devdata *dd)
1246{ 1230{
1247 int ret = 0; 1231 int ret = 0;
1248 1232
1249 if (!dd->ipath_presets_needed) 1233 if (!dd->cspec->presets_needed)
1250 return ret; 1234 return ret;
1251 dd->ipath_presets_needed = 0; 1235 dd->cspec->presets_needed = 0;
1252 /* Assert uC reset, so we don't clash with it. */ 1236 /* Assert uC reset, so we don't clash with it. */
1253 ipath_ibsd_reset(dd, 1); 1237 qib_ibsd_reset(dd, 1);
1254 udelay(2); 1238 udelay(2);
1255 ipath_sd_trimdone_monitor(dd, "link-down"); 1239 qib_sd_trimdone_monitor(dd, "link-down");
1256 1240
1257 ret = ipath_internal_presets(dd); 1241 ret = qib_internal_presets(dd);
1258return ret; 1242 return ret;
1259} 1243}
1260 1244
1261static int ipath_sd_trimself(struct ipath_devdata *dd, int val) 1245static int qib_sd_trimself(struct qib_devdata *dd, int val)
1262{ 1246{
1263 return ibsd_sto_noisy(dd, CMUCTRL5, val, 0xFF); 1247 int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF);
1248
1249 return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
1264} 1250}
1265 1251
1266static int ipath_sd_early(struct ipath_devdata *dd) 1252static int qib_sd_early(struct qib_devdata *dd)
1267{ 1253{
1268 int ret = -1; /* Default failed */ 1254 int ret;
1269 int chnl;
1270 1255
1271 for (chnl = 0; chnl < 4; ++chnl) { 1256 ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
1272 ret = ibsd_sto_noisy(dd, RXHSCTRL0(chnl), 0xD4, 0xFF); 1257 if (ret < 0)
1273 if (ret < 0) 1258 goto bail;
1274 goto bail; 1259 ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);
1275 } 1260 if (ret < 0)
1276 for (chnl = 0; chnl < 4; ++chnl) { 1261 goto bail;
1277 ret = ibsd_sto_noisy(dd, VCDL_DAC2(chnl), 0x2D, 0xFF); 1262 ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF);
1278 if (ret < 0)
1279 goto bail;
1280 }
1281 /* more fine-tuning of what will be default */
1282 for (chnl = 0; chnl < 4; ++chnl) {
1283 ret = ibsd_sto_noisy(dd, VCDL_CTRL2(chnl), 3, 0xF);
1284 if (ret < 0)
1285 goto bail;
1286 }
1287 for (chnl = 0; chnl < 4; ++chnl) {
1288 ret = ibsd_sto_noisy(dd, START_EQ1(chnl), 0x10, 0xFF);
1289 if (ret < 0)
1290 goto bail;
1291 }
1292 for (chnl = 0; chnl < 4; ++chnl) {
1293 ret = ibsd_sto_noisy(dd, START_EQ2(chnl), 0x30, 0xFF);
1294 if (ret < 0)
1295 goto bail;
1296 }
1297bail: 1263bail:
1298 return ret; 1264 return ret;
1299} 1265}
@@ -1302,50 +1268,53 @@ bail:
1302#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6) 1268#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
1303#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF) 1269#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
1304 1270
1305static int ipath_sd_dactrim(struct ipath_devdata *dd) 1271static int qib_sd_dactrim(struct qib_devdata *dd)
1306{ 1272{
1307 int ret = -1; /* Default failed */ 1273 int ret;
1308 int chnl; 1274
1275 ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF);
1276 if (ret < 0)
1277 goto bail;
1278
1279 /* more fine-tuning of what will be default */
1280 ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
1281 if (ret < 0)
1282 goto bail;
1283
1284 ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF);
1285 if (ret < 0)
1286 goto bail;
1287
1288 ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
1289 if (ret < 0)
1290 goto bail;
1291
1292 ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
1293 if (ret < 0)
1294 goto bail;
1309 1295
1310 for (chnl = 0; chnl < 4; ++chnl) {
1311 ret = ibsd_sto_noisy(dd, BACTRL(chnl), 0x40, 0xFF);
1312 if (ret < 0)
1313 goto bail;
1314 }
1315 for (chnl = 0; chnl < 4; ++chnl) {
1316 ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x04, 0xFF);
1317 if (ret < 0)
1318 goto bail;
1319 }
1320 for (chnl = 0; chnl < 4; ++chnl) {
1321 ret = ibsd_sto_noisy(dd, RXHSSTATUS(chnl), 0x04, 0xFF);
1322 if (ret < 0)
1323 goto bail;
1324 }
1325 /* 1296 /*
1326 * delay for max possible number of steps, with slop. 1297 * Delay for max possible number of steps, with slop.
1327 * Each step is about 4usec. 1298 * Each step is about 4usec.
1328 */ 1299 */
1329 udelay(415); 1300 udelay(415);
1330 for (chnl = 0; chnl < 4; ++chnl) { 1301
1331 ret = ibsd_sto_noisy(dd, LDOUTCTRL1(chnl), 0x00, 0xFF); 1302 ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF);
1332 if (ret < 0) 1303
1333 goto bail;
1334 }
1335bail: 1304bail:
1336 return ret; 1305 return ret;
1337} 1306}
1338 1307
1339#define RELOCK_FIRST_MS 3 1308#define RELOCK_FIRST_MS 3
1340#define RXLSPPM(chan) EPB_LOC(chan, 0, 2) 1309#define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
1341void ipath_toggle_rclkrls(struct ipath_devdata *dd) 1310void toggle_7220_rclkrls(struct qib_devdata *dd)
1342{ 1311{
1343 int loc = RXLSPPM(0) | EPB_GLOBAL_WR; 1312 int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
1344 int ret; 1313 int ret;
1345 1314
1346 ret = ibsd_mod_allchnls(dd, loc, 0, 0x80); 1315 ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
1347 if (ret < 0) 1316 if (ret < 0)
1348 ipath_dev_err(dd, "RCLKRLS failed to clear D7\n"); 1317 qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
1349 else { 1318 else {
1350 udelay(1); 1319 udelay(1);
1351 ibsd_mod_allchnls(dd, loc, 0x80, 0x80); 1320 ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
@@ -1354,109 +1323,91 @@ void ipath_toggle_rclkrls(struct ipath_devdata *dd)
1354 udelay(1); 1323 udelay(1);
1355 ret = ibsd_mod_allchnls(dd, loc, 0, 0x80); 1324 ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
1356 if (ret < 0) 1325 if (ret < 0)
1357 ipath_dev_err(dd, "RCLKRLS failed to clear D7\n"); 1326 qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
1358 else { 1327 else {
1359 udelay(1); 1328 udelay(1);
1360 ibsd_mod_allchnls(dd, loc, 0x80, 0x80); 1329 ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
 }
 	/* Now reset xgxs and IBC to complete the recovery */
-	dd->ipath_f_xgxs_reset(dd);
+	dd->f_xgxs_reset(dd->pport);
 }
 
 /*
  * Shut down the timer that polls for relock occasions, if needed
- * this is "hooked" from ipath_7220_quiet_serdes(), which is called
- * just before ipath_shutdown_device() in ipath_driver.c shuts down all
+ * this is "hooked" from qib_7220_quiet_serdes(), which is called
+ * just before qib_shutdown_device() in qib_driver.c shuts down all
  * the other timers
  */
-void ipath_shutdown_relock_poll(struct ipath_devdata *dd)
+void shutdown_7220_relock_poll(struct qib_devdata *dd)
 {
-	struct ipath_relock *irp = &dd->ipath_relock_singleton;
-	if (atomic_read(&irp->ipath_relock_timer_active)) {
-		del_timer_sync(&irp->ipath_relock_timer);
-		atomic_set(&irp->ipath_relock_timer_active, 0);
-	}
+	if (dd->cspec->relock_timer_active)
+		del_timer_sync(&dd->cspec->relock_timer);
 }
 
-static unsigned ipath_relock_by_timer = 1;
-module_param_named(relock_by_timer, ipath_relock_by_timer, uint,
+static unsigned qib_relock_by_timer = 1;
+module_param_named(relock_by_timer, qib_relock_by_timer, uint,
 	S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
 
-static void ipath_run_relock(unsigned long opaque)
+static void qib_run_relock(unsigned long opaque)
 {
-	struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-	struct ipath_relock *irp = &dd->ipath_relock_singleton;
-	u64 val, ltstate;
-
-	if (!(dd->ipath_flags & IPATH_INITTED)) {
-		/* Not yet up, just reenable the timer for later */
-		irp->ipath_relock_interval = HZ;
-		mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
-		return;
-	}
+	struct qib_devdata *dd = (struct qib_devdata *)opaque;
+	struct qib_pportdata *ppd = dd->pport;
+	struct qib_chip_specific *cs = dd->cspec;
+	int timeoff;
 
 	/*
-	 * Check link-training state for "stuck" state.
+	 * Check link-training state for "stuck" state, when down.
 	 * if found, try relock and schedule another try at
 	 * exponentially growing delay, maxed at one second.
 	 * if not stuck, our work is done.
 	 */
-	val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-	ltstate = ipath_ib_linktrstate(dd, val);
-
-	if (ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT
-	    && ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
-		int timeoff;
-		/* Not up yet. Try again, if allowed by module-param */
-		if (ipath_relock_by_timer) {
-			if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)
-				ipath_cdbg(VERBOSE, "Skip RELOCK in AUTONEG\n");
-			else if (!(dd->ipath_flags & IPATH_IB_LINK_DISABLED)) {
-				ipath_cdbg(VERBOSE, "RELOCK\n");
-				ipath_toggle_rclkrls(dd);
-			}
+	if ((dd->flags & QIB_INITTED) && !(ppd->lflags &
+	    (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED |
+	     QIBL_LINKACTIVE))) {
+		if (qib_relock_by_timer) {
+			if (!(ppd->lflags & QIBL_IB_LINK_DISABLED))
+				toggle_7220_rclkrls(dd);
 		}
 		/* re-set timer for next check */
-		timeoff = irp->ipath_relock_interval << 1;
+		timeoff = cs->relock_interval << 1;
 		if (timeoff > HZ)
 			timeoff = HZ;
-		irp->ipath_relock_interval = timeoff;
-
-		mod_timer(&irp->ipath_relock_timer, jiffies + timeoff);
-	} else {
-		/* Up, so no more need to check so often */
-		mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
-	}
+		cs->relock_interval = timeoff;
+	} else
+		timeoff = HZ;
+	mod_timer(&cs->relock_timer, jiffies + timeoff);
 }
 
-void ipath_set_relock_poll(struct ipath_devdata *dd, int ibup)
+void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
 {
-	struct ipath_relock *irp = &dd->ipath_relock_singleton;
+	struct qib_chip_specific *cs = dd->cspec;
 
-	if (ibup > 0) {
-		/* we are now up, so relax timer to 1 second interval */
-		if (atomic_read(&irp->ipath_relock_timer_active))
-			mod_timer(&irp->ipath_relock_timer, jiffies + HZ);
+	if (ibup) {
+		/* We are now up, relax timer to 1 second interval */
+		if (cs->relock_timer_active) {
+			cs->relock_interval = HZ;
+			mod_timer(&cs->relock_timer, jiffies + HZ);
+		}
 	} else {
 		/* Transition to down, (re-)set timer to short interval. */
-		int timeout;
-		timeout = (HZ * ((ibup == -1) ? 1000 : RELOCK_FIRST_MS))/1000;
+		unsigned int timeout;
+
+		timeout = msecs_to_jiffies(RELOCK_FIRST_MS);
 		if (timeout == 0)
 			timeout = 1;
 		/* If timer has not yet been started, do so. */
-		if (atomic_inc_return(&irp->ipath_relock_timer_active) == 1) {
-			init_timer(&irp->ipath_relock_timer);
-			irp->ipath_relock_timer.function = ipath_run_relock;
-			irp->ipath_relock_timer.data = (unsigned long) dd;
-			irp->ipath_relock_interval = timeout;
-			irp->ipath_relock_timer.expires = jiffies + timeout;
-			add_timer(&irp->ipath_relock_timer);
+		if (!cs->relock_timer_active) {
+			cs->relock_timer_active = 1;
+			init_timer(&cs->relock_timer);
+			cs->relock_timer.function = qib_run_relock;
+			cs->relock_timer.data = (unsigned long) dd;
+			cs->relock_interval = timeout;
+			cs->relock_timer.expires = jiffies + timeout;
+			add_timer(&cs->relock_timer);
 		} else {
-			irp->ipath_relock_interval = timeout;
-			mod_timer(&irp->ipath_relock_timer, jiffies + timeout);
-			atomic_dec(&irp->ipath_relock_timer_active);
+			cs->relock_interval = timeout;
+			mod_timer(&cs->relock_timer, jiffies + timeout);
 		}
 	}
 }
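
The relock path above retries with exponential backoff: each unsuccessful poll doubles relock_interval until it saturates at HZ, i.e. once per second. A minimal userspace sketch of just that policy; the HZ and RELOCK_FIRST_MS values here are stand-ins for illustration, not the driver's actual constants:

#include <stdio.h>

#define HZ 250              /* assumed tick rate; kernels vary */
#define RELOCK_FIRST_MS 25  /* assumed first retry delay */

int main(void)
{
	/* msecs_to_jiffies() equivalent for this HZ */
	int interval = RELOCK_FIRST_MS * HZ / 1000;
	if (interval == 0)
		interval = 1;

	/* each failed relock doubles the interval, capped at 1 second */
	for (int tick = 0; interval < HZ; tick++) {
		printf("tick %d: next check in %d jiffies\n", tick, interval);
		interval <<= 1;
		if (interval > HZ)
			interval = HZ;
	}
	printf("steady state: %d jiffies (one second)\n", interval);
	return 0;
}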
diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c b/drivers/infiniband/hw/qib/qib_sd7220_img.c
index 5ef59da9270a..a1118fbd2370 100644
--- a/drivers/infiniband/hw/ipath/ipath_sd7220_img.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220_img.c
@@ -38,11 +38,10 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-#include "ipath_7220.h"
+#include "qib.h"
+#include "qib_7220.h"
 
-static unsigned char ipath_sd7220_ib_img[] = {
+static unsigned char qib_sd7220_ib_img[] = {
 /*0000*/0x02, 0x0A, 0x29, 0x02, 0x0A, 0x87, 0xE5, 0xE6,
 	0x30, 0xE6, 0x04, 0x7F, 0x01, 0x80, 0x02, 0x7F,
 /*0010*/0x00, 0xE5, 0xE2, 0x30, 0xE4, 0x04, 0x7E, 0x01,
@@ -1069,14 +1068,14 @@ static unsigned char ipath_sd7220_ib_img[] = {
 	0x01, 0x20, 0x11, 0x00, 0x04, 0x20, 0x00, 0x81
 };
 
-int ipath_sd7220_ib_load(struct ipath_devdata *dd)
+int qib_sd7220_ib_load(struct qib_devdata *dd)
 {
-	return ipath_sd7220_prog_ld(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
-		sizeof(ipath_sd7220_ib_img), 0);
+	return qib_sd7220_prog_ld(dd, IB_7220_SERDES, qib_sd7220_ib_img,
+		sizeof(qib_sd7220_ib_img), 0);
 }
 
-int ipath_sd7220_ib_vfy(struct ipath_devdata *dd)
+int qib_sd7220_ib_vfy(struct qib_devdata *dd)
 {
-	return ipath_sd7220_prog_vfy(dd, IB_7220_SERDES, ipath_sd7220_ib_img,
-		sizeof(ipath_sd7220_ib_img), 0);
+	return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, qib_sd7220_ib_img,
+		sizeof(qib_sd7220_ib_img), 0);
 }
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
new file mode 100644
index 000000000000..b8456881f7f6
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -0,0 +1,973 @@
1/*
2 * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/spinlock.h>
34#include <linux/netdevice.h>
35
36#include "qib.h"
37#include "qib_common.h"
38
39/* default pio off, sdma on */
40static ushort sdma_descq_cnt = 256;
41module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
42MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
43
44/*
45 * Bits defined in the send DMA descriptor.
46 */
47#define SDMA_DESC_LAST (1ULL << 11)
48#define SDMA_DESC_FIRST (1ULL << 12)
49#define SDMA_DESC_DMA_HEAD (1ULL << 13)
50#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
51#define SDMA_DESC_INTR (1ULL << 15)
52#define SDMA_DESC_COUNT_LSB 16
53#define SDMA_DESC_GEN_LSB 30
54
55char *qib_sdma_state_names[] = {
56 [qib_sdma_state_s00_hw_down] = "s00_HwDown",
57 [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
58 [qib_sdma_state_s20_idle] = "s20_Idle",
59 [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
60 [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
61 [qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
62 [qib_sdma_state_s99_running] = "s99_Running",
63};
64
65char *qib_sdma_event_names[] = {
66 [qib_sdma_event_e00_go_hw_down] = "e00_GoHwDown",
67 [qib_sdma_event_e10_go_hw_start] = "e10_GoHwStart",
68 [qib_sdma_event_e20_hw_started] = "e20_HwStarted",
69 [qib_sdma_event_e30_go_running] = "e30_GoRunning",
70 [qib_sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
71 [qib_sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
72 [qib_sdma_event_e60_hw_halted] = "e60_HwHalted",
73 [qib_sdma_event_e70_go_idle] = "e70_GoIdle",
74 [qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
75 [qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
76 [qib_sdma_event_e90_timer_tick] = "e90_TimerTick",
77};
78
79/* declare all statics here rather than keep sorting */
80static int alloc_sdma(struct qib_pportdata *);
81static void sdma_complete(struct kref *);
82static void sdma_finalput(struct qib_sdma_state *);
83static void sdma_get(struct qib_sdma_state *);
84static void sdma_put(struct qib_sdma_state *);
85static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
86static void sdma_start_sw_clean_up(struct qib_pportdata *);
87static void sdma_sw_clean_up_task(unsigned long);
88static void unmap_desc(struct qib_pportdata *, unsigned);
89
90static void sdma_get(struct qib_sdma_state *ss)
91{
92 kref_get(&ss->kref);
93}
94
95static void sdma_complete(struct kref *kref)
96{
97 struct qib_sdma_state *ss =
98 container_of(kref, struct qib_sdma_state, kref);
99
100 complete(&ss->comp);
101}
102
103static void sdma_put(struct qib_sdma_state *ss)
104{
105 kref_put(&ss->kref, sdma_complete);
106}
107
108static void sdma_finalput(struct qib_sdma_state *ss)
109{
110 sdma_put(ss);
111 wait_for_completion(&ss->comp);
112}
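
sdma_get()/sdma_put() pin the state machine with a kref, and sdma_finalput() drops the caller's reference and then blocks on the completion until the last holder is gone. A single-file analog of that shutdown handshake, using C11 atomics in place of kref and a flag in place of struct completion; this is an illustration of the pattern, not the kernel primitives:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refs;
static atomic_bool done;

static void get(void) { atomic_fetch_add(&refs, 1); }

static void put(void)
{
	/* the last put fires the "completion", like sdma_complete() */
	if (atomic_fetch_sub(&refs, 1) == 1)
		atomic_store(&done, true);
}

int main(void)
{
	atomic_store(&refs, 1);     /* kref_init() */
	get();                      /* state machine started */
	put();                      /* machine stopped (sdma_sw_tear_down) */
	put();                      /* finalput: drop the init reference... */
	while (!atomic_load(&done)) /* ...then wait_for_completion() */
		;
	printf("state machine torn down\n");
	return 0;
}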
113
114/*
115 * Complete all the sdma requests on the active list, in the correct
116 * order, and with appropriate processing. Called when cleaning up
117 * after sdma shutdown, and when new sdma requests are submitted for
118 * a link that is down. This matches what is done for requests
 119 * that complete normally; it's just the full list.
120 *
121 * Must be called with sdma_lock held
122 */
123static void clear_sdma_activelist(struct qib_pportdata *ppd)
124{
125 struct qib_sdma_txreq *txp, *txp_next;
126
127 list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
128 list_del_init(&txp->list);
129 if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
130 unsigned idx;
131
132 idx = txp->start_idx;
133 while (idx != txp->next_descq_idx) {
134 unmap_desc(ppd, idx);
135 if (++idx == ppd->sdma_descq_cnt)
136 idx = 0;
137 }
138 }
139 if (txp->callback)
140 (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
141 }
142}
143
144static void sdma_sw_clean_up_task(unsigned long opaque)
145{
146 struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
147 unsigned long flags;
148
149 spin_lock_irqsave(&ppd->sdma_lock, flags);
150
151 /*
152 * At this point, the following should always be true:
153 * - We are halted, so no more descriptors are getting retired.
154 * - We are not running, so no one is submitting new work.
155 * - Only we can send the e40_sw_cleaned, so we can't start
156 * running again until we say so. So, the active list and
157 * descq are ours to play with.
158 */
159
160 /* Process all retired requests. */
161 qib_sdma_make_progress(ppd);
162
163 clear_sdma_activelist(ppd);
164
165 /*
166 * Resync count of added and removed. It is VERY important that
167 * sdma_descq_removed NEVER decrement - user_sdma depends on it.
168 */
169 ppd->sdma_descq_removed = ppd->sdma_descq_added;
170
171 /*
172 * Reset our notion of head and tail.
173 * Note that the HW registers will be reset when switching states
174 * due to calling __qib_sdma_process_event() below.
175 */
176 ppd->sdma_descq_tail = 0;
177 ppd->sdma_descq_head = 0;
178 ppd->sdma_head_dma[0] = 0;
179 ppd->sdma_generation = 0;
180
181 __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);
182
183 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
184}
185
186/*
187 * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
188 * as a result of send buffer errors or send DMA descriptor errors.
189 * We want to disarm the buffers in these cases.
190 */
191static void sdma_hw_start_up(struct qib_pportdata *ppd)
192{
193 struct qib_sdma_state *ss = &ppd->sdma_state;
194 unsigned bufno;
195
196 for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
197 ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));
198
199 ppd->dd->f_sdma_hw_start_up(ppd);
200}
201
202static void sdma_sw_tear_down(struct qib_pportdata *ppd)
203{
204 struct qib_sdma_state *ss = &ppd->sdma_state;
205
206 /* Releasing this reference means the state machine has stopped. */
207 sdma_put(ss);
208}
209
210static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
211{
212 tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
213}
214
215static void sdma_set_state(struct qib_pportdata *ppd,
216 enum qib_sdma_states next_state)
217{
218 struct qib_sdma_state *ss = &ppd->sdma_state;
219 struct sdma_set_state_action *action = ss->set_state_action;
220 unsigned op = 0;
221
222 /* debugging bookkeeping */
223 ss->previous_state = ss->current_state;
224 ss->previous_op = ss->current_op;
225
226 ss->current_state = next_state;
227
228 if (action[next_state].op_enable)
229 op |= QIB_SDMA_SENDCTRL_OP_ENABLE;
230
231 if (action[next_state].op_intenable)
232 op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;
233
234 if (action[next_state].op_halt)
235 op |= QIB_SDMA_SENDCTRL_OP_HALT;
236
237 if (action[next_state].op_drain)
238 op |= QIB_SDMA_SENDCTRL_OP_DRAIN;
239
240 if (action[next_state].go_s99_running_tofalse)
241 ss->go_s99_running = 0;
242
243 if (action[next_state].go_s99_running_totrue)
244 ss->go_s99_running = 1;
245
246 ss->current_op = op;
247
248 ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
249}
250
251static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
252{
253 __le64 *descqp = &ppd->sdma_descq[head].qw[0];
254 u64 desc[2];
255 dma_addr_t addr;
256 size_t len;
257
258 desc[0] = le64_to_cpu(descqp[0]);
259 desc[1] = le64_to_cpu(descqp[1]);
260
261 addr = (desc[1] << 32) | (desc[0] >> 32);
262 len = (desc[0] >> 14) & (0x7ffULL << 2);
263 dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
264}
265
266static int alloc_sdma(struct qib_pportdata *ppd)
267{
268 ppd->sdma_descq_cnt = sdma_descq_cnt;
269 if (!ppd->sdma_descq_cnt)
270 ppd->sdma_descq_cnt = 256;
271
272 /* Allocate memory for SendDMA descriptor FIFO */
273 ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
274 ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
275 GFP_KERNEL);
276
277 if (!ppd->sdma_descq) {
278 qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor "
279 "FIFO memory\n");
280 goto bail;
281 }
282
283 /* Allocate memory for DMA of head register to memory */
284 ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
285 PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
286 if (!ppd->sdma_head_dma) {
287 qib_dev_err(ppd->dd, "failed to allocate SendDMA "
288 "head memory\n");
289 goto cleanup_descq;
290 }
291 ppd->sdma_head_dma[0] = 0;
292 return 0;
293
294cleanup_descq:
295 dma_free_coherent(&ppd->dd->pcidev->dev,
296 ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
297 ppd->sdma_descq_phys);
298 ppd->sdma_descq = NULL;
299 ppd->sdma_descq_phys = 0;
300bail:
301 ppd->sdma_descq_cnt = 0;
302 return -ENOMEM;
303}
304
305static void free_sdma(struct qib_pportdata *ppd)
306{
307 struct qib_devdata *dd = ppd->dd;
308
309 if (ppd->sdma_head_dma) {
310 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
311 (void *)ppd->sdma_head_dma,
312 ppd->sdma_head_phys);
313 ppd->sdma_head_dma = NULL;
314 ppd->sdma_head_phys = 0;
315 }
316
317 if (ppd->sdma_descq) {
318 dma_free_coherent(&dd->pcidev->dev,
319 ppd->sdma_descq_cnt * sizeof(u64[2]),
320 ppd->sdma_descq, ppd->sdma_descq_phys);
321 ppd->sdma_descq = NULL;
322 ppd->sdma_descq_phys = 0;
323 }
324}
325
326static inline void make_sdma_desc(struct qib_pportdata *ppd,
327 u64 *sdmadesc, u64 addr, u64 dwlen,
328 u64 dwoffset)
329{
330
331 WARN_ON(addr & 3);
332 /* SDmaPhyAddr[47:32] */
333 sdmadesc[1] = addr >> 32;
334 /* SDmaPhyAddr[31:0] */
335 sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
336 /* SDmaGeneration[1:0] */
337 sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
338 SDMA_DESC_GEN_LSB;
339 /* SDmaDwordCount[10:0] */
340 sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
341 /* SDmaBufOffset[12:2] */
342 sdmadesc[0] |= dwoffset & 0x7ffULL;
343}
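
make_sdma_desc() packs a descriptor into two 64-bit words: the low 32 address bits go in the top half of qword 0, the upper address bits in qword 1, and generation, dword count, and buffer offset share the bottom half of qword 0. The sketch below packs and unpacks one descriptor with the same shifts and masks as make_sdma_desc() and unmap_desc(), using made-up field values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GEN_LSB 30   /* SDMA_DESC_GEN_LSB */
#define CNT_LSB 16   /* SDMA_DESC_COUNT_LSB */

int main(void)
{
	uint64_t addr = 0x12345678abcULL; /* dword-aligned, 48-bit */
	uint64_t dwlen = 0x40, gen = 2, dwoff = 5;
	uint64_t qw[2];

	/* pack, as make_sdma_desc() does */
	qw[1] = addr >> 32;                    /* SDmaPhyAddr[47:32] */
	qw[0] = (addr & 0xfffffffcULL) << 32   /* SDmaPhyAddr[31:0] */
	      | (gen & 3ULL) << GEN_LSB        /* SDmaGeneration[1:0] */
	      | (dwlen & 0x7ffULL) << CNT_LSB  /* SDmaDwordCount[10:0] */
	      | (dwoff & 0x7ffULL);            /* SDmaBufOffset[12:2] */

	/* unpack, as unmap_desc() does */
	uint64_t a = (qw[1] << 32) | (qw[0] >> 32);
	uint64_t len = (qw[0] >> 14) & (0x7ffULL << 2); /* dwords -> bytes */

	assert(a == addr && len == (dwlen << 2));
	printf("addr 0x%llx, %llu bytes\n",
	       (unsigned long long)a, (unsigned long long)len);
	return 0;
}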
344
345/* sdma_lock must be held */
346int qib_sdma_make_progress(struct qib_pportdata *ppd)
347{
348 struct list_head *lp = NULL;
349 struct qib_sdma_txreq *txp = NULL;
350 struct qib_devdata *dd = ppd->dd;
351 int progress = 0;
352 u16 hwhead;
353 u16 idx = 0;
354
355 hwhead = dd->f_sdma_gethead(ppd);
356
357 /* The reason for some of the complexity of this code is that
358 * not all descriptors have corresponding txps. So, we have to
359 * be able to skip over descs until we wander into the range of
360 * the next txp on the list.
361 */
362
363 if (!list_empty(&ppd->sdma_activelist)) {
364 lp = ppd->sdma_activelist.next;
365 txp = list_entry(lp, struct qib_sdma_txreq, list);
366 idx = txp->start_idx;
367 }
368
369 while (ppd->sdma_descq_head != hwhead) {
370 /* if desc is part of this txp, unmap if needed */
371 if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
372 (idx == ppd->sdma_descq_head)) {
373 unmap_desc(ppd, ppd->sdma_descq_head);
374 if (++idx == ppd->sdma_descq_cnt)
375 idx = 0;
376 }
377
378 /* increment dequed desc count */
379 ppd->sdma_descq_removed++;
380
381 /* advance head, wrap if needed */
382 if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
383 ppd->sdma_descq_head = 0;
384
385 /* if now past this txp's descs, do the callback */
386 if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
387 /* remove from active list */
388 list_del_init(&txp->list);
389 if (txp->callback)
390 (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
391 /* see if there is another txp */
392 if (list_empty(&ppd->sdma_activelist))
393 txp = NULL;
394 else {
395 lp = ppd->sdma_activelist.next;
396 txp = list_entry(lp, struct qib_sdma_txreq,
397 list);
398 idx = txp->start_idx;
399 }
400 }
401 progress = 1;
402 }
403 if (progress)
404 qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
405 return progress;
406}
407
408/*
409 * This is called from interrupt context.
410 */
411void qib_sdma_intr(struct qib_pportdata *ppd)
412{
413 unsigned long flags;
414
415 spin_lock_irqsave(&ppd->sdma_lock, flags);
416
417 __qib_sdma_intr(ppd);
418
419 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
420}
421
422void __qib_sdma_intr(struct qib_pportdata *ppd)
423{
424 if (__qib_sdma_running(ppd))
425 qib_sdma_make_progress(ppd);
426}
427
428int qib_setup_sdma(struct qib_pportdata *ppd)
429{
430 struct qib_devdata *dd = ppd->dd;
431 unsigned long flags;
432 int ret = 0;
433
434 ret = alloc_sdma(ppd);
435 if (ret)
436 goto bail;
437
438 /* set consistent sdma state */
439 ppd->dd->f_sdma_init_early(ppd);
440 spin_lock_irqsave(&ppd->sdma_lock, flags);
441 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
442 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
443
444 /* set up reference counting */
445 kref_init(&ppd->sdma_state.kref);
446 init_completion(&ppd->sdma_state.comp);
447
448 ppd->sdma_generation = 0;
449 ppd->sdma_descq_head = 0;
450 ppd->sdma_descq_removed = 0;
451 ppd->sdma_descq_added = 0;
452
453 INIT_LIST_HEAD(&ppd->sdma_activelist);
454
455 tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
456 (unsigned long)ppd);
457
458 ret = dd->f_init_sdma_regs(ppd);
459 if (ret)
460 goto bail_alloc;
461
462 qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);
463
464 return 0;
465
466bail_alloc:
467 qib_teardown_sdma(ppd);
468bail:
469 return ret;
470}
471
472void qib_teardown_sdma(struct qib_pportdata *ppd)
473{
474 qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
475
476 /*
477 * This waits for the state machine to exit so it is not
478 * necessary to kill the sdma_sw_clean_up_task to make sure
479 * it is not running.
480 */
481 sdma_finalput(&ppd->sdma_state);
482
483 free_sdma(ppd);
484}
485
486int qib_sdma_running(struct qib_pportdata *ppd)
487{
488 unsigned long flags;
489 int ret;
490
491 spin_lock_irqsave(&ppd->sdma_lock, flags);
492 ret = __qib_sdma_running(ppd);
493 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
494
495 return ret;
496}
497
498/*
 499 * Complete a request when sdma not running; likely the only request,
500 * but to simplify the code, always queue it, then process the full
501 * activelist. We process the entire list to ensure that this particular
 502 * request does get its callback, but in the correct order.
503 * Must be called with sdma_lock held
504 */
505static void complete_sdma_err_req(struct qib_pportdata *ppd,
506 struct qib_verbs_txreq *tx)
507{
508 atomic_inc(&tx->qp->s_dma_busy);
509 /* no sdma descriptors, so no unmap_desc */
510 tx->txreq.start_idx = 0;
511 tx->txreq.next_descq_idx = 0;
512 list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
513 clear_sdma_activelist(ppd);
514}
515
516/*
517 * This function queues one IB packet onto the send DMA queue per call.
518 * The caller is responsible for checking:
519 * 1) The number of send DMA descriptor entries is less than the size of
520 * the descriptor queue.
521 * 2) The IB SGE addresses and lengths are 32-bit aligned
522 * (except possibly the last SGE's length)
523 * 3) The SGE addresses are suitable for passing to dma_map_single().
524 */
525int qib_sdma_verbs_send(struct qib_pportdata *ppd,
526 struct qib_sge_state *ss, u32 dwords,
527 struct qib_verbs_txreq *tx)
528{
529 unsigned long flags;
530 struct qib_sge *sge;
531 struct qib_qp *qp;
532 int ret = 0;
533 u16 tail;
534 __le64 *descqp;
535 u64 sdmadesc[2];
536 u32 dwoffset;
537 dma_addr_t addr;
538
539 spin_lock_irqsave(&ppd->sdma_lock, flags);
540
541retry:
542 if (unlikely(!__qib_sdma_running(ppd))) {
543 complete_sdma_err_req(ppd, tx);
544 goto unlock;
545 }
546
547 if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
548 if (qib_sdma_make_progress(ppd))
549 goto retry;
550 if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
551 ppd->dd->f_sdma_set_desc_cnt(ppd,
552 ppd->sdma_descq_cnt / 2);
553 goto busy;
554 }
555
556 dwoffset = tx->hdr_dwords;
557 make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
558
559 sdmadesc[0] |= SDMA_DESC_FIRST;
560 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
561 sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
562
563 /* write to the descq */
564 tail = ppd->sdma_descq_tail;
565 descqp = &ppd->sdma_descq[tail].qw[0];
566 *descqp++ = cpu_to_le64(sdmadesc[0]);
567 *descqp++ = cpu_to_le64(sdmadesc[1]);
568
569 /* increment the tail */
570 if (++tail == ppd->sdma_descq_cnt) {
571 tail = 0;
572 descqp = &ppd->sdma_descq[0].qw[0];
573 ++ppd->sdma_generation;
574 }
575
576 tx->txreq.start_idx = tail;
577
578 sge = &ss->sge;
579 while (dwords) {
580 u32 dw;
581 u32 len;
582
583 len = dwords << 2;
584 if (len > sge->length)
585 len = sge->length;
586 if (len > sge->sge_length)
587 len = sge->sge_length;
588 BUG_ON(len == 0);
589 dw = (len + 3) >> 2;
590 addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
591 dw << 2, DMA_TO_DEVICE);
592 if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
593 goto unmap;
594 sdmadesc[0] = 0;
595 make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
596 /* SDmaUseLargeBuf has to be set in every descriptor */
597 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
598 sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
599 /* write to the descq */
600 *descqp++ = cpu_to_le64(sdmadesc[0]);
601 *descqp++ = cpu_to_le64(sdmadesc[1]);
602
603 /* increment the tail */
604 if (++tail == ppd->sdma_descq_cnt) {
605 tail = 0;
606 descqp = &ppd->sdma_descq[0].qw[0];
607 ++ppd->sdma_generation;
608 }
609 sge->vaddr += len;
610 sge->length -= len;
611 sge->sge_length -= len;
612 if (sge->sge_length == 0) {
613 if (--ss->num_sge)
614 *sge = *ss->sg_list++;
615 } else if (sge->length == 0 && sge->mr->lkey) {
616 if (++sge->n >= QIB_SEGSZ) {
617 if (++sge->m >= sge->mr->mapsz)
618 break;
619 sge->n = 0;
620 }
621 sge->vaddr =
622 sge->mr->map[sge->m]->segs[sge->n].vaddr;
623 sge->length =
624 sge->mr->map[sge->m]->segs[sge->n].length;
625 }
626
627 dwoffset += dw;
628 dwords -= dw;
629 }
630
631 if (!tail)
632 descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
633 descqp -= 2;
634 descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
635 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
636 descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
637 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
638 descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
639
640 atomic_inc(&tx->qp->s_dma_busy);
641 tx->txreq.next_descq_idx = tail;
642 ppd->dd->f_sdma_update_tail(ppd, tail);
643 ppd->sdma_descq_added += tx->txreq.sg_count;
644 list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
645 goto unlock;
646
647unmap:
648 for (;;) {
649 if (!tail)
650 tail = ppd->sdma_descq_cnt - 1;
651 else
652 tail--;
653 if (tail == ppd->sdma_descq_tail)
654 break;
655 unmap_desc(ppd, tail);
656 }
657 qp = tx->qp;
658 qib_put_txreq(tx);
659 spin_lock(&qp->s_lock);
660 if (qp->ibqp.qp_type == IB_QPT_RC) {
661 /* XXX what about error sending RDMA read responses? */
662 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
663 qib_error_qp(qp, IB_WC_GENERAL_ERR);
664 } else if (qp->s_wqe)
665 qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
666 spin_unlock(&qp->s_lock);
667 /* return zero to process the next send work request */
668 goto unlock;
669
670busy:
671 qp = tx->qp;
672 spin_lock(&qp->s_lock);
673 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
674 struct qib_ibdev *dev;
675
676 /*
677 * If we couldn't queue the DMA request, save the info
678 * and try again later rather than destroying the
679 * buffer and undoing the side effects of the copy.
680 */
681 tx->ss = ss;
682 tx->dwords = dwords;
683 qp->s_tx = tx;
684 dev = &ppd->dd->verbs_dev;
685 spin_lock(&dev->pending_lock);
686 if (list_empty(&qp->iowait)) {
687 struct qib_ibport *ibp;
688
689 ibp = &ppd->ibport_data;
690 ibp->n_dmawait++;
691 qp->s_flags |= QIB_S_WAIT_DMA_DESC;
692 list_add_tail(&qp->iowait, &dev->dmawait);
693 }
694 spin_unlock(&dev->pending_lock);
695 qp->s_flags &= ~QIB_S_BUSY;
696 spin_unlock(&qp->s_lock);
697 ret = -EBUSY;
698 } else {
699 spin_unlock(&qp->s_lock);
700 qib_put_txreq(tx);
701 }
702unlock:
703 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
704 return ret;
705}
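
qib_sdma_verbs_send() consumes one descriptor for the header plus one per DMA-mapped payload chunk, and each chunk's byte length is rounded up to whole dwords with dw = (len + 3) >> 2 before mapping. A small sketch of that accounting, in the simple case where each SGE maps as a single chunk; the SGE sizes are invented for illustration:

#include <stdio.h>

int main(void)
{
	unsigned sge_len[] = { 4096, 4096, 1500 };  /* hypothetical SGEs */
	unsigned nsge = sizeof(sge_len) / sizeof(sge_len[0]);
	unsigned ndesc = 1;                         /* header descriptor */
	unsigned dwoffset = 0;                      /* running dword offset */

	for (unsigned i = 0; i < nsge; i++) {
		unsigned dw = (sge_len[i] + 3) >> 2; /* round up to dwords */
		dwoffset += dw;
		ndesc++;                             /* one desc per chunk */
	}
	printf("%u descriptors, %u payload dwords\n", ndesc, dwoffset);
	return 0;
}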
706
707void qib_sdma_process_event(struct qib_pportdata *ppd,
708 enum qib_sdma_events event)
709{
710 unsigned long flags;
711
712 spin_lock_irqsave(&ppd->sdma_lock, flags);
713
714 __qib_sdma_process_event(ppd, event);
715
716 if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
717 qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
718
719 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
720}
721
722void __qib_sdma_process_event(struct qib_pportdata *ppd,
723 enum qib_sdma_events event)
724{
725 struct qib_sdma_state *ss = &ppd->sdma_state;
726
727 switch (ss->current_state) {
728 case qib_sdma_state_s00_hw_down:
729 switch (event) {
730 case qib_sdma_event_e00_go_hw_down:
731 break;
732 case qib_sdma_event_e30_go_running:
733 /*
734 * If down, but running requested (usually result
 735 * of link up), then we need to start up.
736 * This can happen when hw down is requested while
737 * bringing the link up with traffic active on
738 * 7220, e.g. */
739 ss->go_s99_running = 1;
740 /* fall through and start dma engine */
741 case qib_sdma_event_e10_go_hw_start:
742 /* This reference means the state machine is started */
743 sdma_get(&ppd->sdma_state);
744 sdma_set_state(ppd,
745 qib_sdma_state_s10_hw_start_up_wait);
746 break;
747 case qib_sdma_event_e20_hw_started:
748 break;
749 case qib_sdma_event_e40_sw_cleaned:
750 sdma_sw_tear_down(ppd);
751 break;
752 case qib_sdma_event_e50_hw_cleaned:
753 break;
754 case qib_sdma_event_e60_hw_halted:
755 break;
756 case qib_sdma_event_e70_go_idle:
757 break;
758 case qib_sdma_event_e7220_err_halted:
759 break;
760 case qib_sdma_event_e7322_err_halted:
761 break;
762 case qib_sdma_event_e90_timer_tick:
763 break;
764 }
765 break;
766
767 case qib_sdma_state_s10_hw_start_up_wait:
768 switch (event) {
769 case qib_sdma_event_e00_go_hw_down:
770 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
771 sdma_sw_tear_down(ppd);
772 break;
773 case qib_sdma_event_e10_go_hw_start:
774 break;
775 case qib_sdma_event_e20_hw_started:
776 sdma_set_state(ppd, ss->go_s99_running ?
777 qib_sdma_state_s99_running :
778 qib_sdma_state_s20_idle);
779 break;
780 case qib_sdma_event_e30_go_running:
781 ss->go_s99_running = 1;
782 break;
783 case qib_sdma_event_e40_sw_cleaned:
784 break;
785 case qib_sdma_event_e50_hw_cleaned:
786 break;
787 case qib_sdma_event_e60_hw_halted:
788 break;
789 case qib_sdma_event_e70_go_idle:
790 ss->go_s99_running = 0;
791 break;
792 case qib_sdma_event_e7220_err_halted:
793 break;
794 case qib_sdma_event_e7322_err_halted:
795 break;
796 case qib_sdma_event_e90_timer_tick:
797 break;
798 }
799 break;
800
801 case qib_sdma_state_s20_idle:
802 switch (event) {
803 case qib_sdma_event_e00_go_hw_down:
804 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
805 sdma_sw_tear_down(ppd);
806 break;
807 case qib_sdma_event_e10_go_hw_start:
808 break;
809 case qib_sdma_event_e20_hw_started:
810 break;
811 case qib_sdma_event_e30_go_running:
812 sdma_set_state(ppd, qib_sdma_state_s99_running);
813 ss->go_s99_running = 1;
814 break;
815 case qib_sdma_event_e40_sw_cleaned:
816 break;
817 case qib_sdma_event_e50_hw_cleaned:
818 break;
819 case qib_sdma_event_e60_hw_halted:
820 break;
821 case qib_sdma_event_e70_go_idle:
822 break;
823 case qib_sdma_event_e7220_err_halted:
824 break;
825 case qib_sdma_event_e7322_err_halted:
826 break;
827 case qib_sdma_event_e90_timer_tick:
828 break;
829 }
830 break;
831
832 case qib_sdma_state_s30_sw_clean_up_wait:
833 switch (event) {
834 case qib_sdma_event_e00_go_hw_down:
835 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
836 break;
837 case qib_sdma_event_e10_go_hw_start:
838 break;
839 case qib_sdma_event_e20_hw_started:
840 break;
841 case qib_sdma_event_e30_go_running:
842 ss->go_s99_running = 1;
843 break;
844 case qib_sdma_event_e40_sw_cleaned:
845 sdma_set_state(ppd,
846 qib_sdma_state_s10_hw_start_up_wait);
847 sdma_hw_start_up(ppd);
848 break;
849 case qib_sdma_event_e50_hw_cleaned:
850 break;
851 case qib_sdma_event_e60_hw_halted:
852 break;
853 case qib_sdma_event_e70_go_idle:
854 ss->go_s99_running = 0;
855 break;
856 case qib_sdma_event_e7220_err_halted:
857 break;
858 case qib_sdma_event_e7322_err_halted:
859 break;
860 case qib_sdma_event_e90_timer_tick:
861 break;
862 }
863 break;
864
865 case qib_sdma_state_s40_hw_clean_up_wait:
866 switch (event) {
867 case qib_sdma_event_e00_go_hw_down:
868 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
869 sdma_start_sw_clean_up(ppd);
870 break;
871 case qib_sdma_event_e10_go_hw_start:
872 break;
873 case qib_sdma_event_e20_hw_started:
874 break;
875 case qib_sdma_event_e30_go_running:
876 ss->go_s99_running = 1;
877 break;
878 case qib_sdma_event_e40_sw_cleaned:
879 break;
880 case qib_sdma_event_e50_hw_cleaned:
881 sdma_set_state(ppd,
882 qib_sdma_state_s30_sw_clean_up_wait);
883 sdma_start_sw_clean_up(ppd);
884 break;
885 case qib_sdma_event_e60_hw_halted:
886 break;
887 case qib_sdma_event_e70_go_idle:
888 ss->go_s99_running = 0;
889 break;
890 case qib_sdma_event_e7220_err_halted:
891 break;
892 case qib_sdma_event_e7322_err_halted:
893 break;
894 case qib_sdma_event_e90_timer_tick:
895 break;
896 }
897 break;
898
899 case qib_sdma_state_s50_hw_halt_wait:
900 switch (event) {
901 case qib_sdma_event_e00_go_hw_down:
902 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
903 sdma_start_sw_clean_up(ppd);
904 break;
905 case qib_sdma_event_e10_go_hw_start:
906 break;
907 case qib_sdma_event_e20_hw_started:
908 break;
909 case qib_sdma_event_e30_go_running:
910 ss->go_s99_running = 1;
911 break;
912 case qib_sdma_event_e40_sw_cleaned:
913 break;
914 case qib_sdma_event_e50_hw_cleaned:
915 break;
916 case qib_sdma_event_e60_hw_halted:
917 sdma_set_state(ppd,
918 qib_sdma_state_s40_hw_clean_up_wait);
919 ppd->dd->f_sdma_hw_clean_up(ppd);
920 break;
921 case qib_sdma_event_e70_go_idle:
922 ss->go_s99_running = 0;
923 break;
924 case qib_sdma_event_e7220_err_halted:
925 break;
926 case qib_sdma_event_e7322_err_halted:
927 break;
928 case qib_sdma_event_e90_timer_tick:
929 break;
930 }
931 break;
932
933 case qib_sdma_state_s99_running:
934 switch (event) {
935 case qib_sdma_event_e00_go_hw_down:
936 sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
937 sdma_start_sw_clean_up(ppd);
938 break;
939 case qib_sdma_event_e10_go_hw_start:
940 break;
941 case qib_sdma_event_e20_hw_started:
942 break;
943 case qib_sdma_event_e30_go_running:
944 break;
945 case qib_sdma_event_e40_sw_cleaned:
946 break;
947 case qib_sdma_event_e50_hw_cleaned:
948 break;
949 case qib_sdma_event_e60_hw_halted:
950 sdma_set_state(ppd,
951 qib_sdma_state_s30_sw_clean_up_wait);
952 sdma_start_sw_clean_up(ppd);
953 break;
954 case qib_sdma_event_e70_go_idle:
955 sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
956 ss->go_s99_running = 0;
957 break;
958 case qib_sdma_event_e7220_err_halted:
959 sdma_set_state(ppd,
960 qib_sdma_state_s30_sw_clean_up_wait);
961 sdma_start_sw_clean_up(ppd);
962 break;
963 case qib_sdma_event_e7322_err_halted:
964 sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
965 break;
966 case qib_sdma_event_e90_timer_tick:
967 break;
968 }
969 break;
970 }
971
972 ss->last_event = event;
973}
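
The switch above is a flat state-by-event table: every state handles every event, almost always as a no-op, so an unexpected event is silently absorbed instead of corrupting the engine. The same shape reduced to two states and two events (names invented, not driver symbols):

#include <stdio.h>

enum state { S_IDLE, S_RUNNING };
enum event { E_GO, E_HALT };

static enum state step(enum state s, enum event e)
{
	switch (s) {
	case S_IDLE:
		switch (e) {
		case E_GO:   return S_RUNNING;
		case E_HALT: break;  /* absorbed, like the empty cases */
		}
		break;
	case S_RUNNING:
		switch (e) {
		case E_GO:   break;
		case E_HALT: return S_IDLE;
		}
		break;
	}
	return s;
}

int main(void)
{
	enum state s = S_IDLE;
	s = step(s, E_HALT); /* ignored in idle */
	s = step(s, E_GO);   /* idle -> running */
	printf("final state %d\n", s);
	return 0;
}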
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c
new file mode 100644
index 000000000000..c3ec8efc2ed8
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_srq.c
@@ -0,0 +1,375 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/err.h>
35#include <linux/slab.h>
36#include <linux/vmalloc.h>
37
38#include "qib_verbs.h"
39
40/**
41 * qib_post_srq_receive - post a receive on a shared receive queue
42 * @ibsrq: the SRQ to post the receive on
43 * @wr: the list of work requests to post
44 * @bad_wr: A pointer to the first WR to cause a problem is put here
45 *
46 * This may be called from interrupt context.
47 */
48int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
49 struct ib_recv_wr **bad_wr)
50{
51 struct qib_srq *srq = to_isrq(ibsrq);
52 struct qib_rwq *wq;
53 unsigned long flags;
54 int ret;
55
56 for (; wr; wr = wr->next) {
57 struct qib_rwqe *wqe;
58 u32 next;
59 int i;
60
61 if ((unsigned) wr->num_sge > srq->rq.max_sge) {
62 *bad_wr = wr;
63 ret = -EINVAL;
64 goto bail;
65 }
66
67 spin_lock_irqsave(&srq->rq.lock, flags);
68 wq = srq->rq.wq;
69 next = wq->head + 1;
70 if (next >= srq->rq.size)
71 next = 0;
72 if (next == wq->tail) {
73 spin_unlock_irqrestore(&srq->rq.lock, flags);
74 *bad_wr = wr;
75 ret = -ENOMEM;
76 goto bail;
77 }
78
79 wqe = get_rwqe_ptr(&srq->rq, wq->head);
80 wqe->wr_id = wr->wr_id;
81 wqe->num_sge = wr->num_sge;
82 for (i = 0; i < wr->num_sge; i++)
83 wqe->sg_list[i] = wr->sg_list[i];
84 /* Make sure queue entry is written before the head index. */
85 smp_wmb();
86 wq->head = next;
87 spin_unlock_irqrestore(&srq->rq.lock, flags);
88 }
89 ret = 0;
90
91bail:
92 return ret;
93}
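
The post path above is a classic single-producer ring: the entry is written before head is advanced (hence the smp_wmb()), and the queue is declared full when advancing head would land on tail, so a ring of size N usably holds N - 1 entries; qib_query_srq() reports max_wr = size - 1 for the same reason. The occupancy rule in isolation, as a minimal sketch:

#include <stdio.h>

#define SIZE 8  /* a ring of 8 slots holds at most 7 entries */

int main(void)
{
	unsigned head = 0, tail = 0, posted = 0;

	for (;;) {
		unsigned next = head + 1;
		if (next >= SIZE)
			next = 0;
		if (next == tail) /* full: would collide with consumer */
			break;
		/* real code writes the WQE here, before moving head */
		head = next;
		posted++;
	}
	printf("posted %u of %u slots\n", posted, SIZE);
	return 0;
}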
94
95/**
96 * qib_create_srq - create a shared receive queue
97 * @ibpd: the protection domain of the SRQ to create
98 * @srq_init_attr: the attributes of the SRQ
99 * @udata: data from libibverbs when creating a user SRQ
100 */
101struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
102 struct ib_srq_init_attr *srq_init_attr,
103 struct ib_udata *udata)
104{
105 struct qib_ibdev *dev = to_idev(ibpd->device);
106 struct qib_srq *srq;
107 u32 sz;
108 struct ib_srq *ret;
109
110 if (srq_init_attr->attr.max_sge == 0 ||
111 srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
112 srq_init_attr->attr.max_wr == 0 ||
113 srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
114 ret = ERR_PTR(-EINVAL);
115 goto done;
116 }
117
118 srq = kmalloc(sizeof(*srq), GFP_KERNEL);
119 if (!srq) {
120 ret = ERR_PTR(-ENOMEM);
121 goto done;
122 }
123
124 /*
125 * Need to use vmalloc() if we want to support large #s of entries.
126 */
127 srq->rq.size = srq_init_attr->attr.max_wr + 1;
128 srq->rq.max_sge = srq_init_attr->attr.max_sge;
129 sz = sizeof(struct ib_sge) * srq->rq.max_sge +
130 sizeof(struct qib_rwqe);
131 srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
132 if (!srq->rq.wq) {
133 ret = ERR_PTR(-ENOMEM);
134 goto bail_srq;
135 }
136
137 /*
138 * Return the address of the RWQ as the offset to mmap.
139 * See qib_mmap() for details.
140 */
141 if (udata && udata->outlen >= sizeof(__u64)) {
142 int err;
143 u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;
144
145 srq->ip =
146 qib_create_mmap_info(dev, s, ibpd->uobject->context,
147 srq->rq.wq);
148 if (!srq->ip) {
149 ret = ERR_PTR(-ENOMEM);
150 goto bail_wq;
151 }
152
153 err = ib_copy_to_udata(udata, &srq->ip->offset,
154 sizeof(srq->ip->offset));
155 if (err) {
156 ret = ERR_PTR(err);
157 goto bail_ip;
158 }
159 } else
160 srq->ip = NULL;
161
162 /*
163 * ib_create_srq() will initialize srq->ibsrq.
164 */
165 spin_lock_init(&srq->rq.lock);
166 srq->rq.wq->head = 0;
167 srq->rq.wq->tail = 0;
168 srq->limit = srq_init_attr->attr.srq_limit;
169
170 spin_lock(&dev->n_srqs_lock);
171 if (dev->n_srqs_allocated == ib_qib_max_srqs) {
172 spin_unlock(&dev->n_srqs_lock);
173 ret = ERR_PTR(-ENOMEM);
174 goto bail_ip;
175 }
176
177 dev->n_srqs_allocated++;
178 spin_unlock(&dev->n_srqs_lock);
179
180 if (srq->ip) {
181 spin_lock_irq(&dev->pending_lock);
182 list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
183 spin_unlock_irq(&dev->pending_lock);
184 }
185
186 ret = &srq->ibsrq;
187 goto done;
188
189bail_ip:
190 kfree(srq->ip);
191bail_wq:
192 vfree(srq->rq.wq);
193bail_srq:
194 kfree(srq);
195done:
196 return ret;
197}
198
199/**
200 * qib_modify_srq - modify a shared receive queue
201 * @ibsrq: the SRQ to modify
202 * @attr: the new attributes of the SRQ
203 * @attr_mask: indicates which attributes to modify
204 * @udata: user data for libibverbs.so
205 */
206int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
207 enum ib_srq_attr_mask attr_mask,
208 struct ib_udata *udata)
209{
210 struct qib_srq *srq = to_isrq(ibsrq);
211 struct qib_rwq *wq;
212 int ret = 0;
213
214 if (attr_mask & IB_SRQ_MAX_WR) {
215 struct qib_rwq *owq;
216 struct qib_rwqe *p;
217 u32 sz, size, n, head, tail;
218
219 /* Check that the requested sizes are below the limits. */
220 if ((attr->max_wr > ib_qib_max_srq_wrs) ||
221 ((attr_mask & IB_SRQ_LIMIT) ?
222 attr->srq_limit : srq->limit) > attr->max_wr) {
223 ret = -EINVAL;
224 goto bail;
225 }
226
227 sz = sizeof(struct qib_rwqe) +
228 srq->rq.max_sge * sizeof(struct ib_sge);
229 size = attr->max_wr + 1;
230 wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
231 if (!wq) {
232 ret = -ENOMEM;
233 goto bail;
234 }
235
236 /* Check that we can write the offset to mmap. */
237 if (udata && udata->inlen >= sizeof(__u64)) {
238 __u64 offset_addr;
239 __u64 offset = 0;
240
241 ret = ib_copy_from_udata(&offset_addr, udata,
242 sizeof(offset_addr));
243 if (ret)
244 goto bail_free;
245 udata->outbuf =
246 (void __user *) (unsigned long) offset_addr;
247 ret = ib_copy_to_udata(udata, &offset,
248 sizeof(offset));
249 if (ret)
250 goto bail_free;
251 }
252
253 spin_lock_irq(&srq->rq.lock);
254 /*
255 * validate head and tail pointer values and compute
256 * the number of remaining WQEs.
257 */
258 owq = srq->rq.wq;
259 head = owq->head;
260 tail = owq->tail;
261 if (head >= srq->rq.size || tail >= srq->rq.size) {
262 ret = -EINVAL;
263 goto bail_unlock;
264 }
265 n = head;
266 if (n < tail)
267 n += srq->rq.size - tail;
268 else
269 n -= tail;
270 if (size <= n) {
271 ret = -EINVAL;
272 goto bail_unlock;
273 }
274 n = 0;
275 p = wq->wq;
276 while (tail != head) {
277 struct qib_rwqe *wqe;
278 int i;
279
280 wqe = get_rwqe_ptr(&srq->rq, tail);
281 p->wr_id = wqe->wr_id;
282 p->num_sge = wqe->num_sge;
283 for (i = 0; i < wqe->num_sge; i++)
284 p->sg_list[i] = wqe->sg_list[i];
285 n++;
286 p = (struct qib_rwqe *)((char *) p + sz);
287 if (++tail >= srq->rq.size)
288 tail = 0;
289 }
290 srq->rq.wq = wq;
291 srq->rq.size = size;
292 wq->head = n;
293 wq->tail = 0;
294 if (attr_mask & IB_SRQ_LIMIT)
295 srq->limit = attr->srq_limit;
296 spin_unlock_irq(&srq->rq.lock);
297
298 vfree(owq);
299
300 if (srq->ip) {
301 struct qib_mmap_info *ip = srq->ip;
302 struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
303 u32 s = sizeof(struct qib_rwq) + size * sz;
304
305 qib_update_mmap_info(dev, ip, s, wq);
306
307 /*
308 * Return the offset to mmap.
309 * See qib_mmap() for details.
310 */
311 if (udata && udata->inlen >= sizeof(__u64)) {
312 ret = ib_copy_to_udata(udata, &ip->offset,
313 sizeof(ip->offset));
314 if (ret)
315 goto bail;
316 }
317
318 /*
319 * Put user mapping info onto the pending list
320 * unless it already is on the list.
321 */
322 spin_lock_irq(&dev->pending_lock);
323 if (list_empty(&ip->pending_mmaps))
324 list_add(&ip->pending_mmaps,
325 &dev->pending_mmaps);
326 spin_unlock_irq(&dev->pending_lock);
327 }
328 } else if (attr_mask & IB_SRQ_LIMIT) {
329 spin_lock_irq(&srq->rq.lock);
330 if (attr->srq_limit >= srq->rq.size)
331 ret = -EINVAL;
332 else
333 srq->limit = attr->srq_limit;
334 spin_unlock_irq(&srq->rq.lock);
335 }
336 goto bail;
337
338bail_unlock:
339 spin_unlock_irq(&srq->rq.lock);
340bail_free:
341 vfree(wq);
342bail:
343 return ret;
344}
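
When resizing, qib_modify_srq() first counts how many WQEs are still outstanding between tail and head, wrapping once when head has lapped tail, and rejects any new size that is not strictly greater than that count (since a ring of size slots holds size - 1 entries). That count stand-alone, with example values:

#include <stdio.h>

/* entries outstanding in a ring of 'size' slots, per qib_modify_srq() */
static unsigned outstanding(unsigned head, unsigned tail, unsigned size)
{
	unsigned n = head;

	if (n < tail)
		n += size - tail; /* head has wrapped past the end */
	else
		n -= tail;
	return n;
}

int main(void)
{
	printf("%u\n", outstanding(5, 2, 8)); /* 3 in flight */
	printf("%u\n", outstanding(1, 6, 8)); /* wrapped: also 3 */
	return 0;
}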
345
346int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
347{
348 struct qib_srq *srq = to_isrq(ibsrq);
349
350 attr->max_wr = srq->rq.size - 1;
351 attr->max_sge = srq->rq.max_sge;
352 attr->srq_limit = srq->limit;
353 return 0;
354}
355
356/**
357 * qib_destroy_srq - destroy a shared receive queue
358 * @ibsrq: the SRQ to destroy
359 */
360int qib_destroy_srq(struct ib_srq *ibsrq)
361{
362 struct qib_srq *srq = to_isrq(ibsrq);
363 struct qib_ibdev *dev = to_idev(ibsrq->device);
364
365 spin_lock(&dev->n_srqs_lock);
366 dev->n_srqs_allocated--;
367 spin_unlock(&dev->n_srqs_lock);
368 if (srq->ip)
369 kref_put(&srq->ip->ref, qib_release_mmap_info);
370 else
371 vfree(srq->rq.wq);
372 kfree(srq);
373
374 return 0;
375}
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
new file mode 100644
index 000000000000..dab4d9f4a2cc
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -0,0 +1,691 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include <linux/ctype.h>
34
35#include "qib.h"
36
37/**
38 * qib_parse_ushort - parse an unsigned short value in an arbitrary base
39 * @str: the string containing the number
40 * @valp: where to put the result
41 *
42 * Returns the number of bytes consumed, or negative value on error.
43 */
44static int qib_parse_ushort(const char *str, unsigned short *valp)
45{
46 unsigned long val;
47 char *end;
48 int ret;
49
50 if (!isdigit(str[0])) {
51 ret = -EINVAL;
52 goto bail;
53 }
54
55 val = simple_strtoul(str, &end, 0);
56
57 if (val > 0xffff) {
58 ret = -EINVAL;
59 goto bail;
60 }
61
62 *valp = val;
63
64 ret = end + 1 - str;
65 if (ret == 0)
66 ret = -EINVAL;
67
68bail:
69 return ret;
70}
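
Note the return value: end + 1 - str counts one byte past the last digit, so the consumed length includes the delimiter that follows the number. A userspace rendering of the same contract using strtoul, with the same base-0 parsing and 0xffff range check; the main() driver is only a demonstration:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* userspace analog of qib_parse_ushort() */
static int parse_ushort(const char *str, unsigned short *valp)
{
	unsigned long val;
	char *end;

	if (!isdigit((unsigned char)str[0]))
		return -1;
	val = strtoul(str, &end, 0); /* base 0: 0x.., 0.., or decimal */
	if (val > 0xffff)
		return -1;
	*valp = (unsigned short)val;
	return (int)(end + 1 - str); /* bytes consumed, incl. delimiter */
}

int main(void)
{
	unsigned short v;
	int n = parse_ushort("0x30 rest", &v);

	printf("val=%u, consumed=%d\n", v, n); /* val=48, consumed=5 */
	return 0;
}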
71
72/* start of per-port functions */
73/*
74 * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
75 */
76static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
77{
78 struct qib_devdata *dd = ppd->dd;
79 int ret;
80
81 ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);
82 ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
83 return ret;
84}
85
86static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
87 size_t count)
88{
89 struct qib_devdata *dd = ppd->dd;
90 int ret;
91 u16 val;
92
93 ret = qib_parse_ushort(buf, &val);
94
95 /*
96 * Set the "intentional" heartbeat enable per either of
97 * "Enable" and "Auto", as these are normally set together.
98 * This bit is consulted when leaving loopback mode,
99 * because entering loopback mode overrides it and automatically
100 * disables heartbeat.
101 */
102 if (ret >= 0)
103 ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
104 if (ret < 0)
105 qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
106 return ret < 0 ? ret : count;
107}
108
109static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
110 size_t count)
111{
112 struct qib_devdata *dd = ppd->dd;
113 int ret = count, r;
114
115 r = dd->f_set_ib_loopback(ppd, buf);
116 if (r < 0)
117 ret = r;
118
119 return ret;
120}
121
122static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
123 size_t count)
124{
125 struct qib_devdata *dd = ppd->dd;
126 int ret;
127 u16 val;
128
129 ret = qib_parse_ushort(buf, &val);
130 if (ret > 0)
131 qib_set_led_override(ppd, val);
132 else
133 qib_dev_err(dd, "attempt to set invalid LED override\n");
134 return ret < 0 ? ret : count;
135}
136
137static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
138{
139 ssize_t ret;
140
141 if (!ppd->statusp)
142 ret = -EINVAL;
143 else
144 ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
145 (unsigned long long) *(ppd->statusp));
146 return ret;
147}
148
149/*
150 * For userland compatibility, these offsets must remain fixed.
151 * They are strings for QIB_STATUS_*
152 */
153static const char *qib_status_str[] = {
154 "Initted",
155 "",
156 "",
157 "",
158 "",
159 "Present",
160 "IB_link_up",
161 "IB_configured",
162 "",
163 "Fatal_Hardware_Error",
164 NULL,
165};
166
167static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
168{
169 int i, any;
170 u64 s;
171 ssize_t ret;
172
173 if (!ppd->statusp) {
174 ret = -EINVAL;
175 goto bail;
176 }
177
178 s = *(ppd->statusp);
179 *buf = '\0';
180 for (any = i = 0; s && qib_status_str[i]; i++) {
181 if (s & 1) {
182 /* if overflow */
183 if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
184 break;
185 if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
186 PAGE_SIZE)
187 break;
188 any = 1;
189 }
190 s >>= 1;
191 }
192 if (any)
193 strlcat(buf, "\n", PAGE_SIZE);
194
195 ret = strlen(buf);
196
197bail:
198 return ret;
199}
200
201/* end of per-port functions */
202
203/*
204 * Start of per-port file structures and support code
205 * Because we are fitting into other infrastructure, we have to supply the
206 * full set of kobject/sysfs_ops structures and routines.
207 */
208#define QIB_PORT_ATTR(name, mode, show, store) \
209 static struct qib_port_attr qib_port_attr_##name = \
210 __ATTR(name, mode, show, store)
211
212struct qib_port_attr {
213 struct attribute attr;
214 ssize_t (*show)(struct qib_pportdata *, char *);
215 ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
216};
217
218QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
219QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
220QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
221 store_hrtbt_enb);
222QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
223QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);
224
225static struct attribute *port_default_attributes[] = {
226 &qib_port_attr_loopback.attr,
227 &qib_port_attr_led_override.attr,
228 &qib_port_attr_hrtbt_enable.attr,
229 &qib_port_attr_status.attr,
230 &qib_port_attr_status_str.attr,
231 NULL
232};
233
234static ssize_t qib_portattr_show(struct kobject *kobj,
235 struct attribute *attr, char *buf)
236{
237 struct qib_port_attr *pattr =
238 container_of(attr, struct qib_port_attr, attr);
239 struct qib_pportdata *ppd =
240 container_of(kobj, struct qib_pportdata, pport_kobj);
241
242 return pattr->show(ppd, buf);
243}
244
245static ssize_t qib_portattr_store(struct kobject *kobj,
246 struct attribute *attr, const char *buf, size_t len)
247{
248 struct qib_port_attr *pattr =
249 container_of(attr, struct qib_port_attr, attr);
250 struct qib_pportdata *ppd =
251 container_of(kobj, struct qib_pportdata, pport_kobj);
252
253 return pattr->store(ppd, buf, len);
254}
255
256static void qib_port_release(struct kobject *kobj)
257{
258 /* nothing to do since memory is freed by qib_free_devdata() */
259}
260
261static const struct sysfs_ops qib_port_ops = {
262 .show = qib_portattr_show,
263 .store = qib_portattr_store,
264};
265
266static struct kobj_type qib_port_ktype = {
267 .release = qib_port_release,
268 .sysfs_ops = &qib_port_ops,
269 .default_attrs = port_default_attributes
270};
271
272/* Start sl2vl */
273
274#define QIB_SL2VL_ATTR(N) \
275 static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
276 .attr = { .name = __stringify(N), .mode = 0444 }, \
277 .sl = N \
278 }
279
280struct qib_sl2vl_attr {
281 struct attribute attr;
282 int sl;
283};
284
285QIB_SL2VL_ATTR(0);
286QIB_SL2VL_ATTR(1);
287QIB_SL2VL_ATTR(2);
288QIB_SL2VL_ATTR(3);
289QIB_SL2VL_ATTR(4);
290QIB_SL2VL_ATTR(5);
291QIB_SL2VL_ATTR(6);
292QIB_SL2VL_ATTR(7);
293QIB_SL2VL_ATTR(8);
294QIB_SL2VL_ATTR(9);
295QIB_SL2VL_ATTR(10);
296QIB_SL2VL_ATTR(11);
297QIB_SL2VL_ATTR(12);
298QIB_SL2VL_ATTR(13);
299QIB_SL2VL_ATTR(14);
300QIB_SL2VL_ATTR(15);
301
302static struct attribute *sl2vl_default_attributes[] = {
303 &qib_sl2vl_attr_0.attr,
304 &qib_sl2vl_attr_1.attr,
305 &qib_sl2vl_attr_2.attr,
306 &qib_sl2vl_attr_3.attr,
307 &qib_sl2vl_attr_4.attr,
308 &qib_sl2vl_attr_5.attr,
309 &qib_sl2vl_attr_6.attr,
310 &qib_sl2vl_attr_7.attr,
311 &qib_sl2vl_attr_8.attr,
312 &qib_sl2vl_attr_9.attr,
313 &qib_sl2vl_attr_10.attr,
314 &qib_sl2vl_attr_11.attr,
315 &qib_sl2vl_attr_12.attr,
316 &qib_sl2vl_attr_13.attr,
317 &qib_sl2vl_attr_14.attr,
318 &qib_sl2vl_attr_15.attr,
319 NULL
320};
321
322static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
323 char *buf)
324{
325 struct qib_sl2vl_attr *sattr =
326 container_of(attr, struct qib_sl2vl_attr, attr);
327 struct qib_pportdata *ppd =
328 container_of(kobj, struct qib_pportdata, sl2vl_kobj);
329 struct qib_ibport *qibp = &ppd->ibport_data;
330
331 return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
332}
333
334static const struct sysfs_ops qib_sl2vl_ops = {
335 .show = sl2vl_attr_show,
336};
337
338static struct kobj_type qib_sl2vl_ktype = {
339 .release = qib_port_release,
340 .sysfs_ops = &qib_sl2vl_ops,
341 .default_attrs = sl2vl_default_attributes
342};
343
344/* End sl2vl */
345
346/* Start diag_counters */
347
348#define QIB_DIAGC_ATTR(N) \
349 static struct qib_diagc_attr qib_diagc_attr_##N = { \
350 .attr = { .name = __stringify(N), .mode = 0444 }, \
351 .counter = offsetof(struct qib_ibport, n_##N) \
352 }
353
354struct qib_diagc_attr {
355 struct attribute attr;
356 size_t counter;
357};
358
359QIB_DIAGC_ATTR(rc_resends);
360QIB_DIAGC_ATTR(rc_acks);
361QIB_DIAGC_ATTR(rc_qacks);
362QIB_DIAGC_ATTR(rc_delayed_comp);
363QIB_DIAGC_ATTR(seq_naks);
364QIB_DIAGC_ATTR(rdma_seq);
365QIB_DIAGC_ATTR(rnr_naks);
366QIB_DIAGC_ATTR(other_naks);
367QIB_DIAGC_ATTR(rc_timeouts);
368QIB_DIAGC_ATTR(loop_pkts);
369QIB_DIAGC_ATTR(pkt_drops);
370QIB_DIAGC_ATTR(dmawait);
371QIB_DIAGC_ATTR(unaligned);
372QIB_DIAGC_ATTR(rc_dupreq);
373QIB_DIAGC_ATTR(rc_seqnak);
374
375static struct attribute *diagc_default_attributes[] = {
376 &qib_diagc_attr_rc_resends.attr,
377 &qib_diagc_attr_rc_acks.attr,
378 &qib_diagc_attr_rc_qacks.attr,
379 &qib_diagc_attr_rc_delayed_comp.attr,
380 &qib_diagc_attr_seq_naks.attr,
381 &qib_diagc_attr_rdma_seq.attr,
382 &qib_diagc_attr_rnr_naks.attr,
383 &qib_diagc_attr_other_naks.attr,
384 &qib_diagc_attr_rc_timeouts.attr,
385 &qib_diagc_attr_loop_pkts.attr,
386 &qib_diagc_attr_pkt_drops.attr,
387 &qib_diagc_attr_dmawait.attr,
388 &qib_diagc_attr_unaligned.attr,
389 &qib_diagc_attr_rc_dupreq.attr,
390 &qib_diagc_attr_rc_seqnak.attr,
391 NULL
392};
393
394static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
395 char *buf)
396{
397 struct qib_diagc_attr *dattr =
398 container_of(attr, struct qib_diagc_attr, attr);
399 struct qib_pportdata *ppd =
400 container_of(kobj, struct qib_pportdata, diagc_kobj);
401 struct qib_ibport *qibp = &ppd->ibport_data;
402
403 return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
404}
405
406static const struct sysfs_ops qib_diagc_ops = {
407 .show = diagc_attr_show,
408};
409
410static struct kobj_type qib_diagc_ktype = {
411 .release = qib_port_release,
412 .sysfs_ops = &qib_diagc_ops,
413 .default_attrs = diagc_default_attributes
414};
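
The diag_counters group avoids writing one show function per counter: each attribute stores an offsetof() into struct qib_ibport, and a single diagc_attr_show() reads the counter through pointer arithmetic. The same dispatch trick in plain C, with an invented stand-in struct:

#include <stddef.h>
#include <stdio.h>

struct counters {           /* stand-in for struct qib_ibport */
	unsigned n_rc_resends;
	unsigned n_pkt_drops;
};

struct attr {
	const char *name;
	size_t counter;         /* offset into struct counters */
};

static const struct attr attrs[] = {
	{ "rc_resends", offsetof(struct counters, n_rc_resends) },
	{ "pkt_drops",  offsetof(struct counters, n_pkt_drops)  },
};

int main(void)
{
	struct counters c = { .n_rc_resends = 7, .n_pkt_drops = 2 };

	/* one show routine serves every attribute, as diagc_attr_show() */
	for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		printf("%s: %u\n", attrs[i].name,
		       *(unsigned *)((char *)&c + attrs[i].counter));
	return 0;
}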
415
416/* End diag_counters */
417
418/* end of per-port file structures and support code */
419
420/*
421 * Start of per-unit (or driver, in some cases, but replicated
422 * per unit) functions (these get a device *)
423 */
424static ssize_t show_rev(struct device *device, struct device_attribute *attr,
425 char *buf)
426{
427 struct qib_ibdev *dev =
428 container_of(device, struct qib_ibdev, ibdev.dev);
429
430 return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
431}
432
433static ssize_t show_hca(struct device *device, struct device_attribute *attr,
434 char *buf)
435{
436 struct qib_ibdev *dev =
437 container_of(device, struct qib_ibdev, ibdev.dev);
438 struct qib_devdata *dd = dd_from_dev(dev);
439 int ret;
440
441 if (!dd->boardname)
442 ret = -EINVAL;
443 else
444 ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
445 return ret;
446}
447
448static ssize_t show_version(struct device *device,
449 struct device_attribute *attr, char *buf)
450{
451 /* The string printed here is already newline-terminated. */
452 return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
453}
454
455static ssize_t show_boardversion(struct device *device,
456 struct device_attribute *attr, char *buf)
457{
458 struct qib_ibdev *dev =
459 container_of(device, struct qib_ibdev, ibdev.dev);
460 struct qib_devdata *dd = dd_from_dev(dev);
461
462 /* The string printed here is already newline-terminated. */
463 return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
464}
465
466
467static ssize_t show_localbus_info(struct device *device,
468 struct device_attribute *attr, char *buf)
469{
470 struct qib_ibdev *dev =
471 container_of(device, struct qib_ibdev, ibdev.dev);
472 struct qib_devdata *dd = dd_from_dev(dev);
473
474 /* The string printed here is already newline-terminated. */
475 return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
476}
477
478
479static ssize_t show_nctxts(struct device *device,
480 struct device_attribute *attr, char *buf)
481{
482 struct qib_ibdev *dev =
483 container_of(device, struct qib_ibdev, ibdev.dev);
484 struct qib_devdata *dd = dd_from_dev(dev);
485
486 /* Return the number of user ports (contexts) available. */
487 return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
488 dd->first_user_ctxt);
489}
490
491static ssize_t show_serial(struct device *device,
492 struct device_attribute *attr, char *buf)
493{
494 struct qib_ibdev *dev =
495 container_of(device, struct qib_ibdev, ibdev.dev);
496 struct qib_devdata *dd = dd_from_dev(dev);
497
498 buf[sizeof dd->serial] = '\0';
499 memcpy(buf, dd->serial, sizeof dd->serial);
500 strcat(buf, "\n");
501 return strlen(buf);
502}
503
504static ssize_t store_chip_reset(struct device *device,
505 struct device_attribute *attr, const char *buf,
506 size_t count)
507{
508 struct qib_ibdev *dev =
509 container_of(device, struct qib_ibdev, ibdev.dev);
510 struct qib_devdata *dd = dd_from_dev(dev);
511 int ret;
512
513 if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
514 ret = -EINVAL;
515 goto bail;
516 }
517
518 ret = qib_reset_device(dd->unit);
519bail:
520 return ret < 0 ? ret : count;
521}
522
523static ssize_t show_logged_errs(struct device *device,
524 struct device_attribute *attr, char *buf)
525{
526 struct qib_ibdev *dev =
527 container_of(device, struct qib_ibdev, ibdev.dev);
528 struct qib_devdata *dd = dd_from_dev(dev);
529 int idx, count;
530
531 /* force consistency with actual EEPROM */
532 if (qib_update_eeprom_log(dd) != 0)
533 return -ENXIO;
534
535 count = 0;
536 for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
537 count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
538 dd->eep_st_errs[idx],
539 idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
540 }
541
542 return count;
543}
544
545/*
546 * Dump tempsense registers in decimal, to ease shell scripts.
547 */
548static ssize_t show_tempsense(struct device *device,
549 struct device_attribute *attr, char *buf)
550{
551 struct qib_ibdev *dev =
552 container_of(device, struct qib_ibdev, ibdev.dev);
553 struct qib_devdata *dd = dd_from_dev(dev);
554 int ret;
555 int idx;
556 u8 regvals[8];
557
558 ret = -ENXIO;
559 for (idx = 0; idx < 8; ++idx) {
560 if (idx == 6)
561 continue;
562 ret = dd->f_tempsense_rd(dd, idx);
563 if (ret < 0)
564 break;
565 regvals[idx] = ret;
566 }
567 if (idx == 8)
568 ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
569 *(signed char *)(regvals),
570 *(signed char *)(regvals + 1),
571 regvals[2], regvals[3],
572 *(signed char *)(regvals + 5),
573 *(signed char *)(regvals + 7));
574 return ret;
575}
576
577/*
578 * end of per-unit (or driver, in some cases, but replicated
579 * per unit) functions
580 */
581
582/* start of per-unit file structures and support code */
583static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
584static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
585static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
586static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
587static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
588static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
589static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
590static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
591static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
592static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
593static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
594
595static struct device_attribute *qib_attributes[] = {
596 &dev_attr_hw_rev,
597 &dev_attr_hca_type,
598 &dev_attr_board_id,
599 &dev_attr_version,
600 &dev_attr_nctxts,
601 &dev_attr_serial,
602 &dev_attr_boardversion,
603 &dev_attr_logged_errors,
604 &dev_attr_tempsense,
605 &dev_attr_localbus_info,
606 &dev_attr_chip_reset,
607};
608
609int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
610 struct kobject *kobj)
611{
612 struct qib_pportdata *ppd;
613 struct qib_devdata *dd = dd_from_ibdev(ibdev);
614 int ret;
615
616 if (!port_num || port_num > dd->num_pports) {
617 qib_dev_err(dd, "Skipping infiniband class with "
618 "invalid port %u\n", port_num);
619 ret = -ENODEV;
620 goto bail;
621 }
622 ppd = &dd->pport[port_num - 1];
623
624 ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
625 "linkcontrol");
626 if (ret) {
627 qib_dev_err(dd, "Skipping linkcontrol sysfs info, "
628 "(err %d) port %u\n", ret, port_num);
629 goto bail;
630 }
631 kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
632
633 ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
634 "sl2vl");
635 if (ret) {
636 qib_dev_err(dd, "Skipping sl2vl sysfs info, "
637 "(err %d) port %u\n", ret, port_num);
638 goto bail_sl;
639 }
640 kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
641
642 ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
643 "diag_counters");
644 if (ret) {
645 qib_dev_err(dd, "Skipping diag_counters sysfs info, "
646 "(err %d) port %u\n", ret, port_num);
647 goto bail_diagc;
648 }
649 kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
650
651 return 0;
652
653bail_diagc:
654 kobject_put(&ppd->sl2vl_kobj);
655bail_sl:
656 kobject_put(&ppd->pport_kobj);
657bail:
658 return ret;
659}
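/*
 * Illustrative note (not part of the original driver): on success the
 * three kobjects above appear as per-port sysfs directories under the
 * IB device node, e.g. (device name and paths assumed for the example):
 *
 *	/sys/class/infiniband/qib0/ports/1/linkcontrol
 *	/sys/class/infiniband/qib0/ports/1/sl2vl
 *	/sys/class/infiniband/qib0/ports/1/diag_counters
 */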
660
661/*
662 * Register and create our files in /sys/class/infiniband.
663 */
664int qib_verbs_register_sysfs(struct qib_devdata *dd)
665{
666 struct ib_device *dev = &dd->verbs_dev.ibdev;
667 int i, ret;
668
669 for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
670 ret = device_create_file(&dev->dev, qib_attributes[i]);
671 if (ret)
672 return ret;
673 }
674
675 return 0;
676}
677
678/*
679 * Unregister and remove our files in /sys/class/infiniband.
680 */
681void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
682{
683 struct qib_pportdata *ppd;
684 int i;
685
686 for (i = 0; i < dd->num_pports; i++) {
687 ppd = &dd->pport[i];
688 kobject_put(&ppd->pport_kobj);
689 kobject_put(&ppd->sl2vl_kobj);
690 }
691}
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
new file mode 100644
index 000000000000..6f31ca5039db
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -0,0 +1,498 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/delay.h>
35#include <linux/pci.h>
36#include <linux/vmalloc.h>
37
38#include "qib.h"
39
40/*
41 * QLogic_IB "Two Wire Serial Interface" driver.
42 * Originally written for a not-quite-i2c serial eeprom, which is
43 * still used on some supported boards. Later boards have added a
44 * variety of other uses, most board-specific, so the bit-banging
45 * part has been split off to this file, while the other parts
46 * have been moved to chip-specific files.
47 *
48 * We have also dropped all pretense of a fully generic interface
49 * (e.g. pretending we don't know whether '1' is the higher voltage),
50 * as the restrictions of the generic i2c layer (e.g. no access from
51 * the driver itself) make it unsuitable for this use.
52 */
53
54#define READ_CMD 1
55#define WRITE_CMD 0
56
57/**
58 * i2c_wait_for_writes - wait for a write
59 * @dd: the qlogic_ib device
60 *
61 * We use this instead of udelay directly, so we can make sure
62 * that previous register writes have been flushed all the way
63 * to the chip. Since we are delaying anyway, the cost doesn't
64 * hurt, and it makes the bit twiddling more regular.
65 */
66static void i2c_wait_for_writes(struct qib_devdata *dd)
67{
68 /*
69 * implicit read of EXTStatus is as good as explicit
70 * read of scratch, if all we want to do is flush
71 * writes.
72 */
73 dd->f_gpio_mod(dd, 0, 0, 0);
74 rmb(); /* inlined, so prevent compiler reordering */
75}
76
77/*
78 * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that
79 * for "almost compliant" modules
80 */
81#define SCL_WAIT_USEC 1000
82
83/* BUF_WAIT is the time the bus must be free between a STOP or ACK and the next START.
84 * Should be 20, but some chips need more.
85 */
86#define TWSI_BUF_WAIT_USEC 60
87
88static void scl_out(struct qib_devdata *dd, u8 bit)
89{
90 u32 mask;
91
92 udelay(1);
93
94 mask = 1UL << dd->gpio_scl_num;
95
96 /* SCL is meant to be open-drain, so never set "OUT", just DIR */
97 dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
98
99 /*
100 * Allow for slow slaves with a simple
101 * delay on the falling edge, sampling on the rise.
102 */
103 if (!bit)
104 udelay(2);
105 else {
106 int rise_usec;
107 for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
108 if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
109 break;
110 udelay(2);
111 }
112 if (rise_usec <= 0)
113 qib_dev_err(dd, "SCL interface stuck low > %d uSec\n",
114 SCL_WAIT_USEC);
115 }
116 i2c_wait_for_writes(dd);
117}
118
119static void sda_out(struct qib_devdata *dd, u8 bit)
120{
121 u32 mask;
122
123 mask = 1UL << dd->gpio_sda_num;
124
125 /* SDA is meant to be open-drain, so never set "OUT", just DIR */
126 dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
127
128 i2c_wait_for_writes(dd);
129 udelay(2);
130}
131
132static u8 sda_in(struct qib_devdata *dd, int wait)
133{
134 int bnum;
135 u32 read_val, mask;
136
137 bnum = dd->gpio_sda_num;
138 mask = (1UL << bnum);
139 /* SDA is meant to be open-drain, so never set "OUT", just DIR */
140 dd->f_gpio_mod(dd, 0, 0, mask);
141 read_val = dd->f_gpio_mod(dd, 0, 0, 0);
142 if (wait)
143 i2c_wait_for_writes(dd);
144 return (read_val & mask) >> bnum;
145}
146
147/**
148 * i2c_ackrcv - see if ack following write is true
149 * @dd: the qlogic_ib device
150 */
151static int i2c_ackrcv(struct qib_devdata *dd)
152{
153 u8 ack_received;
154
155 /* AT ENTRY SCL = LOW */
156 /* change direction, ignore data */
157 ack_received = sda_in(dd, 1);
158 scl_out(dd, 1);
159 ack_received = sda_in(dd, 1) == 0;
160 scl_out(dd, 0);
161 return ack_received;
162}
163
164static void stop_cmd(struct qib_devdata *dd);
165
166/**
167 * rd_byte - read a byte, sending STOP on last, else ACK
168 * @dd: the qlogic_ib device
169 *
170 * Returns byte shifted out of device
171 */
172static int rd_byte(struct qib_devdata *dd, int last)
173{
174 int bit_cntr, data;
175
176 data = 0;
177
178 for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
179 data <<= 1;
180 scl_out(dd, 1);
181 data |= sda_in(dd, 0);
182 scl_out(dd, 0);
183 }
184 if (last) {
185 scl_out(dd, 1);
186 stop_cmd(dd);
187 } else {
188 sda_out(dd, 0);
189 scl_out(dd, 1);
190 scl_out(dd, 0);
191 sda_out(dd, 1);
192 }
193 return data;
194}
195
196/**
197 * wr_byte - write a byte, one bit at a time
198 * @dd: the qlogic_ib device
199 * @data: the byte to write
200 *
201 * Returns 0 if we got the following ack, otherwise 1
202 */
203static int wr_byte(struct qib_devdata *dd, u8 data)
204{
205 int bit_cntr;
206 u8 bit;
207
208 for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
209 bit = (data >> bit_cntr) & 1;
210 sda_out(dd, bit);
211 scl_out(dd, 1);
212 scl_out(dd, 0);
213 }
214 return (!i2c_ackrcv(dd)) ? 1 : 0;
215}
216
217/*
218 * issue TWSI start sequence:
219 * (both clock/data high, clock high, data low while clock is high)
220 */
221static void start_seq(struct qib_devdata *dd)
222{
223 sda_out(dd, 1);
224 scl_out(dd, 1);
225 sda_out(dd, 0);
226 udelay(1);
227 scl_out(dd, 0);
228}
229
230/**
231 * stop_seq - transmit the stop sequence
232 * @dd: the qlogic_ib device
233 *
234 * (both clock/data low, clock high, data high while clock is high)
235 */
236static void stop_seq(struct qib_devdata *dd)
237{
238 scl_out(dd, 0);
239 sda_out(dd, 0);
240 scl_out(dd, 1);
241 sda_out(dd, 1);
242}
243
244/**
245 * stop_cmd - transmit the stop condition
246 * @dd: the qlogic_ib device
247 *
248 * (both clock/data low, clock high, data high while clock is high)
249 */
250static void stop_cmd(struct qib_devdata *dd)
251{
252 stop_seq(dd);
253 udelay(TWSI_BUF_WAIT_USEC);
254}
255
256/**
257 * qib_twsi_reset - reset I2C communication
258 * @dd: the qlogic_ib device
259 */
260
261int qib_twsi_reset(struct qib_devdata *dd)
262{
263 int clock_cycles_left = 9;
264 int was_high = 0;
265 u32 pins, mask;
266
267 /* Both SCL and SDA should be high. If not, there
268 * is something wrong.
269 */
270 mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num);
271
272 /*
273 * Force pins to desired innocuous state.
274 * This is the default power-on state with out=0 and dir=0,
275 * so it is tri-stated and should float high (barring HW problems).
276 */
277 dd->f_gpio_mod(dd, 0, 0, mask);
278
279 /*
280 * Clock nine times to get all listeners into a sane state.
281 * If SDA does not go high at any point, we are wedged.
282 * One vendor recommends then issuing START followed by STOP.
283 * We cannot use our "normal" functions to do that, because
284 * if SCL drops between them, another vendor's part will
285 * wedge, dropping SDA and keeping it low forever, at the end of
286 * the next transaction (even if it was not the device addressed).
287 * So our START and STOP take place with SCL held high.
288 */
289 while (clock_cycles_left--) {
290 scl_out(dd, 0);
291 scl_out(dd, 1);
292 /* Note if SDA is high, but keep clocking to sync slave */
293 was_high |= sda_in(dd, 0);
294 }
295
296 if (was_high) {
297 /*
298 * We saw a high, which we hope means the slave is sync'd.
299 * Issue START, STOP, pause for T_BUF.
300 */
301
302 pins = dd->f_gpio_mod(dd, 0, 0, 0);
303 if ((pins & mask) != mask)
304 qib_dev_err(dd, "GPIO pins not at rest: %d\n",
305 pins & mask);
306 /* Drop SDA to issue START */
307 udelay(1); /* Guarantee .6 uSec setup */
308 sda_out(dd, 0);
309 udelay(1); /* Guarantee .6 uSec hold */
310 /* At this point, SCL is high, SDA low. Raise SDA for STOP */
311 sda_out(dd, 1);
312 udelay(TWSI_BUF_WAIT_USEC);
313 }
314
315 return !was_high;
316}
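
/*
 * Illustrative sketch (not part of the original driver): a caller-side
 * retry wrapper around qib_twsi_reset().  The helper name and the retry
 * count are assumptions for the example only.
 */
static int example_twsi_bus_recover(struct qib_devdata *dd)
{
	int tries = 3;	/* assumed limit, not taken from the driver */

	/* qib_twsi_reset() returns 0 once SDA has been seen high */
	while (qib_twsi_reset(dd) && --tries)
		udelay(TWSI_BUF_WAIT_USEC);
	return tries ? 0 : -EIO;
}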
317
318#define QIB_TWSI_START 0x100
319#define QIB_TWSI_STOP 0x200
320
321/* Write byte to TWSI, optionally prefixed with START or suffixed with
322 * STOP.
323 * returns 0 if OK (ACK received), else != 0
324 */
325static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
326{
327 int ret = 1;
328 if (flags & QIB_TWSI_START)
329 start_seq(dd);
330
331 ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */
332
333 if (flags & QIB_TWSI_STOP)
334 stop_cmd(dd);
335 return ret;
336}
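
/*
 * For example (illustrative usage only), a single-byte transaction to a
 * device that needs no register offset can combine both flags:
 *
 *	qib_twsi_wr(dd, data, QIB_TWSI_START | QIB_TWSI_STOP);
 *
 * The block read/write routines below are the real callers.
 */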
337
338/* Added functionality for IBA7220-based cards */
339#define QIB_TEMP_DEV 0x98
340
341/*
342 * qib_twsi_blk_rd
343 * Formerly called qib_eeprom_internal_read, and only used for eeprom,
344 * but now the general interface for data transfer from twsi devices.
345 * One vestige of its former role is that it recognizes a device
346 * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
347 * which responded to all TWSI device codes, interpreting them as
348 * an address within the device. On all other devices on boards handled by
349 * this driver, the device is followed by a one-byte "address" which selects
350 * the "register" or "offset" within the device from which data should
351 * be read.
352 */
353int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr,
354 void *buffer, int len)
355{
356 int ret;
357 u8 *bp = buffer;
358
359 ret = 1;
360
361 if (dev == QIB_TWSI_NO_DEV) {
362 /* legacy not-really-I2C */
363 addr = (addr << 1) | READ_CMD;
364 ret = qib_twsi_wr(dd, addr, QIB_TWSI_START);
365 } else {
366 /* Actual I2C */
367 ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
368 if (ret) {
369 stop_cmd(dd);
370 ret = 1;
371 goto bail;
372 }
373 /*
374 * SFF spec claims we do _not_ stop after the addr
375 * but simply issue a start with the "read" dev-addr.
376 * Since we are implicitly waiting for ACK here,
377 * we need t_buf (nominally 20uSec) before that start,
378 * and cannot rely on the delay built into the STOP
379 */
380 ret = qib_twsi_wr(dd, addr, 0);
381 udelay(TWSI_BUF_WAIT_USEC);
382
383 if (ret) {
384 qib_dev_err(dd,
385 "Failed to write interface read addr %02X\n",
386 addr);
387 ret = 1;
388 goto bail;
389 }
390 ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
391 }
392 if (ret) {
393 stop_cmd(dd);
394 ret = 1;
395 goto bail;
396 }
397
398 /*
399 * Block devices keep clocking data out as long as we ACK,
400 * automatically incrementing the address. Some have "pages"
401 * whose boundaries will not be crossed, but the handling
402 * of these is left to the caller, who is in a better
403 * position to know.
404 */
405 while (len-- > 0) {
406 /*
407 * Get and store data, sending ACK if length remaining,
408 * else STOP
409 */
410 *bp++ = rd_byte(dd, !len);
411 }
412
413 ret = 0;
414
415bail:
416 return ret;
417}
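
/*
 * Illustrative sketch (not part of the original driver): reading the
 * first bytes of a TWSI eeprom with the routine above.  The 0xA0 device
 * code, the helper name and the length are assumptions for the example;
 * real callers pass the device code appropriate to the specific board.
 */
static int example_read_eeprom_header(struct qib_devdata *dd, u8 *hdr)
{
	/* dev | READ_CMD / WRITE_CMD framing is handled internally; we
	 * pass only the 8-bit device code and a byte offset */
	return qib_twsi_blk_rd(dd, 0xA0, 0, hdr, 16) ? -EIO : 0;
}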
418
419/*
420 * qib_twsi_blk_wr
421 * Formerly called qib_eeprom_internal_write, and only used for eeprom,
422 * but now the general interface for data transfer to twsi devices.
423 * One vestige of its former role is that it recognizes a device
424 * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
425 * which responded to all TWSI device codes, interpreting them as
426 * an address within the device. On all other devices on boards handled by
427 * this driver, the device is followed by a one-byte "address" which selects
428 * the "register" or "offset" within the device to which data should
429 * be written.
430 */
431int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
432 const void *buffer, int len)
433{
434 int sub_len;
435 const u8 *bp = buffer;
436 int max_wait_time, i;
437 int ret;
438 ret = 1;
439
440 while (len > 0) {
441 if (dev == QIB_TWSI_NO_DEV) {
442 if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD,
443 QIB_TWSI_START)) {
444 goto failed_write;
445 }
446 } else {
447 /* Real I2C */
448 if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
449 goto failed_write;
450 ret = qib_twsi_wr(dd, addr, 0);
451 if (ret) {
452 qib_dev_err(dd, "Failed to write interface"
453 " write addr %02X\n", addr);
454 goto failed_write;
455 }
456 }
457
458 sub_len = min(len, 4);
459 addr += sub_len;
460 len -= sub_len;
461
462 for (i = 0; i < sub_len; i++)
463 if (qib_twsi_wr(dd, *bp++, 0))
464 goto failed_write;
465
466 stop_cmd(dd);
467
468 /*
469 * Wait for the write to complete by polling for a successful
470 * read (the chip replies with a zero after the write
471 * cmd completes, and before it writes to the eeprom).
472 * The startcmd for the read will fail the ack until
473 * the writes have completed. We do this inline to avoid
474 * the debug prints that are in the real read routine
475 * if the startcmd fails.
476 * We also use the proper device address, so it doesn't matter
477 * whether we have real eeprom_dev. Legacy likes any address.
478 */
479 max_wait_time = 100;
480 while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) {
481 stop_cmd(dd);
482 if (!--max_wait_time)
483 goto failed_write;
484 }
485 /* now read (and ignore) the resulting byte */
486 rd_byte(dd, 1);
487 }
488
489 ret = 0;
490 goto bail;
491
492failed_write:
493 stop_cmd(dd);
494 ret = 1;
495
496bail:
497 return ret;
498}
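
/*
 * Illustrative sketch (not part of the original driver): a
 * write-then-verify helper built on the two block routines above.
 * qib_twsi_blk_wr() already splits the transfer into 4-byte bursts and
 * polls for write completion, so the caller supplies the whole buffer.
 * All names and the size limit are assumptions for the example.
 */
static int example_eeprom_update(struct qib_devdata *dd, int dev,
				 int addr, const u8 *data, int len)
{
	u8 check[64];	/* assumed upper bound for the example */

	if (len > (int) sizeof(check))
		return -EINVAL;
	if (qib_twsi_blk_wr(dd, dev, addr, data, len))
		return -EIO;
	if (qib_twsi_blk_rd(dd, dev, addr, check, len))
		return -EIO;
	return memcmp(check, data, len) ? -EIO : 0;
}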
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
new file mode 100644
index 000000000000..f7eb1ddff5f3
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -0,0 +1,557 @@
1/*
2 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/spinlock.h>
34#include <linux/pci.h>
35#include <linux/io.h>
36#include <linux/delay.h>
37#include <linux/netdevice.h>
38#include <linux/vmalloc.h>
39
40#include "qib.h"
41
42static unsigned qib_hol_timeout_ms = 3000;
43module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
44MODULE_PARM_DESC(hol_timeout_ms,
45 "duration of user app suspension after link failure");
46
47unsigned qib_sdma_fetch_arb = 1;
48module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
49MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
50
51/**
52 * qib_disarm_piobufs - cancel a range of PIO buffers
53 * @dd: the qlogic_ib device
54 * @first: the first PIO buffer to cancel
55 * @cnt: the number of PIO buffers to cancel
56 *
57 * Cancel a range of PIO buffers. Used at user process close,
58 * in case it died while writing to a PIO buffer.
59 */
60void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
61{
62 unsigned long flags;
63 unsigned i;
64 unsigned last;
65
66 last = first + cnt;
67 spin_lock_irqsave(&dd->pioavail_lock, flags);
68 for (i = first; i < last; i++) {
69 __clear_bit(i, dd->pio_need_disarm);
70 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
71 }
72 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
73}
74
75/*
76 * This is called by a user process when it sees the DISARM_BUFS event
77 * bit is set.
78 */
79int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
80{
81 struct qib_devdata *dd = rcd->dd;
82 unsigned i;
83 unsigned last;
84 unsigned n = 0;
85
86 last = rcd->pio_base + rcd->piocnt;
87 /*
88 * Don't need uctxt_lock here, since user has called in to us.
89 * Clear at start in case more interrupts set bits while we
90 * are disarming
91 */
92 if (rcd->user_event_mask) {
93 /*
94 * subctxt_cnt is 0 if not shared, so do base
95 * separately, first, then remaining subctxt, if any
96 */
97 clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
98 for (i = 1; i < rcd->subctxt_cnt; i++)
99 clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
100 &rcd->user_event_mask[i]);
101 }
102 spin_lock_irq(&dd->pioavail_lock);
103 for (i = rcd->pio_base; i < last; i++) {
104 if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
105 n++;
106 dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
107 }
108 }
109 spin_unlock_irq(&dd->pioavail_lock);
110 return 0;
111}
112
113static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
114{
115 struct qib_pportdata *ppd;
116 unsigned pidx;
117
118 for (pidx = 0; pidx < dd->num_pports; pidx++) {
119 ppd = dd->pport + pidx;
120 if (i >= ppd->sdma_state.first_sendbuf &&
121 i < ppd->sdma_state.last_sendbuf)
122 return ppd;
123 }
124 return NULL;
125}
126
127/*
128 * Return true if send buffer is being used by a user context.
129 * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect
130 */
131static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
132{
133 struct qib_ctxtdata *rcd;
134 unsigned ctxt;
135 int ret = 0;
136
137 spin_lock(&dd->uctxt_lock);
138 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
139 rcd = dd->rcd[ctxt];
140 if (!rcd || bufn < rcd->pio_base ||
141 bufn >= rcd->pio_base + rcd->piocnt)
142 continue;
143 if (rcd->user_event_mask) {
144 int i;
145 /*
146 * subctxt_cnt is 0 if not shared, so do base
147 * separately, first, then remaining subctxt, if any
148 */
149 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
150 &rcd->user_event_mask[0]);
151 for (i = 1; i < rcd->subctxt_cnt; i++)
152 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
153 &rcd->user_event_mask[i]);
154 }
155 ret = 1;
156 break;
157 }
158 spin_unlock(&dd->uctxt_lock);
159
160 return ret;
161}
162
163/*
164 * Disarm a set of send buffers. If the buffer might be actively being
165 * written to, mark the buffer to be disarmed later when it is not being
166 * written to.
167 *
168 * This should only be called from the IRQ error handler.
169 */
170void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
171 unsigned cnt)
172{
173 struct qib_pportdata *ppd, *pppd[dd->num_pports];
174 unsigned i;
175 unsigned long flags;
176
177 for (i = 0; i < dd->num_pports; i++)
178 pppd[i] = NULL;
179
180 for (i = 0; i < cnt; i++) {
181 int which;
182 if (!test_bit(i, mask))
183 continue;
184 /*
185 * If the buffer is owned by the DMA hardware,
186 * reset the DMA engine.
187 */
188 ppd = is_sdma_buf(dd, i);
189 if (ppd) {
190 pppd[ppd->port] = ppd;
191 continue;
192 }
193 /*
194 * If the kernel is writing the buffer or the buffer is
195 * owned by a user process, we can't clear it yet.
196 */
197 spin_lock_irqsave(&dd->pioavail_lock, flags);
198 if (test_bit(i, dd->pio_writing) ||
199 (!test_bit(i << 1, dd->pioavailkernel) &&
200 find_ctxt(dd, i))) {
201 __set_bit(i, dd->pio_need_disarm);
202 which = 0;
203 } else {
204 which = 1;
205 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
206 }
207 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
208 }
209
210 /* do cancel_sends once per port that had sdma piobufs in error */
211 for (i = 0; i < dd->num_pports; i++)
212 if (pppd[i])
213 qib_cancel_sends(pppd[i]);
214}
215
216/**
217 * update_send_bufs - update shadow copy of the PIO availability map
218 * @dd: the qlogic_ib device
219 *
220 * called whenever our local copy indicates we have run out of send buffers
221 */
222static void update_send_bufs(struct qib_devdata *dd)
223{
224 unsigned long flags;
225 unsigned i;
226 const unsigned piobregs = dd->pioavregs;
227
228 /*
229 * If the generation (check) bits have changed, then we update the
230 * busy bit for the corresponding PIO buffer. This algorithm will
231 * modify positions to the value they already have in some cases
232 * (i.e., no change), but it's faster than changing only the bits
233 * that have changed.
234 *
235 * We would like to do this atomically, to avoid spinlocks in the
236 * critical send path, but that's not really possible, given the
237 * type of changes, and that this routine could be called on
238 * multiple CPUs simultaneously, so we lock in this routine only,
239 * to avoid conflicting updates; all we change is the shadow, and
240 * it's a single 64 bit memory location, so by definition the update
241 * is atomic in terms of what other CPUs can see in testing the
242 * bits. The spin_lock overhead isn't too bad, since it only
243 * happens when all buffers are in use, so only cpu overhead, not
244 * latency or bandwidth is affected.
245 */
246 if (!dd->pioavailregs_dma)
247 return;
248 spin_lock_irqsave(&dd->pioavail_lock, flags);
249 for (i = 0; i < piobregs; i++) {
250 u64 pchbusy, pchg, piov, pnew;
251
252 piov = le64_to_cpu(dd->pioavailregs_dma[i]);
253 pchg = dd->pioavailkernel[i] &
254 ~(dd->pioavailshadow[i] ^ piov);
255 pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
256 if (pchg && (pchbusy & dd->pioavailshadow[i])) {
257 pnew = dd->pioavailshadow[i] & ~pchbusy;
258 pnew |= piov & pchbusy;
259 dd->pioavailshadow[i] = pnew;
260 }
261 }
262 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
263}
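
/*
 * Illustrative sketch (not part of the original driver): the shadow
 * keeps two bits per send buffer, as qib_getsendbuf_range() below makes
 * explicit -- the generation ("check") bit at 2*n and the busy bit at
 * 2*n + 1.  Helpers like these (names assumed for the example) show the
 * mapping:
 */
static inline int example_shadow_busy(const unsigned long *shadow,
				      unsigned n)
{
	return test_bit(2 * n + 1, shadow);	/* busy bit */
}

static inline int example_shadow_gen(const unsigned long *shadow,
				     unsigned n)
{
	return test_bit(2 * n, shadow);		/* generation bit */
}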
264
265/*
266 * Debugging code and stats updates if no pio buffers available.
267 */
268static noinline void no_send_bufs(struct qib_devdata *dd)
269{
270 dd->upd_pio_shadow = 1;
271
272 /* not atomic, but if we lose a stat count in a while, that's OK */
273 qib_stats.sps_nopiobufs++;
274}
275
276/*
277 * Common code for normal driver send buffer allocation, and reserved
278 * allocation.
279 *
280 * Do appropriate marking as busy, etc.
281 * Returns buffer pointer if one is found, otherwise NULL.
282 */
283u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
284 u32 first, u32 last)
285{
286 unsigned i, j, updated = 0;
287 unsigned nbufs;
288 unsigned long flags;
289 unsigned long *shadow = dd->pioavailshadow;
290 u32 __iomem *buf;
291
292 if (!(dd->flags & QIB_PRESENT))
293 return NULL;
294
295 nbufs = last - first + 1; /* number in range to check */
296 if (dd->upd_pio_shadow) {
297 /*
298 * Minor optimization. If we had no buffers on last call,
299 * start out by doing the update; continue and do the scan even
300 * if no buffers were updated, to be paranoid.
301 */
302 update_send_bufs(dd);
303 updated++;
304 }
305 i = first;
306rescan:
307 /*
308 * While test_and_set_bit() is atomic, we do that and then the
309 * change_bit(), and the pair is not. See if this is the cause
310 * of the remaining armlaunch errors.
311 */
312 spin_lock_irqsave(&dd->pioavail_lock, flags);
313 for (j = 0; j < nbufs; j++, i++) {
314 if (i > last)
315 i = first;
316 if (__test_and_set_bit((2 * i) + 1, shadow))
317 continue;
318 /* flip generation bit */
319 __change_bit(2 * i, shadow);
320 /* remember that the buffer can be written to now */
321 __set_bit(i, dd->pio_writing);
322 break;
323 }
324 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
325
326 if (j == nbufs) {
327 if (!updated) {
328 /*
329 * First time through; shadow exhausted, but buffers may
330 * be available; try an update and then rescan.
331 */
332 update_send_bufs(dd);
333 updated++;
334 i = first;
335 goto rescan;
336 }
337 no_send_bufs(dd);
338 buf = NULL;
339 } else {
340 if (i < dd->piobcnt2k)
341 buf = (u32 __iomem *)(dd->pio2kbase +
342 i * dd->palign);
343 else
344 buf = (u32 __iomem *)(dd->pio4kbase +
345 (i - dd->piobcnt2k) * dd->align4k);
346 if (pbufnum)
347 *pbufnum = i;
348 dd->upd_pio_shadow = 0;
349 }
350
351 return buf;
352}
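
/*
 * Illustrative sketch (not part of the original driver): the usual
 * allocate/fill/release cycle around qib_getsendbuf_range() and
 * qib_sendbuf_done().  The helper name and the plain writel() copy are
 * assumptions for the example; real senders use chip-specific copy
 * routines.
 */
static void example_pio_send(struct qib_devdata *dd, const u32 *hdr,
			     unsigned hdrwords)
{
	u32 bufnum;
	u32 __iomem *piobuf;
	unsigned i;

	piobuf = qib_getsendbuf_range(dd, &bufnum, 0, dd->piobcnt2k - 1);
	if (!piobuf)
		return;	/* no buffer; a real caller would back off and retry */

	/* marked "writing" by qib_getsendbuf_range(), so safe to fill */
	for (i = 0; i < hdrwords; i++)
		writel(hdr[i], piobuf + i);

	/* clears pio_writing and applies any deferred disarm */
	qib_sendbuf_done(dd, bufnum);
}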
353
354/*
355 * Record that the caller is finished writing to the buffer so we don't
356 * disarm it while it is being written and disarm it now if needed.
357 */
358void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
359{
360 unsigned long flags;
361
362 spin_lock_irqsave(&dd->pioavail_lock, flags);
363 __clear_bit(n, dd->pio_writing);
364 if (__test_and_clear_bit(n, dd->pio_need_disarm))
365 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
366 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
367}
368
369/**
370 * qib_chg_pioavailkernel - change which send buffers are available for kernel
371 * @dd: the qlogic_ib device
372 * @start: the starting send buffer number
373 * @len: the number of send buffers
374 * @avail: true if the buffers are available for kernel use, false otherwise
375 */
376void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
377 unsigned len, u32 avail, struct qib_ctxtdata *rcd)
378{
379 unsigned long flags;
380 unsigned end;
381 unsigned ostart = start;
382
383 /* There are two bits per send buffer (busy and generation) */
384 start *= 2;
385 end = start + len * 2;
386
387 spin_lock_irqsave(&dd->pioavail_lock, flags);
388 /* Set or clear the busy bit in the shadow. */
389 while (start < end) {
390 if (avail) {
391 unsigned long dma;
392 int i;
393
394 /*
395 * The BUSY bit will never be set, because we disarm
396 * the user buffers before we hand them back to the
397 * kernel. We do have to make sure the generation
398 * bit is set correctly in shadow, since it could
399 * have changed many times while allocated to user.
400 * We can't use the bitmap functions on the full
401 * dma array because it is always little-endian, so
402 * we have to flip to host-order first.
403 * BITS_PER_LONG is slightly wrong, since it's
404 * always 64 bits per register in the chip...
405 * We only work on 64-bit kernels, so that's OK.
406 */
407 i = start / BITS_PER_LONG;
408 __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
409 dd->pioavailshadow);
410 dma = (unsigned long)
411 le64_to_cpu(dd->pioavailregs_dma[i]);
412 if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
413 start) % BITS_PER_LONG, &dma))
414 __set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
415 start, dd->pioavailshadow);
416 else
417 __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
418 + start, dd->pioavailshadow);
419 __set_bit(start, dd->pioavailkernel);
420 } else {
421 __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
422 dd->pioavailshadow);
423 __clear_bit(start, dd->pioavailkernel);
424 }
425 start += 2;
426 }
427
428 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
429
430 dd->f_txchk_change(dd, ostart, len, avail, rcd);
431}
432
433/*
434 * Flush all sends that might be in the ready to send state, as well as any
435 * that are in the process of being sent. Used whenever we need to be
436 * sure the send side is idle. Cleans up all buffer state by canceling
437 * all pio buffers, and issuing an abort, which cleans up anything in the
438 * launch fifo. The cancel is superfluous on some chip versions, but
439 * it's safer to always do it.
440 * PIOAvail bits are updated by the chip as if a normal send had happened.
441 */
442void qib_cancel_sends(struct qib_pportdata *ppd)
443{
444 struct qib_devdata *dd = ppd->dd;
445 struct qib_ctxtdata *rcd;
446 unsigned long flags;
447 unsigned ctxt;
448 unsigned i;
449 unsigned last;
450
451 /*
452 * Tell PSM to disarm buffers again before trying to reuse them.
453 * We need to be sure the rcd doesn't change out from under us
454 * while we do so. We hold the two locks sequentially. We might
455 * needlessly set some need_disarm bits as a result, if the
456 * context is closed after we release the uctxt_lock, but that's
457 * fairly benign, and safer than nesting the locks.
458 */
459 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
460 spin_lock_irqsave(&dd->uctxt_lock, flags);
461 rcd = dd->rcd[ctxt];
462 if (rcd && rcd->ppd == ppd) {
463 last = rcd->pio_base + rcd->piocnt;
464 if (rcd->user_event_mask) {
465 /*
466 * subctxt_cnt is 0 if not shared, so do base
467 * separately, first, then remaining subctxt,
468 * if any
469 */
470 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
471 &rcd->user_event_mask[0]);
472 for (i = 1; i < rcd->subctxt_cnt; i++)
473 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
474 &rcd->user_event_mask[i]);
475 }
476 i = rcd->pio_base;
477 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
478 spin_lock_irqsave(&dd->pioavail_lock, flags);
479 for (; i < last; i++)
480 __set_bit(i, dd->pio_need_disarm);
481 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
482 } else
483 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
484 }
485
486 if (!(dd->flags & QIB_HAS_SEND_DMA))
487 dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
488 QIB_SENDCTRL_FLUSH);
489}
490
491/*
492 * Force an update of the in-memory copy of the pioavail registers, when
493 * needed for any of a variety of reasons.
494 * If already off, this routine is a nop, on the assumption that the
495 * caller (or set of callers) will "do the right thing".
496 * This is a per-device operation, so just the first port.
497 */
498void qib_force_pio_avail_update(struct qib_devdata *dd)
499{
500 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
501}
502
503void qib_hol_down(struct qib_pportdata *ppd)
504{
505 /*
506 * Cancel sends when the link goes DOWN so that we aren't doing it
507 * at INIT when we might be trying to send SMI packets.
508 */
509 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
510 qib_cancel_sends(ppd);
511}
512
513/*
514 * Link is at INIT.
515 * We start the HoL timer so we can detect stuck packets blocking SMP replies.
516 * Timer may already be running, so use mod_timer, not add_timer.
517 */
518void qib_hol_init(struct qib_pportdata *ppd)
519{
520 if (ppd->hol_state != QIB_HOL_INIT) {
521 ppd->hol_state = QIB_HOL_INIT;
522 mod_timer(&ppd->hol_timer,
523 jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
524 }
525}
526
527/*
528 * Link is up; continue any user processes. Let the timer
529 * keep running if set; it becomes a nop when it sees the
530 * link is up.
531 */
532void qib_hol_up(struct qib_pportdata *ppd)
533{
534 ppd->hol_state = QIB_HOL_UP;
535}
536
537/*
538 * This is only called via the timer.
539 */
540void qib_hol_event(unsigned long opaque)
541{
542 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
543
544 /* If hardware error, etc, skip. */
545 if (!(ppd->dd->flags & QIB_INITTED))
546 return;
547
548 if (ppd->hol_state != QIB_HOL_UP) {
549 /*
550 * Try to flush sends in case a stuck packet is blocking
551 * SMP replies.
552 */
553 qib_hol_down(ppd);
554 mod_timer(&ppd->hol_timer,
555 jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
556 }
557}
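
/*
 * Illustrative sketch (not part of the original driver): the timer that
 * fires qib_hol_event() is presumably set up during port init, roughly
 * like this with the timer API of this kernel generation (helper name
 * assumed for the example):
 */
static inline void example_hol_timer_setup(struct qib_pportdata *ppd)
{
	init_timer(&ppd->hol_timer);
	ppd->hol_timer.function = qib_hol_event;
	ppd->hol_timer.data = (unsigned long) ppd;
	/* armed later by qib_hol_init() when the link reaches INIT */
}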
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
new file mode 100644
index 000000000000..6c7fe78cca64
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -0,0 +1,555 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include "qib.h"
36
37/* cut down ridiculously long IB macro names */
38#define OP(x) IB_OPCODE_UC_##x
39
40/**
41 * qib_make_uc_req - construct a request packet (SEND, RDMA write)
42 * @qp: a pointer to the QP
43 *
44 * Return 1 if constructed; otherwise, return 0.
45 */
46int qib_make_uc_req(struct qib_qp *qp)
47{
48 struct qib_other_headers *ohdr;
49 struct qib_swqe *wqe;
50 unsigned long flags;
51 u32 hwords;
52 u32 bth0;
53 u32 len;
54 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
55 int ret = 0;
56
57 spin_lock_irqsave(&qp->s_lock, flags);
58
59 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
60 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
61 goto bail;
62 /* We are in the error state, flush the work request. */
63 if (qp->s_last == qp->s_head)
64 goto bail;
65 /* If DMAs are in progress, we can't flush immediately. */
66 if (atomic_read(&qp->s_dma_busy)) {
67 qp->s_flags |= QIB_S_WAIT_DMA;
68 goto bail;
69 }
70 wqe = get_swqe_ptr(qp, qp->s_last);
71 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
72 goto done;
73 }
74
75 ohdr = &qp->s_hdr.u.oth;
76 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
77 ohdr = &qp->s_hdr.u.l.oth;
78
79 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
80 hwords = 5;
81 bth0 = 0;
82
83 /* Get the next send request. */
84 wqe = get_swqe_ptr(qp, qp->s_cur);
85 qp->s_wqe = NULL;
86 switch (qp->s_state) {
87 default:
88 if (!(ib_qib_state_ops[qp->state] &
89 QIB_PROCESS_NEXT_SEND_OK))
90 goto bail;
91 /* Check if send work queue is empty. */
92 if (qp->s_cur == qp->s_head)
93 goto bail;
94 /*
95 * Start a new request.
96 */
97 wqe->psn = qp->s_next_psn;
98 qp->s_psn = qp->s_next_psn;
99 qp->s_sge.sge = wqe->sg_list[0];
100 qp->s_sge.sg_list = wqe->sg_list + 1;
101 qp->s_sge.num_sge = wqe->wr.num_sge;
102 qp->s_sge.total_len = wqe->length;
103 len = wqe->length;
104 qp->s_len = len;
105 switch (wqe->wr.opcode) {
106 case IB_WR_SEND:
107 case IB_WR_SEND_WITH_IMM:
108 if (len > pmtu) {
109 qp->s_state = OP(SEND_FIRST);
110 len = pmtu;
111 break;
112 }
113 if (wqe->wr.opcode == IB_WR_SEND)
114 qp->s_state = OP(SEND_ONLY);
115 else {
116 qp->s_state =
117 OP(SEND_ONLY_WITH_IMMEDIATE);
118 /* Immediate data comes after the BTH */
119 ohdr->u.imm_data = wqe->wr.ex.imm_data;
120 hwords += 1;
121 }
122 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
123 bth0 |= IB_BTH_SOLICITED;
124 qp->s_wqe = wqe;
125 if (++qp->s_cur >= qp->s_size)
126 qp->s_cur = 0;
127 break;
128
129 case IB_WR_RDMA_WRITE:
130 case IB_WR_RDMA_WRITE_WITH_IMM:
131 ohdr->u.rc.reth.vaddr =
132 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
133 ohdr->u.rc.reth.rkey =
134 cpu_to_be32(wqe->wr.wr.rdma.rkey);
135 ohdr->u.rc.reth.length = cpu_to_be32(len);
136 hwords += sizeof(struct ib_reth) / 4;
137 if (len > pmtu) {
138 qp->s_state = OP(RDMA_WRITE_FIRST);
139 len = pmtu;
140 break;
141 }
142 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
143 qp->s_state = OP(RDMA_WRITE_ONLY);
144 else {
145 qp->s_state =
146 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
147 /* Immediate data comes after the RETH */
148 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
149 hwords += 1;
150 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
151 bth0 |= IB_BTH_SOLICITED;
152 }
153 qp->s_wqe = wqe;
154 if (++qp->s_cur >= qp->s_size)
155 qp->s_cur = 0;
156 break;
157
158 default:
159 goto bail;
160 }
161 break;
162
163 case OP(SEND_FIRST):
164 qp->s_state = OP(SEND_MIDDLE);
165 /* FALLTHROUGH */
166 case OP(SEND_MIDDLE):
167 len = qp->s_len;
168 if (len > pmtu) {
169 len = pmtu;
170 break;
171 }
172 if (wqe->wr.opcode == IB_WR_SEND)
173 qp->s_state = OP(SEND_LAST);
174 else {
175 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
176 /* Immediate data comes after the BTH */
177 ohdr->u.imm_data = wqe->wr.ex.imm_data;
178 hwords += 1;
179 }
180 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
181 bth0 |= IB_BTH_SOLICITED;
182 qp->s_wqe = wqe;
183 if (++qp->s_cur >= qp->s_size)
184 qp->s_cur = 0;
185 break;
186
187 case OP(RDMA_WRITE_FIRST):
188 qp->s_state = OP(RDMA_WRITE_MIDDLE);
189 /* FALLTHROUGH */
190 case OP(RDMA_WRITE_MIDDLE):
191 len = qp->s_len;
192 if (len > pmtu) {
193 len = pmtu;
194 break;
195 }
196 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
197 qp->s_state = OP(RDMA_WRITE_LAST);
198 else {
199 qp->s_state =
200 OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
201 /* Immediate data comes after the BTH */
202 ohdr->u.imm_data = wqe->wr.ex.imm_data;
203 hwords += 1;
204 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
205 bth0 |= IB_BTH_SOLICITED;
206 }
207 qp->s_wqe = wqe;
208 if (++qp->s_cur >= qp->s_size)
209 qp->s_cur = 0;
210 break;
211 }
212 qp->s_len -= len;
213 qp->s_hdrwords = hwords;
214 qp->s_cur_sge = &qp->s_sge;
215 qp->s_cur_size = len;
216 qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
217 qp->s_next_psn++ & QIB_PSN_MASK);
218done:
219 ret = 1;
220 goto unlock;
221
222bail:
223 qp->s_flags &= ~QIB_S_BUSY;
224unlock:
225 spin_unlock_irqrestore(&qp->s_lock, flags);
226 return ret;
227}
228
229/**
230 * qib_uc_rcv - handle an incoming UC packet
231 * @ibp: the port the packet came in on
232 * @hdr: the header of the packet
233 * @has_grh: true if the packet has a GRH
234 * @data: the packet data
235 * @tlen: the length of the packet
236 * @qp: the QP for this packet.
237 *
238 * This is called from qib_qp_rcv() to process an incoming UC packet
239 * for the given QP.
240 * Called at interrupt level.
241 */
242void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
243 int has_grh, void *data, u32 tlen, struct qib_qp *qp)
244{
245 struct qib_other_headers *ohdr;
246 unsigned long flags;
247 u32 opcode;
248 u32 hdrsize;
249 u32 psn;
250 u32 pad;
251 struct ib_wc wc;
252 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
253 struct ib_reth *reth;
254 int ret;
255
256 /* Check for GRH */
257 if (!has_grh) {
258 ohdr = &hdr->u.oth;
259 hdrsize = 8 + 12; /* LRH + BTH */
260 } else {
261 ohdr = &hdr->u.l.oth;
262 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
263 }
264
265 opcode = be32_to_cpu(ohdr->bth[0]);
266 spin_lock_irqsave(&qp->s_lock, flags);
267 if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
268 goto sunlock;
269 spin_unlock_irqrestore(&qp->s_lock, flags);
270
271 psn = be32_to_cpu(ohdr->bth[2]);
272 opcode >>= 24;
273 memset(&wc, 0, sizeof wc);
274
275 /* Prevent simultaneous processing after APM on different CPUs */
276 spin_lock(&qp->r_lock);
277
278 /* Compare the PSN versus the expected PSN. */
279 if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
280 /*
281 * Handle a sequence error.
282 * Silently drop any current message.
283 */
284 qp->r_psn = psn;
285inv:
286 if (qp->r_state == OP(SEND_FIRST) ||
287 qp->r_state == OP(SEND_MIDDLE)) {
288 set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
289 qp->r_sge.num_sge = 0;
290 } else
291 while (qp->r_sge.num_sge) {
292 atomic_dec(&qp->r_sge.sge.mr->refcount);
293 if (--qp->r_sge.num_sge)
294 qp->r_sge.sge = *qp->r_sge.sg_list++;
295 }
296 qp->r_state = OP(SEND_LAST);
297 switch (opcode) {
298 case OP(SEND_FIRST):
299 case OP(SEND_ONLY):
300 case OP(SEND_ONLY_WITH_IMMEDIATE):
301 goto send_first;
302
303 case OP(RDMA_WRITE_FIRST):
304 case OP(RDMA_WRITE_ONLY):
305 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
306 goto rdma_first;
307
308 default:
309 goto drop;
310 }
311 }
312
313 /* Check for opcode sequence errors. */
314 switch (qp->r_state) {
315 case OP(SEND_FIRST):
316 case OP(SEND_MIDDLE):
317 if (opcode == OP(SEND_MIDDLE) ||
318 opcode == OP(SEND_LAST) ||
319 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
320 break;
321 goto inv;
322
323 case OP(RDMA_WRITE_FIRST):
324 case OP(RDMA_WRITE_MIDDLE):
325 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
326 opcode == OP(RDMA_WRITE_LAST) ||
327 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
328 break;
329 goto inv;
330
331 default:
332 if (opcode == OP(SEND_FIRST) ||
333 opcode == OP(SEND_ONLY) ||
334 opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
335 opcode == OP(RDMA_WRITE_FIRST) ||
336 opcode == OP(RDMA_WRITE_ONLY) ||
337 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
338 break;
339 goto inv;
340 }
341
342 if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
343 qp->r_flags |= QIB_R_COMM_EST;
344 if (qp->ibqp.event_handler) {
345 struct ib_event ev;
346
347 ev.device = qp->ibqp.device;
348 ev.element.qp = &qp->ibqp;
349 ev.event = IB_EVENT_COMM_EST;
350 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
351 }
352 }
353
354 /* OK, process the packet. */
355 switch (opcode) {
356 case OP(SEND_FIRST):
357 case OP(SEND_ONLY):
358 case OP(SEND_ONLY_WITH_IMMEDIATE):
359send_first:
360 if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
361 qp->r_sge = qp->s_rdma_read_sge;
362 else {
363 ret = qib_get_rwqe(qp, 0);
364 if (ret < 0)
365 goto op_err;
366 if (!ret)
367 goto drop;
368 /*
369 * qp->s_rdma_read_sge will be the owner
370 * of the mr references.
371 */
372 qp->s_rdma_read_sge = qp->r_sge;
373 }
374 qp->r_rcv_len = 0;
375 if (opcode == OP(SEND_ONLY))
376 goto send_last;
377 else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
378 goto send_last_imm;
379 /* FALLTHROUGH */
380 case OP(SEND_MIDDLE):
381 /* Check for invalid length PMTU or posted rwqe len. */
382 if (unlikely(tlen != (hdrsize + pmtu + 4)))
383 goto rewind;
384 qp->r_rcv_len += pmtu;
385 if (unlikely(qp->r_rcv_len > qp->r_len))
386 goto rewind;
387 qib_copy_sge(&qp->r_sge, data, pmtu, 0);
388 break;
389
390 case OP(SEND_LAST_WITH_IMMEDIATE):
391send_last_imm:
392 wc.ex.imm_data = ohdr->u.imm_data;
393 hdrsize += 4;
394 wc.wc_flags = IB_WC_WITH_IMM;
395 /* FALLTHROUGH */
396 case OP(SEND_LAST):
397send_last:
398 /* Get the number of bytes the message was padded by. */
399 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
400 /* Check for invalid length. */
401 /* XXX LAST len should be >= 1 */
402 if (unlikely(tlen < (hdrsize + pad + 4)))
403 goto rewind;
404 /* Don't count the CRC. */
405 tlen -= (hdrsize + pad + 4);
406 wc.byte_len = tlen + qp->r_rcv_len;
407 if (unlikely(wc.byte_len > qp->r_len))
408 goto rewind;
409 wc.opcode = IB_WC_RECV;
410last_imm:
411 qib_copy_sge(&qp->r_sge, data, tlen, 0);
412 while (qp->s_rdma_read_sge.num_sge) {
413 atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
414 if (--qp->s_rdma_read_sge.num_sge)
415 qp->s_rdma_read_sge.sge =
416 *qp->s_rdma_read_sge.sg_list++;
417 }
418 wc.wr_id = qp->r_wr_id;
419 wc.status = IB_WC_SUCCESS;
420 wc.qp = &qp->ibqp;
421 wc.src_qp = qp->remote_qpn;
422 wc.slid = qp->remote_ah_attr.dlid;
423 wc.sl = qp->remote_ah_attr.sl;
424 /* Signal completion event if the solicited bit is set. */
425 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
426 (ohdr->bth[0] &
427 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
428 break;
429
430 case OP(RDMA_WRITE_FIRST):
431 case OP(RDMA_WRITE_ONLY):
432 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
433rdma_first:
434 if (unlikely(!(qp->qp_access_flags &
435 IB_ACCESS_REMOTE_WRITE))) {
436 goto drop;
437 }
438 reth = &ohdr->u.rc.reth;
439 hdrsize += sizeof(*reth);
440 qp->r_len = be32_to_cpu(reth->length);
441 qp->r_rcv_len = 0;
442 qp->r_sge.sg_list = NULL;
443 if (qp->r_len != 0) {
444 u32 rkey = be32_to_cpu(reth->rkey);
445 u64 vaddr = be64_to_cpu(reth->vaddr);
446 int ok;
447
448 /* Check rkey */
449 ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
450 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
451 if (unlikely(!ok))
452 goto drop;
453 qp->r_sge.num_sge = 1;
454 } else {
455 qp->r_sge.num_sge = 0;
456 qp->r_sge.sge.mr = NULL;
457 qp->r_sge.sge.vaddr = NULL;
458 qp->r_sge.sge.length = 0;
459 qp->r_sge.sge.sge_length = 0;
460 }
461 if (opcode == OP(RDMA_WRITE_ONLY))
462 goto rdma_last;
463 else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
464 goto rdma_last_imm;
465 /* FALLTHROUGH */
466 case OP(RDMA_WRITE_MIDDLE):
467 /* Check for invalid length PMTU or posted rwqe len. */
468 if (unlikely(tlen != (hdrsize + pmtu + 4)))
469 goto drop;
470 qp->r_rcv_len += pmtu;
471 if (unlikely(qp->r_rcv_len > qp->r_len))
472 goto drop;
473 qib_copy_sge(&qp->r_sge, data, pmtu, 1);
474 break;
475
476 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
477rdma_last_imm:
478 wc.ex.imm_data = ohdr->u.imm_data;
479 hdrsize += 4;
480 wc.wc_flags = IB_WC_WITH_IMM;
481
482 /* Get the number of bytes the message was padded by. */
483 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
484 /* Check for invalid length. */
485 /* XXX LAST len should be >= 1 */
486 if (unlikely(tlen < (hdrsize + pad + 4)))
487 goto drop;
488 /* Don't count the CRC. */
489 tlen -= (hdrsize + pad + 4);
490 if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
491 goto drop;
492 if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
493 while (qp->s_rdma_read_sge.num_sge) {
494 atomic_dec(&qp->s_rdma_read_sge.sge.mr->
495 refcount);
496 if (--qp->s_rdma_read_sge.num_sge)
497 qp->s_rdma_read_sge.sge =
498 *qp->s_rdma_read_sge.sg_list++;
499 }
500 else {
501 ret = qib_get_rwqe(qp, 1);
502 if (ret < 0)
503 goto op_err;
504 if (!ret)
505 goto drop;
506 }
507 wc.byte_len = qp->r_len;
508 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
509 goto last_imm;
510
511 case OP(RDMA_WRITE_LAST):
512rdma_last:
513 /* Get the number of bytes the message was padded by. */
514 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
515 /* Check for invalid length. */
516 /* XXX LAST len should be >= 1 */
517 if (unlikely(tlen < (hdrsize + pad + 4)))
518 goto drop;
519 /* Don't count the CRC. */
520 tlen -= (hdrsize + pad + 4);
521 if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
522 goto drop;
523 qib_copy_sge(&qp->r_sge, data, tlen, 1);
524 while (qp->r_sge.num_sge) {
525 atomic_dec(&qp->r_sge.sge.mr->refcount);
526 if (--qp->r_sge.num_sge)
527 qp->r_sge.sge = *qp->r_sge.sg_list++;
528 }
529 break;
530
531 default:
532 /* Drop packet for unknown opcodes. */
533 goto drop;
534 }
535 qp->r_psn++;
536 qp->r_state = opcode;
537 spin_unlock(&qp->r_lock);
538 return;
539
540rewind:
541 set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
542 qp->r_sge.num_sge = 0;
543drop:
544 ibp->n_pkt_drops++;
545 spin_unlock(&qp->r_lock);
546 return;
547
548op_err:
549 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
550 spin_unlock(&qp->r_lock);
551 return;
552
553sunlock:
554 spin_unlock_irqrestore(&qp->s_lock, flags);
555}
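
/*
 * Illustrative sketch (not part of the original driver): PSNs are
 * 24-bit sequence numbers that wrap, so the qib_cmp24() used above must
 * compare them modulo 2^24.  One common way to do that (assumed here;
 * the driver's own helper lives in a header) is to sign-extend the
 * 24-bit difference:
 */
static inline int example_cmp24(u32 a, u32 b)
{
	/* < 0 if a precedes b, 0 if equal, > 0 if a follows b, treating
	 * both as 24-bit modular sequence numbers */
	return (int) ((a - b) << 8) >> 8;
}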
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
new file mode 100644
index 000000000000..c838cda73347
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -0,0 +1,607 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <rdma/ib_smi.h>
35
36#include "qib.h"
37#include "qib_mad.h"
38
39/**
40 * qib_ud_loopback - handle send on loopback QPs
41 * @sqp: the sending QP
42 * @swqe: the send work request
43 *
44 * This is called from qib_make_ud_req() to forward a WQE addressed
45 * to the same HCA.
46 * Note that the receive interrupt handler may be calling qib_ud_rcv()
47 * while this is being called.
48 */
49static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
50{
51 struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
52 struct qib_pportdata *ppd;
53 struct qib_qp *qp;
54 struct ib_ah_attr *ah_attr;
55 unsigned long flags;
56 struct qib_sge_state ssge;
57 struct qib_sge *sge;
58 struct ib_wc wc;
59 u32 length;
60
61 qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
62 if (!qp) {
63 ibp->n_pkt_drops++;
64 return;
65 }
66 if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
67 !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
68 ibp->n_pkt_drops++;
69 goto drop;
70 }
71
72 ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
73 ppd = ppd_from_ibp(ibp);
74
75 if (qp->ibqp.qp_num > 1) {
76 u16 pkey1;
77 u16 pkey2;
78 u16 lid;
79
80 pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
81 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
82 if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
83 lid = ppd->lid | (ah_attr->src_path_bits &
84 ((1 << ppd->lmc) - 1));
85 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
86 ah_attr->sl,
87 sqp->ibqp.qp_num, qp->ibqp.qp_num,
88 cpu_to_be16(lid),
89 cpu_to_be16(ah_attr->dlid));
90 goto drop;
91 }
92 }
93
94 /*
95 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
96 * Qkeys with the high order bit set mean use the
97 * qkey from the QP context instead of the WR (see 10.2.5).
98 */
99 if (qp->ibqp.qp_num) {
100 u32 qkey;
101
102 qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
103 sqp->qkey : swqe->wr.wr.ud.remote_qkey;
104 if (unlikely(qkey != qp->qkey)) {
105 u16 lid;
106
107 lid = ppd->lid | (ah_attr->src_path_bits &
108 ((1 << ppd->lmc) - 1));
109 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
110 ah_attr->sl,
111 sqp->ibqp.qp_num, qp->ibqp.qp_num,
112 cpu_to_be16(lid),
113 cpu_to_be16(ah_attr->dlid));
114 goto drop;
115 }
116 }
117
118 /*
119 * A GRH is expected to precede the data even if not
120 * present on the wire.
121 */
122 length = swqe->length;
123 memset(&wc, 0, sizeof wc);
124 wc.byte_len = length + sizeof(struct ib_grh);
125
126 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
127 wc.wc_flags = IB_WC_WITH_IMM;
128 wc.ex.imm_data = swqe->wr.ex.imm_data;
129 }
130
131 spin_lock_irqsave(&qp->r_lock, flags);
132
133 /*
134 * Get the next work request entry to find where to put the data.
135 */
136 if (qp->r_flags & QIB_R_REUSE_SGE)
137 qp->r_flags &= ~QIB_R_REUSE_SGE;
138 else {
139 int ret;
140
141 ret = qib_get_rwqe(qp, 0);
142 if (ret < 0) {
143 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
144 goto bail_unlock;
145 }
146 if (!ret) {
147 if (qp->ibqp.qp_num == 0)
148 ibp->n_vl15_dropped++;
149 goto bail_unlock;
150 }
151 }
152 /* Silently drop packets which are too big. */
153 if (unlikely(wc.byte_len > qp->r_len)) {
154 qp->r_flags |= QIB_R_REUSE_SGE;
155 ibp->n_pkt_drops++;
156 goto bail_unlock;
157 }
158
159 if (ah_attr->ah_flags & IB_AH_GRH) {
160 qib_copy_sge(&qp->r_sge, &ah_attr->grh,
161 sizeof(struct ib_grh), 1);
162 wc.wc_flags |= IB_WC_GRH;
163 } else
164 qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
165 ssge.sg_list = swqe->sg_list + 1;
166 ssge.sge = *swqe->sg_list;
167 ssge.num_sge = swqe->wr.num_sge;
168 sge = &ssge.sge;
169 while (length) {
170 u32 len = sge->length;
171
172 if (len > length)
173 len = length;
174 if (len > sge->sge_length)
175 len = sge->sge_length;
176 BUG_ON(len == 0);
177 qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
178 sge->vaddr += len;
179 sge->length -= len;
180 sge->sge_length -= len;
181 if (sge->sge_length == 0) {
182 if (--ssge.num_sge)
183 *sge = *ssge.sg_list++;
184 } else if (sge->length == 0 && sge->mr->lkey) {
185 if (++sge->n >= QIB_SEGSZ) {
186 if (++sge->m >= sge->mr->mapsz)
187 break;
188 sge->n = 0;
189 }
190 sge->vaddr =
191 sge->mr->map[sge->m]->segs[sge->n].vaddr;
192 sge->length =
193 sge->mr->map[sge->m]->segs[sge->n].length;
194 }
195 length -= len;
196 }
197 while (qp->r_sge.num_sge) {
198 atomic_dec(&qp->r_sge.sge.mr->refcount);
199 if (--qp->r_sge.num_sge)
200 qp->r_sge.sge = *qp->r_sge.sg_list++;
201 }
202 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
203 goto bail_unlock;
204 wc.wr_id = qp->r_wr_id;
205 wc.status = IB_WC_SUCCESS;
206 wc.opcode = IB_WC_RECV;
207 wc.qp = &qp->ibqp;
208 wc.src_qp = sqp->ibqp.qp_num;
209 wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
210 swqe->wr.wr.ud.pkey_index : 0;
211 wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
212 wc.sl = ah_attr->sl;
213 wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
214 wc.port_num = qp->port_num;
215 /* Signal completion event if the solicited bit is set. */
216 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
217 swqe->wr.send_flags & IB_SEND_SOLICITED);
218 ibp->n_loop_pkts++;
219bail_unlock:
220 spin_unlock_irqrestore(&qp->r_lock, flags);
221drop:
222 if (atomic_dec_and_test(&qp->refcount))
223 wake_up(&qp->wait);
224}
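
The qkey handling in qib_ud_loopback() follows IBA 10.2.5: a work-request qkey with the high-order bit set means "use the qkey from the QP context rather than the one supplied in the WR", which is why the code tests (int)remote_qkey < 0. A minimal standalone sketch of that rule (resolve_qkey() is an illustrative helper, not part of this driver):

#include <stdint.h>
#include <stdio.h>

/* Illustrative: mirrors the (int)swqe->wr.wr.ud.remote_qkey < 0 test. */
static uint32_t resolve_qkey(uint32_t qp_qkey, uint32_t wr_qkey)
{
	/* With the high-order bit set, the value reads as negative when
	 * viewed as signed 32-bit; that selects the QP context's qkey
	 * (IBA 10.2.5). */
	return ((int32_t)wr_qkey < 0) ? qp_qkey : wr_qkey;
}

int main(void)
{
	printf("%#x\n", resolve_qkey(0x11111111, 0x00001234)); /* 0x1234 */
	printf("%#x\n", resolve_qkey(0x11111111, 0x80000000)); /* 0x11111111 */
	return 0;
}
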
225
226/**
227 * qib_make_ud_req - construct a UD request packet
228 * @qp: the QP
229 *
230 * Return 1 if constructed; otherwise, return 0.
231 */
232int qib_make_ud_req(struct qib_qp *qp)
233{
234 struct qib_other_headers *ohdr;
235 struct ib_ah_attr *ah_attr;
236 struct qib_pportdata *ppd;
237 struct qib_ibport *ibp;
238 struct qib_swqe *wqe;
239 unsigned long flags;
240 u32 nwords;
241 u32 extra_bytes;
242 u32 bth0;
243 u16 lrh0;
244 u16 lid;
245 int ret = 0;
246 int next_cur;
247
248 spin_lock_irqsave(&qp->s_lock, flags);
249
250 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
251 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
252 goto bail;
253 /* We are in the error state, flush the work request. */
254 if (qp->s_last == qp->s_head)
255 goto bail;
256 /* If DMAs are in progress, we can't flush immediately. */
257 if (atomic_read(&qp->s_dma_busy)) {
258 qp->s_flags |= QIB_S_WAIT_DMA;
259 goto bail;
260 }
261 wqe = get_swqe_ptr(qp, qp->s_last);
262 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
263 goto done;
264 }
265
266 if (qp->s_cur == qp->s_head)
267 goto bail;
268
269 wqe = get_swqe_ptr(qp, qp->s_cur);
270 next_cur = qp->s_cur + 1;
271 if (next_cur >= qp->s_size)
272 next_cur = 0;
273
274 /* Construct the header. */
275 ibp = to_iport(qp->ibqp.device, qp->port_num);
276 ppd = ppd_from_ibp(ibp);
277 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
278 if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
279 if (ah_attr->dlid != QIB_PERMISSIVE_LID)
280 ibp->n_multicast_xmit++;
281 else
282 ibp->n_unicast_xmit++;
283 } else {
284 ibp->n_unicast_xmit++;
285 lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
286 if (unlikely(lid == ppd->lid)) {
287 /*
288 * If DMAs are in progress, we can't generate
289 * a completion for the loopback packet since
290 * it would be out of order.
291 * XXX Instead of waiting, we could queue a
292 * zero length descriptor so we get a callback.
293 */
294 if (atomic_read(&qp->s_dma_busy)) {
295 qp->s_flags |= QIB_S_WAIT_DMA;
296 goto bail;
297 }
298 qp->s_cur = next_cur;
299 spin_unlock_irqrestore(&qp->s_lock, flags);
300 qib_ud_loopback(qp, wqe);
301 spin_lock_irqsave(&qp->s_lock, flags);
302 qib_send_complete(qp, wqe, IB_WC_SUCCESS);
303 goto done;
304 }
305 }
306
307 qp->s_cur = next_cur;
308 extra_bytes = -wqe->length & 3;
309 nwords = (wqe->length + extra_bytes) >> 2;
310
311 /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
312 qp->s_hdrwords = 7;
313 qp->s_cur_size = wqe->length;
314 qp->s_cur_sge = &qp->s_sge;
315 qp->s_srate = ah_attr->static_rate;
316 qp->s_wqe = wqe;
317 qp->s_sge.sge = wqe->sg_list[0];
318 qp->s_sge.sg_list = wqe->sg_list + 1;
319 qp->s_sge.num_sge = wqe->wr.num_sge;
320 qp->s_sge.total_len = wqe->length;
321
322 if (ah_attr->ah_flags & IB_AH_GRH) {
323 /* Header size in 32-bit words. */
324 qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
325 &ah_attr->grh,
326 qp->s_hdrwords, nwords);
327 lrh0 = QIB_LRH_GRH;
328 ohdr = &qp->s_hdr.u.l.oth;
329 /*
330 * Don't worry about sending to locally attached multicast
331 * QPs; the spec leaves what happens in that case unspecified.
332 */
333 } else {
334 /* Header size in 32-bit words. */
335 lrh0 = QIB_LRH_BTH;
336 ohdr = &qp->s_hdr.u.oth;
337 }
338 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
339 qp->s_hdrwords++;
340 ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
341 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
342 } else
343 bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
344 lrh0 |= ah_attr->sl << 4;
345 if (qp->ibqp.qp_type == IB_QPT_SMI)
346 lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
347 else
348 lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
349 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
350 qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
351 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
352 lid = ppd->lid;
353 if (lid) {
354 lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
355 qp->s_hdr.lrh[3] = cpu_to_be16(lid);
356 } else
357 qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
358 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
359 bth0 |= IB_BTH_SOLICITED;
360 bth0 |= extra_bytes << 20;
361 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
362 qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
363 wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
364 ohdr->bth[0] = cpu_to_be32(bth0);
365 /*
366 * Use the multicast QP if the destination LID is a multicast LID.
367 */
368 ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
369 ah_attr->dlid != QIB_PERMISSIVE_LID ?
370 cpu_to_be32(QIB_MULTICAST_QPN) :
371 cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
372 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
373 /*
374 * Qkeys with the high order bit set mean use the
375 * qkey from the QP context instead of the WR (see 10.2.5).
376 */
377 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
378 qp->qkey : wqe->wr.wr.ud.remote_qkey);
379 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
380
381done:
382 ret = 1;
383 goto unlock;
384
385bail:
386 qp->s_flags &= ~QIB_S_BUSY;
387unlock:
388 spin_unlock_irqrestore(&qp->s_lock, flags);
389 return ret;
390}
391
392static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
393{
394 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
395 struct qib_devdata *dd = ppd->dd;
396 unsigned ctxt = ppd->hw_pidx;
397 unsigned i;
398
399 pkey &= 0x7fff; /* remove limited/full membership bit */
400
401 for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
402 if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
403 return i;
404
405 /*
406 * Should not get here; this means hardware failed to validate pkeys.
407 * Punt and return index 0.
408 */
409 return 0;
410}
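
qib_lookup_pkey() masks with 0x7fff because a P_Key carries the partition number in its low 15 bits and a full/limited membership flag in bit 15. A hedged sketch of the matching rule the surrounding code depends on (pkey_partition_match() and pkey_can_communicate() are illustrative names; the driver's real check is qib_pkey_ok(), whose definition is not shown in this patch):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative: same partition iff the low 15 bits agree. */
static bool pkey_partition_match(uint16_t a, uint16_t b)
{
	return (a & 0x7fff) == (b & 0x7fff);
}

/* Illustrative: two limited members (bit 15 clear on both sides) may
 * not communicate; at least one side must be a full member. */
static bool pkey_can_communicate(uint16_t a, uint16_t b)
{
	return pkey_partition_match(a, b) && ((a | b) & 0x8000);
}
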
411
412/**
413 * qib_ud_rcv - receive an incoming UD packet
414 * @ibp: the port the packet came in on
415 * @hdr: the packet header
416 * @has_grh: true if the packet has a GRH
417 * @data: the packet data
418 * @tlen: the packet length
419 * @qp: the QP the packet came on
420 *
421 * This is called from qib_qp_rcv() to process an incoming UD packet
422 * for the given QP.
423 * Called at interrupt level.
424 */
425void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
426 int has_grh, void *data, u32 tlen, struct qib_qp *qp)
427{
428 struct qib_other_headers *ohdr;
429 int opcode;
430 u32 hdrsize;
431 u32 pad;
432 struct ib_wc wc;
433 u32 qkey;
434 u32 src_qp;
435 u16 dlid;
436
437 /* Check for GRH */
438 if (!has_grh) {
439 ohdr = &hdr->u.oth;
440 hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
441 } else {
442 ohdr = &hdr->u.l.oth;
443 hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
444 }
445 qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
446 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
447
448 /* Get the number of bytes the message was padded by. */
449 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
450 if (unlikely(tlen < (hdrsize + pad + 4))) {
451 /* Drop incomplete packets. */
452 ibp->n_pkt_drops++;
453 goto bail;
454 }
455 tlen -= hdrsize + pad + 4;
456
457 /*
458 * Check that the permissive LID is only used on QP0
459 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
460 */
461 if (qp->ibqp.qp_num) {
462 if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
463 hdr->lrh[3] == IB_LID_PERMISSIVE)) {
464 ibp->n_pkt_drops++;
465 goto bail;
466 }
467 if (qp->ibqp.qp_num > 1) {
468 u16 pkey1, pkey2;
469
470 pkey1 = be32_to_cpu(ohdr->bth[0]);
471 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
472 if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
473 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
474 pkey1,
475 (be16_to_cpu(hdr->lrh[0]) >> 4) &
476 0xF,
477 src_qp, qp->ibqp.qp_num,
478 hdr->lrh[3], hdr->lrh[1]);
479 goto bail;
480 }
481 }
482 if (unlikely(qkey != qp->qkey)) {
483 qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
484 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
485 src_qp, qp->ibqp.qp_num,
486 hdr->lrh[3], hdr->lrh[1]);
487 goto bail;
488 }
489 /* Drop invalid MAD packets (see 13.5.3.1). */
490 if (unlikely(qp->ibqp.qp_num == 1 &&
491 (tlen != 256 ||
492 (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
493 ibp->n_pkt_drops++;
494 goto bail;
495 }
496 } else {
497 struct ib_smp *smp;
498
499 /* Drop invalid MAD packets (see 13.5.3.1). */
500 if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
501 ibp->n_pkt_drops++;
502 goto bail;
503 }
504 smp = (struct ib_smp *) data;
505 if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
506 hdr->lrh[3] == IB_LID_PERMISSIVE) &&
507 smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
508 ibp->n_pkt_drops++;
509 goto bail;
510 }
511 }
512
513 /*
514 * The opcode is in the low byte when it's in network order
515 * (top byte when in host order).
516 */
517 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
518 if (qp->ibqp.qp_num > 1 &&
519 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
520 wc.ex.imm_data = ohdr->u.ud.imm_data;
521 wc.wc_flags = IB_WC_WITH_IMM;
522 hdrsize += sizeof(u32);
523 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
524 wc.ex.imm_data = 0;
525 wc.wc_flags = 0;
526 } else {
527 ibp->n_pkt_drops++;
528 goto bail;
529 }
530
531 /*
532 * A GRH is expected to precede the data even if not
533 * present on the wire.
534 */
535 wc.byte_len = tlen + sizeof(struct ib_grh);
536
537 /*
538 * We need to serialize getting a receive work queue entry and
539 * generating a completion for it against QPs sending to this QP
540 * locally.
541 */
542 spin_lock(&qp->r_lock);
543
544 /*
545 * Get the next work request entry to find where to put the data.
546 */
547 if (qp->r_flags & QIB_R_REUSE_SGE)
548 qp->r_flags &= ~QIB_R_REUSE_SGE;
549 else {
550 int ret;
551
552 ret = qib_get_rwqe(qp, 0);
553 if (ret < 0) {
554 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
555 goto bail_unlock;
556 }
557 if (!ret) {
558 if (qp->ibqp.qp_num == 0)
559 ibp->n_vl15_dropped++;
560 goto bail_unlock;
561 }
562 }
563 /* Silently drop packets which are too big. */
564 if (unlikely(wc.byte_len > qp->r_len)) {
565 qp->r_flags |= QIB_R_REUSE_SGE;
566 ibp->n_pkt_drops++;
567 goto bail_unlock;
568 }
569 if (has_grh) {
570 qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
571 sizeof(struct ib_grh), 1);
572 wc.wc_flags |= IB_WC_GRH;
573 } else
574 qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
575 qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
576 while (qp->r_sge.num_sge) {
577 atomic_dec(&qp->r_sge.sge.mr->refcount);
578 if (--qp->r_sge.num_sge)
579 qp->r_sge.sge = *qp->r_sge.sg_list++;
580 }
581 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
582 goto bail_unlock;
583 wc.wr_id = qp->r_wr_id;
584 wc.status = IB_WC_SUCCESS;
585 wc.opcode = IB_WC_RECV;
586 wc.vendor_err = 0;
587 wc.qp = &qp->ibqp;
588 wc.src_qp = src_qp;
589 wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
590 qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
591 wc.slid = be16_to_cpu(hdr->lrh[3]);
592 wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
593 dlid = be16_to_cpu(hdr->lrh[1]);
594 /*
595 * Save the LMC lower bits if the destination LID is a unicast LID.
596 */
597 wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
598 dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
599 wc.port_num = qp->port_num;
600 /* Signal completion event if the solicited bit is set. */
601 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
602 (ohdr->bth[0] &
603 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
604bail_unlock:
605 spin_unlock(&qp->r_lock);
606bail:;
607}
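
A note on the length arithmetic used throughout qib_ud.c: the sender pads the payload to a 4-byte boundary (extra_bytes = -wqe->length & 3) and advertises the pad count in BTH bits 21:20; the receiver strips hdrsize + pad + 4, the trailing 4 being the ICRC. A self-contained sketch of that round trip, assuming the LRH/BTH/DETH sizes used above:

#include <stdint.h>
#include <assert.h>

#define LRH_BYTES  8
#define BTH_BYTES  12
#define DETH_BYTES 8
#define ICRC_BYTES 4

int main(void)
{
	uint32_t payload = 37;			/* arbitrary, not 4-aligned */
	uint32_t pad = -payload & 3;		/* 3 bytes of padding */
	uint32_t hdrsize = LRH_BYTES + BTH_BYTES + DETH_BYTES; /* no GRH */
	uint32_t tlen = hdrsize + payload + pad + ICRC_BYTES;  /* wire len */

	/* Receiver side: strip headers, padding and ICRC. */
	uint32_t recovered = tlen - (hdrsize + pad + ICRC_BYTES);

	assert(recovered == payload);
	return 0;
}
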
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
new file mode 100644
index 000000000000..d7a26c1d4f37
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -0,0 +1,157 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/mm.h>
35#include <linux/device.h>
36
37#include "qib.h"
38
39static void __qib_release_user_pages(struct page **p, size_t num_pages,
40 int dirty)
41{
42 size_t i;
43
44 for (i = 0; i < num_pages; i++) {
45 if (dirty)
46 set_page_dirty_lock(p[i]);
47 put_page(p[i]);
48 }
49}
50
51/*
52 * Call with current->mm->mmap_sem held.
53 */
54static int __get_user_pages(unsigned long start_page, size_t num_pages,
55 struct page **p, struct vm_area_struct **vma)
56{
57 unsigned long lock_limit;
58 size_t got;
59 int ret;
60
61 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
62
63 if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
64 ret = -ENOMEM;
65 goto bail;
66 }
67
68 for (got = 0; got < num_pages; got += ret) {
69 ret = get_user_pages(current, current->mm,
70 start_page + got * PAGE_SIZE,
71 num_pages - got, 1, 1,
72 p + got, vma);
73 if (ret < 0)
74 goto bail_release;
75 }
76
77 current->mm->locked_vm += num_pages;
78
79 ret = 0;
80 goto bail;
81
82bail_release:
83 __qib_release_user_pages(p, got, 0);
84bail:
85 return ret;
86}
87
88/**
89 * qib_map_page - a safety wrapper around pci_map_page()
90 *
91 * A dma_addr of all 0's is interpreted by the chip as "disabled".
92 * Unfortunately, it can also be a valid dma_addr returned on some
93 * architectures.
94 *
95 * The powerpc iommu assigns dma_addrs in ascending order, so we don't
96 * have to bother with retries or mapping a dummy page to ensure we
97 * don't just get the same mapping again.
98 *
99 * I'm sure we won't be so lucky with other IOMMUs, so FIXME.
100 */
101dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
102 unsigned long offset, size_t size, int direction)
103{
104 dma_addr_t phys;
105
106 phys = pci_map_page(hwdev, page, offset, size, direction);
107
108 if (phys == 0) {
109 pci_unmap_page(hwdev, phys, size, direction);
110 phys = pci_map_page(hwdev, page, offset, size, direction);
111 /*
112 * FIXME: If we get 0 again, we should keep this page,
113 * map another, then free the 0 page.
114 */
115 }
116
117 return phys;
118}
119
120/**
121 * qib_get_user_pages - lock user pages into memory
122 * @start_page: the start page
123 * @num_pages: the number of pages
124 * @p: the output page structures
125 *
126 * This function takes a given start page (page aligned user virtual
127 * address) and pins it and the following specified number of pages. For
128 * now, num_pages is always 1, but that will probably change at some point
129 * (because caller is doing expected sends on a single virtually contiguous
130 * buffer, so we can do all pages at once).
131 */
132int qib_get_user_pages(unsigned long start_page, size_t num_pages,
133 struct page **p)
134{
135 int ret;
136
137 down_write(&current->mm->mmap_sem);
138
139 ret = __get_user_pages(start_page, num_pages, p, NULL);
140
141 up_write(&current->mm->mmap_sem);
142
143 return ret;
144}
145
146void qib_release_user_pages(struct page **p, size_t num_pages)
147{
148 if (current->mm) /* during close after signal, mm can be NULL */
149 down_write(&current->mm->mmap_sem);
150
151 __qib_release_user_pages(p, num_pages, 1);
152
153 if (current->mm) {
154 current->mm->locked_vm -= num_pages;
155 up_write(&current->mm->mmap_sem);
156 }
157}
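
__get_user_pages() compares the pin request against RLIMIT_MEMLOCK converted to pages, with CAP_IPC_LOCK bypassing the limit. A simplified standalone sketch of that accounting rule (may_pin_pages() is an illustrative name; the kernel path uses rlimit() and capable() instead):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

/* Illustrative check: may we pin num_pages given a byte-granular
 * memlock limit? A privileged caller (CAP_IPC_LOCK) bypasses it. */
static bool may_pin_pages(size_t num_pages, size_t memlock_limit_bytes,
			  bool is_privileged)
{
	size_t lock_limit_pages = memlock_limit_bytes >> PAGE_SHIFT;

	return is_privileged || num_pages <= lock_limit_pages;
}
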
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
new file mode 100644
index 000000000000..4c19e06b5e85
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -0,0 +1,897 @@
1/*
2 * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/mm.h>
33#include <linux/types.h>
34#include <linux/device.h>
35#include <linux/dmapool.h>
36#include <linux/slab.h>
37#include <linux/list.h>
38#include <linux/highmem.h>
39#include <linux/io.h>
40#include <linux/uio.h>
41#include <linux/rbtree.h>
42#include <linux/spinlock.h>
43#include <linux/delay.h>
44
45#include "qib.h"
46#include "qib_user_sdma.h"
47
48/* minimum size of header */
49#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
50/* expected size of headers (for dma_pool) */
51#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
52/* attempt to drain the queue for 5secs */
53#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
54
55struct qib_user_sdma_pkt {
56 u8 naddr; /* dimension of addr (1..4) ... */
57 u32 counter; /* sdma pkts queued counter for this entry */
58 u64 added; /* global descq number of entries */
59
60 struct {
61 u32 offset; /* offset for kvaddr, addr */
62 u32 length; /* length in page */
63 u8 put_page; /* should we put_page? */
64 u8 dma_mapped; /* is page dma_mapped? */
65 struct page *page; /* may be NULL (coherent mem) */
66 void *kvaddr; /* FIXME: only for pio hack */
67 dma_addr_t addr;
68 } addr[4]; /* max pages, any more and we coalesce */
69 struct list_head list; /* list element */
70};
71
72struct qib_user_sdma_queue {
73 /*
74 * pkts sent to dma engine are queued on this
75 * list head. the type of the elements of this
76 * list are struct qib_user_sdma_pkt...
77 */
78 struct list_head sent;
79
80 /* headers with expected length are allocated from here... */
81 char header_cache_name[64];
82 struct dma_pool *header_cache;
83
84 /* packets are allocated from the slab cache... */
85 char pkt_slab_name[64];
86 struct kmem_cache *pkt_slab;
87
88 /* as packets go on the queued queue, they are counted... */
89 u32 counter;
90 u32 sent_counter;
91
92 /* dma page table */
93 struct rb_root dma_pages_root;
94
95 /* protect everything above... */
96 struct mutex lock;
97};
98
99struct qib_user_sdma_queue *
100qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
101{
102 struct qib_user_sdma_queue *pq =
103 kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
104
105 if (!pq)
106 goto done;
107
108 pq->counter = 0;
109 pq->sent_counter = 0;
110 INIT_LIST_HEAD(&pq->sent);
111
112 mutex_init(&pq->lock);
113
114 snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
115 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
116 pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
117 sizeof(struct qib_user_sdma_pkt),
118 0, 0, NULL);
119
120 if (!pq->pkt_slab)
121 goto err_kfree;
122
123 snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
124 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
125 pq->header_cache = dma_pool_create(pq->header_cache_name,
126 dev,
127 QIB_USER_SDMA_EXP_HEADER_LENGTH,
128 4, 0);
129 if (!pq->header_cache)
130 goto err_slab;
131
132 pq->dma_pages_root = RB_ROOT;
133
134 goto done;
135
136err_slab:
137 kmem_cache_destroy(pq->pkt_slab);
138err_kfree:
139 kfree(pq);
140 pq = NULL;
141
142done:
143 return pq;
144}
145
146static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
147 int i, size_t offset, size_t len,
148 int put_page, int dma_mapped,
149 struct page *page,
150 void *kvaddr, dma_addr_t dma_addr)
151{
152 pkt->addr[i].offset = offset;
153 pkt->addr[i].length = len;
154 pkt->addr[i].put_page = put_page;
155 pkt->addr[i].dma_mapped = dma_mapped;
156 pkt->addr[i].page = page;
157 pkt->addr[i].kvaddr = kvaddr;
158 pkt->addr[i].addr = dma_addr;
159}
160
161static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
162 u32 counter, size_t offset,
163 size_t len, int dma_mapped,
164 struct page *page,
165 void *kvaddr, dma_addr_t dma_addr)
166{
167 pkt->naddr = 1;
168 pkt->counter = counter;
169 qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
170 kvaddr, dma_addr);
171}
172
173/* we have too many pages in the iovec, so coalesce into a single page */
174static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
175 struct qib_user_sdma_pkt *pkt,
176 const struct iovec *iov,
177 unsigned long niov)
178{
179 int ret = 0;
180 struct page *page = alloc_page(GFP_KERNEL);
181 void *mpage_save;
182 char *mpage;
183 int i;
184 int len = 0;
185 dma_addr_t dma_addr;
186
187 if (!page) {
188 ret = -ENOMEM;
189 goto done;
190 }
191
192 mpage = kmap(page);
193 mpage_save = mpage;
194 for (i = 0; i < niov; i++) {
195 int cfur;
196
197 cfur = copy_from_user(mpage,
198 iov[i].iov_base, iov[i].iov_len);
199 if (cfur) {
200 ret = -EFAULT;
201 goto free_unmap;
202 }
203
204 mpage += iov[i].iov_len;
205 len += iov[i].iov_len;
206 }
207
208 dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
209 DMA_TO_DEVICE);
210 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
211 ret = -ENOMEM;
212 goto free_unmap;
213 }
214
215 qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
216 dma_addr);
217 pkt->naddr = 2;
218
219 goto done;
220
221free_unmap:
222 kunmap(page);
223 __free_page(page);
224done:
225 return ret;
226}
227
228/*
229 * How many pages in this iovec element?
230 */
231static int qib_user_sdma_num_pages(const struct iovec *iov)
232{
233 const unsigned long addr = (unsigned long) iov->iov_base;
234 const unsigned long len = iov->iov_len;
235 const unsigned long spage = addr & PAGE_MASK;
236 const unsigned long epage = (addr + len - 1) & PAGE_MASK;
237
238 return 1 + ((epage - spage) >> PAGE_SHIFT);
239}
240
241/*
242 * Truncate length to the page boundary.
243 */
244static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
245{
246 const unsigned long offset = addr & ~PAGE_MASK;
247
248 return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
249}
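
qib_user_sdma_num_pages() and qib_user_sdma_page_length() work as a pair: the first counts how many page-sized fragments an iovec element spans, the second clips each fragment at the next page boundary. A standalone sketch of the resulting walk (4 KiB pages assumed; the real code also kmaps and DMA-maps each fragment):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static unsigned long page_length(unsigned long addr, unsigned long len)
{
	unsigned long offset = addr & ~PAGE_MASK;

	return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
}

int main(void)
{
	unsigned long addr = 0x1ff0, len = 0x30; /* straddles one boundary */
	int frags = 0;

	while (len) {
		unsigned long flen = page_length(addr, len);

		printf("frag %d: addr=%#lx len=%#lx\n", frags++, addr, flen);
		addr += flen;
		len -= flen;
	}
	/* prints two fragments: 0x10 bytes, then 0x20 bytes */
	return 0;
}
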
250
251static void qib_user_sdma_free_pkt_frag(struct device *dev,
252 struct qib_user_sdma_queue *pq,
253 struct qib_user_sdma_pkt *pkt,
254 int frag)
255{
256 const int i = frag;
257
258 if (pkt->addr[i].page) {
259 if (pkt->addr[i].dma_mapped)
260 dma_unmap_page(dev,
261 pkt->addr[i].addr,
262 pkt->addr[i].length,
263 DMA_TO_DEVICE);
264
265 if (pkt->addr[i].kvaddr)
266 kunmap(pkt->addr[i].page);
267
268 if (pkt->addr[i].put_page)
269 put_page(pkt->addr[i].page);
270 else
271 __free_page(pkt->addr[i].page);
272 } else if (pkt->addr[i].kvaddr)
273 /* free coherent mem from cache... */
274 dma_pool_free(pq->header_cache,
275 pkt->addr[i].kvaddr, pkt->addr[i].addr);
276}
277
278/* return number of pages pinned... */
279static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
280 struct qib_user_sdma_pkt *pkt,
281 unsigned long addr, int tlen, int npages)
282{
283 struct page *pages[2];
284 int j;
285 int ret;
286
287 ret = get_user_pages(current, current->mm, addr,
288 npages, 0, 1, pages, NULL);
289
290 if (ret != npages) {
291 int i;
292
293 for (i = 0; i < ret; i++)
294 put_page(pages[i]);
295
296 ret = -ENOMEM;
297 goto done;
298 }
299
300 for (j = 0; j < npages; j++) {
301 /* map the pages... */
302 const int flen = qib_user_sdma_page_length(addr, tlen);
303 dma_addr_t dma_addr =
304 dma_map_page(&dd->pcidev->dev,
305 pages[j], 0, flen, DMA_TO_DEVICE);
306 unsigned long fofs = addr & ~PAGE_MASK;
307
308 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
309 ret = -ENOMEM;
310 goto done;
311 }
312
313 qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
314 pages[j], kmap(pages[j]), dma_addr);
315
316 pkt->naddr++;
317 addr += flen;
318 tlen -= flen;
319 }
320
321done:
322 return ret;
323}
324
325static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
326 struct qib_user_sdma_queue *pq,
327 struct qib_user_sdma_pkt *pkt,
328 const struct iovec *iov,
329 unsigned long niov)
330{
331 int ret = 0;
332 unsigned long idx;
333
334 for (idx = 0; idx < niov; idx++) {
335 const int npages = qib_user_sdma_num_pages(iov + idx);
336 const unsigned long addr = (unsigned long) iov[idx].iov_base;
337
338 ret = qib_user_sdma_pin_pages(dd, pkt, addr,
339 iov[idx].iov_len, npages);
340 if (ret < 0)
341 goto free_pkt;
342 }
343
344 goto done;
345
346free_pkt:
347 for (idx = 0; idx < pkt->naddr; idx++)
348 qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
349
350done:
351 return ret;
352}
353
354static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
355 struct qib_user_sdma_queue *pq,
356 struct qib_user_sdma_pkt *pkt,
357 const struct iovec *iov,
358 unsigned long niov, int npages)
359{
360 int ret = 0;
361
362 if (npages >= ARRAY_SIZE(pkt->addr))
363 ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
364 else
365 ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
366
367 return ret;
368}
369
370/* free a packet list */
371static void qib_user_sdma_free_pkt_list(struct device *dev,
372 struct qib_user_sdma_queue *pq,
373 struct list_head *list)
374{
375 struct qib_user_sdma_pkt *pkt, *pkt_next;
376
377 list_for_each_entry_safe(pkt, pkt_next, list, list) {
378 int i;
379
380 for (i = 0; i < pkt->naddr; i++)
381 qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
382
383 kmem_cache_free(pq->pkt_slab, pkt);
384 }
385}
386
387/*
388 * Copy headers, coalesce etc. -- pq->lock must be held.
389 *
390 * We queue all the packets on list, returning the number of
391 * iovec entries consumed. The list must be empty initially;
392 * if there is an error we clean it out...
393 */
394static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
395 struct qib_user_sdma_queue *pq,
396 struct list_head *list,
397 const struct iovec *iov,
398 unsigned long niov,
399 int maxpkts)
400{
401 unsigned long idx = 0;
402 int ret = 0;
403 int npkts = 0;
404 struct page *page = NULL;
405 __le32 *pbc;
406 dma_addr_t dma_addr;
407 struct qib_user_sdma_pkt *pkt = NULL;
408 size_t len;
409 size_t nw;
410 u32 counter = pq->counter;
411 int dma_mapped = 0;
412
413 while (idx < niov && npkts < maxpkts) {
414 const unsigned long addr = (unsigned long) iov[idx].iov_base;
415 const unsigned long idx_save = idx;
416 unsigned pktnw;
417 unsigned pktnwc;
418 int nfrags = 0;
419 int npages = 0;
420 int cfur;
421
422 dma_mapped = 0;
423 len = iov[idx].iov_len;
424 nw = len >> 2;
425 page = NULL;
426
427 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
428 if (!pkt) {
429 ret = -ENOMEM;
430 goto free_list;
431 }
432
433 if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
434 len > PAGE_SIZE || len & 3 || addr & 3) {
435 ret = -EINVAL;
436 goto free_pkt;
437 }
438
439 if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
440 pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
441 &dma_addr);
442 else
443 pbc = NULL;
444
445 if (!pbc) {
446 page = alloc_page(GFP_KERNEL);
447 if (!page) {
448 ret = -ENOMEM;
449 goto free_pkt;
450 }
451 pbc = kmap(page);
452 }
453
454 cfur = copy_from_user(pbc, iov[idx].iov_base, len);
455 if (cfur) {
456 ret = -EFAULT;
457 goto free_pbc;
458 }
459
460 /*
461 * This assignment is a bit strange: the PBC counts the
462 * number of 32-bit words in the full packet _except_ the
463 * first word of the PBC itself...
464 */
465 pktnwc = nw - 1;
466
467 /*
468 * The pktnw computation yields the number of 32-bit words
469 * that the caller has indicated in the PBC. Note that this
470 * is one less than the total number of words that go to the
471 * send DMA engine, as the first 32-bit word of the PBC
472 * itself is not counted. Armed with this count,
473 * we can verify that the packet is consistent with the
474 * iovec lengths.
475 */
476 pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
477 if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
478 ret = -EINVAL;
479 goto free_pbc;
480 }
481
482 idx++;
483 while (pktnwc < pktnw && idx < niov) {
484 const size_t slen = iov[idx].iov_len;
485 const unsigned long faddr =
486 (unsigned long) iov[idx].iov_base;
487
488 if (slen & 3 || faddr & 3 || !slen ||
489 slen > PAGE_SIZE) {
490 ret = -EINVAL;
491 goto free_pbc;
492 }
493
494 npages++;
495 if ((faddr & PAGE_MASK) !=
496 ((faddr + slen - 1) & PAGE_MASK))
497 npages++;
498
499 pktnwc += slen >> 2;
500 idx++;
501 nfrags++;
502 }
503
504 if (pktnwc != pktnw) {
505 ret = -EINVAL;
506 goto free_pbc;
507 }
508
509 if (page) {
510 dma_addr = dma_map_page(&dd->pcidev->dev,
511 page, 0, len, DMA_TO_DEVICE);
512 if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
513 ret = -ENOMEM;
514 goto free_pbc;
515 }
516
517 dma_mapped = 1;
518 }
519
520 qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
521 page, pbc, dma_addr);
522
523 if (nfrags) {
524 ret = qib_user_sdma_init_payload(dd, pq, pkt,
525 iov + idx_save + 1,
526 nfrags, npages);
527 if (ret < 0)
528 goto free_pbc_dma;
529 }
530
531 counter++;
532 npkts++;
533
534 list_add_tail(&pkt->list, list);
535 }
536
537 ret = idx;
538 goto done;
539
540free_pbc_dma:
541 if (dma_mapped)
542 dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
543free_pbc:
544 if (page) {
545 kunmap(page);
546 __free_page(page);
547 } else
548 dma_pool_free(pq->header_cache, pbc, dma_addr);
549free_pkt:
550 kmem_cache_free(pq->pkt_slab, pkt);
551free_list:
552 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
553done:
554 return ret;
555}
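
To make the PBC length check concrete: a 64-byte header iovec gives nw = 16, so pktnwc starts at 15 because the PBC's first word does not count itself. If the PBC advertises 31 dwords, the payload iovecs must contribute exactly 16 more dwords (64 bytes) or the packet is rejected. A simplified sketch of that validation (pbc_lengths_consistent() is an illustrative name, not driver code):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Illustrative: does the PBC's dword count match the header iovec plus
 * the payload iovec lengths? Mirrors the pktnw/pktnwc logic above. */
static bool pbc_lengths_consistent(size_t header_bytes,
				   unsigned pbc_dwords,
				   const size_t *payload_bytes,
				   size_t nfrags)
{
	unsigned pktnwc = (header_bytes >> 2) - 1; /* PBC word 0 uncounted */
	size_t i;

	if (pbc_dwords < pktnwc ||
	    pbc_dwords > pktnwc + (PAGE_SIZE >> 2))
		return false;

	for (i = 0; i < nfrags; i++)
		pktnwc += payload_bytes[i] >> 2;

	return pktnwc == pbc_dwords;
}
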
556
557static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
558 u32 c)
559{
560 pq->sent_counter = c;
561}
562
563/* try to clean out queue -- needs pq->lock */
564static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
565 struct qib_user_sdma_queue *pq)
566{
567 struct qib_devdata *dd = ppd->dd;
568 struct list_head free_list;
569 struct qib_user_sdma_pkt *pkt;
570 struct qib_user_sdma_pkt *pkt_prev;
571 int ret = 0;
572
573 INIT_LIST_HEAD(&free_list);
574
575 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
576 s64 descd = ppd->sdma_descq_removed - pkt->added;
577
578 if (descd < 0)
579 break;
580
581 list_move_tail(&pkt->list, &free_list);
582
583 /* one more packet cleaned */
584 ret++;
585 }
586
587 if (!list_empty(&free_list)) {
588 u32 counter;
589
590 pkt = list_entry(free_list.prev,
591 struct qib_user_sdma_pkt, list);
592 counter = pkt->counter;
593
594 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
595 qib_user_sdma_set_complete_counter(pq, counter);
596 }
597
598 return ret;
599}
600
601void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
602{
603 if (!pq)
604 return;
605
606 kmem_cache_destroy(pq->pkt_slab);
607 dma_pool_destroy(pq->header_cache);
608 kfree(pq);
609}
610
611/* clean descriptor queue, returns > 0 if some elements cleaned */
612static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
613{
614 int ret;
615 unsigned long flags;
616
617 spin_lock_irqsave(&ppd->sdma_lock, flags);
618 ret = qib_sdma_make_progress(ppd);
619 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
620
621 return ret;
622}
623
624/* we're in close; drain packets so that we can clean up successfully... */
625void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
626 struct qib_user_sdma_queue *pq)
627{
628 struct qib_devdata *dd = ppd->dd;
629 int i;
630
631 if (!pq)
632 return;
633
634 for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
635 mutex_lock(&pq->lock);
636 if (list_empty(&pq->sent)) {
637 mutex_unlock(&pq->lock);
638 break;
639 }
640 qib_user_sdma_hwqueue_clean(ppd);
641 qib_user_sdma_queue_clean(ppd, pq);
642 mutex_unlock(&pq->lock);
643 msleep(10);
644 }
645
646 if (!list_empty(&pq->sent)) {
647 struct list_head free_list;
648
649 qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
650 INIT_LIST_HEAD(&free_list);
651 mutex_lock(&pq->lock);
652 list_splice_init(&pq->sent, &free_list);
653 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
654 mutex_unlock(&pq->lock);
655 }
656}
657
658static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
659 u64 addr, u64 dwlen, u64 dwoffset)
660{
661 u8 tmpgen;
662
663 tmpgen = ppd->sdma_generation;
664
665 return cpu_to_le64(/* SDmaPhyAddr[31:0] */
666 ((addr & 0xfffffffcULL) << 32) |
667 /* SDmaGeneration[1:0] */
668 ((tmpgen & 3ULL) << 30) |
669 /* SDmaDwordCount[10:0] */
670 ((dwlen & 0x7ffULL) << 16) |
671 /* SDmaBufOffset[12:2] */
672 (dwoffset & 0x7ffULL));
673}
674
675static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
676{
677 return descq | cpu_to_le64(1ULL << 12);
678}
679
680static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
681{
682 /* last */ /* dma head */
683 return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
684}
685
686static inline __le64 qib_sdma_make_desc1(u64 addr)
687{
688 /* SDmaPhyAddr[47:32] */
689 return cpu_to_le64(addr >> 32);
690}
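
For reference, the builders above pack descriptor qword 0 as: SDmaPhyAddr[31:0] in bits 63:32, SDmaGeneration in bits 31:30, SDmaDwordCount in bits 26:16, SDmaBufOffset in bits 10:0, with the head-update, first and last flags at bits 13, 12 and 11 (and bit 14 marking large-buffer packets, set later in qib_user_sdma_push_pkts()). A host-endian decode sketch of that layout (the driver stores descriptors as __le64):

#include <stdint.h>

/* Field decode for SDMA descriptor qword 0 -- illustrative only. */
struct sdma_desc0 {
	uint32_t phys_lo;	/* SDmaPhyAddr[31:0]    */
	unsigned gen;		/* SDmaGeneration[1:0]  */
	unsigned dwlen;		/* SDmaDwordCount[10:0] */
	unsigned dwoffset;	/* SDmaBufOffset[12:2]  */
	int first, last, head_update;
};

static struct sdma_desc0 decode_desc0(uint64_t d)
{
	struct sdma_desc0 r = {
		.phys_lo     = (uint32_t)(d >> 32),
		.gen         = (d >> 30) & 3,
		.dwlen       = (d >> 16) & 0x7ff,
		.dwoffset    = d & 0x7ff,
		.first       = !!(d & (1ULL << 12)),
		.last        = !!(d & (1ULL << 11)),
		.head_update = !!(d & (1ULL << 13)),
	};
	return r;
}
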
691
692static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
693 struct qib_user_sdma_pkt *pkt, int idx,
694 unsigned ofs, u16 tail)
695{
696 const u64 addr = (u64) pkt->addr[idx].addr +
697 (u64) pkt->addr[idx].offset;
698 const u64 dwlen = (u64) pkt->addr[idx].length / 4;
699 __le64 *descqp;
700 __le64 descq0;
701
702 descqp = &ppd->sdma_descq[tail].qw[0];
703
704 descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
705 if (idx == 0)
706 descq0 = qib_sdma_make_first_desc0(descq0);
707 if (idx == pkt->naddr - 1)
708 descq0 = qib_sdma_make_last_desc0(descq0);
709
710 descqp[0] = descq0;
711 descqp[1] = qib_sdma_make_desc1(addr);
712}
713
714/* pq->lock must be held, get packets on the wire... */
715static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
716 struct qib_user_sdma_queue *pq,
717 struct list_head *pktlist)
718{
719 struct qib_devdata *dd = ppd->dd;
720 int ret = 0;
721 unsigned long flags;
722 u16 tail;
723 u8 generation;
724 u64 descq_added;
725
726 if (list_empty(pktlist))
727 return 0;
728
729 if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
730 return -ECOMM;
731
732 spin_lock_irqsave(&ppd->sdma_lock, flags);
733
734 /* keep a copy for restoring purposes in case of problems */
735 generation = ppd->sdma_generation;
736 descq_added = ppd->sdma_descq_added;
737
738 if (unlikely(!__qib_sdma_running(ppd))) {
739 ret = -ECOMM;
740 goto unlock;
741 }
742
743 tail = ppd->sdma_descq_tail;
744 while (!list_empty(pktlist)) {
745 struct qib_user_sdma_pkt *pkt =
746 list_entry(pktlist->next, struct qib_user_sdma_pkt,
747 list);
748 int i;
749 unsigned ofs = 0;
750 u16 dtail = tail;
751
752 if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
753 goto unlock_check_tail;
754
755 for (i = 0; i < pkt->naddr; i++) {
756 qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
757 ofs += pkt->addr[i].length >> 2;
758
759 if (++tail == ppd->sdma_descq_cnt) {
760 tail = 0;
761 ++ppd->sdma_generation;
762 }
763 }
764
765 if ((ofs << 2) > ppd->ibmaxlen) {
766 ret = -EMSGSIZE;
767 goto unlock;
768 }
769
770 /*
771 * If the packet is >= 2KB mtu equivalent, we have to use
772 * the large buffers, and have to mark each descriptor as
773 * part of a large buffer packet.
774 */
775 if (ofs > dd->piosize2kmax_dwords) {
776 for (i = 0; i < pkt->naddr; i++) {
777 ppd->sdma_descq[dtail].qw[0] |=
778 cpu_to_le64(1ULL << 14);
779 if (++dtail == ppd->sdma_descq_cnt)
780 dtail = 0;
781 }
782 }
783
784 ppd->sdma_descq_added += pkt->naddr;
785 pkt->added = ppd->sdma_descq_added;
786 list_move_tail(&pkt->list, &pq->sent);
787 ret++;
788 }
789
790unlock_check_tail:
791 /* advance the tail on the chip if necessary */
792 if (ppd->sdma_descq_tail != tail)
793 dd->f_sdma_update_tail(ppd, tail);
794
795unlock:
796 if (unlikely(ret < 0)) {
797 ppd->sdma_generation = generation;
798 ppd->sdma_descq_added = descq_added;
799 }
800 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
801
802 return ret;
803}
804
805int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
806 struct qib_user_sdma_queue *pq,
807 const struct iovec *iov,
808 unsigned long dim)
809{
810 struct qib_devdata *dd = rcd->dd;
811 struct qib_pportdata *ppd = rcd->ppd;
812 int ret = 0;
813 struct list_head list;
814 int npkts = 0;
815
816 INIT_LIST_HEAD(&list);
817
818 mutex_lock(&pq->lock);
819
820 /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
821 if (!qib_sdma_running(ppd))
822 goto done_unlock;
823
824 if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
825 qib_user_sdma_hwqueue_clean(ppd);
826 qib_user_sdma_queue_clean(ppd, pq);
827 }
828
829 while (dim) {
830 const int mxp = 8;
831
832 down_write(&current->mm->mmap_sem);
833 ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
834 up_write(&current->mm->mmap_sem);
835
836 if (ret <= 0)
837 goto done_unlock;
838 else {
839 dim -= ret;
840 iov += ret;
841 }
842
843 /* force packets onto the sdma hw queue... */
844 if (!list_empty(&list)) {
845 /*
846 * Lazily clean the hw queue. The 4 is a guess of about
847 * how many sdma descriptors a packet will take (it
848 * doesn't have to be perfect).
849 */
850 if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
851 qib_user_sdma_hwqueue_clean(ppd);
852 qib_user_sdma_queue_clean(ppd, pq);
853 }
854
855 ret = qib_user_sdma_push_pkts(ppd, pq, &list);
856 if (ret < 0)
857 goto done_unlock;
858 else {
859 npkts += ret;
860 pq->counter += ret;
861
862 if (!list_empty(&list))
863 goto done_unlock;
864 }
865 }
866 }
867
868done_unlock:
869 if (!list_empty(&list))
870 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
871 mutex_unlock(&pq->lock);
872
873 return (ret < 0) ? ret : npkts;
874}
875
876int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
877 struct qib_user_sdma_queue *pq)
878{
879 int ret = 0;
880
881 mutex_lock(&pq->lock);
882 qib_user_sdma_hwqueue_clean(ppd);
883 ret = qib_user_sdma_queue_clean(ppd, pq);
884 mutex_unlock(&pq->lock);
885
886 return ret;
887}
888
889u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
890{
891 return pq ? pq->sent_counter : 0;
892}
893
894u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
895{
896 return pq ? pq->counter : 0;
897}
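
Read end to end, the intended lifecycle of a queue in this file is create, submit, retire, drain, destroy. A non-verbatim sketch of the call sequence; dd, rcd, subctxt, iov and dim are stand-ins for values the caller would already hold, and error handling is elided:

/* Sketch only, not driver-verbatim. */
struct qib_user_sdma_queue *pq;

pq = qib_user_sdma_queue_create(&dd->pcidev->dev, dd->unit,
				rcd->ctxt, subctxt);
if (pq) {
	/* iov/dim would come from the user's writev() */
	int npkts = qib_user_sdma_writev(rcd, pq, iov, dim);

	if (npkts < 0)
		; /* submission failed; queue remains usable */
	/* ... on close: wait for in-flight packets, then tear down */
	qib_user_sdma_queue_drain(rcd->ppd, pq);
	qib_user_sdma_queue_destroy(pq);
}
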
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.h b/drivers/infiniband/hw/qib/qib_user_sdma.h
new file mode 100644
index 000000000000..ce8cbaf6a5c2
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/device.h>
33
34struct qib_user_sdma_queue;
35
36struct qib_user_sdma_queue *
37qib_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
38void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq);
39
40int qib_user_sdma_writev(struct qib_ctxtdata *pd,
41 struct qib_user_sdma_queue *pq,
42 const struct iovec *iov,
43 unsigned long dim);
44
45int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
46 struct qib_user_sdma_queue *pq);
47
48void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
49 struct qib_user_sdma_queue *pq);
50
51u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq);
52u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
new file mode 100644
index 000000000000..cda8f4173d23
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -0,0 +1,2248 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <rdma/ib_mad.h>
36#include <rdma/ib_user_verbs.h>
37#include <linux/io.h>
38#include <linux/utsname.h>
39#include <linux/rculist.h>
40#include <linux/mm.h>
41
42#include "qib.h"
43#include "qib_common.h"
44
45static unsigned int ib_qib_qp_table_size = 251;
46module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
47MODULE_PARM_DESC(qp_table_size, "QP table size");
48
49unsigned int ib_qib_lkey_table_size = 16;
50module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
51 S_IRUGO);
52MODULE_PARM_DESC(lkey_table_size,
53 "LKEY table size in bits (2^n, 1 <= n <= 23)");
54
55static unsigned int ib_qib_max_pds = 0xFFFF;
56module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
57MODULE_PARM_DESC(max_pds,
58 "Maximum number of protection domains to support");
59
60static unsigned int ib_qib_max_ahs = 0xFFFF;
61module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
62MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
63
64unsigned int ib_qib_max_cqes = 0x2FFFF;
65module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
66MODULE_PARM_DESC(max_cqes,
67 "Maximum number of completion queue entries to support");
68
69unsigned int ib_qib_max_cqs = 0x1FFFF;
70module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
71MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
72
73unsigned int ib_qib_max_qp_wrs = 0x3FFF;
74module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
75MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
76
77unsigned int ib_qib_max_qps = 16384;
78module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
79MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
80
81unsigned int ib_qib_max_sges = 0x60;
82module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
83MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
84
85unsigned int ib_qib_max_mcast_grps = 16384;
86module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
87MODULE_PARM_DESC(max_mcast_grps,
88 "Maximum number of multicast groups to support");
89
90unsigned int ib_qib_max_mcast_qp_attached = 16;
91module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
92 uint, S_IRUGO);
93MODULE_PARM_DESC(max_mcast_qp_attached,
94 "Maximum number of attached QPs to support");
95
96unsigned int ib_qib_max_srqs = 1024;
97module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
98MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
99
100unsigned int ib_qib_max_srq_sges = 128;
101module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
102MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
103
104unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
105module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
106MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
107
108static unsigned int ib_qib_disable_sma;
109module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
110MODULE_PARM_DESC(disable_sma, "Disable the SMA");
111
112/*
113 * Note that it is OK to post send work requests in the SQE and ERR
114 * states; qib_do_send() will process them and generate error
115 * completions as per IB 1.2 C10-96.
116 */
117const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
118 [IB_QPS_RESET] = 0,
119 [IB_QPS_INIT] = QIB_POST_RECV_OK,
120 [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
121 [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
122 QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
123 QIB_PROCESS_NEXT_SEND_OK,
124 [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
125 QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
126 [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
127 QIB_POST_SEND_OK | QIB_FLUSH_SEND,
128 [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
129 QIB_POST_SEND_OK | QIB_FLUSH_SEND,
130};
131
132struct qib_ucontext {
133 struct ib_ucontext ibucontext;
134};
135
136static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
137 *ibucontext)
138{
139 return container_of(ibucontext, struct qib_ucontext, ibucontext);
140}
141
142/*
143 * Translate ib_wr_opcode into ib_wc_opcode.
144 */
145const enum ib_wc_opcode ib_qib_wc_opcode[] = {
146 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
147 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
148 [IB_WR_SEND] = IB_WC_SEND,
149 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
150 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
151 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
152 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
153};
154
155/*
156 * System image GUID.
157 */
158__be64 ib_qib_sys_image_guid;
159
160/**
161 * qib_copy_sge - copy data to SGE memory
162 * @ss: the SGE state
163 * @data: the data to copy
164 * @length: the length of the data
165 */
166void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
167{
168 struct qib_sge *sge = &ss->sge;
169
170 while (length) {
171 u32 len = sge->length;
172
173 if (len > length)
174 len = length;
175 if (len > sge->sge_length)
176 len = sge->sge_length;
177 BUG_ON(len == 0);
178 memcpy(sge->vaddr, data, len);
179 sge->vaddr += len;
180 sge->length -= len;
181 sge->sge_length -= len;
182 if (sge->sge_length == 0) {
183 if (release)
184 atomic_dec(&sge->mr->refcount);
185 if (--ss->num_sge)
186 *sge = *ss->sg_list++;
187 } else if (sge->length == 0 && sge->mr->lkey) {
188 if (++sge->n >= QIB_SEGSZ) {
189 if (++sge->m >= sge->mr->mapsz)
190 break;
191 sge->n = 0;
192 }
193 sge->vaddr =
194 sge->mr->map[sge->m]->segs[sge->n].vaddr;
195 sge->length =
196 sge->mr->map[sge->m]->segs[sge->n].length;
197 }
198 data += len;
199 length -= len;
200 }
201}
202
203/**
204 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
205 * @ss: the SGE state
206 * @length: the number of bytes to skip
207 */
208void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
209{
210 struct qib_sge *sge = &ss->sge;
211
212 while (length) {
213 u32 len = sge->length;
214
215 if (len > length)
216 len = length;
217 if (len > sge->sge_length)
218 len = sge->sge_length;
219 BUG_ON(len == 0);
220 sge->vaddr += len;
221 sge->length -= len;
222 sge->sge_length -= len;
223 if (sge->sge_length == 0) {
224 if (release)
225 atomic_dec(&sge->mr->refcount);
226 if (--ss->num_sge)
227 *sge = *ss->sg_list++;
228 } else if (sge->length == 0 && sge->mr->lkey) {
229 if (++sge->n >= QIB_SEGSZ) {
230 if (++sge->m >= sge->mr->mapsz)
231 break;
232 sge->n = 0;
233 }
234 sge->vaddr =
235 sge->mr->map[sge->m]->segs[sge->n].vaddr;
236 sge->length =
237 sge->mr->map[sge->m]->segs[sge->n].length;
238 }
239 length -= len;
240 }
241}
242
243/*
244 * Count the number of DMA descriptors needed to send length bytes of data.
245 * Don't modify the qib_sge_state to get the count.
246 * Return zero if any of the segments is not aligned.
247 */
248static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
249{
250 struct qib_sge *sg_list = ss->sg_list;
251 struct qib_sge sge = ss->sge;
252 u8 num_sge = ss->num_sge;
253 u32 ndesc = 1; /* count the header */
254
255 while (length) {
256 u32 len = sge.length;
257
258 if (len > length)
259 len = length;
260 if (len > sge.sge_length)
261 len = sge.sge_length;
262 BUG_ON(len == 0);
263 if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
264 (len != length && (len & (sizeof(u32) - 1)))) {
265 ndesc = 0;
266 break;
267 }
268 ndesc++;
269 sge.vaddr += len;
270 sge.length -= len;
271 sge.sge_length -= len;
272 if (sge.sge_length == 0) {
273 if (--num_sge)
274 sge = *sg_list++;
275 } else if (sge.length == 0 && sge.mr->lkey) {
276 if (++sge.n >= QIB_SEGSZ) {
277 if (++sge.m >= sge.mr->mapsz)
278 break;
279 sge.n = 0;
280 }
281 sge.vaddr =
282 sge.mr->map[sge.m]->segs[sge.n].vaddr;
283 sge.length =
284 sge.mr->map[sge.m]->segs[sge.n].length;
285 }
286 length -= len;
287 }
288 return ndesc;
289}
290
291/*
292 * Copy from the SGEs to the data buffer.
293 */
294static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
295{
296 struct qib_sge *sge = &ss->sge;
297
298 while (length) {
299 u32 len = sge->length;
300
301 if (len > length)
302 len = length;
303 if (len > sge->sge_length)
304 len = sge->sge_length;
305 BUG_ON(len == 0);
306 memcpy(data, sge->vaddr, len);
307 sge->vaddr += len;
308 sge->length -= len;
309 sge->sge_length -= len;
310 if (sge->sge_length == 0) {
311 if (--ss->num_sge)
312 *sge = *ss->sg_list++;
313 } else if (sge->length == 0 && sge->mr->lkey) {
314 if (++sge->n >= QIB_SEGSZ) {
315 if (++sge->m >= sge->mr->mapsz)
316 break;
317 sge->n = 0;
318 }
319 sge->vaddr =
320 sge->mr->map[sge->m]->segs[sge->n].vaddr;
321 sge->length =
322 sge->mr->map[sge->m]->segs[sge->n].length;
323 }
324 data += len;
325 length -= len;
326 }
327}
328
329/**
330 * qib_post_one_send - post one RC, UC, or UD send work request
331 * @qp: the QP to post on
332 * @wr: the work request to send
333 */
334static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
335{
336 struct qib_swqe *wqe;
337 u32 next;
338 int i;
339 int j;
340 int acc;
341 int ret;
342 unsigned long flags;
343 struct qib_lkey_table *rkt;
344 struct qib_pd *pd;
345
346 spin_lock_irqsave(&qp->s_lock, flags);
347
348 /* Check that state is OK to post send. */
349 if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
350 goto bail_inval;
351
352 /* IB spec says that num_sge == 0 is OK. */
353 if (wr->num_sge > qp->s_max_sge)
354 goto bail_inval;
355
356	/*
357	 * Don't allow RDMA reads or atomic operations on UC QPs,
358	 * and reject undefined opcodes.
359	 * Make sure the buffer is large enough to hold the result for atomics.
360	 */
361 if (wr->opcode == IB_WR_FAST_REG_MR) {
362 if (qib_fast_reg_mr(qp, wr))
363 goto bail_inval;
364 } else if (qp->ibqp.qp_type == IB_QPT_UC) {
365 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
366 goto bail_inval;
367 } else if (qp->ibqp.qp_type != IB_QPT_RC) {
368 /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
369 if (wr->opcode != IB_WR_SEND &&
370 wr->opcode != IB_WR_SEND_WITH_IMM)
371 goto bail_inval;
372 /* Check UD destination address PD */
373 if (qp->ibqp.pd != wr->wr.ud.ah->pd)
374 goto bail_inval;
375 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
376 goto bail_inval;
377 else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
378 (wr->num_sge == 0 ||
379 wr->sg_list[0].length < sizeof(u64) ||
380 wr->sg_list[0].addr & (sizeof(u64) - 1)))
381 goto bail_inval;
382 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
383 goto bail_inval;
384
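	/*
	 * The send work queue is a ring with one slot always left open:
	 * head + 1 == last means the queue is full.
	 */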
385 next = qp->s_head + 1;
386 if (next >= qp->s_size)
387 next = 0;
388 if (next == qp->s_last) {
389 ret = -ENOMEM;
390 goto bail;
391 }
392
393 rkt = &to_idev(qp->ibqp.device)->lk_table;
394 pd = to_ipd(qp->ibqp.pd);
395 wqe = get_swqe_ptr(qp, qp->s_head);
396 wqe->wr = *wr;
397 wqe->length = 0;
398 j = 0;
399 if (wr->num_sge) {
400 acc = wr->opcode >= IB_WR_RDMA_READ ?
401 IB_ACCESS_LOCAL_WRITE : 0;
402 for (i = 0; i < wr->num_sge; i++) {
403 u32 length = wr->sg_list[i].length;
404 int ok;
405
406 if (length == 0)
407 continue;
408 ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
409 &wr->sg_list[i], acc);
410 if (!ok)
411 goto bail_inval_free;
412 wqe->length += length;
413 j++;
414 }
415 wqe->wr.num_sge = j;
416 }
417 if (qp->ibqp.qp_type == IB_QPT_UC ||
418 qp->ibqp.qp_type == IB_QPT_RC) {
419 if (wqe->length > 0x80000000U)
420 goto bail_inval_free;
421 } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
422 qp->port_num - 1)->ibmtu)
423 goto bail_inval_free;
424 else
425 atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
426 wqe->ssn = qp->s_ssn++;
427 qp->s_head = next;
428
429 ret = 0;
430 goto bail;
431
432bail_inval_free:
433 while (j) {
434 struct qib_sge *sge = &wqe->sg_list[--j];
435
436 atomic_dec(&sge->mr->refcount);
437 }
438bail_inval:
439 ret = -EINVAL;
440bail:
441 spin_unlock_irqrestore(&qp->s_lock, flags);
442 return ret;
443}
444
445/**
446 * qib_post_send - post a send on a QP
447 * @ibqp: the QP to post the send on
448 * @wr: the list of work requests to post
449 * @bad_wr: the first bad WR is put here
450 *
451 * This may be called from interrupt context.
452 */
453static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
454 struct ib_send_wr **bad_wr)
455{
456 struct qib_qp *qp = to_iqp(ibqp);
457 int err = 0;
458
459 for (; wr; wr = wr->next) {
460 err = qib_post_one_send(qp, wr);
461 if (err) {
462 *bad_wr = wr;
463 goto bail;
464 }
465 }
466
467 /* Try to do the send work in the caller's context. */
468 qib_do_send(&qp->s_work);
469
470bail:
471 return err;
472}
473
474/**
475 * qib_post_receive - post a receive on a QP
476 * @ibqp: the QP to post the receive on
477 * @wr: the WR to post
478 * @bad_wr: the first bad WR is put here
479 *
480 * This may be called from interrupt context.
481 */
482static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
483 struct ib_recv_wr **bad_wr)
484{
485 struct qib_qp *qp = to_iqp(ibqp);
486 struct qib_rwq *wq = qp->r_rq.wq;
487 unsigned long flags;
488 int ret;
489
490 /* Check that state is OK to post receive. */
491 if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
492 *bad_wr = wr;
493 ret = -EINVAL;
494 goto bail;
495 }
496
497 for (; wr; wr = wr->next) {
498 struct qib_rwqe *wqe;
499 u32 next;
500 int i;
501
502 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
503 *bad_wr = wr;
504 ret = -EINVAL;
505 goto bail;
506 }
507
508 spin_lock_irqsave(&qp->r_rq.lock, flags);
509 next = wq->head + 1;
510 if (next >= qp->r_rq.size)
511 next = 0;
512 if (next == wq->tail) {
513 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
514 *bad_wr = wr;
515 ret = -ENOMEM;
516 goto bail;
517 }
518
519 wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
520 wqe->wr_id = wr->wr_id;
521 wqe->num_sge = wr->num_sge;
522 for (i = 0; i < wr->num_sge; i++)
523 wqe->sg_list[i] = wr->sg_list[i];
524 /* Make sure queue entry is written before the head index. */
525 smp_wmb();
526 wq->head = next;
527 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
528 }
529 ret = 0;
530
531bail:
532 return ret;
533}
534
535/**
536 * qib_qp_rcv - process an incoming packet on a QP
537 * @rcd: the context pointer
538 * @hdr: the packet header
539 * @has_grh: true if the packet has a GRH
540 * @data: the packet data
541 * @tlen: the packet length
542 * @qp: the QP the packet came on
543 *
544 * This is called from qib_ib_rcv() to process an incoming packet
545 * for the given QP.
546 * Called at interrupt level.
547 */
548static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
549 int has_grh, void *data, u32 tlen, struct qib_qp *qp)
550{
551 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
552
553 /* Check for valid receive state. */
554 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
555 ibp->n_pkt_drops++;
556 return;
557 }
558
559 switch (qp->ibqp.qp_type) {
560 case IB_QPT_SMI:
561 case IB_QPT_GSI:
562 if (ib_qib_disable_sma)
563 break;
564 /* FALLTHROUGH */
565 case IB_QPT_UD:
566 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
567 break;
568
569 case IB_QPT_RC:
570 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
571 break;
572
573 case IB_QPT_UC:
574 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
575 break;
576
577 default:
578 break;
579 }
580}
581
582/**
583 * qib_ib_rcv - process an incoming packet
584 * @rcd: the context pointer
585 * @rhdr: the header of the packet
586 * @data: the packet payload
587 * @tlen: the packet length
588 *
589 * This is called from qib_kreceive() to process an incoming packet at
590 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
591 */
592void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
593{
594 struct qib_pportdata *ppd = rcd->ppd;
595 struct qib_ibport *ibp = &ppd->ibport_data;
596 struct qib_ib_header *hdr = rhdr;
597 struct qib_other_headers *ohdr;
598 struct qib_qp *qp;
599 u32 qp_num;
600 int lnh;
601 u8 opcode;
602 u16 lid;
603
604 /* 24 == LRH+BTH+CRC */
605 if (unlikely(tlen < 24))
606 goto drop;
607
608 /* Check for a valid destination LID (see ch. 7.11.1). */
609 lid = be16_to_cpu(hdr->lrh[1]);
610 if (lid < QIB_MULTICAST_LID_BASE) {
611 lid &= ~((1 << ppd->lmc) - 1);
612 if (unlikely(lid != ppd->lid))
613 goto drop;
614 }
615
616 /* Check for GRH */
617 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
618 if (lnh == QIB_LRH_BTH)
619 ohdr = &hdr->u.oth;
620 else if (lnh == QIB_LRH_GRH) {
621 u32 vtf;
622
623 ohdr = &hdr->u.l.oth;
624 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
625 goto drop;
626 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
627 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
628 goto drop;
629 } else
630 goto drop;
631
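	/* The opcode is in the high byte of the first BTH word. */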
632 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
633 ibp->opstats[opcode & 0x7f].n_bytes += tlen;
634 ibp->opstats[opcode & 0x7f].n_packets++;
635
636 /* Get the destination QP number. */
637 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
638 if (qp_num == QIB_MULTICAST_QPN) {
639 struct qib_mcast *mcast;
640 struct qib_mcast_qp *p;
641
642 if (lnh != QIB_LRH_GRH)
643 goto drop;
644 mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
645 if (mcast == NULL)
646 goto drop;
647 ibp->n_multicast_rcv++;
648 list_for_each_entry_rcu(p, &mcast->qp_list, list)
649 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
650 /*
651 * Notify qib_multicast_detach() if it is waiting for us
652 * to finish.
653 */
654 if (atomic_dec_return(&mcast->refcount) <= 1)
655 wake_up(&mcast->wait);
656 } else {
657 qp = qib_lookup_qpn(ibp, qp_num);
658 if (!qp)
659 goto drop;
660 ibp->n_unicast_rcv++;
661 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
662 /*
663 * Notify qib_destroy_qp() if it is waiting
664 * for us to finish.
665 */
666 if (atomic_dec_and_test(&qp->refcount))
667 wake_up(&qp->wait);
668 }
669 return;
670
671drop:
672 ibp->n_pkt_drops++;
673}
674
675/*
676 * This is called from a timer to check for QPs
677 * which need kernel memory in order to send a packet.
678 */
679static void mem_timer(unsigned long data)
680{
681 struct qib_ibdev *dev = (struct qib_ibdev *) data;
682 struct list_head *list = &dev->memwait;
683 struct qib_qp *qp = NULL;
684 unsigned long flags;
685
686 spin_lock_irqsave(&dev->pending_lock, flags);
687 if (!list_empty(list)) {
688 qp = list_entry(list->next, struct qib_qp, iowait);
689 list_del_init(&qp->iowait);
690 atomic_inc(&qp->refcount);
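		/* More QPs are waiting for memory; rearm for the next one. */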
691 if (!list_empty(list))
692 mod_timer(&dev->mem_timer, jiffies + 1);
693 }
694 spin_unlock_irqrestore(&dev->pending_lock, flags);
695
696 if (qp) {
697 spin_lock_irqsave(&qp->s_lock, flags);
698 if (qp->s_flags & QIB_S_WAIT_KMEM) {
699 qp->s_flags &= ~QIB_S_WAIT_KMEM;
700 qib_schedule_send(qp);
701 }
702 spin_unlock_irqrestore(&qp->s_lock, flags);
703 if (atomic_dec_and_test(&qp->refcount))
704 wake_up(&qp->wait);
705 }
706}
707
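/*
 * Advance the current SGE by length bytes, moving to the next sg_list
 * entry or the next segment of the memory region as each is consumed.
 */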
708static void update_sge(struct qib_sge_state *ss, u32 length)
709{
710 struct qib_sge *sge = &ss->sge;
711
712 sge->vaddr += length;
713 sge->length -= length;
714 sge->sge_length -= length;
715 if (sge->sge_length == 0) {
716 if (--ss->num_sge)
717 *sge = *ss->sg_list++;
718 } else if (sge->length == 0 && sge->mr->lkey) {
719 if (++sge->n >= QIB_SEGSZ) {
720 if (++sge->m >= sge->mr->mapsz)
721 return;
722 sge->n = 0;
723 }
724 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
725 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
726 }
727}
728
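/*
 * Helpers for copy_io() to assemble unaligned source bytes into whole
 * dwords in PIO buffer order, independent of host byte order.
 */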
729#ifdef __LITTLE_ENDIAN
730static inline u32 get_upper_bits(u32 data, u32 shift)
731{
732 return data >> shift;
733}
734
735static inline u32 set_upper_bits(u32 data, u32 shift)
736{
737 return data << shift;
738}
739
740static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
741{
742 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
743 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
744 return data;
745}
746#else
747static inline u32 get_upper_bits(u32 data, u32 shift)
748{
749 return data << shift;
750}
751
752static inline u32 set_upper_bits(u32 data, u32 shift)
753{
754 return data >> shift;
755}
756
757static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
758{
759 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
760 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
761 return data;
762}
763#endif
764
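/*
 * Copy from the SGE list into a PIO buffer, handling sources that are
 * not dword aligned.  The final dword is accumulated in 'last' and
 * written separately so it can serve as the trigger word, with
 * write-combining flushes when the hardware requires them.
 */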
765static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
766 u32 length, unsigned flush_wc)
767{
768 u32 extra = 0;
769 u32 data = 0;
770 u32 last;
771
772 while (1) {
773 u32 len = ss->sge.length;
774 u32 off;
775
776 if (len > length)
777 len = length;
778 if (len > ss->sge.sge_length)
779 len = ss->sge.sge_length;
780 BUG_ON(len == 0);
781 /* If the source address is not aligned, try to align it. */
782 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
783 if (off) {
784 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
785 ~(sizeof(u32) - 1));
786 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
787 u32 y;
788
789 y = sizeof(u32) - off;
790 if (len > y)
791 len = y;
792 if (len + extra >= sizeof(u32)) {
793 data |= set_upper_bits(v, extra *
794 BITS_PER_BYTE);
795 len = sizeof(u32) - extra;
796 if (len == length) {
797 last = data;
798 break;
799 }
800 __raw_writel(data, piobuf);
801 piobuf++;
802 extra = 0;
803 data = 0;
804 } else {
805 /* Clear unused upper bytes */
806 data |= clear_upper_bytes(v, len, extra);
807 if (len == length) {
808 last = data;
809 break;
810 }
811 extra += len;
812 }
813 } else if (extra) {
814 /* Source address is aligned. */
815 u32 *addr = (u32 *) ss->sge.vaddr;
816 int shift = extra * BITS_PER_BYTE;
817 int ushift = 32 - shift;
818 u32 l = len;
819
820 while (l >= sizeof(u32)) {
821 u32 v = *addr;
822
823 data |= set_upper_bits(v, shift);
824 __raw_writel(data, piobuf);
825 data = get_upper_bits(v, ushift);
826 piobuf++;
827 addr++;
828 l -= sizeof(u32);
829 }
830			/*
831			 * Fewer than sizeof(u32) bytes of this segment remain.
832			 */
833 if (l) {
834 u32 v = *addr;
835
836 if (l + extra >= sizeof(u32)) {
837 data |= set_upper_bits(v, shift);
838 len -= l + extra - sizeof(u32);
839 if (len == length) {
840 last = data;
841 break;
842 }
843 __raw_writel(data, piobuf);
844 piobuf++;
845 extra = 0;
846 data = 0;
847 } else {
848 /* Clear unused upper bytes */
849 data |= clear_upper_bytes(v, l, extra);
850 if (len == length) {
851 last = data;
852 break;
853 }
854 extra += l;
855 }
856 } else if (len == length) {
857 last = data;
858 break;
859 }
860 } else if (len == length) {
861 u32 w;
862
863 /*
864 * Need to round up for the last dword in the
865 * packet.
866 */
867 w = (len + 3) >> 2;
868 qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
869 piobuf += w - 1;
870 last = ((u32 *) ss->sge.vaddr)[w - 1];
871 break;
872 } else {
873 u32 w = len >> 2;
874
875 qib_pio_copy(piobuf, ss->sge.vaddr, w);
876 piobuf += w;
877
878 extra = len & (sizeof(u32) - 1);
879 if (extra) {
880 u32 v = ((u32 *) ss->sge.vaddr)[w];
881
882 /* Clear unused upper bytes */
883 data = clear_upper_bytes(v, extra, 0);
884 }
885 }
886 update_sge(ss, len);
887 length -= len;
888 }
889 /* Update address before sending packet. */
890 update_sge(ss, length);
891 if (flush_wc) {
892 /* must flush early everything before trigger word */
893 qib_flush_wc();
894 __raw_writel(last, piobuf);
895 /* be sure trigger word is written */
896 qib_flush_wc();
897 } else
898 __raw_writel(last, piobuf);
899}
900
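/*
 * Get a free verbs tx request structure.  If none are available, queue
 * the QP on the tx wait list (when appropriate) and return NULL with
 * *retp set to -EBUSY.
 */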
901static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
902 struct qib_qp *qp, int *retp)
903{
904 struct qib_verbs_txreq *tx;
905 unsigned long flags;
906
907 spin_lock_irqsave(&qp->s_lock, flags);
908 spin_lock(&dev->pending_lock);
909
910 if (!list_empty(&dev->txreq_free)) {
911 struct list_head *l = dev->txreq_free.next;
912
913 list_del(l);
914 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
915 *retp = 0;
916 } else {
917 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
918 list_empty(&qp->iowait)) {
919 dev->n_txwait++;
920 qp->s_flags |= QIB_S_WAIT_TX;
921 list_add_tail(&qp->iowait, &dev->txwait);
922 }
923 tx = NULL;
924 qp->s_flags &= ~QIB_S_BUSY;
925 *retp = -EBUSY;
926 }
927
928 spin_unlock(&dev->pending_lock);
929 spin_unlock_irqrestore(&qp->s_lock, flags);
930
931 return tx;
932}
933
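/*
 * Return a tx request structure to the free list, dropping the QP and
 * MR references and any bounce buffer, and wake the first QP waiting
 * for a free structure.
 */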
934void qib_put_txreq(struct qib_verbs_txreq *tx)
935{
936 struct qib_ibdev *dev;
937 struct qib_qp *qp;
938 unsigned long flags;
939
940 qp = tx->qp;
941 dev = to_idev(qp->ibqp.device);
942
943 if (atomic_dec_and_test(&qp->refcount))
944 wake_up(&qp->wait);
945 if (tx->mr) {
946 atomic_dec(&tx->mr->refcount);
947 tx->mr = NULL;
948 }
949 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
950 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
951 dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
952 tx->txreq.addr, tx->hdr_dwords << 2,
953 DMA_TO_DEVICE);
954 kfree(tx->align_buf);
955 }
956
957 spin_lock_irqsave(&dev->pending_lock, flags);
958
959 /* Put struct back on free list */
960 list_add(&tx->txreq.list, &dev->txreq_free);
961
962 if (!list_empty(&dev->txwait)) {
963 /* Wake up first QP wanting a free struct */
964 qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
965 list_del_init(&qp->iowait);
966 atomic_inc(&qp->refcount);
967 spin_unlock_irqrestore(&dev->pending_lock, flags);
968
969 spin_lock_irqsave(&qp->s_lock, flags);
970 if (qp->s_flags & QIB_S_WAIT_TX) {
971 qp->s_flags &= ~QIB_S_WAIT_TX;
972 qib_schedule_send(qp);
973 }
974 spin_unlock_irqrestore(&qp->s_lock, flags);
975
976 if (atomic_dec_and_test(&qp->refcount))
977 wake_up(&qp->wait);
978 } else
979 spin_unlock_irqrestore(&dev->pending_lock, flags);
980}
981
982/*
983 * This is called when there are send DMA descriptors that might be
984 * available.
985 *
986 * This is called with ppd->sdma_lock held.
987 */
988void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
989{
990 struct qib_qp *qp, *nqp;
991 struct qib_qp *qps[20];
992 struct qib_ibdev *dev;
993 unsigned i, n;
994
995 n = 0;
996 dev = &ppd->dd->verbs_dev;
997 spin_lock(&dev->pending_lock);
998
999 /* Search wait list for first QP wanting DMA descriptors. */
1000 list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
1001 if (qp->port_num != ppd->port)
1002 continue;
1003 if (n == ARRAY_SIZE(qps))
1004 break;
1005 if (qp->s_tx->txreq.sg_count > avail)
1006 break;
1007 avail -= qp->s_tx->txreq.sg_count;
1008 list_del_init(&qp->iowait);
1009 atomic_inc(&qp->refcount);
1010 qps[n++] = qp;
1011 }
1012
1013 spin_unlock(&dev->pending_lock);
1014
1015 for (i = 0; i < n; i++) {
1016 qp = qps[i];
1017 spin_lock(&qp->s_lock);
1018 if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
1019 qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
1020 qib_schedule_send(qp);
1021 }
1022 spin_unlock(&qp->s_lock);
1023 if (atomic_dec_and_test(&qp->refcount))
1024 wake_up(&qp->wait);
1025 }
1026}
1027
1028/*
1029 * This is called with ppd->sdma_lock held.
1030 */
1031static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
1032{
1033 struct qib_verbs_txreq *tx =
1034 container_of(cookie, struct qib_verbs_txreq, txreq);
1035 struct qib_qp *qp = tx->qp;
1036
1037 spin_lock(&qp->s_lock);
1038 if (tx->wqe)
1039 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
1040 else if (qp->ibqp.qp_type == IB_QPT_RC) {
1041 struct qib_ib_header *hdr;
1042
1043 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
1044 hdr = &tx->align_buf->hdr;
1045 else {
1046 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1047
1048 hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
1049 }
1050 qib_rc_send_complete(qp, hdr);
1051 }
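	/* The last pending DMA is done: unblock a reset or restart sends. */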
1052 if (atomic_dec_and_test(&qp->s_dma_busy)) {
1053 if (qp->state == IB_QPS_RESET)
1054 wake_up(&qp->wait_dma);
1055 else if (qp->s_flags & QIB_S_WAIT_DMA) {
1056 qp->s_flags &= ~QIB_S_WAIT_DMA;
1057 qib_schedule_send(qp);
1058 }
1059 }
1060 spin_unlock(&qp->s_lock);
1061
1062 qib_put_txreq(tx);
1063}
1064
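/*
 * If the QP is still in a send-processing state, put it on the kernel
 * memory wait list (starting the retry timer if the list was empty) and
 * return -EBUSY; otherwise return 0 so the request is flushed.
 */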
1065static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
1066{
1067 unsigned long flags;
1068 int ret = 0;
1069
1070 spin_lock_irqsave(&qp->s_lock, flags);
1071 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1072 spin_lock(&dev->pending_lock);
1073 if (list_empty(&qp->iowait)) {
1074 if (list_empty(&dev->memwait))
1075 mod_timer(&dev->mem_timer, jiffies + 1);
1076 qp->s_flags |= QIB_S_WAIT_KMEM;
1077 list_add_tail(&qp->iowait, &dev->memwait);
1078 }
1079 spin_unlock(&dev->pending_lock);
1080 qp->s_flags &= ~QIB_S_BUSY;
1081 ret = -EBUSY;
1082 }
1083 spin_unlock_irqrestore(&qp->s_lock, flags);
1084
1085 return ret;
1086}
1087
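/*
 * Send a packet using send DMA.  The header is copied into a
 * preallocated PIO header; if the payload needs too many descriptors
 * or is not dword aligned, header and payload are copied into an
 * allocated bounce buffer instead and sent from there.
 */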
1088static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
1089 u32 hdrwords, struct qib_sge_state *ss, u32 len,
1090 u32 plen, u32 dwords)
1091{
1092 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1093 struct qib_devdata *dd = dd_from_dev(dev);
1094 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1095 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1096 struct qib_verbs_txreq *tx;
1097 struct qib_pio_header *phdr;
1098 u32 control;
1099 u32 ndesc;
1100 int ret;
1101
1102 tx = qp->s_tx;
1103 if (tx) {
1104 qp->s_tx = NULL;
1105 /* resend previously constructed packet */
1106 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
1107 goto bail;
1108 }
1109
1110 tx = get_txreq(dev, qp, &ret);
1111 if (!tx)
1112 goto bail;
1113
1114 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1115 be16_to_cpu(hdr->lrh[0]) >> 12);
1116 tx->qp = qp;
1117 atomic_inc(&qp->refcount);
1118 tx->wqe = qp->s_wqe;
1119 tx->mr = qp->s_rdma_mr;
1120 if (qp->s_rdma_mr)
1121 qp->s_rdma_mr = NULL;
1122 tx->txreq.callback = sdma_complete;
1123 if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
1124 tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
1125 else
1126 tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
1127 if (plen + 1 > dd->piosize2kmax_dwords)
1128 tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
1129
1130 if (len) {
1131 /*
1132 * Don't try to DMA if it takes more descriptors than
1133 * the queue holds.
1134 */
1135 ndesc = qib_count_sge(ss, len);
1136 if (ndesc >= ppd->sdma_descq_cnt)
1137 ndesc = 0;
1138 } else
1139 ndesc = 1;
1140 if (ndesc) {
1141 phdr = &dev->pio_hdrs[tx->hdr_inx];
1142 phdr->pbc[0] = cpu_to_le32(plen);
1143 phdr->pbc[1] = cpu_to_le32(control);
1144 memcpy(&phdr->hdr, hdr, hdrwords << 2);
1145 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
1146 tx->txreq.sg_count = ndesc;
1147 tx->txreq.addr = dev->pio_hdrs_phys +
1148 tx->hdr_inx * sizeof(struct qib_pio_header);
1149 tx->hdr_dwords = hdrwords + 2; /* add PBC length */
1150 ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
1151 goto bail;
1152 }
1153
1154 /* Allocate a buffer and copy the header and payload to it. */
1155 tx->hdr_dwords = plen + 1;
1156 phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
1157 if (!phdr)
1158 goto err_tx;
1159 phdr->pbc[0] = cpu_to_le32(plen);
1160 phdr->pbc[1] = cpu_to_le32(control);
1161 memcpy(&phdr->hdr, hdr, hdrwords << 2);
1162 qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
1163
1164 tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
1165 tx->hdr_dwords << 2, DMA_TO_DEVICE);
1166 if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
1167 goto map_err;
1168 tx->align_buf = phdr;
1169 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
1170 tx->txreq.sg_count = 1;
1171 ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
1172 goto unaligned;
1173
1174map_err:
1175 kfree(phdr);
1176err_tx:
1177 qib_put_txreq(tx);
1178 ret = wait_kmem(dev, qp);
1179unaligned:
1180 ibp->n_unaligned++;
1181bail:
1182 return ret;
1183}
1184
1185/*
1186 * If the QP is now in the error state, return zero so the caller
1187 * flushes the send work request; otherwise queue it to wait for PIO.
1188 */
1189static int no_bufs_available(struct qib_qp *qp)
1190{
1191 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1192 struct qib_devdata *dd;
1193 unsigned long flags;
1194 int ret = 0;
1195
1196	/*
1197	 * Note that qib_ib_piobufavail() could be called as soon as
1198	 * dd->f_wantpiobuf_intr() is called, possibly before it returns.
1199	 * Therefore, put the QP on the I/O wait list before
1200	 * enabling the PIO avail interrupt.
1201	 */
1202 spin_lock_irqsave(&qp->s_lock, flags);
1203 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1204 spin_lock(&dev->pending_lock);
1205 if (list_empty(&qp->iowait)) {
1206 dev->n_piowait++;
1207 qp->s_flags |= QIB_S_WAIT_PIO;
1208 list_add_tail(&qp->iowait, &dev->piowait);
1209 dd = dd_from_dev(dev);
1210 dd->f_wantpiobuf_intr(dd, 1);
1211 }
1212 spin_unlock(&dev->pending_lock);
1213 qp->s_flags &= ~QIB_S_BUSY;
1214 ret = -EBUSY;
1215 }
1216 spin_unlock_irqrestore(&qp->s_lock, flags);
1217 return ret;
1218}
1219
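/*
 * Send a packet by programmed I/O: write the PBC, then the header and
 * payload into a chip send buffer, flushing write combining around the
 * trigger word where the hardware requires it.
 */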
1220static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
1221 u32 hdrwords, struct qib_sge_state *ss, u32 len,
1222 u32 plen, u32 dwords)
1223{
1224 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1225 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
1226 u32 *hdr = (u32 *) ibhdr;
1227 u32 __iomem *piobuf_orig;
1228 u32 __iomem *piobuf;
1229 u64 pbc;
1230 unsigned long flags;
1231 unsigned flush_wc;
1232 u32 control;
1233 u32 pbufn;
1234
1235 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1236 be16_to_cpu(ibhdr->lrh[0]) >> 12);
1237 pbc = ((u64) control << 32) | plen;
1238 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
1239 if (unlikely(piobuf == NULL))
1240 return no_bufs_available(qp);
1241
1242	/*
1243	 * Write the PBC.
1244	 * We have to flush after the PBC for correctness on some CPUs,
1245	 * or the WC buffer can be written out of order.
1246	 */
1247 writeq(pbc, piobuf);
1248 piobuf_orig = piobuf;
1249 piobuf += 2;
1250
1251 flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
1252 if (len == 0) {
1253 /*
1254 * If there is just the header portion, must flush before
1255 * writing last word of header for correctness, and after
1256 * the last header word (trigger word).
1257 */
1258 if (flush_wc) {
1259 qib_flush_wc();
1260 qib_pio_copy(piobuf, hdr, hdrwords - 1);
1261 qib_flush_wc();
1262 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
1263 qib_flush_wc();
1264 } else
1265 qib_pio_copy(piobuf, hdr, hdrwords);
1266 goto done;
1267 }
1268
1269 if (flush_wc)
1270 qib_flush_wc();
1271 qib_pio_copy(piobuf, hdr, hdrwords);
1272 piobuf += hdrwords;
1273
1274 /* The common case is aligned and contained in one segment. */
1275 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
1276 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
1277 u32 *addr = (u32 *) ss->sge.vaddr;
1278
1279 /* Update address before sending packet. */
1280 update_sge(ss, len);
1281 if (flush_wc) {
1282 qib_pio_copy(piobuf, addr, dwords - 1);
1283 /* must flush early everything before trigger word */
1284 qib_flush_wc();
1285 __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
1286 /* be sure trigger word is written */
1287 qib_flush_wc();
1288 } else
1289 qib_pio_copy(piobuf, addr, dwords);
1290 goto done;
1291 }
1292 copy_io(piobuf, ss, len, flush_wc);
1293done:
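	/*
	 * Some chips start the send from a special trigger word written
	 * at the end of the buffer rather than from the last data word.
	 */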
1294 if (dd->flags & QIB_USE_SPCL_TRIG) {
1295 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
1296 qib_flush_wc();
1297 __raw_writel(0xaebecede, piobuf_orig + spcl_off);
1298 }
1299 qib_sendbuf_done(dd, pbufn);
1300 if (qp->s_rdma_mr) {
1301 atomic_dec(&qp->s_rdma_mr->refcount);
1302 qp->s_rdma_mr = NULL;
1303 }
1304 if (qp->s_wqe) {
1305 spin_lock_irqsave(&qp->s_lock, flags);
1306 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
1307 spin_unlock_irqrestore(&qp->s_lock, flags);
1308 } else if (qp->ibqp.qp_type == IB_QPT_RC) {
1309 spin_lock_irqsave(&qp->s_lock, flags);
1310 qib_rc_send_complete(qp, ibhdr);
1311 spin_unlock_irqrestore(&qp->s_lock, flags);
1312 }
1313 return 0;
1314}
1315
1316/**
1317 * qib_verbs_send - send a packet
1318 * @qp: the QP to send on
1319 * @hdr: the packet header
1320 * @hdrwords: the number of 32-bit words in the header
1321 * @ss: the SGE to send
1322 * @len: the length of the packet in bytes
1323 *
1324 * Return zero if the packet was sent or queued OK.
1325 * Otherwise return non-zero, with the QIB_S_BUSY flag cleared in qp->s_flags.
1326 */
1327int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
1328 u32 hdrwords, struct qib_sge_state *ss, u32 len)
1329{
1330 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1331 u32 plen;
1332 int ret;
1333 u32 dwords = (len + 3) >> 2;
1334
1335 /*
1336 * Calculate the send buffer trigger address.
1337	 * The +1 accounts for the PBC control dword that follows the PBC length.
1338 */
1339 plen = hdrwords + dwords + 1;
1340
1341 /*
1342 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
1343 * can defer SDMA restart until link goes ACTIVE without
1344 * worrying about just how we got there.
1345 */
1346 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1347 !(dd->flags & QIB_HAS_SEND_DMA))
1348 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1349 plen, dwords);
1350 else
1351 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
1352 plen, dwords);
1353
1354 return ret;
1355}
1356
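/*
 * Snapshot the per-port send/receive word and packet counters, plus
 * the transmit-wait counter, for performance-manager sampling.
 */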
1357int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
1358 u64 *rwords, u64 *spkts, u64 *rpkts,
1359 u64 *xmit_wait)
1360{
1361 int ret;
1362 struct qib_devdata *dd = ppd->dd;
1363
1364 if (!(dd->flags & QIB_PRESENT)) {
1365 /* no hardware, freeze, etc. */
1366 ret = -EINVAL;
1367 goto bail;
1368 }
1369 *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
1370 *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
1371 *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
1372 *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
1373 *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
1374
1375 ret = 0;
1376
1377bail:
1378 return ret;
1379}
1380
1381/**
1382 * qib_get_counters - get various chip counters
1383 * @ppd: the qlogic_ib port data
1384 * @cntrs: counters are placed here
1385 *
1386 * Return the counters needed by recv_pma_get_portcounters().
1387 */
1388int qib_get_counters(struct qib_pportdata *ppd,
1389 struct qib_verbs_counters *cntrs)
1390{
1391 int ret;
1392
1393 if (!(ppd->dd->flags & QIB_PRESENT)) {
1394 /* no hardware, freeze, etc. */
1395 ret = -EINVAL;
1396 goto bail;
1397 }
1398 cntrs->symbol_error_counter =
1399 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
1400 cntrs->link_error_recovery_counter =
1401 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
1402 /*
1403 * The link downed counter counts when the other side downs the
1404 * connection. We add in the number of times we downed the link
1405 * due to local link integrity errors to compensate.
1406 */
1407 cntrs->link_downed_counter =
1408 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
1409 cntrs->port_rcv_errors =
1410 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
1411 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
1412 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
1413 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
1414 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
1415 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
1416 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
1417 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
1418 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
1419 cntrs->port_rcv_errors +=
1420 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
1421 cntrs->port_rcv_errors +=
1422 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
1423 cntrs->port_rcv_remphys_errors =
1424 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
1425 cntrs->port_xmit_discards =
1426 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
1427 cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
1428 QIBPORTCNTR_WORDSEND);
1429 cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
1430 QIBPORTCNTR_WORDRCV);
1431 cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
1432 QIBPORTCNTR_PKTSEND);
1433 cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
1434 QIBPORTCNTR_PKTRCV);
1435 cntrs->local_link_integrity_errors =
1436 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
1437 cntrs->excessive_buffer_overrun_errors =
1438 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
1439 cntrs->vl15_dropped =
1440 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
1441
1442 ret = 0;
1443
1444bail:
1445 return ret;
1446}
1447
1448/**
1449 * qib_ib_piobufavail - callback when a PIO buffer is available
1450 * @dd: the device pointer
1451 *
1452 * This is called from qib_intr() at interrupt level when a PIO buffer is
1453 * available after qib_verbs_send() returned an error that no buffers were
1454 * available. Disable the interrupt if there are no more QPs waiting.
1455 */
1456void qib_ib_piobufavail(struct qib_devdata *dd)
1457{
1458 struct qib_ibdev *dev = &dd->verbs_dev;
1459 struct list_head *list;
1460 struct qib_qp *qps[5];
1461 struct qib_qp *qp;
1462 unsigned long flags;
1463 unsigned i, n;
1464
1465 list = &dev->piowait;
1466 n = 0;
1467
1468 /*
1469 * Note: checking that the piowait list is empty and clearing
1470 * the buffer available interrupt needs to be atomic or we
1471 * could end up with QPs on the wait list with the interrupt
1472 * disabled.
1473 */
1474 spin_lock_irqsave(&dev->pending_lock, flags);
1475 while (!list_empty(list)) {
1476 if (n == ARRAY_SIZE(qps))
1477 goto full;
1478 qp = list_entry(list->next, struct qib_qp, iowait);
1479 list_del_init(&qp->iowait);
1480 atomic_inc(&qp->refcount);
1481 qps[n++] = qp;
1482 }
1483 dd->f_wantpiobuf_intr(dd, 0);
1484full:
1485 spin_unlock_irqrestore(&dev->pending_lock, flags);
1486
1487 for (i = 0; i < n; i++) {
1488 qp = qps[i];
1489
1490 spin_lock_irqsave(&qp->s_lock, flags);
1491 if (qp->s_flags & QIB_S_WAIT_PIO) {
1492 qp->s_flags &= ~QIB_S_WAIT_PIO;
1493 qib_schedule_send(qp);
1494 }
1495 spin_unlock_irqrestore(&qp->s_lock, flags);
1496
1497 /* Notify qib_destroy_qp() if it is waiting. */
1498 if (atomic_dec_and_test(&qp->refcount))
1499 wake_up(&qp->wait);
1500 }
1501}
1502
1503static int qib_query_device(struct ib_device *ibdev,
1504 struct ib_device_attr *props)
1505{
1506 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1507 struct qib_ibdev *dev = to_idev(ibdev);
1508
1509 memset(props, 0, sizeof(*props));
1510
1511 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1512 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1513 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1514 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1515 props->page_size_cap = PAGE_SIZE;
1516 props->vendor_id =
1517 QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
1518 props->vendor_part_id = dd->deviceid;
1519 props->hw_ver = dd->minrev;
1520 props->sys_image_guid = ib_qib_sys_image_guid;
1521 props->max_mr_size = ~0ULL;
1522 props->max_qp = ib_qib_max_qps;
1523 props->max_qp_wr = ib_qib_max_qp_wrs;
1524 props->max_sge = ib_qib_max_sges;
1525 props->max_cq = ib_qib_max_cqs;
1526 props->max_ah = ib_qib_max_ahs;
1527 props->max_cqe = ib_qib_max_cqes;
1528 props->max_mr = dev->lk_table.max;
1529 props->max_fmr = dev->lk_table.max;
1530 props->max_map_per_fmr = 32767;
1531 props->max_pd = ib_qib_max_pds;
1532 props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1533 props->max_qp_init_rd_atom = 255;
1534 /* props->max_res_rd_atom */
1535 props->max_srq = ib_qib_max_srqs;
1536 props->max_srq_wr = ib_qib_max_srq_wrs;
1537 props->max_srq_sge = ib_qib_max_srq_sges;
1538 /* props->local_ca_ack_delay */
1539 props->atomic_cap = IB_ATOMIC_GLOB;
1540 props->max_pkeys = qib_get_npkeys(dd);
1541 props->max_mcast_grp = ib_qib_max_mcast_grps;
1542 props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
1543 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1544 props->max_mcast_grp;
1545
1546 return 0;
1547}
1548
1549static int qib_query_port(struct ib_device *ibdev, u8 port,
1550 struct ib_port_attr *props)
1551{
1552 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1553 struct qib_ibport *ibp = to_iport(ibdev, port);
1554 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1555 enum ib_mtu mtu;
1556 u16 lid = ppd->lid;
1557
1558 memset(props, 0, sizeof(*props));
1559 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1560 props->lmc = ppd->lmc;
1561 props->sm_lid = ibp->sm_lid;
1562 props->sm_sl = ibp->sm_sl;
1563 props->state = dd->f_iblink_state(ppd->lastibcstat);
1564 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
1565 props->port_cap_flags = ibp->port_cap_flags;
1566 props->gid_tbl_len = QIB_GUIDS_PER_PORT;
1567 props->max_msg_sz = 0x80000000;
1568 props->pkey_tbl_len = qib_get_npkeys(dd);
1569 props->bad_pkey_cntr = ibp->pkey_violations;
1570 props->qkey_viol_cntr = ibp->qkey_violations;
1571 props->active_width = ppd->link_width_active;
1572 /* See rate_show() */
1573 props->active_speed = ppd->link_speed_active;
1574 props->max_vl_num = qib_num_vls(ppd->vls_supported);
1575 props->init_type_reply = 0;
1576
1577 props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
1578 switch (ppd->ibmtu) {
1579 case 4096:
1580 mtu = IB_MTU_4096;
1581 break;
1582 case 2048:
1583 mtu = IB_MTU_2048;
1584 break;
1585 case 1024:
1586 mtu = IB_MTU_1024;
1587 break;
1588 case 512:
1589 mtu = IB_MTU_512;
1590 break;
1591 case 256:
1592 mtu = IB_MTU_256;
1593 break;
1594 default:
1595 mtu = IB_MTU_2048;
1596 }
1597 props->active_mtu = mtu;
1598 props->subnet_timeout = ibp->subnet_timeout;
1599
1600 return 0;
1601}
1602
1603static int qib_modify_device(struct ib_device *device,
1604 int device_modify_mask,
1605 struct ib_device_modify *device_modify)
1606{
1607 struct qib_devdata *dd = dd_from_ibdev(device);
1608 unsigned i;
1609 int ret;
1610
1611 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1612 IB_DEVICE_MODIFY_NODE_DESC)) {
1613 ret = -EOPNOTSUPP;
1614 goto bail;
1615 }
1616
1617 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1618 memcpy(device->node_desc, device_modify->node_desc, 64);
1619 for (i = 0; i < dd->num_pports; i++) {
1620 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1621
1622 qib_node_desc_chg(ibp);
1623 }
1624 }
1625
1626 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1627 ib_qib_sys_image_guid =
1628 cpu_to_be64(device_modify->sys_image_guid);
1629 for (i = 0; i < dd->num_pports; i++) {
1630 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1631
1632 qib_sys_guid_chg(ibp);
1633 }
1634 }
1635
1636 ret = 0;
1637
1638bail:
1639 return ret;
1640}
1641
1642static int qib_modify_port(struct ib_device *ibdev, u8 port,
1643 int port_modify_mask, struct ib_port_modify *props)
1644{
1645 struct qib_ibport *ibp = to_iport(ibdev, port);
1646 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1647
1648 ibp->port_cap_flags |= props->set_port_cap_mask;
1649 ibp->port_cap_flags &= ~props->clr_port_cap_mask;
1650 if (props->set_port_cap_mask || props->clr_port_cap_mask)
1651 qib_cap_mask_chg(ibp);
1652 if (port_modify_mask & IB_PORT_SHUTDOWN)
1653 qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1654 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1655 ibp->qkey_violations = 0;
1656 return 0;
1657}
1658
1659static int qib_query_gid(struct ib_device *ibdev, u8 port,
1660 int index, union ib_gid *gid)
1661{
1662 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1663 int ret = 0;
1664
1665 if (!port || port > dd->num_pports)
1666 ret = -EINVAL;
1667 else {
1668 struct qib_ibport *ibp = to_iport(ibdev, port);
1669 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1670
1671 gid->global.subnet_prefix = ibp->gid_prefix;
1672 if (index == 0)
1673 gid->global.interface_id = ppd->guid;
1674 else if (index < QIB_GUIDS_PER_PORT)
1675 gid->global.interface_id = ibp->guids[index - 1];
1676 else
1677 ret = -EINVAL;
1678 }
1679
1680 return ret;
1681}
1682
1683static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
1684 struct ib_ucontext *context,
1685 struct ib_udata *udata)
1686{
1687 struct qib_ibdev *dev = to_idev(ibdev);
1688 struct qib_pd *pd;
1689 struct ib_pd *ret;
1690
1691	/*
1692	 * The PD limit is actually arbitrary.  Some correctness tests
1693	 * assume there is a maximum number of PDs that can be allocated.
1694	 * The hardware imposes no such limit, but we enforce the value we
1695	 * report so that those tests pass.
1696	 */
1697
1698 pd = kmalloc(sizeof *pd, GFP_KERNEL);
1699 if (!pd) {
1700 ret = ERR_PTR(-ENOMEM);
1701 goto bail;
1702 }
1703
1704 spin_lock(&dev->n_pds_lock);
1705 if (dev->n_pds_allocated == ib_qib_max_pds) {
1706 spin_unlock(&dev->n_pds_lock);
1707 kfree(pd);
1708 ret = ERR_PTR(-ENOMEM);
1709 goto bail;
1710 }
1711
1712 dev->n_pds_allocated++;
1713 spin_unlock(&dev->n_pds_lock);
1714
1715 /* ib_alloc_pd() will initialize pd->ibpd. */
1716 pd->user = udata != NULL;
1717
1718 ret = &pd->ibpd;
1719
1720bail:
1721 return ret;
1722}
1723
1724static int qib_dealloc_pd(struct ib_pd *ibpd)
1725{
1726 struct qib_pd *pd = to_ipd(ibpd);
1727 struct qib_ibdev *dev = to_idev(ibpd->device);
1728
1729 spin_lock(&dev->n_pds_lock);
1730 dev->n_pds_allocated--;
1731 spin_unlock(&dev->n_pds_lock);
1732
1733 kfree(pd);
1734
1735 return 0;
1736}
1737
1738int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
1739{
1740 /* A multicast address requires a GRH (see ch. 8.4.1). */
1741 if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
1742 ah_attr->dlid != QIB_PERMISSIVE_LID &&
1743 !(ah_attr->ah_flags & IB_AH_GRH))
1744 goto bail;
1745 if ((ah_attr->ah_flags & IB_AH_GRH) &&
1746 ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
1747 goto bail;
1748 if (ah_attr->dlid == 0)
1749 goto bail;
1750 if (ah_attr->port_num < 1 ||
1751 ah_attr->port_num > ibdev->phys_port_cnt)
1752 goto bail;
1753 if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
1754 ib_rate_to_mult(ah_attr->static_rate) < 0)
1755 goto bail;
1756 if (ah_attr->sl > 15)
1757 goto bail;
1758 return 0;
1759bail:
1760 return -EINVAL;
1761}
1762
1763/**
1764 * qib_create_ah - create an address handle
1765 * @pd: the protection domain
1766 * @ah_attr: the attributes of the AH
1767 *
1768 * This may be called from interrupt context.
1769 */
1770static struct ib_ah *qib_create_ah(struct ib_pd *pd,
1771 struct ib_ah_attr *ah_attr)
1772{
1773 struct qib_ah *ah;
1774 struct ib_ah *ret;
1775 struct qib_ibdev *dev = to_idev(pd->device);
1776 unsigned long flags;
1777
1778 if (qib_check_ah(pd->device, ah_attr)) {
1779 ret = ERR_PTR(-EINVAL);
1780 goto bail;
1781 }
1782
1783 ah = kmalloc(sizeof *ah, GFP_ATOMIC);
1784 if (!ah) {
1785 ret = ERR_PTR(-ENOMEM);
1786 goto bail;
1787 }
1788
1789 spin_lock_irqsave(&dev->n_ahs_lock, flags);
1790 if (dev->n_ahs_allocated == ib_qib_max_ahs) {
1791 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1792 kfree(ah);
1793 ret = ERR_PTR(-ENOMEM);
1794 goto bail;
1795 }
1796
1797 dev->n_ahs_allocated++;
1798 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1799
1800 /* ib_create_ah() will initialize ah->ibah. */
1801 ah->attr = *ah_attr;
1802 atomic_set(&ah->refcount, 0);
1803
1804 ret = &ah->ibah;
1805
1806bail:
1807 return ret;
1808}
1809
1810/**
1811 * qib_destroy_ah - destroy an address handle
1812 * @ibah: the AH to destroy
1813 *
1814 * This may be called from interrupt context.
1815 */
1816static int qib_destroy_ah(struct ib_ah *ibah)
1817{
1818 struct qib_ibdev *dev = to_idev(ibah->device);
1819 struct qib_ah *ah = to_iah(ibah);
1820 unsigned long flags;
1821
1822 if (atomic_read(&ah->refcount) != 0)
1823 return -EBUSY;
1824
1825 spin_lock_irqsave(&dev->n_ahs_lock, flags);
1826 dev->n_ahs_allocated--;
1827 spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
1828
1829 kfree(ah);
1830
1831 return 0;
1832}
1833
1834static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1835{
1836 struct qib_ah *ah = to_iah(ibah);
1837
1838 if (qib_check_ah(ibah->device, ah_attr))
1839 return -EINVAL;
1840
1841 ah->attr = *ah_attr;
1842
1843 return 0;
1844}
1845
1846static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
1847{
1848 struct qib_ah *ah = to_iah(ibah);
1849
1850 *ah_attr = ah->attr;
1851
1852 return 0;
1853}
1854
1855/**
1856 * qib_get_npkeys - return the size of the PKEY table for context 0
1857 * @dd: the qlogic_ib device
1858 */
1859unsigned qib_get_npkeys(struct qib_devdata *dd)
1860{
1861 return ARRAY_SIZE(dd->rcd[0]->pkeys);
1862}
1863
1864/*
1865 * Return the indexed PKEY from the port PKEY table.
1866 * No need to validate rcd[ctxt]; the port is set up if we are here.
1867 */
1868unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
1869{
1870 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1871 struct qib_devdata *dd = ppd->dd;
1872 unsigned ctxt = ppd->hw_pidx;
1873 unsigned ret;
1874
1875 /* dd->rcd null if mini_init or some init failures */
1876 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
1877 ret = 0;
1878 else
1879 ret = dd->rcd[ctxt]->pkeys[index];
1880
1881 return ret;
1882}
1883
1884static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1885 u16 *pkey)
1886{
1887 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1888 int ret;
1889
1890 if (index >= qib_get_npkeys(dd)) {
1891 ret = -EINVAL;
1892 goto bail;
1893 }
1894
1895 *pkey = qib_get_pkey(to_iport(ibdev, port), index);
1896 ret = 0;
1897
1898bail:
1899 return ret;
1900}
1901
1902/**
1903 * qib_alloc_ucontext - allocate a ucontext
1904 * @ibdev: the infiniband device
1905 * @udata: not used by the QLogic_IB driver
1906 */
1907
1908static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
1909 struct ib_udata *udata)
1910{
1911 struct qib_ucontext *context;
1912 struct ib_ucontext *ret;
1913
1914 context = kmalloc(sizeof *context, GFP_KERNEL);
1915 if (!context) {
1916 ret = ERR_PTR(-ENOMEM);
1917 goto bail;
1918 }
1919
1920 ret = &context->ibucontext;
1921
1922bail:
1923 return ret;
1924}
1925
1926static int qib_dealloc_ucontext(struct ib_ucontext *context)
1927{
1928 kfree(to_iucontext(context));
1929 return 0;
1930}
1931
1932static void init_ibport(struct qib_pportdata *ppd)
1933{
1934 struct qib_verbs_counters cntrs;
1935 struct qib_ibport *ibp = &ppd->ibport_data;
1936
1937 spin_lock_init(&ibp->lock);
1938 /* Set the prefix to the default value (see ch. 4.1.1) */
1939 ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
1940 ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
1941 ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
1942 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
1943 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
1944 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
1945 IB_PORT_OTHER_LOCAL_CHANGES_SUP;
1946 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
1947 ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1948 ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1949 ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1950 ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1951 ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1952 ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1953
1954 /* Snapshot current HW counters to "clear" them. */
1955 qib_get_counters(ppd, &cntrs);
1956 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1957 ibp->z_link_error_recovery_counter =
1958 cntrs.link_error_recovery_counter;
1959 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1960 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1961 ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
1962 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1963 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1964 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1965 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1966 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1967 ibp->z_local_link_integrity_errors =
1968 cntrs.local_link_integrity_errors;
1969 ibp->z_excessive_buffer_overrun_errors =
1970 cntrs.excessive_buffer_overrun_errors;
1971 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1972}
1973
1974/**
1975 * qib_register_ib_device - register our device with the infiniband core
1976 * @dd: the device data structure
1977 * Return 0 on success, or a negative errno on failure.
1978 */
1979int qib_register_ib_device(struct qib_devdata *dd)
1980{
1981 struct qib_ibdev *dev = &dd->verbs_dev;
1982 struct ib_device *ibdev = &dev->ibdev;
1983 struct qib_pportdata *ppd = dd->pport;
1984 unsigned i, lk_tab_size;
1985 int ret;
1986
1987 dev->qp_table_size = ib_qib_qp_table_size;
1988 dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table,
1989 GFP_KERNEL);
1990 if (!dev->qp_table) {
1991 ret = -ENOMEM;
1992 goto err_qpt;
1993 }
1994
1995 for (i = 0; i < dd->num_pports; i++)
1996 init_ibport(ppd + i);
1997
1998 /* Only need to initialize non-zero fields. */
1999 spin_lock_init(&dev->qpt_lock);
2000 spin_lock_init(&dev->n_pds_lock);
2001 spin_lock_init(&dev->n_ahs_lock);
2002 spin_lock_init(&dev->n_cqs_lock);
2003 spin_lock_init(&dev->n_qps_lock);
2004 spin_lock_init(&dev->n_srqs_lock);
2005 spin_lock_init(&dev->n_mcast_grps_lock);
2006 init_timer(&dev->mem_timer);
2007 dev->mem_timer.function = mem_timer;
2008 dev->mem_timer.data = (unsigned long) dev;
2009
2010 qib_init_qpn_table(dd, &dev->qpn_table);
2011
2012 /*
2013 * The top ib_qib_lkey_table_size bits are used to index the
2014 * table. The lower 8 bits can be owned by the user (copied from
2015 * the LKEY). The remaining bits act as a generation number or tag.
2016 */
2017 spin_lock_init(&dev->lk_table.lock);
2018 dev->lk_table.max = 1 << ib_qib_lkey_table_size;
2019 lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
2020 dev->lk_table.table = (struct qib_mregion **)
2021 __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
2022 if (dev->lk_table.table == NULL) {
2023 ret = -ENOMEM;
2024 goto err_lk;
2025 }
2026 memset(dev->lk_table.table, 0, lk_tab_size);
2027 INIT_LIST_HEAD(&dev->pending_mmaps);
2028 spin_lock_init(&dev->pending_lock);
2029 dev->mmap_offset = PAGE_SIZE;
2030 spin_lock_init(&dev->mmap_offset_lock);
2031 INIT_LIST_HEAD(&dev->piowait);
2032 INIT_LIST_HEAD(&dev->dmawait);
2033 INIT_LIST_HEAD(&dev->txwait);
2034 INIT_LIST_HEAD(&dev->memwait);
2035 INIT_LIST_HEAD(&dev->txreq_free);
2036
2037 if (ppd->sdma_descq_cnt) {
2038 dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
2039 ppd->sdma_descq_cnt *
2040 sizeof(struct qib_pio_header),
2041 &dev->pio_hdrs_phys,
2042 GFP_KERNEL);
2043 if (!dev->pio_hdrs) {
2044 ret = -ENOMEM;
2045 goto err_hdrs;
2046 }
2047 }
2048
2049 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
2050 struct qib_verbs_txreq *tx;
2051
2052 tx = kzalloc(sizeof *tx, GFP_KERNEL);
2053 if (!tx) {
2054 ret = -ENOMEM;
2055 goto err_tx;
2056 }
2057 tx->hdr_inx = i;
2058 list_add(&tx->txreq.list, &dev->txreq_free);
2059 }
2060
2061 /*
2062 * The system image GUID is supposed to be the same for all
2063 * IB HCAs in a single system but since there can be other
2064 * device types in the system, we can't be sure this is unique.
2065 */
2066 if (!ib_qib_sys_image_guid)
2067 ib_qib_sys_image_guid = ppd->guid;
2068
2069 strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
2070 ibdev->owner = THIS_MODULE;
2071 ibdev->node_guid = ppd->guid;
2072 ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
2073 ibdev->uverbs_cmd_mask =
2074 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2075 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2076 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2077 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2078 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2079 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
2080 (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
2081 (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
2082 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
2083 (1ull << IB_USER_VERBS_CMD_REG_MR) |
2084 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2085 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2086 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2087 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
2088 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2089 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
2090 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2091 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2092 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2093 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2094 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2095 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
2096 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
2097 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2098 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2099 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2100 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
2101 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
2102 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
2103 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
2104 ibdev->node_type = RDMA_NODE_IB_CA;
2105 ibdev->phys_port_cnt = dd->num_pports;
2106 ibdev->num_comp_vectors = 1;
2107 ibdev->dma_device = &dd->pcidev->dev;
2108 ibdev->query_device = qib_query_device;
2109 ibdev->modify_device = qib_modify_device;
2110 ibdev->query_port = qib_query_port;
2111 ibdev->modify_port = qib_modify_port;
2112 ibdev->query_pkey = qib_query_pkey;
2113 ibdev->query_gid = qib_query_gid;
2114 ibdev->alloc_ucontext = qib_alloc_ucontext;
2115 ibdev->dealloc_ucontext = qib_dealloc_ucontext;
2116 ibdev->alloc_pd = qib_alloc_pd;
2117 ibdev->dealloc_pd = qib_dealloc_pd;
2118 ibdev->create_ah = qib_create_ah;
2119 ibdev->destroy_ah = qib_destroy_ah;
2120 ibdev->modify_ah = qib_modify_ah;
2121 ibdev->query_ah = qib_query_ah;
2122 ibdev->create_srq = qib_create_srq;
2123 ibdev->modify_srq = qib_modify_srq;
2124 ibdev->query_srq = qib_query_srq;
2125 ibdev->destroy_srq = qib_destroy_srq;
2126 ibdev->create_qp = qib_create_qp;
2127 ibdev->modify_qp = qib_modify_qp;
2128 ibdev->query_qp = qib_query_qp;
2129 ibdev->destroy_qp = qib_destroy_qp;
2130 ibdev->post_send = qib_post_send;
2131 ibdev->post_recv = qib_post_receive;
2132 ibdev->post_srq_recv = qib_post_srq_receive;
2133 ibdev->create_cq = qib_create_cq;
2134 ibdev->destroy_cq = qib_destroy_cq;
2135 ibdev->resize_cq = qib_resize_cq;
2136 ibdev->poll_cq = qib_poll_cq;
2137 ibdev->req_notify_cq = qib_req_notify_cq;
2138 ibdev->get_dma_mr = qib_get_dma_mr;
2139 ibdev->reg_phys_mr = qib_reg_phys_mr;
2140 ibdev->reg_user_mr = qib_reg_user_mr;
2141 ibdev->dereg_mr = qib_dereg_mr;
2142 ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
2143 ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
2144 ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
2145 ibdev->alloc_fmr = qib_alloc_fmr;
2146 ibdev->map_phys_fmr = qib_map_phys_fmr;
2147 ibdev->unmap_fmr = qib_unmap_fmr;
2148 ibdev->dealloc_fmr = qib_dealloc_fmr;
2149 ibdev->attach_mcast = qib_multicast_attach;
2150 ibdev->detach_mcast = qib_multicast_detach;
2151 ibdev->process_mad = qib_process_mad;
2152 ibdev->mmap = qib_mmap;
2153 ibdev->dma_ops = &qib_dma_mapping_ops;
2154
2155 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
2156 QIB_IDSTR " %s", init_utsname()->nodename);
2157
2158 ret = ib_register_device(ibdev, qib_create_port_files);
2159 if (ret)
2160 goto err_reg;
2161
2162 ret = qib_create_agents(dev);
2163 if (ret)
2164 goto err_agents;
2165
2166 if (qib_verbs_register_sysfs(dd))
2167 goto err_class;
2168
2169 goto bail;
2170
2171err_class:
2172 qib_free_agents(dev);
2173err_agents:
2174 ib_unregister_device(ibdev);
2175err_reg:
2176err_tx:
2177 while (!list_empty(&dev->txreq_free)) {
2178 struct list_head *l = dev->txreq_free.next;
2179 struct qib_verbs_txreq *tx;
2180
2181 list_del(l);
2182 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2183 kfree(tx);
2184 }
2185 if (ppd->sdma_descq_cnt)
2186 dma_free_coherent(&dd->pcidev->dev,
2187 ppd->sdma_descq_cnt *
2188 sizeof(struct qib_pio_header),
2189 dev->pio_hdrs, dev->pio_hdrs_phys);
2190err_hdrs:
2191 free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
2192err_lk:
2193 kfree(dev->qp_table);
2194err_qpt:
2195 qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
2196bail:
2197 return ret;
2198}
2199
2200void qib_unregister_ib_device(struct qib_devdata *dd)
2201{
2202 struct qib_ibdev *dev = &dd->verbs_dev;
2203 struct ib_device *ibdev = &dev->ibdev;
2204 u32 qps_inuse;
2205 unsigned lk_tab_size;
2206
2207 qib_verbs_unregister_sysfs(dd);
2208
2209 qib_free_agents(dev);
2210
2211 ib_unregister_device(ibdev);
2212
2213 if (!list_empty(&dev->piowait))
2214 qib_dev_err(dd, "piowait list not empty!\n");
2215 if (!list_empty(&dev->dmawait))
2216 qib_dev_err(dd, "dmawait list not empty!\n");
2217 if (!list_empty(&dev->txwait))
2218 qib_dev_err(dd, "txwait list not empty!\n");
2219 if (!list_empty(&dev->memwait))
2220 qib_dev_err(dd, "memwait list not empty!\n");
2221 if (dev->dma_mr)
2222 qib_dev_err(dd, "DMA MR not NULL!\n");
2223
2224 qps_inuse = qib_free_all_qps(dd);
2225 if (qps_inuse)
2226 qib_dev_err(dd, "QP memory leak! %u still in use\n",
2227 qps_inuse);
2228
2229 del_timer_sync(&dev->mem_timer);
2230 qib_free_qpn_table(&dev->qpn_table);
2231 while (!list_empty(&dev->txreq_free)) {
2232 struct list_head *l = dev->txreq_free.next;
2233 struct qib_verbs_txreq *tx;
2234
2235 list_del(l);
2236 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2237 kfree(tx);
2238 }
2239 if (dd->pport->sdma_descq_cnt)
2240 dma_free_coherent(&dd->pcidev->dev,
2241 dd->pport->sdma_descq_cnt *
2242 sizeof(struct qib_pio_header),
2243 dev->pio_hdrs, dev->pio_hdrs_phys);
2244 lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
2245 free_pages((unsigned long) dev->lk_table.table,
2246 get_order(lk_tab_size));
2247 kfree(dev->qp_table);
2248}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
new file mode 100644
index 000000000000..bd57c1273225
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -0,0 +1,1100 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
3 * All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef QIB_VERBS_H
36#define QIB_VERBS_H
37
38#include <linux/types.h>
39#include <linux/spinlock.h>
40#include <linux/kernel.h>
41#include <linux/interrupt.h>
42#include <linux/kref.h>
43#include <linux/workqueue.h>
44#include <rdma/ib_pack.h>
45#include <rdma/ib_user_verbs.h>
46
47struct qib_ctxtdata;
48struct qib_pportdata;
49struct qib_devdata;
50struct qib_verbs_txreq;
51
52#define QIB_MAX_RDMA_ATOMIC 16
53#define QIB_GUIDS_PER_PORT 5
54
55#define QPN_MAX (1 << 24)
56#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
57
58/*
59 * Increment this value if any changes that break userspace ABI
60 * compatibility are made.
61 */
62#define QIB_UVERBS_ABI_VERSION 2
63
64/*
65 * Define an ib_cq_notify value that is not valid so we know when CQ
66 * notifications are armed.
67 */
68#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
69
70#define IB_SEQ_NAK (3 << 29)
71
72/* AETH NAK opcode values */
73#define IB_RNR_NAK 0x20
74#define IB_NAK_PSN_ERROR 0x60
75#define IB_NAK_INVALID_REQUEST 0x61
76#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
77#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
78#define IB_NAK_INVALID_RD_REQUEST 0x64
79
80/* Flags for checking QP state (see ib_qib_state_ops[]) */
81#define QIB_POST_SEND_OK 0x01
82#define QIB_POST_RECV_OK 0x02
83#define QIB_PROCESS_RECV_OK 0x04
84#define QIB_PROCESS_SEND_OK 0x08
85#define QIB_PROCESS_NEXT_SEND_OK 0x10
86#define QIB_FLUSH_SEND 0x20
87#define QIB_FLUSH_RECV 0x40
88#define QIB_PROCESS_OR_FLUSH_SEND \
89 (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)
90
91/* IB Performance Manager status values */
92#define IB_PMA_SAMPLE_STATUS_DONE 0x00
93#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
94#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
95
96/* Mandatory IB performance counter select values. */
97#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
98#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
99#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
100#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
101#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
102
103#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0)
104
105#define IB_BTH_REQ_ACK (1 << 31)
106#define IB_BTH_SOLICITED (1 << 23)
107#define IB_BTH_MIG_REQ (1 << 22)
108
109/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
110#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)
111
112#define IB_GRH_VERSION 6
113#define IB_GRH_VERSION_MASK 0xF
114#define IB_GRH_VERSION_SHIFT 28
115#define IB_GRH_TCLASS_MASK 0xFF
116#define IB_GRH_TCLASS_SHIFT 20
117#define IB_GRH_FLOW_MASK 0xFFFFF
118#define IB_GRH_FLOW_SHIFT 0
119#define IB_GRH_NEXT_HDR 0x1B
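To see how the IB_GRH_* shifts and masks above pack the first GRH dword, here is a minimal standalone sketch; the field values are invented for illustration:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	/* Mirrors the defines above: version in bits 31:28, traffic
    	 * class in bits 27:20, flow label in bits 19:0. */
    	uint32_t version = 6;        /* IB_GRH_VERSION */
    	uint32_t tclass  = 0x12;     /* example traffic class */
    	uint32_t flow    = 0x12345;  /* example flow label */
    	uint32_t word0   = (version << 28) | (tclass << 20) | flow;

    	assert(((word0 >> 28) & 0xF) == version);
    	assert(((word0 >> 20) & 0xFF) == tclass);
    	assert((word0 & 0xFFFFF) == flow);
    	return 0;
    }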
120
121#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
122
123/* Values for set/get portinfo VLCap OperationalVLs */
124#define IB_VL_VL0 1
125#define IB_VL_VL0_1 2
126#define IB_VL_VL0_3 3
127#define IB_VL_VL0_7 4
128#define IB_VL_VL0_14 5
129
130static inline int qib_num_vls(int vls)
131{
132 switch (vls) {
133 default:
134 case IB_VL_VL0:
135 return 1;
136 case IB_VL_VL0_1:
137 return 2;
138 case IB_VL_VL0_3:
139 return 4;
140 case IB_VL_VL0_7:
141 return 8;
142 case IB_VL_VL0_14:
143 return 15;
144 }
145}
146
147struct ib_reth {
148 __be64 vaddr;
149 __be32 rkey;
150 __be32 length;
151} __attribute__ ((packed));
152
153struct ib_atomic_eth {
154 __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
155 __be32 rkey;
156 __be64 swap_data;
157 __be64 compare_data;
158} __attribute__ ((packed));
159
160struct qib_other_headers {
161 __be32 bth[3];
162 union {
163 struct {
164 __be32 deth[2];
165 __be32 imm_data;
166 } ud;
167 struct {
168 struct ib_reth reth;
169 __be32 imm_data;
170 } rc;
171 struct {
172 __be32 aeth;
173 __be32 atomic_ack_eth[2];
174 } at;
175 __be32 imm_data;
176 __be32 aeth;
177 struct ib_atomic_eth atomic_eth;
178 } u;
179} __attribute__ ((packed));
180
181/*
182 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
183 * long (72 w/ imm_data). Only the first 56 bytes of the IB header
184 * will be in the eager header buffer. The remaining 12 or 16 bytes
185 * are in the data buffer.
186 */
187struct qib_ib_header {
188 __be16 lrh[4];
189 union {
190 struct {
191 struct ib_grh grh;
192 struct qib_other_headers oth;
193 } l;
194 struct qib_other_headers oth;
195 } u;
196} __attribute__ ((packed));
197
198struct qib_pio_header {
199 __le32 pbc[2];
200 struct qib_ib_header hdr;
201} __attribute__ ((packed));
202
203/*
204 * There is one struct qib_mcast for each multicast GID.
205 * All attached QPs are then stored as a list of
206 * struct qib_mcast_qp.
207 */
208struct qib_mcast_qp {
209 struct list_head list;
210 struct qib_qp *qp;
211};
212
213struct qib_mcast {
214 struct rb_node rb_node;
215 union ib_gid mgid;
216 struct list_head qp_list;
217 wait_queue_head_t wait;
218 atomic_t refcount;
219 int n_attached;
220};
221
222/* Protection domain */
223struct qib_pd {
224 struct ib_pd ibpd;
225 int user; /* non-zero if created from user space */
226};
227
228/* Address Handle */
229struct qib_ah {
230 struct ib_ah ibah;
231 struct ib_ah_attr attr;
232 atomic_t refcount;
233};
234
235/*
236 * This structure is used by qib_mmap() to validate an offset
237 * when an mmap() request is made. The vm_area_struct then uses
238 * this as its vm_private_data.
239 */
240struct qib_mmap_info {
241 struct list_head pending_mmaps;
242 struct ib_ucontext *context;
243 void *obj;
244 __u64 offset;
245 struct kref ref;
246 unsigned size;
247};
248
249/*
250 * This structure is used to contain the head pointer, tail pointer,
251 * and completion queue entries as a single memory allocation so
252 * it can be mmap'ed into user space.
253 */
254struct qib_cq_wc {
255 u32 head; /* index of next entry to fill */
256 u32 tail; /* index of next ib_poll_cq() entry */
257 union {
258 /* these are actually size ibcq.cqe + 1 */
259 struct ib_uverbs_wc uqueue[0];
260 struct ib_wc kqueue[0];
261 };
262};
263
264/*
265 * The completion queue structure.
266 */
267struct qib_cq {
268 struct ib_cq ibcq;
269 struct work_struct comptask;
270 spinlock_t lock; /* protect changes in this struct */
271 u8 notify;
272 u8 triggered;
273 struct qib_cq_wc *queue;
274 struct qib_mmap_info *ip;
275};
276
277/*
278 * A segment is a linear region of low physical memory.
279 * XXX Maybe we should use phys addr here and kmap()/kunmap().
280 * Used by the verbs layer.
281 */
282struct qib_seg {
283 void *vaddr;
284 size_t length;
285};
286
287/* The number of qib_segs that fit in a page. */
288#define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg))
289
290struct qib_segarray {
291 struct qib_seg segs[QIB_SEGSZ];
292};
293
294struct qib_mregion {
295 struct ib_pd *pd; /* shares refcnt of ibmr.pd */
296 u64 user_base; /* User's address for this region */
297 u64 iova; /* IB start address of this region */
298 size_t length;
299 u32 lkey;
300 u32 offset; /* offset (bytes) to start of region */
301 int access_flags;
302 u32 max_segs; /* number of qib_segs in all the arrays */
303 u32 mapsz; /* size of the map array */
304 atomic_t refcount;
305 struct qib_segarray *map[0]; /* the segments */
306};
307
308/*
309 * These keep track of the copy progress within a memory region.
310 * Used by the verbs layer.
311 */
312struct qib_sge {
313 struct qib_mregion *mr;
314 void *vaddr; /* kernel virtual address of segment */
315 u32 sge_length; /* length of the SGE */
316 u32 length; /* remaining length of the segment */
317 u16 m; /* current index: mr->map[m] */
318 u16 n; /* current index: mr->map[m]->segs[n] */
319};
320
321/* Memory region */
322struct qib_mr {
323 struct ib_mr ibmr;
324 struct ib_umem *umem;
325 struct qib_mregion mr; /* must be last */
326};
327
328/*
329 * Send work request queue entry.
330 * The size of the sg_list is determined when the QP is created and stored
331 * in qp->s_max_sge.
332 */
333struct qib_swqe {
334 struct ib_send_wr wr; /* don't use wr.sg_list */
335 u32 psn; /* first packet sequence number */
336 u32 lpsn; /* last packet sequence number */
337 u32 ssn; /* send sequence number */
338 u32 length; /* total length of data in sg_list */
339 struct qib_sge sg_list[0];
340};
341
342/*
343 * Receive work request queue entry.
344 * The size of the sg_list is determined when the QP (or SRQ) is created
345 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
346 */
347struct qib_rwqe {
348 u64 wr_id;
349 u8 num_sge;
350 struct ib_sge sg_list[0];
351};
352
353/*
354 * This structure is used to contain the head pointer, tail pointer,
355 * and receive work queue entries as a single memory allocation so
356 * it can be mmap'ed into user space.
357 * Note that the wq array elements are variable size so you can't
358 * just index into the array to get the N'th element;
359 * use get_rwqe_ptr() instead.
360 */
361struct qib_rwq {
362 u32 head; /* new work requests posted to the head */
363	u32 tail;		/* receive side pulls requests from here. */
364 struct qib_rwqe wq[0];
365};
366
367struct qib_rq {
368 struct qib_rwq *wq;
369 spinlock_t lock; /* protect changes in this struct */
370 u32 size; /* size of RWQE array */
371 u8 max_sge;
372};
373
374struct qib_srq {
375 struct ib_srq ibsrq;
376 struct qib_rq rq;
377 struct qib_mmap_info *ip;
378 /* send signal when number of RWQEs < limit */
379 u32 limit;
380};
381
382struct qib_sge_state {
383 struct qib_sge *sg_list; /* next SGE to be used if any */
384 struct qib_sge sge; /* progress state for the current SGE */
385 u32 total_len;
386 u8 num_sge;
387};
388
389/*
390 * This structure holds the information that the send tasklet needs
391 * to send a RDMA read response or atomic operation.
392 */
393struct qib_ack_entry {
394 u8 opcode;
395 u8 sent;
396 u32 psn;
397 u32 lpsn;
398 union {
399 struct qib_sge rdma_sge;
400 u64 atomic_data;
401 };
402};
403
404/*
405 * Variables prefixed with s_ are for the requester (sender).
406 * Variables prefixed with r_ are for the responder (receiver).
407 * Variables prefixed with ack_ are for responder replies.
408 *
409 * Common variables are protected by both r_rq.lock and s_lock, in that order;
410 * taking both locks only happens in modify_qp() or when changing the QP 'state'.
411 */
412struct qib_qp {
413 struct ib_qp ibqp;
414	struct qib_qp *next;            /* linked list for QPN hash table */
415	struct qib_qp *timer_next;      /* linked list for qib_ib_timer() */
416	struct list_head iowait;        /* link for wait PIO buf */
417	struct list_head rspwait;       /* link for waiting to respond */
418 struct ib_ah_attr remote_ah_attr;
419 struct ib_ah_attr alt_ah_attr;
420 struct qib_ib_header s_hdr; /* next packet header to send */
421 atomic_t refcount;
422 wait_queue_head_t wait;
423 wait_queue_head_t wait_dma;
424 struct timer_list s_timer;
425 struct work_struct s_work;
426 struct qib_mmap_info *ip;
427 struct qib_sge_state *s_cur_sge;
428 struct qib_verbs_txreq *s_tx;
429 struct qib_mregion *s_rdma_mr;
430 struct qib_sge_state s_sge; /* current send request data */
431 struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1];
432 struct qib_sge_state s_ack_rdma_sge;
433 struct qib_sge_state s_rdma_read_sge;
434 struct qib_sge_state r_sge; /* current receive data */
435 spinlock_t r_lock; /* used for APM */
436 spinlock_t s_lock;
437 atomic_t s_dma_busy;
438 unsigned processor_id; /* Processor ID QP is bound to */
439 u32 s_flags;
440 u32 s_cur_size; /* size of send packet in bytes */
441 u32 s_len; /* total length of s_sge */
442 u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
443 u32 s_next_psn; /* PSN for next request */
444 u32 s_last_psn; /* last response PSN processed */
445 u32 s_sending_psn; /* lowest PSN that is being sent */
446 u32 s_sending_hpsn; /* highest PSN that is being sent */
447 u32 s_psn; /* current packet sequence number */
448 u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
449 u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
450 u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
451 u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
452 u64 r_wr_id; /* ID for current receive WQE */
453 unsigned long r_aflags;
454 u32 r_len; /* total length of r_sge */
455 u32 r_rcv_len; /* receive data len processed */
456 u32 r_psn; /* expected rcv packet sequence number */
457 u32 r_msn; /* message sequence number */
458 u16 s_hdrwords; /* size of s_hdr in 32 bit words */
459 u16 s_rdma_ack_cnt;
460 u8 state; /* QP state */
461 u8 s_state; /* opcode of last packet sent */
462 u8 s_ack_state; /* opcode of packet to ACK */
463 u8 s_nak_state; /* non-zero if NAK is pending */
464 u8 r_state; /* opcode of last packet received */
465 u8 r_nak_state; /* non-zero if NAK is pending */
466 u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
467 u8 r_flags;
468 u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
469 u8 r_head_ack_queue; /* index into s_ack_queue[] */
470 u8 qp_access_flags;
471 u8 s_max_sge; /* size of s_wq->sg_list */
472 u8 s_retry_cnt; /* number of times to retry */
473 u8 s_rnr_retry_cnt;
474 u8 s_retry; /* requester retry counter */
475 u8 s_rnr_retry; /* requester RNR retry counter */
476 u8 s_pkey_index; /* PKEY index to use */
477 u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
478 u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
479 u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
480 u8 s_tail_ack_queue; /* index into s_ack_queue[] */
481 u8 s_srate;
482 u8 s_draining;
483 u8 s_mig_state;
484 u8 timeout; /* Timeout for this QP */
485 u8 alt_timeout; /* Alternate path timeout for this QP */
486 u8 port_num;
487 enum ib_mtu path_mtu;
488 u32 remote_qpn;
489 u32 qkey; /* QKEY for this QP (for UD or RD) */
490 u32 s_size; /* send work queue size */
491 u32 s_head; /* new entries added here */
492 u32 s_tail; /* next entry to process */
493 u32 s_cur; /* current work queue entry */
494 u32 s_acked; /* last un-ACK'ed entry */
495 u32 s_last; /* last completed entry */
496 u32 s_ssn; /* SSN of tail entry */
497 u32 s_lsn; /* limit sequence number (credit) */
498 struct qib_swqe *s_wq; /* send work queue */
499 struct qib_swqe *s_wqe;
500 struct qib_rq r_rq; /* receive work queue */
501 struct qib_sge r_sg_list[0]; /* verified SGEs */
502};
503
504/*
505 * Atomic bit definitions for r_aflags.
506 */
507#define QIB_R_WRID_VALID 0
508#define QIB_R_REWIND_SGE 1
509
510/*
511 * Bit definitions for r_flags.
512 */
513#define QIB_R_REUSE_SGE 0x01
514#define QIB_R_RDMAR_SEQ 0x02
515#define QIB_R_RSP_NAK 0x04
516#define QIB_R_RSP_SEND 0x08
517#define QIB_R_COMM_EST 0x10
518
519/*
520 * Bit definitions for s_flags.
521 *
522 * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
523 * QIB_S_BUSY - send tasklet is processing the QP
524 * QIB_S_TIMER - the RC retry timer is active
525 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
526 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
527 * before processing the next SWQE
528 * QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
529 * before processing the next SWQE
530 * QIB_S_WAIT_RNR - waiting for RNR timeout
531 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
532 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
533 * next send completion entry not via send DMA
534 * QIB_S_WAIT_PIO - waiting for a send buffer to be available
535 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
536 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
537 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
538 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
539 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
540 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
541 */
542#define QIB_S_SIGNAL_REQ_WR 0x0001
543#define QIB_S_BUSY 0x0002
544#define QIB_S_TIMER 0x0004
545#define QIB_S_RESP_PENDING 0x0008
546#define QIB_S_ACK_PENDING 0x0010
547#define QIB_S_WAIT_FENCE 0x0020
548#define QIB_S_WAIT_RDMAR 0x0040
549#define QIB_S_WAIT_RNR 0x0080
550#define QIB_S_WAIT_SSN_CREDIT 0x0100
551#define QIB_S_WAIT_DMA 0x0200
552#define QIB_S_WAIT_PIO 0x0400
553#define QIB_S_WAIT_TX 0x0800
554#define QIB_S_WAIT_DMA_DESC 0x1000
555#define QIB_S_WAIT_KMEM 0x2000
556#define QIB_S_WAIT_PSN 0x4000
557#define QIB_S_WAIT_ACK 0x8000
558#define QIB_S_SEND_ONE 0x10000
559#define QIB_S_UNLIMITED_CREDIT 0x20000
560
561/*
562 * Wait flags that would prevent any packet type from being sent.
563 */
564#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
565 QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)
566
567/*
568 * Wait flags that would prevent send work requests from making progress.
569 */
570#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
571 QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
572 QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)
573
574#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)
575
576#define QIB_PSN_CREDIT 16
577
578/*
579 * Since struct qib_swqe is not a fixed size, we can't simply index into
580 * struct qib_qp.s_wq. This function does the array index computation.
581 */
582static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
583 unsigned n)
584{
585 return (struct qib_swqe *)((char *)qp->s_wq +
586 (sizeof(struct qib_swqe) +
587 qp->s_max_sge *
588 sizeof(struct qib_sge)) * n);
589}
590
591/*
592 * Since struct qib_rwqe is not a fixed size, we can't simply index into
593 * struct qib_rwq.wq. This function does the array index computation.
594 */
595static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
596{
597 return (struct qib_rwqe *)
598 ((char *) rq->wq->wq +
599 (sizeof(struct qib_rwqe) +
600 rq->max_sge * sizeof(struct ib_sge)) * n);
601}
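To make the variable-stride arithmetic used by get_swqe_ptr() and get_rwqe_ptr() concrete, a standalone sketch with toy stand-in structs (the sizes and names here are hypothetical, chosen only for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* Toy stand-ins for the real structs (layout idea only). */
    struct sge  { uint64_t addr; uint32_t len, key; };
    struct rwqe { uint64_t wr_id; uint8_t num_sge; struct sge sg_list[]; };

    /* Same computation as get_rwqe_ptr(): the stride of one entry is
     * the fixed header plus max_sge trailing SGEs. */
    static struct rwqe *rwqe_ptr(void *wq, unsigned max_sge, unsigned n)
    {
    	return (struct rwqe *)((char *)wq +
    			       (sizeof(struct rwqe) +
    				max_sge * sizeof(struct sge)) * n);
    }

    int main(void)
    {
    	char buf[4096];
    	unsigned max_sge = 4;

    	/* Entry 2 starts two strides past the base of the array. */
    	printf("stride = %zu, entry 2 offset = %td\n",
    	       sizeof(struct rwqe) + max_sge * sizeof(struct sge),
    	       (char *)rwqe_ptr(buf, max_sge, 2) - buf);
    	return 0;
    }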
602
603/*
604 * QPN-map pages start out as NULL; they get allocated upon
605 * first use and are never deallocated. This way,
606 * large bitmaps are not allocated unless large numbers of QPs are used.
607 */
608struct qpn_map {
609 void *page;
610};
611
612struct qib_qpn_table {
613 spinlock_t lock; /* protect changes in this struct */
614 unsigned flags; /* flags for QP0/1 allocated for each port */
615 u32 last; /* last QP number allocated */
616 u32 nmaps; /* size of the map table */
617 u16 limit;
618 u16 mask;
619 /* bit map of free QP numbers other than 0/1 */
620 struct qpn_map map[QPNMAP_ENTRIES];
621};
622
623struct qib_lkey_table {
624 spinlock_t lock; /* protect changes in this struct */
625 u32 next; /* next unused index (speeds search) */
626 u32 gen; /* generation count */
627 u32 max; /* size of the table */
628 struct qib_mregion **table;
629};
630
631struct qib_opcode_stats {
632 u64 n_packets; /* number of packets */
633 u64 n_bytes; /* total number of bytes */
634};
635
636struct qib_ibport {
637 struct qib_qp *qp0;
638 struct qib_qp *qp1;
639 struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
640 struct qib_ah *sm_ah;
641 struct qib_ah *smi_ah;
642 struct rb_root mcast_tree;
643 spinlock_t lock; /* protect changes in this struct */
644
645 /* non-zero when timer is set */
646 unsigned long mkey_lease_timeout;
647 unsigned long trap_timeout;
648 __be64 gid_prefix; /* in network order */
649 __be64 mkey;
650 __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
651 u64 tid; /* TID for traps */
652 u64 n_unicast_xmit; /* total unicast packets sent */
653 u64 n_unicast_rcv; /* total unicast packets received */
654 u64 n_multicast_xmit; /* total multicast packets sent */
655 u64 n_multicast_rcv; /* total multicast packets received */
656 u64 z_symbol_error_counter; /* starting count for PMA */
657 u64 z_link_error_recovery_counter; /* starting count for PMA */
658 u64 z_link_downed_counter; /* starting count for PMA */
659 u64 z_port_rcv_errors; /* starting count for PMA */
660 u64 z_port_rcv_remphys_errors; /* starting count for PMA */
661 u64 z_port_xmit_discards; /* starting count for PMA */
662 u64 z_port_xmit_data; /* starting count for PMA */
663 u64 z_port_rcv_data; /* starting count for PMA */
664 u64 z_port_xmit_packets; /* starting count for PMA */
665 u64 z_port_rcv_packets; /* starting count for PMA */
666 u32 z_local_link_integrity_errors; /* starting count for PMA */
667 u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
668 u32 z_vl15_dropped; /* starting count for PMA */
669 u32 n_rc_resends;
670 u32 n_rc_acks;
671 u32 n_rc_qacks;
672 u32 n_rc_delayed_comp;
673 u32 n_seq_naks;
674 u32 n_rdma_seq;
675 u32 n_rnr_naks;
676 u32 n_other_naks;
677 u32 n_loop_pkts;
678 u32 n_pkt_drops;
679 u32 n_vl15_dropped;
680 u32 n_rc_timeouts;
681 u32 n_dmawait;
682 u32 n_unaligned;
683 u32 n_rc_dupreq;
684 u32 n_rc_seqnak;
685 u32 port_cap_flags;
686 u32 pma_sample_start;
687 u32 pma_sample_interval;
688 __be16 pma_counter_select[5];
689 u16 pma_tag;
690 u16 pkey_violations;
691 u16 qkey_violations;
692 u16 mkey_violations;
693 u16 mkey_lease_period;
694 u16 sm_lid;
695 u16 repress_traps;
696 u8 sm_sl;
697 u8 mkeyprot;
698 u8 subnet_timeout;
699 u8 vl_high_limit;
700 u8 sl_to_vl[16];
701
702 struct qib_opcode_stats opstats[128];
703};
704
705struct qib_ibdev {
706 struct ib_device ibdev;
707 struct list_head pending_mmaps;
708 spinlock_t mmap_offset_lock; /* protect mmap_offset */
709 u32 mmap_offset;
710 struct qib_mregion *dma_mr;
711
712 /* QP numbers are shared by all IB ports */
713 struct qib_qpn_table qpn_table;
714 struct qib_lkey_table lk_table;
715 struct list_head piowait; /* list for wait PIO buf */
716 struct list_head dmawait; /* list for wait DMA */
717 struct list_head txwait; /* list for wait qib_verbs_txreq */
718 struct list_head memwait; /* list for wait kernel memory */
719 struct list_head txreq_free;
720 struct timer_list mem_timer;
721 struct qib_qp **qp_table;
722 struct qib_pio_header *pio_hdrs;
723 dma_addr_t pio_hdrs_phys;
724 /* list of QPs waiting for RNR timer */
725 spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
726 unsigned qp_table_size; /* size of the hash table */
727 spinlock_t qpt_lock;
728
729 u32 n_piowait;
730 u32 n_txwait;
731
732 u32 n_pds_allocated; /* number of PDs allocated for device */
733 spinlock_t n_pds_lock;
734 u32 n_ahs_allocated; /* number of AHs allocated for device */
735 spinlock_t n_ahs_lock;
736 u32 n_cqs_allocated; /* number of CQs allocated for device */
737 spinlock_t n_cqs_lock;
738 u32 n_qps_allocated; /* number of QPs allocated for device */
739 spinlock_t n_qps_lock;
740 u32 n_srqs_allocated; /* number of SRQs allocated for device */
741 spinlock_t n_srqs_lock;
742 u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
743 spinlock_t n_mcast_grps_lock;
744};
745
746struct qib_verbs_counters {
747 u64 symbol_error_counter;
748 u64 link_error_recovery_counter;
749 u64 link_downed_counter;
750 u64 port_rcv_errors;
751 u64 port_rcv_remphys_errors;
752 u64 port_xmit_discards;
753 u64 port_xmit_data;
754 u64 port_rcv_data;
755 u64 port_xmit_packets;
756 u64 port_rcv_packets;
757 u32 local_link_integrity_errors;
758 u32 excessive_buffer_overrun_errors;
759 u32 vl15_dropped;
760};
761
762static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
763{
764 return container_of(ibmr, struct qib_mr, ibmr);
765}
766
767static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
768{
769 return container_of(ibpd, struct qib_pd, ibpd);
770}
771
772static inline struct qib_ah *to_iah(struct ib_ah *ibah)
773{
774 return container_of(ibah, struct qib_ah, ibah);
775}
776
777static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
778{
779 return container_of(ibcq, struct qib_cq, ibcq);
780}
781
782static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
783{
784 return container_of(ibsrq, struct qib_srq, ibsrq);
785}
786
787static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
788{
789 return container_of(ibqp, struct qib_qp, ibqp);
790}
791
792static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
793{
794 return container_of(ibdev, struct qib_ibdev, ibdev);
795}
796
797/*
798 * Send if not busy or waiting for I/O and either
799 * an RC response is pending or we can process send work requests.
800 */
801static inline int qib_send_ok(struct qib_qp *qp)
802{
803 return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
804 (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
805 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
806}
807
808extern struct workqueue_struct *qib_wq;
809extern struct workqueue_struct *qib_cq_wq;
810
811/*
812 * This must be called with s_lock held.
813 */
814static inline void qib_schedule_send(struct qib_qp *qp)
815{
816 if (qib_send_ok(qp)) {
817 if (qp->processor_id == smp_processor_id())
818 queue_work(qib_wq, &qp->s_work);
819 else
820 queue_work_on(qp->processor_id,
821 qib_wq, &qp->s_work);
822 }
823}
824
825static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
826{
827 u16 p1 = pkey1 & 0x7FFF;
828 u16 p2 = pkey2 & 0x7FFF;
829
830 /*
831 * Low 15 bits must be non-zero and match, and
832 * one of the two must be a full member.
833 */
834 return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
835}
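A worked example of the rule above (PKey values invented for illustration): 0x8001 is a full-member key (bit 15 set) and 0x0001 the matching limited-member key, so that pair is accepted, while two limited-member keys are not. The sketch uses an explicit bit test instead of the kernel's signed-compare idiom:

    #include <assert.h>
    #include <stdint.h>

    static int pkey_ok(uint16_t pkey1, uint16_t pkey2)
    {
    	uint16_t p1 = pkey1 & 0x7FFF;
    	uint16_t p2 = pkey2 & 0x7FFF;

    	/* Low 15 bits must be non-zero and match, and at least one
    	 * key must have the full-member bit (bit 15) set. */
    	return p1 && p1 == p2 && ((pkey1 | pkey2) & 0x8000);
    }

    int main(void)
    {
    	assert(pkey_ok(0x8001, 0x0001));  /* full + limited member: match */
    	assert(!pkey_ok(0x0001, 0x0001)); /* two limited members: no match */
    	assert(!pkey_ok(0x8001, 0x8002)); /* different partitions: no match */
    	return 0;
    }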
836
837void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
838 u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
839void qib_cap_mask_chg(struct qib_ibport *ibp);
840void qib_sys_guid_chg(struct qib_ibport *ibp);
841void qib_node_desc_chg(struct qib_ibport *ibp);
842int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
843 struct ib_wc *in_wc, struct ib_grh *in_grh,
844 struct ib_mad *in_mad, struct ib_mad *out_mad);
845int qib_create_agents(struct qib_ibdev *dev);
846void qib_free_agents(struct qib_ibdev *dev);
847
848/*
849 * Compare the lower 24 bits of the two values.
850 * Returns an integer less than, equal to, or greater than zero.
851 */
852static inline int qib_cmp24(u32 a, u32 b)
853{
854 return (((int) a) - ((int) b)) << 8;
855}
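The shift by 8 moves bit 23 of the 24-bit difference into the sign bit, so PSNs compare correctly across the 2^24 wraparound. A standalone check of the trick; this sketch does the subtraction and shift on unsigned values so the wraparound is well-defined C, whereas the kernel version relies on two's-complement behavior:

    #include <assert.h>
    #include <stdint.h>

    static int32_t cmp24(uint32_t a, uint32_t b)
    {
    	/* Same idea as qib_cmp24() above. */
    	return (int32_t)((a - b) << 8);
    }

    int main(void)
    {
    	assert(cmp24(5, 3) > 0);               /* ordinary case */
    	assert(cmp24(3, 5) < 0);
    	assert(cmp24(0x000001, 0xFFFFFF) > 0); /* 1 is "after" 2^24 - 1 */
    	assert(cmp24(0x1000000 + 7, 7) == 0);  /* bits above 23 are ignored */
    	return 0;
    }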
856
857struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);
858
859int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
860 u64 *rwords, u64 *spkts, u64 *rpkts,
861 u64 *xmit_wait);
862
863int qib_get_counters(struct qib_pportdata *ppd,
864 struct qib_verbs_counters *cntrs);
865
866int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
867
868int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
869
870int qib_mcast_tree_empty(struct qib_ibport *ibp);
871
872__be32 qib_compute_aeth(struct qib_qp *qp);
873
874struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);
875
876struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
877 struct ib_qp_init_attr *init_attr,
878 struct ib_udata *udata);
879
880int qib_destroy_qp(struct ib_qp *ibqp);
881
882int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);
883
884int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
885 int attr_mask, struct ib_udata *udata);
886
887int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
888 int attr_mask, struct ib_qp_init_attr *init_attr);
889
890unsigned qib_free_all_qps(struct qib_devdata *dd);
891
892void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);
893
894void qib_free_qpn_table(struct qib_qpn_table *qpt);
895
896void qib_get_credit(struct qib_qp *qp, u32 aeth);
897
898unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
899
900void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
901
902void qib_put_txreq(struct qib_verbs_txreq *tx);
903
904int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
905 u32 hdrwords, struct qib_sge_state *ss, u32 len);
906
907void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
908 int release);
909
910void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);
911
912void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
913 int has_grh, void *data, u32 tlen, struct qib_qp *qp);
914
915void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
916 int has_grh, void *data, u32 tlen, struct qib_qp *qp);
917
918int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
919
920void qib_rc_rnr_retry(unsigned long arg);
921
922void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);
923
924void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);
925
926int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
927
928void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
929 int has_grh, void *data, u32 tlen, struct qib_qp *qp);
930
931int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr);
932
933int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr);
934
935int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
936 struct qib_sge *isge, struct ib_sge *sge, int acc);
937
938int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
939 u32 len, u64 vaddr, u32 rkey, int acc);
940
941int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
942 struct ib_recv_wr **bad_wr);
943
944struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
945 struct ib_srq_init_attr *srq_init_attr,
946 struct ib_udata *udata);
947
948int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
949 enum ib_srq_attr_mask attr_mask,
950 struct ib_udata *udata);
951
952int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
953
954int qib_destroy_srq(struct ib_srq *ibsrq);
955
956void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
957
958int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
959
960struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
961 int comp_vector, struct ib_ucontext *context,
962 struct ib_udata *udata);
963
964int qib_destroy_cq(struct ib_cq *ibcq);
965
966int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
967
968int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
969
970struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);
971
972struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
973 struct ib_phys_buf *buffer_list,
974 int num_phys_buf, int acc, u64 *iova_start);
975
976struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
977 u64 virt_addr, int mr_access_flags,
978 struct ib_udata *udata);
979
980int qib_dereg_mr(struct ib_mr *ibmr);
981
982struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
983
984struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
985 struct ib_device *ibdev, int page_list_len);
986
987void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);
988
989int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);
990
991struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
992 struct ib_fmr_attr *fmr_attr);
993
994int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
995 int list_len, u64 iova);
996
997int qib_unmap_fmr(struct list_head *fmr_list);
998
999int qib_dealloc_fmr(struct ib_fmr *ibfmr);
1000
1001void qib_release_mmap_info(struct kref *ref);
1002
1003struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
1004 struct ib_ucontext *context,
1005 void *obj);
1006
1007void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
1008 u32 size, void *obj);
1009
1010int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
1011
1012int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);
1013
1014void qib_migrate_qp(struct qib_qp *qp);
1015
1016int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
1017 int has_grh, struct qib_qp *qp, u32 bth0);
1018
1019u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
1020 struct ib_global_route *grh, u32 hwords, u32 nwords);
1021
1022void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
1023 u32 bth0, u32 bth2);
1024
1025void qib_do_send(struct work_struct *work);
1026
1027void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
1028 enum ib_wc_status status);
1029
1030void qib_send_rc_ack(struct qib_qp *qp);
1031
1032int qib_make_rc_req(struct qib_qp *qp);
1033
1034int qib_make_uc_req(struct qib_qp *qp);
1035
1036int qib_make_ud_req(struct qib_qp *qp);
1037
1038int qib_register_ib_device(struct qib_devdata *);
1039
1040void qib_unregister_ib_device(struct qib_devdata *);
1041
1042void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);
1043
1044void qib_ib_piobufavail(struct qib_devdata *);
1045
1046unsigned qib_get_npkeys(struct qib_devdata *);
1047
1048unsigned qib_get_pkey(struct qib_ibport *, unsigned);
1049
1050extern const enum ib_wc_opcode ib_qib_wc_opcode[];
1051
1052/*
1053 * Below are the HCA-independent IB PhysPortState values, returned
1054 * by the f_ibphys_portstate() routine.
1055 */
1056#define IB_PHYSPORTSTATE_SLEEP 1
1057#define IB_PHYSPORTSTATE_POLL 2
1058#define IB_PHYSPORTSTATE_DISABLED 3
1059#define IB_PHYSPORTSTATE_CFG_TRAIN 4
1060#define IB_PHYSPORTSTATE_LINKUP 5
1061#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
1062#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
1063#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
1064#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
1065#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
1066#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
1067#define IB_PHYSPORTSTATE_CFG_ENH 0x10
1068#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13
1069
1070extern const int ib_qib_state_ops[];
1071
1072extern __be64 ib_qib_sys_image_guid; /* in network order */
1073
1074extern unsigned int ib_qib_lkey_table_size;
1075
1076extern unsigned int ib_qib_max_cqes;
1077
1078extern unsigned int ib_qib_max_cqs;
1079
1080extern unsigned int ib_qib_max_qp_wrs;
1081
1082extern unsigned int ib_qib_max_qps;
1083
1084extern unsigned int ib_qib_max_sges;
1085
1086extern unsigned int ib_qib_max_mcast_grps;
1087
1088extern unsigned int ib_qib_max_mcast_qp_attached;
1089
1090extern unsigned int ib_qib_max_srqs;
1091
1092extern unsigned int ib_qib_max_srq_sges;
1093
1094extern unsigned int ib_qib_max_srq_wrs;
1095
1096extern const u32 ib_qib_rnr_table[];
1097
1098extern struct ib_dma_mapping_ops qib_dma_mapping_ops;
1099
1100#endif /* QIB_VERBS_H */
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
new file mode 100644
index 000000000000..dabb697b1c2a
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
@@ -0,0 +1,368 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/rculist.h>
35
36#include "qib.h"
37
38/**
39 * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
40 * @qp: the QP to link
41 */
42static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
43{
44 struct qib_mcast_qp *mqp;
45
46 mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
47 if (!mqp)
48 goto bail;
49
50 mqp->qp = qp;
51 atomic_inc(&qp->refcount);
52
53bail:
54 return mqp;
55}
56
57static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
58{
59 struct qib_qp *qp = mqp->qp;
60
61 /* Notify qib_destroy_qp() if it is waiting. */
62 if (atomic_dec_and_test(&qp->refcount))
63 wake_up(&qp->wait);
64
65 kfree(mqp);
66}
67
68/**
69 * qib_mcast_alloc - allocate the multicast GID structure
70 * @mgid: the multicast GID
71 *
72 * A list of QPs will be attached to this structure.
73 */
74static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
75{
76 struct qib_mcast *mcast;
77
78 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
79 if (!mcast)
80 goto bail;
81
82 mcast->mgid = *mgid;
83 INIT_LIST_HEAD(&mcast->qp_list);
84 init_waitqueue_head(&mcast->wait);
85 atomic_set(&mcast->refcount, 0);
86 mcast->n_attached = 0;
87
88bail:
89 return mcast;
90}
91
92static void qib_mcast_free(struct qib_mcast *mcast)
93{
94 struct qib_mcast_qp *p, *tmp;
95
96 list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
97 qib_mcast_qp_free(p);
98
99 kfree(mcast);
100}
101
102/**
103 * qib_mcast_find - search the global table for the given multicast GID
104 * @ibp: the IB port structure
105 * @mgid: the multicast GID to search for
106 *
107 * Returns NULL if not found.
108 *
109 * The caller is responsible for decrementing the reference count if found.
110 */
111struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
112{
113 struct rb_node *n;
114 unsigned long flags;
115 struct qib_mcast *mcast;
116
117 spin_lock_irqsave(&ibp->lock, flags);
118 n = ibp->mcast_tree.rb_node;
119 while (n) {
120 int ret;
121
122 mcast = rb_entry(n, struct qib_mcast, rb_node);
123
124 ret = memcmp(mgid->raw, mcast->mgid.raw,
125 sizeof(union ib_gid));
126 if (ret < 0)
127 n = n->rb_left;
128 else if (ret > 0)
129 n = n->rb_right;
130 else {
131 atomic_inc(&mcast->refcount);
132 spin_unlock_irqrestore(&ibp->lock, flags);
133 goto bail;
134 }
135 }
136 spin_unlock_irqrestore(&ibp->lock, flags);
137
138 mcast = NULL;
139
140bail:
141 return mcast;
142}
143
144/**
145 * qib_mcast_add - insert mcast GID into table and attach QP struct
146 * @mcast: the mcast GID structure to insert
147 * @mqp: the QP to attach
148 *
149 * Return zero if both were added. Return EEXIST if the GID was already in
150 * the table but the QP was added. Return ESRCH if the QP was already
151 * attached and neither structure was added.
152 */
153static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
154 struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
155{
156 struct rb_node **n = &ibp->mcast_tree.rb_node;
157 struct rb_node *pn = NULL;
158 int ret;
159
160 spin_lock_irq(&ibp->lock);
161
162 while (*n) {
163 struct qib_mcast *tmcast;
164 struct qib_mcast_qp *p;
165
166 pn = *n;
167 tmcast = rb_entry(pn, struct qib_mcast, rb_node);
168
169 ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
170 sizeof(union ib_gid));
171 if (ret < 0) {
172 n = &pn->rb_left;
173 continue;
174 }
175 if (ret > 0) {
176 n = &pn->rb_right;
177 continue;
178 }
179
180 /* Search the QP list to see if this is already there. */
181 list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
182 if (p->qp == mqp->qp) {
183 ret = ESRCH;
184 goto bail;
185 }
186 }
187 if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
188 ret = ENOMEM;
189 goto bail;
190 }
191
192 tmcast->n_attached++;
193
194 list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
195 ret = EEXIST;
196 goto bail;
197 }
198
199 spin_lock(&dev->n_mcast_grps_lock);
200 if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
201 spin_unlock(&dev->n_mcast_grps_lock);
202 ret = ENOMEM;
203 goto bail;
204 }
205
206 dev->n_mcast_grps_allocated++;
207 spin_unlock(&dev->n_mcast_grps_lock);
208
209 mcast->n_attached++;
210
211 list_add_tail_rcu(&mqp->list, &mcast->qp_list);
212
213 atomic_inc(&mcast->refcount);
214 rb_link_node(&mcast->rb_node, pn, n);
215 rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
216
217 ret = 0;
218
219bail:
220 spin_unlock_irq(&ibp->lock);
221
222 return ret;
223}
224
225int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
226{
227 struct qib_qp *qp = to_iqp(ibqp);
228 struct qib_ibdev *dev = to_idev(ibqp->device);
229 struct qib_ibport *ibp;
230 struct qib_mcast *mcast;
231 struct qib_mcast_qp *mqp;
232 int ret;
233
234 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
235 ret = -EINVAL;
236 goto bail;
237 }
238
239 /*
240	 * Allocate data structures now, since it's better to do this outside
241	 * of spin locks and they will most likely be needed.
242 */
243 mcast = qib_mcast_alloc(gid);
244 if (mcast == NULL) {
245 ret = -ENOMEM;
246 goto bail;
247 }
248 mqp = qib_mcast_qp_alloc(qp);
249 if (mqp == NULL) {
250 qib_mcast_free(mcast);
251 ret = -ENOMEM;
252 goto bail;
253 }
254 ibp = to_iport(ibqp->device, qp->port_num);
255 switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
256 case ESRCH:
257 /* Neither was used: OK to attach the same QP twice. */
258 qib_mcast_qp_free(mqp);
259 qib_mcast_free(mcast);
260 break;
261
262 case EEXIST: /* The mcast wasn't used */
263 qib_mcast_free(mcast);
264 break;
265
266 case ENOMEM:
267 /* Exceeded the maximum number of mcast groups. */
268 qib_mcast_qp_free(mqp);
269 qib_mcast_free(mcast);
270 ret = -ENOMEM;
271 goto bail;
272
273 default:
274 break;
275 }
276
277 ret = 0;
278
279bail:
280 return ret;
281}
282
283int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
284{
285 struct qib_qp *qp = to_iqp(ibqp);
286 struct qib_ibdev *dev = to_idev(ibqp->device);
287 struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
288 struct qib_mcast *mcast = NULL;
289 struct qib_mcast_qp *p, *tmp;
290 struct rb_node *n;
291 int last = 0;
292 int ret;
293
294 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
295 ret = -EINVAL;
296 goto bail;
297 }
298
299 spin_lock_irq(&ibp->lock);
300
301 /* Find the GID in the mcast table. */
302 n = ibp->mcast_tree.rb_node;
303 while (1) {
304 if (n == NULL) {
305 spin_unlock_irq(&ibp->lock);
306 ret = -EINVAL;
307 goto bail;
308 }
309
310 mcast = rb_entry(n, struct qib_mcast, rb_node);
311 ret = memcmp(gid->raw, mcast->mgid.raw,
312 sizeof(union ib_gid));
313 if (ret < 0)
314 n = n->rb_left;
315 else if (ret > 0)
316 n = n->rb_right;
317 else
318 break;
319 }
320
321 /* Search the QP list. */
322 list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
323 if (p->qp != qp)
324 continue;
325 /*
326 * We found it, so remove it, but don't poison the forward
327 * link until we are sure there are no list walkers.
328 */
329 list_del_rcu(&p->list);
330 mcast->n_attached--;
331
332 /* If this was the last attached QP, remove the GID too. */
333 if (list_empty(&mcast->qp_list)) {
334 rb_erase(&mcast->rb_node, &ibp->mcast_tree);
335 last = 1;
336 }
337 break;
338 }
339
340 spin_unlock_irq(&ibp->lock);
341
342 if (p) {
343 /*
344 * Wait for any list walkers to finish before freeing the
345 * list element.
346 */
347 wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
348 qib_mcast_qp_free(p);
349 }
350 if (last) {
351 atomic_dec(&mcast->refcount);
352 wait_event(mcast->wait, !atomic_read(&mcast->refcount));
353 qib_mcast_free(mcast);
354 spin_lock_irq(&dev->n_mcast_grps_lock);
355 dev->n_mcast_grps_allocated--;
356 spin_unlock_irq(&dev->n_mcast_grps_lock);
357 }
358
359 ret = 0;
360
361bail:
362 return ret;
363}
364
365int qib_mcast_tree_empty(struct qib_ibport *ibp)
366{
367 return ibp->mcast_tree.rb_node == NULL;
368}
diff --git a/drivers/infiniband/hw/ipath/ipath_7220.h b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
index 74fa5cc5131d..673cf4c22ebd 100644
--- a/drivers/infiniband/hw/ipath/ipath_7220.h
+++ b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
@@ -1,7 +1,5 @@
1#ifndef _IPATH_7220_H
2#define _IPATH_7220_H
3/* 1/*
4 * Copyright (c) 2007 QLogic Corporation. All rights reserved. 2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
5 * 3 *
6 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -33,25 +31,32 @@
33 */ 31 */
34 32
35/* 33/*
36 * This header file provides the declarations and common definitions 34 * This file is conditionally built on PowerPC only. Otherwise weak symbol
37 * for (mostly) manipulation of the SerDes blocks within the IBA7220. 35 * versions of the functions exported from here are used.
38 * the functions declared should only be called from within other
39 * 7220-related files such as ipath_iba7220.c or ipath_sd7220.c.
40 */ 36 */
41int ipath_sd7220_presets(struct ipath_devdata *dd);
42int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset);
43int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, u8 *img,
44 int len, int offset);
45int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, const u8 *img,
46 int len, int offset);
47/*
48 * Below used for sdnum parameter, selecting one of the two sections
49 * used for PCIe, or the single SerDes used for IB, which is the
50 * only one currently used
51 */
52#define IB_7220_SERDES 2
53 37
54int ipath_sd7220_ib_load(struct ipath_devdata *dd); 38#include "qib.h"
55int ipath_sd7220_ib_vfy(struct ipath_devdata *dd);
56 39
57#endif /* _IPATH_7220_H */ 40/**
41 * qib_enable_wc - enable write combining for MMIO writes to the device
42 * @dd: qlogic_ib device
43 *
44 * Nothing to do on PowerPC, so just return without error.
45 */
46int qib_enable_wc(struct qib_devdata *dd)
47{
48 return 0;
49}
50
51/**
52 * qib_unordered_wc - indicate whether write combining is unordered
53 *
54 * Because our performance depends on our ability to do write
55 * combining mmio writes in the most efficient way, we need to
56 * know if we are on a processor that may reorder stores when
57 * write combining.
58 */
59int qib_unordered_wc(void)
60{
61 return 1;
62}
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
new file mode 100644
index 000000000000..561b8bca4060
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -0,0 +1,171 @@
1/*
2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34/*
35 * This file is conditionally built on x86_64 only. Otherwise weak symbol
36 * versions of the functions exported from here are used.
37 */
38
39#include <linux/pci.h>
40#include <asm/mtrr.h>
41#include <asm/processor.h>
42
43#include "qib.h"
44
45/**
46 * qib_enable_wc - enable write combining for MMIO writes to the device
47 * @dd: qlogic_ib device
48 *
49 * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
50 * write combining.
51 */
52int qib_enable_wc(struct qib_devdata *dd)
53{
54 int ret = 0;
55 u64 pioaddr, piolen;
56 unsigned bits;
57 const unsigned long addr = pci_resource_start(dd->pcidev, 0);
58 const size_t len = pci_resource_len(dd->pcidev, 0);
59
60 /*
61	 * Set the PIO buffers to be write combining (WRCOMB), so we get HT
62	 * bursts to the chip. Linux (possibly the hardware) requires the
63	 * region to start at an address aligned to its length, which must be
64	 * a power of 2. For rev1 that means the base address; for rev2 it
65	 * will be just the PIO buffers themselves.
66 * For chips with two sets of buffers, the calculations are
67 * somewhat more complicated; we need to sum, and the piobufbase
68 * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
69 * The buffers are still packed, so a single range covers both.
70 */
71 if (dd->piobcnt2k && dd->piobcnt4k) {
72 /* 2 sizes for chip */
73 unsigned long pio2kbase, pio4kbase;
74 pio2kbase = dd->piobufbase & 0xffffffffUL;
75 pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
76 if (pio2kbase < pio4kbase) {
77 /* all current chips */
78 pioaddr = addr + pio2kbase;
79 piolen = pio4kbase - pio2kbase +
80 dd->piobcnt4k * dd->align4k;
81 } else {
82 pioaddr = addr + pio4kbase;
83 piolen = pio2kbase - pio4kbase +
84 dd->piobcnt2k * dd->palign;
85 }
86 } else { /* single buffer size (2K, currently) */
87 pioaddr = addr + dd->piobufbase;
88 piolen = dd->piobcnt2k * dd->palign +
89 dd->piobcnt4k * dd->align4k;
90 }
91
92 for (bits = 0; !(piolen & (1ULL << bits)); bits++)
93 /* do nothing */ ;
94
95 if (piolen != (1ULL << bits)) {
96 piolen >>= bits;
97 while (piolen >>= 1)
98 bits++;
99 piolen = 1ULL << (bits + 1);
100 }
101 if (pioaddr & (piolen - 1)) {
102 u64 atmp;
103 atmp = pioaddr & ~(piolen - 1);
104 if (atmp < addr || (atmp + piolen) > (addr + len)) {
105 qib_dev_err(dd, "No way to align address/size "
106 "(%llx/%llx), no WC mtrr\n",
107 (unsigned long long) atmp,
108 (unsigned long long) piolen << 1);
109 ret = -ENODEV;
110 } else {
111 pioaddr = atmp;
112 piolen <<= 1;
113 }
114 }
115
116 if (!ret) {
117 int cookie;
118
119 cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
120 if (cookie < 0) {
122			qib_devinfo(dd->pcidev,
123				    "mtrr_add() WC for PIO bufs "
124				    "failed (%d)\n",
125				    cookie);
126			ret = -EINVAL;
128 } else {
129 dd->wc_cookie = cookie;
130 dd->wc_base = (unsigned long) pioaddr;
131 dd->wc_len = (unsigned long) piolen;
132 }
133 }
134
135 return ret;
136}
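The power-of-2 rounding in the middle of qib_enable_wc() can be checked in isolation; a minimal standalone sketch of just that step (the 12 KB input below is invented for illustration):

    #include <assert.h>
    #include <stdint.h>

    /* Same rounding as qib_enable_wc(): round a length up to a power of 2. */
    static uint64_t round_pow2(uint64_t piolen)
    {
    	unsigned bits;

    	for (bits = 0; !(piolen & (1ULL << bits)); bits++)
    		/* find the lowest set bit */;

    	if (piolen != (1ULL << bits)) {
    		/* not already a power of 2: count the remaining bits
    		 * and round up to the next power of 2 */
    		piolen >>= bits;
    		while (piolen >>= 1)
    			bits++;
    		piolen = 1ULL << (bits + 1);
    	}
    	return piolen;
    }

    int main(void)
    {
    	assert(round_pow2(0x3000) == 0x4000); /* 12 KB -> 16 KB */
    	assert(round_pow2(0x4000) == 0x4000); /* already a power of 2 */
    	return 0;
    }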
137
138/**
139 * qib_disable_wc - disable write combining for MMIO writes to the device
140 * @dd: qlogic_ib device
141 */
142void qib_disable_wc(struct qib_devdata *dd)
143{
144 if (dd->wc_cookie) {
145 int r;
146
147 r = mtrr_del(dd->wc_cookie, dd->wc_base,
148 dd->wc_len);
149 if (r < 0)
150 qib_devinfo(dd->pcidev,
151 "mtrr_del(%lx, %lx, %lx) failed: %d\n",
152 dd->wc_cookie, dd->wc_base,
153 dd->wc_len, r);
154 dd->wc_cookie = 0; /* even on failure */
155 }
156}
157
158/**
159 * qib_unordered_wc - indicate whether write combining is unordered
160 *
161 * Because our performance depends on our ability to do write combining mmio
162 * writes in the most efficient way, we need to know if we are on an Intel
163 * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
164 * the order completed, and so no special flushing is required to get
165 * correct ordering. Intel processors, however, will flush write buffers
166 * out in "random" orders, and so explicit ordering is needed at times.
167 */
168int qib_unordered_wc(void)
169{
170 return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
171}
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 423e0e6031ab..34157bb97ed6 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -47,15 +47,15 @@ struct joydev {
47 struct mutex mutex; 47 struct mutex mutex;
48 struct device dev; 48 struct device dev;
49 49
50 struct js_corr corr[ABS_MAX + 1]; 50 struct js_corr corr[ABS_CNT];
51 struct JS_DATA_SAVE_TYPE glue; 51 struct JS_DATA_SAVE_TYPE glue;
52 int nabs; 52 int nabs;
53 int nkey; 53 int nkey;
54 __u16 keymap[KEY_MAX - BTN_MISC + 1]; 54 __u16 keymap[KEY_MAX - BTN_MISC + 1];
55 __u16 keypam[KEY_MAX - BTN_MISC + 1]; 55 __u16 keypam[KEY_MAX - BTN_MISC + 1];
56 __u8 absmap[ABS_MAX + 1]; 56 __u8 absmap[ABS_CNT];
57 __u8 abspam[ABS_MAX + 1]; 57 __u8 abspam[ABS_CNT];
58 __s16 abs[ABS_MAX + 1]; 58 __s16 abs[ABS_CNT];
59}; 59};
60 60
61struct joydev_client { 61struct joydev_client {
@@ -826,7 +826,7 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
826 joydev->handle.handler = handler; 826 joydev->handle.handler = handler;
827 joydev->handle.private = joydev; 827 joydev->handle.private = joydev;
828 828
829 for (i = 0; i < ABS_MAX + 1; i++) 829 for (i = 0; i < ABS_CNT; i++)
830 if (test_bit(i, dev->absbit)) { 830 if (test_bit(i, dev->absbit)) {
831 joydev->absmap[i] = joydev->nabs; 831 joydev->absmap[i] = joydev->nabs;
832 joydev->abspam[joydev->nabs] = i; 832 joydev->abspam[joydev->nabs] = i;
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index 35149ec455a9..79172af164f2 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -35,6 +35,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/keyboard.h>
+#include <linux/platform_device.h>
 
 #include <asm/amigaints.h>
 #include <asm/amigahw.h>
@@ -154,10 +155,9 @@ static const char *amikbd_messages[8] = {
 	[7] = KERN_WARNING "amikbd: keyboard interrupt\n"
 };
 
-static struct input_dev *amikbd_dev;
-
-static irqreturn_t amikbd_interrupt(int irq, void *dummy)
+static irqreturn_t amikbd_interrupt(int irq, void *data)
 {
+	struct input_dev *dev = data;
 	unsigned char scancode, down;
 
 	scancode = ~ciaa.sdr;	/* get and invert scancode (keyboard is active low) */
@@ -170,47 +170,42 @@ static irqreturn_t amikbd_interrupt(int irq, void *dummy)
 
 	if (scancode < 0x78) {		/* scancodes < 0x78 are keys */
 		if (scancode == 98) {	/* CapsLock is a toggle switch key on Amiga */
-			input_report_key(amikbd_dev, scancode, 1);
-			input_report_key(amikbd_dev, scancode, 0);
+			input_report_key(dev, scancode, 1);
+			input_report_key(dev, scancode, 0);
 		} else {
-			input_report_key(amikbd_dev, scancode, down);
+			input_report_key(dev, scancode, down);
 		}
 
-		input_sync(amikbd_dev);
+		input_sync(dev);
 	} else				/* scancodes >= 0x78 are error codes */
 		printk(amikbd_messages[scancode - 0x78]);
 
 	return IRQ_HANDLED;
 }
 
-static int __init amikbd_init(void)
+static int __init amikbd_probe(struct platform_device *pdev)
 {
+	struct input_dev *dev;
 	int i, j, err;
 
-	if (!AMIGAHW_PRESENT(AMI_KEYBOARD))
-		return -ENODEV;
-
-	if (!request_mem_region(CIAA_PHYSADDR-1+0xb00, 0x100, "amikeyb"))
-		return -EBUSY;
-
-	amikbd_dev = input_allocate_device();
-	if (!amikbd_dev) {
-		printk(KERN_ERR "amikbd: not enough memory for input device\n");
-		err = -ENOMEM;
-		goto fail1;
+	dev = input_allocate_device();
+	if (!dev) {
+		dev_err(&pdev->dev, "Not enough memory for input device\n");
+		return -ENOMEM;
 	}
 
-	amikbd_dev->name = "Amiga Keyboard";
-	amikbd_dev->phys = "amikbd/input0";
-	amikbd_dev->id.bustype = BUS_AMIGA;
-	amikbd_dev->id.vendor = 0x0001;
-	amikbd_dev->id.product = 0x0001;
-	amikbd_dev->id.version = 0x0100;
+	dev->name = pdev->name;
+	dev->phys = "amikbd/input0";
+	dev->id.bustype = BUS_AMIGA;
+	dev->id.vendor = 0x0001;
+	dev->id.product = 0x0001;
+	dev->id.version = 0x0100;
+	dev->dev.parent = &pdev->dev;
 
-	amikbd_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
+	dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
 
 	for (i = 0; i < 0x78; i++)
-		set_bit(i, amikbd_dev->keybit);
+		set_bit(i, dev->keybit);
 
 	for (i = 0; i < MAX_NR_KEYMAPS; i++) {
 		static u_short temp_map[NR_KEYS] __initdata;
@@ -229,30 +224,54 @@ static int __init amikbd_init(void)
 		memcpy(key_maps[i], temp_map, sizeof(temp_map));
 	}
 	ciaa.cra &= ~0x41;	/* serial data in, turn off TA */
-	if (request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
-			amikbd_interrupt)) {
-		err = -EBUSY;
+	err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
+			  dev);
+	if (err)
 		goto fail2;
-	}
 
-	err = input_register_device(amikbd_dev);
+	err = input_register_device(dev);
 	if (err)
 		goto fail3;
 
+	platform_set_drvdata(pdev, dev);
+
 	return 0;
 
- fail3:	free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt);
- fail2:	input_free_device(amikbd_dev);
- fail1:	release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100);
+ fail3:	free_irq(IRQ_AMIGA_CIAA_SP, dev);
+ fail2:	input_free_device(dev);
 	return err;
 }
 
-static void __exit amikbd_exit(void)
+static int __exit amikbd_remove(struct platform_device *pdev)
+{
+	struct input_dev *dev = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	free_irq(IRQ_AMIGA_CIAA_SP, dev);
+	input_unregister_device(dev);
+	return 0;
+}
+
+static struct platform_driver amikbd_driver = {
+	.remove	= __exit_p(amikbd_remove),
+	.driver	= {
+		.name	= "amiga-keyboard",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init amikbd_init(void)
 {
-	free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt);
-	input_unregister_device(amikbd_dev);
-	release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100);
+	return platform_driver_probe(&amikbd_driver, amikbd_probe);
}
 
 module_init(amikbd_init);
+
+static void __exit amikbd_exit(void)
+{
+	platform_driver_unregister(&amikbd_driver);
+}
+
 module_exit(amikbd_exit);
+
+MODULE_ALIAS("platform:amiga-keyboard");
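
The amikbd conversion above (like the amimouse one further down) uses the platform_driver_probe() idiom for hardware that cannot hot-plug: the probe routine stays in discarded __init memory, the driver structure deliberately carries no .probe field, and .remove is wrapped in __exit_p(). A minimal sketch of the idiom with an illustrative "foo" device, not anything from the patch:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int __init foo_probe(struct platform_device *pdev)
	{
		/* may live in .init.text: it runs exactly once, at registration */
		return 0;
	}

	static int __exit foo_remove(struct platform_device *pdev)
	{
		return 0;
	}

	static struct platform_driver foo_driver = {
		/* no .probe here; platform_driver_probe() supplies it */
		.remove	= __exit_p(foo_remove),
		.driver	= {
			.name	= "foo",
			.owner	= THIS_MODULE,
		},
	};

	static int __init foo_init(void)
	{
		/* binds already-registered "foo" devices, then drops foo_probe */
		return platform_driver_probe(&foo_driver, foo_probe);
	}
	module_init(foo_init);

	static void __exit foo_exit(void)
	{
		platform_driver_unregister(&foo_driver);
	}
	module_exit(foo_exit);

	MODULE_LICENSE("GPL");

The trade-off is that devices appearing later are never bound, which is exactly right for fixed Amiga chipset hardware.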
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 48cdabec372a..c44b9eafc556 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -80,6 +80,16 @@ config INPUT_M68K_BEEP
 	tristate "M68k Beeper support"
 	depends on M68K
 
+config INPUT_MAX8925_ONKEY
+	tristate "MAX8925 ONKEY support"
+	depends on MFD_MAX8925
+	help
+	  Support the ONKEY of MAX8925 PMICs as an input device
+	  reporting power button status.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called max8925_onkey.
+
 config INPUT_APANEL
 	tristate "Fujitsu Lifebook Application Panel buttons"
 	depends on X86 && I2C && LEDS_CLASS
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index f9f577031e06..71fe57d8023f 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_HP_SDC_RTC)		+= hp_sdc_rtc.o
 obj-$(CONFIG_INPUT_IXP4XX_BEEPER)	+= ixp4xx-beeper.o
 obj-$(CONFIG_INPUT_KEYSPAN_REMOTE)	+= keyspan_remote.o
 obj-$(CONFIG_INPUT_M68K_BEEP)		+= m68kspkr.o
+obj-$(CONFIG_INPUT_MAX8925_ONKEY)	+= max8925_onkey.o
 obj-$(CONFIG_INPUT_PCAP)		+= pcap_keys.o
 obj-$(CONFIG_INPUT_PCF50633_PMU)	+= pcf50633-input.o
 obj-$(CONFIG_INPUT_PCF8574)		+= pcf8574_keypad.o
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index ad730e15afc0..e00a1cc79c0a 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -43,6 +43,7 @@
 #include <linux/proc_fs.h>
 #include <linux/poll.h>
 #include <linux/rtc.h>
+#include <linux/smp_lock.h>
 #include <linux/semaphore.h>
 
 MODULE_AUTHOR("Brian S. Julin <bri@calyx.com>");
@@ -64,8 +65,8 @@ static DECLARE_WAIT_QUEUE_HEAD(hp_sdc_rtc_wait);
 static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
 			       size_t count, loff_t *ppos);
 
-static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
-			    unsigned int cmd, unsigned long arg);
+static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
+				      unsigned int cmd, unsigned long arg);
 
 static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait);
 
@@ -512,7 +513,7 @@ static int hp_sdc_rtc_read_proc(char *page, char **start, off_t off,
 	return len;
 }
 
-static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
+static int hp_sdc_rtc_ioctl(struct file *file,
 			    unsigned int cmd, unsigned long arg)
 {
 #if 1
@@ -659,14 +660,27 @@ static int hp_sdc_rtc_ioctl(struct file *file,
 #endif
 }
 
+static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
+				      unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = hp_sdc_rtc_ioctl(file, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
+
 static const struct file_operations hp_sdc_rtc_fops = {
 	.owner =	THIS_MODULE,
 	.llseek =	no_llseek,
 	.read =		hp_sdc_rtc_read,
 	.poll =		hp_sdc_rtc_poll,
-	.ioctl =	hp_sdc_rtc_ioctl,
+	.unlocked_ioctl = hp_sdc_rtc_unlocked_ioctl,
 	.open =		hp_sdc_rtc_open,
 	.fasync =	hp_sdc_rtc_fasync,
 };
 
 static struct miscdevice hp_sdc_rtc_dev = {
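
This hunk has the BKL-pushdown shape that recurs through this series (capi, isdn_common, mISDN, via-pmu, the dvb core): the old ->ioctl() keeps its logic and merely loses the inode argument, while a new wrapper takes the big kernel lock explicitly so the fops can move from .ioctl to .unlocked_ioctl with unchanged locking behaviour. A reduced sketch with illustrative names:

	#include <linux/fs.h>
	#include <linux/smp_lock.h>

	static int foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	{
		return 0;	/* former ->ioctl() body, logic unchanged */
	}

	static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
				       unsigned long arg)
	{
		long ret;

		lock_kernel();		/* preserve the old implicit BKL */
		ret = foo_ioctl(file, cmd, arg);
		unlock_kernel();

		return ret;
	}

	static const struct file_operations foo_fops = {
		.unlocked_ioctl	= foo_unlocked_ioctl,
	};

Once a driver's locking has been audited, the lock_kernel()/unlock_kernel() pair can be dropped or replaced by a private mutex.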
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
new file mode 100644
index 000000000000..80af44608018
--- /dev/null
+++ b/drivers/input/misc/max8925_onkey.c
@@ -0,0 +1,148 @@
+/**
+ * max8925_onkey.c - MAX8925 ONKEY driver
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ *      Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max8925.h>
+#include <linux/slab.h>
+
+#define HARDRESET_EN		(1 << 7)
+#define PWREN_EN		(1 << 7)
+
+struct max8925_onkey_info {
+	struct input_dev	*idev;
+	struct i2c_client	*i2c;
+	int			irq;
+};
+
+/*
+ * MAX8925 gives us an interrupt when ONKEY is held for 3 seconds.
+ * max8925_set_bits() operates I2C bus and may sleep. So implement
+ * it in thread IRQ handler.
+ */
+static irqreturn_t max8925_onkey_handler(int irq, void *data)
+{
+	struct max8925_onkey_info *info = data;
+
+	input_report_key(info->idev, KEY_POWER, 1);
+	input_sync(info->idev);
+
+	/* Enable hardreset to halt if system isn't shutdown on time */
+	max8925_set_bits(info->i2c, MAX8925_SYSENSEL,
+			 HARDRESET_EN, HARDRESET_EN);
+
+	return IRQ_HANDLED;
+}
+
+static int __devinit max8925_onkey_probe(struct platform_device *pdev)
+{
+	struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
+	struct max8925_onkey_info *info;
+	int error;
+
+	info = kzalloc(sizeof(struct max8925_onkey_info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->i2c = chip->i2c;
+	info->irq = chip->irq_base + MAX8925_IRQ_GPM_SW_3SEC;
+
+	info->idev = input_allocate_device();
+	if (!info->idev) {
+		dev_err(chip->dev, "Failed to allocate input dev\n");
+		error = -ENOMEM;
+		goto out_input;
+	}
+
+	info->idev->name = "max8925_on";
+	info->idev->phys = "max8925_on/input0";
+	info->idev->id.bustype = BUS_I2C;
+	info->idev->dev.parent = &pdev->dev;
+	info->idev->evbit[0] = BIT_MASK(EV_KEY);
+	info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
+
+	error = request_threaded_irq(info->irq, NULL, max8925_onkey_handler,
+				     IRQF_ONESHOT, "onkey", info);
+	if (error < 0) {
+		dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
+			info->irq, error);
+		goto out_irq;
+	}
+
+	error = input_register_device(info->idev);
+	if (error) {
+		dev_err(chip->dev, "Can't register input device: %d\n", error);
+		goto out;
+	}
+
+	platform_set_drvdata(pdev, info);
+
+	return 0;
+
+out:
+	free_irq(info->irq, info);
+out_irq:
+	input_free_device(info->idev);
+out_input:
+	kfree(info);
+	return error;
+}
+
+static int __devexit max8925_onkey_remove(struct platform_device *pdev)
+{
+	struct max8925_onkey_info *info = platform_get_drvdata(pdev);
+
+	free_irq(info->irq, info);
+	input_unregister_device(info->idev);
+	kfree(info);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver max8925_onkey_driver = {
+	.driver		= {
+		.name	= "max8925-onkey",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= max8925_onkey_probe,
+	.remove		= __devexit_p(max8925_onkey_remove),
+};
+
+static int __init max8925_onkey_init(void)
+{
+	return platform_driver_register(&max8925_onkey_driver);
+}
+module_init(max8925_onkey_init);
+
+static void __exit max8925_onkey_exit(void)
+{
+	platform_driver_unregister(&max8925_onkey_driver);
+}
+module_exit(max8925_onkey_exit);
+
+MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
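
The comment in the new driver states the key constraint: max8925_set_bits() goes over I2C and may sleep, which is illegal in hard-IRQ context. Registering a NULL hard handler together with IRQF_ONESHOT makes the core run the handler in a kernel thread, where sleeping is allowed. A reduced sketch of the idiom (hypothetical device, not from the patch):

	#include <linux/interrupt.h>

	static irqreturn_t foo_irq_thread(int irq, void *data)
	{
		/* process context: i2c transfers, mutexes, etc. are fine here */
		return IRQ_HANDLED;
	}

	static int foo_setup_irq(int irq, void *dev)
	{
		/* NULL hard handler + IRQF_ONESHOT: line stays masked until
		 * the thread function returns */
		return request_threaded_irq(irq, NULL, foo_irq_thread,
					    IRQF_ONESHOT, "foo", dev);
	}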
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index 0d45422f8095..1dacae4b43f0 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -259,8 +259,11 @@ static const struct of_device_id bbc_beep_match[] = {
 };
 
 static struct of_platform_driver bbc_beep_driver = {
-	.name		= "bbcbeep",
-	.match_table	= bbc_beep_match,
+	.driver = {
+		.name = "bbcbeep",
+		.owner = THIS_MODULE,
+		.of_match_table = bbc_beep_match,
+	},
 	.probe		= bbc_beep_probe,
 	.remove		= __devexit_p(bbc_remove),
 	.shutdown	= sparcspkr_shutdown,
@@ -338,8 +341,11 @@ static const struct of_device_id grover_beep_match[] = {
 };
 
 static struct of_platform_driver grover_beep_driver = {
-	.name		= "groverbeep",
-	.match_table	= grover_beep_match,
+	.driver = {
+		.name = "groverbeep",
+		.owner = THIS_MODULE,
+		.of_match_table = grover_beep_match,
+	},
 	.probe		= grover_beep_probe,
 	.remove		= __devexit_p(grover_remove),
 	.shutdown	= sparcspkr_shutdown,
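
This is the of_platform_driver conversion repeated across the series (i8042-sparcio, xilinx_ps2, leds-gpio, smu, therm_pm72, therm_windtunnel): the bus-specific .name and .match_table fields move into the embedded struct device_driver, where generic driver-core code finds them as .of_match_table. A sketch of the resulting shape for a hypothetical driver:

	static int __devinit foo_probe(struct of_device *op,
				       const struct of_device_id *match)
	{
		return 0;
	}

	static int __devexit foo_remove(struct of_device *op)
	{
		return 0;
	}

	static const struct of_device_id foo_match[] = {
		{ .compatible = "vendor,foo", },
		{},
	};

	static struct of_platform_driver foo_of_driver = {
		.driver = {
			.name		= "foo",	/* was top-level .name */
			.owner		= THIS_MODULE,
			.of_match_table	= foo_match,	/* was .match_table */
		},
		.probe	= foo_probe,
		.remove	= __devexit_p(foo_remove),
	};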
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fee9eac8e04a..4f9b2afc24e8 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -90,8 +90,8 @@ static void vibra_disable(struct vibra_info *info)
 	twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
 			 (reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
 
-	twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
 	twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL);
+	twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
 
 	info->enabled = false;
 }
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 1477466076ad..b71eb55f2dbc 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -300,7 +300,7 @@ static int uinput_validate_absbits(struct input_dev *dev)
 	unsigned int cnt;
 	int retval = 0;
 
-	for (cnt = 0; cnt < ABS_MAX + 1; cnt++) {
+	for (cnt = 0; cnt < ABS_CNT; cnt++) {
 		if (!test_bit(cnt, dev->absbit))
 			continue;
 
@@ -387,7 +387,7 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
 	dev->id.product	= user_dev->id.product;
 	dev->id.version	= user_dev->id.version;
 
-	size = sizeof(int) * (ABS_MAX + 1);
+	size = sizeof(int) * ABS_CNT;
 	memcpy(dev->absmax, user_dev->absmax, size);
 	memcpy(dev->absmin, user_dev->absmin, size);
 	memcpy(dev->absfuzz, user_dev->absfuzz, size);
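
ABS_CNT says what the code means: the number of array slots, not the highest index. In kernels of this era it is defined next to ABS_MAX roughly as below, so the loop bound and the memcpy size can no longer drift apart by one:

	/* convention from include/linux/input.h of this period, for reference */
	#define ABS_MAX	0x3f			/* highest valid ABS_* event code */
	#define ABS_CNT	(ABS_MAX + 1)		/* slots for indices 0..ABS_MAX */

	static int absmax[ABS_CNT];	/* sizeof(absmax) == sizeof(int) * ABS_CNT */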
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index a185ac78a42c..ff5f61a0fd3a 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
+#include <linux/platform_device.h>
 
 #include <asm/irq.h>
 #include <asm/setup.h>
@@ -34,10 +35,10 @@ MODULE_DESCRIPTION("Amiga mouse driver");
 MODULE_LICENSE("GPL");
 
 static int amimouse_lastx, amimouse_lasty;
-static struct input_dev *amimouse_dev;
 
-static irqreturn_t amimouse_interrupt(int irq, void *dummy)
+static irqreturn_t amimouse_interrupt(int irq, void *data)
 {
+	struct input_dev *dev = data;
 	unsigned short joy0dat, potgor;
 	int nx, ny, dx, dy;
 
@@ -59,14 +60,14 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy)
 
 	potgor = amiga_custom.potgor;
 
-	input_report_rel(amimouse_dev, REL_X, dx);
-	input_report_rel(amimouse_dev, REL_Y, dy);
+	input_report_rel(dev, REL_X, dx);
+	input_report_rel(dev, REL_Y, dy);
 
-	input_report_key(amimouse_dev, BTN_LEFT, ciaa.pra & 0x40);
-	input_report_key(amimouse_dev, BTN_MIDDLE, potgor & 0x0100);
-	input_report_key(amimouse_dev, BTN_RIGHT, potgor & 0x0400);
+	input_report_key(dev, BTN_LEFT, ciaa.pra & 0x40);
+	input_report_key(dev, BTN_MIDDLE, potgor & 0x0100);
+	input_report_key(dev, BTN_RIGHT, potgor & 0x0400);
 
-	input_sync(amimouse_dev);
+	input_sync(dev);
 
 	return IRQ_HANDLED;
 }
@@ -74,63 +75,90 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy)
 static int amimouse_open(struct input_dev *dev)
 {
 	unsigned short joy0dat;
+	int error;
 
 	joy0dat = amiga_custom.joy0dat;
 
 	amimouse_lastx = joy0dat & 0xff;
 	amimouse_lasty = joy0dat >> 8;
 
-	if (request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse", amimouse_interrupt)) {
-		printk(KERN_ERR "amimouse.c: Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
-		return -EBUSY;
-	}
+	error = request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse",
+			    dev);
+	if (error)
+		dev_err(&dev->dev, "Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
 
-	return 0;
+	return error;
 }
 
 static void amimouse_close(struct input_dev *dev)
 {
-	free_irq(IRQ_AMIGA_VERTB, amimouse_interrupt);
+	free_irq(IRQ_AMIGA_VERTB, dev);
 }
 
-static int __init amimouse_init(void)
+static int __init amimouse_probe(struct platform_device *pdev)
 {
 	int err;
+	struct input_dev *dev;
 
-	if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_MOUSE))
-		return -ENODEV;
-
-	amimouse_dev = input_allocate_device();
-	if (!amimouse_dev)
+	dev = input_allocate_device();
+	if (!dev)
 		return -ENOMEM;
 
-	amimouse_dev->name = "Amiga mouse";
-	amimouse_dev->phys = "amimouse/input0";
-	amimouse_dev->id.bustype = BUS_AMIGA;
-	amimouse_dev->id.vendor = 0x0001;
-	amimouse_dev->id.product = 0x0002;
-	amimouse_dev->id.version = 0x0100;
+	dev->name = pdev->name;
+	dev->phys = "amimouse/input0";
+	dev->id.bustype = BUS_AMIGA;
+	dev->id.vendor = 0x0001;
+	dev->id.product = 0x0002;
+	dev->id.version = 0x0100;
 
-	amimouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
-	amimouse_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
-	amimouse_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
+	dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
+	dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
+	dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
 		BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
-	amimouse_dev->open = amimouse_open;
-	amimouse_dev->close = amimouse_close;
+	dev->open = amimouse_open;
+	dev->close = amimouse_close;
+	dev->dev.parent = &pdev->dev;
 
-	err = input_register_device(amimouse_dev);
+	err = input_register_device(dev);
 	if (err) {
-		input_free_device(amimouse_dev);
+		input_free_device(dev);
 		return err;
 	}
 
+	platform_set_drvdata(pdev, dev);
+
 	return 0;
 }
 
-static void __exit amimouse_exit(void)
+static int __exit amimouse_remove(struct platform_device *pdev)
 {
-	input_unregister_device(amimouse_dev);
+	struct input_dev *dev = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	input_unregister_device(dev);
+	return 0;
+}
+
+static struct platform_driver amimouse_driver = {
+	.remove	= __exit_p(amimouse_remove),
+	.driver	= {
+		.name	= "amiga-mouse",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init amimouse_init(void)
+{
+	return platform_driver_probe(&amimouse_driver, amimouse_probe);
 }
 
 module_init(amimouse_init);
+
+static void __exit amimouse_exit(void)
+{
	platform_driver_unregister(&amimouse_driver);
+}
+
 module_exit(amimouse_exit);
+
+MODULE_ALIAS("platform:amiga-mouse");
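
Both Amiga drivers also drop their file-scope input_dev by registering the device itself as the IRQ cookie: whatever pointer goes in as the last argument of request_irq() comes back as the handler's data argument, and the same pointer must be handed to free_irq(). A reduced sketch with illustrative names:

	#include <linux/input.h>
	#include <linux/interrupt.h>

	static irqreturn_t foo_irq(int irq, void *data)
	{
		struct input_dev *dev = data;	/* the cookie; no global needed */

		input_sync(dev);
		return IRQ_HANDLED;
	}

	static int foo_attach(int irq, struct input_dev *dev)
	{
		return request_irq(irq, foo_irq, 0, "foo", dev);
	}

	static void foo_detach(int irq, struct input_dev *dev)
	{
		free_irq(irq, dev);	/* must match the request_irq() cookie */
	}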
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 5071af2c0604..04e32f2d1241 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -51,7 +51,7 @@ static inline void i8042_write_command(int val)
 
 static int __devinit sparc_i8042_probe(struct of_device *op, const struct of_device_id *match)
 {
-	struct device_node *dp = op->node;
+	struct device_node *dp = op->dev.of_node;
 
 	dp = dp->child;
 	while (dp) {
@@ -96,8 +96,11 @@ static const struct of_device_id sparc_i8042_match[] = {
 MODULE_DEVICE_TABLE(of, sparc_i8042_match);
 
 static struct of_platform_driver sparc_i8042_driver = {
-	.name		= "i8042",
-	.match_table	= sparc_i8042_match,
+	.driver = {
+		.name = "i8042",
+		.owner = THIS_MODULE,
+		.of_match_table = sparc_i8042_match,
+	},
 	.probe		= sparc_i8042_probe,
 	.remove		= __devexit_p(sparc_i8042_remove),
 };
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index f84f8e32e3f1..e2c028d2638f 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -244,17 +244,17 @@ static int __devinit xps2_of_probe(struct of_device *ofdev,
 	int error;
 
 	dev_info(dev, "Device Tree Probing \'%s\'\n",
-			ofdev->node->name);
+			ofdev->dev.of_node->name);
 
 	/* Get iospace for the device */
-	error = of_address_to_resource(ofdev->node, 0, &r_mem);
+	error = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
 	if (error) {
 		dev_err(dev, "invalid address\n");
 		return error;
 	}
 
 	/* Get IRQ for the device */
-	if (of_irq_to_resource(ofdev->node, 0, &r_irq) == NO_IRQ) {
+	if (of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq) == NO_IRQ) {
 		dev_err(dev, "no IRQ found\n");
 		return -ENODEV;
 	}
@@ -342,7 +342,7 @@ static int __devexit xps2_of_remove(struct of_device *of_dev)
 	iounmap(drvdata->base_address);
 
 	/* Get iospace of the device */
-	if (of_address_to_resource(of_dev->node, 0, &r_mem))
+	if (of_address_to_resource(of_dev->dev.of_node, 0, &r_mem))
 		dev_err(dev, "invalid address\n");
 	else
 		release_mem_region(r_mem.start, resource_size(&r_mem));
@@ -362,8 +362,11 @@ static const struct of_device_id xps2_of_match[] __devinitconst = {
 MODULE_DEVICE_TABLE(of, xps2_of_match);
 
 static struct of_platform_driver xps2_of_driver = {
-	.name		= DRIVER_NAME,
-	.match_table	= xps2_of_match,
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = xps2_of_match,
+	},
 	.probe		= xps2_of_probe,
 	.remove		= __devexit_p(xps2_of_remove),
 };
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 532279cda0e4..634f6f6b9b13 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1163,8 +1163,8 @@ static int __devinit ads7846_probe(struct spi_device *spi)
 
 	ts->reg = regulator_get(&spi->dev, "vcc");
 	if (IS_ERR(ts->reg)) {
-		dev_err(&spi->dev, "unable to get regulator: %ld\n",
-			PTR_ERR(ts->reg));
+		err = PTR_ERR(ts->reg);
+		dev_err(&spi->dev, "unable to get regulator: %ld\n", err);
 		goto err_free_gpio;
 	}
 
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index e0b7c834111d..ac5d0f9b0cb1 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -413,6 +413,8 @@ static struct dev_pm_ops s3c_ts_pmops = {
 #endif
 
 static struct platform_device_id s3cts_driver_ids[] = {
+	{ "s3c2410-ts", 0 },
+	{ "s3c2440-ts", 0 },
 	{ "s3c64xx-ts", FEAT_PEN_IRQ },
 	{ }
 };
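
A platform_device_id table like the one extended above lets one driver serve several board names while keying optional behaviour off driver_data. A sketch of how such a flag is typically consumed at probe time (names, and the meaning of FEAT_PEN_IRQ, are assumed from the hunk rather than verified against the whole driver):

	#include <linux/platform_device.h>

	#define FEAT_PEN_IRQ	(1 << 0)	/* assumed variant flag, as above */

	static struct platform_device_id foo_ids[] = {
		{ "foo-v1", 0 },
		{ "foo-v2", FEAT_PEN_IRQ },
		{ }
	};

	static int __devinit foo_probe(struct platform_device *pdev)
	{
		unsigned long feat = platform_get_device_id(pdev)->driver_data;

		if (feat & FEAT_PEN_IRQ) {
			/* variant-specific setup goes here */
		}
		return 0;
	}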
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 29a8bbf3f086..567d57215c28 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -857,6 +857,11 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
 	if ((pkt[0] & 0xe0) != 0xe0)
 		return 0;
 
+	if (be16_to_cpu(packet->data_len) > 0xff)
+		packet->data_len = cpu_to_be16(be16_to_cpu(packet->data_len) - 0x100);
+	if (be16_to_cpu(packet->x_len) > 0xff)
+		packet->x_len = cpu_to_be16(be16_to_cpu(packet->x_len) - 0x80);
+
 	/* send ACK */
 	ret = usb_submit_urb(priv->ack, GFP_ATOMIC);
 
@@ -1112,7 +1117,7 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
 
 #ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
 	[DEVTYPE_NEXIO] = {
-		.rept_size	= 128,
+		.rept_size	= 1024,
 		.irq_always	= true,
 		.read_data	= nexio_read_data,
 		.init		= nexio_init,
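
The NEXIO hunk rewrites big-endian length fields in place before the packet is parsed further. The safe way to do that is a full round trip: convert to CPU order, adjust, convert back, so every later reader still sees a consistent __be16. A minimal sketch (struct and field names assumed from the hunk):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct foo_pkt {
		__be16 data_len;
	};

	static void foo_clamp_len(struct foo_pkt *p)
	{
		/* adjust in CPU order, store back in wire (big-endian) order */
		if (be16_to_cpu(p->data_len) > 0xff)
			p->data_len = cpu_to_be16(be16_to_cpu(p->data_len) - 0x100);
	}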
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index ee5837522f5a..0cabe31f26df 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -787,8 +787,7 @@ capi_poll(struct file *file, poll_table * wait)
 }
 
 static int
-capi_ioctl(struct inode *inode, struct file *file,
-	   unsigned int cmd, unsigned long arg)
+capi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct capidev *cdev = file->private_data;
 	capi_ioctl_struct data;
@@ -981,6 +980,18 @@ register_out:
 	}
 }
 
+static long
+capi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = capi_ioctl(file, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
 static int capi_open(struct inode *inode, struct file *file)
 {
 	struct capidev *cdev;
@@ -1026,7 +1037,7 @@ static const struct file_operations capi_fops =
 	.read		= capi_read,
 	.write		= capi_write,
 	.poll		= capi_poll,
-	.ioctl		= capi_ioctl,
+	.unlocked_ioctl	= capi_unlocked_ioctl,
 	.open		= capi_open,
 	.release	= capi_release,
 };
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index bd00dceacaf0..bde3c88b8b27 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -1147,6 +1147,12 @@ load_unlock_out:
 	if (ctr->state == CAPI_CTR_DETECTED)
 		goto reset_unlock_out;
 
+	if (ctr->reset_ctr == NULL) {
+		printk(KERN_DEBUG "kcapi: reset: no reset function\n");
+		retval = -ESRCH;
+		goto reset_unlock_out;
+	}
+
 	ctr->reset_ctr(ctr);
 
 	retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED);
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 964a55fb1486..8f78f15c8ef7 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -170,17 +170,6 @@ static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param,
 }
 
 /*
- * convert hex to binary
- */
-static inline u8 hex2bin(char c)
-{
-	int result = c & 0x0f;
-	if (c & 0x40)
-		result += 9;
-	return result;
-}
-
-/*
  * convert an IE from Gigaset hex string to ETSI binary representation
  * including length byte
 * return value: result length, -1 on error
@@ -191,7 +180,7 @@ static int encode_ie(char *in, u8 *out, int maxlen)
 	while (*in) {
 		if (!isxdigit(in[0]) || !isxdigit(in[1]) || l >= maxlen)
 			return -1;
-		out[++l] = (hex2bin(in[0]) << 4) + hex2bin(in[1]);
+		out[++l] = (hex_to_bin(in[0]) << 4) + hex_to_bin(in[1]);
 		in += 2;
 	}
 	out[0] = l;
@@ -933,30 +922,6 @@ void gigaset_isdn_stop(struct cardstate *cs)
 */
 
 /*
- * load firmware
- */
-static int gigaset_load_firmware(struct capi_ctr *ctr, capiloaddata *data)
-{
-	struct cardstate *cs = ctr->driverdata;
-
-	/* AVM specific operation, not needed for Gigaset -- ignore */
-	dev_notice(cs->dev, "load_firmware ignored\n");
-
-	return 0;
-}
-
-/*
- * reset (deactivate) controller
- */
-static void gigaset_reset_ctr(struct capi_ctr *ctr)
-{
-	struct cardstate *cs = ctr->driverdata;
-
-	/* AVM specific operation, not needed for Gigaset -- ignore */
-	dev_notice(cs->dev, "reset_ctr ignored\n");
-}
-
-/*
  * register CAPI application
 */
 static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
@@ -2213,8 +2178,8 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
 	iif->ctr.driverdata    = cs;
 	strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name));
 	iif->ctr.driver_name   = "gigaset";
-	iif->ctr.load_firmware = gigaset_load_firmware;
-	iif->ctr.reset_ctr     = gigaset_reset_ctr;
+	iif->ctr.load_firmware = NULL;
+	iif->ctr.reset_ctr     = NULL;
 	iif->ctr.register_appl = gigaset_register_appl;
 	iif->ctr.release_appl  = gigaset_release_appl;
 	iif->ctr.send_message  = gigaset_send_message;
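
Together with the kcapi hunk above, this establishes an optional-callback contract: a driver with nothing useful to do leaves the op NULL instead of stubbing it out, and the core checks before calling. A reduced sketch of the convention (illustrative types):

	#include <linux/errno.h>

	struct foo_ops {
		void (*reset)(void *priv);	/* optional, may be NULL */
	};

	static int foo_do_reset(struct foo_ops *ops, void *priv)
	{
		if (ops->reset == NULL)
			return -ESRCH;	/* same convention kcapi uses above */

		ops->reset(priv);
		return 0;
	}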
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 70044ee4b228..a44cdb492ea9 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1272,9 +1272,9 @@ isdn_poll(struct file *file, poll_table * wait)
 
 
 static int
-isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
+isdn_ioctl(struct file *file, uint cmd, ulong arg)
 {
-	uint minor = iminor(inode);
+	uint minor = iminor(file->f_path.dentry->d_inode);
 	isdn_ctrl c;
 	int drvidx;
 	int chidx;
@@ -1722,6 +1722,18 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
 #undef cfg
 }
 
+static long
+isdn_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = isdn_ioctl(file, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
 /*
 * Open the device code.
 */
@@ -1838,7 +1850,7 @@ static const struct file_operations isdn_fops =
 	.read		= isdn_read,
 	.write		= isdn_write,
 	.poll		= isdn_poll,
-	.ioctl		= isdn_ioctl,
+	.unlocked_ioctl	= isdn_unlocked_ioctl,
 	.open		= isdn_open,
 	.release	= isdn_close,
 };
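
Losing the inode argument is the one visible cost of the .unlocked_ioctl conversion; drivers that need the minor number recover it from the file itself. The 2.6.34-era spelling is the f_path chain used above (later kernels wrap the same walk as file_inode(file)):

	#include <linux/fs.h>

	static unsigned int foo_minor(struct file *file)
	{
		return iminor(file->f_path.dentry->d_inode);
	}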
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 8785004e85e0..81048b8ed8ad 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -24,6 +24,7 @@
 #include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/mISDNif.h>
+#include <linux/smp_lock.h>
 #include "core.h"
 
 static u_int	*debug;
@@ -97,8 +98,6 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
 	if (*debug & DEBUG_TIMER)
 		printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__,
 			filep, buf, (int)count, off);
-	if (*off != filep->f_pos)
-		return -ESPIPE;
 
 	if (list_empty(&dev->expired) && (dev->work == 0)) {
 		if (filep->f_flags & O_NONBLOCK)
@@ -215,9 +214,8 @@ unlock:
 	return ret;
 }
 
-static int
-mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
-    unsigned long arg)
+static long
+mISDN_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 {
 	struct mISDNtimerdev	*dev = filep->private_data;
 	int			id, tout, ret = 0;
@@ -226,6 +224,7 @@ mISDN_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 	if (*debug & DEBUG_TIMER)
 		printk(KERN_DEBUG "%s(%p, %x, %lx)\n", __func__,
 		    filep, cmd, arg);
+	lock_kernel();
 	switch (cmd) {
 	case IMADDTIMER:
 		if (get_user(tout, (int __user *)arg)) {
@@ -257,13 +256,14 @@ mISDN_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 	default:
 		ret = -EINVAL;
 	}
+	unlock_kernel();
 	return ret;
 }
 
 static const struct file_operations mISDN_fops = {
 	.read		= mISDN_read,
 	.poll		= mISDN_poll,
-	.ioctl		= mISDN_ioctl,
+	.unlocked_ioctl	= mISDN_ioctl,
 	.open		= mISDN_open,
 	.release	= mISDN_close,
 };
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index c6e4b772b757..6d94b0b9979c 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -211,7 +211,7 @@ struct gpio_led_of_platform_data {
 static int __devinit of_gpio_leds_probe(struct of_device *ofdev,
 					const struct of_device_id *match)
 {
-	struct device_node *np = ofdev->node, *child;
+	struct device_node *np = ofdev->dev.of_node, *child;
 	struct gpio_led_of_platform_data *pdata;
 	int count = 0, ret;
 
@@ -291,8 +291,8 @@ static struct of_platform_driver of_gpio_leds_driver = {
 	.driver = {
 		.name = "of_gpio_leds",
 		.owner = THIS_MODULE,
+		.of_match_table = of_gpio_leds_match,
 	},
-	.match_table = of_gpio_leds_match,
 	.probe = of_gpio_leds_probe,
 	.remove = __devexit_p(of_gpio_leds_remove),
 };
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 26a303a1d1ab..97147804a49c 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -39,14 +39,12 @@ static struct macio_chip *macio_on_hold;
 
 static int macio_bus_match(struct device *dev, struct device_driver *drv)
 {
-	struct macio_dev * macio_dev = to_macio_device(dev);
-	struct macio_driver * macio_drv = to_macio_driver(drv);
-	const struct of_device_id * matches = macio_drv->match_table;
+	const struct of_device_id * matches = drv->of_match_table;
 
 	if (!matches)
 		return 0;
 
-	return of_match_device(matches, &macio_dev->ofdev) != NULL;
+	return of_match_device(matches, dev) != NULL;
 }
 
 struct macio_dev *macio_dev_get(struct macio_dev *dev)
@@ -84,7 +82,7 @@ static int macio_device_probe(struct device *dev)
 
 	macio_dev_get(macio_dev);
 
-	match = of_match_device(drv->match_table, &macio_dev->ofdev);
+	match = of_match_device(drv->driver.of_match_table, dev);
 	if (match)
 		error = drv->probe(macio_dev, match);
 	if (error)
@@ -248,7 +246,7 @@ static void macio_create_fixup_irq(struct macio_dev *dev, int index,
 
 static void macio_add_missing_resources(struct macio_dev *dev)
 {
-	struct device_node *np = dev->ofdev.node;
+	struct device_node *np = dev->ofdev.dev.of_node;
 	unsigned int irq_base;
 
 	/* Gatwick has some missing interrupts on child nodes */
@@ -289,7 +287,7 @@ static void macio_add_missing_resources(struct macio_dev *dev)
 
 static void macio_setup_interrupts(struct macio_dev *dev)
 {
-	struct device_node *np = dev->ofdev.node;
+	struct device_node *np = dev->ofdev.dev.of_node;
 	unsigned int irq;
 	int i = 0, j = 0;
 
@@ -317,7 +315,7 @@ static void macio_setup_interrupts(struct macio_dev *dev)
 static void macio_setup_resources(struct macio_dev *dev,
 				  struct resource *parent_res)
 {
-	struct device_node *np = dev->ofdev.node;
+	struct device_node *np = dev->ofdev.dev.of_node;
 	struct resource r;
 	int index;
 
@@ -373,9 +371,9 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
 
 	dev->bus = &chip->lbus;
 	dev->media_bay = in_bay;
-	dev->ofdev.node = np;
-	dev->ofdev.dma_mask = 0xffffffffUL;
-	dev->ofdev.dev.dma_mask = &dev->ofdev.dma_mask;
+	dev->ofdev.dev.of_node = np;
+	dev->ofdev.archdata.dma_mask = 0xffffffffUL;
+	dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask;
 	dev->ofdev.dev.parent = parent;
 	dev->ofdev.dev.bus = &macio_bus_type;
 	dev->ofdev.dev.release = macio_release_dev;
@@ -494,9 +492,9 @@ static void macio_pci_add_devices(struct macio_chip *chip)
 	}
 
 	/* Add media bay devices if any */
+	pnode = mbdev->ofdev.dev.of_node;
 	if (mbdev)
-		for (np = NULL; (np = of_get_next_child(mbdev->ofdev.node, np))
-		     != NULL;) {
+		for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
 			if (macio_skip_device(np))
 				continue;
 			of_node_get(np);
@@ -506,9 +504,9 @@ static void macio_pci_add_devices(struct macio_chip *chip)
 	}
 
 	/* Add serial ports if any */
+	pnode = sdev->ofdev.dev.of_node;
 	if (sdev) {
-		for (np = NULL; (np = of_get_next_child(sdev->ofdev.node, np))
-		     != NULL;) {
+		for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
 			if (macio_skip_device(np))
 				continue;
 			of_node_get(np);
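
With the match table reachable through the embedded struct device_driver and of_match_device() taking a plain struct device, a bus's match callback no longer needs any bus-specific casts, which is exactly what shrinks macio_bus_match() above. A sketch of the resulting generic shape (illustrative bus name):

	#include <linux/device.h>
	#include <linux/of_device.h>

	static int foo_bus_match(struct device *dev, struct device_driver *drv)
	{
		const struct of_device_id *matches = drv->of_match_table;

		if (!matches)
			return 0;	/* driver has no OF table: no match */

		return of_match_device(matches, dev) != NULL;
	}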
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index 9e9453b58425..6999ce59fd10 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -9,7 +9,7 @@ field##_show (struct device *dev, struct device_attribute *attr,	\
 	      char *buf)						\
 {									\
 	struct macio_dev *mdev = to_macio_device (dev);			\
-	return sprintf (buf, format_string, mdev->ofdev.node->field);	\
+	return sprintf (buf, format_string, mdev->ofdev.dev.of_node->field); \
 }
 
 static ssize_t
@@ -21,7 +21,7 @@ compatible_show (struct device *dev, struct device_attribute *attr, char *buf)
 	int length = 0;
 
 	of = &to_macio_device (dev)->ofdev;
-	compat = of_get_property(of->node, "compatible", &cplen);
+	compat = of_get_property(of->dev.of_node, "compatible", &cplen);
 	if (!compat) {
 		*buf = '\0';
 		return 0;
@@ -58,7 +58,7 @@ static ssize_t devspec_show(struct device *dev,
 	struct of_device *ofdev;
 
 	ofdev = to_of_device(dev);
-	return sprintf(buf, "%s\n", ofdev->node->full_name);
+	return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
 }
 
 macio_config_of_attr (name, "%s\n");
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 08002b88f342..288acce76b74 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -564,7 +564,7 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
 	unsigned long base;
 	int i;
 
-	ofnode = mdev->ofdev.node;
+	ofnode = mdev->ofdev.dev.of_node;
 
 	if (macio_resource_count(mdev) < 1)
 		return -ENODEV;
diff --git a/drivers/macintosh/nvram.c b/drivers/macintosh/nvram.c
index c876349c32de..a271c8218d82 100644
--- a/drivers/macintosh/nvram.c
+++ b/drivers/macintosh/nvram.c
@@ -100,7 +100,7 @@ const struct file_operations nvram_fops = {
 	.llseek		= nvram_llseek,
 	.read		= read_nvram,
 	.write		= write_nvram,
-	.ioctl		= nvram_ioctl,
+	.unlocked_ioctl	= nvram_ioctl,
 };
 
 static struct miscdevice nvram_dev = {
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 7c54d80c4fb2..12946c5f583f 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -375,7 +375,7 @@ static int __devinit rackmeter_probe(struct macio_dev* mdev,
 	pr_debug("rackmeter_probe()\n");
 
 	/* Get i2s-a node */
-	while ((i2s = of_get_next_child(mdev->ofdev.node, i2s)) != NULL)
+	while ((i2s = of_get_next_child(mdev->ofdev.dev.of_node, i2s)) != NULL)
 	       if (strcmp(i2s->name, "i2s-a") == 0)
 		       break;
 	if (i2s == NULL) {
@@ -431,7 +431,7 @@ static int __devinit rackmeter_probe(struct macio_dev* mdev,
 	    of_address_to_resource(i2s, 1, &rdma)) {
 		printk(KERN_ERR
 		       "rackmeter: found match but lacks resources: %s",
-		       mdev->ofdev.node->full_name);
+		       mdev->ofdev.dev.of_node->full_name);
 		rc = -ENXIO;
 		goto bail_free;
 	}
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index c9da5c4c167d..2506c957712e 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -671,8 +671,11 @@
 
 static struct of_platform_driver smu_of_platform_driver =
 {
-	.name		= "smu",
-	.match_table	= smu_platform_match,
+	.driver = {
+		.name = "smu",
+		.owner = THIS_MODULE,
+		.of_match_table = smu_platform_match,
+	},
 	.probe		= smu_platform_probe,
 };
 
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index b18fa948f3d1..e60605bd0ea9 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2215,7 +2215,7 @@ static int fcu_of_probe(struct of_device* dev, const struct of_device_id *match)
 	state = state_detached;
 
 	/* Lookup the fans in the device tree */
-	fcu_lookup_fans(dev->node);
+	fcu_lookup_fans(dev->dev.of_node);
 
 	/* Add the driver */
 	return i2c_add_driver(&therm_pm72_driver);
@@ -2238,8 +2238,11 @@ static const struct of_device_id fcu_match[] =
 
 static struct of_platform_driver fcu_of_platform_driver =
 {
-	.name		= "temperature",
-	.match_table	= fcu_match,
+	.driver = {
+		.name = "temperature",
+		.owner = THIS_MODULE,
+		.of_match_table = fcu_match,
+	},
 	.probe		= fcu_of_probe,
 	.remove		= fcu_of_remove
 };
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 0839770e4ec5..5c9367acf0cf 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -463,8 +463,11 @@ static const struct of_device_id therm_of_match[] = {{
 };
 
 static struct of_platform_driver therm_of_driver = {
-	.name		= "temperature",
-	.match_table	= therm_of_match,
+	.driver = {
+		.name = "temperature",
+		.owner = THIS_MODULE,
+		.of_match_table = therm_of_match,
+	},
 	.probe		= therm_of_probe,
 	.remove		= therm_of_remove,
 };
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 42764849eb78..3d4fc0f7b00b 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2273,8 +2273,7 @@ static int register_pmu_pm_ops(void)
 device_initcall(register_pmu_pm_ops);
 #endif
 
-static int
-pmu_ioctl(struct inode * inode, struct file *filp,
+static int pmu_ioctl(struct file *filp,
 		     u_int cmd, u_long arg)
 {
 	__u32 __user *argp = (__u32 __user *)arg;
@@ -2337,11 +2336,23 @@ pmu_ioctl(struct file *filp,
 	return error;
 }
 
+static long pmu_unlocked_ioctl(struct file *filp,
+			       u_int cmd, u_long arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = pmu_ioctl(filp, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
 static const struct file_operations pmu_device_fops = {
 	.read		= pmu_read,
 	.write		= pmu_write,
 	.poll		= pmu_fpoll,
-	.ioctl		= pmu_ioctl,
+	.unlocked_ioctl	= pmu_unlocked_ioctl,
 	.open		= pmu_open,
 	.release	= pmu_release,
 };
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9ea17d6c799b..d2c0f94fa37d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4645,7 +4645,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 			kfree(percpu->scribble);
 			pr_err("%s: failed memory allocation for cpu%ld\n",
 			       __func__, cpu);
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
 		}
 		break;
 	case CPU_DEAD:
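
notifier_from_errno() encodes a real errno into the notifier return space, so a caller that unwraps it with notifier_to_errno() gets -ENOMEM back instead of an opaque NOTIFY_BAD. A reduced sketch of a callback using it (hypothetical, not the raid5 code):

	#include <linux/notifier.h>
	#include <linux/slab.h>

	static int foo_cpu_notify(struct notifier_block *nb, unsigned long action,
				  void *hcpu)
	{
		void *buf = kmalloc(16, GFP_KERNEL);

		if (!buf)
			return notifier_from_errno(-ENOMEM);	/* carries -ENOMEM */

		kfree(buf);
		return NOTIFY_OK;
	}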
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 9ddc57909d49..425862ffb285 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/module.h>
+#include <linux/smp_lock.h>
 #include <linux/poll.h>
 #include <linux/ioctl.h>
 #include <linux/wait.h>
@@ -963,7 +964,7 @@ dvb_demux_read(struct file *file, char __user *buf, size_t count,
 	return ret;
 }
 
-static int dvb_demux_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_demux_do_ioctl(struct file *file,
 			      unsigned int cmd, void *parg)
 {
 	struct dmxdev_filter *dmxdevfilter = file->private_data;
@@ -1084,10 +1085,16 @@ static int dvb_demux_do_ioctl(struct file *file,
 	return ret;
 }
 
-static int dvb_demux_ioctl(struct inode *inode, struct file *file,
-			   unsigned int cmd, unsigned long arg)
+static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
 {
-	return dvb_usercopy(inode, file, cmd, arg, dvb_demux_do_ioctl);
+	int ret;
+
+	lock_kernel();
+	ret = dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
+	unlock_kernel();
+
+	return ret;
 }
 
 static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
@@ -1139,7 +1146,7 @@ static int dvb_demux_release(struct inode *inode, struct file *file)
 static const struct file_operations dvb_demux_fops = {
 	.owner = THIS_MODULE,
 	.read = dvb_demux_read,
-	.ioctl = dvb_demux_ioctl,
+	.unlocked_ioctl = dvb_demux_ioctl,
 	.open = dvb_demux_open,
 	.release = dvb_demux_release,
 	.poll = dvb_demux_poll,
@@ -1152,7 +1159,7 @@ static struct dvb_device dvbdev_demux = {
 	.fops = &dvb_demux_fops
 };
 
-static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_dvr_do_ioctl(struct file *file,
 			    unsigned int cmd, void *parg)
 {
 	struct dvb_device *dvbdev = file->private_data;
@@ -1176,10 +1183,16 @@ static int dvb_dvr_do_ioctl(struct inode *inode, struct file *file,
 	return ret;
 }
 
-static int dvb_dvr_ioctl(struct inode *inode, struct file *file,
-			 unsigned int cmd, unsigned long arg)
+static long dvb_dvr_ioctl(struct file *file,
+			  unsigned int cmd, unsigned long arg)
 {
-	return dvb_usercopy(inode, file, cmd, arg, dvb_dvr_do_ioctl);
+	int ret;
+
+	lock_kernel();
+	ret = dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
+	unlock_kernel();
+
+	return ret;
 }
 
 static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
@@ -1208,7 +1221,7 @@ static const struct file_operations dvb_dvr_fops = {
 	.owner = THIS_MODULE,
 	.read = dvb_dvr_read,
 	.write = dvb_dvr_write,
-	.ioctl = dvb_dvr_ioctl,
+	.unlocked_ioctl = dvb_dvr_ioctl,
 	.open = dvb_dvr_open,
 	.release = dvb_dvr_release,
 	.poll = dvb_dvr_poll,
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index cb22da53bfb0..ef259a0718ac 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -36,6 +36,7 @@
 #include <linux/delay.h>
 #include <linux/spinlock.h>
 #include <linux/sched.h>
+#include <linux/smp_lock.h>
 #include <linux/kthread.h>
 
 #include "dvb_ca_en50221.h"
@@ -1181,7 +1182,7 @@ static int dvb_ca_en50221_thread(void *data)
 *
 * @return 0 on success, <0 on error.
 */
-static int dvb_ca_en50221_io_do_ioctl(struct inode *inode, struct file *file,
+static int dvb_ca_en50221_io_do_ioctl(struct file *file,
				      unsigned int cmd, void *parg)
 {
 	struct dvb_device *dvbdev = file->private_data;
@@ -1255,10 +1256,16 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file,
 *
 * @return 0 on success, <0 on error.
 */
-static int dvb_ca_en50221_io_ioctl(struct inode *inode, struct file *file,
-				   unsigned int cmd, unsigned long arg)
+static long dvb_ca_en50221_io_ioctl(struct file *file,
+				    unsigned int cmd, unsigned long arg)
 {
-	return dvb_usercopy(inode, file, cmd, arg, dvb_ca_en50221_io_do_ioctl);
+	int ret;
+
+	lock_kernel();
+	ret = dvb_usercopy(file, cmd, arg, dvb_ca_en50221_io_do_ioctl);
+	unlock_kernel();
+
+	return ret;
 }
 
 
@@ -1611,7 +1618,7 @@ static const struct file_operations dvb_ca_fops = {
 	.owner = THIS_MODULE,
 	.read = dvb_ca_en50221_io_read,
 	.write = dvb_ca_en50221_io_write,
-	.ioctl = dvb_ca_en50221_io_ioctl,
+	.unlocked_ioctl = dvb_ca_en50221_io_ioctl,
 	.open = dvb_ca_en50221_io_open,
 	.release = dvb_ca_en50221_io_release,
 	.poll = dvb_ca_en50221_io_poll,
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 6932def4d266..44ae89ecef94 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -36,6 +36,7 @@
 #include <linux/list.h>
 #include <linux/freezer.h>
 #include <linux/jiffies.h>
+#include <linux/smp_lock.h>
 #include <linux/kthread.h>
 #include <asm/processor.h>
 
@@ -1195,14 +1196,14 @@ static void dtv_property_cache_submit(struct dvb_frontend *fe)
 	}
 }
 
-static int dvb_frontend_ioctl_legacy(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_legacy(struct file *file,
 			unsigned int cmd, void *parg);
-static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl_properties(struct file *file,
 			unsigned int cmd, void *parg);
 
 static int dtv_property_process_get(struct dvb_frontend *fe,
 				    struct dtv_property *tvp,
-				    struct inode *inode, struct file *file)
+				    struct file *file)
 {
 	int r = 0;
 
@@ -1335,7 +1336,6 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
 
 static int dtv_property_process_set(struct dvb_frontend *fe,
 				    struct dtv_property *tvp,
-				    struct inode *inode,
 				    struct file *file)
 {
 	int r = 0;
@@ -1366,7 +1366,7 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
 		dprintk("%s() Finalised property cache\n", __func__);
 		dtv_property_cache_submit(fe);
 
-		r |= dvb_frontend_ioctl_legacy(inode, file, FE_SET_FRONTEND,
+		r |= dvb_frontend_ioctl_legacy(file, FE_SET_FRONTEND,
 			&fepriv->parameters);
 		break;
 	case DTV_FREQUENCY:
@@ -1398,12 +1398,12 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
 		break;
 	case DTV_VOLTAGE:
 		fe->dtv_property_cache.voltage = tvp->u.data;
-		r = dvb_frontend_ioctl_legacy(inode, file, FE_SET_VOLTAGE,
+		r = dvb_frontend_ioctl_legacy(file, FE_SET_VOLTAGE,
 			(void *)fe->dtv_property_cache.voltage);
 		break;
 	case DTV_TONE:
 		fe->dtv_property_cache.sectone = tvp->u.data;
-		r = dvb_frontend_ioctl_legacy(inode, file, FE_SET_TONE,
+		r = dvb_frontend_ioctl_legacy(file, FE_SET_TONE,
 			(void *)fe->dtv_property_cache.sectone);
 		break;
 	case DTV_CODE_RATE_HP:
@@ -1487,7 +1487,7 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
 	return r;
 }
 
-static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
+static int dvb_frontend_ioctl(struct file *file,
 			unsigned int cmd, void *parg)
 {
 	struct dvb_device *dvbdev = file->private_data;
@@ -1509,17 +1509,17 @@ static int dvb_frontend_ioctl(struct file *file,
 		return -ERESTARTSYS;
 
 	if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY))
-		err = dvb_frontend_ioctl_properties(inode, file, cmd, parg);
+		err = dvb_frontend_ioctl_properties(file, cmd, parg);
 	else {
 		fe->dtv_property_cache.state = DTV_UNDEFINED;
-		err = dvb_frontend_ioctl_legacy(inode, file, cmd, parg);
+		err = dvb_frontend_ioctl_legacy(file, cmd, parg);
1516 } 1516 }
1517 1517
1518 up(&fepriv->sem); 1518 up(&fepriv->sem);
1519 return err; 1519 return err;
1520} 1520}
1521 1521
1522static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file, 1522static int dvb_frontend_ioctl_properties(struct file *file,
1523 unsigned int cmd, void *parg) 1523 unsigned int cmd, void *parg)
1524{ 1524{
1525 struct dvb_device *dvbdev = file->private_data; 1525 struct dvb_device *dvbdev = file->private_data;
@@ -1555,7 +1555,7 @@ static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
1555 } 1555 }
1556 1556
1557 for (i = 0; i < tvps->num; i++) { 1557 for (i = 0; i < tvps->num; i++) {
1558 (tvp + i)->result = dtv_property_process_set(fe, tvp + i, inode, file); 1558 (tvp + i)->result = dtv_property_process_set(fe, tvp + i, file);
1559 err |= (tvp + i)->result; 1559 err |= (tvp + i)->result;
1560 } 1560 }
1561 1561
@@ -1587,7 +1587,7 @@ static int dvb_frontend_ioctl_properties(struct inode *inode, struct file *file,
1587 } 1587 }
1588 1588
1589 for (i = 0; i < tvps->num; i++) { 1589 for (i = 0; i < tvps->num; i++) {
1590 (tvp + i)->result = dtv_property_process_get(fe, tvp + i, inode, file); 1590 (tvp + i)->result = dtv_property_process_get(fe, tvp + i, file);
1591 err |= (tvp + i)->result; 1591 err |= (tvp + i)->result;
1592 } 1592 }
1593 1593
@@ -1604,7 +1604,7 @@ out:
1604 return err; 1604 return err;
1605} 1605}
1606 1606
1607static int dvb_frontend_ioctl_legacy(struct inode *inode, struct file *file, 1607static int dvb_frontend_ioctl_legacy(struct file *file,
1608 unsigned int cmd, void *parg) 1608 unsigned int cmd, void *parg)
1609{ 1609{
1610 struct dvb_device *dvbdev = file->private_data; 1610 struct dvb_device *dvbdev = file->private_data;
@@ -2031,7 +2031,7 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
2031 2031
2032static const struct file_operations dvb_frontend_fops = { 2032static const struct file_operations dvb_frontend_fops = {
2033 .owner = THIS_MODULE, 2033 .owner = THIS_MODULE,
2034 .ioctl = dvb_generic_ioctl, 2034 .unlocked_ioctl = dvb_generic_ioctl,
2035 .poll = dvb_frontend_poll, 2035 .poll = dvb_frontend_poll,
2036 .open = dvb_frontend_open, 2036 .open = dvb_frontend_open,
2037 .release = dvb_frontend_release 2037 .release = dvb_frontend_release
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index cccea412088b..f6dac2bb0ac6 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -59,6 +59,7 @@
59#include <linux/netdevice.h> 59#include <linux/netdevice.h>
60#include <linux/etherdevice.h> 60#include <linux/etherdevice.h>
61#include <linux/dvb/net.h> 61#include <linux/dvb/net.h>
62#include <linux/smp_lock.h>
62#include <linux/uio.h> 63#include <linux/uio.h>
63#include <asm/uaccess.h> 64#include <asm/uaccess.h>
64#include <linux/crc32.h> 65#include <linux/crc32.h>
@@ -1329,7 +1330,7 @@ static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
1329 return 0; 1330 return 0;
1330} 1331}
1331 1332
1332static int dvb_net_do_ioctl(struct inode *inode, struct file *file, 1333static int dvb_net_do_ioctl(struct file *file,
1333 unsigned int cmd, void *parg) 1334 unsigned int cmd, void *parg)
1334{ 1335{
1335 struct dvb_device *dvbdev = file->private_data; 1336 struct dvb_device *dvbdev = file->private_data;
@@ -1431,10 +1432,16 @@ static int dvb_net_do_ioctl(struct inode *inode, struct file *file,
1431 return 0; 1432 return 0;
1432} 1433}
1433 1434
1434static int dvb_net_ioctl(struct inode *inode, struct file *file, 1435static long dvb_net_ioctl(struct file *file,
1435 unsigned int cmd, unsigned long arg) 1436 unsigned int cmd, unsigned long arg)
1436{ 1437{
1437 return dvb_usercopy(inode, file, cmd, arg, dvb_net_do_ioctl); 1438 int ret;
1439
1440 lock_kernel();
1441 ret = dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
1442 unlock_kernel();
1443
1444 return ret;
1438} 1445}
1439 1446
1440static int dvb_net_close(struct inode *inode, struct file *file) 1447static int dvb_net_close(struct inode *inode, struct file *file)
@@ -1455,7 +1462,7 @@ static int dvb_net_close(struct inode *inode, struct file *file)
1455 1462
1456static const struct file_operations dvb_net_fops = { 1463static const struct file_operations dvb_net_fops = {
1457 .owner = THIS_MODULE, 1464 .owner = THIS_MODULE,
1458 .ioctl = dvb_net_ioctl, 1465 .unlocked_ioctl = dvb_net_ioctl,
1459 .open = dvb_generic_open, 1466 .open = dvb_generic_open,
1460 .release = dvb_net_close, 1467 .release = dvb_net_close,
1461}; 1468};
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index 94159b90f733..b915c39d782f 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -154,10 +154,11 @@ int dvb_generic_release(struct inode *inode, struct file *file)
154EXPORT_SYMBOL(dvb_generic_release); 154EXPORT_SYMBOL(dvb_generic_release);
155 155
156 156
157int dvb_generic_ioctl(struct inode *inode, struct file *file, 157long dvb_generic_ioctl(struct file *file,
158 unsigned int cmd, unsigned long arg) 158 unsigned int cmd, unsigned long arg)
159{ 159{
160 struct dvb_device *dvbdev = file->private_data; 160 struct dvb_device *dvbdev = file->private_data;
161 int ret;
161 162
162 if (!dvbdev) 163 if (!dvbdev)
163 return -ENODEV; 164 return -ENODEV;
@@ -165,7 +166,11 @@ int dvb_generic_ioctl(struct inode *inode, struct file *file,
165 if (!dvbdev->kernel_ioctl) 166 if (!dvbdev->kernel_ioctl)
166 return -EINVAL; 167 return -EINVAL;
167 168
168 return dvb_usercopy (inode, file, cmd, arg, dvbdev->kernel_ioctl); 169 lock_kernel();
170 ret = dvb_usercopy(file, cmd, arg, dvbdev->kernel_ioctl);
171 unlock_kernel();
172
173 return ret;
169} 174}
170EXPORT_SYMBOL(dvb_generic_ioctl); 175EXPORT_SYMBOL(dvb_generic_ioctl);
171 176
@@ -377,9 +382,9 @@ EXPORT_SYMBOL(dvb_unregister_adapter);
 377 define this as video_usercopy(). This will introduce a dependency 382 define this as video_usercopy(). This will introduce a dependency
 378 to the v4l "videodev.o" module, which is unnecessary for some 383 to the v4l "videodev.o" module, which is unnecessary for some
 379 cards (i.e. the budget dvb-cards don't need the v4l module...) */ 384 cards (i.e. the budget dvb-cards don't need the v4l module...) */
380int dvb_usercopy(struct inode *inode, struct file *file, 385int dvb_usercopy(struct file *file,
381 unsigned int cmd, unsigned long arg, 386 unsigned int cmd, unsigned long arg,
382 int (*func)(struct inode *inode, struct file *file, 387 int (*func)(struct file *file,
383 unsigned int cmd, void *arg)) 388 unsigned int cmd, void *arg))
384{ 389{
385 char sbuf[128]; 390 char sbuf[128];
@@ -416,7 +421,7 @@ int dvb_usercopy(struct inode *inode, struct file *file,
416 } 421 }
417 422
418 /* call driver */ 423 /* call driver */
419 if ((err = func(inode, file, cmd, parg)) == -ENOIOCTLCMD) 424 if ((err = func(file, cmd, parg)) == -ENOIOCTLCMD)
420 err = -EINVAL; 425 err = -EINVAL;
421 426
422 if (err < 0) 427 if (err < 0)
diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h
index f7b499d4a3c0..fcc6ae98745e 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.h
+++ b/drivers/media/dvb/dvb-core/dvbdev.h
@@ -116,8 +116,7 @@ struct dvb_device {
116 116
117 wait_queue_head_t wait_queue; 117 wait_queue_head_t wait_queue;
118 /* don't really need those !? -- FIXME: use video_usercopy */ 118 /* don't really need those !? -- FIXME: use video_usercopy */
119 int (*kernel_ioctl)(struct inode *inode, struct file *file, 119 int (*kernel_ioctl)(struct file *file, unsigned int cmd, void *arg);
120 unsigned int cmd, void *arg);
121 120
122 void *priv; 121 void *priv;
123}; 122};
@@ -138,17 +137,15 @@ extern void dvb_unregister_device (struct dvb_device *dvbdev);
138 137
139extern int dvb_generic_open (struct inode *inode, struct file *file); 138extern int dvb_generic_open (struct inode *inode, struct file *file);
140extern int dvb_generic_release (struct inode *inode, struct file *file); 139extern int dvb_generic_release (struct inode *inode, struct file *file);
141extern int dvb_generic_ioctl (struct inode *inode, struct file *file, 140extern long dvb_generic_ioctl (struct file *file,
142 unsigned int cmd, unsigned long arg); 141 unsigned int cmd, unsigned long arg);
143 142
144/* we don't mess with video_usercopy() any more, 143/* we don't mess with video_usercopy() any more,
 145we simply define our own dvb_usercopy(), which will hopefully become 144we simply define our own dvb_usercopy(), which will hopefully become
146generic_usercopy() someday... */ 145generic_usercopy() someday... */
147 146
148extern int dvb_usercopy(struct inode *inode, struct file *file, 147extern int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
149 unsigned int cmd, unsigned long arg, 148 int (*func)(struct file *file, unsigned int cmd, void *arg));
150 int (*func)(struct inode *inode, struct file *file,
151 unsigned int cmd, void *arg));
152 149
153/** generic DVB attach function. */ 150/** generic DVB attach function. */
154#ifdef CONFIG_MEDIA_ATTACH 151#ifdef CONFIG_MEDIA_ATTACH
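
With the inode parameter dropped from kernel_ioctl, a driver that leans on the generic helpers only has to supply the new three-argument callback; dvb_generic_ioctl() takes the BKL and routes the request through dvb_usercopy(). A hedged sketch of the wiring, using hypothetical my_* names rather than any driver in this series:

        #include <linux/errno.h>
        #include <linux/fs.h>
        #include "dvbdev.h"

        static int my_kernel_ioctl(struct file *file, unsigned int cmd, void *parg)
        {
                struct dvb_device *dvbdev = file->private_data;

                if (!dvbdev)
                        return -ENODEV;
                /* decode cmd and act on parg here */
                return 0;
        }

        static const struct file_operations my_fops = {
                .owner          = THIS_MODULE,
                .unlocked_ioctl = dvb_generic_ioctl,
                .open           = dvb_generic_open,
                .release        = dvb_generic_release,
        };

        /* template passed to dvb_register_device() */
        static struct dvb_device my_dvbdev_template = {
                .users        = 1,
                .writers      = 1,
                .fops         = &my_fops,
                .kernel_ioctl = my_kernel_ioctl,
        };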
diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c
index 853e04b7cb36..d3c2cf60de76 100644
--- a/drivers/media/dvb/firewire/firedtv-ci.c
+++ b/drivers/media/dvb/firewire/firedtv-ci.c
@@ -175,8 +175,7 @@ static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg)
175 return err; 175 return err;
176} 176}
177 177
178static int fdtv_ca_ioctl(struct inode *inode, struct file *file, 178static int fdtv_ca_ioctl(struct file *file, unsigned int cmd, void *arg)
179 unsigned int cmd, void *arg)
180{ 179{
181 struct dvb_device *dvbdev = file->private_data; 180 struct dvb_device *dvbdev = file->private_data;
182 struct firedtv *fdtv = dvbdev->priv; 181 struct firedtv *fdtv = dvbdev->priv;
@@ -217,7 +216,7 @@ static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
217 216
218static const struct file_operations fdtv_ca_fops = { 217static const struct file_operations fdtv_ca_fops = {
219 .owner = THIS_MODULE, 218 .owner = THIS_MODULE,
220 .ioctl = dvb_generic_ioctl, 219 .unlocked_ioctl = dvb_generic_ioctl,
221 .open = dvb_generic_open, 220 .open = dvb_generic_open,
222 .release = dvb_generic_release, 221 .release = dvb_generic_release,
223 .poll = fdtv_ca_io_poll, 222 .poll = fdtv_ca_io_poll,
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 38915591c6e5..a6be529eec5c 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -708,7 +708,7 @@ static void gpioirq(unsigned long cookie)
708 708
709 709
710#ifdef CONFIG_DVB_AV7110_OSD 710#ifdef CONFIG_DVB_AV7110_OSD
711static int dvb_osd_ioctl(struct inode *inode, struct file *file, 711static int dvb_osd_ioctl(struct file *file,
712 unsigned int cmd, void *parg) 712 unsigned int cmd, void *parg)
713{ 713{
714 struct dvb_device *dvbdev = file->private_data; 714 struct dvb_device *dvbdev = file->private_data;
@@ -727,7 +727,7 @@ static int dvb_osd_ioctl(struct inode *inode, struct file *file,
727 727
728static const struct file_operations dvb_osd_fops = { 728static const struct file_operations dvb_osd_fops = {
729 .owner = THIS_MODULE, 729 .owner = THIS_MODULE,
730 .ioctl = dvb_generic_ioctl, 730 .unlocked_ioctl = dvb_generic_ioctl,
731 .open = dvb_generic_open, 731 .open = dvb_generic_open,
732 .release = dvb_generic_release, 732 .release = dvb_generic_release,
733}; 733};
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 53884814161c..13efba942dac 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -1089,7 +1089,7 @@ static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len
1089} 1089}
1090 1090
1091 1091
1092static int dvb_video_ioctl(struct inode *inode, struct file *file, 1092static int dvb_video_ioctl(struct file *file,
1093 unsigned int cmd, void *parg) 1093 unsigned int cmd, void *parg)
1094{ 1094{
1095 struct dvb_device *dvbdev = file->private_data; 1095 struct dvb_device *dvbdev = file->private_data;
@@ -1297,7 +1297,7 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1297 return ret; 1297 return ret;
1298} 1298}
1299 1299
1300static int dvb_audio_ioctl(struct inode *inode, struct file *file, 1300static int dvb_audio_ioctl(struct file *file,
1301 unsigned int cmd, void *parg) 1301 unsigned int cmd, void *parg)
1302{ 1302{
1303 struct dvb_device *dvbdev = file->private_data; 1303 struct dvb_device *dvbdev = file->private_data;
@@ -1517,7 +1517,7 @@ static int dvb_audio_release(struct inode *inode, struct file *file)
1517static const struct file_operations dvb_video_fops = { 1517static const struct file_operations dvb_video_fops = {
1518 .owner = THIS_MODULE, 1518 .owner = THIS_MODULE,
1519 .write = dvb_video_write, 1519 .write = dvb_video_write,
1520 .ioctl = dvb_generic_ioctl, 1520 .unlocked_ioctl = dvb_generic_ioctl,
1521 .open = dvb_video_open, 1521 .open = dvb_video_open,
1522 .release = dvb_video_release, 1522 .release = dvb_video_release,
1523 .poll = dvb_video_poll, 1523 .poll = dvb_video_poll,
@@ -1535,7 +1535,7 @@ static struct dvb_device dvbdev_video = {
1535static const struct file_operations dvb_audio_fops = { 1535static const struct file_operations dvb_audio_fops = {
1536 .owner = THIS_MODULE, 1536 .owner = THIS_MODULE,
1537 .write = dvb_audio_write, 1537 .write = dvb_audio_write,
1538 .ioctl = dvb_generic_ioctl, 1538 .unlocked_ioctl = dvb_generic_ioctl,
1539 .open = dvb_audio_open, 1539 .open = dvb_audio_open,
1540 .release = dvb_audio_release, 1540 .release = dvb_audio_release,
1541 .poll = dvb_audio_poll, 1541 .poll = dvb_audio_poll,
diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c
index ac7779c45c5b..4eba35a018e3 100644
--- a/drivers/media/dvb/ttpci/av7110_ca.c
+++ b/drivers/media/dvb/ttpci/av7110_ca.c
@@ -248,8 +248,7 @@ static unsigned int dvb_ca_poll (struct file *file, poll_table *wait)
248 return mask; 248 return mask;
249} 249}
250 250
251static int dvb_ca_ioctl(struct inode *inode, struct file *file, 251static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
252 unsigned int cmd, void *parg)
253{ 252{
254 struct dvb_device *dvbdev = file->private_data; 253 struct dvb_device *dvbdev = file->private_data;
255 struct av7110 *av7110 = dvbdev->priv; 254 struct av7110 *av7110 = dvbdev->priv;
@@ -350,7 +349,7 @@ static const struct file_operations dvb_ca_fops = {
350 .owner = THIS_MODULE, 349 .owner = THIS_MODULE,
351 .read = dvb_ca_read, 350 .read = dvb_ca_read,
352 .write = dvb_ca_write, 351 .write = dvb_ca_write,
353 .ioctl = dvb_generic_ioctl, 352 .unlocked_ioctl = dvb_generic_ioctl,
354 .open = dvb_ca_open, 353 .open = dvb_ca_open,
355 .release = dvb_generic_release, 354 .release = dvb_generic_release,
356 .poll = dvb_ca_poll, 355 .poll = dvb_ca_poll,
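
Worth noting alongside these conversions: neither the old .ioctl hook nor .unlocked_ioctl services 32-bit processes on a 64-bit kernel automatically. Where a driver's ioctl ABI uses only fixed-size types, the usual follow-up (not part of this series) is to forward .compat_ioctl to the same handler; a speculative sketch, reusing the hypothetical foo_ioctl from the earlier pattern:

        #include <linux/compat.h>

        #ifdef CONFIG_COMPAT
        static long foo_compat_ioctl(struct file *file, unsigned int cmd,
                                     unsigned long arg)
        {
                /* safe only when the 32-bit and 64-bit argument layouts match;
                 * compat_ptr() canonicalises the user pointer (matters on s390) */
                return foo_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
        }
        #endif

        /* and in the file_operations:  .compat_ioctl = foo_compat_ioctl, */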
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 7bd4c0fc23cc..5c53624e0e87 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2570,9 +2570,7 @@ mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
2570} 2570}
2571 2571
2572/** 2572/**
2573 * mptscsih_set_scsi_lookup 2573 * mptscsih_set_scsi_lookup - write a scmd entry into the ScsiLookup[] array list
2574 *
2575 * writes a scmd entry into the ScsiLookup[] array list
2576 * 2574 *
2577 * @ioc: Pointer to MPT_ADAPTER structure 2575 * @ioc: Pointer to MPT_ADAPTER structure
2578 * @i: index into the array 2576 * @i: index into the array
@@ -2735,7 +2733,7 @@ mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
2735 2733
2736 2734
2737/** 2735/**
2738 * mptscsih_get_completion_code - 2736 * mptscsih_get_completion_code - get completion code from MPT request
2739 * @ioc: Pointer to MPT_ADAPTER structure 2737 * @ioc: Pointer to MPT_ADAPTER structure
2740 * @req: Pointer to original MPT request frame 2738 * @req: Pointer to original MPT request frame
2741 * @reply: Pointer to MPT reply frame (NULL if TurboReply) 2739 * @reply: Pointer to MPT reply frame (NULL if TurboReply)
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index d33693c13368..c4b117f5fb70 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -186,14 +186,9 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
186 if (!dev) 186 if (!dev)
187 return -ENXIO; 187 return -ENXIO;
188 188
189 ops = kmalloc(kcmd.oplen, GFP_KERNEL); 189 ops = memdup_user(kcmd.opbuf, kcmd.oplen);
190 if (!ops) 190 if (IS_ERR(ops))
191 return -ENOMEM; 191 return PTR_ERR(ops);
192
193 if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) {
194 kfree(ops);
195 return -EFAULT;
196 }
197 192
198 /* 193 /*
199 * It's possible to have a _very_ large table 194 * It's possible to have a _very_ large table
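
The i2o_config change above is a pattern worth naming: an open-coded kmalloc() plus copy_from_user() plus manual error unwinding collapses into memdup_user(), which returns either the filled buffer or an ERR_PTR-encoded errno. A self-contained sketch of the idiom (copy_table and its parameters are hypothetical):

        #include <linux/err.h>
        #include <linux/slab.h>
        #include <linux/string.h>

        static int copy_table(const void __user *ubuf, size_t len)
        {
                void *buf;

                buf = memdup_user(ubuf, len);   /* kmalloc + copy_from_user + unwind */
                if (IS_ERR(buf))
                        return PTR_ERR(buf);    /* -ENOMEM or -EFAULT, as before */

                /* ... consume buf ... */

                kfree(buf);
                return 0;
        }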
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 6a14d2b1ccf0..405d2d5183cf 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -173,33 +173,35 @@ static struct resource regulator_resources[] = {
173 PM8607_REG_RESOURCE(LDO9, LDO9), 173 PM8607_REG_RESOURCE(LDO9, LDO9),
174 PM8607_REG_RESOURCE(LDO10, LDO10), 174 PM8607_REG_RESOURCE(LDO10, LDO10),
175 PM8607_REG_RESOURCE(LDO12, LDO12), 175 PM8607_REG_RESOURCE(LDO12, LDO12),
176 PM8607_REG_RESOURCE(VIBRATOR_SET, VIBRATOR_SET),
176 PM8607_REG_RESOURCE(LDO14, LDO14), 177 PM8607_REG_RESOURCE(LDO14, LDO14),
177}; 178};
178 179
179#define PM8607_REG_DEVS(_name, _id) \ 180#define PM8607_REG_DEVS(_id) \
180{ \ 181{ \
181 .name = "88pm8607-" #_name, \ 182 .name = "88pm860x-regulator", \
182 .num_resources = 1, \ 183 .num_resources = 1, \
183 .resources = &regulator_resources[PM8607_ID_##_id], \ 184 .resources = &regulator_resources[PM8607_ID_##_id], \
184 .id = PM8607_ID_##_id, \ 185 .id = PM8607_ID_##_id, \
185} 186}
186 187
187static struct mfd_cell regulator_devs[] = { 188static struct mfd_cell regulator_devs[] = {
188 PM8607_REG_DEVS(buck1, BUCK1), 189 PM8607_REG_DEVS(BUCK1),
189 PM8607_REG_DEVS(buck2, BUCK2), 190 PM8607_REG_DEVS(BUCK2),
190 PM8607_REG_DEVS(buck3, BUCK3), 191 PM8607_REG_DEVS(BUCK3),
191 PM8607_REG_DEVS(ldo1, LDO1), 192 PM8607_REG_DEVS(LDO1),
192 PM8607_REG_DEVS(ldo2, LDO2), 193 PM8607_REG_DEVS(LDO2),
193 PM8607_REG_DEVS(ldo3, LDO3), 194 PM8607_REG_DEVS(LDO3),
194 PM8607_REG_DEVS(ldo4, LDO4), 195 PM8607_REG_DEVS(LDO4),
195 PM8607_REG_DEVS(ldo5, LDO5), 196 PM8607_REG_DEVS(LDO5),
196 PM8607_REG_DEVS(ldo6, LDO6), 197 PM8607_REG_DEVS(LDO6),
197 PM8607_REG_DEVS(ldo7, LDO7), 198 PM8607_REG_DEVS(LDO7),
198 PM8607_REG_DEVS(ldo8, LDO8), 199 PM8607_REG_DEVS(LDO8),
199 PM8607_REG_DEVS(ldo9, LDO9), 200 PM8607_REG_DEVS(LDO9),
200 PM8607_REG_DEVS(ldo10, LDO10), 201 PM8607_REG_DEVS(LDO10),
201 PM8607_REG_DEVS(ldo12, LDO12), 202 PM8607_REG_DEVS(LDO12),
202 PM8607_REG_DEVS(ldo14, LDO14), 203 PM8607_REG_DEVS(LDO13),
204 PM8607_REG_DEVS(LDO14),
203}; 205};
204 206
205struct pm860x_irq_data { 207struct pm860x_irq_data {
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index de3e74cde51c..3c6a9860dd9c 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -49,6 +49,7 @@ config MFD_SH_MOBILE_SDHI
49 bool "Support for SuperH Mobile SDHI" 49 bool "Support for SuperH Mobile SDHI"
50 depends on SUPERH || ARCH_SHMOBILE 50 depends on SUPERH || ARCH_SHMOBILE
51 select MFD_CORE 51 select MFD_CORE
52 select TMIO_MMC_DMA
52 ---help--- 53 ---help---
53 This driver supports the SDHI hardware block found in many 54 This driver supports the SDHI hardware block found in many
54 SuperH Mobile SoCs. 55 SuperH Mobile SoCs.
@@ -162,6 +163,11 @@ config MFD_TMIO
162 bool 163 bool
163 default n 164 default n
164 165
166config TMIO_MMC_DMA
167 bool
168 select DMA_ENGINE
169 select DMADEVICES
170
165config MFD_T7L66XB 171config MFD_T7L66XB
166 bool "Support Toshiba T7L66XB" 172 bool "Support Toshiba T7L66XB"
167 depends on ARM && HAVE_CLK 173 depends on ARM && HAVE_CLK
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index 497f91b6138e..cd164595f08a 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -26,11 +26,15 @@
26#include <linux/mfd/core.h> 26#include <linux/mfd/core.h>
27#include <linux/mfd/tmio.h> 27#include <linux/mfd/tmio.h>
28#include <linux/mfd/sh_mobile_sdhi.h> 28#include <linux/mfd/sh_mobile_sdhi.h>
29#include <linux/sh_dma.h>
29 30
30struct sh_mobile_sdhi { 31struct sh_mobile_sdhi {
31 struct clk *clk; 32 struct clk *clk;
32 struct tmio_mmc_data mmc_data; 33 struct tmio_mmc_data mmc_data;
33 struct mfd_cell cell_mmc; 34 struct mfd_cell cell_mmc;
35 struct sh_dmae_slave param_tx;
36 struct sh_dmae_slave param_rx;
37 struct tmio_mmc_dma dma_priv;
34}; 38};
35 39
36static struct resource sh_mobile_sdhi_resources[] = { 40static struct resource sh_mobile_sdhi_resources[] = {
@@ -64,6 +68,8 @@ static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state)
64static int __init sh_mobile_sdhi_probe(struct platform_device *pdev) 68static int __init sh_mobile_sdhi_probe(struct platform_device *pdev)
65{ 69{
66 struct sh_mobile_sdhi *priv; 70 struct sh_mobile_sdhi *priv;
71 struct tmio_mmc_data *mmc_data;
72 struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
67 struct resource *mem; 73 struct resource *mem;
68 char clk_name[8]; 74 char clk_name[8];
69 int ret, irq; 75 int ret, irq;
@@ -85,6 +91,8 @@ static int __init sh_mobile_sdhi_probe(struct platform_device *pdev)
85 return -ENOMEM; 91 return -ENOMEM;
86 } 92 }
87 93
94 mmc_data = &priv->mmc_data;
95
88 snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); 96 snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
89 priv->clk = clk_get(&pdev->dev, clk_name); 97 priv->clk = clk_get(&pdev->dev, clk_name);
90 if (IS_ERR(priv->clk)) { 98 if (IS_ERR(priv->clk)) {
@@ -96,12 +104,24 @@ static int __init sh_mobile_sdhi_probe(struct platform_device *pdev)
96 104
97 clk_enable(priv->clk); 105 clk_enable(priv->clk);
98 106
99 priv->mmc_data.hclk = clk_get_rate(priv->clk); 107 mmc_data->hclk = clk_get_rate(priv->clk);
100 priv->mmc_data.set_pwr = sh_mobile_sdhi_set_pwr; 108 mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
101 priv->mmc_data.capabilities = MMC_CAP_MMC_HIGHSPEED; 109 mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
110 if (p) {
111 mmc_data->flags = p->tmio_flags;
112 mmc_data->ocr_mask = p->tmio_ocr_mask;
113 }
114
115 if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
116 priv->param_tx.slave_id = p->dma_slave_tx;
117 priv->param_rx.slave_id = p->dma_slave_rx;
118 priv->dma_priv.chan_priv_tx = &priv->param_tx;
119 priv->dma_priv.chan_priv_rx = &priv->param_rx;
120 mmc_data->dma = &priv->dma_priv;
121 }
102 122
103 memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc)); 123 memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc));
104 priv->cell_mmc.driver_data = &priv->mmc_data; 124 priv->cell_mmc.driver_data = mmc_data;
105 priv->cell_mmc.platform_data = &priv->cell_mmc; 125 priv->cell_mmc.platform_data = &priv->cell_mmc;
106 priv->cell_mmc.data_size = sizeof(priv->cell_mmc); 126 priv->cell_mmc.data_size = sizeof(priv->cell_mmc);
107 127
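
The probe above only wires up DMA when the board passes non-negative slave IDs, so opting in is a platform-data decision. A sketch of what board code might provide through sh_mobile_sdhi_info; the SHDMA_SLAVE_SDHI0_* IDs are SoC-specific placeholders:

        #include <linux/platform_device.h>
        #include <linux/mfd/sh_mobile_sdhi.h>

        static struct sh_mobile_sdhi_info sdhi0_info = {
                .dma_slave_tx  = SHDMA_SLAVE_SDHI0_TX,  /* from the SoC's slave ID enum */
                .dma_slave_rx  = SHDMA_SLAVE_SDHI0_RX,
                .tmio_flags    = 0,
                .tmio_ocr_mask = 0,                     /* 0 keeps the driver default */
        };

        static struct platform_device sdhi0_device = {
                .name = "sh_mobile_sdhi",
                .id   = 0,
                .dev  = {
                        .platform_data = &sdhi0_info,
                },
        };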
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0d0d625fece2..26386a92f5aa 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -14,11 +14,17 @@ menuconfig MISC_DEVICES
14if MISC_DEVICES 14if MISC_DEVICES
15 15
16config AD525X_DPOT 16config AD525X_DPOT
17 tristate "Analog Devices AD525x Digital Potentiometers" 17 tristate "Analog Devices Digital Potentiometers"
18 depends on I2C && SYSFS 18 depends on (I2C || SPI) && SYSFS
19 help 19 help
20 If you say yes here, you get support for the Analog Devices 20 If you say yes here, you get support for the Analog Devices
21 AD5258, AD5259, AD5251, AD5252, AD5253, AD5254 and AD5255 21 AD5258, AD5259, AD5251, AD5252, AD5253, AD5254, AD5255
22 AD5160, AD5161, AD5162, AD5165, AD5200, AD5201, AD5203,
23 AD5204, AD5206, AD5207, AD5231, AD5232, AD5233, AD5235,
24 AD5260, AD5262, AD5263, AD5290, AD5291, AD5292, AD5293,
25 AD7376, AD8400, AD8402, AD8403, ADN2850, AD5241, AD5242,
26 AD5243, AD5245, AD5246, AD5247, AD5248, AD5280, AD5282,
27 ADN2860, AD5273, AD5171, AD5170, AD5172, AD5173
22 digital potentiometer chips. 28 digital potentiometer chips.
23 29
24 See Documentation/misc-devices/ad525x_dpot.txt for the 30 See Documentation/misc-devices/ad525x_dpot.txt for the
@@ -27,6 +33,26 @@ config AD525X_DPOT
27 This driver can also be built as a module. If so, the module 33 This driver can also be built as a module. If so, the module
28 will be called ad525x_dpot. 34 will be called ad525x_dpot.
29 35
36config AD525X_DPOT_I2C
37 tristate "support I2C bus connection"
38 depends on AD525X_DPOT && I2C
39 help
 40 Say Y here if you have digital potentiometers hooked to an I2C bus.
41
42 To compile this driver as a module, choose M here: the
43 module will be called ad525x_dpot-i2c.
44
45config AD525X_DPOT_SPI
46 tristate "support SPI bus connection"
47 depends on AD525X_DPOT && SPI_MASTER
48 help
 49 Say Y here if you have digital potentiometers hooked to an SPI bus.
50
51 If unsure, say N (but it's safe to say "Y").
52
53 To compile this driver as a module, choose M here: the
54 module will be called ad525x_dpot-spi.
55
30config ATMEL_PWM 56config ATMEL_PWM
31 tristate "Atmel AT32/AT91 PWM support" 57 tristate "Atmel AT32/AT91 PWM support"
32 depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 58 depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f12dc3e54402..6ed06a19474a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -4,6 +4,8 @@
4 4
5obj-$(CONFIG_IBM_ASM) += ibmasm/ 5obj-$(CONFIG_IBM_ASM) += ibmasm/
6obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o 6obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
7obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o
8obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
7obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o 9obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
8obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o 10obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
9obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o 11obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
new file mode 100644
index 000000000000..374352af7979
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -0,0 +1,134 @@
1/*
2 * Driver for the Analog Devices digital potentiometers (I2C bus)
3 *
4 * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/i2c.h>
10#include <linux/module.h>
11
12#include "ad525x_dpot.h"
13
14/* ------------------------------------------------------------------------- */
15/* I2C bus functions */
16static int write_d8(void *client, u8 val)
17{
18 return i2c_smbus_write_byte(client, val);
19}
20
21static int write_r8d8(void *client, u8 reg, u8 val)
22{
23 return i2c_smbus_write_byte_data(client, reg, val);
24}
25
26static int write_r8d16(void *client, u8 reg, u16 val)
27{
28 return i2c_smbus_write_word_data(client, reg, val);
29}
30
31static int read_d8(void *client)
32{
33 return i2c_smbus_read_byte(client);
34}
35
36static int read_r8d8(void *client, u8 reg)
37{
38 return i2c_smbus_read_byte_data(client, reg);
39}
40
41static int read_r8d16(void *client, u8 reg)
42{
43 return i2c_smbus_read_word_data(client, reg);
44}
45
46static const struct ad_dpot_bus_ops bops = {
47 .read_d8 = read_d8,
48 .read_r8d8 = read_r8d8,
49 .read_r8d16 = read_r8d16,
50 .write_d8 = write_d8,
51 .write_r8d8 = write_r8d8,
52 .write_r8d16 = write_r8d16,
53};
54
55static int __devinit ad_dpot_i2c_probe(struct i2c_client *client,
56 const struct i2c_device_id *id)
57{
58 struct ad_dpot_bus_data bdata = {
59 .client = client,
60 .bops = &bops,
61 };
62
63 struct ad_dpot_id dpot_id = {
64 .name = (char *) &id->name,
65 .devid = id->driver_data,
66 };
67
68 if (!i2c_check_functionality(client->adapter,
69 I2C_FUNC_SMBUS_WORD_DATA)) {
70 dev_err(&client->dev, "SMBUS Word Data not Supported\n");
71 return -EIO;
72 }
73
74 return ad_dpot_probe(&client->dev, &bdata, &dpot_id);
75}
76
77static int __devexit ad_dpot_i2c_remove(struct i2c_client *client)
78{
79 return ad_dpot_remove(&client->dev);
80}
81
82static const struct i2c_device_id ad_dpot_id[] = {
83 {"ad5258", AD5258_ID},
84 {"ad5259", AD5259_ID},
85 {"ad5251", AD5251_ID},
86 {"ad5252", AD5252_ID},
87 {"ad5253", AD5253_ID},
88 {"ad5254", AD5254_ID},
89 {"ad5255", AD5255_ID},
90 {"ad5241", AD5241_ID},
91 {"ad5242", AD5242_ID},
92 {"ad5243", AD5243_ID},
93 {"ad5245", AD5245_ID},
94 {"ad5246", AD5246_ID},
95 {"ad5247", AD5247_ID},
96 {"ad5248", AD5248_ID},
97 {"ad5280", AD5280_ID},
98 {"ad5282", AD5282_ID},
99 {"adn2860", ADN2860_ID},
100 {"ad5273", AD5273_ID},
101 {"ad5171", AD5171_ID},
102 {"ad5170", AD5170_ID},
103 {"ad5172", AD5172_ID},
104 {"ad5173", AD5173_ID},
105 {}
106};
107MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
108
109static struct i2c_driver ad_dpot_i2c_driver = {
110 .driver = {
111 .name = "ad_dpot",
112 .owner = THIS_MODULE,
113 },
114 .probe = ad_dpot_i2c_probe,
115 .remove = __devexit_p(ad_dpot_i2c_remove),
116 .id_table = ad_dpot_id,
117};
118
119static int __init ad_dpot_i2c_init(void)
120{
121 return i2c_add_driver(&ad_dpot_i2c_driver);
122}
123module_init(ad_dpot_i2c_init);
124
125static void __exit ad_dpot_i2c_exit(void)
126{
127 i2c_del_driver(&ad_dpot_i2c_driver);
128}
129module_exit(ad_dpot_i2c_exit);
130
131MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
132MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
133MODULE_LICENSE("GPL");
134MODULE_ALIAS("i2c:ad_dpot");
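
Since the I2C front end binds through the id table above, instantiating a part from board code is the stock i2c_board_info registration; a sketch for an AD5258 (bus number and address are illustrative):

        #include <linux/i2c.h>
        #include <linux/init.h>
        #include <linux/kernel.h>

        static struct i2c_board_info dpot_i2c_devices[] __initdata = {
                {
                        I2C_BOARD_INFO("ad5258", 0x4e), /* name matches ad_dpot_id[] */
                },
        };

        static int __init board_dpot_init(void)
        {
                return i2c_register_board_info(0, dpot_i2c_devices,
                                               ARRAY_SIZE(dpot_i2c_devices));
        }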
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
new file mode 100644
index 000000000000..b8c6df9c8437
--- /dev/null
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -0,0 +1,172 @@
1/*
2 * Driver for the Analog Devices digital potentiometers (SPI bus)
3 *
4 * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/spi/spi.h>
10#include <linux/module.h>
11
12#include "ad525x_dpot.h"
13
14static const struct ad_dpot_id ad_dpot_spi_devlist[] = {
15 {.name = "ad5160", .devid = AD5160_ID},
16 {.name = "ad5161", .devid = AD5161_ID},
17 {.name = "ad5162", .devid = AD5162_ID},
18 {.name = "ad5165", .devid = AD5165_ID},
19 {.name = "ad5200", .devid = AD5200_ID},
20 {.name = "ad5201", .devid = AD5201_ID},
21 {.name = "ad5203", .devid = AD5203_ID},
22 {.name = "ad5204", .devid = AD5204_ID},
23 {.name = "ad5206", .devid = AD5206_ID},
24 {.name = "ad5207", .devid = AD5207_ID},
25 {.name = "ad5231", .devid = AD5231_ID},
26 {.name = "ad5232", .devid = AD5232_ID},
27 {.name = "ad5233", .devid = AD5233_ID},
28 {.name = "ad5235", .devid = AD5235_ID},
29 {.name = "ad5260", .devid = AD5260_ID},
30 {.name = "ad5262", .devid = AD5262_ID},
31 {.name = "ad5263", .devid = AD5263_ID},
32 {.name = "ad5290", .devid = AD5290_ID},
33 {.name = "ad5291", .devid = AD5291_ID},
34 {.name = "ad5292", .devid = AD5292_ID},
35 {.name = "ad5293", .devid = AD5293_ID},
36 {.name = "ad7376", .devid = AD7376_ID},
37 {.name = "ad8400", .devid = AD8400_ID},
38 {.name = "ad8402", .devid = AD8402_ID},
39 {.name = "ad8403", .devid = AD8403_ID},
40 {.name = "adn2850", .devid = ADN2850_ID},
41 {}
42};
43
44/* ------------------------------------------------------------------------- */
45
46/* SPI bus functions */
47static int write8(void *client, u8 val)
48{
49 u8 data = val;
50 return spi_write(client, &data, 1);
51}
52
53static int write16(void *client, u8 reg, u8 val)
54{
55 u8 data[2] = {reg, val};
 56 return spi_write(client, data, 2); /* reg + value = 2 bytes */
57}
58
59static int write24(void *client, u8 reg, u16 val)
60{
61 u8 data[3] = {reg, val >> 8, val};
 62 return spi_write(client, data, 3); /* reg + 16-bit value = 3 bytes */
63}
64
65static int read8(void *client)
66{
67 int ret;
68 u8 data;
69 ret = spi_read(client, &data, 1);
70 if (ret < 0)
71 return ret;
72
73 return data;
74}
75
76static int read16(void *client, u8 reg)
77{
78 int ret;
79 u8 buf_rx[2];
80
81 write16(client, reg, 0);
82 ret = spi_read(client, buf_rx, 2);
83 if (ret < 0)
84 return ret;
85
86 return (buf_rx[0] << 8) | buf_rx[1];
87}
88
89static int read24(void *client, u8 reg)
90{
91 int ret;
92 u8 buf_rx[3];
93
94 write24(client, reg, 0);
95 ret = spi_read(client, buf_rx, 3);
96 if (ret < 0)
97 return ret;
98
99 return (buf_rx[1] << 8) | buf_rx[2];
100}
101
102static const struct ad_dpot_bus_ops bops = {
103 .read_d8 = read8,
104 .read_r8d8 = read16,
105 .read_r8d16 = read24,
106 .write_d8 = write8,
107 .write_r8d8 = write16,
108 .write_r8d16 = write24,
109};
110
111static const struct ad_dpot_id *dpot_match_id(const struct ad_dpot_id *id,
112 char *name)
113{
114 while (id->name && id->name[0]) {
115 if (strcmp(name, id->name) == 0)
116 return id;
117 id++;
118 }
119 return NULL;
120}
121
122static int __devinit ad_dpot_spi_probe(struct spi_device *spi)
123{
124 char *name = spi->dev.platform_data;
125 const struct ad_dpot_id *dpot_id;
126
127 struct ad_dpot_bus_data bdata = {
128 .client = spi,
129 .bops = &bops,
130 };
131
132 dpot_id = dpot_match_id(ad_dpot_spi_devlist, name);
133
134 if (dpot_id == NULL) {
 135 dev_err(&spi->dev, "%s not in supported device list\n", name);
136 return -ENODEV;
137 }
138
139 return ad_dpot_probe(&spi->dev, &bdata, dpot_id);
140}
141
142static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
143{
144 return ad_dpot_remove(&spi->dev);
145}
146
147static struct spi_driver ad_dpot_spi_driver = {
148 .driver = {
149 .name = "ad_dpot",
150 .bus = &spi_bus_type,
151 .owner = THIS_MODULE,
152 },
153 .probe = ad_dpot_spi_probe,
154 .remove = __devexit_p(ad_dpot_spi_remove),
155};
156
157static int __init ad_dpot_spi_init(void)
158{
159 return spi_register_driver(&ad_dpot_spi_driver);
160}
161module_init(ad_dpot_spi_init);
162
163static void __exit ad_dpot_spi_exit(void)
164{
165 spi_unregister_driver(&ad_dpot_spi_driver);
166}
167module_exit(ad_dpot_spi_exit);
168
169MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
170MODULE_DESCRIPTION("digital potentiometer SPI bus driver");
171MODULE_LICENSE("GPL");
172MODULE_ALIAS("spi:ad_dpot");
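
Unlike the I2C variant, the SPI probe identifies the part by a plain name string handed over as platform_data, which dpot_match_id() checks against ad_dpot_spi_devlist. Board code would therefore register something along these lines (bus number, chip select, and clock rate are placeholders):

        #include <linux/spi/spi.h>

        static struct spi_board_info dpot_spi_devices[] __initdata = {
                {
                        .modalias      = "ad_dpot",     /* matches the driver name above */
                        .platform_data = "ad5204",      /* looked up in ad_dpot_spi_devlist */
                        .max_speed_hz  = 1000000,
                        .bus_num       = 0,
                        .chip_select   = 1,
                },
        };

        /* typically from the board's init code:
         *      spi_register_board_info(dpot_spi_devices,
         *                              ARRAY_SIZE(dpot_spi_devices));
         */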
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 30a59f2bacd2..5e6fa8449e8b 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * ad525x_dpot: Driver for the Analog Devices AD525x digital potentiometers 2 * ad525x_dpot: Driver for the Analog Devices digital potentiometers
3 * Copyright (c) 2009 Analog Devices, Inc. 3 * Copyright (c) 2009-2010 Analog Devices, Inc.
4 * Author: Michael Hennerich <hennerich@blackfin.uclinux.org> 4 * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
5 * 5 *
6 * DEVID #Wipers #Positions Resistor Options (kOhm) 6 * DEVID #Wipers #Positions Resistor Options (kOhm)
@@ -11,6 +11,47 @@
11 * AD5255 3 512 25, 250 11 * AD5255 3 512 25, 250
12 * AD5253 4 64 1, 10, 50, 100 12 * AD5253 4 64 1, 10, 50, 100
13 * AD5254 4 256 1, 10, 50, 100 13 * AD5254 4 256 1, 10, 50, 100
14 * AD5160 1 256 5, 10, 50, 100
15 * AD5161 1 256 5, 10, 50, 100
16 * AD5162 2 256 2.5, 10, 50, 100
17 * AD5165 1 256 100
18 * AD5200 1 256 10, 50
19 * AD5201 1 33 10, 50
20 * AD5203 4 64 10, 100
21 * AD5204 4 256 10, 50, 100
22 * AD5206 6 256 10, 50, 100
23 * AD5207 2 256 10, 50, 100
24 * AD5231 1 1024 10, 50, 100
25 * AD5232 2 256 10, 50, 100
26 * AD5233 4 64 10, 50, 100
27 * AD5235 2 1024 25, 250
28 * AD5260 1 256 20, 50, 200
29 * AD5262 2 256 20, 50, 200
30 * AD5263 4 256 20, 50, 200
31 * AD5290 1 256 10, 50, 100
32 * AD5291 1 256 20
33 * AD5292 1 1024 20
34 * AD5293 1 1024 20
35 * AD7376 1 128 10, 50, 100, 1M
36 * AD8400 1 256 1, 10, 50, 100
37 * AD8402 2 256 1, 10, 50, 100
38 * AD8403 4 256 1, 10, 50, 100
39 * ADN2850 3 512 25, 250
40 * AD5241 1 256 10, 100, 1M
41 * AD5246 1 128 5, 10, 50, 100
42 * AD5247 1 128 5, 10, 50, 100
43 * AD5245 1 256 5, 10, 50, 100
44 * AD5243 2 256 2.5, 10, 50, 100
45 * AD5248 2 256 2.5, 10, 50, 100
46 * AD5242 2 256 20, 50, 200
47 * AD5280 1 256 20, 50, 200
48 * AD5282 2 256 20, 50, 200
49 * ADN2860 3 512 25, 250
50 * AD5273 1 64 1, 10, 50, 100 (OTP)
51 * AD5171 1 64 5, 10, 50, 100 (OTP)
52 * AD5170 1 256 2.5, 10, 50, 100 (OTP)
53 * AD5172 2 256 2.5, 10, 50, 100 (OTP)
54 * AD5173 2 256 2.5, 10, 50, 100 (OTP)
14 * 55 *
15 * See Documentation/misc-devices/ad525x_dpot.txt for more info. 56 * See Documentation/misc-devices/ad525x_dpot.txt for more info.
16 * 57 *
@@ -28,77 +69,283 @@
28#include <linux/device.h> 69#include <linux/device.h>
29#include <linux/kernel.h> 70#include <linux/kernel.h>
30#include <linux/init.h> 71#include <linux/init.h>
31#include <linux/slab.h>
32#include <linux/i2c.h>
33#include <linux/delay.h> 72#include <linux/delay.h>
73#include <linux/slab.h>
34 74
35#define DRIVER_NAME "ad525x_dpot" 75#define DRIVER_VERSION "0.2"
36#define DRIVER_VERSION "0.1"
37
38enum dpot_devid {
39 AD5258_ID,
40 AD5259_ID,
41 AD5251_ID,
42 AD5252_ID,
43 AD5253_ID,
44 AD5254_ID,
45 AD5255_ID,
46};
47 76
48#define AD5258_MAX_POSITION 64 77#include "ad525x_dpot.h"
49#define AD5259_MAX_POSITION 256
50#define AD5251_MAX_POSITION 64
51#define AD5252_MAX_POSITION 256
52#define AD5253_MAX_POSITION 64
53#define AD5254_MAX_POSITION 256
54#define AD5255_MAX_POSITION 512
55
56#define AD525X_RDAC0 0
57#define AD525X_RDAC1 1
58#define AD525X_RDAC2 2
59#define AD525X_RDAC3 3
60
61#define AD525X_REG_TOL 0x18
62#define AD525X_TOL_RDAC0 (AD525X_REG_TOL | AD525X_RDAC0)
63#define AD525X_TOL_RDAC1 (AD525X_REG_TOL | AD525X_RDAC1)
64#define AD525X_TOL_RDAC2 (AD525X_REG_TOL | AD525X_RDAC2)
65#define AD525X_TOL_RDAC3 (AD525X_REG_TOL | AD525X_RDAC3)
66
67/* RDAC-to-EEPROM Interface Commands */
68#define AD525X_I2C_RDAC (0x00 << 5)
69#define AD525X_I2C_EEPROM (0x01 << 5)
70#define AD525X_I2C_CMD (0x80)
71
72#define AD525X_DEC_ALL_6DB (AD525X_I2C_CMD | (0x4 << 3))
73#define AD525X_INC_ALL_6DB (AD525X_I2C_CMD | (0x9 << 3))
74#define AD525X_DEC_ALL (AD525X_I2C_CMD | (0x6 << 3))
75#define AD525X_INC_ALL (AD525X_I2C_CMD | (0xB << 3))
76
77static s32 ad525x_read(struct i2c_client *client, u8 reg);
78static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value);
79 78
80/* 79/*
81 * Client data (each client gets its own) 80 * Client data (each client gets its own)
82 */ 81 */
83 82
84struct dpot_data { 83struct dpot_data {
84 struct ad_dpot_bus_data bdata;
85 struct mutex update_lock; 85 struct mutex update_lock;
86 unsigned rdac_mask; 86 unsigned rdac_mask;
87 unsigned max_pos; 87 unsigned max_pos;
88 unsigned devid; 88 unsigned long devid;
89 unsigned uid;
90 unsigned feat;
91 unsigned wipers;
92 u16 rdac_cache[MAX_RDACS];
93 DECLARE_BITMAP(otp_en_mask, MAX_RDACS);
89}; 94};
90 95
96static inline int dpot_read_d8(struct dpot_data *dpot)
97{
98 return dpot->bdata.bops->read_d8(dpot->bdata.client);
99}
100
101static inline int dpot_read_r8d8(struct dpot_data *dpot, u8 reg)
102{
103 return dpot->bdata.bops->read_r8d8(dpot->bdata.client, reg);
104}
105
106static inline int dpot_read_r8d16(struct dpot_data *dpot, u8 reg)
107{
108 return dpot->bdata.bops->read_r8d16(dpot->bdata.client, reg);
109}
110
111static inline int dpot_write_d8(struct dpot_data *dpot, u8 val)
112{
113 return dpot->bdata.bops->write_d8(dpot->bdata.client, val);
114}
115
116static inline int dpot_write_r8d8(struct dpot_data *dpot, u8 reg, u16 val)
117{
118 return dpot->bdata.bops->write_r8d8(dpot->bdata.client, reg, val);
119}
120
121static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
122{
123 return dpot->bdata.bops->write_r8d16(dpot->bdata.client, reg, val);
124}
125
126static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
127{
128 unsigned ctrl = 0;
129
130 if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
131
132 if (dpot->feat & F_RDACS_WONLY)
133 return dpot->rdac_cache[reg & DPOT_RDAC_MASK];
134
135 if (dpot->uid == DPOT_UID(AD5291_ID) ||
136 dpot->uid == DPOT_UID(AD5292_ID) ||
137 dpot->uid == DPOT_UID(AD5293_ID))
138 return dpot_read_r8d8(dpot,
139 DPOT_AD5291_READ_RDAC << 2);
140
141 ctrl = DPOT_SPI_READ_RDAC;
142 } else if (reg & DPOT_ADDR_EEPROM) {
143 ctrl = DPOT_SPI_READ_EEPROM;
144 }
145
146 if (dpot->feat & F_SPI_16BIT)
147 return dpot_read_r8d8(dpot, ctrl);
148 else if (dpot->feat & F_SPI_24BIT)
149 return dpot_read_r8d16(dpot, ctrl);
150
151 return -EFAULT;
152}
153
154static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
155{
156 unsigned ctrl = 0;
157 switch (dpot->uid) {
158 case DPOT_UID(AD5246_ID):
159 case DPOT_UID(AD5247_ID):
160 return dpot_read_d8(dpot);
161 case DPOT_UID(AD5245_ID):
162 case DPOT_UID(AD5241_ID):
163 case DPOT_UID(AD5242_ID):
164 case DPOT_UID(AD5243_ID):
165 case DPOT_UID(AD5248_ID):
166 case DPOT_UID(AD5280_ID):
167 case DPOT_UID(AD5282_ID):
168 ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
169 0 : DPOT_AD5291_RDAC_AB;
170 return dpot_read_r8d8(dpot, ctrl);
171 case DPOT_UID(AD5170_ID):
172 case DPOT_UID(AD5171_ID):
173 case DPOT_UID(AD5273_ID):
174 return dpot_read_d8(dpot);
175 case DPOT_UID(AD5172_ID):
176 case DPOT_UID(AD5173_ID):
177 ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
178 0 : DPOT_AD5272_3_A0;
179 return dpot_read_r8d8(dpot, ctrl);
180 default:
181 if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
182 return dpot_read_r8d16(dpot, (reg & 0xF8) |
183 ((reg & 0x7) << 1));
184 else
185 return dpot_read_r8d8(dpot, reg);
186 }
187}
188
189static s32 dpot_read(struct dpot_data *dpot, u8 reg)
190{
191 if (dpot->feat & F_SPI)
192 return dpot_read_spi(dpot, reg);
193 else
194 return dpot_read_i2c(dpot, reg);
195}
196
197static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
198{
199 unsigned val = 0;
200
201 if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
202 if (dpot->feat & F_RDACS_WONLY)
203 dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value;
204
205 if (dpot->feat & F_AD_APPDATA) {
206 if (dpot->feat & F_SPI_8BIT) {
207 val = ((reg & DPOT_RDAC_MASK) <<
208 DPOT_MAX_POS(dpot->devid)) |
209 value;
210 return dpot_write_d8(dpot, val);
211 } else if (dpot->feat & F_SPI_16BIT) {
212 val = ((reg & DPOT_RDAC_MASK) <<
213 DPOT_MAX_POS(dpot->devid)) |
214 value;
215 return dpot_write_r8d8(dpot, val >> 8,
216 val & 0xFF);
217 } else
218 BUG();
219 } else {
220 if (dpot->uid == DPOT_UID(AD5291_ID) ||
221 dpot->uid == DPOT_UID(AD5292_ID) ||
222 dpot->uid == DPOT_UID(AD5293_ID))
223 return dpot_write_r8d8(dpot,
224 (DPOT_AD5291_RDAC << 2) |
225 (value >> 8), value & 0xFF);
226
227 val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK);
228 }
229 } else if (reg & DPOT_ADDR_EEPROM) {
230 val = DPOT_SPI_EEPROM | (reg & DPOT_RDAC_MASK);
231 } else if (reg & DPOT_ADDR_CMD) {
232 switch (reg) {
233 case DPOT_DEC_ALL_6DB:
234 val = DPOT_SPI_DEC_ALL_6DB;
235 break;
236 case DPOT_INC_ALL_6DB:
237 val = DPOT_SPI_INC_ALL_6DB;
238 break;
239 case DPOT_DEC_ALL:
240 val = DPOT_SPI_DEC_ALL;
241 break;
242 case DPOT_INC_ALL:
243 val = DPOT_SPI_INC_ALL;
244 break;
245 }
246 } else
247 BUG();
248
249 if (dpot->feat & F_SPI_16BIT)
250 return dpot_write_r8d8(dpot, val, value);
251 else if (dpot->feat & F_SPI_24BIT)
252 return dpot_write_r8d16(dpot, val, value);
253
254 return -EFAULT;
255}
256
257static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
258{
259 /* Only write the instruction byte for certain commands */
260 unsigned tmp = 0, ctrl = 0;
261
262 switch (dpot->uid) {
263 case DPOT_UID(AD5246_ID):
264 case DPOT_UID(AD5247_ID):
265 return dpot_write_d8(dpot, value);
266 break;
267
268 case DPOT_UID(AD5245_ID):
269 case DPOT_UID(AD5241_ID):
270 case DPOT_UID(AD5242_ID):
271 case DPOT_UID(AD5243_ID):
272 case DPOT_UID(AD5248_ID):
273 case DPOT_UID(AD5280_ID):
274 case DPOT_UID(AD5282_ID):
275 ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
276 0 : DPOT_AD5291_RDAC_AB;
277 return dpot_write_r8d8(dpot, ctrl, value);
278 break;
279 case DPOT_UID(AD5171_ID):
280 case DPOT_UID(AD5273_ID):
281 if (reg & DPOT_ADDR_OTP) {
282 tmp = dpot_read_d8(dpot);
283 if (tmp >> 6) /* Ready to Program? */
284 return -EFAULT;
285 ctrl = DPOT_AD5273_FUSE;
286 }
287 return dpot_write_r8d8(dpot, ctrl, value);
288 break;
289 case DPOT_UID(AD5172_ID):
290 case DPOT_UID(AD5173_ID):
291 ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
292 0 : DPOT_AD5272_3_A0;
293 if (reg & DPOT_ADDR_OTP) {
294 tmp = dpot_read_r8d16(dpot, ctrl);
295 if (tmp >> 14) /* Ready to Program? */
296 return -EFAULT;
297 ctrl |= DPOT_AD5270_2_3_FUSE;
298 }
299 return dpot_write_r8d8(dpot, ctrl, value);
300 break;
301 case DPOT_UID(AD5170_ID):
302 if (reg & DPOT_ADDR_OTP) {
303 tmp = dpot_read_r8d16(dpot, tmp);
304 if (tmp >> 14) /* Ready to Program? */
305 return -EFAULT;
306 ctrl = DPOT_AD5270_2_3_FUSE;
307 }
308 return dpot_write_r8d8(dpot, ctrl, value);
309 break;
310 default:
311 if (reg & DPOT_ADDR_CMD)
312 return dpot_write_d8(dpot, reg);
313
314 if (dpot->max_pos > 256)
315 return dpot_write_r8d16(dpot, (reg & 0xF8) |
316 ((reg & 0x7) << 1), value);
317 else
318 /* All other registers require instruction + data bytes */
319 return dpot_write_r8d8(dpot, reg, value);
320 }
321}
322
323
324static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value)
325{
326 if (dpot->feat & F_SPI)
327 return dpot_write_spi(dpot, reg, value);
328 else
329 return dpot_write_i2c(dpot, reg, value);
330}
331
91/* sysfs functions */ 332/* sysfs functions */
92 333
93static ssize_t sysfs_show_reg(struct device *dev, 334static ssize_t sysfs_show_reg(struct device *dev,
94 struct device_attribute *attr, char *buf, u32 reg) 335 struct device_attribute *attr,
336 char *buf, u32 reg)
95{ 337{
96 struct i2c_client *client = to_i2c_client(dev); 338 struct dpot_data *data = dev_get_drvdata(dev);
97 struct dpot_data *data = i2c_get_clientdata(client);
98 s32 value; 339 s32 value;
99 340
341 if (reg & DPOT_ADDR_OTP_EN)
342 return sprintf(buf, "%s\n",
343 test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask) ?
344 "enabled" : "disabled");
345
346
100 mutex_lock(&data->update_lock); 347 mutex_lock(&data->update_lock);
101 value = ad525x_read(client, reg); 348 value = dpot_read(data, reg);
102 mutex_unlock(&data->update_lock); 349 mutex_unlock(&data->update_lock);
103 350
104 if (value < 0) 351 if (value < 0)
@@ -111,7 +358,7 @@ static ssize_t sysfs_show_reg(struct device *dev,
111 * datasheet (Rev. A) for more details. 358 * datasheet (Rev. A) for more details.
112 */ 359 */
113 360
114 if (reg & AD525X_REG_TOL) 361 if (reg & DPOT_REG_TOL)
115 return sprintf(buf, "0x%04x\n", value & 0xFFFF); 362 return sprintf(buf, "0x%04x\n", value & 0xFFFF);
116 else 363 else
117 return sprintf(buf, "%u\n", value & data->rdac_mask); 364 return sprintf(buf, "%u\n", value & data->rdac_mask);
@@ -121,11 +368,23 @@ static ssize_t sysfs_set_reg(struct device *dev,
121 struct device_attribute *attr, 368 struct device_attribute *attr,
122 const char *buf, size_t count, u32 reg) 369 const char *buf, size_t count, u32 reg)
123{ 370{
124 struct i2c_client *client = to_i2c_client(dev); 371 struct dpot_data *data = dev_get_drvdata(dev);
125 struct dpot_data *data = i2c_get_clientdata(client);
126 unsigned long value; 372 unsigned long value;
127 int err; 373 int err;
128 374
375 if (reg & DPOT_ADDR_OTP_EN) {
 376 if (sysfs_streq(buf, "enabled"))
377 set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
378 else
379 clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
380
381 return count;
382 }
383
384 if ((reg & DPOT_ADDR_OTP) &&
385 !test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask))
386 return -EPERM;
387
129 err = strict_strtoul(buf, 10, &value); 388 err = strict_strtoul(buf, 10, &value);
130 if (err) 389 if (err)
131 return err; 390 return err;
@@ -134,9 +393,11 @@ static ssize_t sysfs_set_reg(struct device *dev,
134 value = data->rdac_mask; 393 value = data->rdac_mask;
135 394
136 mutex_lock(&data->update_lock); 395 mutex_lock(&data->update_lock);
137 ad525x_write(client, reg, value); 396 dpot_write(data, reg, value);
138 if (reg & AD525X_I2C_EEPROM) 397 if (reg & DPOT_ADDR_EEPROM)
139 msleep(26); /* Sleep while the EEPROM updates */ 398 msleep(26); /* Sleep while the EEPROM updates */
399 else if (reg & DPOT_ADDR_OTP)
400 msleep(400); /* Sleep while the OTP updates */
140 mutex_unlock(&data->update_lock); 401 mutex_unlock(&data->update_lock);
141 402
142 return count; 403 return count;
@@ -146,11 +407,10 @@ static ssize_t sysfs_do_cmd(struct device *dev,
146 struct device_attribute *attr, 407 struct device_attribute *attr,
147 const char *buf, size_t count, u32 reg) 408 const char *buf, size_t count, u32 reg)
148{ 409{
149 struct i2c_client *client = to_i2c_client(dev); 410 struct dpot_data *data = dev_get_drvdata(dev);
150 struct dpot_data *data = i2c_get_clientdata(client);
151 411
152 mutex_lock(&data->update_lock); 412 mutex_lock(&data->update_lock);
153 ad525x_write(client, reg, 0); 413 dpot_write(data, reg, 0);
154 mutex_unlock(&data->update_lock); 414 mutex_unlock(&data->update_lock);
155 415
156 return count; 416 return count;
@@ -158,244 +418,131 @@ static ssize_t sysfs_do_cmd(struct device *dev,
158 418
159/* ------------------------------------------------------------------------- */ 419/* ------------------------------------------------------------------------- */
160 420
161static ssize_t show_rdac0(struct device *dev, 421#define DPOT_DEVICE_SHOW(_name, _reg) static ssize_t \
162 struct device_attribute *attr, char *buf) 422show_##_name(struct device *dev, \
163{ 423 struct device_attribute *attr, char *buf) \
164 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC0); 424{ \
165} 425 return sysfs_show_reg(dev, attr, buf, _reg); \
166 426}
167static ssize_t set_rdac0(struct device *dev, 427
168 struct device_attribute *attr, 428#define DPOT_DEVICE_SET(_name, _reg) static ssize_t \
169 const char *buf, size_t count) 429set_##_name(struct device *dev, \
170{ 430 struct device_attribute *attr, \
171 return sysfs_set_reg(dev, attr, buf, count, 431 const char *buf, size_t count) \
172 AD525X_I2C_RDAC | AD525X_RDAC0); 432{ \
173} 433 return sysfs_set_reg(dev, attr, buf, count, _reg); \
174 434}
175static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0); 435
176 436#define DPOT_DEVICE_SHOW_SET(name, reg) \
177static ssize_t show_eeprom0(struct device *dev, 437DPOT_DEVICE_SHOW(name, reg) \
178 struct device_attribute *attr, char *buf) 438DPOT_DEVICE_SET(name, reg) \
179{ 439static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name);
180 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC0); 440
181} 441#define DPOT_DEVICE_SHOW_ONLY(name, reg) \
182 442DPOT_DEVICE_SHOW(name, reg) \
183static ssize_t set_eeprom0(struct device *dev, 443static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, NULL);
184 struct device_attribute *attr, 444
185 const char *buf, size_t count) 445DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0);
186{ 446DPOT_DEVICE_SHOW_SET(eeprom0, DPOT_ADDR_EEPROM | DPOT_RDAC0);
187 return sysfs_set_reg(dev, attr, buf, count, 447DPOT_DEVICE_SHOW_ONLY(tolerance0, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC0);
188 AD525X_I2C_EEPROM | AD525X_RDAC0); 448DPOT_DEVICE_SHOW_SET(otp0, DPOT_ADDR_OTP | DPOT_RDAC0);
189} 449DPOT_DEVICE_SHOW_SET(otp0en, DPOT_ADDR_OTP_EN | DPOT_RDAC0);
190 450
191static DEVICE_ATTR(eeprom0, S_IWUSR | S_IRUGO, show_eeprom0, set_eeprom0); 451DPOT_DEVICE_SHOW_SET(rdac1, DPOT_ADDR_RDAC | DPOT_RDAC1);
192 452DPOT_DEVICE_SHOW_SET(eeprom1, DPOT_ADDR_EEPROM | DPOT_RDAC1);
193static ssize_t show_tolerance0(struct device *dev, 453DPOT_DEVICE_SHOW_ONLY(tolerance1, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC1);
194 struct device_attribute *attr, char *buf) 454DPOT_DEVICE_SHOW_SET(otp1, DPOT_ADDR_OTP | DPOT_RDAC1);
195{ 455DPOT_DEVICE_SHOW_SET(otp1en, DPOT_ADDR_OTP_EN | DPOT_RDAC1);
196 return sysfs_show_reg(dev, attr, buf, 456
197 AD525X_I2C_EEPROM | AD525X_TOL_RDAC0); 457DPOT_DEVICE_SHOW_SET(rdac2, DPOT_ADDR_RDAC | DPOT_RDAC2);
198} 458DPOT_DEVICE_SHOW_SET(eeprom2, DPOT_ADDR_EEPROM | DPOT_RDAC2);
199 459DPOT_DEVICE_SHOW_ONLY(tolerance2, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC2);
200static DEVICE_ATTR(tolerance0, S_IRUGO, show_tolerance0, NULL); 460DPOT_DEVICE_SHOW_SET(otp2, DPOT_ADDR_OTP | DPOT_RDAC2);
201 461DPOT_DEVICE_SHOW_SET(otp2en, DPOT_ADDR_OTP_EN | DPOT_RDAC2);
202/* ------------------------------------------------------------------------- */ 462
203 463DPOT_DEVICE_SHOW_SET(rdac3, DPOT_ADDR_RDAC | DPOT_RDAC3);
204static ssize_t show_rdac1(struct device *dev, 464DPOT_DEVICE_SHOW_SET(eeprom3, DPOT_ADDR_EEPROM | DPOT_RDAC3);
205 struct device_attribute *attr, char *buf) 465DPOT_DEVICE_SHOW_ONLY(tolerance3, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC3);
206{ 466DPOT_DEVICE_SHOW_SET(otp3, DPOT_ADDR_OTP | DPOT_RDAC3);
207 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC1); 467DPOT_DEVICE_SHOW_SET(otp3en, DPOT_ADDR_OTP_EN | DPOT_RDAC3);
208} 468
209 469DPOT_DEVICE_SHOW_SET(rdac4, DPOT_ADDR_RDAC | DPOT_RDAC4);
210static ssize_t set_rdac1(struct device *dev, 470DPOT_DEVICE_SHOW_SET(eeprom4, DPOT_ADDR_EEPROM | DPOT_RDAC4);
211 struct device_attribute *attr, 471DPOT_DEVICE_SHOW_ONLY(tolerance4, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC4);
212 const char *buf, size_t count) 472DPOT_DEVICE_SHOW_SET(otp4, DPOT_ADDR_OTP | DPOT_RDAC4);
213{ 473DPOT_DEVICE_SHOW_SET(otp4en, DPOT_ADDR_OTP_EN | DPOT_RDAC4);
214 return sysfs_set_reg(dev, attr, buf, count, 474
215 AD525X_I2C_RDAC | AD525X_RDAC1); 475DPOT_DEVICE_SHOW_SET(rdac5, DPOT_ADDR_RDAC | DPOT_RDAC5);
216} 476DPOT_DEVICE_SHOW_SET(eeprom5, DPOT_ADDR_EEPROM | DPOT_RDAC5);
217 477DPOT_DEVICE_SHOW_ONLY(tolerance5, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC5);
218static DEVICE_ATTR(rdac1, S_IWUSR | S_IRUGO, show_rdac1, set_rdac1); 478DPOT_DEVICE_SHOW_SET(otp5, DPOT_ADDR_OTP | DPOT_RDAC5);
219 479DPOT_DEVICE_SHOW_SET(otp5en, DPOT_ADDR_OTP_EN | DPOT_RDAC5);
220static ssize_t show_eeprom1(struct device *dev, 480
221 struct device_attribute *attr, char *buf) 481static const struct attribute *dpot_attrib_wipers[] = {
222{ 482 &dev_attr_rdac0.attr,
223 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC1); 483 &dev_attr_rdac1.attr,
224} 484 &dev_attr_rdac2.attr,
225 485 &dev_attr_rdac3.attr,
226static ssize_t set_eeprom1(struct device *dev, 486 &dev_attr_rdac4.attr,
227 struct device_attribute *attr, 487 &dev_attr_rdac5.attr,
228 const char *buf, size_t count) 488 NULL
229{ 489};
230 return sysfs_set_reg(dev, attr, buf, count,
231 AD525X_I2C_EEPROM | AD525X_RDAC1);
232}
233
234static DEVICE_ATTR(eeprom1, S_IWUSR | S_IRUGO, show_eeprom1, set_eeprom1);
235
236static ssize_t show_tolerance1(struct device *dev,
237 struct device_attribute *attr, char *buf)
238{
239 return sysfs_show_reg(dev, attr, buf,
240 AD525X_I2C_EEPROM | AD525X_TOL_RDAC1);
241}
242
243static DEVICE_ATTR(tolerance1, S_IRUGO, show_tolerance1, NULL);
244
245/* ------------------------------------------------------------------------- */
246
247static ssize_t show_rdac2(struct device *dev,
248 struct device_attribute *attr, char *buf)
249{
250 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC2);
251}
252
253static ssize_t set_rdac2(struct device *dev,
254 struct device_attribute *attr,
255 const char *buf, size_t count)
256{
257 return sysfs_set_reg(dev, attr, buf, count,
258 AD525X_I2C_RDAC | AD525X_RDAC2);
259}
260
261static DEVICE_ATTR(rdac2, S_IWUSR | S_IRUGO, show_rdac2, set_rdac2);
262
263static ssize_t show_eeprom2(struct device *dev,
264 struct device_attribute *attr, char *buf)
265{
266 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC2);
267}
268
269static ssize_t set_eeprom2(struct device *dev,
270 struct device_attribute *attr,
271 const char *buf, size_t count)
272{
273 return sysfs_set_reg(dev, attr, buf, count,
274 AD525X_I2C_EEPROM | AD525X_RDAC2);
275}
276
277static DEVICE_ATTR(eeprom2, S_IWUSR | S_IRUGO, show_eeprom2, set_eeprom2);
278
279static ssize_t show_tolerance2(struct device *dev,
280 struct device_attribute *attr, char *buf)
281{
282 return sysfs_show_reg(dev, attr, buf,
283 AD525X_I2C_EEPROM | AD525X_TOL_RDAC2);
284}
285
286static DEVICE_ATTR(tolerance2, S_IRUGO, show_tolerance2, NULL);
287
288/* ------------------------------------------------------------------------- */
289
290static ssize_t show_rdac3(struct device *dev,
291 struct device_attribute *attr, char *buf)
292{
293 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC3);
294}
295
296static ssize_t set_rdac3(struct device *dev,
297 struct device_attribute *attr,
298 const char *buf, size_t count)
299{
300 return sysfs_set_reg(dev, attr, buf, count,
301 AD525X_I2C_RDAC | AD525X_RDAC3);
302}
303
304static DEVICE_ATTR(rdac3, S_IWUSR | S_IRUGO, show_rdac3, set_rdac3);
305
306static ssize_t show_eeprom3(struct device *dev,
307 struct device_attribute *attr, char *buf)
308{
309 return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC3);
310}
311
312static ssize_t set_eeprom3(struct device *dev,
313 struct device_attribute *attr,
314 const char *buf, size_t count)
315{
316 return sysfs_set_reg(dev, attr, buf, count,
317 AD525X_I2C_EEPROM | AD525X_RDAC3);
318}
319 490
320static DEVICE_ATTR(eeprom3, S_IWUSR | S_IRUGO, show_eeprom3, set_eeprom3); 491static const struct attribute *dpot_attrib_eeprom[] = {
492 &dev_attr_eeprom0.attr,
493 &dev_attr_eeprom1.attr,
494 &dev_attr_eeprom2.attr,
495 &dev_attr_eeprom3.attr,
496 &dev_attr_eeprom4.attr,
497 &dev_attr_eeprom5.attr,
498 NULL
499};
321 500
322static ssize_t show_tolerance3(struct device *dev, 501static const struct attribute *dpot_attrib_otp[] = {
323 struct device_attribute *attr, char *buf) 502 &dev_attr_otp0.attr,
324{ 503 &dev_attr_otp1.attr,
325 return sysfs_show_reg(dev, attr, buf, 504 &dev_attr_otp2.attr,
326 AD525X_I2C_EEPROM | AD525X_TOL_RDAC3); 505 &dev_attr_otp3.attr,
327} 506 &dev_attr_otp4.attr,
507 &dev_attr_otp5.attr,
508 NULL
509};
328 510
329static DEVICE_ATTR(tolerance3, S_IRUGO, show_tolerance3, NULL); 511static const struct attribute *dpot_attrib_otp_en[] = {
330 512 &dev_attr_otp0en.attr,
331static struct attribute *ad525x_attributes_wipers[4][4] = { 513 &dev_attr_otp1en.attr,
332 { 514 &dev_attr_otp2en.attr,
333 &dev_attr_rdac0.attr, 515 &dev_attr_otp3en.attr,
334 &dev_attr_eeprom0.attr, 516 &dev_attr_otp4en.attr,
335 &dev_attr_tolerance0.attr, 517 &dev_attr_otp5en.attr,
336 NULL 518 NULL
337 }, {
338 &dev_attr_rdac1.attr,
339 &dev_attr_eeprom1.attr,
340 &dev_attr_tolerance1.attr,
341 NULL
342 }, {
343 &dev_attr_rdac2.attr,
344 &dev_attr_eeprom2.attr,
345 &dev_attr_tolerance2.attr,
346 NULL
347 }, {
348 &dev_attr_rdac3.attr,
349 &dev_attr_eeprom3.attr,
350 &dev_attr_tolerance3.attr,
351 NULL
352 }
353}; 519};
354 520
355static const struct attribute_group ad525x_group_wipers[] = { 521static const struct attribute *dpot_attrib_tolerance[] = {
356 {.attrs = ad525x_attributes_wipers[AD525X_RDAC0]}, 522 &dev_attr_tolerance0.attr,
357 {.attrs = ad525x_attributes_wipers[AD525X_RDAC1]}, 523 &dev_attr_tolerance1.attr,
358 {.attrs = ad525x_attributes_wipers[AD525X_RDAC2]}, 524 &dev_attr_tolerance2.attr,
359 {.attrs = ad525x_attributes_wipers[AD525X_RDAC3]}, 525 &dev_attr_tolerance3.attr,
526 &dev_attr_tolerance4.attr,
527 &dev_attr_tolerance5.attr,
528 NULL
360}; 529};
361 530
362/* ------------------------------------------------------------------------- */ 531/* ------------------------------------------------------------------------- */
363 532
364static ssize_t set_inc_all(struct device *dev, 533#define DPOT_DEVICE_DO_CMD(_name, _cmd) static ssize_t \
365 struct device_attribute *attr, 534set_##_name(struct device *dev, \
366 const char *buf, size_t count) 535 struct device_attribute *attr, \
367{ 536 const char *buf, size_t count) \
368 return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL); 537{ \
369} 538 return sysfs_do_cmd(dev, attr, buf, count, _cmd); \
539} \
540static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, NULL, set_##_name);
370 541
371static DEVICE_ATTR(inc_all, S_IWUSR, NULL, set_inc_all); 542DPOT_DEVICE_DO_CMD(inc_all, DPOT_INC_ALL);
372 543DPOT_DEVICE_DO_CMD(dec_all, DPOT_DEC_ALL);
373static ssize_t set_dec_all(struct device *dev, 544DPOT_DEVICE_DO_CMD(inc_all_6db, DPOT_INC_ALL_6DB);
374 struct device_attribute *attr, 545DPOT_DEVICE_DO_CMD(dec_all_6db, DPOT_DEC_ALL_6DB);
375 const char *buf, size_t count)
376{
377 return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL);
378}
379
380static DEVICE_ATTR(dec_all, S_IWUSR, NULL, set_dec_all);
381
382static ssize_t set_inc_all_6db(struct device *dev,
383 struct device_attribute *attr,
384 const char *buf, size_t count)
385{
386 return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL_6DB);
387}
388
389static DEVICE_ATTR(inc_all_6db, S_IWUSR, NULL, set_inc_all_6db);
390
391static ssize_t set_dec_all_6db(struct device *dev,
392 struct device_attribute *attr,
393 const char *buf, size_t count)
394{
395 return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL_6DB);
396}
397
398static DEVICE_ATTR(dec_all_6db, S_IWUSR, NULL, set_dec_all_6db);
399 546
400static struct attribute *ad525x_attributes_commands[] = { 547static struct attribute *ad525x_attributes_commands[] = {
401 &dev_attr_inc_all.attr, 548 &dev_attr_inc_all.attr,
@@ -409,74 +556,56 @@ static const struct attribute_group ad525x_group_commands = {
409 .attrs = ad525x_attributes_commands, 556 .attrs = ad525x_attributes_commands,
410}; 557};
411 558
412/* ------------------------------------------------------------------------- */ 559__devinit int ad_dpot_add_files(struct device *dev,
413 560 unsigned features, unsigned rdac)
414/* i2c device functions */ 561{
562 int err = sysfs_create_file(&dev->kobj,
563 dpot_attrib_wipers[rdac]);
564 if (features & F_CMD_EEP)
565 err |= sysfs_create_file(&dev->kobj,
566 dpot_attrib_eeprom[rdac]);
567 if (features & F_CMD_TOL)
568 err |= sysfs_create_file(&dev->kobj,
569 dpot_attrib_tolerance[rdac]);
570 if (features & F_CMD_OTP) {
571 err |= sysfs_create_file(&dev->kobj,
572 dpot_attrib_otp_en[rdac]);
573 err |= sysfs_create_file(&dev->kobj,
574 dpot_attrib_otp[rdac]);
575 }
415 576
416/** 577 if (err)
417 * ad525x_read - return the value contained in the specified register 578 dev_err(dev, "failed to register sysfs hooks for RDAC%d\n",
418 * on the AD5258 device. 579 rdac);
419 * @client: value returned from i2c_new_device()
420 * @reg: the register to read
421 *
422 * If the tolerance register is specified, 2 bytes are returned.
423 * Otherwise, 1 byte is returned. A negative value indicates an error
424 * occurred while reading the register.
425 */
426static s32 ad525x_read(struct i2c_client *client, u8 reg)
427{
428 struct dpot_data *data = i2c_get_clientdata(client);
429 580
430 if ((reg & AD525X_REG_TOL) || (data->max_pos > 256)) 581 return err;
431 return i2c_smbus_read_word_data(client, (reg & 0xF8) |
432 ((reg & 0x7) << 1));
433 else
434 return i2c_smbus_read_byte_data(client, reg);
435} 582}
436 583
437/** 584inline void ad_dpot_remove_files(struct device *dev,
438 * ad525x_write - store the given value in the specified register on 585 unsigned features, unsigned rdac)
439 * the AD5258 device. 586{
440 * @client: value returned from i2c_new_device() 587 sysfs_remove_file(&dev->kobj,
441 * @reg: the register to write 588 dpot_attrib_wipers[rdac]);
442 * @value: the byte to store in the register 589 if (features & F_CMD_EEP)
443 * 590 sysfs_remove_file(&dev->kobj,
444 * For certain instructions that do not require a data byte, "NULL" 591 dpot_attrib_eeprom[rdac]);
445 * should be specified for the "value" parameter. These instructions 592 if (features & F_CMD_TOL)
446 * include NOP, RESTORE_FROM_EEPROM, and STORE_TO_EEPROM. 593 sysfs_remove_file(&dev->kobj,
447 * 594 dpot_attrib_tolerance[rdac]);
448 * A negative return value indicates an error occurred while reading 595 if (features & F_CMD_OTP) {
449 * the register. 596 sysfs_remove_file(&dev->kobj,
450 */ 597 dpot_attrib_otp_en[rdac]);
451static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value) 598 sysfs_remove_file(&dev->kobj,
452{ 599 dpot_attrib_otp[rdac]);
453 struct dpot_data *data = i2c_get_clientdata(client); 600 }
454
455 /* Only write the instruction byte for certain commands */
456 if (reg & AD525X_I2C_CMD)
457 return i2c_smbus_write_byte(client, reg);
458
459 if (data->max_pos > 256)
460 return i2c_smbus_write_word_data(client, (reg & 0xF8) |
461 ((reg & 0x7) << 1), value);
462 else
463 /* All other registers require instruction + data bytes */
464 return i2c_smbus_write_byte_data(client, reg, value);
465} 601}
466 602
467static int ad525x_probe(struct i2c_client *client, 603__devinit int ad_dpot_probe(struct device *dev,
468 const struct i2c_device_id *id) 604 struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id)
469{ 605{
470 struct device *dev = &client->dev;
471 struct dpot_data *data;
472 int err = 0;
473 606
474 dev_dbg(dev, "%s\n", __func__); 607 struct dpot_data *data;
475 608 int i, err = 0;
476 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
477 dev_err(dev, "missing I2C functionality for this driver\n");
478 goto exit;
479 }
480 609
481 data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL); 610 data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL);
482 if (!data) { 611 if (!data) {
@@ -484,183 +613,74 @@ static int ad525x_probe(struct i2c_client *client,
484 goto exit; 613 goto exit;
485 } 614 }
486 615
487 i2c_set_clientdata(client, data); 616 dev_set_drvdata(dev, data);
488 mutex_init(&data->update_lock); 617 mutex_init(&data->update_lock);
489 618
490 switch (id->driver_data) { 619 data->bdata = *bdata;
491 case AD5258_ID: 620 data->devid = id->devid;
492 data->max_pos = AD5258_MAX_POSITION; 621
493 err = sysfs_create_group(&dev->kobj, 622 data->max_pos = 1 << DPOT_MAX_POS(data->devid);
494 &ad525x_group_wipers[AD525X_RDAC0]); 623 data->rdac_mask = data->max_pos - 1;
495 break; 624 data->feat = DPOT_FEAT(data->devid);
496 case AD5259_ID: 625 data->uid = DPOT_UID(data->devid);
497 data->max_pos = AD5259_MAX_POSITION; 626 data->wipers = DPOT_WIPERS(data->devid);
498 err = sysfs_create_group(&dev->kobj, 627
499 &ad525x_group_wipers[AD525X_RDAC0]); 628 for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
500 break; 629 if (data->wipers & (1 << i)) {
501 case AD5251_ID: 630 err = ad_dpot_add_files(dev, data->feat, i);
502 data->max_pos = AD5251_MAX_POSITION; 631 if (err)
503 err = sysfs_create_group(&dev->kobj, 632 goto exit_remove_files;
504 &ad525x_group_wipers[AD525X_RDAC1]); 633 /* power-up midscale */
505 err |= sysfs_create_group(&dev->kobj, 634 if (data->feat & F_RDACS_WONLY)
506 &ad525x_group_wipers[AD525X_RDAC3]); 635 data->rdac_cache[i] = data->max_pos / 2;
507 err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands); 636 }
508 break; 637
509 case AD5252_ID: 638 if (data->feat & F_CMD_INC)
510 data->max_pos = AD5252_MAX_POSITION; 639 err = sysfs_create_group(&dev->kobj, &ad525x_group_commands);
511 err = sysfs_create_group(&dev->kobj,
512 &ad525x_group_wipers[AD525X_RDAC1]);
513 err |= sysfs_create_group(&dev->kobj,
514 &ad525x_group_wipers[AD525X_RDAC3]);
515 err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
516 break;
517 case AD5253_ID:
518 data->max_pos = AD5253_MAX_POSITION;
519 err = sysfs_create_group(&dev->kobj,
520 &ad525x_group_wipers[AD525X_RDAC0]);
521 err |= sysfs_create_group(&dev->kobj,
522 &ad525x_group_wipers[AD525X_RDAC1]);
523 err |= sysfs_create_group(&dev->kobj,
524 &ad525x_group_wipers[AD525X_RDAC2]);
525 err |= sysfs_create_group(&dev->kobj,
526 &ad525x_group_wipers[AD525X_RDAC3]);
527 err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
528 break;
529 case AD5254_ID:
530 data->max_pos = AD5254_MAX_POSITION;
531 err = sysfs_create_group(&dev->kobj,
532 &ad525x_group_wipers[AD525X_RDAC0]);
533 err |= sysfs_create_group(&dev->kobj,
534 &ad525x_group_wipers[AD525X_RDAC1]);
535 err |= sysfs_create_group(&dev->kobj,
536 &ad525x_group_wipers[AD525X_RDAC2]);
537 err |= sysfs_create_group(&dev->kobj,
538 &ad525x_group_wipers[AD525X_RDAC3]);
539 err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
540 break;
541 case AD5255_ID:
542 data->max_pos = AD5255_MAX_POSITION;
543 err = sysfs_create_group(&dev->kobj,
544 &ad525x_group_wipers[AD525X_RDAC0]);
545 err |= sysfs_create_group(&dev->kobj,
546 &ad525x_group_wipers[AD525X_RDAC1]);
547 err |= sysfs_create_group(&dev->kobj,
548 &ad525x_group_wipers[AD525X_RDAC2]);
549 err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
550 break;
551 default:
552 err = -ENODEV;
553 goto exit_free;
554 }
555 640
556 if (err) { 641 if (err) {
557 dev_err(dev, "failed to register sysfs hooks\n"); 642 dev_err(dev, "failed to register sysfs hooks\n");
558 goto exit_free; 643 goto exit_free;
559 } 644 }
560 645
561 data->devid = id->driver_data;
562 data->rdac_mask = data->max_pos - 1;
563
564 dev_info(dev, "%s %d-Position Digital Potentiometer registered\n", 646 dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
565 id->name, data->max_pos); 647 id->name, data->max_pos);
566 648
567 return 0; 649 return 0;
568 650
651exit_remove_files:
652 for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
653 if (data->wipers & (1 << i))
654 ad_dpot_remove_files(dev, data->feat, i);
655
569exit_free: 656exit_free:
570 kfree(data); 657 kfree(data);
571 i2c_set_clientdata(client, NULL); 658 dev_set_drvdata(dev, NULL);
572exit: 659exit:
573 dev_err(dev, "failed to create client\n"); 660 dev_err(dev, "failed to create client for %s ID 0x%lX\n",
661 id->name, id->devid);
574 return err; 662 return err;
575} 663}
664EXPORT_SYMBOL(ad_dpot_probe);
576 665
577static int __devexit ad525x_remove(struct i2c_client *client) 666__devexit int ad_dpot_remove(struct device *dev)
578{ 667{
579 struct dpot_data *data = i2c_get_clientdata(client); 668 struct dpot_data *data = dev_get_drvdata(dev);
580 struct device *dev = &client->dev; 669 int i;
581 670
582 switch (data->devid) { 671 for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
583 case AD5258_ID: 672 if (data->wipers & (1 << i))
584 case AD5259_ID: 673 ad_dpot_remove_files(dev, data->feat, i);
585 sysfs_remove_group(&dev->kobj,
586 &ad525x_group_wipers[AD525X_RDAC0]);
587 break;
588 case AD5251_ID:
589 case AD5252_ID:
590 sysfs_remove_group(&dev->kobj,
591 &ad525x_group_wipers[AD525X_RDAC1]);
592 sysfs_remove_group(&dev->kobj,
593 &ad525x_group_wipers[AD525X_RDAC3]);
594 sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
595 break;
596 case AD5253_ID:
597 case AD5254_ID:
598 sysfs_remove_group(&dev->kobj,
599 &ad525x_group_wipers[AD525X_RDAC0]);
600 sysfs_remove_group(&dev->kobj,
601 &ad525x_group_wipers[AD525X_RDAC1]);
602 sysfs_remove_group(&dev->kobj,
603 &ad525x_group_wipers[AD525X_RDAC2]);
604 sysfs_remove_group(&dev->kobj,
605 &ad525x_group_wipers[AD525X_RDAC3]);
606 sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
607 break;
608 case AD5255_ID:
609 sysfs_remove_group(&dev->kobj,
610 &ad525x_group_wipers[AD525X_RDAC0]);
611 sysfs_remove_group(&dev->kobj,
612 &ad525x_group_wipers[AD525X_RDAC1]);
613 sysfs_remove_group(&dev->kobj,
614 &ad525x_group_wipers[AD525X_RDAC2]);
615 sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
616 break;
617 }
618 674
619 i2c_set_clientdata(client, NULL);
620 kfree(data); 675 kfree(data);
621 676
622 return 0; 677 return 0;
623} 678}
679EXPORT_SYMBOL(ad_dpot_remove);
624 680
625static const struct i2c_device_id ad525x_idtable[] = {
626 {"ad5258", AD5258_ID},
627 {"ad5259", AD5259_ID},
628 {"ad5251", AD5251_ID},
629 {"ad5252", AD5252_ID},
630 {"ad5253", AD5253_ID},
631 {"ad5254", AD5254_ID},
632 {"ad5255", AD5255_ID},
633 {}
634};
635
636MODULE_DEVICE_TABLE(i2c, ad525x_idtable);
637
638static struct i2c_driver ad525x_driver = {
639 .driver = {
640 .owner = THIS_MODULE,
641 .name = DRIVER_NAME,
642 },
643 .id_table = ad525x_idtable,
644 .probe = ad525x_probe,
645 .remove = __devexit_p(ad525x_remove),
646};
647
648static int __init ad525x_init(void)
649{
650 return i2c_add_driver(&ad525x_driver);
651}
652
653module_init(ad525x_init);
654
655static void __exit ad525x_exit(void)
656{
657 i2c_del_driver(&ad525x_driver);
658}
659
660module_exit(ad525x_exit);
661 681
662MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, " 682MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
663 "Michael Hennerich <hennerich@blackfin.uclinux.org>, "); 683 "Michael Hennerich <hennerich@blackfin.uclinux.org>");
664MODULE_DESCRIPTION("AD5258/9 digital potentiometer driver"); 684MODULE_DESCRIPTION("Digital potentiometer driver");
665MODULE_LICENSE("GPL"); 685MODULE_LICENSE("GPL");
666MODULE_VERSION(DRIVER_VERSION); 686MODULE_VERSION(DRIVER_VERSION);
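
An aside on the refactor above: the DPOT_DEVICE_SHOW_SET macro generates exactly the show/set/DEVICE_ATTR triple that the old per-wiper code spelled out by hand. A minimal sketch of the expansion for rdac0 (illustrative only; it assumes DPOT_DEVICE_SHOW mirrors DPOT_DEVICE_SET using sysfs_show_reg, which is what the replaced hand-written code suggests):

/* Sketch of DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0)
 * after preprocessing -- not part of the patch itself. */
static ssize_t show_rdac0(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sysfs_show_reg(dev, attr, buf, DPOT_ADDR_RDAC | DPOT_RDAC0);
}

static ssize_t set_rdac0(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return sysfs_set_reg(dev, attr, buf, count,
			     DPOT_ADDR_RDAC | DPOT_RDAC0);
}

static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0);

This is the same code the left-hand column wrote out once per wiper, modulo the AD525X_* to DPOT_* constant renames.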
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
new file mode 100644
index 000000000000..78b89fd2e2fd
--- /dev/null
+++ b/drivers/misc/ad525x_dpot.h
@@ -0,0 +1,202 @@
1/*
2 * Driver for the Analog Devices digital potentiometers
3 *
4 * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef _AD_DPOT_H_
10#define _AD_DPOT_H_
11
12#include <linux/types.h>
13
14#define DPOT_CONF(features, wipers, max_pos, uid) \
15 (((features) << 18) | (((wipers) & 0xFF) << 10) | \
16 ((max_pos & 0xF) << 6) | (uid & 0x3F))
17
18#define DPOT_UID(conf) (conf & 0x3F)
19#define DPOT_MAX_POS(conf) ((conf >> 6) & 0xF)
20#define DPOT_WIPERS(conf) ((conf >> 10) & 0xFF)
21#define DPOT_FEAT(conf) (conf >> 18)
22
23#define BRDAC0 (1 << 0)
24#define BRDAC1 (1 << 1)
25#define BRDAC2 (1 << 2)
26#define BRDAC3 (1 << 3)
27#define BRDAC4 (1 << 4)
28#define BRDAC5 (1 << 5)
29#define MAX_RDACS 6
30
31#define F_CMD_INC (1 << 0) /* Features INC/DEC ALL, 6dB */
32#define F_CMD_EEP (1 << 1) /* Features EEPROM */
33#define F_CMD_OTP (1 << 2) /* Features OTP */
34#define F_CMD_TOL (1 << 3) /* RDACS feature Tolerance REG */
35#define F_RDACS_RW (1 << 4) /* RDACS are Read/Write */
36#define F_RDACS_WONLY (1 << 5) /* RDACS are Write only */
37#define F_AD_APPDATA (1 << 6) /* RDAC Address append to data */
38#define F_SPI_8BIT (1 << 7) /* All SPI XFERS are 8-bit */
39#define F_SPI_16BIT (1 << 8) /* All SPI XFERS are 16-bit */
40#define F_SPI_24BIT (1 << 9) /* All SPI XFERS are 24-bit */
41
42#define F_RDACS_RW_TOL (F_RDACS_RW | F_CMD_EEP | F_CMD_TOL)
43#define F_RDACS_RW_EEP (F_RDACS_RW | F_CMD_EEP)
44#define F_SPI (F_SPI_8BIT | F_SPI_16BIT | F_SPI_24BIT)
45
46enum dpot_devid {
47 AD5258_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 6, 0), /* I2C */
48 AD5259_ID = DPOT_CONF(F_RDACS_RW_TOL, BRDAC0, 8, 1),
49 AD5251_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
50 BRDAC0 | BRDAC3, 6, 2),
51 AD5252_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
52 BRDAC0 | BRDAC3, 8, 3),
53 AD5253_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
54 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 4),
55 AD5254_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
56 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 5),
57 AD5255_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
58 BRDAC0 | BRDAC1 | BRDAC2, 9, 6),
59 AD5160_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
60 BRDAC0, 8, 7), /* SPI */
61 AD5161_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
62 BRDAC0, 8, 8),
63 AD5162_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
64 BRDAC0 | BRDAC1, 8, 9),
65 AD5165_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
66 BRDAC0, 8, 10),
67 AD5200_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
68 BRDAC0, 8, 11),
69 AD5201_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
70 BRDAC0, 5, 12),
71 AD5203_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
72 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 13),
73 AD5204_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
74 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 14),
75 AD5206_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
76 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3 | BRDAC4 | BRDAC5,
77 8, 15),
78 AD5207_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
79 BRDAC0 | BRDAC1, 8, 16),
80 AD5231_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
81 BRDAC0, 10, 17),
82 AD5232_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT,
83 BRDAC0 | BRDAC1, 8, 18),
84 AD5233_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_16BIT,
85 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 6, 19),
86 AD5235_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
87 BRDAC0 | BRDAC1, 10, 20),
88 AD5260_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
89 BRDAC0, 8, 21),
90 AD5262_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
91 BRDAC0 | BRDAC1, 8, 22),
92 AD5263_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
93 BRDAC0 | BRDAC1 | BRDAC2 | BRDAC3, 8, 23),
94 AD5290_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
95 BRDAC0, 8, 24),
96 AD5291_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 8, 25),
97 AD5292_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 26),
98 AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27),
99 AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
100 BRDAC0, 7, 28),
101 AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT,
102 BRDAC0, 8, 29),
103 AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
104 BRDAC0 | BRDAC1, 8, 30),
105 AD8403_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT,
106 BRDAC0 | BRDAC1 | BRDAC2, 8, 31),
107 ADN2850_ID = DPOT_CONF(F_RDACS_RW_EEP | F_CMD_INC | F_SPI_24BIT,
108 BRDAC0 | BRDAC1, 10, 32),
109 AD5241_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 33),
110 AD5242_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 34),
111 AD5243_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 35),
112 AD5245_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 36),
113 AD5246_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 37),
114 AD5247_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 7, 38),
115 AD5248_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 39),
116 AD5280_ID = DPOT_CONF(F_RDACS_RW, BRDAC0, 8, 40),
117 AD5282_ID = DPOT_CONF(F_RDACS_RW, BRDAC0 | BRDAC1, 8, 41),
118 ADN2860_ID = DPOT_CONF(F_RDACS_RW_TOL | F_CMD_INC,
119 BRDAC0 | BRDAC1 | BRDAC2, 9, 42),
120 AD5273_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 43),
121 AD5171_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 6, 44),
122 AD5170_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0, 8, 45),
123 AD5172_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 46),
124 AD5173_ID = DPOT_CONF(F_RDACS_RW | F_CMD_OTP, BRDAC0 | BRDAC1, 8, 47),
125};
126
127#define DPOT_RDAC0 0
128#define DPOT_RDAC1 1
129#define DPOT_RDAC2 2
130#define DPOT_RDAC3 3
131#define DPOT_RDAC4 4
132#define DPOT_RDAC5 5
133
134#define DPOT_RDAC_MASK 0x1F
135
136#define DPOT_REG_TOL 0x18
137#define DPOT_TOL_RDAC0 (DPOT_REG_TOL | DPOT_RDAC0)
138#define DPOT_TOL_RDAC1 (DPOT_REG_TOL | DPOT_RDAC1)
139#define DPOT_TOL_RDAC2 (DPOT_REG_TOL | DPOT_RDAC2)
140#define DPOT_TOL_RDAC3 (DPOT_REG_TOL | DPOT_RDAC3)
141#define DPOT_TOL_RDAC4 (DPOT_REG_TOL | DPOT_RDAC4)
142#define DPOT_TOL_RDAC5 (DPOT_REG_TOL | DPOT_RDAC5)
143
144/* RDAC-to-EEPROM Interface Commands */
145#define DPOT_ADDR_RDAC (0x0 << 5)
146#define DPOT_ADDR_EEPROM (0x1 << 5)
147#define DPOT_ADDR_OTP (0x1 << 6)
148#define DPOT_ADDR_CMD (0x1 << 7)
149#define DPOT_ADDR_OTP_EN (0x1 << 9)
150
151#define DPOT_DEC_ALL_6DB (DPOT_ADDR_CMD | (0x4 << 3))
152#define DPOT_INC_ALL_6DB (DPOT_ADDR_CMD | (0x9 << 3))
153#define DPOT_DEC_ALL (DPOT_ADDR_CMD | (0x6 << 3))
154#define DPOT_INC_ALL (DPOT_ADDR_CMD | (0xB << 3))
155
156#define DPOT_SPI_RDAC 0xB0
157#define DPOT_SPI_EEPROM 0x30
158#define DPOT_SPI_READ_RDAC 0xA0
159#define DPOT_SPI_READ_EEPROM 0x90
160#define DPOT_SPI_DEC_ALL_6DB 0x50
161#define DPOT_SPI_INC_ALL_6DB 0xD0
162#define DPOT_SPI_DEC_ALL 0x70
163#define DPOT_SPI_INC_ALL 0xF0
164
165/* AD5291/2/3 use special commands */
166#define DPOT_AD5291_RDAC 0x01
167#define DPOT_AD5291_READ_RDAC 0x02
168
169/* AD524x use special commands */
170#define DPOT_AD5291_RDAC_AB 0x80
171
172#define DPOT_AD5273_FUSE 0x80
173#define DPOT_AD5270_2_3_FUSE 0x20
174#define DPOT_AD5270_2_3_OW 0x08
175#define DPOT_AD5272_3_A0 0x08
176#define DPOT_AD5270_2FUSE 0x80
177
178struct dpot_data;
179
180struct ad_dpot_bus_ops {
181 int (*read_d8) (void *client);
182 int (*read_r8d8) (void *client, u8 reg);
183 int (*read_r8d16) (void *client, u8 reg);
184 int (*write_d8) (void *client, u8 val);
185 int (*write_r8d8) (void *client, u8 reg, u8 val);
186 int (*write_r8d16) (void *client, u8 reg, u16 val);
187};
188
189struct ad_dpot_bus_data {
190 void *client;
191 const struct ad_dpot_bus_ops *bops;
192};
193
194struct ad_dpot_id {
195 char *name;
196 unsigned long devid;
197};
198
199int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id);
200int ad_dpot_remove(struct device *dev);
201
202#endif
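
The DPOT_CONF encoding in this header packs feature flags, a wiper bitmap, a position-count exponent, and a unique ID into a single devid word, which ad_dpot_probe() above unpacks with the accessor macros. A standalone userspace sketch of the round trip for the AD5258 configuration (illustrative only; the kernel code never uses stdio):

/* Standalone sketch of the DPOT_CONF round trip -- illustrative only. */
#include <stdio.h>

#define DPOT_CONF(features, wipers, max_pos, uid) \
	(((features) << 18) | (((wipers) & 0xFF) << 10) | \
	((max_pos & 0xF) << 6) | (uid & 0x3F))

#define DPOT_UID(conf)		(conf & 0x3F)
#define DPOT_MAX_POS(conf)	((conf >> 6) & 0xF)
#define DPOT_WIPERS(conf)	((conf >> 10) & 0xFF)
#define DPOT_FEAT(conf)		(conf >> 18)

int main(void)
{
	/* AD5258_ID: F_RDACS_RW_TOL (0x1A), wiper bitmap BRDAC0,
	 * max_pos exponent 6, uid 0 -- values taken from the header. */
	unsigned long devid = DPOT_CONF(0x1AUL, 0x01, 6, 0);

	/* ad_dpot_probe() computes the position count as
	 * 1 << DPOT_MAX_POS(devid), so exponent 6 means 64 positions. */
	printf("uid=%lu positions=%lu wipers=0x%02lX feat=0x%lX\n",
	       DPOT_UID(devid), 1UL << DPOT_MAX_POS(devid),
	       DPOT_WIPERS(devid), DPOT_FEAT(devid));
	/* prints: uid=0 positions=64 wipers=0x01 feat=0x1A */
	return 0;
}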
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 31a991161f0a..5bfb2a2041b8 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -75,6 +75,9 @@ enum ctype {
75 UNALIGNED_LOAD_STORE_WRITE, 75 UNALIGNED_LOAD_STORE_WRITE,
76 OVERWRITE_ALLOCATION, 76 OVERWRITE_ALLOCATION,
77 WRITE_AFTER_FREE, 77 WRITE_AFTER_FREE,
78 SOFTLOCKUP,
79 HARDLOCKUP,
80 HUNG_TASK,
78}; 81};
79 82
80static char* cp_name[] = { 83static char* cp_name[] = {
@@ -99,6 +102,9 @@ static char* cp_type[] = {
99 "UNALIGNED_LOAD_STORE_WRITE", 102 "UNALIGNED_LOAD_STORE_WRITE",
100 "OVERWRITE_ALLOCATION", 103 "OVERWRITE_ALLOCATION",
101 "WRITE_AFTER_FREE", 104 "WRITE_AFTER_FREE",
105 "SOFTLOCKUP",
106 "HARDLOCKUP",
107 "HUNG_TASK",
102}; 108};
103 109
104static struct jprobe lkdtm; 110static struct jprobe lkdtm;
@@ -320,6 +326,20 @@ static void lkdtm_do_action(enum ctype which)
320 memset(data, 0x78, len); 326 memset(data, 0x78, len);
321 break; 327 break;
322 } 328 }
329 case SOFTLOCKUP:
330 preempt_disable();
331 for (;;)
332 cpu_relax();
333 break;
334 case HARDLOCKUP:
335 local_irq_disable();
336 for (;;)
337 cpu_relax();
338 break;
339 case HUNG_TASK:
340 set_current_state(TASK_UNINTERRUPTIBLE);
341 schedule();
342 break;
323 case NONE: 343 case NONE:
324 default: 344 default:
325 break; 345 break;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3168ebd616b2..569e94da844c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1252,9 +1252,8 @@ EXPORT_SYMBOL(mmc_card_can_sleep);
1252/** 1252/**
1253 * mmc_suspend_host - suspend a host 1253 * mmc_suspend_host - suspend a host
1254 * @host: mmc host 1254 * @host: mmc host
1255 * @state: suspend mode (PM_SUSPEND_xxx)
1256 */ 1255 */
1257int mmc_suspend_host(struct mmc_host *host, pm_message_t state) 1256int mmc_suspend_host(struct mmc_host *host)
1258{ 1257{
1259 int err = 0; 1258 int err = 0;
1260 1259
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 0d96080d44b0..63772e7e7608 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -79,8 +79,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
79 * we cannot use the retries field in mmc_command. 79 * we cannot use the retries field in mmc_command.
80 */ 80 */
81 for (i = 0;i <= retries;i++) { 81 for (i = 0;i <= retries;i++) {
82 memset(&mrq, 0, sizeof(struct mmc_request));
83
84 err = mmc_app_cmd(host, card); 82 err = mmc_app_cmd(host, card);
85 if (err) { 83 if (err) {
86 /* no point in retrying; no APP commands allowed */ 84 /* no point in retrying; no APP commands allowed */
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2dd4cfe7ca17..b9dee28ee7d0 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -296,6 +296,12 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
296 card->type = MMC_TYPE_SDIO; 296 card->type = MMC_TYPE_SDIO;
297 297
298 /* 298 /*
 299 * Call the optional host controller's init_card function to handle quirks.
300 */
301 if (host->ops->init_card)
302 host->ops->init_card(host, card);
303
304 /*
299 * For native busses: set card RCA and quit open drain mode. 305 * For native busses: set card RCA and quit open drain mode.
300 */ 306 */
301 if (!powered_resume && !mmc_host_is_spi(host)) { 307 if (!powered_resume && !mmc_host_is_spi(host)) {
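
The init_card hook added above gives a host controller driver a chance to apply card-specific quirks before SDIO initialization proceeds. A minimal, hypothetical sketch of a host driver using it (my_init_card, my_host_ops, and the 25 MHz cap are illustrative assumptions, not from the patch):

/* Hypothetical host-driver callback; mmc_sdio_init_card() invokes it
 * right after the card type is set, before the RCA is assigned. */
static void my_init_card(struct mmc_host *host, struct mmc_card *card)
{
	/* Example quirk: cap the bus clock for an embedded SDIO device
	 * that misbehaves above 25 MHz. */
	if (card->type == MMC_TYPE_SDIO)
		host->f_max = min(host->f_max, 25000000U);
}

static const struct mmc_host_ops my_host_ops = {
	/* .request, .set_ios, ... */
	.init_card	= my_init_card,
};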
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index ff27c8c71355..0f687cdeb064 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -406,6 +406,36 @@ void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
406EXPORT_SYMBOL_GPL(sdio_writeb); 406EXPORT_SYMBOL_GPL(sdio_writeb);
407 407
408/** 408/**
409 * sdio_writeb_readb - write and read a byte from SDIO function
410 * @func: SDIO function to access
411 * @write_byte: byte to write
412 * @addr: address to write to
413 * @err_ret: optional status value from transfer
414 *
 415 * Performs a RAW (Read after Write) operation as defined by the SDIO spec:
 416 * a single byte is written to the address space of a given SDIO function
 417 * and the response is read back from the same address, both in a single request.
418 * If there is a problem with the operation, 0xff is returned and
419 * @err_ret will contain the error code.
420 */
421u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
422 unsigned int addr, int *err_ret)
423{
424 int ret;
425 u8 val;
426
427 ret = mmc_io_rw_direct(func->card, 1, func->num, addr,
428 write_byte, &val);
429 if (err_ret)
430 *err_ret = ret;
431 if (ret)
432 val = 0xff;
433
434 return val;
435}
436EXPORT_SYMBOL_GPL(sdio_writeb_readb);
437
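
A sketch of the new helper from a caller's side, e.g. an SDIO function driver acknowledging an event in one RAW transfer (the register address 0x06 and the ack value 0x01 are illustrative assumptions):

/* Illustrative caller of sdio_writeb_readb(): write an ack byte and
 * read the resulting status in a single RAW request. */
static u8 my_func_ack_status(struct sdio_func *func)
{
	int err;
	u8 status;

	sdio_claim_host(func);
	status = sdio_writeb_readb(func, 0x01, 0x06, &err);
	sdio_release_host(func);

	if (err)
		dev_err(&func->dev, "RAW transfer failed: %d\n", err);

	return status;	/* 0xff on error, per the helper's contract */
}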
438/**
409 * sdio_memcpy_fromio - read a chunk of memory from a SDIO function 439 * sdio_memcpy_fromio - read a chunk of memory from a SDIO function
410 * @func: SDIO function to access 440 * @func: SDIO function to access
411 * @dst: buffer to store the data 441 * @dst: buffer to store the data
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2e13b94769fd..e171e77f6129 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -136,6 +136,18 @@ config MMC_SDHCI_S3C
136 136
137 If unsure, say N. 137 If unsure, say N.
138 138
139config MMC_SDHCI_SPEAR
140 tristate "SDHCI support on ST SPEAr platform"
141 depends on MMC_SDHCI && PLAT_SPEAR
142 help
143 This selects the Secure Digital Host Controller Interface (SDHCI)
 144 often referred to as the HSMMC block in some of the ST SPEAr range
 145 of SoCs.
146
147 If you have a controller with this interface, say Y or M here.
148
149 If unsure, say N.
150
139config MMC_SDHCI_S3C_DMA 151config MMC_SDHCI_S3C_DMA
140 bool "DMA support on S3C SDHCI" 152 bool "DMA support on S3C SDHCI"
141 depends on MMC_SDHCI_S3C && EXPERIMENTAL 153 depends on MMC_SDHCI_S3C && EXPERIMENTAL
@@ -412,3 +424,11 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
412 depends on SDH_BFIN 424 depends on SDH_BFIN
413 help 425 help
414 If you say yes here SD-Cards may work on the EZkit. 426 If you say yes here SD-Cards may work on the EZkit.
427
428config MMC_SH_MMCIF
429 tristate "SuperH Internal MMCIF support"
430 depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
431 help
 432 This selects the MMC Host Interface controller (MMCIF).
433
434 This driver supports MMCIF in sh7724/sh7757/sh7372.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index f4803977dfce..e30c2ee48894 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
15obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o 15obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
16obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o 16obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
17obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
17obj-$(CONFIG_MMC_WBSD) += wbsd.o 18obj-$(CONFIG_MMC_WBSD) += wbsd.o
18obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 19obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
19obj-$(CONFIG_MMC_OMAP) += omap.o 20obj-$(CONFIG_MMC_OMAP) += omap.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
34obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 35obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
35obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 36obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
36obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 37obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
38obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
37 39
38obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o 40obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
39sdhci-of-y := sdhci-of-core.o 41sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 336d9f553f3e..5f3a599ead07 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -1157,7 +1157,7 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
1157 enable_irq_wake(host->board->det_pin); 1157 enable_irq_wake(host->board->det_pin);
1158 1158
1159 if (mmc) 1159 if (mmc)
1160 ret = mmc_suspend_host(mmc, state); 1160 ret = mmc_suspend_host(mmc);
1161 1161
1162 return ret; 1162 return ret;
1163} 1163}
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index df0e8a88d85f..95ef864ad8f9 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -173,6 +173,7 @@ struct atmel_mci {
173 * @mmc: The mmc_host representing this slot. 173 * @mmc: The mmc_host representing this slot.
174 * @host: The MMC controller this slot is using. 174 * @host: The MMC controller this slot is using.
175 * @sdc_reg: Value of SDCR to be written before using this slot. 175 * @sdc_reg: Value of SDCR to be written before using this slot.
176 * @sdio_irq: SDIO irq mask for this slot.
176 * @mrq: mmc_request currently being processed or waiting to be 177 * @mrq: mmc_request currently being processed or waiting to be
177 * processed, or NULL when the slot is idle. 178 * processed, or NULL when the slot is idle.
178 * @queue_node: List node for placing this node in the @queue list of 179 * @queue_node: List node for placing this node in the @queue list of
@@ -191,6 +192,7 @@ struct atmel_mci_slot {
191 struct atmel_mci *host; 192 struct atmel_mci *host;
192 193
193 u32 sdc_reg; 194 u32 sdc_reg;
195 u32 sdio_irq;
194 196
195 struct mmc_request *mrq; 197 struct mmc_request *mrq;
196 struct list_head queue_node; 198 struct list_head queue_node;
@@ -792,7 +794,7 @@ static void atmci_start_request(struct atmel_mci *host,
792 mci_writel(host, SDCR, slot->sdc_reg); 794 mci_writel(host, SDCR, slot->sdc_reg);
793 795
794 iflags = mci_readl(host, IMR); 796 iflags = mci_readl(host, IMR);
795 if (iflags) 797 if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB))
796 dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", 798 dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
797 iflags); 799 iflags);
798 800
@@ -952,10 +954,21 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
952 if (mci_has_rwproof()) 954 if (mci_has_rwproof())
953 host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF); 955 host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF);
954 956
955 if (list_empty(&host->queue)) 957 if (atmci_is_mci2()) {
958 /* setup High Speed mode in relation with card capacity */
959 if (ios->timing == MMC_TIMING_SD_HS)
960 host->cfg_reg |= MCI_CFG_HSMODE;
961 else
962 host->cfg_reg &= ~MCI_CFG_HSMODE;
963 }
964
965 if (list_empty(&host->queue)) {
956 mci_writel(host, MR, host->mode_reg); 966 mci_writel(host, MR, host->mode_reg);
957 else 967 if (atmci_is_mci2())
968 mci_writel(host, CFG, host->cfg_reg);
969 } else {
958 host->need_clock_update = true; 970 host->need_clock_update = true;
971 }
959 972
960 spin_unlock_bh(&host->lock); 973 spin_unlock_bh(&host->lock);
961 } else { 974 } else {
@@ -1030,11 +1043,23 @@ static int atmci_get_cd(struct mmc_host *mmc)
1030 return present; 1043 return present;
1031} 1044}
1032 1045
1046static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1047{
1048 struct atmel_mci_slot *slot = mmc_priv(mmc);
1049 struct atmel_mci *host = slot->host;
1050
1051 if (enable)
1052 mci_writel(host, IER, slot->sdio_irq);
1053 else
1054 mci_writel(host, IDR, slot->sdio_irq);
1055}
1056
1033static const struct mmc_host_ops atmci_ops = { 1057static const struct mmc_host_ops atmci_ops = {
1034 .request = atmci_request, 1058 .request = atmci_request,
1035 .set_ios = atmci_set_ios, 1059 .set_ios = atmci_set_ios,
1036 .get_ro = atmci_get_ro, 1060 .get_ro = atmci_get_ro,
1037 .get_cd = atmci_get_cd, 1061 .get_cd = atmci_get_cd,
1062 .enable_sdio_irq = atmci_enable_sdio_irq,
1038}; 1063};
1039 1064
1040/* Called with host->lock held */ 1065/* Called with host->lock held */
@@ -1052,8 +1077,11 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1052 * necessary if set_ios() is called when a different slot is 1077 * necessary if set_ios() is called when a different slot is
 1053 * busy transferring data. 1078 * busy transferring data.
1054 */ 1079 */
1055 if (host->need_clock_update) 1080 if (host->need_clock_update) {
1056 mci_writel(host, MR, host->mode_reg); 1081 mci_writel(host, MR, host->mode_reg);
1082 if (atmci_is_mci2())
1083 mci_writel(host, CFG, host->cfg_reg);
1084 }
1057 1085
1058 host->cur_slot->mrq = NULL; 1086 host->cur_slot->mrq = NULL;
1059 host->mrq = NULL; 1087 host->mrq = NULL;
@@ -1483,6 +1511,19 @@ static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
1483 tasklet_schedule(&host->tasklet); 1511 tasklet_schedule(&host->tasklet);
1484} 1512}
1485 1513
1514static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1515{
1516 int i;
1517
1518 for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
1519 struct atmel_mci_slot *slot = host->slot[i];
1520 if (slot && (status & slot->sdio_irq)) {
1521 mmc_signal_sdio_irq(slot->mmc);
1522 }
1523 }
1524}
1525
1526
1486static irqreturn_t atmci_interrupt(int irq, void *dev_id) 1527static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1487{ 1528{
1488 struct atmel_mci *host = dev_id; 1529 struct atmel_mci *host = dev_id;
@@ -1522,6 +1563,10 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1522 1563
1523 if (pending & MCI_CMDRDY) 1564 if (pending & MCI_CMDRDY)
1524 atmci_cmd_interrupt(host, status); 1565 atmci_cmd_interrupt(host, status);
1566
1567 if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB))
1568 atmci_sdio_interrupt(host, status);
1569
1525 } while (pass_count++ < 5); 1570 } while (pass_count++ < 5);
1526 1571
1527 return pass_count ? IRQ_HANDLED : IRQ_NONE; 1572 return pass_count ? IRQ_HANDLED : IRQ_NONE;
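
For context, the consuming side of this SDIO IRQ plumbing is an SDIO function driver: it registers a handler with sdio_claim_irq(), and once atmci_sdio_interrupt() calls mmc_signal_sdio_irq(), the core's SDIO IRQ thread invokes that handler with the host already claimed. A minimal sketch (my_sdio_irq and my_sdio_probe are illustrative names):

/* Consumer side, sketched: claim the function's IRQ at probe time. */
static void my_sdio_irq(struct sdio_func *func)
{
	/* read and acknowledge the function's interrupt status here */
}

static int my_sdio_probe(struct sdio_func *func,
			 const struct sdio_device_id *id)
{
	int err;

	sdio_claim_host(func);
	err = sdio_claim_irq(func, my_sdio_irq);
	sdio_release_host(func);

	return err;
}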
@@ -1544,7 +1589,7 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
1544 1589
1545static int __init atmci_init_slot(struct atmel_mci *host, 1590static int __init atmci_init_slot(struct atmel_mci *host,
1546 struct mci_slot_pdata *slot_data, unsigned int id, 1591 struct mci_slot_pdata *slot_data, unsigned int id,
1547 u32 sdc_reg) 1592 u32 sdc_reg, u32 sdio_irq)
1548{ 1593{
1549 struct mmc_host *mmc; 1594 struct mmc_host *mmc;
1550 struct atmel_mci_slot *slot; 1595 struct atmel_mci_slot *slot;
@@ -1560,11 +1605,16 @@ static int __init atmci_init_slot(struct atmel_mci *host,
1560 slot->wp_pin = slot_data->wp_pin; 1605 slot->wp_pin = slot_data->wp_pin;
1561 slot->detect_is_active_high = slot_data->detect_is_active_high; 1606 slot->detect_is_active_high = slot_data->detect_is_active_high;
1562 slot->sdc_reg = sdc_reg; 1607 slot->sdc_reg = sdc_reg;
1608 slot->sdio_irq = sdio_irq;
1563 1609
1564 mmc->ops = &atmci_ops; 1610 mmc->ops = &atmci_ops;
1565 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); 1611 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
1566 mmc->f_max = host->bus_hz / 2; 1612 mmc->f_max = host->bus_hz / 2;
1567 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1613 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1614 if (sdio_irq)
1615 mmc->caps |= MMC_CAP_SDIO_IRQ;
1616 if (atmci_is_mci2())
1617 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1568 if (slot_data->bus_width >= 4) 1618 if (slot_data->bus_width >= 4)
1569 mmc->caps |= MMC_CAP_4_BIT_DATA; 1619 mmc->caps |= MMC_CAP_4_BIT_DATA;
1570 1620
@@ -1753,13 +1803,13 @@ static int __init atmci_probe(struct platform_device *pdev)
1753 ret = -ENODEV; 1803 ret = -ENODEV;
1754 if (pdata->slot[0].bus_width) { 1804 if (pdata->slot[0].bus_width) {
1755 ret = atmci_init_slot(host, &pdata->slot[0], 1805 ret = atmci_init_slot(host, &pdata->slot[0],
1756 0, MCI_SDCSEL_SLOT_A); 1806 0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA);
1757 if (!ret) 1807 if (!ret)
1758 nr_slots++; 1808 nr_slots++;
1759 } 1809 }
1760 if (pdata->slot[1].bus_width) { 1810 if (pdata->slot[1].bus_width) {
1761 ret = atmci_init_slot(host, &pdata->slot[1], 1811 ret = atmci_init_slot(host, &pdata->slot[1],
1762 1, MCI_SDCSEL_SLOT_B); 1812 1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB);
1763 if (!ret) 1813 if (!ret)
1764 nr_slots++; 1814 nr_slots++;
1765 } 1815 }
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index f5834449400e..c8da5d30a861 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1142,7 +1142,7 @@ static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
1142 struct au1xmmc_host *host = platform_get_drvdata(pdev); 1142 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1143 int ret; 1143 int ret;
1144 1144
1145 ret = mmc_suspend_host(host->mmc, state); 1145 ret = mmc_suspend_host(host->mmc);
1146 if (ret) 1146 if (ret)
1147 return ret; 1147 return ret;
1148 1148
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 6919e844072c..4b0e677d7295 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -576,7 +576,7 @@ static int sdh_suspend(struct platform_device *dev, pm_message_t state)
576 int ret = 0; 576 int ret = 0;
577 577
578 if (mmc) 578 if (mmc)
579 ret = mmc_suspend_host(mmc, state); 579 ret = mmc_suspend_host(mmc);
580 580
581 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON); 581 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
582 peripheral_free_list(drv_data->pin_req); 582 peripheral_free_list(drv_data->pin_req);
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 92a324f7417c..ca3bdc831900 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -675,7 +675,7 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
675 struct mmc_host *mmc = cb710_slot_to_mmc(slot); 675 struct mmc_host *mmc = cb710_slot_to_mmc(slot);
676 int err; 676 int err;
677 677
678 err = mmc_suspend_host(mmc, state); 678 err = mmc_suspend_host(mmc);
679 if (err) 679 if (err)
680 return err; 680 return err;
681 681
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 3bd0ba294e9d..33d9f1b00862 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -137,15 +137,15 @@
137 137
138/* 138/*
139 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, 139 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
140 * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only 140 * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
141 * for drivers with max_hw_segs == 1, making the segments bigger (64KB) 141 * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
142 * than the page or two that's otherwise typical. NR_SG == 16 gives at 142 * than the page or two that's otherwise typical. nr_sg (passed from
143 * least the same throughput boost, using EDMA transfer linkage instead 143 * platform data) == 16 gives at least the same throughput boost, using
144 * of spending CPU time copying pages. 144 * EDMA transfer linkage instead of spending CPU time copying pages.
145 */ 145 */
146#define MAX_CCNT ((1 << 16) - 1) 146#define MAX_CCNT ((1 << 16) - 1)
147 147
148#define NR_SG 16 148#define MAX_NR_SG 16
149 149
150static unsigned rw_threshold = 32; 150static unsigned rw_threshold = 32;
151module_param(rw_threshold, uint, S_IRUGO); 151module_param(rw_threshold, uint, S_IRUGO);
@@ -171,6 +171,7 @@ struct mmc_davinci_host {
171#define DAVINCI_MMC_DATADIR_READ 1 171#define DAVINCI_MMC_DATADIR_READ 1
172#define DAVINCI_MMC_DATADIR_WRITE 2 172#define DAVINCI_MMC_DATADIR_WRITE 2
173 unsigned char data_dir; 173 unsigned char data_dir;
174 unsigned char suspended;
174 175
175 /* buffer is used during PIO of one scatterlist segment, and 176 /* buffer is used during PIO of one scatterlist segment, and
176 * is updated along with buffer_bytes_left. bytes_left applies 177 * is updated along with buffer_bytes_left. bytes_left applies
@@ -192,7 +193,7 @@ struct mmc_davinci_host {
192 struct edmacc_param tx_template; 193 struct edmacc_param tx_template;
193 struct edmacc_param rx_template; 194 struct edmacc_param rx_template;
194 unsigned n_link; 195 unsigned n_link;
195 u32 links[NR_SG - 1]; 196 u32 links[MAX_NR_SG - 1];
196 197
197 /* For PIO we walk scatterlists one segment at a time. */ 198 /* For PIO we walk scatterlists one segment at a time. */
198 unsigned int sg_len; 199 unsigned int sg_len;
@@ -202,6 +203,8 @@ struct mmc_davinci_host {
202 u8 version; 203 u8 version;
203 /* for ns in one cycle calculation */ 204 /* for ns in one cycle calculation */
204 unsigned ns_in_one_cycle; 205 unsigned ns_in_one_cycle;
206 /* Number of sg segments */
207 u8 nr_sg;
205#ifdef CONFIG_CPU_FREQ 208#ifdef CONFIG_CPU_FREQ
206 struct notifier_block freq_transition; 209 struct notifier_block freq_transition;
207#endif 210#endif
@@ -568,6 +571,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
568 571
569static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) 572static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
570{ 573{
574 u32 link_size;
571 int r, i; 575 int r, i;
572 576
573 /* Acquire master DMA write channel */ 577 /* Acquire master DMA write channel */
@@ -593,7 +597,8 @@ static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
593 /* Allocate parameter RAM slots, which will later be bound to a 597 /* Allocate parameter RAM slots, which will later be bound to a
594 * channel as needed to handle a scatterlist. 598 * channel as needed to handle a scatterlist.
595 */ 599 */
596 for (i = 0; i < ARRAY_SIZE(host->links); i++) { 600 link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
601 for (i = 0; i < link_size; i++) {
597 r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); 602 r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
598 if (r < 0) { 603 if (r < 0) {
599 dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", 604 dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
@@ -905,19 +910,26 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
905 } 910 }
906} 911}
907 912
908static void 913static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
909davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) 914 int val)
910{ 915{
911 u32 temp; 916 u32 temp;
912 917
913 /* reset command and data state machines */
914 temp = readl(host->base + DAVINCI_MMCCTL); 918 temp = readl(host->base + DAVINCI_MMCCTL);
915 writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST, 919 if (val) /* reset */
916 host->base + DAVINCI_MMCCTL); 920 temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
921 else /* enable */
922 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
917 923
918 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
919 udelay(10);
920 writel(temp, host->base + DAVINCI_MMCCTL); 924 writel(temp, host->base + DAVINCI_MMCCTL);
925 udelay(10);
926}
927
928static void
929davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
930{
931 mmc_davinci_reset_ctrl(host, 1);
932 mmc_davinci_reset_ctrl(host, 0);
921} 933}
922 934
923static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 935static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
@@ -1121,15 +1133,8 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
1121#endif 1133#endif
1122static void __init init_mmcsd_host(struct mmc_davinci_host *host) 1134static void __init init_mmcsd_host(struct mmc_davinci_host *host)
1123{ 1135{
1124 /* DAT line portion is disabled and in reset state */
1125 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST,
1126 host->base + DAVINCI_MMCCTL);
1127
1128 /* CMD line portion is disabled and in reset state */
1129 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST,
1130 host->base + DAVINCI_MMCCTL);
1131 1136
1132 udelay(10); 1137 mmc_davinci_reset_ctrl(host, 1);
1133 1138
1134 writel(0, host->base + DAVINCI_MMCCLK); 1139 writel(0, host->base + DAVINCI_MMCCLK);
1135 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 1140 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
@@ -1137,12 +1142,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host)
1137 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 1142 writel(0x1FFF, host->base + DAVINCI_MMCTOR);
1138 writel(0xFFFF, host->base + DAVINCI_MMCTOD); 1143 writel(0xFFFF, host->base + DAVINCI_MMCTOD);
1139 1144
1140 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST, 1145 mmc_davinci_reset_ctrl(host, 0);
1141 host->base + DAVINCI_MMCCTL);
1142 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST,
1143 host->base + DAVINCI_MMCCTL);
1144
1145 udelay(10);
1146} 1146}
1147 1147
1148static int __init davinci_mmcsd_probe(struct platform_device *pdev) 1148static int __init davinci_mmcsd_probe(struct platform_device *pdev)
@@ -1202,6 +1202,12 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1202 1202
1203 init_mmcsd_host(host); 1203 init_mmcsd_host(host);
1204 1204
1205 if (pdata->nr_sg)
1206 host->nr_sg = pdata->nr_sg - 1;
1207
1208 if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
1209 host->nr_sg = MAX_NR_SG;
1210
1205 host->use_dma = use_dma; 1211 host->use_dma = use_dma;
1206 host->irq = irq; 1212 host->irq = irq;
1207 1213
@@ -1327,32 +1333,65 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
1327} 1333}
1328 1334
1329#ifdef CONFIG_PM 1335#ifdef CONFIG_PM
1330static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg) 1336static int davinci_mmcsd_suspend(struct device *dev)
1331{ 1337{
1338 struct platform_device *pdev = to_platform_device(dev);
1332 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1339 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1340 int ret;
1333 1341
1334 return mmc_suspend_host(host->mmc, msg); 1342 mmc_host_enable(host->mmc);
1343 ret = mmc_suspend_host(host->mmc);
1344 if (!ret) {
1345 writel(0, host->base + DAVINCI_MMCIM);
1346 mmc_davinci_reset_ctrl(host, 1);
1347 mmc_host_disable(host->mmc);
1348 clk_disable(host->clk);
1349 host->suspended = 1;
1350 } else {
1351 host->suspended = 0;
1352 mmc_host_disable(host->mmc);
1353 }
1354
1355 return ret;
1335} 1356}
1336 1357
1337static int davinci_mmcsd_resume(struct platform_device *pdev) 1358static int davinci_mmcsd_resume(struct device *dev)
1338{ 1359{
1360 struct platform_device *pdev = to_platform_device(dev);
1339 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1361 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1362 int ret;
1363
1364 if (!host->suspended)
1365 return 0;
1340 1366
1341 return mmc_resume_host(host->mmc); 1367 clk_enable(host->clk);
1368 mmc_host_enable(host->mmc);
1369
1370 mmc_davinci_reset_ctrl(host, 0);
1371 ret = mmc_resume_host(host->mmc);
1372 if (!ret)
1373 host->suspended = 0;
1374
1375 return ret;
1342} 1376}
1377
1378static const struct dev_pm_ops davinci_mmcsd_pm = {
1379 .suspend = davinci_mmcsd_suspend,
1380 .resume = davinci_mmcsd_resume,
1381};
1382
1383#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
1343#else 1384#else
1344#define davinci_mmcsd_suspend NULL 1385#define davinci_mmcsd_pm_ops NULL
1345#define davinci_mmcsd_resume NULL
1346#endif 1386#endif
1347 1387
1348static struct platform_driver davinci_mmcsd_driver = { 1388static struct platform_driver davinci_mmcsd_driver = {
1349 .driver = { 1389 .driver = {
1350 .name = "davinci_mmc", 1390 .name = "davinci_mmc",
1351 .owner = THIS_MODULE, 1391 .owner = THIS_MODULE,
1392 .pm = davinci_mmcsd_pm_ops,
1352 }, 1393 },
1353 .remove = __exit_p(davinci_mmcsd_remove), 1394 .remove = __exit_p(davinci_mmcsd_remove),
1354 .suspend = davinci_mmcsd_suspend,
1355 .resume = davinci_mmcsd_resume,
1356}; 1395};
1357 1396
1358static int __init davinci_mmcsd_init(void) 1397static int __init davinci_mmcsd_init(void)
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index bf98d7cc928a..9a68ff4353a2 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -1115,7 +1115,7 @@ static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
1115 int ret = 0; 1115 int ret = 0;
1116 1116
1117 if (mmc) 1117 if (mmc)
1118 ret = mmc_suspend_host(mmc, state); 1118 ret = mmc_suspend_host(mmc);
1119 1119
1120 return ret; 1120 return ret;
1121} 1121}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index ff115d920888..4917af96bae1 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -824,7 +824,7 @@ static int mmci_suspend(struct amba_device *dev, pm_message_t state)
824 if (mmc) { 824 if (mmc) {
825 struct mmci_host *host = mmc_priv(mmc); 825 struct mmci_host *host = mmc_priv(mmc);
826 826
827 ret = mmc_suspend_host(mmc, state); 827 ret = mmc_suspend_host(mmc);
828 if (ret == 0) 828 if (ret == 0)
829 writel(0, host->base + MMCIMASK0); 829 writel(0, host->base + MMCIMASK0);
830 } 830 }
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 61f1d27fed3f..24e09454e522 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1327,7 +1327,7 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
1327 disable_irq(host->stat_irq); 1327 disable_irq(host->stat_irq);
1328 1328
1329 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) 1329 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1330 rc = mmc_suspend_host(mmc, state); 1330 rc = mmc_suspend_host(mmc);
1331 if (!rc) 1331 if (!rc)
1332 msmsdcc_writel(host, 0, MMCIMASK0); 1332 msmsdcc_writel(host, 0, MMCIMASK0);
1333 if (host->clks_on) 1333 if (host->clks_on)
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 34e23489811a..366eefa77c5a 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -865,7 +865,7 @@ static int mvsd_suspend(struct platform_device *dev, pm_message_t state)
865 int ret = 0; 865 int ret = 0;
866 866
867 if (mmc) 867 if (mmc)
868 ret = mmc_suspend_host(mmc, state); 868 ret = mmc_suspend_host(mmc);
869 869
870 return ret; 870 return ret;
871} 871}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 2df90412abb5..d9d4a72e0ec7 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -119,6 +119,7 @@ struct mxcmci_host {
119 int detect_irq; 119 int detect_irq;
120 int dma; 120 int dma;
121 int do_dma; 121 int do_dma;
122 int use_sdio;
122 unsigned int power_mode; 123 unsigned int power_mode;
123 struct imxmmc_platform_data *pdata; 124 struct imxmmc_platform_data *pdata;
124 125
@@ -138,6 +139,7 @@ struct mxcmci_host {
138 int clock; 139 int clock;
139 140
140 struct work_struct datawork; 141 struct work_struct datawork;
142 spinlock_t lock;
141}; 143};
142 144
143static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); 145static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
@@ -151,6 +153,8 @@ static void mxcmci_softreset(struct mxcmci_host *host)
151{ 153{
152 int i; 154 int i;
153 155
156 dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");
157
154 /* reset sequence */ 158 /* reset sequence */
155 writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK); 159 writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK);
156 writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK, 160 writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
@@ -224,6 +228,9 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
224static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, 228static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
225 unsigned int cmdat) 229 unsigned int cmdat)
226{ 230{
231 u32 int_cntr;
232 unsigned long flags;
233
227 WARN_ON(host->cmd != NULL); 234 WARN_ON(host->cmd != NULL);
228 host->cmd = cmd; 235 host->cmd = cmd;
229 236
@@ -247,12 +254,16 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
247 return -EINVAL; 254 return -EINVAL;
248 } 255 }
249 256
257 int_cntr = INT_END_CMD_RES_EN;
258
250 if (mxcmci_use_dma(host)) 259 if (mxcmci_use_dma(host))
251 writel(INT_READ_OP_EN | INT_WRITE_OP_DONE_EN | 260 int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN;
252 INT_END_CMD_RES_EN, 261
253 host->base + MMC_REG_INT_CNTR); 262 spin_lock_irqsave(&host->lock, flags);
254 else 263 if (host->use_sdio)
255 writel(INT_END_CMD_RES_EN, host->base + MMC_REG_INT_CNTR); 264 int_cntr |= INT_SDIO_IRQ_EN;
265 writel(int_cntr, host->base + MMC_REG_INT_CNTR);
266 spin_unlock_irqrestore(&host->lock, flags);
256 267
257 writew(cmd->opcode, host->base + MMC_REG_CMD); 268 writew(cmd->opcode, host->base + MMC_REG_CMD);
258 writel(cmd->arg, host->base + MMC_REG_ARG); 269 writel(cmd->arg, host->base + MMC_REG_ARG);
@@ -264,7 +275,14 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
264static void mxcmci_finish_request(struct mxcmci_host *host, 275static void mxcmci_finish_request(struct mxcmci_host *host,
265 struct mmc_request *req) 276 struct mmc_request *req)
266{ 277{
267 writel(0, host->base + MMC_REG_INT_CNTR); 278 u32 int_cntr = 0;
279 unsigned long flags;
280
281 spin_lock_irqsave(&host->lock, flags);
282 if (host->use_sdio)
283 int_cntr |= INT_SDIO_IRQ_EN;
284 writel(int_cntr, host->base + MMC_REG_INT_CNTR);
285 spin_unlock_irqrestore(&host->lock, flags);
268 286
269 host->req = NULL; 287 host->req = NULL;
270 host->cmd = NULL; 288 host->cmd = NULL;
@@ -290,16 +308,25 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
290 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", 308 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
291 stat); 309 stat);
292 if (stat & STATUS_CRC_READ_ERR) { 310 if (stat & STATUS_CRC_READ_ERR) {
311 dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
293 data->error = -EILSEQ; 312 data->error = -EILSEQ;
294 } else if (stat & STATUS_CRC_WRITE_ERR) { 313 } else if (stat & STATUS_CRC_WRITE_ERR) {
295 u32 err_code = (stat >> 9) & 0x3; 314 u32 err_code = (stat >> 9) & 0x3;
296 if (err_code == 2) /* No CRC response */ 315 if (err_code == 2) { /* No CRC response */
316 dev_err(mmc_dev(host->mmc),
317 "%s: No CRC -ETIMEDOUT\n", __func__);
297 data->error = -ETIMEDOUT; 318 data->error = -ETIMEDOUT;
298 else 319 } else {
320 dev_err(mmc_dev(host->mmc),
321 "%s: -EILSEQ\n", __func__);
299 data->error = -EILSEQ; 322 data->error = -EILSEQ;
323 }
300 } else if (stat & STATUS_TIME_OUT_READ) { 324 } else if (stat & STATUS_TIME_OUT_READ) {
325 dev_err(mmc_dev(host->mmc),
326 "%s: read -ETIMEDOUT\n", __func__);
301 data->error = -ETIMEDOUT; 327 data->error = -ETIMEDOUT;
302 } else { 328 } else {
329 dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
303 data->error = -EIO; 330 data->error = -EIO;
304 } 331 }
305 } else { 332 } else {
@@ -433,8 +460,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
433 struct scatterlist *sg; 460 struct scatterlist *sg;
434 int stat, i; 461 int stat, i;
435 462
436 host->datasize = 0;
437
438 host->data = data; 463 host->data = data;
439 host->datasize = 0; 464 host->datasize = 0;
440 465
@@ -464,6 +489,9 @@ static void mxcmci_datawork(struct work_struct *work)
464 struct mxcmci_host *host = container_of(work, struct mxcmci_host, 489 struct mxcmci_host *host = container_of(work, struct mxcmci_host,
465 datawork); 490 datawork);
466 int datastat = mxcmci_transfer_data(host); 491 int datastat = mxcmci_transfer_data(host);
492
493 writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
494 host->base + MMC_REG_STATUS);
467 mxcmci_finish_data(host, datastat); 495 mxcmci_finish_data(host, datastat);
468 496
469 if (host->req->stop) { 497 if (host->req->stop) {
@@ -523,15 +551,35 @@ static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
523static irqreturn_t mxcmci_irq(int irq, void *devid) 551static irqreturn_t mxcmci_irq(int irq, void *devid)
524{ 552{
525 struct mxcmci_host *host = devid; 553 struct mxcmci_host *host = devid;
554 unsigned long flags;
555 bool sdio_irq;
526 u32 stat; 556 u32 stat;
527 557
528 stat = readl(host->base + MMC_REG_STATUS); 558 stat = readl(host->base + MMC_REG_STATUS);
529 writel(stat, host->base + MMC_REG_STATUS); 559 writel(stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
560 STATUS_WRITE_OP_DONE), host->base + MMC_REG_STATUS);
530 561
531 dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); 562 dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);
532 563
564 spin_lock_irqsave(&host->lock, flags);
565 sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
566 spin_unlock_irqrestore(&host->lock, flags);
567
568#ifdef HAS_DMA
569 if (mxcmci_use_dma(host) &&
570 (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
571 writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
572 host->base + MMC_REG_STATUS);
573#endif
574
575 if (sdio_irq) {
576 writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
577 mmc_signal_sdio_irq(host->mmc);
578 }
579
533 if (stat & STATUS_END_CMD_RESP) 580 if (stat & STATUS_END_CMD_RESP)
534 mxcmci_cmd_done(host, stat); 581 mxcmci_cmd_done(host, stat);
582
535#ifdef HAS_DMA 583#ifdef HAS_DMA
536 if (mxcmci_use_dma(host) && 584 if (mxcmci_use_dma(host) &&
537 (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) 585 (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
@@ -668,11 +716,46 @@ static int mxcmci_get_ro(struct mmc_host *mmc)
668 return -ENOSYS; 716 return -ENOSYS;
669} 717}
670 718
719static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
720{
721 struct mxcmci_host *host = mmc_priv(mmc);
722 unsigned long flags;
723 u32 int_cntr;
724
725 spin_lock_irqsave(&host->lock, flags);
726 host->use_sdio = enable;
727 int_cntr = readl(host->base + MMC_REG_INT_CNTR);
728
729 if (enable)
730 int_cntr |= INT_SDIO_IRQ_EN;
731 else
732 int_cntr &= ~INT_SDIO_IRQ_EN;
733
734 writel(int_cntr, host->base + MMC_REG_INT_CNTR);
735 spin_unlock_irqrestore(&host->lock, flags);
736}
737
738static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
739{
740 /*
741 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
742 * multi-block transfers when connected SDIO peripheral doesn't
743 * drive the BUSY line as required by the specs.
744 * One way to prevent this is to only allow 1-bit transfers.
745 */
746
747 if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO)
748 host->caps &= ~MMC_CAP_4_BIT_DATA;
749 else
750 host->caps |= MMC_CAP_4_BIT_DATA;
751}
671 752
672static const struct mmc_host_ops mxcmci_ops = { 753static const struct mmc_host_ops mxcmci_ops = {
673 .request = mxcmci_request, 754 .request = mxcmci_request,
674 .set_ios = mxcmci_set_ios, 755 .set_ios = mxcmci_set_ios,
675 .get_ro = mxcmci_get_ro, 756 .get_ro = mxcmci_get_ro,
757 .enable_sdio_irq = mxcmci_enable_sdio_irq,
758 .init_card = mxcmci_init_card,
676}; 759};
677 760
678static int mxcmci_probe(struct platform_device *pdev) 761static int mxcmci_probe(struct platform_device *pdev)
@@ -700,7 +783,7 @@ static int mxcmci_probe(struct platform_device *pdev)
700 } 783 }
701 784
702 mmc->ops = &mxcmci_ops; 785 mmc->ops = &mxcmci_ops;
703 mmc->caps = MMC_CAP_4_BIT_DATA; 786 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
704 787
705 /* MMC core transfer sizes tunable parameters */ 788 /* MMC core transfer sizes tunable parameters */
706 mmc->max_hw_segs = 64; 789 mmc->max_hw_segs = 64;
@@ -719,6 +802,7 @@ static int mxcmci_probe(struct platform_device *pdev)
719 802
720 host->mmc = mmc; 803 host->mmc = mmc;
721 host->pdata = pdev->dev.platform_data; 804 host->pdata = pdev->dev.platform_data;
805 spin_lock_init(&host->lock);
722 806
723 if (host->pdata && host->pdata->ocr_avail) 807 if (host->pdata && host->pdata->ocr_avail)
724 mmc->ocr_avail = host->pdata->ocr_avail; 808 mmc->ocr_avail = host->pdata->ocr_avail;
@@ -848,7 +932,7 @@ static int mxcmci_suspend(struct platform_device *dev, pm_message_t state)
848 int ret = 0; 932 int ret = 0;
849 933
850 if (mmc) 934 if (mmc)
851 ret = mmc_suspend_host(mmc, state); 935 ret = mmc_suspend_host(mmc);
852 936
853 return ret; 937 return ret;
854} 938}
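
The mxcmmc changes above route every update of the interrupt-control mask through a new host->lock, so that .enable_sdio_irq, which the core may call concurrently with command setup and the interrupt handler, cannot race with them. A reduced sketch of the idea; the struct, register offset, and bit below are illustrative stand-ins for the driver's real ones:

#include <linux/io.h>
#include <linux/mmc/host.h>
#include <linux/spinlock.h>

#define REG_INT_CNTR	0x28		/* hypothetical register offset */
#define INT_SDIO_IRQ_EN	(1 << 0)	/* hypothetical enable bit */

struct foo_host {
	void __iomem	*base;
	spinlock_t	lock;
	int		use_sdio;
};

static void foo_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct foo_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 mask;

	spin_lock_irqsave(&host->lock, flags);
	host->use_sdio = enable;	/* consulted by the IRQ handler */
	mask = readl(host->base + REG_INT_CNTR);
	if (enable)
		mask |= INT_SDIO_IRQ_EN;
	else
		mask &= ~INT_SDIO_IRQ_EN;
	writel(mask, host->base + REG_INT_CNTR);
	spin_unlock_irqrestore(&host->lock, flags);
}
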
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index bb6cc54b558e..1247e5de9faa 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -64,7 +64,7 @@ static int of_mmc_spi_get_ro(struct device *dev)
64struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) 64struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
65{ 65{
66 struct device *dev = &spi->dev; 66 struct device *dev = &spi->dev;
67 struct device_node *np = dev_archdata_get_node(&dev->archdata); 67 struct device_node *np = dev->of_node;
68 struct of_mmc_spi *oms; 68 struct of_mmc_spi *oms;
69 const u32 *voltage_ranges; 69 const u32 *voltage_ranges;
70 int num_ranges; 70 int num_ranges;
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(mmc_spi_get_pdata);
135void mmc_spi_put_pdata(struct spi_device *spi) 135void mmc_spi_put_pdata(struct spi_device *spi)
136{ 136{
137 struct device *dev = &spi->dev; 137 struct device *dev = &spi->dev;
138 struct device_node *np = dev_archdata_get_node(&dev->archdata); 138 struct device_node *np = dev->of_node;
139 struct of_mmc_spi *oms = to_of_mmc_spi(dev); 139 struct of_mmc_spi *oms = to_of_mmc_spi(dev);
140 int i; 140 int i;
141 141
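
The two one-line changes above track the device-tree pointer moving out of archdata and into struct device itself:

/* was: struct device_node *np = dev_archdata_get_node(&dev->archdata); */
struct device_node *np = dev->of_node;
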
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 84d280406341..2b281680e320 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -39,30 +39,30 @@
39#include <plat/fpga.h> 39#include <plat/fpga.h>
40 40
41#define OMAP_MMC_REG_CMD 0x00 41#define OMAP_MMC_REG_CMD 0x00
42#define OMAP_MMC_REG_ARGL 0x04 42#define OMAP_MMC_REG_ARGL 0x01
43#define OMAP_MMC_REG_ARGH 0x08 43#define OMAP_MMC_REG_ARGH 0x02
44#define OMAP_MMC_REG_CON 0x0c 44#define OMAP_MMC_REG_CON 0x03
45#define OMAP_MMC_REG_STAT 0x10 45#define OMAP_MMC_REG_STAT 0x04
46#define OMAP_MMC_REG_IE 0x14 46#define OMAP_MMC_REG_IE 0x05
47#define OMAP_MMC_REG_CTO 0x18 47#define OMAP_MMC_REG_CTO 0x06
48#define OMAP_MMC_REG_DTO 0x1c 48#define OMAP_MMC_REG_DTO 0x07
49#define OMAP_MMC_REG_DATA 0x20 49#define OMAP_MMC_REG_DATA 0x08
50#define OMAP_MMC_REG_BLEN 0x24 50#define OMAP_MMC_REG_BLEN 0x09
51#define OMAP_MMC_REG_NBLK 0x28 51#define OMAP_MMC_REG_NBLK 0x0a
52#define OMAP_MMC_REG_BUF 0x2c 52#define OMAP_MMC_REG_BUF 0x0b
53#define OMAP_MMC_REG_SDIO 0x34 53#define OMAP_MMC_REG_SDIO 0x0d
54#define OMAP_MMC_REG_REV 0x3c 54#define OMAP_MMC_REG_REV 0x0f
55#define OMAP_MMC_REG_RSP0 0x40 55#define OMAP_MMC_REG_RSP0 0x10
56#define OMAP_MMC_REG_RSP1 0x44 56#define OMAP_MMC_REG_RSP1 0x11
57#define OMAP_MMC_REG_RSP2 0x48 57#define OMAP_MMC_REG_RSP2 0x12
58#define OMAP_MMC_REG_RSP3 0x4c 58#define OMAP_MMC_REG_RSP3 0x13
59#define OMAP_MMC_REG_RSP4 0x50 59#define OMAP_MMC_REG_RSP4 0x14
60#define OMAP_MMC_REG_RSP5 0x54 60#define OMAP_MMC_REG_RSP5 0x15
61#define OMAP_MMC_REG_RSP6 0x58 61#define OMAP_MMC_REG_RSP6 0x16
62#define OMAP_MMC_REG_RSP7 0x5c 62#define OMAP_MMC_REG_RSP7 0x17
63#define OMAP_MMC_REG_IOSR 0x60 63#define OMAP_MMC_REG_IOSR 0x18
64#define OMAP_MMC_REG_SYSC 0x64 64#define OMAP_MMC_REG_SYSC 0x19
65#define OMAP_MMC_REG_SYSS 0x68 65#define OMAP_MMC_REG_SYSS 0x1a
66 66
67#define OMAP_MMC_STAT_CARD_ERR (1 << 14) 67#define OMAP_MMC_STAT_CARD_ERR (1 << 14)
68#define OMAP_MMC_STAT_CARD_IRQ (1 << 13) 68#define OMAP_MMC_STAT_CARD_IRQ (1 << 13)
@@ -78,8 +78,9 @@
78#define OMAP_MMC_STAT_CARD_BUSY (1 << 2) 78#define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
79#define OMAP_MMC_STAT_END_OF_CMD (1 << 0) 79#define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
80 80
81#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG_##reg) 81#define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift)
82#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg) 82#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
83#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
83 84
84/* 85/*
85 * Command types 86 * Command types
@@ -133,6 +134,7 @@ struct mmc_omap_host {
133 int irq; 134 int irq;
134 unsigned char bus_mode; 135 unsigned char bus_mode;
135 unsigned char hw_bus_mode; 136 unsigned char hw_bus_mode;
137 unsigned int reg_shift;
136 138
137 struct work_struct cmd_abort_work; 139 struct work_struct cmd_abort_work;
138 unsigned abort:1; 140 unsigned abort:1;
@@ -680,9 +682,9 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
680 host->data->bytes_xfered += n; 682 host->data->bytes_xfered += n;
681 683
682 if (write) { 684 if (write) {
683 __raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n); 685 __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
684 } else { 686 } else {
685 __raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n); 687 __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
686 } 688 }
687} 689}
688 690
@@ -900,7 +902,7 @@ mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
900 int dst_port = 0; 902 int dst_port = 0;
901 int sync_dev = 0; 903 int sync_dev = 0;
902 904
903 data_addr = host->phys_base + OMAP_MMC_REG_DATA; 905 data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
904 frame = data->blksz; 906 frame = data->blksz;
905 count = sg_dma_len(sg); 907 count = sg_dma_len(sg);
906 908
@@ -1493,6 +1495,8 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1493 } 1495 }
1494 } 1496 }
1495 1497
1498 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
1499
1496 return 0; 1500 return 0;
1497 1501
1498err_plat_cleanup: 1502err_plat_cleanup:
@@ -1557,7 +1561,7 @@ static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1557 struct mmc_omap_slot *slot; 1561 struct mmc_omap_slot *slot;
1558 1562
1559 slot = host->slots[i]; 1563 slot = host->slots[i];
1560 ret = mmc_suspend_host(slot->mmc, mesg); 1564 ret = mmc_suspend_host(slot->mmc);
1561 if (ret < 0) { 1565 if (ret < 0) {
1562 while (--i >= 0) { 1566 while (--i >= 0) {
1563 slot = host->slots[i]; 1567 slot = host->slots[i];
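
The omap.c rework stops hard-coding byte offsets: register addresses become indices that OMAP_MMC_REG() scales by a per-SoC shift (1 on OMAP7xx, where registers sit 2 bytes apart; 2 elsewhere). The same technique in isolation, with illustrative FOO_* names:

#include <linux/io.h>

#define FOO_REG_DATA		0x08	/* register index, not a byte offset */
#define FOO_REG(host, reg)	(FOO_REG_##reg << (host)->reg_shift)

struct foo_host {
	void __iomem	*virt_base;
	unsigned int	reg_shift;	/* 1 -> 16-bit spacing, 2 -> 32-bit */
};

static u16 foo_read_data(struct foo_host *host)
{
	return __raw_readw(host->virt_base + FOO_REG(host, DATA));
}
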
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e9caf694c59e..b032828c6126 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -157,12 +157,10 @@ struct omap_hsmmc_host {
157 */ 157 */
158 struct regulator *vcc; 158 struct regulator *vcc;
159 struct regulator *vcc_aux; 159 struct regulator *vcc_aux;
160 struct semaphore sem;
161 struct work_struct mmc_carddetect_work; 160 struct work_struct mmc_carddetect_work;
162 void __iomem *base; 161 void __iomem *base;
163 resource_size_t mapbase; 162 resource_size_t mapbase;
164 spinlock_t irq_lock; /* Prevent races with irq handler */ 163 spinlock_t irq_lock; /* Prevent races with irq handler */
165 unsigned long flags;
166 unsigned int id; 164 unsigned int id;
167 unsigned int dma_len; 165 unsigned int dma_len;
168 unsigned int dma_sg_idx; 166 unsigned int dma_sg_idx;
@@ -183,6 +181,7 @@ struct omap_hsmmc_host {
183 int protect_card; 181 int protect_card;
184 int reqs_blocked; 182 int reqs_blocked;
185 int use_reg; 183 int use_reg;
184 int req_in_progress;
186 185
187 struct omap_mmc_platform_data *pdata; 186 struct omap_mmc_platform_data *pdata;
188}; 187};
@@ -524,6 +523,27 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
524 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); 523 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n");
525} 524}
526 525
526static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host)
527{
528 unsigned int irq_mask;
529
530 if (host->use_dma)
531 irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE);
532 else
533 irq_mask = INT_EN_MASK;
534
535 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
536 OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
537 OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
538}
539
540static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
541{
542 OMAP_HSMMC_WRITE(host->base, ISE, 0);
543 OMAP_HSMMC_WRITE(host->base, IE, 0);
544 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
545}
546
527#ifdef CONFIG_PM 547#ifdef CONFIG_PM
528 548
529/* 549/*
@@ -592,9 +612,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
592 && time_before(jiffies, timeout)) 612 && time_before(jiffies, timeout))
593 ; 613 ;
594 614
595 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); 615 omap_hsmmc_disable_irq(host);
596 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
597 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
598 616
599 /* Do not initialize card-specific things if the power is off */ 617 /* Do not initialize card-specific things if the power is off */
600 if (host->power_mode == MMC_POWER_OFF) 618 if (host->power_mode == MMC_POWER_OFF)
@@ -697,6 +715,8 @@ static void send_init_stream(struct omap_hsmmc_host *host)
697 return; 715 return;
698 716
699 disable_irq(host->irq); 717 disable_irq(host->irq);
718
719 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
700 OMAP_HSMMC_WRITE(host->base, CON, 720 OMAP_HSMMC_WRITE(host->base, CON,
701 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM); 721 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
702 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD); 722 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
@@ -762,17 +782,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
762 mmc_hostname(host->mmc), cmd->opcode, cmd->arg); 782 mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
763 host->cmd = cmd; 783 host->cmd = cmd;
764 784
765 /* 785 omap_hsmmc_enable_irq(host);
766 * Clear status bits and enable interrupts
767 */
768 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
769 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
770
771 if (host->use_dma)
772 OMAP_HSMMC_WRITE(host->base, IE,
773 INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
774 else
775 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
776 786
777 host->response_busy = 0; 787 host->response_busy = 0;
778 if (cmd->flags & MMC_RSP_PRESENT) { 788 if (cmd->flags & MMC_RSP_PRESENT) {
@@ -806,13 +816,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
806 if (host->use_dma) 816 if (host->use_dma)
807 cmdreg |= DMA_EN; 817 cmdreg |= DMA_EN;
808 818
809 /* 819 host->req_in_progress = 1;
810 * In an interrupt context (i.e. STOP command), the spinlock is unlocked
811 * by the interrupt handler, otherwise (i.e. for a new request) it is
812 * unlocked here.
813 */
814 if (!in_interrupt())
815 spin_unlock_irqrestore(&host->irq_lock, host->flags);
816 820
817 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg); 821 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
818 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg); 822 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
@@ -827,6 +831,23 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
827 return DMA_FROM_DEVICE; 831 return DMA_FROM_DEVICE;
828} 832}
829 833
834static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
835{
836 int dma_ch;
837
838 spin_lock(&host->irq_lock);
839 host->req_in_progress = 0;
840 dma_ch = host->dma_ch;
841 spin_unlock(&host->irq_lock);
842
843 omap_hsmmc_disable_irq(host);
844 /* Do not complete the request if DMA is still in progress */
845 if (mrq->data && host->use_dma && dma_ch != -1)
846 return;
847 host->mrq = NULL;
848 mmc_request_done(host->mmc, mrq);
849}
850
830/* 851/*
831 * Notify the transfer complete to MMC core 852 * Notify the transfer complete to MMC core
832 */ 853 */
@@ -843,25 +864,19 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
843 return; 864 return;
844 } 865 }
845 866
846 host->mrq = NULL; 867 omap_hsmmc_request_done(host, mrq);
847 mmc_request_done(host->mmc, mrq);
848 return; 868 return;
849 } 869 }
850 870
851 host->data = NULL; 871 host->data = NULL;
852 872
853 if (host->use_dma && host->dma_ch != -1)
854 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
855 omap_hsmmc_get_dma_dir(host, data));
856
857 if (!data->error) 873 if (!data->error)
858 data->bytes_xfered += data->blocks * (data->blksz); 874 data->bytes_xfered += data->blocks * (data->blksz);
859 else 875 else
860 data->bytes_xfered = 0; 876 data->bytes_xfered = 0;
861 877
862 if (!data->stop) { 878 if (!data->stop) {
863 host->mrq = NULL; 879 omap_hsmmc_request_done(host, data->mrq);
864 mmc_request_done(host->mmc, data->mrq);
865 return; 880 return;
866 } 881 }
867 omap_hsmmc_start_command(host, data->stop, NULL); 882 omap_hsmmc_start_command(host, data->stop, NULL);
@@ -887,10 +902,8 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
887 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10); 902 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
888 } 903 }
889 } 904 }
890 if ((host->data == NULL && !host->response_busy) || cmd->error) { 905 if ((host->data == NULL && !host->response_busy) || cmd->error)
891 host->mrq = NULL; 906 omap_hsmmc_request_done(host, cmd->mrq);
892 mmc_request_done(host->mmc, cmd->mrq);
893 }
894} 907}
895 908
896/* 909/*
@@ -898,14 +911,19 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
898 */ 911 */
899static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) 912static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
900{ 913{
914 int dma_ch;
915
901 host->data->error = errno; 916 host->data->error = errno;
902 917
903 if (host->use_dma && host->dma_ch != -1) { 918 spin_lock(&host->irq_lock);
919 dma_ch = host->dma_ch;
920 host->dma_ch = -1;
921 spin_unlock(&host->irq_lock);
922
923 if (host->use_dma && dma_ch != -1) {
904 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, 924 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
905 omap_hsmmc_get_dma_dir(host, host->data)); 925 omap_hsmmc_get_dma_dir(host, host->data));
906 omap_free_dma(host->dma_ch); 926 omap_free_dma(dma_ch);
907 host->dma_ch = -1;
908 up(&host->sem);
909 } 927 }
910 host->data = NULL; 928 host->data = NULL;
911} 929}
@@ -967,28 +985,21 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
967 __func__); 985 __func__);
968} 986}
969 987
970/* 988static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
971 * MMC controller IRQ handler
972 */
973static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
974{ 989{
975 struct omap_hsmmc_host *host = dev_id;
976 struct mmc_data *data; 990 struct mmc_data *data;
977 int end_cmd = 0, end_trans = 0, status; 991 int end_cmd = 0, end_trans = 0;
978 992
979 spin_lock(&host->irq_lock); 993 if (!host->req_in_progress) {
980 994 do {
981 if (host->mrq == NULL) { 995 OMAP_HSMMC_WRITE(host->base, STAT, status);
982 OMAP_HSMMC_WRITE(host->base, STAT, 996 /* Flush posted write */
983 OMAP_HSMMC_READ(host->base, STAT)); 997 status = OMAP_HSMMC_READ(host->base, STAT);
984 /* Flush posted write */ 998 } while (status & INT_EN_MASK);
985 OMAP_HSMMC_READ(host->base, STAT); 999 return;
986 spin_unlock(&host->irq_lock);
987 return IRQ_HANDLED;
988 } 1000 }
989 1001
990 data = host->data; 1002 data = host->data;
991 status = OMAP_HSMMC_READ(host->base, STAT);
992 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); 1003 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
993 1004
994 if (status & ERR) { 1005 if (status & ERR) {
@@ -1041,15 +1052,27 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
1041 } 1052 }
1042 1053
1043 OMAP_HSMMC_WRITE(host->base, STAT, status); 1054 OMAP_HSMMC_WRITE(host->base, STAT, status);
1044 /* Flush posted write */
1045 OMAP_HSMMC_READ(host->base, STAT);
1046 1055
1047 if (end_cmd || ((status & CC) && host->cmd)) 1056 if (end_cmd || ((status & CC) && host->cmd))
1048 omap_hsmmc_cmd_done(host, host->cmd); 1057 omap_hsmmc_cmd_done(host, host->cmd);
1049 if ((end_trans || (status & TC)) && host->mrq) 1058 if ((end_trans || (status & TC)) && host->mrq)
1050 omap_hsmmc_xfer_done(host, data); 1059 omap_hsmmc_xfer_done(host, data);
1060}
1051 1061
1052 spin_unlock(&host->irq_lock); 1062/*
1063 * MMC controller IRQ handler
1064 */
1065static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
1066{
1067 struct omap_hsmmc_host *host = dev_id;
1068 int status;
1069
1070 status = OMAP_HSMMC_READ(host->base, STAT);
1071 do {
1072 omap_hsmmc_do_irq(host, status);
1073 /* Flush posted write */
1074 status = OMAP_HSMMC_READ(host->base, STAT);
1075 } while (status & INT_EN_MASK);
1053 1076
1054 return IRQ_HANDLED; 1077 return IRQ_HANDLED;
1055} 1078}
@@ -1244,31 +1267,47 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
1244/* 1267/*
1245 * DMA call back function 1268 * DMA call back function
1246 */ 1269 */
1247static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data) 1270static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1248{ 1271{
1249 struct omap_hsmmc_host *host = data; 1272 struct omap_hsmmc_host *host = cb_data;
1273 struct mmc_data *data = host->mrq->data;
1274 int dma_ch, req_in_progress;
1250 1275
1251 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ) 1276 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
1252 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n"); 1277 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
1253 1278
1254 if (host->dma_ch < 0) 1279 spin_lock(&host->irq_lock);
1280 if (host->dma_ch < 0) {
1281 spin_unlock(&host->irq_lock);
1255 return; 1282 return;
1283 }
1256 1284
1257 host->dma_sg_idx++; 1285 host->dma_sg_idx++;
1258 if (host->dma_sg_idx < host->dma_len) { 1286 if (host->dma_sg_idx < host->dma_len) {
1259 /* Fire up the next transfer. */ 1287 /* Fire up the next transfer. */
1260 omap_hsmmc_config_dma_params(host, host->data, 1288 omap_hsmmc_config_dma_params(host, data,
1261 host->data->sg + host->dma_sg_idx); 1289 data->sg + host->dma_sg_idx);
1290 spin_unlock(&host->irq_lock);
1262 return; 1291 return;
1263 } 1292 }
1264 1293
1265 omap_free_dma(host->dma_ch); 1294 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
1295 omap_hsmmc_get_dma_dir(host, data));
1296
1297 req_in_progress = host->req_in_progress;
1298 dma_ch = host->dma_ch;
1266 host->dma_ch = -1; 1299 host->dma_ch = -1;
1267 /* 1300 spin_unlock(&host->irq_lock);
1268 * DMA Callback: run in interrupt context. 1301
1269 * mutex_unlock will throw a kernel warning if used. 1302 omap_free_dma(dma_ch);
1270 */ 1303
1271 up(&host->sem); 1304 /* If DMA has finished after TC, complete the request */
1305 if (!req_in_progress) {
1306 struct mmc_request *mrq = host->mrq;
1307
1308 host->mrq = NULL;
1309 mmc_request_done(host->mmc, mrq);
1310 }
1272} 1311}
1273 1312
1274/* 1313/*
@@ -1277,7 +1316,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
1277static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, 1316static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1278 struct mmc_request *req) 1317 struct mmc_request *req)
1279{ 1318{
1280 int dma_ch = 0, ret = 0, err = 1, i; 1319 int dma_ch = 0, ret = 0, i;
1281 struct mmc_data *data = req->data; 1320 struct mmc_data *data = req->data;
1282 1321
1283 /* Sanity check: all the SG entries must be aligned by block size. */ 1322 /* Sanity check: all the SG entries must be aligned by block size. */
@@ -1294,23 +1333,7 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1294 */ 1333 */
1295 return -EINVAL; 1334 return -EINVAL;
1296 1335
1297 /* 1336 BUG_ON(host->dma_ch != -1);
1298 * If for some reason the DMA transfer is still active,
1299 * we wait for timeout period and free the dma
1300 */
1301 if (host->dma_ch != -1) {
1302 set_current_state(TASK_UNINTERRUPTIBLE);
1303 schedule_timeout(100);
1304 if (down_trylock(&host->sem)) {
1305 omap_free_dma(host->dma_ch);
1306 host->dma_ch = -1;
1307 up(&host->sem);
1308 return err;
1309 }
1310 } else {
1311 if (down_trylock(&host->sem))
1312 return err;
1313 }
1314 1337
1315 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), 1338 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
1316 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); 1339 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
@@ -1410,37 +1433,27 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1410 struct omap_hsmmc_host *host = mmc_priv(mmc); 1433 struct omap_hsmmc_host *host = mmc_priv(mmc);
1411 int err; 1434 int err;
1412 1435
1413 /* 1436 BUG_ON(host->req_in_progress);
1414 * Prevent races with the interrupt handler because of unexpected 1437 BUG_ON(host->dma_ch != -1);
1415 * interrupts, but not if we are already in interrupt context i.e. 1438 if (host->protect_card) {
1416 * retries. 1439 if (host->reqs_blocked < 3) {
1417 */ 1440 /*
1418 if (!in_interrupt()) { 1441 * Ensure the controller is left in a consistent
1419 spin_lock_irqsave(&host->irq_lock, host->flags); 1442 * state by resetting the command and data state
1420 /* 1443 * machines.
1421 * Protect the card from I/O if there is a possibility 1444 */
1422 * it can be removed. 1445 omap_hsmmc_reset_controller_fsm(host, SRD);
1423 */ 1446 omap_hsmmc_reset_controller_fsm(host, SRC);
1424 if (host->protect_card) { 1447 host->reqs_blocked += 1;
1425 if (host->reqs_blocked < 3) { 1448 }
1426 /* 1449 req->cmd->error = -EBADF;
1427 * Ensure the controller is left in a consistent 1450 if (req->data)
1428 * state by resetting the command and data state 1451 req->data->error = -EBADF;
1429 * machines. 1452 req->cmd->retries = 0;
1430 */ 1453 mmc_request_done(mmc, req);
1431 omap_hsmmc_reset_controller_fsm(host, SRD); 1454 return;
1432 omap_hsmmc_reset_controller_fsm(host, SRC); 1455 } else if (host->reqs_blocked)
1433 host->reqs_blocked += 1; 1456 host->reqs_blocked = 0;
1434 }
1435 req->cmd->error = -EBADF;
1436 if (req->data)
1437 req->data->error = -EBADF;
1438 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1439 mmc_request_done(mmc, req);
1440 return;
1441 } else if (host->reqs_blocked)
1442 host->reqs_blocked = 0;
1443 }
1444 WARN_ON(host->mrq != NULL); 1457 WARN_ON(host->mrq != NULL);
1445 host->mrq = req; 1458 host->mrq = req;
1446 err = omap_hsmmc_prepare_data(host, req); 1459 err = omap_hsmmc_prepare_data(host, req);
@@ -1449,8 +1462,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1449 if (req->data) 1462 if (req->data)
1450 req->data->error = err; 1463 req->data->error = err;
1451 host->mrq = NULL; 1464 host->mrq = NULL;
1452 if (!in_interrupt())
1453 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1454 mmc_request_done(mmc, req); 1465 mmc_request_done(mmc, req);
1455 return; 1466 return;
1456 } 1467 }
@@ -2019,7 +2030,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2019 mmc->f_min = 400000; 2030 mmc->f_min = 400000;
2020 mmc->f_max = 52000000; 2031 mmc->f_max = 52000000;
2021 2032
2022 sema_init(&host->sem, 1);
2023 spin_lock_init(&host->irq_lock); 2033 spin_lock_init(&host->irq_lock);
2024 2034
2025 host->iclk = clk_get(&pdev->dev, "ick"); 2035 host->iclk = clk_get(&pdev->dev, "ick");
@@ -2162,8 +2172,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2162 } 2172 }
2163 } 2173 }
2164 2174
2165 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); 2175 omap_hsmmc_disable_irq(host);
2166 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
2167 2176
2168 mmc_host_lazy_disable(host->mmc); 2177 mmc_host_lazy_disable(host->mmc);
2169 2178
@@ -2258,10 +2267,12 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2258} 2267}
2259 2268
2260#ifdef CONFIG_PM 2269#ifdef CONFIG_PM
2261static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state) 2270static int omap_hsmmc_suspend(struct device *dev)
2262{ 2271{
2263 int ret = 0; 2272 int ret = 0;
2273 struct platform_device *pdev = to_platform_device(dev);
2264 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2274 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2275 pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
2265 2276
2266 if (host && host->suspended) 2277 if (host && host->suspended)
2267 return 0; 2278 return 0;
@@ -2281,12 +2292,9 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
2281 } 2292 }
2282 cancel_work_sync(&host->mmc_carddetect_work); 2293 cancel_work_sync(&host->mmc_carddetect_work);
2283 mmc_host_enable(host->mmc); 2294 mmc_host_enable(host->mmc);
2284 ret = mmc_suspend_host(host->mmc, state); 2295 ret = mmc_suspend_host(host->mmc);
2285 if (ret == 0) { 2296 if (ret == 0) {
2286 OMAP_HSMMC_WRITE(host->base, ISE, 0); 2297 omap_hsmmc_disable_irq(host);
2287 OMAP_HSMMC_WRITE(host->base, IE, 0);
2288
2289
2290 OMAP_HSMMC_WRITE(host->base, HCTL, 2298 OMAP_HSMMC_WRITE(host->base, HCTL,
2291 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); 2299 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
2292 mmc_host_disable(host->mmc); 2300 mmc_host_disable(host->mmc);
@@ -2310,9 +2318,10 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
2310} 2318}
2311 2319
2312/* Routine to resume the MMC device */ 2320/* Routine to resume the MMC device */
2313static int omap_hsmmc_resume(struct platform_device *pdev) 2321static int omap_hsmmc_resume(struct device *dev)
2314{ 2322{
2315 int ret = 0; 2323 int ret = 0;
2324 struct platform_device *pdev = to_platform_device(dev);
2316 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2325 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2317 2326
2318 if (host && !host->suspended) 2327 if (host && !host->suspended)
@@ -2363,13 +2372,17 @@ clk_en_err:
2363#define omap_hsmmc_resume NULL 2372#define omap_hsmmc_resume NULL
2364#endif 2373#endif
2365 2374
2366static struct platform_driver omap_hsmmc_driver = { 2375static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
2367 .remove = omap_hsmmc_remove,
2368 .suspend = omap_hsmmc_suspend, 2376 .suspend = omap_hsmmc_suspend,
2369 .resume = omap_hsmmc_resume, 2377 .resume = omap_hsmmc_resume,
2378};
2379
2380static struct platform_driver omap_hsmmc_driver = {
2381 .remove = omap_hsmmc_remove,
2370 .driver = { 2382 .driver = {
2371 .name = DRIVER_NAME, 2383 .name = DRIVER_NAME,
2372 .owner = THIS_MODULE, 2384 .owner = THIS_MODULE,
2385 .pm = &omap_hsmmc_dev_pm_ops,
2373 }, 2386 },
2374}; 2387};
2375 2388
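
The omap_hsmmc rework above retires the semaphore and the in_interrupt() lock juggling in favour of two flags guarded by irq_lock: a request completes only once both the interrupt path (req_in_progress cleared) and the DMA callback (dma_ch reset to -1) are done, whichever finishes last. The core of that hand-off, condensed from the patch with a minimal stand-in struct:

#include <linux/mmc/host.h>
#include <linux/spinlock.h>

struct foo_host {
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	spinlock_t		irq_lock;
	int			dma_ch;		/* -1 when no DMA in flight */
	int			req_in_progress;
	int			use_dma;
};

static void foo_request_done(struct foo_host *host, struct mmc_request *mrq)
{
	int dma_ch;

	spin_lock(&host->irq_lock);
	host->req_in_progress = 0;
	dma_ch = host->dma_ch;
	spin_unlock(&host->irq_lock);

	/* DMA still running: its callback will observe req_in_progress == 0
	 * and call mmc_request_done() itself */
	if (mrq->data && host->use_dma && dma_ch != -1)
		return;

	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}
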
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index e4f00e70a749..0a4e43f37140 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -813,7 +813,7 @@ static int pxamci_suspend(struct device *dev)
813 int ret = 0; 813 int ret = 0;
814 814
815 if (mmc) 815 if (mmc)
816 ret = mmc_suspend_host(mmc, PMSG_SUSPEND); 816 ret = mmc_suspend_host(mmc);
817 817
818 return ret; 818 return ret;
819} 819}
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2fdf7689ae6c..2e16e0a90a5e 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1881,9 +1881,8 @@ MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
1881static int s3cmci_suspend(struct device *dev) 1881static int s3cmci_suspend(struct device *dev)
1882{ 1882{
1883 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); 1883 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
1884 struct pm_message event = { PM_EVENT_SUSPEND };
1885 1884
1886 return mmc_suspend_host(mmc, event); 1885 return mmc_suspend_host(mmc);
1887} 1886}
1888 1887
1889static int s3cmci_resume(struct device *dev) 1888static int s3cmci_resume(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index 55e33135edb4..a2e9820cd42f 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -89,7 +89,7 @@ static int sdhci_of_suspend(struct of_device *ofdev, pm_message_t state)
89{ 89{
90 struct sdhci_host *host = dev_get_drvdata(&ofdev->dev); 90 struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
91 91
92 return mmc_suspend_host(host->mmc, state); 92 return mmc_suspend_host(host->mmc);
93} 93}
94 94
95static int sdhci_of_resume(struct of_device *ofdev) 95static int sdhci_of_resume(struct of_device *ofdev)
@@ -118,7 +118,7 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
118static int __devinit sdhci_of_probe(struct of_device *ofdev, 118static int __devinit sdhci_of_probe(struct of_device *ofdev,
119 const struct of_device_id *match) 119 const struct of_device_id *match)
120{ 120{
121 struct device_node *np = ofdev->node; 121 struct device_node *np = ofdev->dev.of_node;
122 struct sdhci_of_data *sdhci_of_data = match->data; 122 struct sdhci_of_data *sdhci_of_data = match->data;
123 struct sdhci_host *host; 123 struct sdhci_host *host;
124 struct sdhci_of_host *of_host; 124 struct sdhci_of_host *of_host;
@@ -205,8 +205,11 @@ static const struct of_device_id sdhci_of_match[] = {
205MODULE_DEVICE_TABLE(of, sdhci_of_match); 205MODULE_DEVICE_TABLE(of, sdhci_of_match);
206 206
207static struct of_platform_driver sdhci_of_driver = { 207static struct of_platform_driver sdhci_of_driver = {
208 .driver.name = "sdhci-of", 208 .driver = {
209 .match_table = sdhci_of_match, 209 .name = "sdhci-of",
210 .owner = THIS_MODULE,
211 .of_match_table = sdhci_of_match,
212 },
210 .probe = sdhci_of_probe, 213 .probe = sdhci_of_probe,
211 .remove = __devexit_p(sdhci_of_remove), 214 .remove = __devexit_p(sdhci_of_remove),
212 .suspend = sdhci_of_suspend, 215 .suspend = sdhci_of_suspend,
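
The driver-structure change above is another tree-wide migration: the match table and owner move from of_platform_driver fields into the embedded struct device_driver. Sketch, with a made-up compatible string:

#include <linux/module.h>
#include <linux/of_platform.h>

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "acme,foo" },
	{ /* sentinel */ },
};

static struct of_platform_driver foo_of_driver = {
	.driver = {
		.name		= "foo-of",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_of_match,	/* was .match_table on the outer struct */
	},
};
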
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d5b11a17e648..c8623de13af3 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -129,12 +129,12 @@ struct sdhci_of_data sdhci_esdhc = {
129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | 129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
130 SDHCI_QUIRK_NO_CARD_NO_RESET, 130 SDHCI_QUIRK_NO_CARD_NO_RESET,
131 .ops = { 131 .ops = {
132 .readl = sdhci_be32bs_readl, 132 .read_l = sdhci_be32bs_readl,
133 .readw = esdhc_readw, 133 .read_w = esdhc_readw,
134 .readb = sdhci_be32bs_readb, 134 .read_b = sdhci_be32bs_readb,
135 .writel = sdhci_be32bs_writel, 135 .write_l = sdhci_be32bs_writel,
136 .writew = esdhc_writew, 136 .write_w = esdhc_writew,
137 .writeb = esdhc_writeb, 137 .write_b = esdhc_writeb,
138 .set_clock = esdhc_set_clock, 138 .set_clock = esdhc_set_clock,
139 .enable_dma = esdhc_enable_dma, 139 .enable_dma = esdhc_enable_dma,
140 .get_max_clock = esdhc_get_max_clock, 140 .get_max_clock = esdhc_get_max_clock,
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 35117f3ed757..68ddb7546ae2 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -55,11 +55,11 @@ struct sdhci_of_data sdhci_hlwd = {
55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | 55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
56 SDHCI_QUIRK_32BIT_DMA_SIZE, 56 SDHCI_QUIRK_32BIT_DMA_SIZE,
57 .ops = { 57 .ops = {
58 .readl = sdhci_be32bs_readl, 58 .read_l = sdhci_be32bs_readl,
59 .readw = sdhci_be32bs_readw, 59 .read_w = sdhci_be32bs_readw,
60 .readb = sdhci_be32bs_readb, 60 .read_b = sdhci_be32bs_readb,
61 .writel = sdhci_hlwd_writel, 61 .write_l = sdhci_hlwd_writel,
62 .writew = sdhci_hlwd_writew, 62 .write_w = sdhci_hlwd_writew,
63 .writeb = sdhci_hlwd_writeb, 63 .write_b = sdhci_hlwd_writeb,
64 }, 64 },
65}; 65};
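
The readl -> read_l renames here, in sdhci-of-esdhc above, and in sdhci.h below are purely mechanical, presumably so the member names cannot collide on architectures that implement readl()/writel() and friends as preprocessor macros. After the rename a host simply fills in the underscored members:

static struct sdhci_ops foo_ops = {
	.read_l	 = sdhci_be32bs_readl,	/* was .readl */
	.write_l = sdhci_be32bs_writel,	/* was .writel */
};
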
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 6701af629c30..65483fdea45b 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -628,7 +628,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
628 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); 628 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot));
629 if (IS_ERR(host)) { 629 if (IS_ERR(host)) {
630 dev_err(&pdev->dev, "cannot allocate host\n"); 630 dev_err(&pdev->dev, "cannot allocate host\n");
631 return ERR_PTR(PTR_ERR(host)); 631 return ERR_CAST(host);
632 } 632 }
633 633
634 slot = sdhci_priv(host); 634 slot = sdhci_priv(host);
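
ERR_CAST() from <linux/err.h> is the idiom the sdhci-pci hunk switches to: it forwards an error pointer across a pointer-type change without the ERR_PTR(PTR_ERR(...)) round trip:

if (IS_ERR(host))
	return ERR_CAST(host);	/* was: return ERR_PTR(PTR_ERR(host)); */
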
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 297f40ae6ad5..b6ee0d719698 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -29,6 +29,7 @@
29#include <linux/mmc/host.h> 29#include <linux/mmc/host.h>
30 30
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/sdhci-pltfm.h>
32 33
33#include "sdhci.h" 34#include "sdhci.h"
34 35
@@ -49,19 +50,18 @@ static struct sdhci_ops sdhci_pltfm_ops = {
49 50
50static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) 51static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
51{ 52{
53 struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
52 struct sdhci_host *host; 54 struct sdhci_host *host;
53 struct resource *iomem; 55 struct resource *iomem;
54 int ret; 56 int ret;
55 57
56 BUG_ON(pdev == NULL);
57
58 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 58 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
59 if (!iomem) { 59 if (!iomem) {
60 ret = -ENOMEM; 60 ret = -ENOMEM;
61 goto err; 61 goto err;
62 } 62 }
63 63
64 if (resource_size(iomem) != 0x100) 64 if (resource_size(iomem) < 0x100)
65 dev_err(&pdev->dev, "Invalid iomem size. You may " 65 dev_err(&pdev->dev, "Invalid iomem size. You may "
66 "experience problems.\n"); 66 "experience problems.\n");
67 67
@@ -76,7 +76,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
76 } 76 }
77 77
78 host->hw_name = "platform"; 78 host->hw_name = "platform";
79 host->ops = &sdhci_pltfm_ops; 79 if (pdata && pdata->ops)
80 host->ops = pdata->ops;
81 else
82 host->ops = &sdhci_pltfm_ops;
83 if (pdata)
84 host->quirks = pdata->quirks;
80 host->irq = platform_get_irq(pdev, 0); 85 host->irq = platform_get_irq(pdev, 0);
81 86
82 if (!request_mem_region(iomem->start, resource_size(iomem), 87 if (!request_mem_region(iomem->start, resource_size(iomem),
@@ -93,6 +98,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
93 goto err_remap; 98 goto err_remap;
94 } 99 }
95 100
101 if (pdata && pdata->init) {
102 ret = pdata->init(host);
103 if (ret)
104 goto err_plat_init;
105 }
106
96 ret = sdhci_add_host(host); 107 ret = sdhci_add_host(host);
97 if (ret) 108 if (ret)
98 goto err_add_host; 109 goto err_add_host;
@@ -102,6 +113,9 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
102 return 0; 113 return 0;
103 114
104err_add_host: 115err_add_host:
116 if (pdata && pdata->exit)
117 pdata->exit(host);
118err_plat_init:
105 iounmap(host->ioaddr); 119 iounmap(host->ioaddr);
106err_remap: 120err_remap:
107 release_mem_region(iomem->start, resource_size(iomem)); 121 release_mem_region(iomem->start, resource_size(iomem));
@@ -114,6 +128,7 @@ err:
114 128
115static int __devexit sdhci_pltfm_remove(struct platform_device *pdev) 129static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
116{ 130{
131 struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
117 struct sdhci_host *host = platform_get_drvdata(pdev); 132 struct sdhci_host *host = platform_get_drvdata(pdev);
118 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 133 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
119 int dead; 134 int dead;
@@ -125,6 +140,8 @@ static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
125 dead = 1; 140 dead = 1;
126 141
127 sdhci_remove_host(host, dead); 142 sdhci_remove_host(host, dead);
143 if (pdata && pdata->exit)
144 pdata->exit(host);
128 iounmap(host->ioaddr); 145 iounmap(host->ioaddr);
129 release_mem_region(iomem->start, resource_size(iomem)); 146 release_mem_region(iomem->start, resource_size(iomem));
130 sdhci_free_host(host); 147 sdhci_free_host(host);
@@ -165,4 +182,3 @@ MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
165MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); 182MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
166MODULE_LICENSE("GPL v2"); 183MODULE_LICENSE("GPL v2");
167MODULE_ALIAS("platform:sdhci"); 184MODULE_ALIAS("platform:sdhci");
168
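
sdhci-pltfm now honours board-specific platform data: ops and quirks may be overridden, and optional init/exit callbacks bracket host registration, with exit also invoked on the error path. A condensed view of the resulting probe flow (declarations and most error handling elided):

struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;

host->ops = (pdata && pdata->ops) ? pdata->ops : &sdhci_pltfm_ops;
if (pdata)
	host->quirks = pdata->quirks;

if (pdata && pdata->init && pdata->init(host))
	goto err_plat_init;

ret = sdhci_add_host(host);
if (ret)
	goto err_add_host;	/* unwinds through pdata->exit(host) */
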
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 2136794c0cfa..af217924a76e 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -317,12 +317,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
317 host->irq = irq; 317 host->irq = irq;
318 318
319 /* Setup quirks for the controller */ 319 /* Setup quirks for the controller */
320 320 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
321 /* Currently with ADMA enabled we are getting some length
322 * interrupts that are not being dealt with, do disable
323 * ADMA until this is sorted out. */
324 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
325 host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE;
326 321
327#ifndef CONFIG_MMC_SDHCI_S3C_DMA 322#ifndef CONFIG_MMC_SDHCI_S3C_DMA
328 323
@@ -330,9 +325,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
330 * support as well. */ 325 * support as well. */
331 host->quirks |= SDHCI_QUIRK_BROKEN_DMA; 326 host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
332 327
333 /* PIO currently has problems with multi-block IO */
334 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
335
336#endif /* CONFIG_MMC_SDHCI_S3C_DMA */ 328#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
337 329
338 /* It seems we do not get an DATA transfer complete on non-busy 330 /* It seems we do not get an DATA transfer complete on non-busy
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
new file mode 100644
index 000000000000..d70c54c7b70a
--- /dev/null
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -0,0 +1,298 @@
1/*
2 * drivers/mmc/host/sdhci-spear.c
3 *
 4 * Support of SDHCI platform devices for the SPEAr SoC family
5 *
6 * Copyright (C) 2010 ST Microelectronics
 7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * Inspired by sdhci-pltfm.c
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/gpio.h>
19#include <linux/highmem.h>
20#include <linux/interrupt.h>
21#include <linux/irq.h>
22#include <linux/platform_device.h>
23#include <linux/slab.h>
24#include <linux/mmc/host.h>
25#include <linux/mmc/sdhci-spear.h>
26#include <linux/io.h>
27#include "sdhci.h"
28
29struct spear_sdhci {
30 struct clk *clk;
31 struct sdhci_plat_data *data;
32};
33
34/* sdhci ops */
35static struct sdhci_ops sdhci_pltfm_ops = {
36 /* Nothing to do for now. */
37};
38
39/* gpio card detection interrupt handler */
40static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
41{
42 struct platform_device *pdev = dev_id;
43 struct sdhci_host *host = platform_get_drvdata(pdev);
44 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
45 unsigned long gpio_irq_type;
46 int val;
47
48 val = gpio_get_value(sdhci->data->card_int_gpio);
49
50 /* val == 1 -> card removed, val == 0 -> card inserted */
51 /* if card removed - set irq for low level, else vice versa */
52 gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
53 set_irq_type(irq, gpio_irq_type);
54
55 if (sdhci->data->card_power_gpio >= 0) {
56 if (!sdhci->data->power_always_enb) {
57 /* if card inserted, give power, otherwise remove it */
 58 val = sdhci->data->power_active_high ? !val : val;
59 gpio_set_value(sdhci->data->card_power_gpio, val);
60 }
61 }
62
63 /* inform sdhci driver about card insertion/removal */
64 tasklet_schedule(&host->card_tasklet);
65
66 return IRQ_HANDLED;
67}
68
69static int __devinit sdhci_probe(struct platform_device *pdev)
70{
71 struct sdhci_host *host;
72 struct resource *iomem;
73 struct spear_sdhci *sdhci;
74 int ret;
75
76 BUG_ON(pdev == NULL);
77
78 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
79 if (!iomem) {
80 ret = -ENOMEM;
81 dev_dbg(&pdev->dev, "memory resource not defined\n");
82 goto err;
83 }
84
85 if (!request_mem_region(iomem->start, resource_size(iomem),
86 "spear-sdhci")) {
87 ret = -EBUSY;
88 dev_dbg(&pdev->dev, "cannot request region\n");
89 goto err;
90 }
91
92 sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
93 if (!sdhci) {
94 ret = -ENOMEM;
95 dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
96 goto err_kzalloc;
97 }
98
99 /* clk enable */
100 sdhci->clk = clk_get(&pdev->dev, NULL);
101 if (IS_ERR(sdhci->clk)) {
102 ret = PTR_ERR(sdhci->clk);
103 dev_dbg(&pdev->dev, "Error getting clock\n");
104 goto err_clk_get;
105 }
106
107 ret = clk_enable(sdhci->clk);
108 if (ret) {
109 dev_dbg(&pdev->dev, "Error enabling clock\n");
110 goto err_clk_enb;
111 }
112
113 /* overwrite platform_data */
114 sdhci->data = dev_get_platdata(&pdev->dev);
115 pdev->dev.platform_data = sdhci;
116
117 if (pdev->dev.parent)
118 host = sdhci_alloc_host(pdev->dev.parent, 0);
119 else
120 host = sdhci_alloc_host(&pdev->dev, 0);
121
122 if (IS_ERR(host)) {
123 ret = PTR_ERR(host);
124 dev_dbg(&pdev->dev, "error allocating host\n");
125 goto err_alloc_host;
126 }
127
128 host->hw_name = "sdhci";
129 host->ops = &sdhci_pltfm_ops;
130 host->irq = platform_get_irq(pdev, 0);
131 host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
132
133 host->ioaddr = ioremap(iomem->start, resource_size(iomem));
134 if (!host->ioaddr) {
135 ret = -ENOMEM;
136 dev_dbg(&pdev->dev, "failed to remap registers\n");
137 goto err_ioremap;
138 }
139
140 ret = sdhci_add_host(host);
141 if (ret) {
142 dev_dbg(&pdev->dev, "error adding host\n");
143 goto err_add_host;
144 }
145
146 platform_set_drvdata(pdev, host);
147
148 /*
 149 * Using GPIOs for sdhci power control and card-detect interrupts is
 150 * optional. If sdhci->data is NULL, the controller's native lines
 151 * are used; otherwise the GPIO lines are.
 152 * When a GPIO is selected for power control, power is disabled
 153 * after card removal and re-enabled when the card-insertion
 154 * interrupt occurs.
155 */
156 if (!sdhci->data)
157 return 0;
158
159 if (sdhci->data->card_power_gpio >= 0) {
160 int val = 0;
161
162 ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
163 if (ret < 0) {
164 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
165 sdhci->data->card_power_gpio);
166 goto err_pgpio_request;
167 }
168
169 if (sdhci->data->power_always_enb)
170 val = sdhci->data->power_active_high;
171 else
172 val = !sdhci->data->power_active_high;
173
174 ret = gpio_direction_output(sdhci->data->card_power_gpio, val);
175 if (ret) {
176 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
177 sdhci->data->card_power_gpio);
178 goto err_pgpio_direction;
179 }
180
181 gpio_set_value(sdhci->data->card_power_gpio, 1);
182 }
183
184 if (sdhci->data->card_int_gpio >= 0) {
185 ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
186 if (ret < 0) {
187 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
188 sdhci->data->card_int_gpio);
189 goto err_igpio_request;
190 }
191
192 ret = gpio_direction_input(sdhci->data->card_int_gpio);
193 if (ret) {
194 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
195 sdhci->data->card_int_gpio);
196 goto err_igpio_direction;
197 }
198 ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
199 sdhci_gpio_irq, IRQF_TRIGGER_LOW,
200 mmc_hostname(host->mmc), pdev);
201 if (ret) {
202 dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
203 sdhci->data->card_int_gpio);
204 goto err_igpio_request_irq;
205 }
206
207 }
208
209 return 0;
210
211err_igpio_request_irq:
212err_igpio_direction:
213 if (sdhci->data->card_int_gpio >= 0)
214 gpio_free(sdhci->data->card_int_gpio);
215err_igpio_request:
216err_pgpio_direction:
217 if (sdhci->data->card_power_gpio >= 0)
218 gpio_free(sdhci->data->card_power_gpio);
219err_pgpio_request:
220 platform_set_drvdata(pdev, NULL);
221 sdhci_remove_host(host, 1);
222err_add_host:
223 iounmap(host->ioaddr);
224err_ioremap:
225 sdhci_free_host(host);
226err_alloc_host:
227 clk_disable(sdhci->clk);
228err_clk_enb:
229 clk_put(sdhci->clk);
230err_clk_get:
231 kfree(sdhci);
232err_kzalloc:
233 release_mem_region(iomem->start, resource_size(iomem));
234err:
235 dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
236 return ret;
237}
238
239static int __devexit sdhci_remove(struct platform_device *pdev)
240{
241 struct sdhci_host *host = platform_get_drvdata(pdev);
242 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
243 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
244 int dead;
245 u32 scratch;
246
247 if (sdhci->data) {
248 if (sdhci->data->card_int_gpio >= 0) {
249 free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
250 gpio_free(sdhci->data->card_int_gpio);
251 }
252
253 if (sdhci->data->card_power_gpio >= 0)
254 gpio_free(sdhci->data->card_power_gpio);
255 }
256
257 platform_set_drvdata(pdev, NULL);
258 dead = 0;
259 scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
260 if (scratch == (u32)-1)
261 dead = 1;
262
263 sdhci_remove_host(host, dead);
264 iounmap(host->ioaddr);
265 sdhci_free_host(host);
266 clk_disable(sdhci->clk);
267 clk_put(sdhci->clk);
268 kfree(sdhci);
269 if (iomem)
270 release_mem_region(iomem->start, resource_size(iomem));
271
272 return 0;
273}
274
275static struct platform_driver sdhci_driver = {
276 .driver = {
277 .name = "sdhci",
278 .owner = THIS_MODULE,
279 },
280 .probe = sdhci_probe,
281 .remove = __devexit_p(sdhci_remove),
282};
283
284static int __init sdhci_init(void)
285{
286 return platform_driver_register(&sdhci_driver);
287}
288module_init(sdhci_init);
289
290static void __exit sdhci_exit(void)
291{
292 platform_driver_unregister(&sdhci_driver);
293}
294module_exit(sdhci_exit);
295
296MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
297MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
298MODULE_LICENSE("GPL v2");
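
One detail of the new sdhci-spear driver worth noting: the card-detect GPIO interrupt is level-triggered and re-armed for the opposite level on each invocation, which emulates edge detection for both insertion and removal where edge triggers are unavailable. A sketch of just that trick; CD_GPIO is a placeholder:

#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#define CD_GPIO	42			/* hypothetical card-detect GPIO */

static irqreturn_t foo_cd_irq(int irq, void *dev_id)
{
	int val = gpio_get_value(CD_GPIO);	/* 1 = removed, 0 = inserted */

	/* re-arm for the opposite level so the next transition fires too */
	set_irq_type(irq, val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH);

	/* ... switch card power, notify the core ... */
	return IRQ_HANDLED;
}
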
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9d4fdfa685e5..c6d1bd8d4ac4 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -496,12 +496,22 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 		WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
 	}
 
-	/*
-	 * Add a terminating entry.
-	 */
+	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
+		/*
+		 * Mark the last descriptor as the terminating descriptor
+		 */
+		if (desc != host->adma_desc) {
+			desc -= 8;
+			desc[0] |= 0x2; /* end */
+		}
+	} else {
+		/*
+		 * Add a terminating entry.
+		 */
 
-	/* nop, end, valid */
-	sdhci_set_adma_desc(desc, 0, 0, 0x3);
+		/* nop, end, valid */
+		sdhci_set_adma_desc(desc, 0, 0, 0x3);
+	}
 
 	/*
 	 * Resync align buffer as we might have changed it.
@@ -1587,7 +1597,7 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
 
 	sdhci_disable_card_detection(host);
 
-	ret = mmc_suspend_host(host->mmc, state);
+	ret = mmc_suspend_host(host->mmc);
 	if (ret)
 		return ret;
 
@@ -1744,7 +1754,8 @@ int sdhci_add_host(struct sdhci_host *host)
 	host->max_clk =
 		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
 	host->max_clk *= 1000000;
-	if (host->max_clk == 0) {
+	if (host->max_clk == 0 || host->quirks &
+			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
 		if (!host->ops->get_max_clock) {
 			printk(KERN_ERR
 				"%s: Hardware doesn't specify base clock "
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 842f46f94284..c8468134adc9 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -127,7 +127,7 @@
 #define  SDHCI_INT_DATA_MASK	(SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
 		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
 		SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
-		SDHCI_INT_DATA_END_BIT | SDHCI_ADMA_ERROR)
+		SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR)
 #define SDHCI_INT_ALL_MASK	((unsigned int)-1)
 
 #define SDHCI_ACMD12_ERR	0x3C
@@ -236,6 +236,10 @@ struct sdhci_host {
 #define SDHCI_QUIRK_DELAY_AFTER_POWER			(1<<23)
 /* Controller uses SDCLK instead of TMCLK for data timeouts */
 #define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK		(1<<24)
+/* Controller reports wrong base clock capability */
+#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN		(1<<25)
+/* Controller cannot support End Attribute in NOP ADMA descriptor */
+#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC		(1<<26)
 
 	int			irq;		/* Device IRQ */
 	void __iomem *		ioaddr;		/* Mapped address */
@@ -294,12 +298,12 @@ struct sdhci_host {
 
 struct sdhci_ops {
 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
-	u32		(*readl)(struct sdhci_host *host, int reg);
-	u16		(*readw)(struct sdhci_host *host, int reg);
-	u8		(*readb)(struct sdhci_host *host, int reg);
-	void		(*writel)(struct sdhci_host *host, u32 val, int reg);
-	void		(*writew)(struct sdhci_host *host, u16 val, int reg);
-	void		(*writeb)(struct sdhci_host *host, u8 val, int reg);
+	u32		(*read_l)(struct sdhci_host *host, int reg);
+	u16		(*read_w)(struct sdhci_host *host, int reg);
+	u8		(*read_b)(struct sdhci_host *host, int reg);
+	void		(*write_l)(struct sdhci_host *host, u32 val, int reg);
+	void		(*write_w)(struct sdhci_host *host, u16 val, int reg);
+	void		(*write_b)(struct sdhci_host *host, u8 val, int reg);
 #endif
 
 	void	(*set_clock)(struct sdhci_host *host, unsigned int clock);
@@ -314,48 +318,48 @@ struct sdhci_ops {
 
 static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg)
 {
-	if (unlikely(host->ops->writel))
-		host->ops->writel(host, val, reg);
+	if (unlikely(host->ops->write_l))
+		host->ops->write_l(host, val, reg);
 	else
 		writel(val, host->ioaddr + reg);
 }
 
 static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 {
-	if (unlikely(host->ops->writew))
-		host->ops->writew(host, val, reg);
+	if (unlikely(host->ops->write_w))
+		host->ops->write_w(host, val, reg);
 	else
 		writew(val, host->ioaddr + reg);
 }
 
 static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
 {
-	if (unlikely(host->ops->writeb))
-		host->ops->writeb(host, val, reg);
+	if (unlikely(host->ops->write_b))
+		host->ops->write_b(host, val, reg);
 	else
 		writeb(val, host->ioaddr + reg);
 }
 
 static inline u32 sdhci_readl(struct sdhci_host *host, int reg)
 {
-	if (unlikely(host->ops->readl))
-		return host->ops->readl(host, reg);
+	if (unlikely(host->ops->read_l))
+		return host->ops->read_l(host, reg);
 	else
 		return readl(host->ioaddr + reg);
 }
 
 static inline u16 sdhci_readw(struct sdhci_host *host, int reg)
 {
-	if (unlikely(host->ops->readw))
-		return host->ops->readw(host, reg);
+	if (unlikely(host->ops->read_w))
+		return host->ops->read_w(host, reg);
 	else
 		return readw(host->ioaddr + reg);
 }
 
 static inline u8 sdhci_readb(struct sdhci_host *host, int reg)
 {
-	if (unlikely(host->ops->readb))
-		return host->ops->readb(host, reg);
+	if (unlikely(host->ops->read_b))
+		return host->ops->read_b(host, reg);
 	else
 		return readb(host->ioaddr + reg);
 }
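The renamed accessor hooks and the new quirks are meant to be filled in by
platform glue. A minimal sketch, assuming a hypothetical controller whose
capability register misreports the base clock and which needs a 16-bit
register fixup (all demo_* names are invented for illustration):

	static u16 demo_read_w(struct sdhci_host *host, int reg)
	{
		u16 val = readw(host->ioaddr + reg);

		if (reg == SDHCI_HOST_VERSION)
			val = (val & 0xff00) | SDHCI_SPEC_200;	/* hypothetical fixup */
		return val;
	}

	static unsigned int demo_get_max_clock(struct sdhci_host *host)
	{
		return 50000000;	/* real glue would query its clock framework */
	}

	static struct sdhci_ops demo_sdhci_ops = {
		.read_w		= demo_read_w,		/* was .readw before the rename */
		.get_max_clock	= demo_get_max_clock,
	};

	/* in probe:  host->ops = &demo_sdhci_ops;
	 *            host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;  */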
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index cb41e9c3ac07..e7507af3856e 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -519,7 +519,7 @@ static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
 {
 	struct mmc_host *mmc = link->priv;
 	dev_dbg(&link->dev, "suspend\n");
-	mmc_suspend_host(mmc, PMSG_SUSPEND);
+	mmc_suspend_host(mmc);
 	return 0;
 }
 
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
new file mode 100644
index 000000000000..eb97830c0344
--- /dev/null
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -0,0 +1,965 @@
1/*
2 * MMCIF eMMC driver.
3 *
4 * Copyright (C) 2010 Renesas Solutions Corp.
5 * Yusuke Goda <yusuke.goda.sx@renesas.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License.
10 *
11 *
12 * TODO
13 * 1. DMA
14 * 2. Power management
15 * 3. Handle MMC errors better
16 *
17 */
18
19#include <linux/dma-mapping.h>
20#include <linux/mmc/host.h>
21#include <linux/mmc/card.h>
22#include <linux/mmc/core.h>
23#include <linux/mmc/mmc.h>
24#include <linux/mmc/sdio.h>
25#include <linux/delay.h>
26#include <linux/platform_device.h>
27#include <linux/clk.h>
28#include <linux/mmc/sh_mmcif.h>
29
30#define DRIVER_NAME "sh_mmcif"
31#define DRIVER_VERSION "2010-04-28"
32
33#define MMCIF_CE_CMD_SET 0x00000000
34#define MMCIF_CE_ARG 0x00000008
35#define MMCIF_CE_ARG_CMD12 0x0000000C
36#define MMCIF_CE_CMD_CTRL 0x00000010
37#define MMCIF_CE_BLOCK_SET 0x00000014
38#define MMCIF_CE_CLK_CTRL 0x00000018
39#define MMCIF_CE_BUF_ACC 0x0000001C
40#define MMCIF_CE_RESP3 0x00000020
41#define MMCIF_CE_RESP2 0x00000024
42#define MMCIF_CE_RESP1 0x00000028
43#define MMCIF_CE_RESP0 0x0000002C
44#define MMCIF_CE_RESP_CMD12 0x00000030
45#define MMCIF_CE_DATA 0x00000034
46#define MMCIF_CE_INT 0x00000040
47#define MMCIF_CE_INT_MASK 0x00000044
48#define MMCIF_CE_HOST_STS1 0x00000048
49#define MMCIF_CE_HOST_STS2 0x0000004C
50#define MMCIF_CE_VERSION 0x0000007C
51
52/* CE_CMD_SET */
53#define CMD_MASK 0x3f000000
54#define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22))
55#define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
56#define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */
57#define CMD_SET_RBSY (1 << 21) /* R1b */
58#define CMD_SET_CCSEN (1 << 20)
59#define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */
60#define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */
61#define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */
62#define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */
63#define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */
64#define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */
65#define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */
66#define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check*/
67#define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check*/
68#define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check*/
69#define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check*/
70#define CMD_SET_CRCSTE (1 << 8) /* 1: not receive CRC status */
71#define CMD_SET_TBIT (1 << 7) /* 1: transmission bit "Low" */
72#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
73#define CMD_SET_CCSH (1 << 5)
74#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
75#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
76#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
77
78/* CE_CMD_CTRL */
79#define CMD_CTRL_BREAK (1 << 0)
80
81/* CE_BLOCK_SET */
82#define BLOCK_SIZE_MASK 0x0000ffff
83
84/* CE_CLK_CTRL */
85#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */
86#define CLK_CLEAR ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
87#define CLK_SUP_PCLK ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
88#define SRSPTO_256 ((1 << 13) | (0 << 12)) /* resp timeout */
89#define SRBSYTO_29 ((1 << 11) | (1 << 10) | \
90 (1 << 9) | (1 << 8)) /* resp busy timeout */
91#define SRWDTO_29 ((1 << 7) | (1 << 6) | \
92 (1 << 5) | (1 << 4)) /* read/write timeout */
93#define SCCSTO_29 ((1 << 3) | (1 << 2) | \
94 (1 << 1) | (1 << 0)) /* ccs timeout */
95
96/* CE_BUF_ACC */
97#define BUF_ACC_DMAWEN (1 << 25)
98#define BUF_ACC_DMAREN (1 << 24)
99#define BUF_ACC_BUSW_32 (0 << 17)
100#define BUF_ACC_BUSW_16 (1 << 17)
101#define BUF_ACC_ATYP (1 << 16)
102
103/* CE_INT */
104#define INT_CCSDE (1 << 29)
105#define INT_CMD12DRE (1 << 26)
106#define INT_CMD12RBE (1 << 25)
107#define INT_CMD12CRE (1 << 24)
108#define INT_DTRANE (1 << 23)
109#define INT_BUFRE (1 << 22)
110#define INT_BUFWEN (1 << 21)
111#define INT_BUFREN (1 << 20)
112#define INT_CCSRCV (1 << 19)
113#define INT_RBSYE (1 << 17)
114#define INT_CRSPE (1 << 16)
115#define INT_CMDVIO (1 << 15)
116#define INT_BUFVIO (1 << 14)
117#define INT_WDATERR (1 << 11)
118#define INT_RDATERR (1 << 10)
119#define INT_RIDXERR (1 << 9)
120#define INT_RSPERR (1 << 8)
121#define INT_CCSTO (1 << 5)
122#define INT_CRCSTO (1 << 4)
123#define INT_WDATTO (1 << 3)
124#define INT_RDATTO (1 << 2)
125#define INT_RBSYTO (1 << 1)
126#define INT_RSPTO (1 << 0)
127#define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \
128 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
129 INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
130 INT_RDATTO | INT_RBSYTO | INT_RSPTO)
131
132/* CE_INT_MASK */
133#define MASK_ALL 0x00000000
134#define MASK_MCCSDE (1 << 29)
135#define MASK_MCMD12DRE (1 << 26)
136#define MASK_MCMD12RBE (1 << 25)
137#define MASK_MCMD12CRE (1 << 24)
138#define MASK_MDTRANE (1 << 23)
139#define MASK_MBUFRE (1 << 22)
140#define MASK_MBUFWEN (1 << 21)
141#define MASK_MBUFREN (1 << 20)
142#define MASK_MCCSRCV (1 << 19)
143#define MASK_MRBSYE (1 << 17)
144#define MASK_MCRSPE (1 << 16)
145#define MASK_MCMDVIO (1 << 15)
146#define MASK_MBUFVIO (1 << 14)
147#define MASK_MWDATERR (1 << 11)
148#define MASK_MRDATERR (1 << 10)
149#define MASK_MRIDXERR (1 << 9)
150#define MASK_MRSPERR (1 << 8)
151#define MASK_MCCSTO (1 << 5)
152#define MASK_MCRCSTO (1 << 4)
153#define MASK_MWDATTO (1 << 3)
154#define MASK_MRDATTO (1 << 2)
155#define MASK_MRBSYTO (1 << 1)
156#define MASK_MRSPTO (1 << 0)
157
158/* CE_HOST_STS1 */
159#define STS1_CMDSEQ (1 << 31)
160
161/* CE_HOST_STS2 */
162#define STS2_CRCSTE (1 << 31)
163#define STS2_CRC16E (1 << 30)
164#define STS2_AC12CRCE (1 << 29)
165#define STS2_RSPCRC7E (1 << 28)
166#define STS2_CRCSTEBE (1 << 27)
167#define STS2_RDATEBE (1 << 26)
168#define STS2_AC12REBE (1 << 25)
169#define STS2_RSPEBE (1 << 24)
170#define STS2_AC12IDXE (1 << 23)
171#define STS2_RSPIDXE (1 << 22)
172#define STS2_CCSTO (1 << 15)
173#define STS2_RDATTO (1 << 14)
174#define STS2_DATBSYTO (1 << 13)
175#define STS2_CRCSTTO (1 << 12)
176#define STS2_AC12BSYTO (1 << 11)
177#define STS2_RSPBSYTO (1 << 10)
178#define STS2_AC12RSPTO (1 << 9)
179#define STS2_RSPTO (1 << 8)
180#define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \
181 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
182#define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \
183 STS2_DATBSYTO | STS2_CRCSTTO | \
184 STS2_AC12BSYTO | STS2_RSPBSYTO | \
185 STS2_AC12RSPTO | STS2_RSPTO)
186
187/* CE_VERSION */
188#define SOFT_RST_ON (1 << 31)
189#define SOFT_RST_OFF (0 << 31)
190
191#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */
192#define CLKDEV_MMC_DATA 20000000 /* 20MHz */
193#define CLKDEV_INIT 400000 /* 400 KHz */
194
195struct sh_mmcif_host {
196 struct mmc_host *mmc;
197 struct mmc_data *data;
198 struct mmc_command *cmd;
199 struct platform_device *pd;
200 struct clk *hclk;
201 unsigned int clk;
202 int bus_width;
203 u16 wait_int;
204 u16 sd_error;
205 long timeout;
206 void __iomem *addr;
207 wait_queue_head_t intr_wait;
208};
209
210static inline u32 sh_mmcif_readl(struct sh_mmcif_host *host, unsigned int reg)
211{
212 return readl(host->addr + reg);
213}
214
215static inline void sh_mmcif_writel(struct sh_mmcif_host *host,
216 unsigned int reg, u32 val)
217{
218 writel(val, host->addr + reg);
219}
220
221static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
222 unsigned int reg, u32 val)
223{
224 writel(val | sh_mmcif_readl(host, reg), host->addr + reg);
225}
226
227static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
228 unsigned int reg, u32 val)
229{
230 writel(~val & sh_mmcif_readl(host, reg), host->addr + reg);
231}
232
233
234static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
235{
236 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
237
238 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
239 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
240
241 if (!clk)
242 return;
243 if (p->sup_pclk && clk == host->clk)
244 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
245 else
246 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
247 (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));
248
249 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
250}
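/*
 * Worked example for the divider math above (illustrative values): with
 * host->clk = 104 MHz and a requested clk of 25 MHz, host->clk / clk = 4,
 * __rounddown_pow_of_two(4) = 4 and ilog2(4) = 2, so the value 2 lands in
 * the CLK_CLEAR field (bits [19:16]) of CE_CLK_CTRL to select the divide
 * ratio before CLK_ENABLE is set again.
 */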
251
252static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
253{
254 u32 tmp;
255
256 tmp = 0x010f0000 & sh_mmcif_readl(host, MMCIF_CE_CLK_CTRL);
257
258 sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_ON);
259 sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_OFF);
260 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
261 SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
262 /* byte swap on */
263 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
264}
265
266static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
267{
268 u32 state1, state2;
269 int ret, timeout = 10000000;
270
271 host->sd_error = 0;
272 host->wait_int = 0;
273
274 state1 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS1);
275 state2 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS2);
276 pr_debug("%s: ERR HOST_STS1 = %08x\n", \
277 DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS1));
278 pr_debug("%s: ERR HOST_STS2 = %08x\n", \
279 DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS2));
280
281 if (state1 & STS1_CMDSEQ) {
282 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
283 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
284 while (1) {
285 timeout--;
286 if (timeout < 0) {
287 pr_err(DRIVER_NAME": Forced end of " \
288 "command sequence timeout err\n");
289 return -EIO;
290 }
291 if (!(sh_mmcif_readl(host, MMCIF_CE_HOST_STS1)
292 & STS1_CMDSEQ))
293 break;
294 mdelay(1);
295 }
296 sh_mmcif_sync_reset(host);
297 pr_debug(DRIVER_NAME": Forced end of command sequence\n");
298 return -EIO;
299 }
300
301 if (state2 & STS2_CRC_ERR) {
302 pr_debug(DRIVER_NAME": CRC error occurred\n");
303 ret = -EIO;
304 } else if (state2 & STS2_TIMEOUT_ERR) {
305 pr_debug(DRIVER_NAME": Timeout error occurred\n");
306 ret = -ETIMEDOUT;
307 } else {
308 pr_debug(DRIVER_NAME": End/Index error occurred\n");
309 ret = -EIO;
310 }
311 return ret;
312}
313
314static int sh_mmcif_single_read(struct sh_mmcif_host *host,
315 struct mmc_request *mrq)
316{
317 struct mmc_data *data = mrq->data;
318 long time;
319 u32 blocksize, i, *p = sg_virt(data->sg);
320
321 host->wait_int = 0;
322
323 /* buf read enable */
324 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
325 time = wait_event_interruptible_timeout(host->intr_wait,
326 host->wait_int == 1 ||
327 host->sd_error == 1, host->timeout);
328 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
329 return sh_mmcif_error_manage(host);
330
331 host->wait_int = 0;
332 blocksize = (BLOCK_SIZE_MASK &
333 sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
334 for (i = 0; i < blocksize / 4; i++)
335 *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
336
337 /* buffer read end */
338 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
339 time = wait_event_interruptible_timeout(host->intr_wait,
340 host->wait_int == 1 ||
341 host->sd_error == 1, host->timeout);
342 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
343 return sh_mmcif_error_manage(host);
344
345 host->wait_int = 0;
346 return 0;
347}
348
349static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
350 struct mmc_request *mrq)
351{
352 struct mmc_data *data = mrq->data;
353 long time;
354 u32 blocksize, i, j, sec, *p;
355
356 blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
357 for (j = 0; j < data->sg_len; j++) {
358 p = sg_virt(data->sg);
359 host->wait_int = 0;
360 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
361 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
362 /* buf read enable */
363 time = wait_event_interruptible_timeout(host->intr_wait,
364 host->wait_int == 1 ||
365 host->sd_error == 1, host->timeout);
366
367 if (host->wait_int != 1 &&
368 (time == 0 || host->sd_error != 0))
369 return sh_mmcif_error_manage(host);
370
371 host->wait_int = 0;
372 for (i = 0; i < blocksize / 4; i++)
373 *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
374 }
375 if (j < data->sg_len - 1)
376 data->sg++;
377 }
378 return 0;
379}
380
381static int sh_mmcif_single_write(struct sh_mmcif_host *host,
382 struct mmc_request *mrq)
383{
384 struct mmc_data *data = mrq->data;
385 long time;
386 u32 blocksize, i, *p = sg_virt(data->sg);
387
388 host->wait_int = 0;
389 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
390
391 /* buf write enable */
392 time = wait_event_interruptible_timeout(host->intr_wait,
393 host->wait_int == 1 ||
394 host->sd_error == 1, host->timeout);
395 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
396 return sh_mmcif_error_manage(host);
397
398 host->wait_int = 0;
399 blocksize = (BLOCK_SIZE_MASK &
400 sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
401 for (i = 0; i < blocksize / 4; i++)
402 sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
403
404 /* buffer write end */
405 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
406
407 time = wait_event_interruptible_timeout(host->intr_wait,
408 host->wait_int == 1 ||
409 host->sd_error == 1, host->timeout);
410 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
411 return sh_mmcif_error_manage(host);
412
413 host->wait_int = 0;
414 return 0;
415}
416
417static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
418 struct mmc_request *mrq)
419{
420 struct mmc_data *data = mrq->data;
421 long time;
422 u32 i, sec, j, blocksize, *p;
423
424 blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
425
426 for (j = 0; j < data->sg_len; j++) {
427 p = sg_virt(data->sg);
428 host->wait_int = 0;
429 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
430 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
431 /* buf write enable*/
432 time = wait_event_interruptible_timeout(host->intr_wait,
433 host->wait_int == 1 ||
434 host->sd_error == 1, host->timeout);
435
436 if (host->wait_int != 1 &&
437 (time == 0 || host->sd_error != 0))
438 return sh_mmcif_error_manage(host);
439
440 host->wait_int = 0;
441 for (i = 0; i < blocksize / 4; i++)
442 sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
443 }
444 if (j < data->sg_len - 1)
445 data->sg++;
446 }
447 return 0;
448}
449
450static void sh_mmcif_get_response(struct sh_mmcif_host *host,
451 struct mmc_command *cmd)
452{
453 if (cmd->flags & MMC_RSP_136) {
454 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP3);
455 cmd->resp[1] = sh_mmcif_readl(host, MMCIF_CE_RESP2);
456 cmd->resp[2] = sh_mmcif_readl(host, MMCIF_CE_RESP1);
457 cmd->resp[3] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
458 } else
459 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
460}
461
462static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
463 struct mmc_command *cmd)
464{
465 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP_CMD12);
466}
467
468static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
469 struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
470{
471 u32 tmp = 0;
472
473 /* Response Type check */
474 switch (mmc_resp_type(cmd)) {
475 case MMC_RSP_NONE:
476 tmp |= CMD_SET_RTYP_NO;
477 break;
478 case MMC_RSP_R1:
479 case MMC_RSP_R1B:
480 case MMC_RSP_R3:
481 tmp |= CMD_SET_RTYP_6B;
482 break;
483 case MMC_RSP_R2:
484 tmp |= CMD_SET_RTYP_17B;
485 break;
486 default:
487 pr_err(DRIVER_NAME": Unsupported response type.\n");
488 break;
489 }
490 switch (opc) {
491 /* RBSY */
492 case MMC_SWITCH:
493 case MMC_STOP_TRANSMISSION:
494 case MMC_SET_WRITE_PROT:
495 case MMC_CLR_WRITE_PROT:
496 case MMC_ERASE:
497 case MMC_GEN_CMD:
498 tmp |= CMD_SET_RBSY;
499 break;
500 }
501 /* WDAT / DATW */
502 if (host->data) {
503 tmp |= CMD_SET_WDAT;
504 switch (host->bus_width) {
505 case MMC_BUS_WIDTH_1:
506 tmp |= CMD_SET_DATW_1;
507 break;
508 case MMC_BUS_WIDTH_4:
509 tmp |= CMD_SET_DATW_4;
510 break;
511 case MMC_BUS_WIDTH_8:
512 tmp |= CMD_SET_DATW_8;
513 break;
514 default:
515 pr_err(DRIVER_NAME": Unsupported bus width.\n");
516 break;
517 }
518 }
519 /* DWEN */
520 if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
521 tmp |= CMD_SET_DWEN;
522 /* CMLTE/CMD12EN */
523 if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
524 tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
525 sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
526 mrq->data->blocks << 16);
527 }
528 /* RIDXC[1:0] check bits */
529 if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
530 opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
531 tmp |= CMD_SET_RIDXC_BITS;
532 /* RCRC7C[1:0] check bits */
533 if (opc == MMC_SEND_OP_COND)
534 tmp |= CMD_SET_CRC7C_BITS;
535 /* RCRC7C[1:0] internal CRC7 */
536 if (opc == MMC_ALL_SEND_CID ||
537 opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
538 tmp |= CMD_SET_CRC7C_INTERNAL;
539
540 return opc = ((opc << 24) | tmp);
541}
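/*
 * Worked example (derived from the cases above): for CMD25
 * (MMC_WRITE_MULTIPLE_BLOCK, opcode 25) on a 4-bit bus with data attached,
 * the word returned is (25 << 24) | CMD_SET_RTYP_6B | CMD_SET_WDAT |
 * CMD_SET_DATW_4 | CMD_SET_DWEN | CMD_SET_CMLTE | CMD_SET_CMD12EN, with the
 * block count written into bits [31:16] of CE_BLOCK_SET just beforehand.
 */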
542
543static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host,
544 struct mmc_request *mrq, u32 opc)
545{
546 u32 ret;
547
548 switch (opc) {
549 case MMC_READ_MULTIPLE_BLOCK:
550 ret = sh_mmcif_multi_read(host, mrq);
551 break;
552 case MMC_WRITE_MULTIPLE_BLOCK:
553 ret = sh_mmcif_multi_write(host, mrq);
554 break;
555 case MMC_WRITE_BLOCK:
556 ret = sh_mmcif_single_write(host, mrq);
557 break;
558 case MMC_READ_SINGLE_BLOCK:
559 case MMC_SEND_EXT_CSD:
560 ret = sh_mmcif_single_read(host, mrq);
561 break;
562 default:
563 pr_err(DRIVER_NAME": Unsupported CMD = d'%08d\n", opc);
564 ret = -EINVAL;
565 break;
566 }
567 return ret;
568}
569
570static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
571 struct mmc_request *mrq, struct mmc_command *cmd)
572{
573 long time;
574 int ret = 0, mask = 0;
575 u32 opc = cmd->opcode;
576
577 host->cmd = cmd;
578
579 switch (opc) {
580 /* response busy check */
581 case MMC_SWITCH:
582 case MMC_STOP_TRANSMISSION:
583 case MMC_SET_WRITE_PROT:
584 case MMC_CLR_WRITE_PROT:
585 case MMC_ERASE:
586 case MMC_GEN_CMD:
587 mask = MASK_MRBSYE;
588 break;
589 default:
590 mask = MASK_MCRSPE;
591 break;
592 }
593 mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
594 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
595 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
596 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
597
598 if (host->data) {
599 sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, 0);
600 sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, mrq->data->blksz);
601 }
602 opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
603
604 sh_mmcif_writel(host, MMCIF_CE_INT, 0xD80430C0);
605 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, mask);
606 /* set arg */
607 sh_mmcif_writel(host, MMCIF_CE_ARG, cmd->arg);
608 host->wait_int = 0;
609 /* set cmd */
610 sh_mmcif_writel(host, MMCIF_CE_CMD_SET, opc);
611
612 time = wait_event_interruptible_timeout(host->intr_wait,
613 host->wait_int == 1 || host->sd_error == 1, host->timeout);
614 if (host->wait_int != 1 && time == 0) {
615 cmd->error = sh_mmcif_error_manage(host);
616 return;
617 }
618 if (host->sd_error) {
619 switch (cmd->opcode) {
620 case MMC_ALL_SEND_CID:
621 case MMC_SELECT_CARD:
622 case MMC_APP_CMD:
623 cmd->error = -ETIMEDOUT;
624 break;
625 default:
626 pr_debug("%s: Cmd(d'%d) err\n",
627 DRIVER_NAME, cmd->opcode);
628 cmd->error = sh_mmcif_error_manage(host);
629 break;
630 }
631 host->sd_error = 0;
632 host->wait_int = 0;
633 return;
634 }
635 if (!(cmd->flags & MMC_RSP_PRESENT)) {
636 cmd->error = ret;
637 host->wait_int = 0;
638 return;
639 }
640 if (host->wait_int == 1) {
641 sh_mmcif_get_response(host, cmd);
642 host->wait_int = 0;
643 }
644 if (host->data) {
645 ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
646 if (ret < 0)
647 mrq->data->bytes_xfered = 0;
648 else
649 mrq->data->bytes_xfered =
650 mrq->data->blocks * mrq->data->blksz;
651 }
652 cmd->error = ret;
653}
654
655static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
656 struct mmc_request *mrq, struct mmc_command *cmd)
657{
658 long time;
659
660 if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
661 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
662 else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
663 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
664 else {
665 pr_err(DRIVER_NAME": unsupported stop command\n");
666 cmd->error = sh_mmcif_error_manage(host);
667 return;
668 }
669
670 time = wait_event_interruptible_timeout(host->intr_wait,
671 host->wait_int == 1 ||
672 host->sd_error == 1, host->timeout);
673 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) {
674 cmd->error = sh_mmcif_error_manage(host);
675 return;
676 }
677 sh_mmcif_get_cmd12response(host, cmd);
678 host->wait_int = 0;
679 cmd->error = 0;
680}
681
682static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
683{
684 struct sh_mmcif_host *host = mmc_priv(mmc);
685
686 switch (mrq->cmd->opcode) {
687 /* MMCIF does not support SD/SDIO commands */
688 case SD_IO_SEND_OP_COND:
689 case MMC_APP_CMD:
690 mrq->cmd->error = -ETIMEDOUT;
691 mmc_request_done(mmc, mrq);
692 return;
693 case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
694 if (!mrq->data) {
695 /* send_if_cond cmd (not supported) */
696 mrq->cmd->error = -ETIMEDOUT;
697 mmc_request_done(mmc, mrq);
698 return;
699 }
700 break;
701 default:
702 break;
703 }
704 host->data = mrq->data;
705 sh_mmcif_start_cmd(host, mrq, mrq->cmd);
706 host->data = NULL;
707
708 if (mrq->cmd->error != 0) {
709 mmc_request_done(mmc, mrq);
710 return;
711 }
712 if (mrq->stop)
713 sh_mmcif_stop_cmd(host, mrq, mrq->stop);
714 mmc_request_done(mmc, mrq);
715}
716
717static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
718{
719 struct sh_mmcif_host *host = mmc_priv(mmc);
720 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
721
722 if (ios->power_mode == MMC_POWER_OFF) {
723 /* clock stop */
724 sh_mmcif_clock_control(host, 0);
725 if (p->down_pwr)
726 p->down_pwr(host->pd);
727 return;
728 } else if (ios->power_mode == MMC_POWER_UP) {
729 if (p->set_pwr)
730 p->set_pwr(host->pd, ios->power_mode);
731 }
732
733 if (ios->clock)
734 sh_mmcif_clock_control(host, ios->clock);
735
736 host->bus_width = ios->bus_width;
737}
738
739static struct mmc_host_ops sh_mmcif_ops = {
740 .request = sh_mmcif_request,
741 .set_ios = sh_mmcif_set_ios,
742};
743
744static void sh_mmcif_detect(struct mmc_host *mmc)
745{
746 mmc_detect_change(mmc, 0);
747}
748
749static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
750{
751 struct sh_mmcif_host *host = dev_id;
752 u32 state = 0;
753 int err = 0;
754
755 state = sh_mmcif_readl(host, MMCIF_CE_INT);
756
757 if (state & INT_RBSYE) {
758 sh_mmcif_writel(host, MMCIF_CE_INT, ~(INT_RBSYE | INT_CRSPE));
759 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
760 } else if (state & INT_CRSPE) {
761 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_CRSPE);
762 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
763 } else if (state & INT_BUFREN) {
764 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFREN);
765 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
766 } else if (state & INT_BUFWEN) {
767 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFWEN);
768 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
769 } else if (state & INT_CMD12DRE) {
770 sh_mmcif_writel(host, MMCIF_CE_INT,
771 ~(INT_CMD12DRE | INT_CMD12RBE |
772 INT_CMD12CRE | INT_BUFRE));
773 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
774 } else if (state & INT_BUFRE) {
775 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFRE);
776 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
777 } else if (state & INT_DTRANE) {
778 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_DTRANE);
779 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
780 } else if (state & INT_CMD12RBE) {
781 sh_mmcif_writel(host, MMCIF_CE_INT,
782 ~(INT_CMD12RBE | INT_CMD12CRE));
783 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
784 } else if (state & INT_ERR_STS) {
785 /* err interrupts */
786 sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
787 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
788 err = 1;
789 } else {
790 pr_debug("%s: Not support int\n", DRIVER_NAME);
791 sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
792 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
793 err = 1;
794 }
795 if (err) {
796 host->sd_error = 1;
797 pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state);
798 }
799 host->wait_int = 1;
800 wake_up(&host->intr_wait);
801
802 return IRQ_HANDLED;
803}
804
805static int __devinit sh_mmcif_probe(struct platform_device *pdev)
806{
807 int ret = 0, irq[2];
808 struct mmc_host *mmc;
809 struct sh_mmcif_host *host = NULL;
810 struct sh_mmcif_plat_data *pd = NULL;
811 struct resource *res;
812 void __iomem *reg;
813 char clk_name[8];
814
815 irq[0] = platform_get_irq(pdev, 0);
816 irq[1] = platform_get_irq(pdev, 1);
817 if (irq[0] < 0 || irq[1] < 0) {
818 pr_err(DRIVER_NAME": Get irq error\n");
819 return -ENXIO;
820 }
821 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
822 if (!res) {
823 dev_err(&pdev->dev, "platform_get_resource error.\n");
824 return -ENXIO;
825 }
826 reg = ioremap(res->start, resource_size(res));
827 if (!reg) {
828 dev_err(&pdev->dev, "ioremap error.\n");
829 return -ENOMEM;
830 }
831 pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data);
832 if (!pd) {
833 dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
834 ret = -ENXIO;
835 goto clean_up;
836 }
837 mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
838 if (!mmc) {
839 ret = -ENOMEM;
840 goto clean_up;
841 }
842 host = mmc_priv(mmc);
843 host->mmc = mmc;
844 host->addr = reg;
845 host->timeout = 1000;
846
847 snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
848 host->hclk = clk_get(&pdev->dev, clk_name);
849 if (IS_ERR(host->hclk)) {
850 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
851 ret = PTR_ERR(host->hclk);
852 goto clean_up1;
853 }
854 clk_enable(host->hclk);
855 host->clk = clk_get_rate(host->hclk);
856 host->pd = pdev;
857
858 init_waitqueue_head(&host->intr_wait);
859
860 mmc->ops = &sh_mmcif_ops;
861 mmc->f_max = host->clk;
862 /* close to 400KHz */
863 if (mmc->f_max < 51200000)
864 mmc->f_min = mmc->f_max / 128;
865 else if (mmc->f_max < 102400000)
866 mmc->f_min = mmc->f_max / 256;
867 else
868 mmc->f_min = mmc->f_max / 512;
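	/*
	 * Example (illustrative): a 100 MHz bus clock falls into the second
	 * bracket above, giving f_min = 100 MHz / 256 ~= 390 kHz, the closest
	 * power-of-two divider to the 400 kHz card-identification clock.
	 */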
869 if (pd->ocr)
870 mmc->ocr_avail = pd->ocr;
871 mmc->caps = MMC_CAP_MMC_HIGHSPEED;
872 if (pd->caps)
873 mmc->caps |= pd->caps;
874 mmc->max_phys_segs = 128;
875 mmc->max_hw_segs = 128;
876 mmc->max_blk_size = 512;
877 mmc->max_blk_count = 65535;
878 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
879 mmc->max_seg_size = mmc->max_req_size;
880
881 sh_mmcif_sync_reset(host);
882 platform_set_drvdata(pdev, host);
883 mmc_add_host(mmc);
884
885 ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
886 if (ret) {
887 pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n");
888 goto clean_up2;
889 }
890 ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
891 if (ret) {
892 free_irq(irq[0], host);
893 pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n");
894 goto clean_up2;
895 }
896
897 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
898 sh_mmcif_detect(host->mmc);
899
900 pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION);
901 pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME,
902 sh_mmcif_readl(host, MMCIF_CE_VERSION) & 0x0000ffff);
903 return ret;
904
905clean_up2:
906 clk_disable(host->hclk);
907clean_up1:
908 mmc_free_host(mmc);
909clean_up:
910 if (reg)
911 iounmap(reg);
912 return ret;
913}
914
915static int __devexit sh_mmcif_remove(struct platform_device *pdev)
916{
917 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
918 int irq[2];
919
920 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
921
922 irq[0] = platform_get_irq(pdev, 0);
923 irq[1] = platform_get_irq(pdev, 1);
924
925 if (host->addr)
926 iounmap(host->addr);
927
928 platform_set_drvdata(pdev, NULL);
929 mmc_remove_host(host->mmc);
930
931 free_irq(irq[0], host);
932 free_irq(irq[1], host);
933
934 clk_disable(host->hclk);
935 mmc_free_host(host->mmc);
936
937 return 0;
938}
939
940static struct platform_driver sh_mmcif_driver = {
941 .probe = sh_mmcif_probe,
942 .remove = sh_mmcif_remove,
943 .driver = {
944 .name = DRIVER_NAME,
945 },
946};
947
948static int __init sh_mmcif_init(void)
949{
950 return platform_driver_register(&sh_mmcif_driver);
951}
952
953static void __exit sh_mmcif_exit(void)
954{
955 platform_driver_unregister(&sh_mmcif_driver);
956}
957
958module_init(sh_mmcif_init);
959module_exit(sh_mmcif_exit);
960
961
962MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
963MODULE_LICENSE("GPL");
964MODULE_ALIAS(DRIVER_NAME);
965MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
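Board-support sketch (hypothetical, not part of this file): registering an
sh_mmcif instance for the probe above. The platform-data fields (ocr, caps,
sup_pclk) and the one-MEM/two-IRQ resource layout follow the probe and
set_ios code; the base address, IRQ numbers and "mmc0" clock wiring are
placeholder assumptions.

	static struct sh_mmcif_plat_data sh_mmcif_pdata = {
		.ocr		= MMC_VDD_32_33 | MMC_VDD_33_34,
		.caps		= MMC_CAP_4_BIT_DATA,
		.sup_pclk	= 0,	/* always divide the bus clock */
	};

	static struct resource sh_mmcif_resources[] = {
		{ .start = 0xa4ca0000, .end = 0xa4ca00ff, .flags = IORESOURCE_MEM },
		{ .start = 100, .flags = IORESOURCE_IRQ },	/* sh_mmc:error */
		{ .start = 101, .flags = IORESOURCE_IRQ },	/* sh_mmc:int */
	};

	static struct platform_device sh_mmcif_device = {
		.name		= "sh_mmcif",
		.id		= 0,	/* matched against clock "mmc0" in probe */
		.dev		= { .platform_data = &sh_mmcif_pdata },
		.num_resources	= ARRAY_SIZE(sh_mmcif_resources),
		.resource	= sh_mmcif_resources,
	};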
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 82554ddec6b3..cec99958b652 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -1032,7 +1032,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
 
 static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
 {
-	return mmc_suspend_host(tifm_get_drvdata(sock), state);
+	return mmc_suspend_host(tifm_get_drvdata(sock));
 }
 
 static int tifm_sd_resume(struct tifm_dev *sock)
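Like the sdhci, sdricoh, tmio_mmc, via-sdmmc and wbsd hunks in this series,
this tracks the MMC core change that dropped the pm_message_t argument from
mmc_suspend_host(). A minimal sketch of the resulting suspend-hook shape,
assuming a generic platform driver (names are illustrative):

	static int demo_mmc_suspend(struct platform_device *pdev, pm_message_t state)
	{
		struct mmc_host *mmc = platform_get_drvdata(pdev);

		/* state still arrives from the PM core but is no longer forwarded */
		return mmc_suspend_host(mmc);
	}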
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index b2b577f6afd4..ee7d0a5a51c4 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -29,6 +29,7 @@
 #include <linux/irq.h>
 #include <linux/device.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
 #include <linux/mmc/host.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tmio.h>
@@ -131,8 +132,8 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
 
 	host->cmd = cmd;
 
-/* FIXME - this seems to be ok comented out but the spec suggest this bit should
- * be set when issuing app commands.
+/* FIXME - this seems to be ok commented out but the spec suggest this bit
+ * should be set when issuing app commands.
  *	if(cmd->flags & MMC_FLAG_ACMD)
  *	c |= APP_CMD;
  */
@@ -155,12 +156,12 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
 	return 0;
 }
 
-/* This chip always returns (at least?) as much data as you ask for.
+/*
+ * This chip always returns (at least?) as much data as you ask for.
  * I'm unsure what happens if you ask for less than a block. This should be
  * looked into to ensure that a funny length read doesnt hose the controller.
- *
  */
-static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
+static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_data *data = host->data;
 	unsigned short *buf;
@@ -180,7 +181,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	count = data->blksz;
 
 	pr_debug("count: %08x offset: %08x flags %08x\n",
-	       count, host->sg_off, data->flags);
+		 count, host->sg_off, data->flags);
 
 	/* Transfer the data */
 	if (data->flags & MMC_DATA_READ)
@@ -198,7 +199,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	return;
 }
 
-static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_data *data = host->data;
 	struct mmc_command *stop;
@@ -206,7 +207,7 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 	host->data = NULL;
 
 	if (!data) {
-		pr_debug("Spurious data end IRQ\n");
+		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
 		return;
 	}
 	stop = data->stop;
@@ -219,7 +220,8 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 
 	pr_debug("Completed data request\n");
 
-	/*FIXME - other drivers allow an optional stop command of any given type
+	/*
+	 * FIXME: other drivers allow an optional stop command of any given type
 	 * which we dont do, as the chip can auto generate them.
 	 * Perhaps we can be smarter about when to use auto CMD12 and
 	 * only issue the auto request when we know this is the desired
@@ -227,10 +229,17 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 	 * upper layers expect. For now, we do what works.
 	 */
 
-	if (data->flags & MMC_DATA_READ)
-		disable_mmc_irqs(host, TMIO_MASK_READOP);
-	else
-		disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+	if (data->flags & MMC_DATA_READ) {
+		if (!host->chan_rx)
+			disable_mmc_irqs(host, TMIO_MASK_READOP);
+		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
+			host->mrq);
+	} else {
+		if (!host->chan_tx)
+			disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
+			host->mrq);
+	}
 
 	if (stop) {
 		if (stop->opcode == 12 && !stop->arg)
@@ -242,7 +251,35 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 	tmio_mmc_finish_request(host);
 }
 
-static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
+static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+{
+	struct mmc_data *data = host->data;
+
+	if (!data)
+		return;
+
+	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
+		/*
+		 * Has all data been written out yet? Testing on SuperH showed,
+		 * that in most cases the first interrupt comes already with the
+		 * BUSY status bit clear, but on some operations, like mount or
+		 * in the beginning of a write / sync / umount, there is one
+		 * DATAEND interrupt with the BUSY bit set, in this cases
+		 * waiting for one more interrupt fixes the problem.
+		 */
+		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
+			disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+			tasklet_schedule(&host->dma_complete);
+		}
+	} else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
+		disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+		tasklet_schedule(&host->dma_complete);
+	} else {
+		tmio_mmc_do_data_irq(host);
+	}
+}
+
+static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 	unsigned int stat)
 {
 	struct mmc_command *cmd = host->cmd;
@@ -282,10 +319,16 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 	 * If theres no data or we encountered an error, finish now.
 	 */
 	if (host->data && !cmd->error) {
-		if (host->data->flags & MMC_DATA_READ)
-			enable_mmc_irqs(host, TMIO_MASK_READOP);
-		else
-			enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+		if (host->data->flags & MMC_DATA_READ) {
+			if (!host->chan_rx)
+				enable_mmc_irqs(host, TMIO_MASK_READOP);
+		} else {
+			struct dma_chan *chan = host->chan_tx;
+			if (!chan)
+				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+			else
+				tasklet_schedule(&host->dma_issue);
+		}
 	} else {
 		tmio_mmc_finish_request(host);
 	}
@@ -293,7 +336,6 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 	return;
 }
 
-
 static irqreturn_t tmio_mmc_irq(int irq, void *devid)
 {
 	struct tmio_mmc_host *host = devid;
@@ -311,7 +353,7 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
 	if (!ireg) {
 		disable_mmc_irqs(host, status & ~irq_mask);
 
-		pr_debug("tmio_mmc: Spurious irq, disabling! "
+		pr_warning("tmio_mmc: Spurious irq, disabling! "
 			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
 		pr_debug_status(status);
 
@@ -363,16 +405,265 @@ out:
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_TMIO_MMC_DMA
+static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+{
+#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
+	/* Switch DMA mode on or off - SuperH specific? */
+	sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
+#endif
+}
+
+static void tmio_dma_complete(void *arg)
+{
+	struct tmio_mmc_host *host = arg;
+
+	dev_dbg(&host->pdev->dev, "Command completed\n");
+
+	if (!host->data)
+		dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
+	else
+		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+}
+
+static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+{
+	struct scatterlist *sg = host->sg_ptr;
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct dma_chan *chan = host->chan_rx;
+	int ret;
+
+	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
+	if (ret > 0) {
+		host->dma_sglen = ret;
+		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	}
+
+	if (desc) {
+		host->desc = desc;
+		desc->callback = tmio_dma_complete;
+		desc->callback_param = host;
+		host->cookie = desc->tx_submit(desc);
+		if (host->cookie < 0) {
+			host->desc = NULL;
+			ret = host->cookie;
+		} else {
+			chan->device->device_issue_pending(chan);
+		}
+	}
+	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+		__func__, host->sg_len, ret, host->cookie, host->mrq);
+
+	if (!host->desc) {
+		/* DMA failed, fall back to PIO */
+		if (ret >= 0)
+			ret = -EIO;
+		host->chan_rx = NULL;
+		dma_release_channel(chan);
+		/* Free the Tx channel too */
+		chan = host->chan_tx;
+		if (chan) {
+			host->chan_tx = NULL;
+			dma_release_channel(chan);
+		}
+		dev_warn(&host->pdev->dev,
+			 "DMA failed: %d, falling back to PIO\n", ret);
+		tmio_mmc_enable_dma(host, false);
+		reset(host);
+		/* Fail this request, let above layers recover */
+		host->mrq->cmd->error = ret;
+		tmio_mmc_finish_request(host);
+	}
+
+	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+		desc, host->cookie, host->sg_len);
+
+	return ret > 0 ? 0 : ret;
+}
+
+static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+{
+	struct scatterlist *sg = host->sg_ptr;
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct dma_chan *chan = host->chan_tx;
+	int ret;
+
+	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
+	if (ret > 0) {
+		host->dma_sglen = ret;
+		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	}
+
+	if (desc) {
+		host->desc = desc;
+		desc->callback = tmio_dma_complete;
+		desc->callback_param = host;
+		host->cookie = desc->tx_submit(desc);
+		if (host->cookie < 0) {
+			host->desc = NULL;
+			ret = host->cookie;
+		}
+	}
+	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+		__func__, host->sg_len, ret, host->cookie, host->mrq);
+
+	if (!host->desc) {
+		/* DMA failed, fall back to PIO */
+		if (ret >= 0)
+			ret = -EIO;
+		host->chan_tx = NULL;
+		dma_release_channel(chan);
+		/* Free the Rx channel too */
+		chan = host->chan_rx;
+		if (chan) {
+			host->chan_rx = NULL;
+			dma_release_channel(chan);
+		}
+		dev_warn(&host->pdev->dev,
+			 "DMA failed: %d, falling back to PIO\n", ret);
+		tmio_mmc_enable_dma(host, false);
+		reset(host);
+		/* Fail this request, let above layers recover */
+		host->mrq->cmd->error = ret;
+		tmio_mmc_finish_request(host);
+	}
+
+	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
+		desc, host->cookie);
+
+	return ret > 0 ? 0 : ret;
+}
+
+static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+			       struct mmc_data *data)
+{
+	if (data->flags & MMC_DATA_READ) {
+		if (host->chan_rx)
+			return tmio_mmc_start_dma_rx(host);
+	} else {
+		if (host->chan_tx)
+			return tmio_mmc_start_dma_tx(host);
+	}
+
+	return 0;
+}
+
+static void tmio_issue_tasklet_fn(unsigned long priv)
+{
+	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
+	struct dma_chan *chan = host->chan_tx;
+
+	chan->device->device_issue_pending(chan);
+}
+
+static void tmio_tasklet_fn(unsigned long arg)
+{
+	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+
+	if (host->data->flags & MMC_DATA_READ)
+		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+			     DMA_FROM_DEVICE);
+	else
+		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+			     DMA_TO_DEVICE);
+
+	tmio_mmc_do_data_irq(host);
+}
+
+/* It might be necessary to make filter MFD specific */
+static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
+{
+	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
+	chan->private = arg;
+	return true;
+}
+
+static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+				 struct tmio_mmc_data *pdata)
+{
+	host->cookie = -EINVAL;
+	host->desc = NULL;
+
+	/* We can only either use DMA for both Tx and Rx or not use it at all */
+	if (pdata->dma) {
+		dma_cap_mask_t mask;
+
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+
+		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
+						    pdata->dma->chan_priv_tx);
+		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
+			host->chan_tx);
+
+		if (!host->chan_tx)
+			return;
+
+		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
+						    pdata->dma->chan_priv_rx);
+		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
+			host->chan_rx);
+
+		if (!host->chan_rx) {
+			dma_release_channel(host->chan_tx);
+			host->chan_tx = NULL;
+			return;
+		}
+
+		tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
+		tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);
+
+		tmio_mmc_enable_dma(host, true);
+	}
+}
+
+static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+	if (host->chan_tx) {
+		struct dma_chan *chan = host->chan_tx;
+		host->chan_tx = NULL;
+		dma_release_channel(chan);
+	}
+	if (host->chan_rx) {
+		struct dma_chan *chan = host->chan_rx;
+		host->chan_rx = NULL;
+		dma_release_channel(chan);
+	}
+
+	host->cookie = -EINVAL;
+	host->desc = NULL;
+}
+#else
+static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+			       struct mmc_data *data)
+{
+	return 0;
+}
+
+static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+				 struct tmio_mmc_data *pdata)
+{
+	host->chan_tx = NULL;
+	host->chan_rx = NULL;
+}
+
+static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+}
+#endif
+
 static int tmio_mmc_start_data(struct tmio_mmc_host *host,
 	struct mmc_data *data)
 {
 	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
 		 data->blksz, data->blocks);
 
 	/* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
 	if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
-		printk(KERN_ERR "%s: %d byte block unsupported in 4 bit mode\n",
+		pr_err("%s: %d byte block unsupported in 4 bit mode\n",
 		       mmc_hostname(host->mmc), data->blksz);
 		return -EINVAL;
 	}
 
@@ -383,7 +674,7 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
 	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
 	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
 
-	return 0;
+	return tmio_mmc_start_dma(host, data);
 }
 
 /* Process requests from the MMC layer */
@@ -404,7 +695,6 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	}
 
 	ret = tmio_mmc_start_command(host, mrq->cmd);
-
 	if (!ret)
 		return;
 
@@ -458,11 +748,14 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 static int tmio_mmc_get_ro(struct mmc_host *mmc)
 {
 	struct tmio_mmc_host *host = mmc_priv(mmc);
+	struct mfd_cell *cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
 
-	return (sd_ctrl_read16(host, CTL_STATUS) & TMIO_STAT_WRPROTECT) ? 0 : 1;
+	return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+		(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
 }
 
-static struct mmc_host_ops tmio_mmc_ops = {
+static const struct mmc_host_ops tmio_mmc_ops = {
 	.request	= tmio_mmc_request,
 	.set_ios	= tmio_mmc_set_ios,
 	.get_ro		= tmio_mmc_get_ro,
@@ -475,7 +768,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
 	struct mmc_host *mmc = platform_get_drvdata(dev);
 	int ret;
 
-	ret = mmc_suspend_host(mmc, state);
+	ret = mmc_suspend_host(mmc);
 
 	/* Tell MFD core it can disable us now.*/
 	if (!ret && cell->disable)
@@ -515,6 +808,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
 	struct tmio_mmc_host *host;
 	struct mmc_host *mmc;
 	int ret = -EINVAL;
+	u32 irq_mask = TMIO_MASK_CMD;
 
 	if (dev->num_resources != 2)
 		goto out;
@@ -553,7 +847,10 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
 	mmc->caps |= pdata->capabilities;
 	mmc->f_max = pdata->hclk;
 	mmc->f_min = mmc->f_max / 512;
-	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+	if (pdata->ocr_mask)
+		mmc->ocr_avail = pdata->ocr_mask;
+	else
+		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 
 	/* Tell the MFD core we are ready to be enabled */
 	if (cell->enable) {
@@ -578,13 +875,20 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
 	if (ret)
 		goto cell_disable;
 
+	/* See if we also get DMA */
+	tmio_mmc_request_dma(host, pdata);
+
 	mmc_add_host(mmc);
 
-	printk(KERN_INFO "%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
-	       (unsigned long)host->ctl, host->irq);
+	pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
+		(unsigned long)host->ctl, host->irq);
 
 	/* Unmask the IRQs we want to know about */
-	enable_mmc_irqs(host, TMIO_MASK_IRQ);
+	if (!host->chan_rx)
+		irq_mask |= TMIO_MASK_READOP;
+	if (!host->chan_tx)
+		irq_mask |= TMIO_MASK_WRITEOP;
+	enable_mmc_irqs(host, irq_mask);
 
 	return 0;
 
@@ -609,6 +913,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
 	if (mmc) {
 		struct tmio_mmc_host *host = mmc_priv(mmc);
 		mmc_remove_host(mmc);
+		tmio_mmc_release_dma(host);
 		free_irq(host->irq, host);
 		if (cell->disable)
 			cell->disable(dev);
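The DMA paths above compile in under CONFIG_TMIO_MMC_DMA and are only taken
when the MFD cell hands channel configuration through pdata->dma. A minimal
sketch of that wiring, assuming the chan_priv_tx/chan_priv_rx fields
dereferenced in tmio_mmc_request_dma(); the struct tag and slave-ID values
here are placeholders, the authoritative definitions live in
<linux/mfd/tmio.h>:

	static struct tmio_mmc_dma demo_tmio_dma = {
		.chan_priv_tx	= (void *)DEMO_SLAVE_ID_TX,	/* ends up in chan->private */
		.chan_priv_rx	= (void *)DEMO_SLAVE_ID_RX,	/* via tmio_mmc_filter() */
	};

	static struct tmio_mmc_data demo_tmio_mmc_data = {
		.hclk	= 24000000,
		.dma	= &demo_tmio_dma,	/* NULL keeps the driver in PIO mode */
	};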
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index dafecfbcd91a..64f7d5dfc106 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -10,6 +10,8 @@
  */
 
 #include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
 
 #define CTL_SD_CMD 0x00
 #define CTL_ARG_REG 0x04
@@ -106,6 +108,17 @@ struct tmio_mmc_host {
 	unsigned int		sg_off;
 
 	struct platform_device *pdev;
+
+	/* DMA support */
+	struct dma_chan		*chan_rx;
+	struct dma_chan		*chan_tx;
+	struct tasklet_struct	dma_complete;
+	struct tasklet_struct	dma_issue;
+#ifdef CONFIG_TMIO_MMC_DMA
+	struct dma_async_tx_descriptor *desc;
+	unsigned int		dma_sglen;
+	dma_cookie_t		cookie;
+#endif
 };
 
 #include <linux/io.h>
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 632858a94376..19f2d72dbca5 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1280,7 +1280,7 @@ static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
 	via_save_pcictrlreg(host);
 	via_save_sdcreg(host);
 
-	ret = mmc_suspend_host(host->mmc, state);
+	ret = mmc_suspend_host(host->mmc);
 
 	pci_save_state(pcidev);
 	pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 69efe01eece8..0012f5d13d28 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1819,7 +1819,7 @@ static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
 {
 	BUG_ON(host == NULL);
 
-	return mmc_suspend_host(host->mmc, state);
+	return mmc_suspend_host(host->mmc);
 }
 
 static int wbsd_resume(struct wbsd_host *host)
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 36dbcee1ac29..ba124baa646d 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -143,7 +143,7 @@ static int of_flash_remove(struct of_device *dev)
 static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
 						  struct map_info *map)
 {
-	struct device_node *dp = dev->node;
+	struct device_node *dp = dev->dev.of_node;
 	const char *of_probe;
 	struct mtd_info *mtd;
 	static const char *rom_probe_types[]
@@ -221,7 +221,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
 #ifdef CONFIG_MTD_PARTITIONS
 	const char **part_probe_types;
 #endif
-	struct device_node *dp = dev->node;
+	struct device_node *dp = dev->dev.of_node;
 	struct resource res;
 	struct of_flash *info;
 	const char *probe_type = match->data;
@@ -245,7 +245,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
 	p = of_get_property(dp, "reg", &count);
 	if (count % reg_tuple_size != 0) {
 		dev_err(&dev->dev, "Malformed reg property on %s\n",
-				dev->node->full_name);
+				dev->dev.of_node->full_name);
 		err = -EINVAL;
 		goto err_flash_remove;
 	}
@@ -418,8 +418,11 @@ static struct of_device_id of_flash_match[] = {
 MODULE_DEVICE_TABLE(of, of_flash_match);
 
 static struct of_platform_driver of_flash_driver = {
-	.name		= "of-flash",
-	.match_table	= of_flash_match,
+	.driver = {
+		.name = "of-flash",
+		.owner = THIS_MODULE,
+		.of_match_table = of_flash_match,
+	},
 	.probe		= of_flash_probe,
 	.remove		= of_flash_remove,
 };
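The of_flash_driver hunk above is the first of many instances of the same mechanical conversion in this series: the name and match table move from the top level of struct of_platform_driver into its embedded struct device_driver, the field is renamed of_match_table, and an owner is added. The before/after shape, with placeholder names:

/* old style */
static struct of_platform_driver foo_driver = {
	.name		= "foo",
	.match_table	= foo_match,
	.probe		= foo_probe,
	.remove		= foo_remove,
};

/* new style */
static struct of_platform_driver foo_driver = {
	.driver = {
		.name		= "foo",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_match,
	},
	.probe	= foo_probe,
	.remove	= foo_remove,
};

The sun_uflash, NAND, CAN, and ethernet driver hunks below repeat this pattern essentially verbatim.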
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index fadc4c45b455..0391c2527bd7 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -110,7 +110,7 @@ int uflash_devinit(struct of_device *op, struct device_node *dp)
 
 static int __devinit uflash_probe(struct of_device *op, const struct of_device_id *match)
 {
-	struct device_node *dp = op->node;
+	struct device_node *dp = op->dev.of_node;
 
 	/* Flashprom must have the "user" property in order to
 	 * be used by this driver.
@@ -149,8 +149,11 @@ static const struct of_device_id uflash_match[] = {
 MODULE_DEVICE_TABLE(of, uflash_match);
 
 static struct of_platform_driver uflash_driver = {
-	.name		= DRIVER_NAME,
-	.match_table	= uflash_match,
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = uflash_match,
+	},
 	.probe		= uflash_probe,
 	.remove		= __devexit_p(uflash_remove),
 };
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 8bb5e4a66328..000d65ea55a4 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -468,8 +468,7 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
 	return ret;
 }
 
-static int mtd_ioctl(struct inode *inode, struct file *file,
-			u_int cmd, u_long arg)
+static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
 {
 	struct mtd_file_info *mfi = file->private_data;
 	struct mtd_info *mtd = mfi->mtd;
@@ -840,6 +839,17 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
 	return ret;
 } /* memory_ioctl */
 
+static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = mtd_ioctl(file, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
 #ifdef CONFIG_COMPAT
 
 struct mtd_oob_buf32 {
@@ -854,7 +864,6 @@ struct mtd_oob_buf32 {
 static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
 	unsigned long arg)
 {
-	struct inode *inode = file->f_path.dentry->d_inode;
 	struct mtd_file_info *mfi = file->private_data;
 	struct mtd_info *mtd = mfi->mtd;
 	void __user *argp = compat_ptr(arg);
@@ -892,7 +901,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
 		break;
 	}
 	default:
-		ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
+		ret = mtd_ioctl(file, cmd, (unsigned long)argp);
 	}
 
 	unlock_kernel();
@@ -960,7 +969,7 @@ static const struct file_operations mtd_fops = {
 	.llseek		= mtd_lseek,
 	.read		= mtd_read,
 	.write		= mtd_write,
-	.ioctl		= mtd_ioctl,
+	.unlocked_ioctl	= mtd_unlocked_ioctl,
#ifdef CONFIG_COMPAT
 	.compat_ioctl	= mtd_compat_ioctl,
 #endif
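The mtdchar hunks follow the standard big-kernel-lock pushdown: the old .ioctl entry, which the VFS called with the BKL held and an inode argument, becomes an .unlocked_ioctl that takes the BKL explicitly around an otherwise unchanged handler. The generic shape of such a conversion, with placeholder names:

static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	long ret;

	lock_kernel();		/* preserve the old BKL semantics for now */
	ret = foo_ioctl(file, cmd, arg);	/* inode argument dropped */
	unlock_kernel();

	return ret;
}

static const struct file_operations foo_fops = {
	.unlocked_ioctl	= foo_unlocked_ioctl,	/* was: .ioctl = foo_ioctl */
};

Handlers that still need the inode can reach it through file->f_path.dentry->d_inode, which is why the compat path above can simply drop its local inode variable.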
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 3f38fb8e6666..5084cc517944 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -1030,14 +1030,14 @@ static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev,
 	init_waitqueue_head(&ctrl->controller.wq);
 	init_waitqueue_head(&ctrl->irq_wait);
 
-	ctrl->regs = of_iomap(ofdev->node, 0);
+	ctrl->regs = of_iomap(ofdev->dev.of_node, 0);
 	if (!ctrl->regs) {
 		dev_err(&ofdev->dev, "failed to get memory region\n");
 		ret = -ENODEV;
 		goto err;
 	}
 
-	ctrl->irq = of_irq_to_resource(ofdev->node, 0, NULL);
+	ctrl->irq = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
 	if (ctrl->irq == NO_IRQ) {
 		dev_err(&ofdev->dev, "failed to get irq resource\n");
 		ret = -ENODEV;
@@ -1058,7 +1058,7 @@ static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev,
 		goto err;
 	}
 
-	for_each_child_of_node(ofdev->node, child)
+	for_each_child_of_node(ofdev->dev.of_node, child)
 		if (of_device_is_compatible(child, "fsl,elbc-fcm-nand"))
 			fsl_elbc_chip_probe(ctrl, child);
 
@@ -1078,9 +1078,10 @@ static const struct of_device_id fsl_elbc_match[] = {
 
 static struct of_platform_driver fsl_elbc_ctrl_driver = {
 	.driver = {
 		.name = "fsl-elbc",
+		.owner = THIS_MODULE,
+		.of_match_table = fsl_elbc_match,
 	},
-	.match_table = fsl_elbc_match,
 	.probe = fsl_elbc_ctrl_probe,
 	.remove = fsl_elbc_ctrl_remove,
 };
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 2d215ccb564d..00aea6f7d1f1 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -360,8 +360,11 @@ static const struct of_device_id of_fun_match[] = {
 MODULE_DEVICE_TABLE(of, of_fun_match);
 
 static struct of_platform_driver of_fun_driver = {
-	.name		= "fsl,upm-nand",
-	.match_table	= of_fun_match,
+	.driver = {
+		.name = "fsl,upm-nand",
+		.owner = THIS_MODULE,
+		.of_match_table = of_fun_match,
+	},
 	.probe		= fun_probe,
 	.remove		= __devexit_p(fun_remove),
 };
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index b983cae8c298..98fd2bdf8be1 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -239,14 +239,14 @@ static int __devinit ndfc_probe(struct of_device *ofdev,
 	dev_set_drvdata(&ofdev->dev, ndfc);
 
 	/* Read the reg property to get the chip select */
-	reg = of_get_property(ofdev->node, "reg", &len);
+	reg = of_get_property(ofdev->dev.of_node, "reg", &len);
 	if (reg == NULL || len != 12) {
 		dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
 		return -ENOENT;
 	}
 	ndfc->chip_select = reg[0];
 
-	ndfc->ndfcbase = of_iomap(ofdev->node, 0);
+	ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
 	if (!ndfc->ndfcbase) {
 		dev_err(&ofdev->dev, "failed to get memory\n");
 		return -EIO;
@@ -255,20 +255,20 @@ static int __devinit ndfc_probe(struct of_device *ofdev,
 	ccr = NDFC_CCR_BS(ndfc->chip_select);
 
 	/* It is ok if ccr does not exist - just default to 0 */
-	reg = of_get_property(ofdev->node, "ccr", NULL);
+	reg = of_get_property(ofdev->dev.of_node, "ccr", NULL);
 	if (reg)
 		ccr |= *reg;
 
 	out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
 
 	/* Set the bank settings if given */
-	reg = of_get_property(ofdev->node, "bank-settings", NULL);
+	reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL);
 	if (reg) {
 		int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
 		out_be32(ndfc->ndfcbase + offset, *reg);
 	}
 
-	err = ndfc_chip_init(ndfc, ofdev->node);
+	err = ndfc_chip_init(ndfc, ofdev->dev.of_node);
 	if (err) {
 		iounmap(ndfc->ndfcbase);
 		return err;
@@ -294,9 +294,10 @@ MODULE_DEVICE_TABLE(of, ndfc_match);
 
 static struct of_platform_driver ndfc_driver = {
 	.driver = {
 		.name = "ndfc",
+		.owner = THIS_MODULE,
+		.of_match_table = ndfc_match,
 	},
-	.match_table = ndfc_match,
 	.probe = ndfc_probe,
 	.remove = __devexit_p(ndfc_remove),
 };
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 090a05c12cbe..f02af24d033a 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -93,7 +93,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev,
 				 const struct of_device_id *match)
 {
 	struct pci_dev *pdev;
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	struct resource res;
 	struct nand_chip *chip;
 	int err = 0;
@@ -221,8 +221,11 @@ MODULE_DEVICE_TABLE(of, pasemi_nand_match);
 
 static struct of_platform_driver pasemi_nand_driver =
 {
-	.name		= (char*)driver_name,
-	.match_table	= pasemi_nand_match,
+	.driver = {
+		.name = (char*)driver_name,
+		.owner = THIS_MODULE,
+		.of_match_table = pasemi_nand_match,
+	},
 	.probe		= pasemi_nand_probe,
 	.remove		= pasemi_nand_remove,
 };
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index b37cbde6e7db..884852dc7eb4 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -301,8 +301,11 @@ static const struct of_device_id socrates_nand_match[] =
 MODULE_DEVICE_TABLE(of, socrates_nand_match);
 
 static struct of_platform_driver socrates_nand_driver = {
-	.name		= "socrates_nand",
-	.match_table	= socrates_nand_match,
+	.driver = {
+		.name = "socrates_nand",
+		.owner = THIS_MODULE,
+		.of_match_table = socrates_nand_match,
+	},
 	.probe		= socrates_nand_probe,
 	.remove		= __devexit_p(socrates_nand_remove),
 };
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 373c1a563474..b46be490cd2a 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -283,6 +283,8 @@ struct be_adapter {
 	u8 port_type;
 	u8 transceiver;
 	u8 generation;		/* BladeEngine ASIC generation */
+	u32 flash_status;
+	struct completion flash_compl;
 
 	bool sriov_enabled;
 	u32 vf_if_handle[BE_MAX_VF];
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index e79bf8b9af3b..c911bfb55b19 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -59,6 +59,13 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 
 	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
 				CQE_STATUS_COMPL_MASK;
+
+	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
+	    (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
+		adapter->flash_status = compl_status;
+		complete(&adapter->flash_compl);
+	}
+
 	if (compl_status == MCC_STATUS_SUCCESS) {
 		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
 			struct be_cmd_resp_get_stats *resp =
@@ -1417,6 +1424,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 	int status;
 
 	spin_lock_bh(&adapter->mcc_lock);
+	adapter->flash_status = 0;
 
 	wrb = wrb_from_mccq(adapter);
 	if (!wrb) {
@@ -1428,6 +1436,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 
 	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
 			OPCODE_COMMON_WRITE_FLASHROM);
+	wrb->tag1 = CMD_SUBSYSTEM_COMMON;
 
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
@@ -1439,10 +1448,16 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 	req->params.op_code = cpu_to_le32(flash_opcode);
 	req->params.data_buf_size = cpu_to_le32(buf_size);
 
-	status = be_mcc_notify_wait(adapter);
+	be_mcc_notify(adapter);
+	spin_unlock_bh(&adapter->mcc_lock);
+
+	if (!wait_for_completion_timeout(&adapter->flash_compl,
+			msecs_to_jiffies(12000)))
+		status = -1;
+	else
+		status = adapter->flash_status;
 
 err:
-	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
 
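Taken together with the be.h and be_main.c hunks, the change above converts a synchronous MCC wait into a completion signalled from the completion-queue handler. Condensed, the handshake the patch wires up is (names from the patch, bodies abridged):

/* issuer: post the command, drop the lock, then wait with a timeout */
adapter->flash_status = 0;
be_mcc_notify(adapter);
spin_unlock_bh(&adapter->mcc_lock);

if (!wait_for_completion_timeout(&adapter->flash_compl,
				 msecs_to_jiffies(12000)))
	status = -1;			/* firmware never answered */
else
	status = adapter->flash_status;	/* filled in by the handler */

/* completion handler: match the command by tag and wake the issuer */
if (compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM &&
    compl->tag1 == CMD_SUBSYSTEM_COMMON) {
	adapter->flash_status = compl_status;
	complete(&adapter->flash_compl);
}

Dropping mcc_lock before sleeping is the point of the conversion: a long flash write no longer holds the MCC queue lock across the firmware round trip.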
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 058d7f95f5ae..aa065c71ddd8 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -2319,6 +2319,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
 	spin_lock_init(&adapter->mcc_lock);
 	spin_lock_init(&adapter->mcc_cq_lock);
 
+	init_completion(&adapter->flash_compl);
 	pci_save_state(adapter->pdev);
 	return 0;
 
@@ -2487,10 +2488,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
 		status = be_cmd_POST(adapter);
 		if (status)
 			goto ctrl_clean;
-
-		status = be_cmd_reset_function(adapter);
-		if (status)
-			goto ctrl_clean;
 	}
 
 	/* tell fw we're ready to fire cmds */
@@ -2498,6 +2495,12 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	if (status)
 		goto ctrl_clean;
 
+	if (be_physfn(adapter)) {
+		status = be_cmd_reset_function(adapter);
+		if (status)
+			goto ctrl_clean;
+	}
+
 	status = be_stats_init(adapter);
 	if (status)
 		goto ctrl_clean;
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 39a54bad397f..368f33313fb6 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -1626,6 +1626,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
 	return 0;
 
 out_err_mdiobus_register:
+	kfree(miibus->irq);
 	mdiobus_free(miibus);
 out_err_alloc:
 	peripheral_free_list(pin_req);
@@ -1638,6 +1639,7 @@ static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
 	struct mii_bus *miibus = platform_get_drvdata(pdev);
 	platform_set_drvdata(pdev, NULL);
 	mdiobus_unregister(miibus);
+	kfree(miibus->irq);
 	mdiobus_free(miibus);
 	peripheral_free_list(pin_req);
 	return 0;
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 225fd147774a..8af8442c694a 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -392,15 +392,17 @@ static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
 };
 
 static struct of_platform_driver mpc5xxx_can_driver = {
-	.owner = THIS_MODULE,
-	.name = "mpc5xxx_can",
+	.driver = {
+		.name = "mpc5xxx_can",
+		.owner = THIS_MODULE,
+		.of_match_table = mpc5xxx_can_table,
+	},
 	.probe = mpc5xxx_can_probe,
 	.remove = __devexit_p(mpc5xxx_can_remove),
 #ifdef CONFIG_PM
 	.suspend = mpc5xxx_can_suspend,
 	.resume = mpc5xxx_can_resume,
 #endif
-	.match_table = mpc5xxx_can_table,
 };
 
 static int __init mpc5xxx_can_init(void)
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 85f7cbfe8e5f..0a8de01d52f7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -599,6 +599,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
 		CAN_CTRLMODE_BERR_REPORTING;
 
+	spin_lock_init(&priv->cmdreg_lock);
+
 	if (sizeof_priv)
 		priv->priv = (void *)priv + sizeof(struct sja1000_priv);
 
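Only the spin_lock_init() appears in this hunk; the consumer of cmdreg_lock sits outside the excerpt. A plausible use, assuming the command-register helper elsewhere in sja1000.c serialises its accesses with this lock (illustrative sketch, not shown in the patch):

static void foo_write_cmdreg(struct sja1000_priv *priv, u8 val)
{
	unsigned long flags;

	/* the SJA1000 command register must not see interleaved
	 * writes from concurrent contexts */
	spin_lock_irqsave(&priv->cmdreg_lock, flags);
	priv->write_reg(priv, REG_CMR, val);
	priv->read_reg(priv, REG_SR);	/* assumed: flush the posted write */
	spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
}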
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 34e79efbd2fc..ac1a83d7c204 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -71,7 +71,7 @@ static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
 {
 	struct net_device *dev = dev_get_drvdata(&ofdev->dev);
 	struct sja1000_priv *priv = netdev_priv(dev);
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	struct resource res;
 
 	dev_set_drvdata(&ofdev->dev, NULL);
@@ -90,7 +90,7 @@ static int __devexit sja1000_ofp_remove(struct of_device *ofdev)
 static int __devinit sja1000_ofp_probe(struct of_device *ofdev,
 				       const struct of_device_id *id)
 {
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	struct net_device *dev;
 	struct sja1000_priv *priv;
 	struct resource res;
@@ -215,11 +215,13 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
 MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
 
 static struct of_platform_driver sja1000_ofp_driver = {
-	.owner = THIS_MODULE,
-	.name = DRV_NAME,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DRV_NAME,
+		.of_match_table = sja1000_ofp_table,
+	},
 	.probe = sja1000_ofp_probe,
 	.remove = __devexit_p(sja1000_ofp_remove),
-	.match_table = sja1000_ofp_table,
 };
 
 static int __init sja1000_ofp_init(void)
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 02698a1c80b0..f547894ff48f 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -122,8 +122,11 @@ static struct of_device_id ehea_device_table[] = {
 MODULE_DEVICE_TABLE(of, ehea_device_table);
 
 static struct of_platform_driver ehea_driver = {
-	.name = "ehea",
-	.match_table = ehea_device_table,
+	.driver = {
+		.name = "ehea",
+		.owner = THIS_MODULE,
+		.of_match_table = ehea_device_table,
+	},
 	.probe = ehea_probe_adapter,
 	.remove = ehea_remove,
 };
@@ -3050,7 +3053,7 @@ static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
 static void __devinit logical_port_release(struct device *dev)
 {
 	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
-	of_node_put(port->ofdev.node);
+	of_node_put(port->ofdev.dev.of_node);
 }
 
 static struct device *ehea_register_port(struct ehea_port *port,
@@ -3058,7 +3061,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
 {
 	int ret;
 
-	port->ofdev.node = of_node_get(dn);
+	port->ofdev.dev.of_node = of_node_get(dn);
 	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
 	port->ofdev.dev.bus = &ibmebus_bus_type;
 
@@ -3225,7 +3228,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
 	const u32 *dn_log_port_id;
 	int i = 0;
 
-	lhea_dn = adapter->ofdev->node;
+	lhea_dn = adapter->ofdev->dev.of_node;
 	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
 
 		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
@@ -3264,7 +3267,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
 	struct device_node *eth_dn = NULL;
 	const u32 *dn_log_port_id;
 
-	lhea_dn = adapter->ofdev->node;
+	lhea_dn = adapter->ofdev->dev.of_node;
 	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
 
 		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
@@ -3394,7 +3397,7 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
 	const u64 *adapter_handle;
 	int ret;
 
-	if (!dev || !dev->node) {
+	if (!dev || !dev->dev.of_node) {
 		ehea_error("Invalid ibmebus device probed");
 		return -EINVAL;
 	}
@@ -3410,14 +3413,14 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
 
 	adapter->ofdev = dev;
 
-	adapter_handle = of_get_property(dev->node, "ibm,hea-handle",
+	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
 					NULL);
 	if (adapter_handle)
 		adapter->handle = *adapter_handle;
 
 	if (!adapter->handle) {
 		dev_err(&dev->dev, "failed getting handle for adapter"
-			" '%s'\n", dev->node->full_name);
+			" '%s'\n", dev->dev.of_node->full_name);
 		ret = -ENODEV;
 		goto out_free_ad;
 	}
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index e125113759a5..6586b5c7e4b6 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1034,9 +1034,10 @@ static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac,
 {
 	struct vic_provinfo *vp;
 	u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
-	unsigned short *uuid;
+	u8 *uuid;
 	char uuid_str[38];
-	static char *uuid_fmt = "%04X%04X-%04X-%04X-%04X-%04X%04X%04X";
+	static char *uuid_fmt = "%02X%02X%02X%02X-%02X%02X-%02X%02X-"
+		"%02X%02X-%02X%02X%02X%02X%0X%02X";
 	int err;
 
 	if (!name)
@@ -1058,20 +1059,24 @@ static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac,
 		ETH_ALEN, mac);
 
 	if (instance_uuid) {
-		uuid = (unsigned short *)instance_uuid;
+		uuid = instance_uuid;
 		sprintf(uuid_str, uuid_fmt,
 			uuid[0], uuid[1], uuid[2], uuid[3],
-			uuid[4], uuid[5], uuid[6], uuid[7]);
+			uuid[4], uuid[5], uuid[6], uuid[7],
+			uuid[8], uuid[9], uuid[10], uuid[11],
+			uuid[12], uuid[13], uuid[14], uuid[15]);
 		vic_provinfo_add_tlv(vp,
 			VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
 			sizeof(uuid_str), uuid_str);
 	}
 
 	if (host_uuid) {
-		uuid = (unsigned short *)host_uuid;
+		uuid = host_uuid;
 		sprintf(uuid_str, uuid_fmt,
 			uuid[0], uuid[1], uuid[2], uuid[3],
-			uuid[4], uuid[5], uuid[6], uuid[7]);
+			uuid[4], uuid[5], uuid[6], uuid[7],
+			uuid[8], uuid[9], uuid[10], uuid[11],
+			uuid[12], uuid[13], uuid[14], uuid[15]);
 		vic_provinfo_add_tlv(vp,
 			VIC_LINUX_PROV_TLV_HOST_UUID_STR,
 			sizeof(uuid_str), uuid_str);
@@ -1127,6 +1132,14 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
 	switch (request) {
 	case PORT_REQUEST_ASSOCIATE:
 
+		/* If the interface mac addr hasn't been assigned,
+		 * assign a random mac addr before setting port-
+		 * profile.
+		 */
+
+		if (is_zero_ether_addr(netdev->dev_addr))
+			random_ether_addr(netdev->dev_addr);
+
 		if (port[IFLA_PORT_PROFILE])
 			name = nla_data(port[IFLA_PORT_PROFILE]);
 
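The enic hunks reinterpret the UUID buffers as 16 raw bytes and print each with %02X instead of eight endian-dependent %04X words. Note that the second half of the new format string ends in "%0X%02X": the "%0X" looks like a typo for "%02X" and would drop the zero-padding of byte 14. A corrected, self-contained version of the formatting (hypothetical helper, not from the patch):

#include <stdio.h>

/* format a 16-byte UUID as the usual 8-4-4-4-12 hex groups */
static void format_uuid(const unsigned char u[16], char out[37])
{
	snprintf(out, 37,
		"%02X%02X%02X%02X-%02X%02X-%02X%02X-"
		"%02X%02X-%02X%02X%02X%02X%02X%02X",
		u[0], u[1], u[2], u[3], u[4], u[5], u[6], u[7],
		u[8], u[9], u[10], u[11], u[12], u[13], u[14], u[15]);
}

At 36 characters plus the terminator this fits the driver's char uuid_str[38] with a byte to spare.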
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 14cbde5cf68e..6ed2df14ec84 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -174,6 +174,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
  * @iobase:	pointer to I/O memory region
  * @membase:	pointer to buffer memory region
  * @dma_alloc:	dma allocated buffer size
+ * @io_region_size:	I/O memory region size
  * @num_tx:	number of send buffers
  * @cur_tx:	last send buffer written
  * @dty_tx:	last buffer actually sent
@@ -193,6 +194,7 @@ struct ethoc {
 	void __iomem *iobase;
 	void __iomem *membase;
 	int dma_alloc;
+	resource_size_t io_region_size;
 
 	unsigned int num_tx;
 	unsigned int cur_tx;
@@ -943,6 +945,7 @@ static int ethoc_probe(struct platform_device *pdev)
 	priv = netdev_priv(netdev);
 	priv->netdev = netdev;
 	priv->dma_alloc = 0;
+	priv->io_region_size = mmio->end - mmio->start + 1;
 
 	priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
 			resource_size(mmio));
@@ -1047,20 +1050,34 @@ static int ethoc_probe(struct platform_device *pdev)
 	ret = register_netdev(netdev);
 	if (ret < 0) {
 		dev_err(&netdev->dev, "failed to register interface\n");
-		goto error;
+		goto error2;
 	}
 
 	goto out;
 
+error2:
+	netif_napi_del(&priv->napi);
 error:
 	mdiobus_unregister(priv->mdio);
 free_mdio:
 	kfree(priv->mdio->irq);
 	mdiobus_free(priv->mdio);
 free:
-	if (priv->dma_alloc)
-		dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
-			netdev->mem_start);
+	if (priv) {
+		if (priv->dma_alloc)
+			dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
+				netdev->mem_start);
+		else if (priv->membase)
+			devm_iounmap(&pdev->dev, priv->membase);
+		if (priv->iobase)
+			devm_iounmap(&pdev->dev, priv->iobase);
+	}
+	if (mem)
+		devm_release_mem_region(&pdev->dev, mem->start,
+			mem->end - mem->start + 1);
+	if (mmio)
+		devm_release_mem_region(&pdev->dev, mmio->start,
+			mmio->end - mmio->start + 1);
 	free_netdev(netdev);
 out:
 	return ret;
@@ -1078,6 +1095,7 @@ static int ethoc_remove(struct platform_device *pdev)
 	platform_set_drvdata(pdev, NULL);
 
 	if (netdev) {
+		netif_napi_del(&priv->napi);
 		phy_disconnect(priv->phy);
 		priv->phy = NULL;
 
@@ -1089,6 +1107,14 @@ static int ethoc_remove(struct platform_device *pdev)
 		if (priv->dma_alloc)
 			dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
 				netdev->mem_start);
+		else {
+			devm_iounmap(&pdev->dev, priv->membase);
+			devm_release_mem_region(&pdev->dev, netdev->mem_start,
+				netdev->mem_end - netdev->mem_start + 1);
+		}
+		devm_iounmap(&pdev->dev, priv->iobase);
+		devm_release_mem_region(&pdev->dev, netdev->base_addr,
+					priv->io_region_size);
 		unregister_netdev(netdev);
 		free_netdev(netdev);
 	}
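The ethoc teardown hunks pair each devm_* allocation made at probe time with an explicit release, which requires remembering the I/O region size: the new io_region_size field stores mmio->end - mmio->start + 1 (what resource_size() computes) at probe so that remove can hand the same length back. Condensed, the remove-side pairing the patch establishes is:

/* undo the mapping and region claimed at probe */
devm_iounmap(&pdev->dev, priv->iobase);
devm_release_mem_region(&pdev->dev, netdev->base_addr,
			priv->io_region_size);

Releasing devm resources by hand is unusual, since the devres core frees them once the device is unbound; doing it in ethoc_remove() makes the ordering explicit rather than relying on that.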
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 42d9ac9ba395..326465ffbb23 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -41,6 +41,7 @@
 #include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/phy.h>
+#include <linux/fec.h>
 
 #include <asm/cacheflush.h>
 
@@ -182,6 +183,7 @@ struct fec_enet_private {
 	struct phy_device *phy_dev;
 	int mii_timeout;
 	uint phy_speed;
+	phy_interface_t	phy_interface;
 	int index;
 	int link;
 	int full_duplex;
@@ -1191,6 +1193,21 @@ fec_restart(struct net_device *dev, int duplex)
 	/* Set MII speed */
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
+#ifdef FEC_MIIGSK_ENR
+	if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+		/* disable the gasket and wait */
+		writel(0, fep->hwp + FEC_MIIGSK_ENR);
+		while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+			udelay(1);
+
+		/* configure the gasket: RMII, 50 MHz, no loopback, no echo */
+		writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+
+		/* re-enable the gasket */
+		writel(2, fep->hwp + FEC_MIIGSK_ENR);
+	}
+#endif
+
 	/* And last, enable the transmit and receive processing */
 	writel(2, fep->hwp + FEC_ECNTRL);
 	writel(0, fep->hwp + FEC_R_DES_ACTIVE);
@@ -1226,6 +1243,7 @@ static int __devinit
 fec_probe(struct platform_device *pdev)
 {
 	struct fec_enet_private *fep;
+	struct fec_platform_data *pdata;
 	struct net_device *ndev;
 	int i, irq, ret = 0;
 	struct resource *r;
@@ -1259,6 +1277,10 @@ fec_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, ndev);
 
+	pdata = pdev->dev.platform_data;
+	if (pdata)
+		fep->phy_interface = pdata->phy;
+
 	/* This device has up to three irqs on some platforms */
 	for (i = 0; i < 3; i++) {
 		irq = platform_get_irq(pdev, i);
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index cc47f3f057c7..2c48b25668d5 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -43,6 +43,8 @@
 #define FEC_R_DES_START		0x180 /* Receive descriptor ring */
 #define FEC_X_DES_START		0x184 /* Transmit descriptor ring */
 #define FEC_R_BUFF_SIZE		0x188 /* Maximum receive buff size */
+#define FEC_MIIGSK_CFGR		0x300 /* MIIGSK Configuration reg */
+#define FEC_MIIGSK_ENR		0x308 /* MIIGSK Enable reg */
 
 #else
 
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 221f440c10f4..25e6cc6840b1 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -871,7 +871,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
 	priv->ndev = ndev;
 
 	/* Reserve FEC control zone */
-	rv = of_address_to_resource(op->node, 0, &mem);
+	rv = of_address_to_resource(op->dev.of_node, 0, &mem);
 	if (rv) {
 		printk(KERN_ERR DRIVER_NAME ": "
 				"Error while parsing device node resource\n" );
@@ -919,7 +919,7 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
 
 	/* Get the IRQ we need one by one */
 		/* Control */
-	ndev->irq = irq_of_parse_and_map(op->node, 0);
+	ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
 
 		/* RX */
 	priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
@@ -942,20 +942,20 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
 	/* Start with safe defaults for link connection */
 	priv->speed = 100;
 	priv->duplex = DUPLEX_HALF;
-	priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->node) >> 20) / 5) << 1;
+	priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1;
 
 	/* The current speed preconfigures the speed of the MII link */
-	prop = of_get_property(op->node, "current-speed", &prop_size);
+	prop = of_get_property(op->dev.of_node, "current-speed", &prop_size);
 	if (prop && (prop_size >= sizeof(u32) * 2)) {
 		priv->speed = prop[0];
 		priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
 	}
 
 	/* If there is a phy handle, then get the PHY node */
-	priv->phy_node = of_parse_phandle(op->node, "phy-handle", 0);
+	priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
 
 	/* the 7-wire property means don't use MII mode */
-	if (of_find_property(op->node, "fsl,7-wire-mode", NULL)) {
+	if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) {
 		priv->seven_wire_mode = 1;
 		dev_info(&ndev->dev, "using 7-wire PHY mode\n");
 	}
@@ -1063,9 +1063,11 @@ static struct of_device_id mpc52xx_fec_match[] = {
 MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
 
 static struct of_platform_driver mpc52xx_fec_driver = {
-	.owner		= THIS_MODULE,
-	.name		= DRIVER_NAME,
-	.match_table	= mpc52xx_fec_match,
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = mpc52xx_fec_match,
+	},
 	.probe		= mpc52xx_fec_probe,
 	.remove		= mpc52xx_fec_remove,
 #ifdef CONFIG_PM
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 7658a082e390..006f64d9f96a 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -66,7 +66,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
 				  const struct of_device_id *match)
 {
 	struct device *dev = &of->dev;
-	struct device_node *np = of->node;
+	struct device_node *np = of->dev.of_node;
 	struct mii_bus *bus;
 	struct mpc52xx_fec_mdio_priv *priv;
 	struct resource res = {};
@@ -107,7 +107,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
 
 	/* set MII speed */
 	out_be32(&priv->regs->mii_speed,
-		((mpc5xxx_get_bus_frequency(of->node) >> 20) / 5) << 1);
+		((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
 
 	err = of_mdiobus_register(bus, np);
 	if (err)
@@ -159,10 +159,13 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
 MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
 
 struct of_platform_driver mpc52xx_fec_mdio_driver = {
-	.name = "mpc5200b-fec-phy",
+	.driver = {
+		.name = "mpc5200b-fec-phy",
+		.owner = THIS_MODULE,
+		.of_match_table = mpc52xx_fec_mdio_match,
+	},
 	.probe = mpc52xx_fec_mdio_probe,
 	.remove = mpc52xx_fec_mdio_remove,
-	.match_table = mpc52xx_fec_mdio_match,
 };
 
 /* let fec driver call it, since this has to be registered before it */
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 0fb0fefcb787..309a0eaddd81 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1013,7 +1013,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
 		return -ENOMEM;
 
 	if (!IS_FEC(match)) {
-		data = of_get_property(ofdev->node, "fsl,cpm-command", &len);
+		data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
 		if (!data || len != 4)
 			goto out_free_fpi;
 
@@ -1025,8 +1025,8 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
 	fpi->rx_copybreak = 240;
 	fpi->use_napi = 1;
 	fpi->napi_weight = 17;
-	fpi->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0);
-	if ((!fpi->phy_node) && (!of_get_property(ofdev->node, "fixed-link",
+	fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
+	if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
 						  NULL)))
 		goto out_free_fpi;
 
@@ -1059,7 +1059,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
 	spin_lock_init(&fep->lock);
 	spin_lock_init(&fep->tx_lock);
 
-	mac_addr = of_get_mac_address(ofdev->node);
+	mac_addr = of_get_mac_address(ofdev->dev.of_node);
 	if (mac_addr)
 		memcpy(ndev->dev_addr, mac_addr, 6);
 
@@ -1156,8 +1156,11 @@ static struct of_device_id fs_enet_match[] = {
 MODULE_DEVICE_TABLE(of, fs_enet_match);
 
 static struct of_platform_driver fs_enet_driver = {
-	.name = "fs_enet",
-	.match_table = fs_enet_match,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "fs_enet",
+		.of_match_table = fs_enet_match,
+	},
 	.probe = fs_enet_probe,
 	.remove = fs_enet_remove,
 };
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 714da967fa19..5d45084b287d 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -88,19 +88,19 @@ static int do_pd_setup(struct fs_enet_private *fep)
 	struct fs_platform_info *fpi = fep->fpi;
 	int ret = -EINVAL;
 
-	fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
+	fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
 	if (fep->interrupt == NO_IRQ)
 		goto out;
 
-	fep->fcc.fccp = of_iomap(ofdev->node, 0);
+	fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
 	if (!fep->fcc.fccp)
 		goto out;
 
-	fep->fcc.ep = of_iomap(ofdev->node, 1);
+	fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1);
 	if (!fep->fcc.ep)
 		goto out_fccp;
 
-	fep->fcc.fcccp = of_iomap(ofdev->node, 2);
+	fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2);
 	if (!fep->fcc.fcccp)
 		goto out_ep;
 
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index 7eff92ef01da..7ca1642276d0 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -98,11 +98,11 @@ static int do_pd_setup(struct fs_enet_private *fep)
 {
 	struct of_device *ofdev = to_of_device(fep->dev);
 
-	fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
+	fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
 	if (fep->interrupt == NO_IRQ)
 		return -EINVAL;
 
-	fep->fec.fecp = of_iomap(ofdev->node, 0);
+	fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
 	if (!fep->fcc.fccp)
 		return -EINVAL;
 
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 7f0591e43cd9..a3c44544846d 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -98,15 +98,15 @@ static int do_pd_setup(struct fs_enet_private *fep)
 {
 	struct of_device *ofdev = to_of_device(fep->dev);
 
-	fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
+	fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
 	if (fep->interrupt == NO_IRQ)
 		return -EINVAL;
 
-	fep->scc.sccp = of_iomap(ofdev->node, 0);
+	fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
 	if (!fep->scc.sccp)
 		return -EINVAL;
 
-	fep->scc.ep = of_iomap(ofdev->node, 1);
+	fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
 	if (!fep->scc.ep) {
 		iounmap(fep->scc.sccp);
 		return -EINVAL;
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 24ff9f43a62b..0f90685d3d19 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -224,8 +224,11 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
 MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
 
 static struct of_platform_driver fs_enet_bb_mdio_driver = {
-	.name = "fsl-bb-mdio",
-	.match_table = fs_enet_mdio_bb_match,
+	.driver = {
+		.name = "fsl-bb-mdio",
+		.owner = THIS_MODULE,
+		.of_match_table = fs_enet_mdio_bb_match,
+	},
 	.probe = fs_enet_mdio_probe,
 	.remove = fs_enet_mdio_remove,
 };
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index 5944b65082cb..bddffd169b93 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -124,7 +124,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
 	new_bus->write = &fs_enet_fec_mii_write;
 	new_bus->reset = &fs_enet_fec_mii_reset;
 
-	ret = of_address_to_resource(ofdev->node, 0, &res);
+	ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
 	if (ret)
 		goto out_res;
 
@@ -135,7 +135,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
 		goto out_fec;
 
 	if (get_bus_freq) {
-		clock = get_bus_freq(ofdev->node);
+		clock = get_bus_freq(ofdev->dev.of_node);
 		if (!clock) {
 			/* Use maximum divider if clock is unknown */
 			dev_warn(&ofdev->dev, "could not determine IPS clock\n");
@@ -172,7 +172,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
 	new_bus->parent = &ofdev->dev;
 	dev_set_drvdata(&ofdev->dev, new_bus);
 
-	ret = of_mdiobus_register(new_bus, ofdev->node);
+	ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
 	if (ret)
 		goto out_free_irqs;
 
@@ -222,8 +222,11 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
 MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
 
 static struct of_platform_driver fs_enet_fec_mdio_driver = {
-	.name = "fsl-fec-mdio",
-	.match_table = fs_enet_mdio_fec_match,
+	.driver = {
+		.name = "fsl-fec-mdio",
+		.owner = THIS_MODULE,
+		.of_match_table = fs_enet_mdio_fec_match,
+	},
 	.probe = fs_enet_mdio_probe,
 	.remove = fs_enet_mdio_remove,
 };
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index ff028f59b930..b4c41d72c423 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -267,7 +267,7 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
 static int fsl_pq_mdio_probe(struct of_device *ofdev,
 		const struct of_device_id *match)
 {
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	struct device_node *tbi;
 	struct fsl_pq_mdio_priv *priv;
 	struct fsl_pq_mdio __iomem *regs = NULL;
@@ -471,10 +471,13 @@ static struct of_device_id fsl_pq_mdio_match[] = {
 MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
 
 static struct of_platform_driver fsl_pq_mdio_driver = {
-	.name = "fsl-pq_mdio",
+	.driver = {
+		.name = "fsl-pq_mdio",
+		.owner = THIS_MODULE,
+		.of_match_table = fsl_pq_mdio_match,
+	},
 	.probe = fsl_pq_mdio_probe,
 	.remove = fsl_pq_mdio_remove,
-	.match_table = fsl_pq_mdio_match,
 };
 
 int __init fsl_pq_mdio_init(void)
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c6791cd4ee05..1830f3199cb5 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -608,7 +608,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 	int err = 0, i;
 	struct net_device *dev = NULL;
 	struct gfar_private *priv = NULL;
-	struct device_node *np = ofdev->node;
+	struct device_node *np = ofdev->dev.of_node;
 	struct device_node *child = NULL;
 	const u32 *stash;
 	const u32 *stash_len;
@@ -646,7 +646,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
 		return -ENOMEM;
 
 	priv = netdev_priv(dev);
-	priv->node = ofdev->node;
+	priv->node = ofdev->dev.of_node;
 	priv->ndev = dev;
 
 	dev->num_tx_queues = num_tx_qs;
@@ -939,7 +939,7 @@ static int gfar_probe(struct of_device *ofdev,
 	priv = netdev_priv(dev);
 	priv->ndev = dev;
 	priv->ofdev = ofdev;
-	priv->node = ofdev->node;
+	priv->node = ofdev->dev.of_node;
 	SET_NETDEV_DEV(dev, &ofdev->dev);
 
 	spin_lock_init(&priv->bflock);
@@ -3167,12 +3167,14 @@ MODULE_DEVICE_TABLE(of, gfar_match);
 
 /* Structure for a device driver */
 static struct of_platform_driver gfar_driver = {
-	.name = "fsl-gianfar",
-	.match_table = gfar_match,
-
+	.driver = {
+		.name = "fsl-gianfar",
+		.owner = THIS_MODULE,
+		.pm = GFAR_PM_OPS,
+		.of_match_table = gfar_match,
+	},
 	.probe = gfar_probe,
 	.remove = gfar_remove,
-	.driver.pm = GFAR_PM_OPS,
 };
 
 static int __init gfar_init(void)
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index fd491e409488..f37a4c143ddd 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1499,7 +1499,8 @@ static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_dev
1499 if (i == 6) { 1499 if (i == 6) {
1500 const unsigned char *addr; 1500 const unsigned char *addr;
1501 int len; 1501 int len;
1502 addr = of_get_property(ofdev->node, "local-mac-address", &len); 1502 addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
1503 &len);
1503 if (addr != NULL && len == 6) { 1504 if (addr != NULL && len == 6) {
1504 for (i = 0; i < 6; i++) 1505 for (i = 0; i < 6; i++)
1505 macaddr[i] = (unsigned int) addr[i]; 1506 macaddr[i] = (unsigned int) addr[i];
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 2484e9e6c1ed..b150c102ca5a 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -136,7 +136,8 @@ static inline void emac_report_timeout_error(struct emac_instance *dev,
136 EMAC_FTR_440EP_PHY_CLK_FIX)) 136 EMAC_FTR_440EP_PHY_CLK_FIX))
137 DBG(dev, "%s" NL, error); 137 DBG(dev, "%s" NL, error);
138 else if (net_ratelimit()) 138 else if (net_ratelimit())
139 printk(KERN_ERR "%s: %s\n", dev->ofdev->node->full_name, error); 139 printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
140 error);
140} 141}
141 142
142/* EMAC PHY clock workaround: 143/* EMAC PHY clock workaround:
@@ -2185,7 +2186,7 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2185 strcpy(info->version, DRV_VERSION); 2186 strcpy(info->version, DRV_VERSION);
2186 info->fw_version[0] = '\0'; 2187 info->fw_version[0] = '\0';
2187 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s", 2188 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2188 dev->cell_index, dev->ofdev->node->full_name); 2189 dev->cell_index, dev->ofdev->dev.of_node->full_name);
2189 info->regdump_len = emac_ethtool_get_regs_len(ndev); 2190 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2190} 2191}
2191 2192
@@ -2379,7 +2380,7 @@ static int __devinit emac_read_uint_prop(struct device_node *np, const char *nam
2379 2380
2380static int __devinit emac_init_phy(struct emac_instance *dev) 2381static int __devinit emac_init_phy(struct emac_instance *dev)
2381{ 2382{
2382 struct device_node *np = dev->ofdev->node; 2383 struct device_node *np = dev->ofdev->dev.of_node;
2383 struct net_device *ndev = dev->ndev; 2384 struct net_device *ndev = dev->ndev;
2384 u32 phy_map, adv; 2385 u32 phy_map, adv;
2385 int i; 2386 int i;
@@ -2514,7 +2515,7 @@ static int __devinit emac_init_phy(struct emac_instance *dev)
2514 2515
2515static int __devinit emac_init_config(struct emac_instance *dev) 2516static int __devinit emac_init_config(struct emac_instance *dev)
2516{ 2517{
2517 struct device_node *np = dev->ofdev->node; 2518 struct device_node *np = dev->ofdev->dev.of_node;
2518 const void *p; 2519 const void *p;
2519 unsigned int plen; 2520 unsigned int plen;
2520 const char *pm, *phy_modes[] = { 2521 const char *pm, *phy_modes[] = {
@@ -2723,7 +2724,7 @@ static int __devinit emac_probe(struct of_device *ofdev,
2723{ 2724{
2724 struct net_device *ndev; 2725 struct net_device *ndev;
2725 struct emac_instance *dev; 2726 struct emac_instance *dev;
2726 struct device_node *np = ofdev->node; 2727 struct device_node *np = ofdev->dev.of_node;
2727 struct device_node **blist = NULL; 2728 struct device_node **blist = NULL;
2728 int err, i; 2729 int err, i;
2729 2730
@@ -2810,7 +2811,7 @@ static int __devinit emac_probe(struct of_device *ofdev,
2810 err = mal_register_commac(dev->mal, &dev->commac); 2811 err = mal_register_commac(dev->mal, &dev->commac);
2811 if (err) { 2812 if (err) {
2812 printk(KERN_ERR "%s: failed to register with mal %s!\n", 2813 printk(KERN_ERR "%s: failed to register with mal %s!\n",
2813 np->full_name, dev->mal_dev->node->full_name); 2814 np->full_name, dev->mal_dev->dev.of_node->full_name);
2814 goto err_rel_deps; 2815 goto err_rel_deps;
2815 } 2816 }
2816 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu); 2817 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
@@ -2995,9 +2996,11 @@ static struct of_device_id emac_match[] =
2995MODULE_DEVICE_TABLE(of, emac_match); 2996MODULE_DEVICE_TABLE(of, emac_match);
2996 2997
2997static struct of_platform_driver emac_driver = { 2998static struct of_platform_driver emac_driver = {
2998 .name = "emac", 2999 .driver = {
2999 .match_table = emac_match, 3000 .name = "emac",
3000 3001 .owner = THIS_MODULE,
3002 .of_match_table = emac_match,
3003 },
3001 .probe = emac_probe, 3004 .probe = emac_probe,
3002 .remove = emac_remove, 3005 .remove = emac_remove,
3003}; 3006};
diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c
index 775c850a425a..3995fafc1e08 100644
--- a/drivers/net/ibm_newemac/debug.c
+++ b/drivers/net/ibm_newemac/debug.c
@@ -33,7 +33,7 @@ static void emac_desc_dump(struct emac_instance *p)
33 int i; 33 int i;
34 printk("** EMAC %s TX BDs **\n" 34 printk("** EMAC %s TX BDs **\n"
35 " tx_cnt = %d tx_slot = %d ack_slot = %d\n", 35 " tx_cnt = %d tx_slot = %d ack_slot = %d\n",
36 p->ofdev->node->full_name, 36 p->ofdev->dev.of_node->full_name,
37 p->tx_cnt, p->tx_slot, p->ack_slot); 37 p->tx_cnt, p->tx_slot, p->ack_slot);
38 for (i = 0; i < NUM_TX_BUFF / 2; ++i) 38 for (i = 0; i < NUM_TX_BUFF / 2; ++i)
39 printk 39 printk
@@ -49,7 +49,7 @@ static void emac_desc_dump(struct emac_instance *p)
49 printk("** EMAC %s RX BDs **\n" 49 printk("** EMAC %s RX BDs **\n"
50 " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n" 50 " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n"
51 " rx_sg_skb = 0x%p\n", 51 " rx_sg_skb = 0x%p\n",
52 p->ofdev->node->full_name, 52 p->ofdev->dev.of_node->full_name,
53 p->rx_slot, p->commac.flags, p->rx_skb_size, 53 p->rx_slot, p->commac.flags, p->rx_skb_size,
54 p->rx_sync_size, p->rx_sg_skb); 54 p->rx_sync_size, p->rx_sg_skb);
55 for (i = 0; i < NUM_RX_BUFF / 2; ++i) 55 for (i = 0; i < NUM_RX_BUFF / 2; ++i)
@@ -77,7 +77,8 @@ static void emac_mac_dump(struct emac_instance *dev)
77 "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" 77 "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
78 "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n" 78 "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
79 "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n", 79 "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n",
80 dev->ofdev->node->full_name, in_be32(&p->mr0), in_be32(&p->mr1), 80 dev->ofdev->dev.of_node->full_name,
81 in_be32(&p->mr0), in_be32(&p->mr1),
81 in_be32(&p->tmr0), in_be32(&p->tmr1), 82 in_be32(&p->tmr0), in_be32(&p->tmr1),
82 in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser), 83 in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
83 in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid), 84 in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
@@ -128,7 +129,7 @@ static void emac_mal_dump(struct mal_instance *mal)
128 "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n" 129 "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n"
129 "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n" 130 "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n"
130 "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n", 131 "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n",
131 mal->ofdev->node->full_name, 132 mal->ofdev->dev.of_node->full_name,
132 get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR), 133 get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR),
133 get_mal_dcrn(mal, MAL_IER), 134 get_mal_dcrn(mal, MAL_IER),
134 get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR), 135 get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR),
diff --git a/drivers/net/ibm_newemac/debug.h b/drivers/net/ibm_newemac/debug.h
index b631842ec8d0..e596c77ccdf7 100644
--- a/drivers/net/ibm_newemac/debug.h
+++ b/drivers/net/ibm_newemac/debug.h
@@ -53,8 +53,8 @@ extern void emac_dbg_dump_all(void);
53 53
54#endif 54#endif
55 55
56#define EMAC_DBG(dev, name, fmt, arg...) \ 56#define EMAC_DBG(d, name, fmt, arg...) \
57 printk(KERN_DEBUG #name "%s: " fmt, dev->ofdev->node->full_name, ## arg) 57 printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg)
58 58
59#if DBG_LEVEL > 0 59#if DBG_LEVEL > 0
60# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x) 60# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x)
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 5b3d94419fe6..fcff9e0bd382 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -538,11 +538,11 @@ static int __devinit mal_probe(struct of_device *ofdev,
538 } 538 }
539 mal->index = index; 539 mal->index = index;
540 mal->ofdev = ofdev; 540 mal->ofdev = ofdev;
541 mal->version = of_device_is_compatible(ofdev->node, "ibm,mcmal2") ? 2 : 1; 541 mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;
542 542
543 MAL_DBG(mal, "probe" NL); 543 MAL_DBG(mal, "probe" NL);
544 544
545 prop = of_get_property(ofdev->node, "num-tx-chans", NULL); 545 prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
546 if (prop == NULL) { 546 if (prop == NULL) {
547 printk(KERN_ERR 547 printk(KERN_ERR
548 "mal%d: can't find MAL num-tx-chans property!\n", 548 "mal%d: can't find MAL num-tx-chans property!\n",
@@ -552,7 +552,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
552 } 552 }
553 mal->num_tx_chans = prop[0]; 553 mal->num_tx_chans = prop[0];
554 554
555 prop = of_get_property(ofdev->node, "num-rx-chans", NULL); 555 prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
556 if (prop == NULL) { 556 if (prop == NULL) {
557 printk(KERN_ERR 557 printk(KERN_ERR
558 "mal%d: can't find MAL num-rx-chans property!\n", 558 "mal%d: can't find MAL num-rx-chans property!\n",
@@ -562,14 +562,14 @@ static int __devinit mal_probe(struct of_device *ofdev,
562 } 562 }
563 mal->num_rx_chans = prop[0]; 563 mal->num_rx_chans = prop[0];
564 564
565 dcr_base = dcr_resource_start(ofdev->node, 0); 565 dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
566 if (dcr_base == 0) { 566 if (dcr_base == 0) {
567 printk(KERN_ERR 567 printk(KERN_ERR
568 "mal%d: can't find DCR resource!\n", index); 568 "mal%d: can't find DCR resource!\n", index);
569 err = -ENODEV; 569 err = -ENODEV;
570 goto fail; 570 goto fail;
571 } 571 }
572 mal->dcr_host = dcr_map(ofdev->node, dcr_base, 0x100); 572 mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
573 if (!DCR_MAP_OK(mal->dcr_host)) { 573 if (!DCR_MAP_OK(mal->dcr_host)) {
574 printk(KERN_ERR 574 printk(KERN_ERR
575 "mal%d: failed to map DCRs !\n", index); 575 "mal%d: failed to map DCRs !\n", index);
@@ -577,28 +577,28 @@ static int __devinit mal_probe(struct of_device *ofdev,
577 goto fail; 577 goto fail;
578 } 578 }
579 579
580 if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) { 580 if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
581#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \ 581#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
582 defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR) 582 defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
583 mal->features |= (MAL_FTR_CLEAR_ICINTSTAT | 583 mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
584 MAL_FTR_COMMON_ERR_INT); 584 MAL_FTR_COMMON_ERR_INT);
585#else 585#else
586 printk(KERN_ERR "%s: Support for 405EZ not enabled!\n", 586 printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
587 ofdev->node->full_name); 587 ofdev->dev.of_node->full_name);
588 err = -ENODEV; 588 err = -ENODEV;
589 goto fail; 589 goto fail;
590#endif 590#endif
591 } 591 }
592 592
593 mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0); 593 mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
594 mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1); 594 mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
595 mal->serr_irq = irq_of_parse_and_map(ofdev->node, 2); 595 mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);
596 596
597 if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) { 597 if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
598 mal->txde_irq = mal->rxde_irq = mal->serr_irq; 598 mal->txde_irq = mal->rxde_irq = mal->serr_irq;
599 } else { 599 } else {
600 mal->txde_irq = irq_of_parse_and_map(ofdev->node, 3); 600 mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
601 mal->rxde_irq = irq_of_parse_and_map(ofdev->node, 4); 601 mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
602 } 602 }
603 603
604 if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ || 604 if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
@@ -629,7 +629,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
 629 /* Current Axon is not happy with priority being non-0; it can 629 /* Current Axon is not happy with priority being non-0; it can
 630 * deadlock, so fix it up here 630 * deadlock, so fix it up here
631 */ 631 */
632 if (of_device_is_compatible(ofdev->node, "ibm,mcmal-axon")) 632 if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
633 cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10); 633 cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);
634 634
635 /* Apply configuration */ 635 /* Apply configuration */
@@ -701,7 +701,7 @@ static int __devinit mal_probe(struct of_device *ofdev,
701 701
702 printk(KERN_INFO 702 printk(KERN_INFO
703 "MAL v%d %s, %d TX channels, %d RX channels\n", 703 "MAL v%d %s, %d TX channels, %d RX channels\n",
704 mal->version, ofdev->node->full_name, 704 mal->version, ofdev->dev.of_node->full_name,
705 mal->num_tx_chans, mal->num_rx_chans); 705 mal->num_tx_chans, mal->num_rx_chans);
706 706
707 /* Advertise this instance to the rest of the world */ 707 /* Advertise this instance to the rest of the world */
@@ -790,9 +790,11 @@ static struct of_device_id mal_platform_match[] =
790}; 790};
791 791
792static struct of_platform_driver mal_of_driver = { 792static struct of_platform_driver mal_of_driver = {
793 .name = "mcmal", 793 .driver = {
794 .match_table = mal_platform_match, 794 .name = "mcmal",
795 795 .owner = THIS_MODULE,
796 .of_match_table = mal_platform_match,
797 },
796 .probe = mal_probe, 798 .probe = mal_probe,
797 .remove = mal_remove, 799 .remove = mal_remove,
798}; 800};
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index 5b90d34c8455..108919bcdf13 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -103,7 +103,7 @@ int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode)
103 /* Check if we need to attach to a RGMII */ 103 /* Check if we need to attach to a RGMII */
104 if (input < 0 || !rgmii_valid_mode(mode)) { 104 if (input < 0 || !rgmii_valid_mode(mode)) {
105 printk(KERN_ERR "%s: unsupported settings !\n", 105 printk(KERN_ERR "%s: unsupported settings !\n",
106 ofdev->node->full_name); 106 ofdev->dev.of_node->full_name);
107 return -ENODEV; 107 return -ENODEV;
108 } 108 }
109 109
@@ -113,7 +113,7 @@ int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode)
113 out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input)); 113 out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
114 114
115 printk(KERN_NOTICE "%s: input %d in %s mode\n", 115 printk(KERN_NOTICE "%s: input %d in %s mode\n",
116 ofdev->node->full_name, input, rgmii_mode_name(mode)); 116 ofdev->dev.of_node->full_name, input, rgmii_mode_name(mode));
117 117
118 ++dev->users; 118 ++dev->users;
119 119
@@ -231,7 +231,7 @@ void *rgmii_dump_regs(struct of_device *ofdev, void *buf)
231static int __devinit rgmii_probe(struct of_device *ofdev, 231static int __devinit rgmii_probe(struct of_device *ofdev,
232 const struct of_device_id *match) 232 const struct of_device_id *match)
233{ 233{
234 struct device_node *np = ofdev->node; 234 struct device_node *np = ofdev->dev.of_node;
235 struct rgmii_instance *dev; 235 struct rgmii_instance *dev;
236 struct resource regs; 236 struct resource regs;
237 int rc; 237 int rc;
@@ -264,11 +264,11 @@ static int __devinit rgmii_probe(struct of_device *ofdev,
264 } 264 }
265 265
266 /* Check for RGMII flags */ 266 /* Check for RGMII flags */
267 if (of_get_property(ofdev->node, "has-mdio", NULL)) 267 if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL))
268 dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO; 268 dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
269 269
270 /* CAB lacks the right properties, fix this up */ 270 /* CAB lacks the right properties, fix this up */
271 if (of_device_is_compatible(ofdev->node, "ibm,rgmii-axon")) 271 if (of_device_is_compatible(ofdev->dev.of_node, "ibm,rgmii-axon"))
272 dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO; 272 dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
273 273
274 DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n", 274 DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n",
@@ -279,7 +279,7 @@ static int __devinit rgmii_probe(struct of_device *ofdev,
279 279
280 printk(KERN_INFO 280 printk(KERN_INFO
281 "RGMII %s initialized with%s MDIO support\n", 281 "RGMII %s initialized with%s MDIO support\n",
282 ofdev->node->full_name, 282 ofdev->dev.of_node->full_name,
283 (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out"); 283 (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out");
284 284
285 wmb(); 285 wmb();
@@ -319,9 +319,11 @@ static struct of_device_id rgmii_match[] =
319}; 319};
320 320
321static struct of_platform_driver rgmii_driver = { 321static struct of_platform_driver rgmii_driver = {
322 .name = "emac-rgmii", 322 .driver = {
323 .match_table = rgmii_match, 323 .name = "emac-rgmii",
324 324 .owner = THIS_MODULE,
325 .of_match_table = rgmii_match,
326 },
325 .probe = rgmii_probe, 327 .probe = rgmii_probe,
326 .remove = rgmii_remove, 328 .remove = rgmii_remove,
327}; 329};
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index 30173a9fb557..044637144c43 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -57,7 +57,8 @@ void tah_reset(struct of_device *ofdev)
57 --n; 57 --n;
58 58
59 if (unlikely(!n)) 59 if (unlikely(!n))
60 printk(KERN_ERR "%s: reset timeout\n", ofdev->node->full_name); 60 printk(KERN_ERR "%s: reset timeout\n",
61 ofdev->dev.of_node->full_name);
61 62
 62 /* 10KB TAH TX FIFO accommodates the max MTU of 9000 */ 63
63 out_be32(&p->mr, 64 out_be32(&p->mr,
@@ -89,7 +90,7 @@ void *tah_dump_regs(struct of_device *ofdev, void *buf)
89static int __devinit tah_probe(struct of_device *ofdev, 90static int __devinit tah_probe(struct of_device *ofdev,
90 const struct of_device_id *match) 91 const struct of_device_id *match)
91{ 92{
92 struct device_node *np = ofdev->node; 93 struct device_node *np = ofdev->dev.of_node;
93 struct tah_instance *dev; 94 struct tah_instance *dev;
94 struct resource regs; 95 struct resource regs;
95 int rc; 96 int rc;
@@ -127,7 +128,7 @@ static int __devinit tah_probe(struct of_device *ofdev,
127 tah_reset(ofdev); 128 tah_reset(ofdev);
128 129
129 printk(KERN_INFO 130 printk(KERN_INFO
130 "TAH %s initialized\n", ofdev->node->full_name); 131 "TAH %s initialized\n", ofdev->dev.of_node->full_name);
131 wmb(); 132 wmb();
132 133
133 return 0; 134 return 0;
@@ -165,9 +166,11 @@ static struct of_device_id tah_match[] =
165}; 166};
166 167
167static struct of_platform_driver tah_driver = { 168static struct of_platform_driver tah_driver = {
168 .name = "emac-tah", 169 .driver = {
169 .match_table = tah_match, 170 .name = "emac-tah",
170 171 .owner = THIS_MODULE,
172 .of_match_table = tah_match,
173 },
171 .probe = tah_probe, 174 .probe = tah_probe,
172 .remove = tah_remove, 175 .remove = tah_remove,
173}; 176};
diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
index 1f038f808ab3..046dcd069c45 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ibm_newemac/zmii.c
@@ -121,13 +121,14 @@ int __devinit zmii_attach(struct of_device *ofdev, int input, int *mode)
121 dev->mode = *mode; 121 dev->mode = *mode;
122 122
123 printk(KERN_NOTICE "%s: bridge in %s mode\n", 123 printk(KERN_NOTICE "%s: bridge in %s mode\n",
124 ofdev->node->full_name, zmii_mode_name(dev->mode)); 124 ofdev->dev.of_node->full_name,
125 zmii_mode_name(dev->mode));
125 } else { 126 } else {
126 /* All inputs must use the same mode */ 127 /* All inputs must use the same mode */
127 if (*mode != PHY_MODE_NA && *mode != dev->mode) { 128 if (*mode != PHY_MODE_NA && *mode != dev->mode) {
128 printk(KERN_ERR 129 printk(KERN_ERR
129 "%s: invalid mode %d specified for input %d\n", 130 "%s: invalid mode %d specified for input %d\n",
130 ofdev->node->full_name, *mode, input); 131 ofdev->dev.of_node->full_name, *mode, input);
131 mutex_unlock(&dev->lock); 132 mutex_unlock(&dev->lock);
132 return -EINVAL; 133 return -EINVAL;
133 } 134 }
@@ -233,7 +234,7 @@ void *zmii_dump_regs(struct of_device *ofdev, void *buf)
233static int __devinit zmii_probe(struct of_device *ofdev, 234static int __devinit zmii_probe(struct of_device *ofdev,
234 const struct of_device_id *match) 235 const struct of_device_id *match)
235{ 236{
236 struct device_node *np = ofdev->node; 237 struct device_node *np = ofdev->dev.of_node;
237 struct zmii_instance *dev; 238 struct zmii_instance *dev;
238 struct resource regs; 239 struct resource regs;
239 int rc; 240 int rc;
@@ -273,7 +274,7 @@ static int __devinit zmii_probe(struct of_device *ofdev,
273 out_be32(&dev->base->fer, 0); 274 out_be32(&dev->base->fer, 0);
274 275
275 printk(KERN_INFO 276 printk(KERN_INFO
276 "ZMII %s initialized\n", ofdev->node->full_name); 277 "ZMII %s initialized\n", ofdev->dev.of_node->full_name);
277 wmb(); 278 wmb();
278 dev_set_drvdata(&ofdev->dev, dev); 279 dev_set_drvdata(&ofdev->dev, dev);
279 280
@@ -312,9 +313,11 @@ static struct of_device_id zmii_match[] =
312}; 313};
313 314
314static struct of_platform_driver zmii_driver = { 315static struct of_platform_driver zmii_driver = {
315 .name = "emac-zmii", 316 .driver = {
316 .match_table = zmii_match, 317 .name = "emac-zmii",
317 318 .owner = THIS_MODULE,
319 .of_match_table = zmii_match,
320 },
318 .probe = zmii_probe, 321 .probe = zmii_probe,
319 .remove = zmii_remove, 322 .remove = zmii_remove,
320}; 323};
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index 911c082cee5a..f940dfa1f7f8 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -107,8 +107,12 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
107 case 57600: 107 case 57600:
108 case 115200: 108 case 115200:
109 109
110 quot = (port->clk + (8 * speed)) / (16 * speed)\ 110 /*
111 - ANOMALY_05000230; 111 * IRDA is not affected by anomaly 05000230, so there is no
 112 * need to tweak the divisor like the UART driver (which will
113 * slightly speed up the baud rate on us).
114 */
115 quot = (port->clk + (8 * speed)) / (16 * speed);
112 116
113 do { 117 do {
114 udelay(utime); 118 udelay(utime);
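
For reference, the expression the bfin_sir hunk keeps, quot = (clk + 8 * speed) / (16 * speed), is the round-to-nearest form of the standard UART divisor clk / (16 * speed); the deleted "- ANOMALY_05000230" term was the UART-only correction that, per the new comment, IrDA does not need. A small standalone program showing the rounding effect, with an assumed 133 MHz clock (port->clk in the driver):

#include <stdio.h>

int main(void)
{
	unsigned long clk = 133333333;               /* hypothetical SCLK */
	unsigned int speeds[] = { 9600, 57600, 115200 };
	unsigned int i;

	for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++) {
		unsigned int speed = speeds[i];
		/* Adding 8*speed before dividing by 16*speed rounds to nearest. */
		unsigned long quot = (clk + 8UL * speed) / (16UL * speed);

		printf("speed %6u -> divisor %lu (plain truncation: %lu)\n",
		       speed, quot, clk / (16UL * speed));
	}
	return 0;
}
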
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index d0ea3d6dea95..ffae480587ae 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -360,6 +360,7 @@ struct ixgbe_adapter {
360 u32 flags2; 360 u32 flags2;
361#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) 361#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
362#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) 362#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
363#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
363/* default to trying for four seconds */ 364/* default to trying for four seconds */
364#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) 365#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
365 366
@@ -407,6 +408,8 @@ struct ixgbe_adapter {
407 u16 eeprom_version; 408 u16 eeprom_version;
408 409
409 int node; 410 int node;
411 struct work_struct check_overtemp_task;
412 u32 interrupt_event;
410 413
411 /* SR-IOV */ 414 /* SR-IOV */
412 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); 415 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index f2b7ff44215b..9c02d6014cc4 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1236,6 +1236,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
1236 .setup_link = &ixgbe_setup_phy_link_generic, 1236 .setup_link = &ixgbe_setup_phy_link_generic,
1237 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, 1237 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
1238 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, 1238 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
1239 .check_overtemp = &ixgbe_tn_check_overtemp,
1239}; 1240};
1240 1241
1241struct ixgbe_info ixgbe_82598_info = { 1242struct ixgbe_info ixgbe_82598_info = {
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index e9706eb8e4ff..a4e2901f2f08 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -2395,6 +2395,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = {
2395 .write_i2c_byte = &ixgbe_write_i2c_byte_generic, 2395 .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
2396 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, 2396 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
2397 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, 2397 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
2398 .check_overtemp = &ixgbe_tn_check_overtemp,
2398}; 2399};
2399 2400
2400struct ixgbe_info ixgbe_82599_info = { 2401struct ixgbe_info ixgbe_82599_info = {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 9551cbb7bf01..d571d101de08 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -108,6 +108,8 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
108 board_82599 }, 108 board_82599 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
110 board_82599 }, 110 board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
112 board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), 113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
112 board_82599 }, 114 board_82599 },
113 115
@@ -1618,6 +1620,48 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1618 } 1620 }
1619} 1621}
1620 1622
1623/**
1624 * ixgbe_check_overtemp_task - worker thread to check over tempurature
1625 * @work: pointer to work_struct containing our data
1626 **/
1627static void ixgbe_check_overtemp_task(struct work_struct *work)
1628{
1629 struct ixgbe_adapter *adapter = container_of(work,
1630 struct ixgbe_adapter,
1631 check_overtemp_task);
1632 struct ixgbe_hw *hw = &adapter->hw;
1633 u32 eicr = adapter->interrupt_event;
1634
1635 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
1636 switch (hw->device_id) {
1637 case IXGBE_DEV_ID_82599_T3_LOM: {
1638 u32 autoneg;
1639 bool link_up = false;
1640
1641 if (hw->mac.ops.check_link)
1642 hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
1643
1644 if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
1645 (eicr & IXGBE_EICR_LSC))
1646 /* Check if this is due to overtemp */
1647 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
1648 break;
1649 }
1650 return;
1651 default:
1652 if (!(eicr & IXGBE_EICR_GPI_SDP0))
1653 return;
1654 break;
1655 }
1656 DPRINTK(DRV, ERR, "Network adapter has been stopped because it "
 1657 "has overheated. Restart the computer. If the problem "
1658 "persists, power off the system and replace the "
1659 "adapter\n");
1660 /* write to clear the interrupt */
1661 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
1662 }
1663}
1664
1621static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) 1665static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
1622{ 1666{
1623 struct ixgbe_hw *hw = &adapter->hw; 1667 struct ixgbe_hw *hw = &adapter->hw;
@@ -1689,6 +1733,10 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1689 1733
1690 if (hw->mac.type == ixgbe_mac_82599EB) { 1734 if (hw->mac.type == ixgbe_mac_82599EB) {
1691 ixgbe_check_sfp_event(adapter, eicr); 1735 ixgbe_check_sfp_event(adapter, eicr);
1736 adapter->interrupt_event = eicr;
1737 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1738 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
1739 schedule_work(&adapter->check_overtemp_task);
1692 1740
1693 /* Handle Flow Director Full threshold interrupt */ 1741 /* Handle Flow Director Full threshold interrupt */
1694 if (eicr & IXGBE_EICR_FLOW_DIR) { 1742 if (eicr & IXGBE_EICR_FLOW_DIR) {
@@ -2190,6 +2238,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
2190 u32 mask; 2238 u32 mask;
2191 2239
2192 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 2240 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
2241 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2242 mask |= IXGBE_EIMS_GPI_SDP0;
2193 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 2243 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2194 mask |= IXGBE_EIMS_GPI_SDP1; 2244 mask |= IXGBE_EIMS_GPI_SDP1;
2195 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 2245 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -2250,6 +2300,9 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2250 ixgbe_check_sfp_event(adapter, eicr); 2300 ixgbe_check_sfp_event(adapter, eicr);
2251 2301
2252 ixgbe_check_fan_failure(adapter, eicr); 2302 ixgbe_check_fan_failure(adapter, eicr);
2303 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2304 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
2305 schedule_work(&adapter->check_overtemp_task);
2253 2306
2254 if (napi_schedule_prep(&(q_vector->napi))) { 2307 if (napi_schedule_prep(&(q_vector->napi))) {
2255 adapter->tx_ring[0]->total_packets = 0; 2308 adapter->tx_ring[0]->total_packets = 0;
@@ -3265,6 +3318,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3265 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3318 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3266 } 3319 }
3267 3320
 3321 /* Enable thermal overheat sensor interrupt */
3322 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
3323 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3324 gpie |= IXGBE_SDP0_GPIEN;
3325 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3326 }
3327
3268 /* Enable fan failure interrupt if media type is copper */ 3328 /* Enable fan failure interrupt if media type is copper */
3269 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 3329 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
3270 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3330 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
@@ -3666,6 +3726,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3666 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 3726 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3667 cancel_work_sync(&adapter->fdir_reinit_task); 3727 cancel_work_sync(&adapter->fdir_reinit_task);
3668 3728
3729 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
3730 cancel_work_sync(&adapter->check_overtemp_task);
3731
3669 /* disable transmits in the hardware now that interrupts are off */ 3732 /* disable transmits in the hardware now that interrupts are off */
3670 for (i = 0; i < adapter->num_tx_queues; i++) { 3733 for (i = 0; i < adapter->num_tx_queues; i++) {
3671 j = adapter->tx_ring[i]->reg_idx; 3734 j = adapter->tx_ring[i]->reg_idx;
@@ -4645,6 +4708,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4645 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 4708 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
4646 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 4709 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4647 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 4710 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
4711 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
4712 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4648 if (dev->features & NETIF_F_NTUPLE) { 4713 if (dev->features & NETIF_F_NTUPLE) {
4649 /* Flow Director perfect filter enabled */ 4714 /* Flow Director perfect filter enabled */
4650 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 4715 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
@@ -6561,7 +6626,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6561 } 6626 }
6562 6627
6563 /* reset_hw fills in the perm_addr as well */ 6628 /* reset_hw fills in the perm_addr as well */
6629 hw->phy.reset_if_overtemp = true;
6564 err = hw->mac.ops.reset_hw(hw); 6630 err = hw->mac.ops.reset_hw(hw);
6631 hw->phy.reset_if_overtemp = false;
6565 if (err == IXGBE_ERR_SFP_NOT_PRESENT && 6632 if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
6566 hw->mac.type == ixgbe_mac_82598EB) { 6633 hw->mac.type == ixgbe_mac_82598EB) {
6567 /* 6634 /*
@@ -6730,6 +6797,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6730 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 6797 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
6731 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); 6798 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
6732 6799
6800 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
6801 INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task);
6733#ifdef CONFIG_IXGBE_DCA 6802#ifdef CONFIG_IXGBE_DCA
6734 if (dca_add_requester(&pdev->dev) == 0) { 6803 if (dca_add_requester(&pdev->dev) == 0) {
6735 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 6804 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
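
Taken together, the ixgbe_main.c hunks wire up the new thermal path: both interrupt handlers stash the cause bits in adapter->interrupt_event and schedule check_overtemp_task, and the worker then decides whether the event really was thermal before logging and clearing it. A compilable sketch of just that decision tree follows; the bit values are illustrative mocks, except the 0x151C T3 LOM device ID, which the patch itself defines.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define EICR_GPI_SDP0   (1u << 0)   /* mock bit positions */
#define EICR_LSC        (1u << 1)
#define DEV_ID_T3_LOM   0x151C      /* IXGBE_DEV_ID_82599_T3_LOM in the patch */
#define ERR_OVERTEMP    (-26)       /* IXGBE_ERR_OVERTEMP in the patch */

/* Stand-in for hw->phy.ops.check_overtemp(). */
static int check_overtemp(bool alarm) { return alarm ? ERR_OVERTEMP : 0; }

static void overtemp_task(uint16_t device_id, uint32_t eicr,
			  bool link_up, bool alarm)
{
	switch (device_id) {
	case DEV_ID_T3_LOM:
		/* SDP0 with no link, or a link-status change, may be thermal;
		 * confirm with the PHY before reporting. */
		if ((((eicr & EICR_GPI_SDP0) && !link_up) ||
		     (eicr & EICR_LSC)) &&
		    check_overtemp(alarm) == ERR_OVERTEMP)
			break;
		return;
	default:
		if (!(eicr & EICR_GPI_SDP0))
			return;     /* not a thermal event on other parts */
		break;
	}
	printf("adapter stopped: overtemperature detected\n");
}

int main(void)
{
	overtemp_task(DEV_ID_T3_LOM, EICR_LSC, false, true);   /* reports */
	overtemp_task(DEV_ID_T3_LOM, EICR_LSC, true,  false);  /* silent  */
	return 0;
}
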
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 22d21af14783..09e1911ff510 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -135,6 +135,11 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
135 **/ 135 **/
136s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) 136s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
137{ 137{
138 /* Don't reset PHY if it's shut down due to overtemp. */
139 if (!hw->phy.reset_if_overtemp &&
140 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
141 return 0;
142
138 /* 143 /*
139 * Perform soft PHY reset to the PHY_XS. 144 * Perform soft PHY reset to the PHY_XS.
140 * This will cause a soft reset to the PHY 145 * This will cause a soft reset to the PHY
@@ -1345,3 +1350,28 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
1345 return status; 1350 return status;
1346} 1351}
1347 1352
1353/**
 1354 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
1355 * @hw: pointer to hardware structure
1356 *
1357 * Checks if the LASI temp alarm status was triggered due to overtemp
1358 **/
1359s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
1360{
1361 s32 status = 0;
1362 u16 phy_data = 0;
1363
1364 if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
1365 goto out;
1366
1367 /* Check that the LASI temp alarm status was triggered */
1368 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
1369 MDIO_MMD_PMAPMD, &phy_data);
1370
1371 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
1372 goto out;
1373
1374 status = IXGBE_ERR_OVERTEMP;
1375out:
1376 return status;
1377}
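
ixgbe_tn_check_overtemp itself reduces to one MDIO read and a bit test: fetch the TN PHY's LASI status register (0x9005) and check the temp-alarm bit (0x0008), both added to ixgbe_phy.h below. A standalone sketch with a stubbed register read standing in for hw->phy.ops.read_reg:

#include <stdio.h>
#include <stdint.h>

#define TN_LASI_STATUS_REG        0x9005   /* register offset from the patch */
#define TN_LASI_STATUS_TEMP_ALARM 0x0008   /* alarm bit from the patch */
#define ERR_OVERTEMP              (-26)

/* Stub returning canned data; the real read goes over MDIO. */
static uint16_t phy_read_reg(uint16_t reg)
{
	return reg == TN_LASI_STATUS_REG ? TN_LASI_STATUS_TEMP_ALARM : 0;
}

static int tn_check_overtemp(void)
{
	uint16_t phy_data = phy_read_reg(TN_LASI_STATUS_REG);

	/* Only the temp-alarm bit signals a thermal shutdown condition. */
	if (!(phy_data & TN_LASI_STATUS_TEMP_ALARM))
		return 0;
	return ERR_OVERTEMP;
}

int main(void)
{
	printf("status: %d\n", tn_check_overtemp());   /* prints -26 here */
	return 0;
}
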
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index c9c545941407..ef4ba834c593 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -80,6 +80,8 @@
80#define IXGBE_I2C_T_SU_STO 4 80#define IXGBE_I2C_T_SU_STO 4
81#define IXGBE_I2C_T_BUF 5 81#define IXGBE_I2C_T_BUF 5
82 82
83#define IXGBE_TN_LASI_STATUS_REG 0x9005
84#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
83 85
84s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); 86s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
85s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); 87s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
@@ -106,6 +108,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
106s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 108s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
107 u16 *list_offset, 109 u16 *list_offset,
108 u16 *data_offset); 110 u16 *data_offset);
111s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
109s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 112s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
110 u8 dev_addr, u8 *data); 113 u8 dev_addr, u8 *data);
111s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, 114s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 39b9be897439..2eb6e151016c 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -51,6 +51,7 @@
51#define IXGBE_DEV_ID_82599_KX4 0x10F7 51#define IXGBE_DEV_ID_82599_KX4 0x10F7
52#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 52#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
53#define IXGBE_DEV_ID_82599_KR 0x1517 53#define IXGBE_DEV_ID_82599_KR 0x1517
54#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
54#define IXGBE_DEV_ID_82599_CX4 0x10F9 55#define IXGBE_DEV_ID_82599_CX4 0x10F9
55#define IXGBE_DEV_ID_82599_SFP 0x10FB 56#define IXGBE_DEV_ID_82599_SFP 0x10FB
56#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 57#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
@@ -2470,6 +2471,7 @@ struct ixgbe_phy_operations {
2470 s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); 2471 s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
2471 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); 2472 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
2472 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); 2473 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
2474 s32 (*check_overtemp)(struct ixgbe_hw *);
2473}; 2475};
2474 2476
2475struct ixgbe_eeprom_info { 2477struct ixgbe_eeprom_info {
@@ -2518,6 +2520,7 @@ struct ixgbe_phy_info {
2518 enum ixgbe_smart_speed smart_speed; 2520 enum ixgbe_smart_speed smart_speed;
2519 bool smart_speed_active; 2521 bool smart_speed_active;
2520 bool multispeed_fiber; 2522 bool multispeed_fiber;
2523 bool reset_if_overtemp;
2521}; 2524};
2522 2525
2523#include "ixgbe_mbx.h" 2526#include "ixgbe_mbx.h"
@@ -2605,6 +2608,7 @@ struct ixgbe_info {
2605#define IXGBE_ERR_FDIR_REINIT_FAILED -23 2608#define IXGBE_ERR_FDIR_REINIT_FAILED -23
2606#define IXGBE_ERR_EEPROM_VERSION -24 2609#define IXGBE_ERR_EEPROM_VERSION -24
2607#define IXGBE_ERR_NO_SPACE -25 2610#define IXGBE_ERR_NO_SPACE -25
2611#define IXGBE_ERR_OVERTEMP -26
2608#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 2612#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
2609 2613
2610#endif /* _IXGBE_TYPE_H_ */ 2614#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index b59b24d667f0..fa7620e28404 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -920,14 +920,14 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
920 mutex_init(&lp->indirect_mutex); 920 mutex_init(&lp->indirect_mutex);
921 921
922 /* map device registers */ 922 /* map device registers */
923 lp->regs = of_iomap(op->node, 0); 923 lp->regs = of_iomap(op->dev.of_node, 0);
924 if (!lp->regs) { 924 if (!lp->regs) {
925 dev_err(&op->dev, "could not map temac regs.\n"); 925 dev_err(&op->dev, "could not map temac regs.\n");
926 goto nodev; 926 goto nodev;
927 } 927 }
928 928
929 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 929 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
930 np = of_parse_phandle(op->node, "llink-connected", 0); 930 np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
931 if (!np) { 931 if (!np) {
932 dev_err(&op->dev, "could not find DMA node\n"); 932 dev_err(&op->dev, "could not find DMA node\n");
933 goto nodev; 933 goto nodev;
@@ -959,7 +959,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
959 of_node_put(np); /* Finished with the DMA node; drop the reference */ 959 of_node_put(np); /* Finished with the DMA node; drop the reference */
960 960
961 /* Retrieve the MAC address */ 961 /* Retrieve the MAC address */
962 addr = of_get_property(op->node, "local-mac-address", &size); 962 addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
963 if ((!addr) || (size != 6)) { 963 if ((!addr) || (size != 6)) {
964 dev_err(&op->dev, "could not find MAC address\n"); 964 dev_err(&op->dev, "could not find MAC address\n");
965 rc = -ENODEV; 965 rc = -ENODEV;
@@ -967,11 +967,11 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
967 } 967 }
968 temac_set_mac_address(ndev, (void *)addr); 968 temac_set_mac_address(ndev, (void *)addr);
969 969
970 rc = temac_mdio_setup(lp, op->node); 970 rc = temac_mdio_setup(lp, op->dev.of_node);
971 if (rc) 971 if (rc)
972 dev_warn(&op->dev, "error registering MDIO bus\n"); 972 dev_warn(&op->dev, "error registering MDIO bus\n");
973 973
974 lp->phy_node = of_parse_phandle(op->node, "phy-handle", 0); 974 lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
975 if (lp->phy_node) 975 if (lp->phy_node)
976 dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np); 976 dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np);
977 977
@@ -1024,12 +1024,12 @@ static struct of_device_id temac_of_match[] __devinitdata = {
1024MODULE_DEVICE_TABLE(of, temac_of_match); 1024MODULE_DEVICE_TABLE(of, temac_of_match);
1025 1025
1026static struct of_platform_driver temac_of_driver = { 1026static struct of_platform_driver temac_of_driver = {
1027 .match_table = temac_of_match,
1028 .probe = temac_of_probe, 1027 .probe = temac_of_probe,
1029 .remove = __devexit_p(temac_of_remove), 1028 .remove = __devexit_p(temac_of_remove),
1030 .driver = { 1029 .driver = {
1031 .owner = THIS_MODULE, 1030 .owner = THIS_MODULE,
1032 .name = "xilinx_temac", 1031 .name = "xilinx_temac",
1032 .of_match_table = temac_of_match,
1033 }, 1033 },
1034}; 1034};
1035 1035
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 4e238afab4a3..87e8d4cb4057 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -634,11 +634,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
634 634
635 err = register_netdevice(dev); 635 err = register_netdevice(dev);
636 if (err < 0) 636 if (err < 0)
637 return err; 637 goto destroy_port;
638 638
639 list_add_tail(&vlan->list, &port->vlans); 639 list_add_tail(&vlan->list, &port->vlans);
640 netif_stacked_transfer_operstate(lowerdev, dev); 640 netif_stacked_transfer_operstate(lowerdev, dev);
641
641 return 0; 642 return 0;
643
644destroy_port:
645 if (list_empty(&port->vlans))
646 macvlan_port_destroy(lowerdev);
647
648 return err;
642} 649}
643EXPORT_SYMBOL_GPL(macvlan_common_newlink); 650EXPORT_SYMBOL_GPL(macvlan_common_newlink);
644 651
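
The macvlan change fixes a leak on the error path: when register_netdevice() fails for the first macvlan on a lower device, the port that macvlan_common_newlink had just created was previously abandoned; the new destroy_port label tears it down when no vlans are attached. A toy C model of the same unwind pattern (the port/newlink names here are hypothetical, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct port { int nvlans; };

static struct port *port_create(void)    { return calloc(1, sizeof(struct port)); }
static void port_destroy(struct port *p) { free(p); }
static int register_dev(int fail)        { return fail ? -1 : 0; }

static int newlink(struct port **pp, int fail)
{
	struct port *port = *pp;
	int err;

	if (!port) {                    /* first device creates the port */
		port = port_create();
		if (!port)
			return -1;
		*pp = port;
	}

	err = register_dev(fail);
	if (err < 0)
		goto destroy_port;      /* the fix: unwind instead of returning */

	port->nvlans++;
	return 0;

destroy_port:
	if (port->nvlans == 0) {        /* only tear down a still-unused port */
		port_destroy(port);
		*pp = NULL;
	}
	return err;
}

int main(void)
{
	struct port *p = NULL;

	printf("fail path: %d, port %p\n", newlink(&p, 1), (void *)p);
	printf("ok path:   %d, port %p\n", newlink(&p, 0), (void *)p);
	port_destroy(p);
	return 0;
}
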
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 57288ca1395f..b07e4dee80aa 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -163,28 +163,30 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
163 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], 163 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
164 cur_order, gfp_mask); 164 cur_order, gfp_mask);
165 165
166 if (!ret) { 166 if (ret) {
167 ++chunk->npages; 167 if (--cur_order < 0)
168 168 goto fail;
169 if (coherent) 169 else
170 ++chunk->nsg; 170 continue;
171 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { 171 }
172 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
173 chunk->npages,
174 PCI_DMA_BIDIRECTIONAL);
175 172
176 if (chunk->nsg <= 0) 173 ++chunk->npages;
177 goto fail;
178 174
179 chunk = NULL; 175 if (coherent)
180 } 176 ++chunk->nsg;
177 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
178 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
179 chunk->npages,
180 PCI_DMA_BIDIRECTIONAL);
181 181
182 npages -= 1 << cur_order; 182 if (chunk->nsg <= 0)
183 } else {
184 --cur_order;
185 if (cur_order < 0)
186 goto fail; 183 goto fail;
187 } 184 }
185
186 if (chunk->npages == MLX4_ICM_CHUNK_LEN)
187 chunk = NULL;
188
189 npages -= 1 << cur_order;
188 } 190 }
189 191
190 if (!coherent && chunk) { 192 if (!coherent && chunk) {
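
The mlx4 rework inverts the ICM allocation loop: the failure case is handled first, halving cur_order and retrying, so the success path (count the pages, map the chunk once it is full) reads straight down instead of nesting inside an if/else. A compilable toy loop with the same retry-with-smaller-order shape; the fake allocator here just refuses large blocks to force the fallback:

#include <stdio.h>
#include <stdlib.h>

/* Toy allocator that fails above order 2, mimicking memory pressure. */
static void *alloc_pages_order(int order)
{
	if (order > 2)
		return NULL;
	return malloc((size_t)4096 << order);
}

int main(void)
{
	int npages = 16, cur_order = 4;

	while (npages > 0) {
		void *p = alloc_pages_order(cur_order);

		/* Failure handled up front: shrink the request and retry. */
		if (!p) {
			if (--cur_order < 0)
				return 1;   /* nothing smaller left to try */
			continue;
		}

		printf("got %d page(s) at order %d\n", 1 << cur_order, cur_order);
		npages -= 1 << cur_order;
		free(p);
	}
	return 0;
}
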
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 3898108f98ce..1a57c3da1f49 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -928,7 +928,7 @@ static const struct net_device_ops myri_ops = {
928 928
929static int __devinit myri_sbus_probe(struct of_device *op, const struct of_device_id *match) 929static int __devinit myri_sbus_probe(struct of_device *op, const struct of_device_id *match)
930{ 930{
931 struct device_node *dp = op->node; 931 struct device_node *dp = op->dev.of_node;
932 static unsigned version_printed; 932 static unsigned version_printed;
933 struct net_device *dev; 933 struct net_device *dev;
934 struct myri_eth *mp; 934 struct myri_eth *mp;
@@ -1161,8 +1161,11 @@ static const struct of_device_id myri_sbus_match[] = {
1161MODULE_DEVICE_TABLE(of, myri_sbus_match); 1161MODULE_DEVICE_TABLE(of, myri_sbus_match);
1162 1162
1163static struct of_platform_driver myri_sbus_driver = { 1163static struct of_platform_driver myri_sbus_driver = {
1164 .name = "myri", 1164 .driver = {
1165 .match_table = myri_sbus_match, 1165 .name = "myri",
1166 .owner = THIS_MODULE,
1167 .of_match_table = myri_sbus_match,
1168 },
1166 .probe = myri_sbus_probe, 1169 .probe = myri_sbus_probe,
1167 .remove = __devexit_p(myri_sbus_remove), 1170 .remove = __devexit_p(myri_sbus_remove),
1168}; 1171};
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 30abb4e436f1..63e8e3893bd6 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -9115,7 +9115,7 @@ static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
9115 const u32 *int_prop; 9115 const u32 *int_prop;
9116 int i; 9116 int i;
9117 9117
9118 int_prop = of_get_property(op->node, "interrupts", NULL); 9118 int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
9119 if (!int_prop) 9119 if (!int_prop)
9120 return -ENODEV; 9120 return -ENODEV;
9121 9121
@@ -9266,7 +9266,7 @@ static int __devinit niu_get_of_props(struct niu *np)
9266 int prop_len; 9266 int prop_len;
9267 9267
9268 if (np->parent->plat_type == PLAT_TYPE_NIU) 9268 if (np->parent->plat_type == PLAT_TYPE_NIU)
9269 dp = np->op->node; 9269 dp = np->op->dev.of_node;
9270 else 9270 else
9271 dp = pci_device_to_OF_node(np->pdev); 9271 dp = pci_device_to_OF_node(np->pdev);
9272 9272
@@ -10083,10 +10083,10 @@ static int __devinit niu_of_probe(struct of_device *op,
10083 10083
10084 niu_driver_version(); 10084 niu_driver_version();
10085 10085
10086 reg = of_get_property(op->node, "reg", NULL); 10086 reg = of_get_property(op->dev.of_node, "reg", NULL);
10087 if (!reg) { 10087 if (!reg) {
10088 dev_err(&op->dev, "%s: No 'reg' property, aborting\n", 10088 dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
10089 op->node->full_name); 10089 op->dev.of_node->full_name);
10090 return -ENODEV; 10090 return -ENODEV;
10091 } 10091 }
10092 10092
@@ -10099,7 +10099,7 @@ static int __devinit niu_of_probe(struct of_device *op,
10099 np = netdev_priv(dev); 10099 np = netdev_priv(dev);
10100 10100
10101 memset(&parent_id, 0, sizeof(parent_id)); 10101 memset(&parent_id, 0, sizeof(parent_id));
10102 parent_id.of = of_get_parent(op->node); 10102 parent_id.of = of_get_parent(op->dev.of_node);
10103 10103
10104 np->parent = niu_get_parent(np, &parent_id, 10104 np->parent = niu_get_parent(np, &parent_id,
10105 PLAT_TYPE_NIU); 10105 PLAT_TYPE_NIU);
@@ -10234,8 +10234,11 @@ static const struct of_device_id niu_match[] = {
10234MODULE_DEVICE_TABLE(of, niu_match); 10234MODULE_DEVICE_TABLE(of, niu_match);
10235 10235
10236static struct of_platform_driver niu_of_driver = { 10236static struct of_platform_driver niu_of_driver = {
10237 .name = "niu", 10237 .driver = {
10238 .match_table = niu_match, 10238 .name = "niu",
10239 .owner = THIS_MODULE,
10240 .of_match_table = niu_match,
10241 },
10239 .probe = niu_of_probe, 10242 .probe = niu_of_probe,
10240 .remove = __devexit_p(niu_of_remove), 10243 .remove = __devexit_p(niu_of_remove),
10241}; 10244};
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 35897134a5dd..fc5fef2a8175 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -199,12 +199,12 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
199 if (!pdata) 199 if (!pdata)
200 return -ENOMEM; 200 return -ENOMEM;
201 201
202 ret = of_get_gpio(ofdev->node, 0); 202 ret = of_get_gpio(ofdev->dev.of_node, 0);
203 if (ret < 0) 203 if (ret < 0)
204 goto out_free; 204 goto out_free;
205 pdata->mdc = ret; 205 pdata->mdc = ret;
206 206
207 ret = of_get_gpio(ofdev->node, 1); 207 ret = of_get_gpio(ofdev->dev.of_node, 1);
208 if (ret < 0) 208 if (ret < 0)
209 goto out_free; 209 goto out_free;
210 pdata->mdio = ret; 210 pdata->mdio = ret;
@@ -213,7 +213,7 @@ static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
213 if (!new_bus) 213 if (!new_bus)
214 goto out_free; 214 goto out_free;
215 215
216 ret = of_mdiobus_register(new_bus, ofdev->node); 216 ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
217 if (ret) 217 if (ret)
218 mdio_gpio_bus_deinit(&ofdev->dev); 218 mdio_gpio_bus_deinit(&ofdev->dev);
219 219
@@ -241,8 +241,11 @@ static struct of_device_id mdio_ofgpio_match[] = {
241MODULE_DEVICE_TABLE(of, mdio_ofgpio_match); 241MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
242 242
243static struct of_platform_driver mdio_ofgpio_driver = { 243static struct of_platform_driver mdio_ofgpio_driver = {
244 .name = "mdio-gpio", 244 .driver = {
245 .match_table = mdio_ofgpio_match, 245 .name = "mdio-gpio",
246 .owner = THIS_MODULE,
247 .of_match_table = mdio_ofgpio_match,
248 },
246 .probe = mdio_ofgpio_probe, 249 .probe = mdio_ofgpio_probe,
247 .remove = __devexit_p(mdio_ofgpio_remove), 250 .remove = __devexit_p(mdio_ofgpio_remove),
248}; 251};
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 5441688daba7..c5f8eb102bf7 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2926,5 +2926,5 @@ EXPORT_SYMBOL(ppp_output_wakeup);
2926EXPORT_SYMBOL(ppp_register_compressor); 2926EXPORT_SYMBOL(ppp_register_compressor);
2927EXPORT_SYMBOL(ppp_unregister_compressor); 2927EXPORT_SYMBOL(ppp_unregister_compressor);
2928MODULE_LICENSE("GPL"); 2928MODULE_LICENSE("GPL");
2929MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR); 2929MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
2930MODULE_ALIAS("/dev/ppp"); 2930MODULE_ALIAS("devname:ppp");
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index b1b93ff2351f..805b64d1e893 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -289,6 +289,7 @@ static void pppoe_flush_dev(struct net_device *dev)
289 struct pppoe_net *pn; 289 struct pppoe_net *pn;
290 int i; 290 int i;
291 291
292 pn = pppoe_pernet(dev_net(dev));
292 write_lock_bh(&pn->hash_lock); 293 write_lock_bh(&pn->hash_lock);
293 for (i = 0; i < PPPOE_HASH_SIZE; i++) { 294 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
294 struct pppox_sock *po = pn->hash_table[i]; 295 struct pppox_sock *po = pn->hash_table[i];
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 586ed0915a29..501a55ffce57 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1294,6 +1294,9 @@ static int sh_mdio_release(struct net_device *ndev)
1294 /* remove mdio bus info from net_device */ 1294 /* remove mdio bus info from net_device */
1295 dev_set_drvdata(&ndev->dev, NULL); 1295 dev_set_drvdata(&ndev->dev, NULL);
1296 1296
1297 /* free interrupts memory */
1298 kfree(bus->irq);
1299
1297 /* free bitbang info */ 1300 /* free bitbang info */
1298 free_mdio_bitbang(bus); 1301 free_mdio_bitbang(bus);
1299 1302
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 4591fe9bf0b9..367e96f317d4 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -1131,8 +1131,8 @@ static int __devinit bigmac_ether_init(struct of_device *op,
1131 goto fail_and_cleanup; 1131 goto fail_and_cleanup;
1132 1132
1133 /* Get supported SBUS burst sizes. */ 1133 /* Get supported SBUS burst sizes. */
1134 bsizes = of_getintprop_default(qec_op->node, "burst-sizes", 0xff); 1134 bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
1135 bsizes_more = of_getintprop_default(qec_op->node, "burst-sizes", 0xff); 1135 bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
1136 1136
1137 bsizes &= 0xff; 1137 bsizes &= 0xff;
1138 if (bsizes_more != 0xff) 1138 if (bsizes_more != 0xff)
@@ -1184,7 +1184,7 @@ static int __devinit bigmac_ether_init(struct of_device *op,
1184 } 1184 }
1185 1185
1186 /* Get the board revision of this BigMAC. */ 1186 /* Get the board revision of this BigMAC. */
1187 bp->board_rev = of_getintprop_default(bp->bigmac_op->node, 1187 bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
1188 "board-version", 1); 1188 "board-version", 1);
1189 1189
1190 /* Init auto-negotiation timer state. */ 1190 /* Init auto-negotiation timer state. */
@@ -1290,8 +1290,11 @@ static const struct of_device_id bigmac_sbus_match[] = {
1290MODULE_DEVICE_TABLE(of, bigmac_sbus_match); 1290MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
1291 1291
1292static struct of_platform_driver bigmac_sbus_driver = { 1292static struct of_platform_driver bigmac_sbus_driver = {
1293 .name = "sunbmac", 1293 .driver = {
1294 .match_table = bigmac_sbus_match, 1294 .name = "sunbmac",
1295 .owner = THIS_MODULE,
1296 .of_match_table = bigmac_sbus_match,
1297 },
1295 .probe = bigmac_sbus_probe, 1298 .probe = bigmac_sbus_probe,
1296 .remove = __devexit_p(bigmac_sbus_remove), 1299 .remove = __devexit_p(bigmac_sbus_remove),
1297}; 1300};
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 915c5909c7a8..3d9650b8d38f 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2481,7 +2481,7 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
2481 else { 2481 else {
2482 const struct linux_prom_registers *regs; 2482 const struct linux_prom_registers *regs;
2483 struct of_device *op = hp->happy_dev; 2483 struct of_device *op = hp->happy_dev;
2484 regs = of_get_property(op->node, "regs", NULL); 2484 regs = of_get_property(op->dev.of_node, "regs", NULL);
2485 if (regs) 2485 if (regs)
2486 sprintf(info->bus_info, "SBUS:%d", 2486 sprintf(info->bus_info, "SBUS:%d",
2487 regs->which_io); 2487 regs->which_io);
@@ -2641,14 +2641,14 @@ static const struct net_device_ops hme_netdev_ops = {
2641#ifdef CONFIG_SBUS 2641#ifdef CONFIG_SBUS
2642static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe) 2642static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
2643{ 2643{
2644 struct device_node *dp = op->node, *sbus_dp; 2644 struct device_node *dp = op->dev.of_node, *sbus_dp;
2645 struct quattro *qp = NULL; 2645 struct quattro *qp = NULL;
2646 struct happy_meal *hp; 2646 struct happy_meal *hp;
2647 struct net_device *dev; 2647 struct net_device *dev;
2648 int i, qfe_slot = -1; 2648 int i, qfe_slot = -1;
2649 int err = -ENODEV; 2649 int err = -ENODEV;
2650 2650
2651 sbus_dp = to_of_device(op->dev.parent)->node; 2651 sbus_dp = to_of_device(op->dev.parent)->dev.of_node;
2652 2652
2653 /* We can match PCI devices too, do not accept those here. */ 2653 /* We can match PCI devices too, do not accept those here. */
2654 if (strcmp(sbus_dp->name, "sbus")) 2654 if (strcmp(sbus_dp->name, "sbus"))
@@ -3237,7 +3237,7 @@ static void happy_meal_pci_exit(void)
3237#ifdef CONFIG_SBUS 3237#ifdef CONFIG_SBUS
3238static int __devinit hme_sbus_probe(struct of_device *op, const struct of_device_id *match) 3238static int __devinit hme_sbus_probe(struct of_device *op, const struct of_device_id *match)
3239{ 3239{
3240 struct device_node *dp = op->node; 3240 struct device_node *dp = op->dev.of_node;
3241 const char *model = of_get_property(dp, "model", NULL); 3241 const char *model = of_get_property(dp, "model", NULL);
3242 int is_qfe = (match->data != NULL); 3242 int is_qfe = (match->data != NULL);
3243 3243
@@ -3291,8 +3291,11 @@ static const struct of_device_id hme_sbus_match[] = {
3291MODULE_DEVICE_TABLE(of, hme_sbus_match); 3291MODULE_DEVICE_TABLE(of, hme_sbus_match);
3292 3292
3293static struct of_platform_driver hme_sbus_driver = { 3293static struct of_platform_driver hme_sbus_driver = {
3294 .name = "hme", 3294 .driver = {
3295 .match_table = hme_sbus_match, 3295 .name = "hme",
3296 .owner = THIS_MODULE,
3297 .of_match_table = hme_sbus_match,
3298 },
3296 .probe = hme_sbus_probe, 3299 .probe = hme_sbus_probe,
3297 .remove = __devexit_p(hme_sbus_remove), 3300 .remove = __devexit_p(hme_sbus_remove),
3298}; 3301};
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 386af7bbe678..7d9c33dd9d1a 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1323,7 +1323,7 @@ static int __devinit sparc_lance_probe_one(struct of_device *op,
1323 struct of_device *ledma, 1323 struct of_device *ledma,
1324 struct of_device *lebuffer) 1324 struct of_device *lebuffer)
1325{ 1325{
1326 struct device_node *dp = op->node; 1326 struct device_node *dp = op->dev.of_node;
1327 static unsigned version_printed; 1327 static unsigned version_printed;
1328 struct lance_private *lp; 1328 struct lance_private *lp;
1329 struct net_device *dev; 1329 struct net_device *dev;
@@ -1410,7 +1410,7 @@ static int __devinit sparc_lance_probe_one(struct of_device *op,
1410 1410
1411 lp->burst_sizes = 0; 1411 lp->burst_sizes = 0;
1412 if (lp->ledma) { 1412 if (lp->ledma) {
1413 struct device_node *ledma_dp = ledma->node; 1413 struct device_node *ledma_dp = ledma->dev.of_node;
1414 struct device_node *sbus_dp; 1414 struct device_node *sbus_dp;
1415 unsigned int sbmask; 1415 unsigned int sbmask;
1416 const char *prop; 1416 const char *prop;
@@ -1506,7 +1506,7 @@ fail:
1506static int __devinit sunlance_sbus_probe(struct of_device *op, const struct of_device_id *match) 1506static int __devinit sunlance_sbus_probe(struct of_device *op, const struct of_device_id *match)
1507{ 1507{
1508 struct of_device *parent = to_of_device(op->dev.parent); 1508 struct of_device *parent = to_of_device(op->dev.parent);
1509 struct device_node *parent_dp = parent->node; 1509 struct device_node *parent_dp = parent->dev.of_node;
1510 int err; 1510 int err;
1511 1511
1512 if (!strcmp(parent_dp->name, "ledma")) { 1512 if (!strcmp(parent_dp->name, "ledma")) {
@@ -1545,8 +1545,11 @@ static const struct of_device_id sunlance_sbus_match[] = {
1545MODULE_DEVICE_TABLE(of, sunlance_sbus_match); 1545MODULE_DEVICE_TABLE(of, sunlance_sbus_match);
1546 1546
1547static struct of_platform_driver sunlance_sbus_driver = { 1547static struct of_platform_driver sunlance_sbus_driver = {
1548 .name = "sunlance", 1548 .driver = {
1549 .match_table = sunlance_sbus_match, 1549 .name = "sunlance",
1550 .owner = THIS_MODULE,
1551 .of_match_table = sunlance_sbus_match,
1552 },
1550 .probe = sunlance_sbus_probe, 1553 .probe = sunlance_sbus_probe,
1551 .remove = __devexit_p(sunlance_sbus_remove), 1554 .remove = __devexit_p(sunlance_sbus_remove),
1552}; 1555};
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index a7542d25c845..72b579c8d812 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -695,7 +695,7 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
695 strcpy(info->version, "3.0"); 695 strcpy(info->version, "3.0");
696 696
697 op = qep->op; 697 op = qep->op;
698 regs = of_get_property(op->node, "reg", NULL); 698 regs = of_get_property(op->dev.of_node, "reg", NULL);
699 if (regs) 699 if (regs)
700 sprintf(info->bus_info, "SBUS:%d", regs->which_io); 700 sprintf(info->bus_info, "SBUS:%d", regs->which_io);
701 701
@@ -799,7 +799,7 @@ static struct sunqec * __devinit get_qec(struct of_device *child)
799 if (qec_global_reset(qecp->gregs)) 799 if (qec_global_reset(qecp->gregs))
800 goto fail; 800 goto fail;
801 801
802 qecp->qec_bursts = qec_get_burst(op->node); 802 qecp->qec_bursts = qec_get_burst(op->dev.of_node);
803 803
804 qec_init_once(qecp, op); 804 qec_init_once(qecp, op);
805 805
@@ -857,7 +857,7 @@ static int __devinit qec_ether_init(struct of_device *op)
857 857
858 res = -ENODEV; 858 res = -ENODEV;
859 859
860 i = of_getintprop_default(op->node, "channel#", -1); 860 i = of_getintprop_default(op->dev.of_node, "channel#", -1);
861 if (i == -1) 861 if (i == -1)
862 goto fail; 862 goto fail;
863 qe->channel = i; 863 qe->channel = i;
@@ -977,8 +977,11 @@ static const struct of_device_id qec_sbus_match[] = {
977MODULE_DEVICE_TABLE(of, qec_sbus_match); 977MODULE_DEVICE_TABLE(of, qec_sbus_match);
978 978
979static struct of_platform_driver qec_sbus_driver = { 979static struct of_platform_driver qec_sbus_driver = {
980 .name = "qec", 980 .driver = {
981 .match_table = qec_sbus_match, 981 .name = "qec",
982 .owner = THIS_MODULE,
983 .of_match_table = qec_sbus_match,
984 },
982 .probe = qec_sbus_probe, 985 .probe = qec_sbus_probe,
983 .remove = __devexit_p(qec_sbus_remove), 986 .remove = __devexit_p(qec_sbus_remove),
984}; 987};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 97b25533e5fb..6ad6fe706312 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -526,6 +526,8 @@ static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
526 struct sk_buff *skb; 526 struct sk_buff *skb;
527 int err; 527 int err;
528 528
529 sock_update_classid(sk);
530
529 /* Under a page? Don't bother with paged skb. */ 531 /* Under a page? Don't bother with paged skb. */
530 if (prepad + len < PAGE_SIZE || !linear) 532 if (prepad + len < PAGE_SIZE || !linear)
531 linear = len; 533 linear = len;
@@ -1649,3 +1651,4 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
1649MODULE_AUTHOR(DRV_COPYRIGHT); 1651MODULE_AUTHOR(DRV_COPYRIGHT);
1650MODULE_LICENSE("GPL"); 1652MODULE_LICENSE("GPL");
1651MODULE_ALIAS_MISCDEV(TUN_MINOR); 1653MODULE_ALIAS_MISCDEV(TUN_MINOR);
1654MODULE_ALIAS("devname:net/tun");
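
Two userspace-visible changes ride along in tun.c. sock_update_classid() refreshes the tun socket's net_cls cgroup classid from the task touching it, so packets originated through /dev/net/tun get classified like the task's other traffic (that is my reading of the hunk; the helper itself belongs to the cgroup traffic classifier). MODULE_ALIAS("devname:net/tun") then advertises the module's device node for static node creation. A tiny runnable sketch of the alias strings this pair of macros should yield in modules.alias (assumption: kmod's devname alias handling of this era; 10 is the misc major, 200 is TUN_MINOR):

#include <stdio.h>

int main(void)
{
        int misc_major = 10, tun_minor = 200;

        /* MODULE_ALIAS_MISCDEV(TUN_MINOR) - matched when the char device
         * is opened with no driver yet bound: */
        printf("alias char-major-%d-%d tun\n", misc_major, tun_minor);
        /* MODULE_ALIAS("devname:net/tun") - recorded by depmod so udev can
         * pre-create /dev/net/tun and the first open loads the module: */
        printf("alias devname:net/tun tun\n");
        return 0;
}
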
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 932602db54b3..4a34833b85dd 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3719,7 +3719,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
3719static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match) 3719static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match)
3720{ 3720{
3721 struct device *device = &ofdev->dev; 3721 struct device *device = &ofdev->dev;
3722 struct device_node *np = ofdev->node; 3722 struct device_node *np = ofdev->dev.of_node;
3723 struct net_device *dev = NULL; 3723 struct net_device *dev = NULL;
3724 struct ucc_geth_private *ugeth = NULL; 3724 struct ucc_geth_private *ugeth = NULL;
3725 struct ucc_geth_info *ug_info; 3725 struct ucc_geth_info *ug_info;
@@ -3963,8 +3963,11 @@ static struct of_device_id ucc_geth_match[] = {
3963MODULE_DEVICE_TABLE(of, ucc_geth_match); 3963MODULE_DEVICE_TABLE(of, ucc_geth_match);
3964 3964
3965static struct of_platform_driver ucc_geth_driver = { 3965static struct of_platform_driver ucc_geth_driver = {
3966 .name = DRV_NAME, 3966 .driver = {
3967 .match_table = ucc_geth_match, 3967 .name = DRV_NAME,
3968 .owner = THIS_MODULE,
3969 .of_match_table = ucc_geth_match,
3970 },
3968 .probe = ucc_geth_probe, 3971 .probe = ucc_geth_probe,
3969 .remove = ucc_geth_remove, 3972 .remove = ucc_geth_remove,
3970 .suspend = ucc_geth_suspend, 3973 .suspend = ucc_geth_suspend,
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 31b73310ec77..1f802e90474c 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -322,7 +322,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
322 size = (u16) (header & 0x0000ffff); 322 size = (u16) (header & 0x0000ffff);
323 323
324 if ((skb->len) - ((size + 1) & 0xfffe) == 0) { 324 if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
325 u8 alignment = (u32)skb->data & 0x3; 325 u8 alignment = (unsigned long)skb->data & 0x3;
326 if (alignment != 0x2) { 326 if (alignment != 0x2) {
327 /* 327 /*
328 * not 16bit aligned so use the room provided by 328 * not 16bit aligned so use the room provided by
@@ -351,7 +351,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
351 } 351 }
352 ax_skb = skb_clone(skb, GFP_ATOMIC); 352 ax_skb = skb_clone(skb, GFP_ATOMIC);
353 if (ax_skb) { 353 if (ax_skb) {
354 u8 alignment = (u32)packet & 0x3; 354 u8 alignment = (unsigned long)packet & 0x3;
355 ax_skb->len = size; 355 ax_skb->len = size;
356 356
357 if (alignment != 0x2) { 357 if (alignment != 0x2) {
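
The asix change is a 64-bit correctness fix: casting a pointer to u32 truncates the address on LP64 targets and draws a cast-size warning, so alignment arithmetic belongs in unsigned long (kernel style) or uintptr_t, both pointer-sized on Linux. A standalone userspace demonstration of the idiom, with a hypothetical buffer (u8/u32 in the comment are the kernel typedefs):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        char buf[8];
        char *p = buf + 2;

        /* Wrong on 64-bit: (u32)p keeps only the low 32 address bits and
         * the compiler warns about the pointer-to-int size mismatch:
         *         u8 alignment = (u32)p & 0x3;
         * Right: an integer type as wide as the pointer. */
        unsigned alignment = (uintptr_t)p & 0x3;

        printf("alignment = %u\n", alignment);
        return 0;
}
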
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 9964df199511..0a3c41faea9c 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -475,6 +475,9 @@ static const struct usb_device_id hso_ids[] = {
475 {USB_DEVICE(0x0af0, 0x8302)}, 475 {USB_DEVICE(0x0af0, 0x8302)},
476 {USB_DEVICE(0x0af0, 0x8304)}, 476 {USB_DEVICE(0x0af0, 0x8304)},
477 {USB_DEVICE(0x0af0, 0x8400)}, 477 {USB_DEVICE(0x0af0, 0x8400)},
478 {USB_DEVICE(0x0af0, 0x8600)},
479 {USB_DEVICE(0x0af0, 0x8800)},
480 {USB_DEVICE(0x0af0, 0x8900)},
478 {USB_DEVICE(0x0af0, 0xd035)}, 481 {USB_DEVICE(0x0af0, 0xd035)},
479 {USB_DEVICE(0x0af0, 0xd055)}, 482 {USB_DEVICE(0x0af0, 0xd055)},
480 {USB_DEVICE(0x0af0, 0xd155)}, 483 {USB_DEVICE(0x0af0, 0xd155)},
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 6537593fae66..8cc9e319f435 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -1027,12 +1027,12 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
1027 ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN; 1027 ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
1028 1028
1029 spin_lock_irqsave(&i2400m->rx_lock, flags); 1029 spin_lock_irqsave(&i2400m->rx_lock, flags);
1030 roq = &i2400m->rx_roq[ro_cin]; 1030 if (i2400m->rx_roq == NULL) {
1031 if (roq == NULL) {
1032 kfree_skb(skb); /* rx_roq is already destroyed */ 1031 kfree_skb(skb); /* rx_roq is already destroyed */
1033 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 1032 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1034 goto error; 1033 goto error;
1035 } 1034 }
1035 roq = &i2400m->rx_roq[ro_cin];
1036 kref_get(&i2400m->rx_roq_refcount); 1036 kref_get(&i2400m->rx_roq_refcount);
1037 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 1037 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1038 1038
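
The i2400m fix is about check ordering: the old code computed roq = &i2400m->rx_roq[ro_cin] first and tested roq == NULL afterwards, but the address of an array element is non-NULL whenever the index is (and indexing a NULL base is already undefined behaviour), so the guard could never catch a torn-down rx_roq. Testing the base pointer before indexing is the correct shape; a compact illustration:

#include <stddef.h>
#include <stdio.h>

struct roq { int cnt; };
struct dev { struct roq *rx_roq; };

static struct roq *lookup(struct dev *d, unsigned idx)
{
        /* Wrong order: '&d->rx_roq[idx]' is d->rx_roq + idx, which is
         * non-NULL for any idx > 0 even when rx_roq is NULL:
         *         struct roq *r = &d->rx_roq[idx];
         *         if (!r) return NULL;          <- never fires
         * Right order: validate the base pointer before indexing. */
        if (d->rx_roq == NULL)
                return NULL;
        return &d->rx_roq[idx];
}

int main(void)
{
        struct dev d = { .rx_roq = NULL };

        printf("%p\n", (void *)lookup(&d, 3));  /* (nil), no crash */
        return 0;
}
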
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a441aad922c2..3b7ab20a5c54 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5162,13 +5162,6 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file)
5162 enable_MAC(ai, 1); 5162 enable_MAC(ai, 1);
5163} 5163}
5164 5164
5165static inline u8 hexVal(char c) {
5166 if (c>='0' && c<='9') return c -= '0';
5167 if (c>='a' && c<='f') return c -= 'a'-10;
5168 if (c>='A' && c<='F') return c -= 'A'-10;
5169 return 0;
5170}
5171
5172static void proc_APList_on_close( struct inode *inode, struct file *file ) { 5165static void proc_APList_on_close( struct inode *inode, struct file *file ) {
5173 struct proc_data *data = (struct proc_data *)file->private_data; 5166 struct proc_data *data = (struct proc_data *)file->private_data;
5174 struct proc_dir_entry *dp = PDE(inode); 5167 struct proc_dir_entry *dp = PDE(inode);
@@ -5188,11 +5181,11 @@ static void proc_APList_on_close( struct inode *inode, struct file *file ) {
5188 switch(j%3) { 5181 switch(j%3) {
5189 case 0: 5182 case 0:
5190 APList_rid.ap[i][j/3]= 5183 APList_rid.ap[i][j/3]=
5191 hexVal(data->wbuffer[j+i*6*3])<<4; 5184 hex_to_bin(data->wbuffer[j+i*6*3])<<4;
5192 break; 5185 break;
5193 case 1: 5186 case 1:
5194 APList_rid.ap[i][j/3]|= 5187 APList_rid.ap[i][j/3]|=
5195 hexVal(data->wbuffer[j+i*6*3]); 5188 hex_to_bin(data->wbuffer[j+i*6*3]);
5196 break; 5189 break;
5197 } 5190 }
5198 } 5191 }
@@ -5340,10 +5333,10 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
5340 for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) { 5333 for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) {
5341 switch(i%3) { 5334 switch(i%3) {
5342 case 0: 5335 case 0:
5343 key[i/3] = hexVal(data->wbuffer[i+j])<<4; 5336 key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4;
5344 break; 5337 break;
5345 case 1: 5338 case 1:
5346 key[i/3] |= hexVal(data->wbuffer[i+j]); 5339 key[i/3] |= hex_to_bin(data->wbuffer[i+j]);
5347 break; 5340 break;
5348 } 5341 }
5349 } 5342 }
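
The airo change retires a driver-private hexVal() in favour of the kernel-wide hex_to_bin() from <linux/kernel.h>; behaviour is equivalent for valid digits (hex_to_bin() returns the digit value 0-15, and a negative value for non-hex input, which this caller, like the old helper, does not distinguish). A userspace equivalent of the nibble-assembly loop the proc handlers perform:

#include <stdio.h>

/* Userspace stand-in for the kernel's hex_to_bin(): 0-15, or -1. */
static int hex_to_bin(char ch)
{
        if (ch >= '0' && ch <= '9') return ch - '0';
        if (ch >= 'a' && ch <= 'f') return ch - 'a' + 10;
        if (ch >= 'A' && ch <= 'F') return ch - 'A' + 10;
        return -1;
}

int main(void)
{
        const char *wbuffer = "0aF3";
        unsigned char key[2];
        int i;

        /* Two hex digits per byte: high nibble first, then OR in the low. */
        for (i = 0; wbuffer[i * 2]; i++) {
                key[i]  = hex_to_bin(wbuffer[i * 2]) << 4;
                key[i] |= hex_to_bin(wbuffer[i * 2 + 1]);
        }
        printf("%02x %02x\n", key[0], key[1]);  /* 0a f3 */
        return 0;
}
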
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 5f04cf38a5bc..cc6d41dec332 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1214,6 +1214,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1214 struct ath5k_hw *ah = sc->ah; 1214 struct ath5k_hw *ah = sc->ah;
1215 struct sk_buff *skb = bf->skb; 1215 struct sk_buff *skb = bf->skb;
1216 struct ath5k_desc *ds; 1216 struct ath5k_desc *ds;
1217 int ret;
1217 1218
1218 if (!skb) { 1219 if (!skb) {
1219 skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr); 1220 skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
@@ -1240,9 +1241,9 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1240 ds = bf->desc; 1241 ds = bf->desc;
1241 ds->ds_link = bf->daddr; /* link to self */ 1242 ds->ds_link = bf->daddr; /* link to self */
1242 ds->ds_data = bf->skbaddr; 1243 ds->ds_data = bf->skbaddr;
1243 ah->ah_setup_rx_desc(ah, ds, 1244 ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
1244 skb_tailroom(skb), /* buffer size */ 1245 if (ret)
1245 0); 1246 return ret;
1246 1247
1247 if (sc->rxlink != NULL) 1248 if (sc->rxlink != NULL)
1248 *sc->rxlink = bf->daddr; 1249 *sc->rxlink = bf->daddr;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index c8a4558f79ba..f43d85a302c4 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -76,22 +76,13 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
76 ds = bf->bf_desc; 76 ds = bf->bf_desc;
77 flags = ATH9K_TXDESC_NOACK; 77 flags = ATH9K_TXDESC_NOACK;
78 78
79 if (((sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || 79 ds->ds_link = 0;
80 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) && 80 /*
81 (ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 81 * Switch antenna every beacon.
82 ds->ds_link = bf->bf_daddr; /* self-linked */ 82 * Should only switch every beacon period, not for every SWBA
83 flags |= ATH9K_TXDESC_VEOL; 83 * XXX assumes two antennae
84 /* Let hardware handle antenna switching. */ 84 */
85 antenna = 0; 85 antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1);
86 } else {
87 ds->ds_link = 0;
88 /*
89 * Switch antenna every beacon.
90 * Should only switch every beacon period, not for every SWBA
91 * XXX assumes two antennae
92 */
93 antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1);
94 }
95 86
96 sband = &sc->sbands[common->hw->conf.channel->band]; 87 sband = &sc->sbands[common->hw->conf.channel->band];
97 rate = sband->bitrates[rateidx].hw_value; 88 rate = sband->bitrates[rateidx].hw_value;
@@ -215,36 +206,6 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
215 return bf; 206 return bf;
216} 207}
217 208
218/*
219 * Startup beacon transmission for adhoc mode when they are sent entirely
220 * by the hardware using the self-linked descriptor + veol trick.
221*/
222static void ath_beacon_start_adhoc(struct ath_softc *sc,
223 struct ieee80211_vif *vif)
224{
225 struct ath_hw *ah = sc->sc_ah;
226 struct ath_common *common = ath9k_hw_common(ah);
227 struct ath_buf *bf;
228 struct ath_vif *avp;
229 struct sk_buff *skb;
230
231 avp = (void *)vif->drv_priv;
232
233 if (avp->av_bcbuf == NULL)
234 return;
235
236 bf = avp->av_bcbuf;
237 skb = bf->bf_mpdu;
238
239 ath_beacon_setup(sc, avp, bf, 0);
240
241 /* NB: caller is known to have already stopped tx dma */
242 ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
243 ath9k_hw_txstart(ah, sc->beacon.beaconq);
244 ath_print(common, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n",
245 sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc);
246}
247
248int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) 209int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
249{ 210{
250 struct ath_softc *sc = aphy->sc; 211 struct ath_softc *sc = aphy->sc;
@@ -265,7 +226,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
265 list_del(&avp->av_bcbuf->list); 226 list_del(&avp->av_bcbuf->list);
266 227
267 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || 228 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
268 !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 229 sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC ||
230 sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
269 int slot; 231 int slot;
270 /* 232 /*
271 * Assign the vif to a beacon xmit slot. As 233 * Assign the vif to a beacon xmit slot. As
@@ -274,17 +236,11 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
274 avp->av_bslot = 0; 236 avp->av_bslot = 0;
275 for (slot = 0; slot < ATH_BCBUF; slot++) 237 for (slot = 0; slot < ATH_BCBUF; slot++)
276 if (sc->beacon.bslot[slot] == NULL) { 238 if (sc->beacon.bslot[slot] == NULL) {
277 /*
278 * XXX hack, space out slots to better
279 * deal with misses
280 */
281 if (slot+1 < ATH_BCBUF &&
282 sc->beacon.bslot[slot+1] == NULL) {
283 avp->av_bslot = slot+1;
284 break;
285 }
286 avp->av_bslot = slot; 239 avp->av_bslot = slot;
240
287 /* NB: keep looking for a double slot */ 241 /* NB: keep looking for a double slot */
242 if (slot == 0 || !sc->beacon.bslot[slot-1])
243 break;
288 } 244 }
289 BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL); 245 BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL);
290 sc->beacon.bslot[avp->av_bslot] = vif; 246 sc->beacon.bslot[avp->av_bslot] = vif;
@@ -721,8 +677,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
721 * self-linked tx descriptor and let the hardware deal with things. 677 * self-linked tx descriptor and let the hardware deal with things.
722 */ 678 */
723 intval |= ATH9K_BEACON_ENA; 679 intval |= ATH9K_BEACON_ENA;
724 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) 680 ah->imask |= ATH9K_INT_SWBA;
725 ah->imask |= ATH9K_INT_SWBA;
726 681
727 ath_beaconq_config(sc); 682 ath_beaconq_config(sc);
728 683
@@ -732,10 +687,6 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
732 ath9k_beacon_init(sc, nexttbtt, intval); 687 ath9k_beacon_init(sc, nexttbtt, intval);
733 sc->beacon.bmisscnt = 0; 688 sc->beacon.bmisscnt = 0;
734 ath9k_hw_set_interrupts(ah, ah->imask); 689 ath9k_hw_set_interrupts(ah, ah->imask);
735
736 /* FIXME: Handle properly when vif is NULL */
737 if (vif && ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)
738 ath_beacon_start_adhoc(sc, vif);
739} 690}
740 691
741void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif) 692void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
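
Two related simplifications in beacon.c. First, the self-linked-descriptor (VEOL) ad-hoc beacon path is removed outright, so IBSS and mesh interfaces now always use the SWBA-interrupt slot machinery, with antenna selection done by the retained alternation formula (which, as its comment says, assumes two antennae). Second, the slot-assignment loop is rewritten: instead of the old peek-at-slot+1 hack, it records each free slot and stops as soon as the slot has no occupied lower neighbour, still preferring a "double" slot so missed beacons are spaced out. A runnable rendering of the new loop as I read it:

#include <stddef.h>
#include <stdio.h>

#define ATH_BCBUF 4

/* Pick a free beacon slot, preferring one whose lower neighbour is
 * also free ("a double slot"), as the rewritten ath9k loop does. */
static int pick_slot(void *bslot[ATH_BCBUF])
{
        int av_bslot = 0;
        int slot;

        for (slot = 0; slot < ATH_BCBUF; slot++)
                if (bslot[slot] == NULL) {
                        av_bslot = slot;
                        /* keep looking unless a double slot was found */
                        if (slot == 0 || !bslot[slot - 1])
                                break;
                }
        return av_bslot;
}

int main(void)
{
        void *slots[ATH_BCBUF] = { (void *)1, NULL, NULL, NULL };

        /* slot 1 is free but its neighbour is busy; slot 2 wins. */
        printf("slot %d\n", pick_slot(slots));
        return 0;
}
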
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 46dc41a16faa..77b359162d6c 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -107,12 +107,14 @@ static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
107static void hif_usb_tx_cb(struct urb *urb) 107static void hif_usb_tx_cb(struct urb *urb)
108{ 108{
109 struct tx_buf *tx_buf = (struct tx_buf *) urb->context; 109 struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
110 struct hif_device_usb *hif_dev = tx_buf->hif_dev; 110 struct hif_device_usb *hif_dev;
111 struct sk_buff *skb; 111 struct sk_buff *skb;
112 112
113 if (!hif_dev || !tx_buf) 113 if (!tx_buf || !tx_buf->hif_dev)
114 return; 114 return;
115 115
116 hif_dev = tx_buf->hif_dev;
117
116 switch (urb->status) { 118 switch (urb->status) {
117 case 0: 119 case 0:
118 break; 120 break;
@@ -607,6 +609,10 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
607 609
608 return 0; 610 return 0;
609err: 611err:
612 if (tx_buf) {
613 kfree(tx_buf->buf);
614 kfree(tx_buf);
615 }
610 ath9k_hif_usb_dealloc_tx_urbs(hif_dev); 616 ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
611 return -ENOMEM; 617 return -ENOMEM;
612} 618}
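
The hif_usb callback fix addresses a dereference-before-check: the old code loaded tx_buf->hif_dev in the declaration, then tested !hif_dev || !tx_buf - by which point a NULL tx_buf had already been dereferenced, and in an order that checked the derived pointer before its source. The fix validates tx_buf and tx_buf->hif_dev first and only then loads the local. (The same file also gains an error-path fix that frees a partially allocated tx_buf before unwinding.) The corrected shape, runnable in userspace with stub types:

#include <stdio.h>

struct hif_device { int busy; };
struct tx_buf     { struct hif_device *hif_dev; };

static void tx_cb(void *context)
{
        struct tx_buf *tx_buf = context;
        struct hif_device *hif_dev;     /* deliberately not initialized here */

        /* Validate before any dereference: the old code did
         * 'hif_dev = tx_buf->hif_dev' at declaration, crashing on a
         * NULL tx_buf before this check could ever run. */
        if (!tx_buf || !tx_buf->hif_dev)
                return;

        hif_dev = tx_buf->hif_dev;      /* safe now */
        hif_dev->busy = 0;
}

int main(void)
{
        struct hif_device hw = { .busy = 1 };
        struct tx_buf buf = { .hif_dev = &hw };

        tx_cb(NULL);                    /* must not crash */
        tx_cb(&buf);
        printf("busy=%d\n", hw.busy);   /* busy=0 */
        return 0;
}
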
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index ad556aa8da39..c251603ab032 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -23,6 +23,7 @@
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <linux/leds.h> 25#include <linux/leds.h>
26#include <linux/slab.h>
26#include <net/mac80211.h> 27#include <net/mac80211.h>
27 28
28#include "common.h" 29#include "common.h"
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 893b552981a0..abfa0493236f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -752,7 +752,6 @@ static int ath_key_config(struct ath_common *common,
752 struct ath_hw *ah = common->ah; 752 struct ath_hw *ah = common->ah;
753 struct ath9k_keyval hk; 753 struct ath9k_keyval hk;
754 const u8 *mac = NULL; 754 const u8 *mac = NULL;
755 u8 gmac[ETH_ALEN];
756 int ret = 0; 755 int ret = 0;
757 int idx; 756 int idx;
758 757
@@ -776,30 +775,9 @@ static int ath_key_config(struct ath_common *common,
776 memcpy(hk.kv_val, key->key, key->keylen); 775 memcpy(hk.kv_val, key->key, key->keylen);
777 776
778 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 777 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
779 778 /* For now, use the default keys for broadcast keys. This may
780 if (key->ap_addr) { 779 * need to change with virtual interfaces. */
781 /* 780 idx = key->keyidx;
782 * Group keys on hardware that supports multicast frame
783 * key search use a mac that is the sender's address with
784 * the high bit set instead of the app-specified address.
785 */
786 memcpy(gmac, key->ap_addr, ETH_ALEN);
787 gmac[0] |= 0x80;
788 mac = gmac;
789
790 if (key->alg == ALG_TKIP)
791 idx = ath_reserve_key_cache_slot_tkip(common);
792 else
793 idx = ath_reserve_key_cache_slot(common);
794 if (idx < 0)
795 mac = NULL; /* no free key cache entries */
796 }
797
798 if (!mac) {
799 /* For now, use the default keys for broadcast keys. This may
800 * need to change with virtual interfaces. */
801 idx = key->keyidx;
802 }
803 } else if (key->keyidx) { 781 } else if (key->keyidx) {
804 if (WARN_ON(!sta)) 782 if (WARN_ON(!sta))
805 return -EOPNOTSUPP; 783 return -EOPNOTSUPP;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 257b10ba6f57..1ec836cf1c0d 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -28,7 +28,6 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
28 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */ 28 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
29 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ 29 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
30 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ 30 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
31 { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
32 { 0 } 31 { 0 }
33}; 32};
34 33
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ba139132c85f..ca6065b71b46 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -19,6 +19,12 @@
19 19
20#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) 20#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
21 21
22static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
23{
24 return sc->ps_enabled &&
25 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
26}
27
22static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, 28static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
23 struct ieee80211_hdr *hdr) 29 struct ieee80211_hdr *hdr)
24{ 30{
@@ -616,8 +622,8 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
616 hdr = (struct ieee80211_hdr *)skb->data; 622 hdr = (struct ieee80211_hdr *)skb->data;
617 623
618 /* Process Beacon and CAB receive in PS state */ 624 /* Process Beacon and CAB receive in PS state */
619 if ((sc->ps_flags & PS_WAIT_FOR_BEACON) && 625 if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
620 ieee80211_is_beacon(hdr->frame_control)) 626 && ieee80211_is_beacon(hdr->frame_control))
621 ath_rx_ps_beacon(sc, skb); 627 ath_rx_ps_beacon(sc, skb);
622 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && 628 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
623 (ieee80211_is_data(hdr->frame_control) || 629 (ieee80211_is_data(hdr->frame_control) ||
@@ -932,9 +938,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
932 sc->rx.rxotherant = 0; 938 sc->rx.rxotherant = 0;
933 } 939 }
934 940
935 if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON | 941 if (unlikely(ath9k_check_auto_sleep(sc) ||
936 PS_WAIT_FOR_CAB | 942 (sc->ps_flags & (PS_WAIT_FOR_BEACON |
937 PS_WAIT_FOR_PSPOLL_DATA))) 943 PS_WAIT_FOR_CAB |
944 PS_WAIT_FOR_PSPOLL_DATA))))
938 ath_rx_ps(sc, skb); 945 ath_rx_ps(sc, skb);
939 946
940 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 947 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
index a273e373b7b0..c92b2c0cbd91 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/gfp.h>
33#include <net/mac80211.h> 34#include <net/mac80211.h>
34 35
35#include "iwl-dev.h" 36#include "iwl-dev.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 107e173112f6..5d3f51ff2f0d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -376,6 +376,11 @@ void iwl_bg_start_internal_scan(struct work_struct *work)
376 376
377 mutex_lock(&priv->mutex); 377 mutex_lock(&priv->mutex);
378 378
 379 if (priv->is_internal_short_scan) {
380 IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
381 goto unlock;
382 }
383
379 if (!iwl_is_ready_rf(priv)) { 384 if (!iwl_is_ready_rf(priv)) {
380 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); 385 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
381 goto unlock; 386 goto unlock;
@@ -497,17 +502,27 @@ void iwl_bg_scan_completed(struct work_struct *work)
497{ 502{
498 struct iwl_priv *priv = 503 struct iwl_priv *priv =
499 container_of(work, struct iwl_priv, scan_completed); 504 container_of(work, struct iwl_priv, scan_completed);
505 bool internal = false;
500 506
501 IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); 507 IWL_DEBUG_SCAN(priv, "SCAN complete scan\n");
502 508
503 cancel_delayed_work(&priv->scan_check); 509 cancel_delayed_work(&priv->scan_check);
504 510
505 if (!priv->is_internal_short_scan) 511 mutex_lock(&priv->mutex);
506 ieee80211_scan_completed(priv->hw, false); 512 if (priv->is_internal_short_scan) {
507 else {
508 priv->is_internal_short_scan = false; 513 priv->is_internal_short_scan = false;
509 IWL_DEBUG_SCAN(priv, "internal short scan completed\n"); 514 IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
515 internal = true;
510 } 516 }
517 mutex_unlock(&priv->mutex);
518
519 /*
520 * Do not hold mutex here since this will cause mac80211 to call
521 * into driver again into functions that will attempt to take
522 * mutex.
523 */
524 if (!internal)
525 ieee80211_scan_completed(priv->hw, false);
511 526
512 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 527 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
513 return; 528 return;
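
The iwl-scan changes tighten both ends of the internal-scan path: start_internal_scan now refuses to queue a second internal scan, and scan_completed narrows its lock scope - the is_internal_short_scan flag is read and cleared under priv->mutex, but ieee80211_scan_completed() is deliberately invoked after unlocking, since (per the added comment) mac80211 can call straight back into the driver and retake the same mutex, self-deadlocking. Snapshot under the lock, call out unlocked is the general discipline; a pthread sketch with invented names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool internal_scan;

/* May re-enter code that takes 'lock' - must not be called with it held. */
static void scan_completed_callback(void) { puts("scan done"); }

static void scan_completed(void)
{
        bool internal;

        pthread_mutex_lock(&lock);
        internal = internal_scan;       /* snapshot under the lock */
        internal_scan = false;
        pthread_mutex_unlock(&lock);

        if (!internal)                  /* call out lock-free */
                scan_completed_callback();
}

int main(void) { scan_completed(); return 0; }
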
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 85ed235ac901..83a26361a9b5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -431,7 +431,7 @@ int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
431 struct iwl_link_quality_cmd *link_cmd; 431 struct iwl_link_quality_cmd *link_cmd;
432 unsigned long flags; 432 unsigned long flags;
433 433
434 if (*sta_id_r) 434 if (sta_id_r)
435 *sta_id_r = IWL_INVALID_STATION; 435 *sta_id_r = IWL_INVALID_STATION;
436 436
437 ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id); 437 ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id);
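
A one-character fix hiding two bugs: if (*sta_id_r) dereferences an out-parameter that callers may legitimately pass as NULL, and it initializes the output only when it already happens to hold a non-zero value. if (sta_id_r) guards the pointer itself, so a supplied out-parameter is always primed with IWL_INVALID_STATION before anything can fail. Minimal demonstration:

#include <stddef.h>
#include <stdio.h>

#define IWL_INVALID_STATION 0xff

static int add_bssid_station(unsigned char *sta_id_r)
{
        /* Guard the pointer itself - the old '*sta_id_r' both dereferenced
         * a possibly-NULL argument and skipped the initialization whenever
         * the output happened to contain zero. */
        if (sta_id_r)
                *sta_id_r = IWL_INVALID_STATION; /* safe default on failure */

        /* ... real station setup would follow ... */
        return 0;
}

int main(void)
{
        unsigned char id = 0;

        add_bssid_station(&id);         /* id primed to 0xff */
        add_bssid_station(NULL);        /* caller uninterested: must not crash */
        printf("%#x\n", id);
        return 0;
}
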
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 2d2890878dea..4bd61ee627c0 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2572,14 +2572,18 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2572 2572
2573static void rndis_wlan_do_link_down_work(struct usbnet *usbdev) 2573static void rndis_wlan_do_link_down_work(struct usbnet *usbdev)
2574{ 2574{
2575 union iwreq_data evt; 2575 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2576 2576
2577 netif_carrier_off(usbdev->net); 2577 if (priv->connected) {
2578 priv->connected = false;
2579 memset(priv->bssid, 0, ETH_ALEN);
2580
2581 deauthenticate(usbdev);
2578 2582
2579 evt.data.flags = 0; 2583 cfg80211_disconnected(usbdev->net, 0, NULL, 0, GFP_KERNEL);
2580 evt.data.length = 0; 2584 }
2581 memset(evt.ap_addr.sa_data, 0, ETH_ALEN); 2585
2582 wireless_send_event(usbdev->net, SIOCGIWAP, &evt, NULL); 2586 netif_carrier_off(usbdev->net);
2583} 2587}
2584 2588
2585static void rndis_wlan_worker(struct work_struct *work) 2589static void rndis_wlan_worker(struct work_struct *work)
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 4ba7b038928f..ad2c98af7e9d 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -926,7 +926,7 @@ static void rt2400pci_disable_radio(struct rt2x00_dev *rt2x00dev)
926static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev, 926static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev,
927 enum dev_state state) 927 enum dev_state state)
928{ 928{
929 u32 reg; 929 u32 reg, reg2;
930 unsigned int i; 930 unsigned int i;
931 char put_to_sleep; 931 char put_to_sleep;
932 char bbp_state; 932 char bbp_state;
@@ -947,11 +947,12 @@ static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev,
947 * device has entered the correct state. 947 * device has entered the correct state.
948 */ 948 */
949 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 949 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
950 rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg); 950 rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg2);
951 bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); 951 bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE);
952 rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); 952 rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE);
953 if (bbp_state == state && rf_state == state) 953 if (bbp_state == state && rf_state == state)
954 return 0; 954 return 0;
955 rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg);
955 msleep(10); 956 msleep(10);
956 } 957 }
957 958
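
The rt2400pci change - repeated verbatim for rt2500pci, rt61pci and rt73usb further down - fixes a sleep/wake poll loop that clobbered its own command word: the status register was read back into the same reg variable that held the state command, so nothing of the original request survived to be retried. Reading status into a separate reg2 and re-writing the preserved reg on each unsuccessful pass keeps retrying the real command. The corrected loop shape against stub register helpers (the masking in the stubs is a stand-in for device behaviour):

#include <stdio.h>

#define REGISTER_BUSY_COUNT 5

static unsigned int hw_state;                   /* stand-in device register */

static void register_read(unsigned int *val)  { *val = hw_state; }
static void register_write(unsigned int val)  { hw_state = val & 0x3; }

static int set_state(unsigned int reg, unsigned int want)
{
        unsigned int reg2;              /* status goes here, not into reg */
        int i;

        register_write(reg);            /* issue the state command */
        for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
                register_read(&reg2);   /* old bug: this read trashed reg */
                if ((reg2 & 0x3) == want)
                        return 0;
                register_write(reg);    /* retry the *original* command */
        }
        return -1;
}

int main(void)
{
        printf("%d\n", set_state(0x2, 0x2));    /* 0: state reached */
        return 0;
}
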
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 89d132d4af12..41da3d218c65 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1084,7 +1084,7 @@ static void rt2500pci_disable_radio(struct rt2x00_dev *rt2x00dev)
1084static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev, 1084static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev,
1085 enum dev_state state) 1085 enum dev_state state)
1086{ 1086{
1087 u32 reg; 1087 u32 reg, reg2;
1088 unsigned int i; 1088 unsigned int i;
1089 char put_to_sleep; 1089 char put_to_sleep;
1090 char bbp_state; 1090 char bbp_state;
@@ -1105,11 +1105,12 @@ static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev,
1105 * device has entered the correct state. 1105 * device has entered the correct state.
1106 */ 1106 */
1107 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1107 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1108 rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg); 1108 rt2x00pci_register_read(rt2x00dev, PWRCSR1, &reg2);
1109 bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); 1109 bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE);
1110 rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); 1110 rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE);
1111 if (bbp_state == state && rf_state == state) 1111 if (bbp_state == state && rf_state == state)
1112 return 0; 1112 return 0;
1113 rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg);
1113 msleep(10); 1114 msleep(10);
1114 } 1115 }
1115 1116
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 0f8b84b7224c..699161327d65 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -413,7 +413,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
413 */ 413 */
414 rt2x00_desc_read(txi, 0, &word); 414 rt2x00_desc_read(txi, 0, &word);
415 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, 415 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
416 skb->len + TXWI_DESC_SIZE); 416 skb->len - TXINFO_DESC_SIZE);
417 rt2x00_set_field32(&word, TXINFO_W0_WIV, 417 rt2x00_set_field32(&word, TXINFO_W0_WIV,
418 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); 418 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
419 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2); 419 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index a016f7ccde29..f71eee67f977 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -206,7 +206,7 @@ void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
206 /* 206 /*
207 * Free irq line. 207 * Free irq line.
208 */ 208 */
209 free_irq(to_pci_dev(rt2x00dev->dev)->irq, rt2x00dev); 209 free_irq(rt2x00dev->irq, rt2x00dev);
210 210
211 /* 211 /*
212 * Free DMA 212 * Free DMA
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 2e3076f67535..6a74baf4e934 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1689,7 +1689,7 @@ static void rt61pci_disable_radio(struct rt2x00_dev *rt2x00dev)
1689 1689
1690static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) 1690static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
1691{ 1691{
1692 u32 reg; 1692 u32 reg, reg2;
1693 unsigned int i; 1693 unsigned int i;
1694 char put_to_sleep; 1694 char put_to_sleep;
1695 1695
@@ -1706,10 +1706,11 @@ static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
1706 * device has entered the correct state. 1706 * device has entered the correct state.
1707 */ 1707 */
1708 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1708 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1709 rt2x00pci_register_read(rt2x00dev, MAC_CSR12, &reg); 1709 rt2x00pci_register_read(rt2x00dev, MAC_CSR12, &reg2);
1710 state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); 1710 state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE);
1711 if (state == !put_to_sleep) 1711 if (state == !put_to_sleep)
1712 return 0; 1712 return 0;
1713 rt2x00pci_register_write(rt2x00dev, MAC_CSR12, reg);
1713 msleep(10); 1714 msleep(10);
1714 } 1715 }
1715 1716
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index e35bd19c3c5a..6e0d82efe924 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1366,7 +1366,7 @@ static void rt73usb_disable_radio(struct rt2x00_dev *rt2x00dev)
1366 1366
1367static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) 1367static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
1368{ 1368{
1369 u32 reg; 1369 u32 reg, reg2;
1370 unsigned int i; 1370 unsigned int i;
1371 char put_to_sleep; 1371 char put_to_sleep;
1372 1372
@@ -1383,10 +1383,11 @@ static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
1383 * device has entered the correct state. 1383 * device has entered the correct state.
1384 */ 1384 */
1385 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1385 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1386 rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg); 1386 rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg2);
1387 state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); 1387 state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE);
1388 if (state == !put_to_sleep) 1388 if (state == !put_to_sleep)
1389 return 0; 1389 return 0;
1390 rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg);
1390 msleep(10); 1391 msleep(10);
1391 } 1392 }
1392 1393
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index 57f4bfd959c8..b98fb643fab0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -113,6 +113,8 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
113 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len, 113 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len,
114 beacon ? "beacon" : ""); 114 beacon ? "beacon" : "");
115 115
116 skb_trim(skb, skb->len - desc->pad_len);
117
116 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 118 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
117 ieee80211_rx_ni(wl->hw, skb); 119 ieee80211_rx_ni(wl->hw, skb);
118} 120}
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index a7db68d37ee9..d04c5b262050 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -1088,7 +1088,7 @@ static void xemaclite_remove_ndev(struct net_device *ndev)
1088 */ 1088 */
1089static bool get_bool(struct of_device *ofdev, const char *s) 1089static bool get_bool(struct of_device *ofdev, const char *s)
1090{ 1090{
1091 u32 *p = (u32 *)of_get_property(ofdev->node, s, NULL); 1091 u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
1092 1092
1093 if (p) { 1093 if (p) {
1094 return (bool)*p; 1094 return (bool)*p;
@@ -1130,14 +1130,14 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
1130 dev_info(dev, "Device Tree Probing\n"); 1130 dev_info(dev, "Device Tree Probing\n");
1131 1131
1132 /* Get iospace for the device */ 1132 /* Get iospace for the device */
1133 rc = of_address_to_resource(ofdev->node, 0, &r_mem); 1133 rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
1134 if (rc) { 1134 if (rc) {
1135 dev_err(dev, "invalid address\n"); 1135 dev_err(dev, "invalid address\n");
1136 return rc; 1136 return rc;
1137 } 1137 }
1138 1138
1139 /* Get IRQ for the device */ 1139 /* Get IRQ for the device */
1140 rc = of_irq_to_resource(ofdev->node, 0, &r_irq); 1140 rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
1141 if (rc == NO_IRQ) { 1141 if (rc == NO_IRQ) {
1142 dev_err(dev, "no IRQ found\n"); 1142 dev_err(dev, "no IRQ found\n");
1143 return rc; 1143 return rc;
@@ -1182,7 +1182,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
1182 lp->next_rx_buf_to_use = 0x0; 1182 lp->next_rx_buf_to_use = 0x0;
1183 lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong"); 1183 lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
1184 lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); 1184 lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
1185 mac_address = of_get_mac_address(ofdev->node); 1185 mac_address = of_get_mac_address(ofdev->dev.of_node);
1186 1186
1187 if (mac_address) 1187 if (mac_address)
1188 /* Set the MAC address. */ 1188 /* Set the MAC address. */
@@ -1197,7 +1197,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
1197 /* Set the MAC address in the EmacLite device */ 1197 /* Set the MAC address in the EmacLite device */
1198 xemaclite_update_address(lp, ndev->dev_addr); 1198 xemaclite_update_address(lp, ndev->dev_addr);
1199 1199
1200 lp->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0); 1200 lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
1201 rc = xemaclite_mdio_setup(lp, &ofdev->dev); 1201 rc = xemaclite_mdio_setup(lp, &ofdev->dev);
1202 if (rc) 1202 if (rc)
1203 dev_warn(&ofdev->dev, "error registering MDIO bus\n"); 1203 dev_warn(&ofdev->dev, "error registering MDIO bus\n");
@@ -1291,8 +1291,11 @@ static struct of_device_id xemaclite_of_match[] __devinitdata = {
1291MODULE_DEVICE_TABLE(of, xemaclite_of_match); 1291MODULE_DEVICE_TABLE(of, xemaclite_of_match);
1292 1292
1293static struct of_platform_driver xemaclite_of_driver = { 1293static struct of_platform_driver xemaclite_of_driver = {
1294 .name = DRIVER_NAME, 1294 .driver = {
1295 .match_table = xemaclite_of_match, 1295 .name = DRIVER_NAME,
1296 .owner = THIS_MODULE,
1297 .of_match_table = xemaclite_of_match,
1298 },
1296 .probe = xemaclite_of_probe, 1299 .probe = xemaclite_of_probe,
1297 .remove = __devexit_p(xemaclite_of_remove), 1300 .remove = __devexit_p(xemaclite_of_remove),
1298}; 1301};
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 224ae6bc67b6..7d18f8e0b013 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -10,8 +10,7 @@
10#include <asm/errno.h> 10#include <asm/errno.h>
11 11
12/** 12/**
13 * of_match_device - Tell if an of_device structure has a matching 13 * of_match_device - Tell if a struct device matches an of_device_id list
14 * of_match structure
15 * @ids: array of of device match structures to search in 14 * @ids: array of of device match structures to search in
16 * @dev: the of device structure to match against 15 * @dev: the of device structure to match against
17 * 16 *
@@ -19,11 +18,11 @@
19 * system is in its list of supported devices. 18 * system is in its list of supported devices.
20 */ 19 */
21const struct of_device_id *of_match_device(const struct of_device_id *matches, 20const struct of_device_id *of_match_device(const struct of_device_id *matches,
22 const struct of_device *dev) 21 const struct device *dev)
23{ 22{
24 if (!dev->node) 23 if (!dev->of_node)
25 return NULL; 24 return NULL;
26 return of_match_node(matches, dev->node); 25 return of_match_node(matches, dev->of_node);
27} 26}
28EXPORT_SYMBOL(of_match_device); 27EXPORT_SYMBOL(of_match_device);
29 28
@@ -54,7 +53,7 @@ static ssize_t devspec_show(struct device *dev,
54 struct of_device *ofdev; 53 struct of_device *ofdev;
55 54
56 ofdev = to_of_device(dev); 55 ofdev = to_of_device(dev);
57 return sprintf(buf, "%s\n", ofdev->node->full_name); 56 return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
58} 57}
59 58
60static ssize_t name_show(struct device *dev, 59static ssize_t name_show(struct device *dev,
@@ -63,7 +62,7 @@ static ssize_t name_show(struct device *dev,
63 struct of_device *ofdev; 62 struct of_device *ofdev;
64 63
65 ofdev = to_of_device(dev); 64 ofdev = to_of_device(dev);
66 return sprintf(buf, "%s\n", ofdev->node->name); 65 return sprintf(buf, "%s\n", ofdev->dev.of_node->name);
67} 66}
68 67
69static ssize_t modalias_show(struct device *dev, 68static ssize_t modalias_show(struct device *dev,
@@ -97,14 +96,14 @@ void of_release_dev(struct device *dev)
97 struct of_device *ofdev; 96 struct of_device *ofdev;
98 97
99 ofdev = to_of_device(dev); 98 ofdev = to_of_device(dev);
100 of_node_put(ofdev->node); 99 of_node_put(ofdev->dev.of_node);
101 kfree(ofdev); 100 kfree(ofdev);
102} 101}
103EXPORT_SYMBOL(of_release_dev); 102EXPORT_SYMBOL(of_release_dev);
104 103
105int of_device_register(struct of_device *ofdev) 104int of_device_register(struct of_device *ofdev)
106{ 105{
107 BUG_ON(ofdev->node == NULL); 106 BUG_ON(ofdev->dev.of_node == NULL);
108 107
109 device_initialize(&ofdev->dev); 108 device_initialize(&ofdev->dev);
110 109
@@ -112,7 +111,7 @@ int of_device_register(struct of_device *ofdev)
112 * the parent. If there is no parent defined, set the node 111 * the parent. If there is no parent defined, set the node
113 * explicitly */ 112 * explicitly */
114 if (!ofdev->dev.parent) 113 if (!ofdev->dev.parent)
115 set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->node)); 114 set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->dev.of_node));
116 115
117 return device_add(&ofdev->dev); 116 return device_add(&ofdev->dev);
118} 117}
@@ -132,11 +131,11 @@ ssize_t of_device_get_modalias(struct of_device *ofdev,
132 ssize_t tsize, csize, repend; 131 ssize_t tsize, csize, repend;
133 132
134 /* Name & Type */ 133 /* Name & Type */
135 csize = snprintf(str, len, "of:N%sT%s", 134 csize = snprintf(str, len, "of:N%sT%s", ofdev->dev.of_node->name,
136 ofdev->node->name, ofdev->node->type); 135 ofdev->dev.of_node->type);
137 136
138 /* Get compatible property if any */ 137 /* Get compatible property if any */
139 compat = of_get_property(ofdev->node, "compatible", &cplen); 138 compat = of_get_property(ofdev->dev.of_node, "compatible", &cplen);
140 if (!compat) 139 if (!compat)
141 return csize; 140 return csize;
142 141
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index dee4fb56b094..b6987bba8556 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -556,6 +556,21 @@ void __init unflatten_device_tree(void)
556 556
557 pr_debug(" -> unflatten_device_tree()\n"); 557 pr_debug(" -> unflatten_device_tree()\n");
558 558
559 if (!initial_boot_params) {
560 pr_debug("No device tree pointer\n");
561 return;
562 }
563
564 pr_debug("Unflattening device tree:\n");
565 pr_debug("magic: %08x\n", be32_to_cpu(initial_boot_params->magic));
566 pr_debug("size: %08x\n", be32_to_cpu(initial_boot_params->totalsize));
567 pr_debug("version: %08x\n", be32_to_cpu(initial_boot_params->version));
568
569 if (be32_to_cpu(initial_boot_params->magic) != OF_DT_HEADER) {
570 pr_err("Invalid device tree blob header\n");
571 return;
572 }
573
559 /* First pass, scan for size */ 574 /* First pass, scan for size */
560 start = ((unsigned long)initial_boot_params) + 575 start = ((unsigned long)initial_boot_params) +
561 be32_to_cpu(initial_boot_params->off_dt_struct); 576 be32_to_cpu(initial_boot_params->off_dt_struct);
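
unflatten_device_tree() previously assumed the boot wrapper always handed over a valid flattened tree; the new guards make a missing blob pointer a quiet no-op and reject a blob whose magic is not OF_DT_HEADER before any parsing, which matters once the same code runs on platforms where a device tree is optional. A userspace rendering of the header check (struct reduced to the fields used; FDT headers are big-endian, hence the byte swaps):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* ntohl/htonl for the big-endian fields */

#define OF_DT_HEADER 0xd00dfeed

struct boot_param_header {      /* reduced: the real header has more fields */
        uint32_t magic;
        uint32_t totalsize;
        uint32_t version;
};

static int check_fdt(const struct boot_param_header *blob)
{
        if (!blob) {
                fprintf(stderr, "No device tree pointer\n");
                return -1;
        }
        if (ntohl(blob->magic) != OF_DT_HEADER) {
                fprintf(stderr, "Invalid device tree blob header\n");
                return -1;
        }
        printf("fdt: size %#x version %u\n",
               ntohl(blob->totalsize), ntohl(blob->version));
        return 0;
}

int main(void)
{
        struct boot_param_header good = {
                htonl(OF_DT_HEADER), htonl(0x100), htonl(17)
        };

        check_fdt(&good);       /* prints the header summary */
        check_fdt(NULL);        /* rejected: no blob pointer */
        return 0;
}
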
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index a3a708e590d0..ab6522c8e4fe 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -42,7 +42,7 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
42 42
43 info.addr = be32_to_cpup(addr); 43 info.addr = be32_to_cpup(addr);
44 44
45 dev_archdata_set_node(&dev_ad, node); 45 info.of_node = node;
46 info.archdata = &dev_ad; 46 info.archdata = &dev_ad;
47 47
48 request_module("%s", info.type); 48 request_module("%s", info.type);
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(of_register_i2c_devices);
68 68
69static int of_dev_node_match(struct device *dev, void *data) 69static int of_dev_node_match(struct device *dev, void *data)
70{ 70{
71 return dev_archdata_get_node(&dev->archdata) == data; 71 return dev->of_node == data;
72} 72}
73 73
74/* must call put_device() when done with returned i2c_client device */ 74/* must call put_device() when done with returned i2c_client device */
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index b4748337223b..42a6715f8e84 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -79,7 +79,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
79 /* Associate the OF node with the device structure so it 79 /* Associate the OF node with the device structure so it
80 * can be looked up later */ 80 * can be looked up later */
81 of_node_get(child); 81 of_node_get(child);
82 dev_archdata_set_node(&phy->dev.archdata, child); 82 phy->dev.of_node = child;
83 83
84 /* All data is now stored in the phy struct; register it */ 84 /* All data is now stored in the phy struct; register it */
85 rc = phy_device_register(phy); 85 rc = phy_device_register(phy);
@@ -100,7 +100,7 @@ EXPORT_SYMBOL(of_mdiobus_register);
100/* Helper function for of_phy_find_device */ 100/* Helper function for of_phy_find_device */
101static int of_phy_match(struct device *dev, void *phy_np) 101static int of_phy_match(struct device *dev, void *phy_np)
102{ 102{
103 return dev_archdata_get_node(&dev->archdata) == phy_np; 103 return dev->of_node == phy_np;
104} 104}
105 105
106/** 106/**
@@ -166,7 +166,7 @@ struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
166 if (!dev->dev.parent) 166 if (!dev->dev.parent)
167 return NULL; 167 return NULL;
168 168
169 net_np = dev_archdata_get_node(&dev->dev.parent->archdata); 169 net_np = dev->dev.parent->of_node;
170 if (!net_np) 170 if (!net_np)
171 return NULL; 171 return NULL;
172 172
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c
index f65f48b98448..5fed7e3c7da3 100644
--- a/drivers/of/of_spi.c
+++ b/drivers/of/of_spi.c
@@ -79,7 +79,7 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np)
79 79
80 /* Store a pointer to the node in the device structure */ 80 /* Store a pointer to the node in the device structure */
81 of_node_get(nc); 81 of_node_get(nc);
82 spi->dev.archdata.of_node = nc; 82 spi->dev.of_node = nc;
83 83
84 /* Register the new device */ 84 /* Register the new device */
85 request_module(spi->modalias); 85 request_module(spi->modalias);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index d58ade170c4b..7dacc1ebe91e 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -21,14 +21,12 @@ extern struct device_attribute of_platform_device_attrs[];
21 21
22static int of_platform_bus_match(struct device *dev, struct device_driver *drv) 22static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
23{ 23{
24 struct of_device *of_dev = to_of_device(dev); 24 const struct of_device_id *matches = drv->of_match_table;
25 struct of_platform_driver *of_drv = to_of_platform_driver(drv);
26 const struct of_device_id *matches = of_drv->match_table;
27 25
28 if (!matches) 26 if (!matches)
29 return 0; 27 return 0;
30 28
31 return of_match_device(matches, of_dev) != NULL; 29 return of_match_device(matches, dev) != NULL;
32} 30}
33 31
34static int of_platform_device_probe(struct device *dev) 32static int of_platform_device_probe(struct device *dev)
@@ -46,7 +44,7 @@ static int of_platform_device_probe(struct device *dev)
46 44
47 of_dev_get(of_dev); 45 of_dev_get(of_dev);
48 46
49 match = of_match_device(drv->match_table, of_dev); 47 match = of_match_device(drv->driver.of_match_table, dev);
50 if (match) 48 if (match)
51 error = drv->probe(of_dev, match); 49 error = drv->probe(of_dev, match);
52 if (error) 50 if (error)
@@ -386,11 +384,6 @@ int of_bus_type_init(struct bus_type *bus, const char *name)
386 384
387int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus) 385int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus)
388{ 386{
389 /* initialize common driver fields */
390 if (!drv->driver.name)
391 drv->driver.name = drv->name;
392 if (!drv->driver.owner)
393 drv->driver.owner = drv->owner;
394 drv->driver.bus = bus; 387 drv->driver.bus = bus;
395 388
396 /* register with core */ 389 /* register with core */
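
With of_node moved onto struct device and of_match_table onto struct device_driver, the bus-match path no longer needs the OF-specific wrapper structs, and of_register_driver() stops copying name/owner into the generic driver - the fields simply live there now. A heavily simplified, compilable model of the new matching flow (assumption: the real of_match_node() also matches node name and device_type, not just "compatible"):

#include <stddef.h>
#include <string.h>

/* Reduced stand-ins for the generic structs, illustration only. */
struct device_node   { const char *compatible; };
struct device        { struct device_node *of_node; };
struct of_device_id  { const char *compatible; };
struct device_driver { const struct of_device_id *of_match_table; };

static const struct of_device_id *
match_device(const struct of_device_id *ids, const struct device *dev)
{
        if (!ids || !dev->of_node)
                return NULL;
        for (; ids->compatible; ids++)
                if (strcmp(ids->compatible, dev->of_node->compatible) == 0)
                        return ids;
        return NULL;
}

/* Bus match needs only the generic structs - no to_of_device() wrappers. */
static int bus_match(struct device *dev, struct device_driver *drv)
{
        return match_device(drv->of_match_table, dev) != NULL;
}

int main(void)
{
        static const struct of_device_id tbl[] = { { "demo,device" }, { NULL } };
        struct device_node node = { "demo,device" };
        struct device dev = { &node };
        struct device_driver drv = { tbl };

        return !bus_match(&dev, &drv);  /* exits 0 on a successful match */
}
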
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index 1586e1caa2f5..8bef6d60f88b 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -18,6 +18,8 @@
18#include <linux/parport.h> 18#include <linux/parport.h>
19#include <linux/ioport.h> 19#include <linux/ioport.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22
21#include <asm/setup.h> 23#include <asm/setup.h>
22#include <asm/amigahw.h> 24#include <asm/amigahw.h>
23#include <asm/irq.h> 25#include <asm/irq.h>
@@ -31,7 +33,6 @@
31#define DPRINTK(x...) do { } while (0) 33#define DPRINTK(x...) do { } while (0)
32#endif 34#endif
33 35
34static struct parport *this_port = NULL;
35 36
36static void amiga_write_data(struct parport *p, unsigned char data) 37static void amiga_write_data(struct parport *p, unsigned char data)
37{ 38{
@@ -227,18 +228,11 @@ static struct parport_operations pp_amiga_ops = {
227 228
228/* ----------- Initialisation code --------------------------------- */ 229/* ----------- Initialisation code --------------------------------- */
229 230
230static int __init parport_amiga_init(void) 231static int __init amiga_parallel_probe(struct platform_device *pdev)
231{ 232{
232 struct parport *p; 233 struct parport *p;
233 int err; 234 int err;
234 235
235 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_PARALLEL))
236 return -ENODEV;
237
238 err = -EBUSY;
239 if (!request_mem_region(CIAA_PHYSADDR-1+0x100, 0x100, "parallel"))
240 goto out_mem;
241
242 ciaa.ddrb = 0xff; 236 ciaa.ddrb = 0xff;
243 ciab.ddra &= 0xf8; 237 ciab.ddra &= 0xf8;
244 mb(); 238 mb();
@@ -246,41 +240,63 @@ static int __init parport_amiga_init(void)
246 p = parport_register_port((unsigned long)&ciaa.prb, IRQ_AMIGA_CIAA_FLG, 240 p = parport_register_port((unsigned long)&ciaa.prb, IRQ_AMIGA_CIAA_FLG,
247 PARPORT_DMA_NONE, &pp_amiga_ops); 241 PARPORT_DMA_NONE, &pp_amiga_ops);
248 if (!p) 242 if (!p)
249 goto out_port; 243 return -EBUSY;
250 244
251 err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name, p); 245 err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name,
246 p);
252 if (err) 247 if (err)
253 goto out_irq; 248 goto out_irq;
254 249
255 this_port = p;
256 printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name); 250 printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name);
257 /* XXX: set operating mode */ 251 /* XXX: set operating mode */
258 parport_announce_port(p); 252 parport_announce_port(p);
259 253
254 platform_set_drvdata(pdev, p);
255
260 return 0; 256 return 0;
261 257
262out_irq: 258out_irq:
263 parport_put_port(p); 259 parport_put_port(p);
264out_port:
265 release_mem_region(CIAA_PHYSADDR-1+0x100, 0x100);
266out_mem:
267 return err; 260 return err;
268} 261}
269 262
270static void __exit parport_amiga_exit(void) 263static int __exit amiga_parallel_remove(struct platform_device *pdev)
264{
265 struct parport *port = platform_get_drvdata(pdev);
266
267 parport_remove_port(port);
268 if (port->irq != PARPORT_IRQ_NONE)
269 free_irq(IRQ_AMIGA_CIAA_FLG, port);
270 parport_put_port(port);
271 platform_set_drvdata(pdev, NULL);
272 return 0;
273}
274
275static struct platform_driver amiga_parallel_driver = {
276 .remove = __exit_p(amiga_parallel_remove),
277 .driver = {
278 .name = "amiga-parallel",
279 .owner = THIS_MODULE,
280 },
281};
282
283static int __init amiga_parallel_init(void)
284{
285 return platform_driver_probe(&amiga_parallel_driver,
286 amiga_parallel_probe);
287}
288
289module_init(amiga_parallel_init);
290
291static void __exit amiga_parallel_exit(void)
271{ 292{
272 parport_remove_port(this_port); 293 platform_driver_unregister(&amiga_parallel_driver);
273 if (this_port->irq != PARPORT_IRQ_NONE)
274 free_irq(IRQ_AMIGA_CIAA_FLG, this_port);
275 parport_put_port(this_port);
276 release_mem_region(CIAA_PHYSADDR-1+0x100, 0x100);
277} 294}
278 295
296module_exit(amiga_parallel_exit);
279 297
280MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>"); 298MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
281MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port"); 299MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port");
282MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port"); 300MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port");
283MODULE_LICENSE("GPL"); 301MODULE_LICENSE("GPL");
284 302MODULE_ALIAS("platform:amiga-parallel");
285module_init(parport_amiga_init)
286module_exit(parport_amiga_exit)
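
The Amiga parport driver moves from bare module_init/module_exit plus a file-scope static to a bound platform device: the port pointer travels in drvdata, the MACH_IS_AMIGA and mem-region gating presumably moves to wherever the "amiga-parallel" platform device is registered, and platform_driver_probe() registers the driver while calling the probe exactly once, so the probe function is never stored in the struct (and can live in init memory). MODULE_ALIAS("platform:amiga-parallel") keeps autoloading keyed to the device's modalias. A userspace analog of the register-and-probe-once idea:

#include <stdio.h>

/* Userspace analog of platform_driver_probe(): register and probe in
 * one shot, so the probe routine is only referenced during init and is
 * never kept in the driver struct. */
struct platform_driver { const char *name; int (*remove)(void); };

static int driver_probe_once(struct platform_driver *drv, int (*probe)(void))
{
        printf("registering %s\n", drv->name);
        return probe();                 /* called once; pointer not stored */
}

static int demo_probe(void)  { puts("probed");  return 0; }
static int demo_remove(void) { puts("removed"); return 0; }

static struct platform_driver demo_driver = {
        .name   = "amiga-parallel",
        .remove = demo_remove,          /* note: no .probe member at all */
};

int main(void) { return driver_probe_once(&demo_driver, demo_probe); }
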
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 065f229580d5..9a5b4b894161 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -382,8 +382,11 @@ static const struct of_device_id bpp_match[] = {
382MODULE_DEVICE_TABLE(of, bpp_match); 382MODULE_DEVICE_TABLE(of, bpp_match);
383 383
384static struct of_platform_driver bpp_sbus_driver = { 384static struct of_platform_driver bpp_sbus_driver = {
385 .name = "bpp", 385 .driver = {
386 .match_table = bpp_match, 386 .name = "bpp",
387 .owner = THIS_MODULE,
388 .of_match_table = bpp_match,
389 },
387 .probe = bpp_probe, 390 .probe = bpp_probe,
388 .remove = __devexit_p(bpp_remove), 391 .remove = __devexit_p(bpp_remove),
389}; 392};
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 2e59fe947d28..f94d8281cfb0 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -185,7 +185,7 @@ static int __devinit electra_cf_probe(struct of_device *ofdev,
185 const struct of_device_id *match) 185 const struct of_device_id *match)
186{ 186{
187 struct device *device = &ofdev->dev; 187 struct device *device = &ofdev->dev;
188 struct device_node *np = ofdev->node; 188 struct device_node *np = ofdev->dev.of_node;
189 struct electra_cf_socket *cf; 189 struct electra_cf_socket *cf;
190 struct resource mem, io; 190 struct resource mem, io;
191 int status; 191 int status;
@@ -357,8 +357,11 @@ static const struct of_device_id electra_cf_match[] = {
357MODULE_DEVICE_TABLE(of, electra_cf_match); 357MODULE_DEVICE_TABLE(of, electra_cf_match);
358 358
359static struct of_platform_driver electra_cf_driver = { 359static struct of_platform_driver electra_cf_driver = {
360 .name = (char *)driver_name, 360 .driver = {
361 .match_table = electra_cf_match, 361 .name = (char *)driver_name,
362 .owner = THIS_MODULE,
363 .of_match_table = electra_cf_match,
364 },
362 .probe = electra_cf_probe, 365 .probe = electra_cf_probe,
363 .remove = electra_cf_remove, 366 .remove = electra_cf_remove,
364}; 367};
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 41cc954a5ffe..1a648b90b634 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1298,8 +1298,11 @@ static const struct of_device_id m8xx_pcmcia_match[] = {
1298MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match); 1298MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match);
1299 1299
1300static struct of_platform_driver m8xx_pcmcia_driver = { 1300static struct of_platform_driver m8xx_pcmcia_driver = {
1301 .name = driver_name, 1301 .driver = {
1302 .match_table = m8xx_pcmcia_match, 1302 .name = driver_name,
1303 .owner = THIS_MODULE,
1302 .match_table = m8xx_pcmcia_match, 1304 .of_match_table = m8xx_pcmcia_match,
1305 },
1303 .probe = m8xx_probe, 1306 .probe = m8xx_probe,
1304 .remove = m8xx_remove, 1307 .remove = m8xx_remove,
1305}; 1308};
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index ef0c5f133691..d007a2a03830 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -813,8 +813,7 @@ static u_int ds_poll(struct file *file, poll_table *wait)
813 813
814/*====================================================================*/ 814/*====================================================================*/
815 815
816static int ds_ioctl(struct inode *inode, struct file *file, 816static int ds_ioctl(struct file *file, u_int cmd, u_long arg)
817 u_int cmd, u_long arg)
818{ 817{
819 struct pcmcia_socket *s; 818 struct pcmcia_socket *s;
820 void __user *uarg = (char __user *)arg; 819 void __user *uarg = (char __user *)arg;
@@ -1021,13 +1020,25 @@ free_out:
1021 return err; 1020 return err;
1022} /* ds_ioctl */ 1021} /* ds_ioctl */
1023 1022
1023static long ds_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1024{
1025 int ret;
1026
1027 lock_kernel();
1028 ret = ds_ioctl(file, cmd, arg);
1029 unlock_kernel();
1030
1031 return ret;
1032}
1033
1034
1024/*====================================================================*/ 1035/*====================================================================*/
1025 1036
1026static const struct file_operations ds_fops = { 1037static const struct file_operations ds_fops = {
1027 .owner = THIS_MODULE, 1038 .owner = THIS_MODULE,
1028 .open = ds_open, 1039 .open = ds_open,
1029 .release = ds_release, 1040 .release = ds_release,
1030 .ioctl = ds_ioctl, 1041 .unlocked_ioctl = ds_unlocked_ioctl,
1031 .read = ds_read, 1042 .read = ds_read,
1032 .write = ds_write, 1043 .write = ds_write,
1033 .poll = ds_poll, 1044 .poll = ds_poll,
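
The ds_ioctl change is a standard big-kernel-lock pushdown: the legacy .ioctl hook, which the VFS called with the BKL held, becomes an ordinary function, and a thin .unlocked_ioctl wrapper takes and drops the BKL itself. A sketch of the idiom with placeholder names, assuming the pre-2.6.37 smp_lock.h interface:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

static int foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* body of the old .ioctl handler, minus the inode argument */
	return 0;
}

static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	long ret;

	lock_kernel();		/* keep the locking the old hook implied */
	ret = foo_ioctl(file, cmd, arg);
	unlock_kernel();

	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_unlocked_ioctl,
};
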
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index faaa9b4d0d07..8e9ba177d817 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -57,6 +57,11 @@ config WM8350_POWER
57 Say Y here to enable support for the power management unit 57 Say Y here to enable support for the power management unit
58 provided by the Wolfson Microelectronics WM8350 PMIC. 58 provided by the Wolfson Microelectronics WM8350 PMIC.
59 59
60config TEST_POWER
61 tristate "Test power driver"
62 help
63 This driver is used for testing. It's safe to say M here.
64
60config BATTERY_DS2760 65config BATTERY_DS2760
61 tristate "DS2760 battery driver (HP iPAQ & others)" 66 tristate "DS2760 battery driver (HP iPAQ & others)"
62 select W1 67 select W1
@@ -65,10 +70,10 @@ config BATTERY_DS2760
65 Say Y here to enable support for batteries with ds2760 chip. 70 Say Y here to enable support for batteries with ds2760 chip.
66 71
67config BATTERY_DS2782 72config BATTERY_DS2782
68 tristate "DS2782 standalone gas-gauge" 73 tristate "DS2782/DS2786 standalone gas-gauge"
69 depends on I2C 74 depends on I2C
70 help 75 help
71 Say Y here to enable support for the DS2782 standalone battery 76 Say Y here to enable support for the DS2782/DS2786 standalone battery
72 gas-gauge. 77 gas-gauge.
73 78
74config BATTERY_PMU 79config BATTERY_PMU
@@ -125,6 +130,12 @@ config BATTERY_MAX17040
125 in handheld and portable equipment. The MAX17040 is configured 130 in handheld and portable equipment. The MAX17040 is configured
126 to operate with a single lithium cell 131 to operate with a single lithium cell
127 132
133config BATTERY_Z2
134 tristate "Z2 battery driver"
135 depends on I2C && MACH_ZIPIT2
136 help
137 Say Y to include support for the battery on the Zipit Z2.
138
128config CHARGER_PCF50633 139config CHARGER_PCF50633
129 tristate "NXP PCF50633 MBC" 140 tristate "NXP PCF50633 MBC"
130 depends on MFD_PCF50633 141 depends on MFD_PCF50633
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index a2ba7c85c97a..00050809a6c7 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_MAX8925_POWER) += max8925_power.o
20obj-$(CONFIG_WM831X_BACKUP) += wm831x_backup.o 20obj-$(CONFIG_WM831X_BACKUP) += wm831x_backup.o
21obj-$(CONFIG_WM831X_POWER) += wm831x_power.o 21obj-$(CONFIG_WM831X_POWER) += wm831x_power.o
22obj-$(CONFIG_WM8350_POWER) += wm8350_power.o 22obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
23obj-$(CONFIG_TEST_POWER) += test_power.o
23 24
24obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o 25obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
25obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o 26obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o
@@ -31,4 +32,5 @@ obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
31obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o 32obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
32obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o 33obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
33obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o 34obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
35obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
34obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o 36obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index 3bf8d1f622e3..4d3b27228a2e 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -304,6 +304,28 @@ static void ds2760_battery_write_rated_capacity(struct ds2760_device_info *di,
304 w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1); 304 w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
305} 305}
306 306
307static void ds2760_battery_write_active_full(struct ds2760_device_info *di,
308 int active_full)
309{
310 unsigned char tmp[2] = {
311 active_full >> 8,
312 active_full & 0xff
313 };
314
315 if (tmp[0] == di->raw[DS2760_ACTIVE_FULL] &&
316 tmp[1] == di->raw[DS2760_ACTIVE_FULL + 1])
317 return;
318
319 w1_ds2760_write(di->w1_dev, tmp, DS2760_ACTIVE_FULL, sizeof(tmp));
320 w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK0);
321 w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK0);
322
323 /* Write to the di->raw[] buffer directly - the DS2760_ACTIVE_FULL
324 * values won't be read back by ds2760_battery_read_status() */
325 di->raw[DS2760_ACTIVE_FULL] = tmp[0];
326 di->raw[DS2760_ACTIVE_FULL + 1] = tmp[1];
327}
328
307static void ds2760_battery_work(struct work_struct *work) 329static void ds2760_battery_work(struct work_struct *work)
308{ 330{
309 struct ds2760_device_info *di = container_of(work, 331 struct ds2760_device_info *di = container_of(work,
@@ -426,6 +448,45 @@ static int ds2760_battery_get_property(struct power_supply *psy,
426 return 0; 448 return 0;
427} 449}
428 450
451static int ds2760_battery_set_property(struct power_supply *psy,
452 enum power_supply_property psp,
453 const union power_supply_propval *val)
454{
455 struct ds2760_device_info *di = to_ds2760_device_info(psy);
456
457 switch (psp) {
458 case POWER_SUPPLY_PROP_CHARGE_FULL:
459 /* the interface counts in uAh, convert the value */
460 ds2760_battery_write_active_full(di, val->intval / 1000L);
461 break;
462
463 case POWER_SUPPLY_PROP_CHARGE_NOW:
464 /* ds2760_battery_set_current_accum() does the conversion */
465 ds2760_battery_set_current_accum(di, val->intval);
466 break;
467
468 default:
469 return -EPERM;
470 }
471
472 return 0;
473}
474
475static int ds2760_battery_property_is_writeable(struct power_supply *psy,
476 enum power_supply_property psp)
477{
478 switch (psp) {
479 case POWER_SUPPLY_PROP_CHARGE_FULL:
480 case POWER_SUPPLY_PROP_CHARGE_NOW:
481 return 1;
482
483 default:
484 break;
485 }
486
487 return 0;
488}
489
429static enum power_supply_property ds2760_battery_props[] = { 490static enum power_supply_property ds2760_battery_props[] = {
430 POWER_SUPPLY_PROP_STATUS, 491 POWER_SUPPLY_PROP_STATUS,
431 POWER_SUPPLY_PROP_VOLTAGE_NOW, 492 POWER_SUPPLY_PROP_VOLTAGE_NOW,
@@ -460,6 +521,9 @@ static int ds2760_battery_probe(struct platform_device *pdev)
460 di->bat.properties = ds2760_battery_props; 521 di->bat.properties = ds2760_battery_props;
461 di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props); 522 di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props);
462 di->bat.get_property = ds2760_battery_get_property; 523 di->bat.get_property = ds2760_battery_get_property;
524 di->bat.set_property = ds2760_battery_set_property;
525 di->bat.property_is_writeable =
526 ds2760_battery_property_is_writeable;
463 di->bat.set_charged = ds2760_battery_set_charged; 527 di->bat.set_charged = ds2760_battery_set_charged;
464 di->bat.external_power_changed = 528 di->bat.external_power_changed =
465 ds2760_battery_external_power_changed; 529 ds2760_battery_external_power_changed;
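
The division by 1000 in the new set_property hook is a unit conversion: the power-supply class expresses POWER_SUPPLY_PROP_CHARGE_FULL in microampere-hours, while ds2760_battery_write_active_full() takes milliampere-hours and splits them across the two ACTIVE_FULL register bytes. A standalone check of that arithmetic (the 1,350,000 uAh input is just an example value):

#include <stdio.h>

int main(void)
{
	int uah = 1350000;		/* written via sysfs, in uAh */
	int mah = uah / 1000;		/* the driver's val->intval / 1000L */
	unsigned char tmp[2] = { mah >> 8, mah & 0xff };

	printf("%d mAh -> MSB 0x%02x, LSB 0x%02x\n", mah, tmp[0], tmp[1]);
	return 0;			/* 1350 mAh -> MSB 0x05, LSB 0x46 */
}
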
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 99c89976a902..d762a0cbc6af 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -5,6 +5,8 @@
5 * 5 *
6 * Author: Ryan Mallon <ryan@bluewatersys.com> 6 * Author: Ryan Mallon <ryan@bluewatersys.com>
7 * 7 *
8 * DS2786 added by Yulia Vilensky <vilensky@compulab.co.il>
9 *
8 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
@@ -20,12 +22,13 @@
20#include <linux/idr.h> 22#include <linux/idr.h>
21#include <linux/power_supply.h> 23#include <linux/power_supply.h>
22#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/ds2782_battery.h>
23 26
24#define DS2782_REG_RARC 0x06 /* Remaining active relative capacity */ 27#define DS2782_REG_RARC 0x06 /* Remaining active relative capacity */
25 28
26#define DS2782_REG_VOLT_MSB 0x0c 29#define DS278x_REG_VOLT_MSB 0x0c
27#define DS2782_REG_TEMP_MSB 0x0a 30#define DS278x_REG_TEMP_MSB 0x0a
28#define DS2782_REG_CURRENT_MSB 0x0e 31#define DS278x_REG_CURRENT_MSB 0x0e
29 32
30/* EEPROM Block */ 33/* EEPROM Block */
31#define DS2782_REG_RSNSP 0x69 /* Sense resistor value */ 34#define DS2782_REG_RSNSP 0x69 /* Sense resistor value */
@@ -33,18 +36,33 @@
33/* Current unit measurement in uA for a 1 milli-ohm sense resistor */ 36/* Current unit measurement in uA for a 1 milli-ohm sense resistor */
34#define DS2782_CURRENT_UNITS 1563 37#define DS2782_CURRENT_UNITS 1563
35 38
36#define to_ds2782_info(x) container_of(x, struct ds2782_info, battery) 39#define DS2786_REG_RARC 0x02 /* Remaining active relative capacity */
40
41#define DS2786_CURRENT_UNITS 25
42
43struct ds278x_info;
44
45struct ds278x_battery_ops {
46 int (*get_current)(struct ds278x_info *info, int *current_uA);
47 int (*get_voltage)(struct ds278x_info *info, int *voltage_uA);
48 int (*get_capacity)(struct ds278x_info *info, int *capacity_uA);
49
50};
51
52#define to_ds278x_info(x) container_of(x, struct ds278x_info, battery)
37 53
38struct ds2782_info { 54struct ds278x_info {
39 struct i2c_client *client; 55 struct i2c_client *client;
40 struct power_supply battery; 56 struct power_supply battery;
57 struct ds278x_battery_ops *ops;
41 int id; 58 int id;
59 int rsns;
42}; 60};
43 61
44static DEFINE_IDR(battery_id); 62static DEFINE_IDR(battery_id);
45static DEFINE_MUTEX(battery_lock); 63static DEFINE_MUTEX(battery_lock);
46 64
47static inline int ds2782_read_reg(struct ds2782_info *info, int reg, u8 *val) 65static inline int ds278x_read_reg(struct ds278x_info *info, int reg, u8 *val)
48{ 66{
49 int ret; 67 int ret;
50 68
@@ -58,7 +76,7 @@ static inline int ds2782_read_reg(struct ds2782_info *info, int reg, u8 *val)
58 return 0; 76 return 0;
59} 77}
60 78
61static inline int ds2782_read_reg16(struct ds2782_info *info, int reg_msb, 79static inline int ds278x_read_reg16(struct ds278x_info *info, int reg_msb,
62 s16 *val) 80 s16 *val)
63{ 81{
64 int ret; 82 int ret;
@@ -73,7 +91,7 @@ static inline int ds2782_read_reg16(struct ds2782_info *info, int reg_msb,
73 return 0; 91 return 0;
74} 92}
75 93
76static int ds2782_get_temp(struct ds2782_info *info, int *temp) 94static int ds278x_get_temp(struct ds278x_info *info, int *temp)
77{ 95{
78 s16 raw; 96 s16 raw;
79 int err; 97 int err;
@@ -84,14 +102,14 @@ static int ds2782_get_temp(struct ds2782_info *info, int *temp)
84 * celsius. The temperature value is stored as a 10 bit number, plus 102 * celsius. The temperature value is stored as a 10 bit number, plus
85 * sign in the upper bits of a 16 bit register. 103 * sign in the upper bits of a 16 bit register.
86 */ 104 */
87 err = ds2782_read_reg16(info, DS2782_REG_TEMP_MSB, &raw); 105 err = ds278x_read_reg16(info, DS278x_REG_TEMP_MSB, &raw);
88 if (err) 106 if (err)
89 return err; 107 return err;
90 *temp = ((raw / 32) * 125) / 100; 108 *temp = ((raw / 32) * 125) / 100;
91 return 0; 109 return 0;
92} 110}
93 111
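
The expression ((raw / 32) * 125) / 100 in ds278x_get_temp() deserves a note: the reading sits left-justified in the 16-bit register, so raw / 32 recovers the signed count in 0.125 degree steps, and multiplying by 125/100 (that is, by 1.25 = 10 * 0.125) yields tenths of a degree Celsius, the unit the power-supply class uses for PROP_TEMP. A quick standalone check with an example register value:

#include <stdio.h>

int main(void)
{
	short raw = 0x1900;		/* example register value */
	int counts = raw / 32;		/* 200 steps of 0.125 degrees */
	int tenths = (counts * 125) / 100;

	printf("%d counts -> %d.%d C\n", counts, tenths / 10, tenths % 10);
	return 0;			/* 200 counts -> 25.0 C */
}
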
94static int ds2782_get_current(struct ds2782_info *info, int *current_uA) 112static int ds2782_get_current(struct ds278x_info *info, int *current_uA)
95{ 113{
96 int sense_res; 114 int sense_res;
97 int err; 115 int err;
@@ -102,7 +120,7 @@ static int ds2782_get_current(struct ds2782_info *info, int *current_uA)
102 * The units of measurement for current are dependent on the value of 120 * The units of measurement for current are dependent on the value of
103 * the sense resistor. 121 * the sense resistor.
104 */ 122 */
105 err = ds2782_read_reg(info, DS2782_REG_RSNSP, &sense_res_raw); 123 err = ds278x_read_reg(info, DS2782_REG_RSNSP, &sense_res_raw);
106 if (err) 124 if (err)
107 return err; 125 return err;
108 if (sense_res_raw == 0) { 126 if (sense_res_raw == 0) {
@@ -113,14 +131,14 @@ static int ds2782_get_current(struct ds2782_info *info, int *current_uA)
113 131
114 dev_dbg(&info->client->dev, "sense resistor = %d milli-ohms\n", 132 dev_dbg(&info->client->dev, "sense resistor = %d milli-ohms\n",
115 sense_res); 133 sense_res);
116 err = ds2782_read_reg16(info, DS2782_REG_CURRENT_MSB, &raw); 134 err = ds278x_read_reg16(info, DS278x_REG_CURRENT_MSB, &raw);
117 if (err) 135 if (err)
118 return err; 136 return err;
119 *current_uA = raw * (DS2782_CURRENT_UNITS / sense_res); 137 *current_uA = raw * (DS2782_CURRENT_UNITS / sense_res);
120 return 0; 138 return 0;
121} 139}
122 140
123static int ds2782_get_voltage(struct ds2782_info *info, int *voltage_uA) 141static int ds2782_get_voltage(struct ds278x_info *info, int *voltage_uA)
124{ 142{
125 s16 raw; 143 s16 raw;
126 int err; 144 int err;
@@ -129,36 +147,77 @@ static int ds2782_get_voltage(struct ds2782_info *info, int *voltage_uA)
129 * Voltage is measured in units of 4.88mV. The voltage is stored as 147 * Voltage is measured in units of 4.88mV. The voltage is stored as
130 * a 10-bit number plus sign, in the upper bits of a 16-bit register 148 * a 10-bit number plus sign, in the upper bits of a 16-bit register
131 */ 149 */
132 err = ds2782_read_reg16(info, DS2782_REG_VOLT_MSB, &raw); 150 err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
133 if (err) 151 if (err)
134 return err; 152 return err;
135 *voltage_uA = (raw / 32) * 4800; 153 *voltage_uA = (raw / 32) * 4800;
136 return 0; 154 return 0;
137} 155}
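
The two voltage getters follow the same left-justification pattern: the DS2782 path divides by 32 and scales by 4800 uV per step (a rounding of the 4.88 mV quoted in the comment, which would be 4880), while the DS2786 path further down divides by 8 and scales by 1220 uV. Verifying both scalings standalone, with example raw readings:

#include <stdio.h>

int main(void)
{
	short ds2782_raw = 0x4b00;	/* example readings */
	short ds2786_raw = 0x6400;

	/* DS2782: left-justified by 5 bits, ~4.88 mV per step */
	printf("ds2782: %d uV\n", (ds2782_raw / 32) * 4800);
	/* DS2786: left-justified by 3 bits, 1.22 mV per step */
	printf("ds2786: %d uV\n", (ds2786_raw / 8) * 1220);
	return 0;			/* 2880000 uV and 3904000 uV */
}
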
138 156
139static int ds2782_get_capacity(struct ds2782_info *info, int *capacity) 157static int ds2782_get_capacity(struct ds278x_info *info, int *capacity)
140{ 158{
141 int err; 159 int err;
142 u8 raw; 160 u8 raw;
143 161
144 err = ds2782_read_reg(info, DS2782_REG_RARC, &raw); 162 err = ds278x_read_reg(info, DS2782_REG_RARC, &raw);
145 if (err) 163 if (err)
146 return err; 164 return err;
147 *capacity = raw; 165 *capacity = raw;
148 return raw; 166 return 0;
149} 167}
150 168
151static int ds2782_get_status(struct ds2782_info *info, int *status) 169static int ds2786_get_current(struct ds278x_info *info, int *current_uA)
170{
171 int err;
172 s16 raw;
173
174 err = ds278x_read_reg16(info, DS278x_REG_CURRENT_MSB, &raw);
175 if (err)
176 return err;
177 *current_uA = (raw / 16) * (DS2786_CURRENT_UNITS / info->rsns);
178 return 0;
179}
180
181static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uA)
182{
183 s16 raw;
184 int err;
185
186 /*
187 * Voltage is measured in units of 1.22mV. The voltage is stored as
188 * a 10-bit number plus sign, in the upper bits of a 16-bit register
189 */
190 err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
191 if (err)
192 return err;
193 *voltage_uA = (raw / 8) * 1220;
194 return 0;
195}
196
197static int ds2786_get_capacity(struct ds278x_info *info, int *capacity)
198{
199 int err;
200 u8 raw;
201
202 err = ds278x_read_reg(info, DS2786_REG_RARC, &raw);
203 if (err)
204 return err;
205 /* Relative capacity is displayed with resolution 0.5 % */
206 *capacity = raw / 2;
207 return 0;
208}
209
210static int ds278x_get_status(struct ds278x_info *info, int *status)
152{ 211{
153 int err; 212 int err;
154 int current_uA; 213 int current_uA;
155 int capacity; 214 int capacity;
156 215
157 err = ds2782_get_current(info, &current_uA); 216 err = info->ops->get_current(info, &current_uA);
158 if (err) 217 if (err)
159 return err; 218 return err;
160 219
161 err = ds2782_get_capacity(info, &capacity); 220 err = info->ops->get_capacity(info, &capacity);
162 if (err) 221 if (err)
163 return err; 222 return err;
164 223
@@ -174,32 +233,32 @@ static int ds2782_get_status(struct ds2782_info *info, int *status)
174 return 0; 233 return 0;
175} 234}
176 235
177static int ds2782_battery_get_property(struct power_supply *psy, 236static int ds278x_battery_get_property(struct power_supply *psy,
178 enum power_supply_property prop, 237 enum power_supply_property prop,
179 union power_supply_propval *val) 238 union power_supply_propval *val)
180{ 239{
181 struct ds2782_info *info = to_ds2782_info(psy); 240 struct ds278x_info *info = to_ds278x_info(psy);
182 int ret; 241 int ret;
183 242
184 switch (prop) { 243 switch (prop) {
185 case POWER_SUPPLY_PROP_STATUS: 244 case POWER_SUPPLY_PROP_STATUS:
186 ret = ds2782_get_status(info, &val->intval); 245 ret = ds278x_get_status(info, &val->intval);
187 break; 246 break;
188 247
189 case POWER_SUPPLY_PROP_CAPACITY: 248 case POWER_SUPPLY_PROP_CAPACITY:
190 ret = ds2782_get_capacity(info, &val->intval); 249 ret = info->ops->get_capacity(info, &val->intval);
191 break; 250 break;
192 251
193 case POWER_SUPPLY_PROP_VOLTAGE_NOW: 252 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
194 ret = ds2782_get_voltage(info, &val->intval); 253 ret = info->ops->get_voltage(info, &val->intval);
195 break; 254 break;
196 255
197 case POWER_SUPPLY_PROP_CURRENT_NOW: 256 case POWER_SUPPLY_PROP_CURRENT_NOW:
198 ret = ds2782_get_current(info, &val->intval); 257 ret = info->ops->get_current(info, &val->intval);
199 break; 258 break;
200 259
201 case POWER_SUPPLY_PROP_TEMP: 260 case POWER_SUPPLY_PROP_TEMP:
202 ret = ds2782_get_temp(info, &val->intval); 261 ret = ds278x_get_temp(info, &val->intval);
203 break; 262 break;
204 263
205 default: 264 default:
@@ -209,7 +268,7 @@ static int ds2782_battery_get_property(struct power_supply *psy,
209 return ret; 268 return ret;
210} 269}
211 270
212static enum power_supply_property ds2782_battery_props[] = { 271static enum power_supply_property ds278x_battery_props[] = {
213 POWER_SUPPLY_PROP_STATUS, 272 POWER_SUPPLY_PROP_STATUS,
214 POWER_SUPPLY_PROP_CAPACITY, 273 POWER_SUPPLY_PROP_CAPACITY,
215 POWER_SUPPLY_PROP_VOLTAGE_NOW, 274 POWER_SUPPLY_PROP_VOLTAGE_NOW,
@@ -217,18 +276,18 @@ static enum power_supply_property ds2782_battery_props[] = {
217 POWER_SUPPLY_PROP_TEMP, 276 POWER_SUPPLY_PROP_TEMP,
218}; 277};
219 278
220static void ds2782_power_supply_init(struct power_supply *battery) 279static void ds278x_power_supply_init(struct power_supply *battery)
221{ 280{
222 battery->type = POWER_SUPPLY_TYPE_BATTERY; 281 battery->type = POWER_SUPPLY_TYPE_BATTERY;
223 battery->properties = ds2782_battery_props; 282 battery->properties = ds278x_battery_props;
224 battery->num_properties = ARRAY_SIZE(ds2782_battery_props); 283 battery->num_properties = ARRAY_SIZE(ds278x_battery_props);
225 battery->get_property = ds2782_battery_get_property; 284 battery->get_property = ds278x_battery_get_property;
226 battery->external_power_changed = NULL; 285 battery->external_power_changed = NULL;
227} 286}
228 287
229static int ds2782_battery_remove(struct i2c_client *client) 288static int ds278x_battery_remove(struct i2c_client *client)
230{ 289{
231 struct ds2782_info *info = i2c_get_clientdata(client); 290 struct ds278x_info *info = i2c_get_clientdata(client);
232 291
233 power_supply_unregister(&info->battery); 292 power_supply_unregister(&info->battery);
234 kfree(info->battery.name); 293 kfree(info->battery.name);
@@ -237,19 +296,45 @@ static int ds2782_battery_remove(struct i2c_client *client)
237 idr_remove(&battery_id, info->id); 296 idr_remove(&battery_id, info->id);
238 mutex_unlock(&battery_lock); 297 mutex_unlock(&battery_lock);
239 298
240 i2c_set_clientdata(client, info);
241
242 kfree(info); 299 kfree(info);
243 return 0; 300 return 0;
244} 301}
245 302
246static int ds2782_battery_probe(struct i2c_client *client, 303enum ds278x_num_id {
304 DS2782 = 0,
305 DS2786,
306};
307
308static struct ds278x_battery_ops ds278x_ops[] = {
309 [DS2782] = {
310 .get_current = ds2782_get_current,
311 .get_voltage = ds2782_get_voltage,
312 .get_capacity = ds2782_get_capacity,
313 },
314 [DS2786] = {
315 .get_current = ds2786_get_current,
316 .get_voltage = ds2786_get_voltage,
317 .get_capacity = ds2786_get_capacity,
318 }
319};
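
Indexing ds278x_ops[] with id->driver_data is the usual way to fold chip variants into a single i2c driver: the id table maps each device name to an enum value, probe caches &ds278x_ops[id->driver_data] in the per-device info, and every property read dispatches through info->ops. The mechanism reduced to a standalone sketch (all names invented):

#include <stdio.h>

struct chip_ops {
	int (*read_mv)(void);
};

static int chip_a_read_mv(void) { return 3700; }
static int chip_b_read_mv(void) { return 3900; }

enum chip_id { CHIP_A, CHIP_B };

static const struct chip_ops ops_table[] = {
	[CHIP_A] = { .read_mv = chip_a_read_mv },
	[CHIP_B] = { .read_mv = chip_b_read_mv },
};

int main(void)
{
	/* driver_data from the matched id table plays this role */
	unsigned long driver_data = CHIP_B;
	const struct chip_ops *ops = &ops_table[driver_data];

	printf("%d mV\n", ops->read_mv());	/* 3900 mV */
	return 0;
}
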
320
321static int ds278x_battery_probe(struct i2c_client *client,
247 const struct i2c_device_id *id) 322 const struct i2c_device_id *id)
248{ 323{
249 struct ds2782_info *info; 324 struct ds278x_platform_data *pdata = client->dev.platform_data;
325 struct ds278x_info *info;
250 int ret; 326 int ret;
251 int num; 327 int num;
252 328
329 /*
330 * ds2786 should have the sense resistor value set
331 * in the platform data
332 */
333 if (id->driver_data == DS2786 && !pdata) {
334 dev_err(&client->dev, "missing platform data for ds2786\n");
335 return -EINVAL;
336 }
337
253 /* Get an ID for this battery */ 338 /* Get an ID for this battery */
254 ret = idr_pre_get(&battery_id, GFP_KERNEL); 339 ret = idr_pre_get(&battery_id, GFP_KERNEL);
255 if (ret == 0) { 340 if (ret == 0) {
@@ -269,15 +354,20 @@ static int ds2782_battery_probe(struct i2c_client *client,
269 goto fail_info; 354 goto fail_info;
270 } 355 }
271 356
272 info->battery.name = kasprintf(GFP_KERNEL, "ds2782-%d", num); 357 info->battery.name = kasprintf(GFP_KERNEL, "%s-%d", client->name, num);
273 if (!info->battery.name) { 358 if (!info->battery.name) {
274 ret = -ENOMEM; 359 ret = -ENOMEM;
275 goto fail_name; 360 goto fail_name;
276 } 361 }
277 362
363 if (id->driver_data == DS2786)
364 info->rsns = pdata->rsns;
365
278 i2c_set_clientdata(client, info); 366 i2c_set_clientdata(client, info);
279 info->client = client; 367 info->client = client;
280 ds2782_power_supply_init(&info->battery); 368 info->id = num;
369 info->ops = &ds278x_ops[id->driver_data];
370 ds278x_power_supply_init(&info->battery);
281 371
282 ret = power_supply_register(&client->dev, &info->battery); 372 ret = power_supply_register(&client->dev, &info->battery);
283 if (ret) { 373 if (ret) {
@@ -290,7 +380,6 @@ static int ds2782_battery_probe(struct i2c_client *client,
290fail_register: 380fail_register:
291 kfree(info->battery.name); 381 kfree(info->battery.name);
292fail_name: 382fail_name:
293 i2c_set_clientdata(client, info);
294 kfree(info); 383 kfree(info);
295fail_info: 384fail_info:
296 mutex_lock(&battery_lock); 385 mutex_lock(&battery_lock);
@@ -300,31 +389,32 @@ fail_id:
300 return ret; 389 return ret;
301} 390}
302 391
303static const struct i2c_device_id ds2782_id[] = { 392static const struct i2c_device_id ds278x_id[] = {
304 {"ds2782", 0}, 393 {"ds2782", DS2782},
394 {"ds2786", DS2786},
305 {}, 395 {},
306}; 396};
307 397
308static struct i2c_driver ds2782_battery_driver = { 398static struct i2c_driver ds278x_battery_driver = {
309 .driver = { 399 .driver = {
310 .name = "ds2782-battery", 400 .name = "ds2782-battery",
311 }, 401 },
312 .probe = ds2782_battery_probe, 402 .probe = ds278x_battery_probe,
313 .remove = ds2782_battery_remove, 403 .remove = ds278x_battery_remove,
314 .id_table = ds2782_id, 404 .id_table = ds278x_id,
315}; 405};
316 406
317static int __init ds2782_init(void) 407static int __init ds278x_init(void)
318{ 408{
319 return i2c_add_driver(&ds2782_battery_driver); 409 return i2c_add_driver(&ds278x_battery_driver);
320} 410}
321module_init(ds2782_init); 411module_init(ds278x_init);
322 412
323static void __exit ds2782_exit(void) 413static void __exit ds278x_exit(void)
324{ 414{
325 i2c_del_driver(&ds2782_battery_driver); 415 i2c_del_driver(&ds278x_battery_driver);
326} 416}
327module_exit(ds2782_exit); 417module_exit(ds278x_exit);
328 418
329MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com>"); 419MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com>");
330MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauge IC driver"); 420MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauge IC driver");
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index a232de6a5703..69f8aa3a6a4b 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -404,6 +404,13 @@ static int usb_wakeup_enabled;
404 404
405static int pda_power_suspend(struct platform_device *pdev, pm_message_t state) 405static int pda_power_suspend(struct platform_device *pdev, pm_message_t state)
406{ 406{
407 if (pdata->suspend) {
408 int ret = pdata->suspend(state);
409
410 if (ret)
411 return ret;
412 }
413
407 if (device_may_wakeup(&pdev->dev)) { 414 if (device_may_wakeup(&pdev->dev)) {
408 if (ac_irq) 415 if (ac_irq)
409 ac_wakeup_enabled = !enable_irq_wake(ac_irq->start); 416 ac_wakeup_enabled = !enable_irq_wake(ac_irq->start);
@@ -423,6 +430,9 @@ static int pda_power_resume(struct platform_device *pdev)
423 disable_irq_wake(ac_irq->start); 430 disable_irq_wake(ac_irq->start);
424 } 431 }
425 432
433 if (pdata->resume)
434 return pdata->resume();
435
426 return 0; 436 return 0;
427} 437}
428#else 438#else
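
The pda_power hunks give board code first refusal on power transitions: an optional pdata->suspend() hook runs before the wakeup-IRQ bookkeeping and can veto the suspend by returning an error, and pdata->resume() runs after the IRQs are rearmed. Sketched from the board side, assuming the suspend/resume fields these hunks add to struct pda_power_pdata (the board_ callbacks are invented):

#include <linux/pda_power.h>
#include <linux/pm.h>

static int board_suspend(pm_message_t state)
{
	/* e.g. park a charger GPIO; nonzero return aborts the suspend */
	return 0;
}

static int board_resume(void)
{
	/* undo whatever board_suspend() did */
	return 0;
}

static struct pda_power_pdata power_pdata = {
	.suspend	= board_suspend,
	.resume		= board_resume,
	/* the usual is_ac_online etc. callbacks would sit here too */
};
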
diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
index f38ba482be75..018de2b26998 100644
--- a/drivers/power/power_supply.h
+++ b/drivers/power/power_supply.h
@@ -12,15 +12,12 @@
12 12
13#ifdef CONFIG_SYSFS 13#ifdef CONFIG_SYSFS
14 14
15extern int power_supply_create_attrs(struct power_supply *psy); 15extern void power_supply_init_attrs(struct device_type *dev_type);
16extern void power_supply_remove_attrs(struct power_supply *psy);
17extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env); 16extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
18 17
19#else 18#else
20 19
21static inline int power_supply_create_attrs(struct power_supply *psy) 20static inline void power_supply_init_attrs(struct device_type *dev_type) {}
22{ return 0; }
23static inline void power_supply_remove_attrs(struct power_supply *psy) {}
24#define power_supply_uevent NULL 21#define power_supply_uevent NULL
25 22
26#endif /* CONFIG_SYSFS */ 23#endif /* CONFIG_SYSFS */
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index cce75b40b435..91606bb55318 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/slab.h>
16#include <linux/device.h> 17#include <linux/device.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include <linux/power_supply.h> 19#include <linux/power_supply.h>
@@ -22,6 +23,8 @@
22struct class *power_supply_class; 23struct class *power_supply_class;
23EXPORT_SYMBOL_GPL(power_supply_class); 24EXPORT_SYMBOL_GPL(power_supply_class);
24 25
26static struct device_type power_supply_dev_type;
27
25static int __power_supply_changed_work(struct device *dev, void *data) 28static int __power_supply_changed_work(struct device *dev, void *data)
26{ 29{
27 struct power_supply *psy = (struct power_supply *)data; 30 struct power_supply *psy = (struct power_supply *)data;
@@ -144,22 +147,39 @@ struct power_supply *power_supply_get_by_name(char *name)
144} 147}
145EXPORT_SYMBOL_GPL(power_supply_get_by_name); 148EXPORT_SYMBOL_GPL(power_supply_get_by_name);
146 149
150static void power_supply_dev_release(struct device *dev)
151{
152 pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
153 kfree(dev);
154}
155
147int power_supply_register(struct device *parent, struct power_supply *psy) 156int power_supply_register(struct device *parent, struct power_supply *psy)
148{ 157{
149 int rc = 0; 158 struct device *dev;
159 int rc;
150 160
151 psy->dev = device_create(power_supply_class, parent, 0, psy, 161 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
152 "%s", psy->name); 162 if (!dev)
153 if (IS_ERR(psy->dev)) { 163 return -ENOMEM;
154 rc = PTR_ERR(psy->dev);
155 goto dev_create_failed;
156 }
157 164
158 INIT_WORK(&psy->changed_work, power_supply_changed_work); 165 device_initialize(dev);
159 166
160 rc = power_supply_create_attrs(psy); 167 dev->class = power_supply_class;
168 dev->type = &power_supply_dev_type;
169 dev->parent = parent;
170 dev->release = power_supply_dev_release;
171 dev_set_drvdata(dev, psy);
172 psy->dev = dev;
173
174 rc = kobject_set_name(&dev->kobj, "%s", psy->name);
175 if (rc)
176 goto kobject_set_name_failed;
177
178 rc = device_add(dev);
161 if (rc) 179 if (rc)
162 goto create_attrs_failed; 180 goto device_add_failed;
181
182 INIT_WORK(&psy->changed_work, power_supply_changed_work);
163 183
164 rc = power_supply_create_triggers(psy); 184 rc = power_supply_create_triggers(psy);
165 if (rc) 185 if (rc)
@@ -170,10 +190,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
170 goto success; 190 goto success;
171 191
172create_triggers_failed: 192create_triggers_failed:
173 power_supply_remove_attrs(psy);
174create_attrs_failed:
175 device_unregister(psy->dev); 193 device_unregister(psy->dev);
176dev_create_failed: 194kobject_set_name_failed:
195device_add_failed:
196 kfree(dev);
177success: 197success:
178 return rc; 198 return rc;
179} 199}
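
power_supply_register() now builds its struct device by hand, splitting device_create() into device_initialize() plus device_add(); that is what lets it attach a device_type (carrying the attribute groups) and a release callback that frees the allocation. One caveat worth noting: once device_initialize() has run, the canonical error path is put_device(), which routes the free through release(). A sketch of the idiom with that cleanup (names invented):

#include <linux/device.h>
#include <linux/slab.h>

static void foo_dev_release(struct device *dev)
{
	kfree(dev);		/* pairs with the kzalloc() below */
}

static int foo_register(struct device *parent)
{
	struct device *dev;
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	device_initialize(dev);	/* refcounted from this point on */
	dev->parent = parent;
	dev->release = foo_dev_release;

	rc = kobject_set_name(&dev->kobj, "foo");
	if (rc)
		goto out_put;

	rc = device_add(dev);
	if (rc)
		goto out_put;
	return 0;

out_put:
	put_device(dev);	/* the free happens in release() */
	return rc;
}
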
@@ -183,7 +203,6 @@ void power_supply_unregister(struct power_supply *psy)
183{ 203{
184 flush_scheduled_work(); 204 flush_scheduled_work();
185 power_supply_remove_triggers(psy); 205 power_supply_remove_triggers(psy);
186 power_supply_remove_attrs(psy);
187 device_unregister(psy->dev); 206 device_unregister(psy->dev);
188} 207}
189EXPORT_SYMBOL_GPL(power_supply_unregister); 208EXPORT_SYMBOL_GPL(power_supply_unregister);
@@ -196,6 +215,7 @@ static int __init power_supply_class_init(void)
196 return PTR_ERR(power_supply_class); 215 return PTR_ERR(power_supply_class);
197 216
198 power_supply_class->dev_uevent = power_supply_uevent; 217 power_supply_class->dev_uevent = power_supply_uevent;
218 power_supply_init_attrs(&power_supply_dev_type);
199 219
200 return 0; 220 return 0;
201} 221}
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 5b6e352ac7c1..9d30eeb8c810 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -31,9 +31,9 @@
31 31
32#define POWER_SUPPLY_ATTR(_name) \ 32#define POWER_SUPPLY_ATTR(_name) \
33{ \ 33{ \
34 .attr = { .name = #_name, .mode = 0444 }, \ 34 .attr = { .name = #_name }, \
35 .show = power_supply_show_property, \ 35 .show = power_supply_show_property, \
36 .store = NULL, \ 36 .store = power_supply_store_property, \
37} 37}
38 38
39static struct device_attribute power_supply_attrs[]; 39static struct device_attribute power_supply_attrs[];
@@ -41,6 +41,9 @@ static struct device_attribute power_supply_attrs[];
41static ssize_t power_supply_show_property(struct device *dev, 41static ssize_t power_supply_show_property(struct device *dev,
42 struct device_attribute *attr, 42 struct device_attribute *attr,
43 char *buf) { 43 char *buf) {
44 static char *type_text[] = {
45 "Battery", "UPS", "Mains", "USB"
46 };
44 static char *status_text[] = { 47 static char *status_text[] = {
45 "Unknown", "Charging", "Discharging", "Not charging", "Full" 48 "Unknown", "Charging", "Discharging", "Not charging", "Full"
46 }; 49 };
@@ -58,12 +61,15 @@ static ssize_t power_supply_show_property(struct device *dev,
58 static char *capacity_level_text[] = { 61 static char *capacity_level_text[] = {
59 "Unknown", "Critical", "Low", "Normal", "High", "Full" 62 "Unknown", "Critical", "Low", "Normal", "High", "Full"
60 }; 63 };
61 ssize_t ret; 64 ssize_t ret = 0;
62 struct power_supply *psy = dev_get_drvdata(dev); 65 struct power_supply *psy = dev_get_drvdata(dev);
63 const ptrdiff_t off = attr - power_supply_attrs; 66 const ptrdiff_t off = attr - power_supply_attrs;
64 union power_supply_propval value; 67 union power_supply_propval value;
65 68
66 ret = psy->get_property(psy, off, &value); 69 if (off == POWER_SUPPLY_PROP_TYPE)
70 value.intval = psy->type;
71 else
72 ret = psy->get_property(psy, off, &value);
67 73
68 if (ret < 0) { 74 if (ret < 0) {
69 if (ret == -ENODATA) 75 if (ret == -ENODATA)
@@ -85,12 +91,37 @@ static ssize_t power_supply_show_property(struct device *dev,
85 return sprintf(buf, "%s\n", technology_text[value.intval]); 91 return sprintf(buf, "%s\n", technology_text[value.intval]);
86 else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL) 92 else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL)
87 return sprintf(buf, "%s\n", capacity_level_text[value.intval]); 93 return sprintf(buf, "%s\n", capacity_level_text[value.intval]);
94 else if (off == POWER_SUPPLY_PROP_TYPE)
95 return sprintf(buf, "%s\n", type_text[value.intval]);
88 else if (off >= POWER_SUPPLY_PROP_MODEL_NAME) 96 else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
89 return sprintf(buf, "%s\n", value.strval); 97 return sprintf(buf, "%s\n", value.strval);
90 98
91 return sprintf(buf, "%d\n", value.intval); 99 return sprintf(buf, "%d\n", value.intval);
92} 100}
93 101
102static ssize_t power_supply_store_property(struct device *dev,
103 struct device_attribute *attr,
104 const char *buf, size_t count) {
105 ssize_t ret;
106 struct power_supply *psy = dev_get_drvdata(dev);
107 const ptrdiff_t off = attr - power_supply_attrs;
108 union power_supply_propval value;
109 long long_val;
110
111 /* TODO: support other types than int */
112 ret = strict_strtol(buf, 10, &long_val);
113 if (ret < 0)
114 return ret;
115
116 value.intval = long_val;
117
118 ret = psy->set_property(psy, off, &value);
119 if (ret < 0)
120 return ret;
121
122 return count;
123}
124
94/* Must be in the same order as POWER_SUPPLY_PROP_* */ 125/* Must be in the same order as POWER_SUPPLY_PROP_* */
95static struct device_attribute power_supply_attrs[] = { 126static struct device_attribute power_supply_attrs[] = {
96 /* Properties of type `int' */ 127 /* Properties of type `int' */
@@ -132,67 +163,61 @@ static struct device_attribute power_supply_attrs[] = {
132 POWER_SUPPLY_ATTR(time_to_empty_avg), 163 POWER_SUPPLY_ATTR(time_to_empty_avg),
133 POWER_SUPPLY_ATTR(time_to_full_now), 164 POWER_SUPPLY_ATTR(time_to_full_now),
134 POWER_SUPPLY_ATTR(time_to_full_avg), 165 POWER_SUPPLY_ATTR(time_to_full_avg),
166 POWER_SUPPLY_ATTR(type),
135 /* Properties of type `const char *' */ 167 /* Properties of type `const char *' */
136 POWER_SUPPLY_ATTR(model_name), 168 POWER_SUPPLY_ATTR(model_name),
137 POWER_SUPPLY_ATTR(manufacturer), 169 POWER_SUPPLY_ATTR(manufacturer),
138 POWER_SUPPLY_ATTR(serial_number), 170 POWER_SUPPLY_ATTR(serial_number),
139}; 171};
140 172
141static ssize_t power_supply_show_static_attrs(struct device *dev, 173static struct attribute *
142 struct device_attribute *attr, 174__power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1];
143 char *buf) { 175
144 static char *type_text[] = { "Battery", "UPS", "Mains", "USB" }; 176static mode_t power_supply_attr_is_visible(struct kobject *kobj,
177 struct attribute *attr,
178 int attrno)
179{
180 struct device *dev = container_of(kobj, struct device, kobj);
145 struct power_supply *psy = dev_get_drvdata(dev); 181 struct power_supply *psy = dev_get_drvdata(dev);
182 mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
183 int i;
146 184
147 return sprintf(buf, "%s\n", type_text[psy->type]); 185 if (attrno == POWER_SUPPLY_PROP_TYPE)
148} 186 return mode;
149 187
150static struct device_attribute power_supply_static_attrs[] = { 188 for (i = 0; i < psy->num_properties; i++) {
151 __ATTR(type, 0444, power_supply_show_static_attrs, NULL), 189 int property = psy->properties[i];
152};
153 190
154int power_supply_create_attrs(struct power_supply *psy) 191 if (property == attrno) {
155{ 192 if (psy->property_is_writeable &&
156 int rc = 0; 193 psy->property_is_writeable(psy, property) > 0)
157 int i, j; 194 mode |= S_IWUSR;
158
159 for (i = 0; i < ARRAY_SIZE(power_supply_static_attrs); i++) {
160 rc = device_create_file(psy->dev,
161 &power_supply_static_attrs[i]);
162 if (rc)
163 goto statics_failed;
164 }
165 195
166 for (j = 0; j < psy->num_properties; j++) { 196 return mode;
167 rc = device_create_file(psy->dev, 197 }
168 &power_supply_attrs[psy->properties[j]]);
169 if (rc)
170 goto dynamics_failed;
171 } 198 }
172 199
173 goto succeed; 200 return 0;
174
175dynamics_failed:
176 while (j--)
177 device_remove_file(psy->dev,
178 &power_supply_attrs[psy->properties[j]]);
179statics_failed:
180 while (i--)
181 device_remove_file(psy->dev, &power_supply_static_attrs[i]);
182succeed:
183 return rc;
184} 201}
185 202
186void power_supply_remove_attrs(struct power_supply *psy) 203static struct attribute_group power_supply_attr_group = {
204 .attrs = __power_supply_attrs,
205 .is_visible = power_supply_attr_is_visible,
206};
207
208static const struct attribute_group *power_supply_attr_groups[] = {
209 &power_supply_attr_group,
210 NULL,
211};
212
213void power_supply_init_attrs(struct device_type *dev_type)
187{ 214{
188 int i; 215 int i;
189 216
190 for (i = 0; i < ARRAY_SIZE(power_supply_static_attrs); i++) 217 dev_type->groups = power_supply_attr_groups;
191 device_remove_file(psy->dev, &power_supply_static_attrs[i]);
192 218
193 for (i = 0; i < psy->num_properties; i++) 219 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
194 device_remove_file(psy->dev, 220 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
195 &power_supply_attrs[psy->properties[i]]);
196} 221}
197 222
198static char *kstruprdup(const char *str, gfp_t gfp) 223static char *kstruprdup(const char *str, gfp_t gfp)
@@ -236,36 +261,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
236 if (!prop_buf) 261 if (!prop_buf)
237 return -ENOMEM; 262 return -ENOMEM;
238 263
239 for (j = 0; j < ARRAY_SIZE(power_supply_static_attrs); j++) {
240 struct device_attribute *attr;
241 char *line;
242
243 attr = &power_supply_static_attrs[j];
244
245 ret = power_supply_show_static_attrs(dev, attr, prop_buf);
246 if (ret < 0)
247 goto out;
248
249 line = strchr(prop_buf, '\n');
250 if (line)
251 *line = 0;
252
253 attrname = kstruprdup(attr->attr.name, GFP_KERNEL);
254 if (!attrname) {
255 ret = -ENOMEM;
256 goto out;
257 }
258
259 dev_dbg(dev, "Static prop %s=%s\n", attrname, prop_buf);
260
261 ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
262 kfree(attrname);
263 if (ret)
264 goto out;
265 }
266
267 dev_dbg(dev, "%zd dynamic props\n", psy->num_properties);
268
269 for (j = 0; j < psy->num_properties; j++) { 264 for (j = 0; j < psy->num_properties; j++) {
270 struct device_attribute *attr; 265 struct device_attribute *attr;
271 char *line; 266 char *line;
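
The sysfs rework replaces per-device device_create_file() loops with one static attribute group hung off the device type: every possible attribute is declared once, and the group's is_visible() callback is consulted by the sysfs core at device_add() time to decide, per device and per attribute, whether the file appears and with which mode bits (power_supply_attr_is_visible() adds S_IWUSR exactly when property_is_writeable() says so). The mechanism in miniature, with invented attribute names:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t alpha_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "1\n");
}

static DEVICE_ATTR(alpha, S_IRUGO, alpha_show, NULL);
static DEVICE_ATTR(beta, S_IRUGO, alpha_show, NULL);

static struct attribute *foo_attrs[] = {
	&dev_attr_alpha.attr,
	&dev_attr_beta.attr,
	NULL,
};

static mode_t foo_attr_is_visible(struct kobject *kobj,
				  struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	/* returning 0 hides the file on this device; otherwise the
	 * returned mode bits override the attribute's default */
	if (attr == &dev_attr_beta.attr && !dev_get_drvdata(dev))
		return 0;
	return attr->mode;
}

static const struct attribute_group foo_group = {
	.attrs		= foo_attrs,
	.is_visible	= foo_attr_is_visible,
};

static const struct attribute_group *foo_groups[] = { &foo_group, NULL };
/* assigned once to a device_type, as power_supply_init_attrs() does */
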
diff --git a/drivers/power/test_power.c b/drivers/power/test_power.c
new file mode 100644
index 000000000000..0cd9f67d33e5
--- /dev/null
+++ b/drivers/power/test_power.c
@@ -0,0 +1,163 @@
1/*
2 * Power supply driver for testing.
3 *
4 * Copyright 2010 Anton Vorontsov <cbouatmailru@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/power_supply.h>
14#include <linux/errno.h>
15#include <linux/delay.h>
16#include <linux/vermagic.h>
17
18static int test_power_ac_online = 1;
19static int test_power_battery_status = POWER_SUPPLY_STATUS_CHARGING;
20
21static int test_power_get_ac_property(struct power_supply *psy,
22 enum power_supply_property psp,
23 union power_supply_propval *val)
24{
25 switch (psp) {
26 case POWER_SUPPLY_PROP_ONLINE:
27 val->intval = test_power_ac_online;
28 break;
29 default:
30 return -EINVAL;
31 }
32 return 0;
33}
34
35static int test_power_get_battery_property(struct power_supply *psy,
36 enum power_supply_property psp,
37 union power_supply_propval *val)
38{
39 switch (psp) {
40 case POWER_SUPPLY_PROP_MODEL_NAME:
41 val->strval = "Test battery";
42 break;
43 case POWER_SUPPLY_PROP_MANUFACTURER:
44 val->strval = "Linux";
45 break;
46 case POWER_SUPPLY_PROP_SERIAL_NUMBER:
47 val->strval = UTS_RELEASE;
48 break;
49 case POWER_SUPPLY_PROP_STATUS:
50 val->intval = test_power_battery_status;
51 break;
52 case POWER_SUPPLY_PROP_CHARGE_TYPE:
53 val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
54 break;
55 case POWER_SUPPLY_PROP_HEALTH:
56 val->intval = POWER_SUPPLY_HEALTH_GOOD;
57 break;
58 case POWER_SUPPLY_PROP_TECHNOLOGY:
59 val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
60 break;
61 case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
62 val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
63 break;
64 case POWER_SUPPLY_PROP_CAPACITY:
65 val->intval = 50;
66 break;
67 case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
68 case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
69 val->intval = 3600;
70 break;
71 default:
72 pr_info("%s: some properties deliberately report errors.\n",
73 __func__);
74 return -EINVAL;
75 }
76 return 0;
77}
78
79static enum power_supply_property test_power_ac_props[] = {
80 POWER_SUPPLY_PROP_ONLINE,
81};
82
83static enum power_supply_property test_power_battery_props[] = {
84 POWER_SUPPLY_PROP_STATUS,
85 POWER_SUPPLY_PROP_CHARGE_TYPE,
86 POWER_SUPPLY_PROP_HEALTH,
87 POWER_SUPPLY_PROP_TECHNOLOGY,
88 POWER_SUPPLY_PROP_CHARGE_FULL,
89 POWER_SUPPLY_PROP_CHARGE_EMPTY,
90 POWER_SUPPLY_PROP_CAPACITY,
91 POWER_SUPPLY_PROP_CAPACITY_LEVEL,
92 POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
93 POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
94 POWER_SUPPLY_PROP_MODEL_NAME,
95 POWER_SUPPLY_PROP_MANUFACTURER,
96 POWER_SUPPLY_PROP_SERIAL_NUMBER,
97};
98
99static char *test_power_ac_supplied_to[] = {
100 "test_battery",
101};
102
103static struct power_supply test_power_supplies[] = {
104 {
105 .name = "test_ac",
106 .type = POWER_SUPPLY_TYPE_MAINS,
107 .supplied_to = test_power_ac_supplied_to,
108 .num_supplicants = ARRAY_SIZE(test_power_ac_supplied_to),
109 .properties = test_power_ac_props,
110 .num_properties = ARRAY_SIZE(test_power_ac_props),
111 .get_property = test_power_get_ac_property,
112 }, {
113 .name = "test_battery",
114 .type = POWER_SUPPLY_TYPE_BATTERY,
115 .properties = test_power_battery_props,
116 .num_properties = ARRAY_SIZE(test_power_battery_props),
117 .get_property = test_power_get_battery_property,
118 },
119};
120
121static int __init test_power_init(void)
122{
123 int i;
124 int ret;
125
126 for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) {
127 ret = power_supply_register(NULL, &test_power_supplies[i]);
128 if (ret) {
129 pr_err("%s: failed to register %s\n", __func__,
130 test_power_supplies[i].name);
131 goto failed;
132 }
133 }
134
135 return 0;
136failed:
137 while (--i >= 0)
138 power_supply_unregister(&test_power_supplies[i]);
139 return ret;
140}
141module_init(test_power_init);
142
143static void __exit test_power_exit(void)
144{
145 int i;
146
147 /* Let's see how we handle changes... */
148 test_power_ac_online = 0;
149 test_power_battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
150 for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++)
151 power_supply_changed(&test_power_supplies[i]);
152 pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n",
153 __func__);
154 ssleep(10);
155
156 for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++)
157 power_supply_unregister(&test_power_supplies[i]);
158}
159module_exit(test_power_exit);
160
161MODULE_DESCRIPTION("Power supply driver for testing");
162MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
163MODULE_LICENSE("GPL");
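
Once loaded, test_power registers test_ac and test_battery under /sys/class/power_supply, which makes it convenient for exercising userspace battery monitors without hardware. A minimal userspace reader for the fake battery (the path follows the standard power-supply class layout; error handling is trimmed):

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/power_supply/test_battery/capacity", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("capacity: %s", buf);	/* the driver reports 50 */
	fclose(f);
	return 0;
}
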
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index 2eab35aab311..ee04936b2db5 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -61,7 +61,7 @@ static unsigned long tosa_read_bat(struct tosa_bat *bat)
61 mutex_lock(&bat_lock); 61 mutex_lock(&bat_lock);
62 gpio_set_value(bat->gpio_bat, 1); 62 gpio_set_value(bat->gpio_bat, 1);
63 msleep(5); 63 msleep(5);
64 value = wm97xx_read_aux_adc(bat->psy.dev->parent->driver_data, 64 value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy.dev->parent),
65 bat->adc_bat); 65 bat->adc_bat);
66 gpio_set_value(bat->gpio_bat, 0); 66 gpio_set_value(bat->gpio_bat, 0);
67 mutex_unlock(&bat_lock); 67 mutex_unlock(&bat_lock);
@@ -81,7 +81,7 @@ static unsigned long tosa_read_temp(struct tosa_bat *bat)
81 mutex_lock(&bat_lock); 81 mutex_lock(&bat_lock);
82 gpio_set_value(bat->gpio_temp, 1); 82 gpio_set_value(bat->gpio_temp, 1);
83 msleep(5); 83 msleep(5);
84 value = wm97xx_read_aux_adc(bat->psy.dev->parent->driver_data, 84 value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy.dev->parent),
85 bat->adc_temp); 85 bat->adc_temp);
86 gpio_set_value(bat->gpio_temp, 0); 86 gpio_set_value(bat->gpio_temp, 0);
87 mutex_unlock(&bat_lock); 87 mutex_unlock(&bat_lock);
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 875c4d0f776b..fbcc36dae470 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -537,9 +537,9 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
537 goto err_battery; 537 goto err_battery;
538 538
539 irq = platform_get_irq_byname(pdev, "SYSLO"); 539 irq = platform_get_irq_byname(pdev, "SYSLO");
540 ret = wm831x_request_irq(wm831x, irq, wm831x_syslo_irq, 540 ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
541 IRQF_TRIGGER_RISING, "SYSLO", 541 IRQF_TRIGGER_RISING, "System power low",
542 power); 542 power);
543 if (ret != 0) { 543 if (ret != 0) {
544 dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n", 544 dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
545 irq, ret); 545 irq, ret);
@@ -547,9 +547,9 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
547 } 547 }
548 548
549 irq = platform_get_irq_byname(pdev, "PWR SRC"); 549 irq = platform_get_irq_byname(pdev, "PWR SRC");
550 ret = wm831x_request_irq(wm831x, irq, wm831x_pwr_src_irq, 550 ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
551 IRQF_TRIGGER_RISING, "Power source", 551 IRQF_TRIGGER_RISING, "Power source",
552 power); 552 power);
553 if (ret != 0) { 553 if (ret != 0) {
554 dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n", 554 dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n",
555 irq, ret); 555 irq, ret);
@@ -558,10 +558,10 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
558 558
559 for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) { 559 for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
560 irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); 560 irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
561 ret = wm831x_request_irq(wm831x, irq, wm831x_bat_irq, 561 ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
562 IRQF_TRIGGER_RISING, 562 IRQF_TRIGGER_RISING,
563 wm831x_bat_irqs[i], 563 wm831x_bat_irqs[i],
564 power); 564 power);
565 if (ret != 0) { 565 if (ret != 0) {
566 dev_err(&pdev->dev, 566 dev_err(&pdev->dev,
567 "Failed to request %s IRQ %d: %d\n", 567 "Failed to request %s IRQ %d: %d\n",
@@ -575,13 +575,13 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
575err_bat_irq: 575err_bat_irq:
576 for (; i >= 0; i--) { 576 for (; i >= 0; i--) {
577 irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); 577 irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
578 wm831x_free_irq(wm831x, irq, power); 578 free_irq(irq, power);
579 } 579 }
580 irq = platform_get_irq_byname(pdev, "PWR SRC"); 580 irq = platform_get_irq_byname(pdev, "PWR SRC");
581 wm831x_free_irq(wm831x, irq, power); 581 free_irq(irq, power);
582err_syslo: 582err_syslo:
583 irq = platform_get_irq_byname(pdev, "SYSLO"); 583 irq = platform_get_irq_byname(pdev, "SYSLO");
584 wm831x_free_irq(wm831x, irq, power); 584 free_irq(irq, power);
585err_usb: 585err_usb:
586 power_supply_unregister(usb); 586 power_supply_unregister(usb);
587err_battery: 587err_battery:
@@ -596,19 +596,18 @@ err_kmalloc:
596static __devexit int wm831x_power_remove(struct platform_device *pdev) 596static __devexit int wm831x_power_remove(struct platform_device *pdev)
597{ 597{
598 struct wm831x_power *wm831x_power = platform_get_drvdata(pdev); 598 struct wm831x_power *wm831x_power = platform_get_drvdata(pdev);
599 struct wm831x *wm831x = wm831x_power->wm831x;
600 int irq, i; 599 int irq, i;
601 600
602 for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) { 601 for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
603 irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); 602 irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
604 wm831x_free_irq(wm831x, irq, wm831x_power); 603 free_irq(irq, wm831x_power);
605 } 604 }
606 605
607 irq = platform_get_irq_byname(pdev, "PWR SRC"); 606 irq = platform_get_irq_byname(pdev, "PWR SRC");
608 wm831x_free_irq(wm831x, irq, wm831x_power); 607 free_irq(irq, wm831x_power);
609 608
610 irq = platform_get_irq_byname(pdev, "SYSLO"); 609 irq = platform_get_irq_byname(pdev, "SYSLO");
611 wm831x_free_irq(wm831x, irq, wm831x_power); 610 free_irq(irq, wm831x_power);
612 611
613 power_supply_unregister(&wm831x_power->battery); 612 power_supply_unregister(&wm831x_power->battery);
614 power_supply_unregister(&wm831x_power->wall); 613 power_supply_unregister(&wm831x_power->wall);
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 94c70650aafc..4e8afce0c818 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -308,6 +308,9 @@ static void __exit wm97xx_bat_exit(void)
308 platform_driver_unregister(&wm97xx_bat_driver); 308 platform_driver_unregister(&wm97xx_bat_driver);
309} 309}
310 310
311/* This interface is deprecated, as is linux/wm97xx_batt.h */
312void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data);
313
311void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data) 314void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data)
312{ 315{
313 gpdata = data; 316 gpdata = data;
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
new file mode 100644
index 000000000000..9cca465436e3
--- /dev/null
+++ b/drivers/power/z2_battery.c
@@ -0,0 +1,328 @@
1/*
2 * Battery measurement code for Zipit Z2
3 *
4 * Copyright (C) 2009 Peter Edwards <sweetlilmre@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/power_supply.h>
17#include <linux/i2c.h>
18#include <linux/spinlock.h>
19#include <linux/interrupt.h>
20#include <linux/gpio.h>
22#include <linux/irq.h>
23#include <asm/irq.h>
24#include <asm/mach/irq.h>
25#include <linux/z2_battery.h>
26
27#define Z2_DEFAULT_NAME "Z2"
28
29struct z2_charger {
30 struct z2_battery_info *info;
31 int bat_status;
32 struct i2c_client *client;
33 struct power_supply batt_ps;
34 struct mutex work_lock;
35 struct work_struct bat_work;
36};
37
38static unsigned long z2_read_bat(struct z2_charger *charger)
39{
40 int data;
41 data = i2c_smbus_read_byte_data(charger->client,
42 charger->info->batt_I2C_reg);
43 if (data < 0)
44 return 0;
45
46 return data * charger->info->batt_mult / charger->info->batt_div;
47}
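
z2_read_bat() converts the raw 8-bit SMBus reading to microvolts with a board-specific multiplier and divider from platform data, and deliberately returns 0 rather than an error code when the I2C read fails. The scaling, checked standalone with stand-in values (the real batt_mult and batt_div come from the Zipit board file):

#include <stdio.h>

int main(void)
{
	int data = 150;			/* hypothetical 8-bit ADC reading */
	int batt_mult = 25000;		/* stand-in platform-data scaling */
	int batt_div = 1;

	/* the same integer expression z2_read_bat() evaluates */
	printf("%d uV\n", data * batt_mult / batt_div);
	return 0;			/* 3750000 uV, i.e. 3.75 V */
}
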
48
49static int z2_batt_get_property(struct power_supply *batt_ps,
50 enum power_supply_property psp,
51 union power_supply_propval *val)
52{
53 struct z2_charger *charger = container_of(batt_ps, struct z2_charger,
54 batt_ps);
55 struct z2_battery_info *info = charger->info;
56
57 switch (psp) {
58 case POWER_SUPPLY_PROP_STATUS:
59 val->intval = charger->bat_status;
60 break;
61 case POWER_SUPPLY_PROP_TECHNOLOGY:
62 val->intval = info->batt_tech;
63 break;
64 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
65 if (info->batt_I2C_reg >= 0)
66 val->intval = z2_read_bat(charger);
67 else
68 return -EINVAL;
69 break;
70 case POWER_SUPPLY_PROP_VOLTAGE_MAX:
71 if (info->max_voltage >= 0)
72 val->intval = info->max_voltage;
73 else
74 return -EINVAL;
75 break;
76 case POWER_SUPPLY_PROP_VOLTAGE_MIN:
77 if (info->min_voltage >= 0)
78 val->intval = info->min_voltage;
79 else
80 return -EINVAL;
81 break;
82 case POWER_SUPPLY_PROP_PRESENT:
83 val->intval = 1;
84 break;
85 default:
86 return -EINVAL;
87 }
88
89 return 0;
90}
91
92static void z2_batt_ext_power_changed(struct power_supply *batt_ps)
93{
94 struct z2_charger *charger = container_of(batt_ps, struct z2_charger,
95 batt_ps);
96 schedule_work(&charger->bat_work);
97}
98
99static void z2_batt_update(struct z2_charger *charger)
100{
101 int old_status = charger->bat_status;
102 struct z2_battery_info *info;
103
104 info = charger->info;
105
106 mutex_lock(&charger->work_lock);
107
108 charger->bat_status = (info->charge_gpio >= 0) ?
109 (gpio_get_value(info->charge_gpio) ?
110 POWER_SUPPLY_STATUS_CHARGING :
111 POWER_SUPPLY_STATUS_DISCHARGING) :
112 POWER_SUPPLY_STATUS_UNKNOWN;
113
114 if (old_status != charger->bat_status) {
115 pr_debug("%s: %i -> %i\n", charger->batt_ps.name, old_status,
116 charger->bat_status);
117 power_supply_changed(&charger->batt_ps);
118 }
119
120 mutex_unlock(&charger->work_lock);
121}
122
123static void z2_batt_work(struct work_struct *work)
124{
125 struct z2_charger *charger;
126 charger = container_of(work, struct z2_charger, bat_work);
127 z2_batt_update(charger);
128}
129
130static irqreturn_t z2_charge_switch_irq(int irq, void *devid)
131{
132 struct z2_charger *charger = devid;
133 schedule_work(&charger->bat_work);
134 return IRQ_HANDLED;
135}
136
137static int z2_batt_ps_init(struct z2_charger *charger, int props)
138{
139 int i = 0;
140 enum power_supply_property *prop;
141 struct z2_battery_info *info = charger->info;
142
143 if (info->batt_tech >= 0)
144 props++; /* POWER_SUPPLY_PROP_TECHNOLOGY */
145 if (info->batt_I2C_reg >= 0)
146 props++; /* POWER_SUPPLY_PROP_VOLTAGE_NOW */
147 if (info->max_voltage >= 0)
148 props++; /* POWER_SUPPLY_PROP_VOLTAGE_MAX */
149 if (info->min_voltage >= 0)
150 props++; /* POWER_SUPPLY_PROP_VOLTAGE_MIN */
151
152 prop = kzalloc(props * sizeof(*prop), GFP_KERNEL);
153 if (!prop)
154 return -ENOMEM;
155
156 prop[i++] = POWER_SUPPLY_PROP_PRESENT;
157 if (info->charge_gpio >= 0)
158 prop[i++] = POWER_SUPPLY_PROP_STATUS;
159 if (info->batt_tech >= 0)
160 prop[i++] = POWER_SUPPLY_PROP_TECHNOLOGY;
161 if (info->batt_I2C_reg >= 0)
162 prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_NOW;
163 if (info->max_voltage >= 0)
164 prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MAX;
165 if (info->min_voltage >= 0)
166 prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MIN;
167
168 if (!info->batt_name) {
169 dev_info(&charger->client->dev,
170 "Please consider setting proper battery "
171 "name in platform definition file, falling "
172 "back to name \" Z2_DEFAULT_NAME \"\n");
173 charger->batt_ps.name = Z2_DEFAULT_NAME;
174 } else
175 charger->batt_ps.name = info->batt_name;
176
177 charger->batt_ps.properties = prop;
178 charger->batt_ps.num_properties = props;
179 charger->batt_ps.type = POWER_SUPPLY_TYPE_BATTERY;
180 charger->batt_ps.get_property = z2_batt_get_property;
181 charger->batt_ps.external_power_changed = z2_batt_ext_power_changed;
182 charger->batt_ps.use_for_apm = 1;
183
184 return 0;
185}
186
187static int __devinit z2_batt_probe(struct i2c_client *client,
188 const struct i2c_device_id *id)
189{
190 int ret = 0;
191 int props = 1; /* POWER_SUPPLY_PROP_PRESENT */
192 struct z2_charger *charger;
193 struct z2_battery_info *info = client->dev.platform_data;
194
195 if (info == NULL) {
196 dev_err(&client->dev,
197 "Please set platform device platform_data"
198 " to a valid z2_battery_info pointer!\n");
199 return -EINVAL;
200 }
201
202 charger = kzalloc(sizeof(*charger), GFP_KERNEL);
203 if (charger == NULL)
204 return -ENOMEM;
205
206 charger->bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
207 charger->info = info;
208 charger->client = client;
209 i2c_set_clientdata(client, charger);
210
211 mutex_init(&charger->work_lock);
212
213 if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio)) {
214 ret = gpio_request(info->charge_gpio, "BATT CHRG");
215 if (ret)
216 goto err;
217
218 ret = gpio_direction_input(info->charge_gpio);
219 if (ret)
220 goto err2;
221
222 set_irq_type(gpio_to_irq(info->charge_gpio),
223 IRQ_TYPE_EDGE_BOTH);
224 ret = request_irq(gpio_to_irq(info->charge_gpio),
225 z2_charge_switch_irq, IRQF_DISABLED,
226 "AC Detect", charger);
227 if (ret)
228			goto err2;
229 }
230
231 ret = z2_batt_ps_init(charger, props);
232 if (ret)
233 goto err3;
234
235 INIT_WORK(&charger->bat_work, z2_batt_work);
236
237 ret = power_supply_register(&client->dev, &charger->batt_ps);
238 if (ret)
239 goto err4;
240
241 schedule_work(&charger->bat_work);
242
243 return 0;
244
245err4:
246 kfree(charger->batt_ps.properties);
247err3:
248 if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio))
249 free_irq(gpio_to_irq(info->charge_gpio), charger);
250err2:
251 if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio))
252 gpio_free(info->charge_gpio);
253err:
254 kfree(charger);
255 return ret;
256}
257
258static int __devexit z2_batt_remove(struct i2c_client *client)
259{
260 struct z2_charger *charger = i2c_get_clientdata(client);
261 struct z2_battery_info *info = charger->info;
262
263 flush_scheduled_work();
264 power_supply_unregister(&charger->batt_ps);
265
266 kfree(charger->batt_ps.properties);
267 if (info->charge_gpio >= 0 && gpio_is_valid(info->charge_gpio)) {
268 free_irq(gpio_to_irq(info->charge_gpio), charger);
269 gpio_free(info->charge_gpio);
270 }
271
272 kfree(charger);
273
274 return 0;
275}
276
277#ifdef CONFIG_PM
278static int z2_batt_suspend(struct i2c_client *client, pm_message_t state)
279{
280 flush_scheduled_work();
281 return 0;
282}
283
284static int z2_batt_resume(struct i2c_client *client)
285{
286 struct z2_charger *charger = i2c_get_clientdata(client);
287
288 schedule_work(&charger->bat_work);
289 return 0;
290}
291#else
292#define z2_batt_suspend NULL
293#define z2_batt_resume NULL
294#endif
295
296static const struct i2c_device_id z2_batt_id[] = {
297 { "aer915", 0 },
298 { }
299};
300
301static struct i2c_driver z2_batt_driver = {
302 .driver = {
303 .name = "z2-battery",
304 .owner = THIS_MODULE,
305 },
306 .probe = z2_batt_probe,
307 .remove = z2_batt_remove,
308 .suspend = z2_batt_suspend,
309 .resume = z2_batt_resume,
310 .id_table = z2_batt_id,
311};
312
313static int __init z2_batt_init(void)
314{
315 return i2c_add_driver(&z2_batt_driver);
316}
317
318static void __exit z2_batt_exit(void)
319{
320 i2c_del_driver(&z2_batt_driver);
321}
322
323module_init(z2_batt_init);
324module_exit(z2_batt_exit);
325
326MODULE_LICENSE("GPL");
327MODULE_AUTHOR("Peter Edwards <sweetlilmre@gmail.com>");
328MODULE_DESCRIPTION("Zipit Z2 battery driver");
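For context, the driver above expects its z2_battery_info through the I2C client's platform_data. A minimal board-file sketch follows; the field names match the driver, while the linux/z2_battery.h header path, I2C address, GPIO number and calibration constants are assumptions for illustration, not values from real Zipit hardware.

/* Hypothetical board-file wiring for the aer915 device handled above.
 * All numeric values are illustrative assumptions.
 */
#include <linux/i2c.h>
#include <linux/power_supply.h>
#include <linux/z2_battery.h>

static struct z2_battery_info board_batt_info = {
	.batt_I2C_bus	= 0,
	.batt_I2C_addr	= 0x55,
	.batt_I2C_reg	= 2,		/* voltage register to read */
	.charge_gpio	= 0,		/* charger status line */
	.min_voltage	= 2400000,	/* uV */
	.max_voltage	= 3700000,	/* uV */
	.batt_div	= 59,
	.batt_mult	= 1000000,
	.batt_tech	= POWER_SUPPLY_TECHNOLOGY_LION,
	.batt_name	= "Z2",
};

static struct i2c_board_info __initdata board_i2c_info[] = {
	{
		I2C_BOARD_INFO("aer915", 0x55),
		.platform_data	= &board_batt_info,
	},
};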
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index c32822ad84a4..070211a5955c 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -8,3 +8,27 @@ config RAPIDIO_DISC_TIMEOUT
8 ---help--- 8 ---help---
9 Amount of time a discovery node waits for a host to complete 9 Amount of time a discovery node waits for a host to complete
10 enumeration before giving up. 10 enumeration before giving up.
11
12config RAPIDIO_ENABLE_RX_TX_PORTS
13 bool "Enable RapidIO Input/Output Ports"
14 depends on RAPIDIO
15 ---help---
16	  The RapidIO specification describes an Output port transmit
17	  enable and an Input port receive enable. The recommended reset
18	  state for both Input and Output ports is disabled. When this
19	  option is set, the RapidIO subsystem enables all ports for
20	  Input/Output operation so that traffic other than Maintenance
21	  transfers can flow.
22
23source "drivers/rapidio/switches/Kconfig"
24
25config RAPIDIO_DEBUG
26 bool "RapidIO subsystem debug messages"
27 depends on RAPIDIO
28 help
29 Say Y here if you want the RapidIO subsystem to produce a bunch of
30 debug messages to the system log. Select this if you are having a
31 problem with the RapidIO subsystem and want to see more of what is
32 going on.
33
34 If you are unsure about this, say N here.
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
index 7c0e1818de51..b6139fe187bf 100644
--- a/drivers/rapidio/Makefile
+++ b/drivers/rapidio/Makefile
@@ -4,3 +4,7 @@
4obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o 4obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o
5 5
6obj-$(CONFIG_RAPIDIO) += switches/ 6obj-$(CONFIG_RAPIDIO) += switches/
7
8ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
9EXTRA_CFLAGS += -DDEBUG
10endif
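The -DDEBUG define added above is what arms the subsystem's pr_debug() calls. A simplified sketch of the <linux/printk.h> mechanism it relies on, assuming no dynamic-debug support (the actual kernel macros differ in detail):

/* Simplified sketch; not the exact kernel definitions. */
#ifdef DEBUG
#define pr_debug(fmt, ...) \
	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
	do { } while (0)	/* compiled out without -DDEBUG */
#endif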
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 45415096c294..566432106cc5 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -4,6 +4,14 @@
4 * Copyright 2005 MontaVista Software, Inc. 4 * Copyright 2005 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org> 5 * Matt Porter <mporter@kernel.crashing.org>
6 * 6 *
7 * Copyright 2009 Integrated Device Technology, Inc.
8 * Alex Bounine <alexandre.bounine@idt.com>
9 * - Added Port-Write/Error Management initialization and handling
10 *
11 * Copyright 2009 Sysgo AG
12 * Thomas Moll <thomas.moll@sysgo.com>
13 * - Added Input/Output enable functionality to allow full communication
14 *
7 * This program is free software; you can redistribute it and/or modify it 15 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 16 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your 17 * Free Software Foundation; either version 2 of the License, or (at your
@@ -31,15 +39,16 @@
31LIST_HEAD(rio_devices); 39LIST_HEAD(rio_devices);
32static LIST_HEAD(rio_switches); 40static LIST_HEAD(rio_switches);
33 41
34#define RIO_ENUM_CMPL_MAGIC 0xdeadbeef
35
36static void rio_enum_timeout(unsigned long); 42static void rio_enum_timeout(unsigned long);
37 43
44static void rio_init_em(struct rio_dev *rdev);
45
38DEFINE_SPINLOCK(rio_global_list_lock); 46DEFINE_SPINLOCK(rio_global_list_lock);
39 47
40static int next_destid = 0; 48static int next_destid = 0;
41static int next_switchid = 0; 49static int next_switchid = 0;
42static int next_net = 0; 50static int next_net = 0;
51static int next_comptag;
43 52
44static struct timer_list rio_enum_timer = 53static struct timer_list rio_enum_timer =
45TIMER_INITIALIZER(rio_enum_timeout, 0, 0); 54TIMER_INITIALIZER(rio_enum_timeout, 0, 0);
@@ -52,12 +61,6 @@ static int rio_mport_phys_table[] = {
52 -1, 61 -1,
53}; 62};
54 63
55static int rio_sport_phys_table[] = {
56 RIO_EFB_PAR_EP_FREE_ID,
57 RIO_EFB_SER_EP_FREE_ID,
58 -1,
59};
60
61/** 64/**
62 * rio_get_device_id - Get the base/extended device id for a device 65 * rio_get_device_id - Get the base/extended device id for a device
63 * @port: RIO master port 66 * @port: RIO master port
@@ -118,12 +121,26 @@ static int rio_clear_locks(struct rio_mport *port)
118 u32 result; 121 u32 result;
119 int ret = 0; 122 int ret = 0;
120 123
121 /* Write component tag CSR magic complete value */ 124 /* Assign component tag to all devices */
122 rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, 125 next_comptag = 1;
123 RIO_ENUM_CMPL_MAGIC); 126 rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, next_comptag++);
124 list_for_each_entry(rdev, &rio_devices, global_list) 127
125 rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, 128 list_for_each_entry(rdev, &rio_devices, global_list) {
126 RIO_ENUM_CMPL_MAGIC); 129 /* Mark device as discovered */
130 rio_read_config_32(rdev,
131 rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
132 &result);
133 rio_write_config_32(rdev,
134 rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
135 result | RIO_PORT_GEN_DISCOVERED);
136
137 rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, next_comptag);
138 rdev->comp_tag = next_comptag++;
139 if (next_comptag >= 0x10000) {
140 pr_err("RIO: Component Tag Counter Overflow\n");
141 break;
142 }
143 }
127 144
128 /* Release host device id locks */ 145 /* Release host device id locks */
129 rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR, 146 rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
@@ -229,27 +246,37 @@ static int rio_is_switch(struct rio_dev *rdev)
229} 246}
230 247
231/** 248/**
232 * rio_route_set_ops- Sets routing operations for a particular vendor switch 249 * rio_switch_init - Sets switch operations for a particular vendor switch
233 * @rdev: RIO device 250 * @rdev: RIO device
251 * @do_enum: Enumeration/Discovery mode flag
234 * 252 *
235 * Searches the RIO route ops table for known switch types. If the vid 253 * Searches the RIO switch ops table for known switch types. If the vid
236 * and did match a switch table entry, then set the add_entry() and 254 * and did match a switch table entry, then call the switch initialization
237 * get_entry() ops to the table entry values. 255 * routine to set up switch-specific operations.
238 */ 256 */
239static void rio_route_set_ops(struct rio_dev *rdev) 257static void rio_switch_init(struct rio_dev *rdev, int do_enum)
240{ 258{
241 struct rio_route_ops *cur = __start_rio_route_ops; 259 struct rio_switch_ops *cur = __start_rio_switch_ops;
242 struct rio_route_ops *end = __end_rio_route_ops; 260 struct rio_switch_ops *end = __end_rio_switch_ops;
243 261
244 while (cur < end) { 262 while (cur < end) {
245 if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { 263 if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
246 pr_debug("RIO: adding routing ops for %s\n", rio_name(rdev)); 264 pr_debug("RIO: calling init routine for %s\n",
247 rdev->rswitch->add_entry = cur->add_hook; 265 rio_name(rdev));
248 rdev->rswitch->get_entry = cur->get_hook; 266 cur->init_hook(rdev, do_enum);
267 break;
249 } 268 }
250 cur++; 269 cur++;
251 } 270 }
252 271
272 if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
273 pr_debug("RIO: adding STD routing ops for %s\n",
274 rio_name(rdev));
275 rdev->rswitch->add_entry = rio_std_route_add_entry;
276 rdev->rswitch->get_entry = rio_std_route_get_entry;
277 rdev->rswitch->clr_table = rio_std_route_clr_table;
278 }
279
253 if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) 280 if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
254 printk(KERN_ERR "RIO: missing routing ops for %s\n", 281 printk(KERN_ERR "RIO: missing routing ops for %s\n",
255 rio_name(rdev)); 282 rio_name(rdev));
@@ -281,6 +308,65 @@ static int __devinit rio_add_device(struct rio_dev *rdev)
281} 308}
282 309
283/** 310/**
311 * rio_enable_rx_tx_port - enable the input receiver and output transmitter
312 * of a given port
313 * @port: Master port associated with the RIO network
314 * @local: local=1 selects the local port, otherwise a far device is reached
315 * @destid: Destination ID of the device to access
316 * @hopcount: Number of hops to reach the target
317 * @port_num: Port number to enable on a far-end switch
318 *
319 * Returns 0 on success, or -EIO if the Port n Control CSR
320 * (EXT_PTR+0x3C) cannot be read or written.
321 */
322inline int rio_enable_rx_tx_port(struct rio_mport *port,
323 int local, u16 destid,
324 u8 hopcount, u8 port_num) {
325#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
326 u32 regval;
327 u32 ext_ftr_ptr;
328
329 /*
330 * enable rx input tx output port
331 */
332 pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
333 "%d, port_num = %d)\n", local, destid, hopcount, port_num);
334
335 ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
336
337 if (local) {
338 rio_local_read_config_32(port, ext_ftr_ptr +
339 RIO_PORT_N_CTL_CSR(0),
340 &regval);
341 } else {
342 if (rio_mport_read_config_32(port, destid, hopcount,
343 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
344 return -EIO;
345 }
346
347 if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
348 /* serial */
349 regval = regval | RIO_PORT_N_CTL_EN_RX_SER
350 | RIO_PORT_N_CTL_EN_TX_SER;
351 } else {
352 /* parallel */
353 regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
354 | RIO_PORT_N_CTL_EN_TX_PAR;
355 }
356
357 if (local) {
358 rio_local_write_config_32(port, ext_ftr_ptr +
359 RIO_PORT_N_CTL_CSR(0), regval);
360 } else {
361 if (rio_mport_write_config_32(port, destid, hopcount,
362 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
363 return -EIO;
364 }
365#endif
366 return 0;
367}
368
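A usage sketch for the helper above; the destid, hopcount and port values are illustrative, and when CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS is not set the call compiles to a successful no-op:

	/* Enable the local master port, then port 2 of a switch one hop away. */
	if (rio_enable_rx_tx_port(mport, 1, 0, 0, 0))
		pr_debug("RIO: failed to enable local port\n");
	if (rio_enable_rx_tx_port(mport, 0, RIO_ANY_DESTID(mport->sys_size), 1, 2))
		pr_debug("RIO: failed to enable switch port 2\n");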
369/**
284 * rio_setup_device- Allocates and sets up a RIO device 370 * rio_setup_device- Allocates and sets up a RIO device
285 * @net: RIO network 371 * @net: RIO network
286 * @port: Master port to send transactions 372 * @port: Master port to send transactions
@@ -325,8 +411,14 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
325 rdev->asm_rev = result >> 16; 411 rdev->asm_rev = result >> 16;
326 rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR, 412 rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR,
327 &rdev->pef); 413 &rdev->pef);
328 if (rdev->pef & RIO_PEF_EXT_FEATURES) 414 if (rdev->pef & RIO_PEF_EXT_FEATURES) {
329 rdev->efptr = result & 0xffff; 415 rdev->efptr = result & 0xffff;
416 rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
417 hopcount);
418
419 rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
420 hopcount, RIO_EFB_ERR_MGMNT);
421 }
330 422
331 rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR, 423 rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
332 &rdev->src_ops); 424 &rdev->src_ops);
@@ -349,12 +441,13 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
349 if (rio_is_switch(rdev)) { 441 if (rio_is_switch(rdev)) {
350 rio_mport_read_config_32(port, destid, hopcount, 442 rio_mport_read_config_32(port, destid, hopcount,
351 RIO_SWP_INFO_CAR, &rdev->swpinfo); 443 RIO_SWP_INFO_CAR, &rdev->swpinfo);
352 rswitch = kmalloc(sizeof(struct rio_switch), GFP_KERNEL); 444 rswitch = kzalloc(sizeof(struct rio_switch), GFP_KERNEL);
353 if (!rswitch) 445 if (!rswitch)
354 goto cleanup; 446 goto cleanup;
355 rswitch->switchid = next_switchid; 447 rswitch->switchid = next_switchid;
356 rswitch->hopcount = hopcount; 448 rswitch->hopcount = hopcount;
357 rswitch->destid = destid; 449 rswitch->destid = destid;
450 rswitch->port_ok = 0;
358 rswitch->route_table = kzalloc(sizeof(u8)* 451 rswitch->route_table = kzalloc(sizeof(u8)*
359 RIO_MAX_ROUTE_ENTRIES(port->sys_size), 452 RIO_MAX_ROUTE_ENTRIES(port->sys_size),
360 GFP_KERNEL); 453 GFP_KERNEL);
@@ -367,13 +460,22 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
367 rdev->rswitch = rswitch; 460 rdev->rswitch = rswitch;
368 dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id, 461 dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
369 rdev->rswitch->switchid); 462 rdev->rswitch->switchid);
370 rio_route_set_ops(rdev); 463 rio_switch_init(rdev, do_enum);
464
465 if (do_enum && rdev->rswitch->clr_table)
466 rdev->rswitch->clr_table(port, destid, hopcount,
467 RIO_GLOBAL_TABLE);
371 468
372 list_add_tail(&rswitch->node, &rio_switches); 469 list_add_tail(&rswitch->node, &rio_switches);
373 470
374 } else 471 } else {
472 if (do_enum)
473			/* Enable Input/Output port (transmitter/receiver) */
474 rio_enable_rx_tx_port(port, 0, destid, hopcount, 0);
475
375 dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, 476 dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id,
376 rdev->destid); 477 rdev->destid);
478 }
377 479
378 rdev->dev.bus = &rio_bus_type; 480 rdev->dev.bus = &rio_bus_type;
379 481
@@ -414,23 +516,29 @@ cleanup:
414 * 516 *
415 * Reads the port error status CSR for a particular switch port to 517 * Reads the port error status CSR for a particular switch port to
416 * determine if the port has an active link. Returns 518 * determine if the port has an active link. Returns
417 * %PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is 519 * %RIO_PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is
418 * inactive. 520 * inactive.
419 */ 521 */
420static int 522static int
421rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) 523rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
422{ 524{
423 u32 result; 525 u32 result = 0;
424 u32 ext_ftr_ptr; 526 u32 ext_ftr_ptr;
425 527
426 int *entry = rio_sport_phys_table; 528 ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0);
427
428 do {
429 if ((ext_ftr_ptr =
430 rio_mport_get_feature(port, 0, destid, hopcount, *entry)))
431 529
530 while (ext_ftr_ptr) {
531 rio_mport_read_config_32(port, destid, hopcount,
532 ext_ftr_ptr, &result);
533 result = RIO_GET_BLOCK_ID(result);
534 if ((result == RIO_EFB_SER_EP_FREE_ID) ||
535 (result == RIO_EFB_SER_EP_FREE_ID_V13P) ||
536 (result == RIO_EFB_SER_EP_FREC_ID))
432 break; 537 break;
433 } while (*++entry >= 0); 538
539 ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount,
540 ext_ftr_ptr);
541 }
434 542
435 if (ext_ftr_ptr) 543 if (ext_ftr_ptr)
436 rio_mport_read_config_32(port, destid, hopcount, 544 rio_mport_read_config_32(port, destid, hopcount,
@@ -438,7 +546,81 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
438 RIO_PORT_N_ERR_STS_CSR(sport), 546 RIO_PORT_N_ERR_STS_CSR(sport),
439 &result); 547 &result);
440 548
441 return (result & PORT_N_ERR_STS_PORT_OK); 549 return result & RIO_PORT_N_ERR_STS_PORT_OK;
550}
551
552/**
553 * rio_lock_device - Acquires host device lock for specified device
554 * @port: Master port to send transaction
555 * @destid: Destination ID for device/switch
556 * @hopcount: Hopcount to reach switch
557 * @wait_ms: Max wait time in msec (0 = no timeout)
558 *
559 * Attempts to acquire the host device lock for the specified device.
560 * Returns 0 if the device lock is acquired or -EINVAL if the timeout expires.
561 */
562static int
563rio_lock_device(struct rio_mport *port, u16 destid, u8 hopcount, int wait_ms)
564{
565 u32 result;
566 int tcnt = 0;
567
568 /* Attempt to acquire device lock */
569 rio_mport_write_config_32(port, destid, hopcount,
570 RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
571 rio_mport_read_config_32(port, destid, hopcount,
572 RIO_HOST_DID_LOCK_CSR, &result);
573
574 while (result != port->host_deviceid) {
575 if (wait_ms != 0 && tcnt == wait_ms) {
576 pr_debug("RIO: timeout when locking device %x:%x\n",
577 destid, hopcount);
578 return -EINVAL;
579 }
580
581 /* Delay a bit */
582 mdelay(1);
583 tcnt++;
584 /* Try to acquire device lock again */
585 rio_mport_write_config_32(port, destid,
586 hopcount,
587 RIO_HOST_DID_LOCK_CSR,
588 port->host_deviceid);
589 rio_mport_read_config_32(port, destid,
590 hopcount,
591 RIO_HOST_DID_LOCK_CSR, &result);
592 }
593
594 return 0;
595}
596
597/**
598 * rio_unlock_device - Releases host device lock for specified device
599 * @port: Master port to send transaction
600 * @destid: Destination ID for device/switch
601 * @hopcount: Hopcount to reach switch
602 *
603 * Returns 0 if the device lock is released or -EINVAL if it fails.
604 */
605static int
606rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount)
607{
608 u32 result;
609
610 /* Release device lock */
611 rio_mport_write_config_32(port, destid,
612 hopcount,
613 RIO_HOST_DID_LOCK_CSR,
614 port->host_deviceid);
615 rio_mport_read_config_32(port, destid, hopcount,
616 RIO_HOST_DID_LOCK_CSR, &result);
617 if ((result & 0xffff) != 0xffff) {
618 pr_debug("RIO: badness when releasing device lock %x:%x\n",
619 destid, hopcount);
620 return -EINVAL;
621 }
622
623 return 0;
442} 624}
443 625
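The lock/unlock pair above brackets switch register access during discovery, as in this illustrative pattern (mport, destid and hopcount are assumed to be in scope; the 1000 ms timeout matches the callers in this file):

	if (rio_lock_device(mport, destid, hopcount, 1000) == 0) {
		/* ... read or program switch registers safely ... */
		rio_unlock_device(mport, destid, hopcount);
	}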
444/** 626/**
@@ -448,6 +630,7 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
448 * @table: Routing table ID 630 * @table: Routing table ID
449 * @route_destid: Destination ID to be routed 631 * @route_destid: Destination ID to be routed
450 * @route_port: Port number to be routed 632 * @route_port: Port number to be routed
633 * @lock: lock switch device flag
451 * 634 *
452 * Calls the switch specific add_entry() method to add a route entry 635 * Calls the switch specific add_entry() method to add a route entry
453 * on a switch. The route table can be specified using the @table 636 * on a switch. The route table can be specified using the @table
@@ -456,12 +639,26 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
456 * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL 639 * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL
457 * on failure. 640 * on failure.
458 */ 641 */
459static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch, 642static int
460 u16 table, u16 route_destid, u8 route_port) 643rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch,
644 u16 table, u16 route_destid, u8 route_port, int lock)
461{ 645{
462 return rswitch->add_entry(mport, rswitch->destid, 646 int rc;
647
648 if (lock) {
649 rc = rio_lock_device(mport, rswitch->destid,
650 rswitch->hopcount, 1000);
651 if (rc)
652 return rc;
653 }
654
655 rc = rswitch->add_entry(mport, rswitch->destid,
463 rswitch->hopcount, table, 656 rswitch->hopcount, table,
464 route_destid, route_port); 657 route_destid, route_port);
658 if (lock)
659 rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
660
661 return rc;
465} 662}
466 663
467/** 664/**
@@ -471,6 +668,7 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit
471 * @table: Routing table ID 668 * @table: Routing table ID
472 * @route_destid: Destination ID to be routed 669 * @route_destid: Destination ID to be routed
473 * @route_port: Pointer to read port number into 670 * @route_port: Pointer to read port number into
671 * @lock: lock switch device flag
474 * 672 *
475 * Calls the switch specific get_entry() method to read a route entry 673 * Calls the switch specific get_entry() method to read a route entry
476 * in a switch. The route table can be specified using the @table 674 * in a switch. The route table can be specified using the @table
@@ -481,11 +679,24 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit
481 */ 679 */
482static int 680static int
483rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table, 681rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table,
484 u16 route_destid, u8 * route_port) 682 u16 route_destid, u8 *route_port, int lock)
485{ 683{
486 return rswitch->get_entry(mport, rswitch->destid, 684 int rc;
685
686 if (lock) {
687 rc = rio_lock_device(mport, rswitch->destid,
688 rswitch->hopcount, 1000);
689 if (rc)
690 return rc;
691 }
692
693 rc = rswitch->get_entry(mport, rswitch->destid,
487 rswitch->hopcount, table, 694 rswitch->hopcount, table,
488 route_destid, route_port); 695 route_destid, route_port);
696 if (lock)
697 rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
698
699 return rc;
489} 700}
490 701
491/** 702/**
@@ -625,14 +836,14 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
625 sw_inport = rio_get_swpinfo_inport(port, 836 sw_inport = rio_get_swpinfo_inport(port,
626 RIO_ANY_DESTID(port->sys_size), hopcount); 837 RIO_ANY_DESTID(port->sys_size), hopcount);
627 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, 838 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
628 port->host_deviceid, sw_inport); 839 port->host_deviceid, sw_inport, 0);
629 rdev->rswitch->route_table[port->host_deviceid] = sw_inport; 840 rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
630 841
631 for (destid = 0; destid < next_destid; destid++) { 842 for (destid = 0; destid < next_destid; destid++) {
632 if (destid == port->host_deviceid) 843 if (destid == port->host_deviceid)
633 continue; 844 continue;
634 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, 845 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
635 destid, sw_inport); 846 destid, sw_inport, 0);
636 rdev->rswitch->route_table[destid] = sw_inport; 847 rdev->rswitch->route_table[destid] = sw_inport;
637 } 848 }
638 849
@@ -644,8 +855,15 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
644 rio_name(rdev), rdev->vid, rdev->did, num_ports); 855 rio_name(rdev), rdev->vid, rdev->did, num_ports);
645 sw_destid = next_destid; 856 sw_destid = next_destid;
646 for (port_num = 0; port_num < num_ports; port_num++) { 857 for (port_num = 0; port_num < num_ports; port_num++) {
647 if (sw_inport == port_num) 858			/* Enable Input/Output port (transmitter/receiver) */
859 rio_enable_rx_tx_port(port, 0,
860 RIO_ANY_DESTID(port->sys_size),
861 hopcount, port_num);
862
863 if (sw_inport == port_num) {
864 rdev->rswitch->port_ok |= (1 << port_num);
648 continue; 865 continue;
866 }
649 867
650 cur_destid = next_destid; 868 cur_destid = next_destid;
651 869
@@ -655,10 +873,11 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
655 pr_debug( 873 pr_debug(
656 "RIO: scanning device on port %d\n", 874 "RIO: scanning device on port %d\n",
657 port_num); 875 port_num);
876 rdev->rswitch->port_ok |= (1 << port_num);
658 rio_route_add_entry(port, rdev->rswitch, 877 rio_route_add_entry(port, rdev->rswitch,
659 RIO_GLOBAL_TABLE, 878 RIO_GLOBAL_TABLE,
660 RIO_ANY_DESTID(port->sys_size), 879 RIO_ANY_DESTID(port->sys_size),
661 port_num); 880 port_num, 0);
662 881
663 if (rio_enum_peer(net, port, hopcount + 1) < 0) 882 if (rio_enum_peer(net, port, hopcount + 1) < 0)
664 return -1; 883 return -1;
@@ -672,15 +891,35 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
672 rio_route_add_entry(port, rdev->rswitch, 891 rio_route_add_entry(port, rdev->rswitch,
673 RIO_GLOBAL_TABLE, 892 RIO_GLOBAL_TABLE,
674 destid, 893 destid,
675 port_num); 894 port_num,
895 0);
676 rdev->rswitch-> 896 rdev->rswitch->
677 route_table[destid] = 897 route_table[destid] =
678 port_num; 898 port_num;
679 } 899 }
680 } 900 }
901 } else {
902 /* If switch supports Error Management,
903 * set PORT_LOCKOUT bit for unused port
904 */
905 if (rdev->em_efptr)
906 rio_set_port_lockout(rdev, port_num, 1);
907
908 rdev->rswitch->port_ok &= ~(1 << port_num);
681 } 909 }
682 } 910 }
683 911
912	/* Direct Port-write messages to the enumerating host */
913 if ((rdev->src_ops & RIO_SRC_OPS_PORT_WRITE) &&
914 (rdev->em_efptr)) {
915 rio_write_config_32(rdev,
916 rdev->em_efptr + RIO_EM_PW_TGT_DEVID,
917 (port->host_deviceid << 16) |
918 (port->sys_size << 15));
919 }
920
921 rio_init_em(rdev);
922
684 /* Check for empty switch */ 923 /* Check for empty switch */
685 if (next_destid == sw_destid) { 924 if (next_destid == sw_destid) {
686 next_destid++; 925 next_destid++;
@@ -700,21 +939,16 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
700 * rio_enum_complete- Tests if enumeration of a network is complete 939 * rio_enum_complete- Tests if enumeration of a network is complete
701 * @port: Master port to send transaction 940 * @port: Master port to send transaction
702 * 941 *
703 * Tests the Component Tag CSR for presence of the magic enumeration 942 * Tests the Component Tag CSR for non-zero value (enumeration
704 * complete flag. Return %1 if enumeration is complete or %0 if 943 * complete flag). Return %1 if enumeration is complete or %0 if
705 * enumeration is incomplete. 944 * enumeration is incomplete.
706 */ 945 */
707static int rio_enum_complete(struct rio_mport *port) 946static int rio_enum_complete(struct rio_mport *port)
708{ 947{
709 u32 tag_csr; 948 u32 tag_csr;
710 int ret = 0;
711 949
712 rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr); 950 rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr);
713 951 return (tag_csr & 0xffff) ? 1 : 0;
714 if (tag_csr == RIO_ENUM_CMPL_MAGIC)
715 ret = 1;
716
717 return ret;
718} 952}
719 953
720/** 954/**
@@ -763,17 +997,21 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
763 pr_debug( 997 pr_debug(
764 "RIO: scanning device on port %d\n", 998 "RIO: scanning device on port %d\n",
765 port_num); 999 port_num);
1000
1001 rio_lock_device(port, destid, hopcount, 1000);
1002
766 for (ndestid = 0; 1003 for (ndestid = 0;
767 ndestid < RIO_ANY_DESTID(port->sys_size); 1004 ndestid < RIO_ANY_DESTID(port->sys_size);
768 ndestid++) { 1005 ndestid++) {
769 rio_route_get_entry(port, rdev->rswitch, 1006 rio_route_get_entry(port, rdev->rswitch,
770 RIO_GLOBAL_TABLE, 1007 RIO_GLOBAL_TABLE,
771 ndestid, 1008 ndestid,
772 &route_port); 1009 &route_port, 0);
773 if (route_port == port_num) 1010 if (route_port == port_num)
774 break; 1011 break;
775 } 1012 }
776 1013
1014 rio_unlock_device(port, destid, hopcount);
777 if (rio_disc_peer 1015 if (rio_disc_peer
778 (net, port, ndestid, hopcount + 1) < 0) 1016 (net, port, ndestid, hopcount + 1) < 0)
779 return -1; 1017 return -1;
@@ -792,7 +1030,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
792 * 1030 *
793 * Reads the port error status CSR for the master port to 1031 * Reads the port error status CSR for the master port to
794 * determine if the port has an active link. Returns 1032 * determine if the port has an active link. Returns
795 * %PORT_N_ERR_STS_PORT_OK if the master port is active 1033 * %RIO_PORT_N_ERR_STS_PORT_OK if the master port is active
796 * or %0 if it is inactive. 1034 * or %0 if it is inactive.
797 */ 1035 */
798static int rio_mport_is_active(struct rio_mport *port) 1036static int rio_mport_is_active(struct rio_mport *port)
@@ -813,7 +1051,7 @@ static int rio_mport_is_active(struct rio_mport *port)
813 RIO_PORT_N_ERR_STS_CSR(port->index), 1051 RIO_PORT_N_ERR_STS_CSR(port->index),
814 &result); 1052 &result);
815 1053
816 return (result & PORT_N_ERR_STS_PORT_OK); 1054 return result & RIO_PORT_N_ERR_STS_PORT_OK;
817} 1055}
818 1056
819/** 1057/**
@@ -866,12 +1104,17 @@ static void rio_update_route_tables(struct rio_mport *port)
866 continue; 1104 continue;
867 1105
868 if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) { 1106 if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) {
1107			/* Skip if destid ends at an empty switch */
1108 if (rswitch->destid == destid)
1109 continue;
869 1110
870 sport = rio_get_swpinfo_inport(port, 1111 sport = rio_get_swpinfo_inport(port,
871 rswitch->destid, rswitch->hopcount); 1112 rswitch->destid, rswitch->hopcount);
872 1113
873 if (rswitch->add_entry) { 1114 if (rswitch->add_entry) {
874 rio_route_add_entry(port, rswitch, RIO_GLOBAL_TABLE, destid, sport); 1115 rio_route_add_entry(port, rswitch,
1116 RIO_GLOBAL_TABLE, destid,
1117 sport, 0);
875 rswitch->route_table[destid] = sport; 1118 rswitch->route_table[destid] = sport;
876 } 1119 }
877 } 1120 }
@@ -880,6 +1123,32 @@ static void rio_update_route_tables(struct rio_mport *port)
880} 1123}
881 1124
882/** 1125/**
1126 * rio_init_em - Initializes RIO Error Management (for switches)
1127 * @rdev: RIO device (switch) for which Error Management is initialized
1128 *
1129 * For each enumerated switch, call the device-specific error management
1130 * initialization routine (if supplied by the switch driver).
1131 */
1132static void rio_init_em(struct rio_dev *rdev)
1133{
1134 if (rio_is_switch(rdev) && (rdev->em_efptr) &&
1135 (rdev->rswitch->em_init)) {
1136 rdev->rswitch->em_init(rdev);
1137 }
1138}
1139
1140/**
1141 * rio_pw_enable - Enables/disables port-write handling by a master port
1142 * @port: Master port associated with port-write handling
1143 * @enable: 1=enable, 0=disable
1144 */
1145static void rio_pw_enable(struct rio_mport *port, int enable)
1146{
1147 if (port->ops->pwenable)
1148 port->ops->pwenable(port, enable);
1149}
1150
1151/**
883 * rio_enum_mport- Start enumeration through a master port 1152 * rio_enum_mport- Start enumeration through a master port
884 * @mport: Master port to send transactions 1153 * @mport: Master port to send transactions
885 * 1154 *
@@ -911,6 +1180,10 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
911 rc = -ENOMEM; 1180 rc = -ENOMEM;
912 goto out; 1181 goto out;
913 } 1182 }
1183
1184	/* Enable Input/Output port (transmitter/receiver) */
1185 rio_enable_rx_tx_port(mport, 1, 0, 0, 0);
1186
914 if (rio_enum_peer(net, mport, 0) < 0) { 1187 if (rio_enum_peer(net, mport, 0) < 0) {
915 /* A higher priority host won enumeration, bail. */ 1188 /* A higher priority host won enumeration, bail. */
916 printk(KERN_INFO 1189 printk(KERN_INFO
@@ -922,6 +1195,7 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
922 } 1195 }
923 rio_update_route_tables(mport); 1196 rio_update_route_tables(mport);
924 rio_clear_locks(mport); 1197 rio_clear_locks(mport);
1198 rio_pw_enable(mport, 1);
925 } else { 1199 } else {
926 printk(KERN_INFO "RIO: master port %d link inactive\n", 1200 printk(KERN_INFO "RIO: master port %d link inactive\n",
927 mport->id); 1201 mport->id);
@@ -945,15 +1219,22 @@ static void rio_build_route_tables(void)
945 u8 sport; 1219 u8 sport;
946 1220
947 list_for_each_entry(rdev, &rio_devices, global_list) 1221 list_for_each_entry(rdev, &rio_devices, global_list)
948 if (rio_is_switch(rdev)) 1222 if (rio_is_switch(rdev)) {
949 for (i = 0; 1223 rio_lock_device(rdev->net->hport, rdev->rswitch->destid,
950 i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); 1224 rdev->rswitch->hopcount, 1000);
951 i++) { 1225 for (i = 0;
952 if (rio_route_get_entry 1226 i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
953 (rdev->net->hport, rdev->rswitch, RIO_GLOBAL_TABLE, 1227 i++) {
954 i, &sport) < 0) 1228 if (rio_route_get_entry
955 continue; 1229 (rdev->net->hport, rdev->rswitch,
956 rdev->rswitch->route_table[i] = sport; 1230 RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
1231 continue;
1232 rdev->rswitch->route_table[i] = sport;
1233 }
1234
1235 rio_unlock_device(rdev->net->hport,
1236 rdev->rswitch->destid,
1237 rdev->rswitch->hopcount);
957 } 1238 }
958} 1239}
959 1240
@@ -1012,6 +1293,13 @@ int __devinit rio_disc_mport(struct rio_mport *mport)
1012 del_timer_sync(&rio_enum_timer); 1293 del_timer_sync(&rio_enum_timer);
1013 1294
1014 pr_debug("done\n"); 1295 pr_debug("done\n");
1296
1297 /* Read DestID assigned by enumerator */
1298 rio_local_read_config_32(mport, RIO_DID_CSR,
1299 &mport->host_deviceid);
1300 mport->host_deviceid = RIO_GET_DID(mport->sys_size,
1301 mport->host_deviceid);
1302
1015 if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size), 1303 if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
1016 0) < 0) { 1304 0) < 0) {
1017 printk(KERN_INFO 1305 printk(KERN_INFO
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 6395c780008b..777e099a3d8f 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -5,6 +5,10 @@
5 * Copyright 2005 MontaVista Software, Inc. 5 * Copyright 2005 MontaVista Software, Inc.
6 * Matt Porter <mporter@kernel.crashing.org> 6 * Matt Porter <mporter@kernel.crashing.org>
7 * 7 *
8 * Copyright 2009 Integrated Device Technology, Inc.
9 * Alex Bounine <alexandre.bounine@idt.com>
10 * - Added Port-Write/Error Management initialization and handling
11 *
8 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 13 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your 14 * Free Software Foundation; either version 2 of the License, or (at your
@@ -333,6 +337,328 @@ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res)
333} 337}
334 338
335/** 339/**
340 * rio_request_inb_pwrite - request inbound port-write message service
341 * @rdev: RIO device with which to register the inbound port-write callback
342 * @pwcback: Callback routine to execute when port-write is received
343 *
344 * Binds a port-write callback function to the RapidIO device.
345 * Returns 0 if the request has been satisfied.
346 */
347int rio_request_inb_pwrite(struct rio_dev *rdev,
348 int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step))
349{
350 int rc = 0;
351
352 spin_lock(&rio_global_list_lock);
353 if (rdev->pwcback != NULL)
354 rc = -ENOMEM;
355 else
356 rdev->pwcback = pwcback;
357
358 spin_unlock(&rio_global_list_lock);
359 return rc;
360}
361EXPORT_SYMBOL_GPL(rio_request_inb_pwrite);
362
363/**
364 * rio_release_inb_pwrite - release inbound port-write message service
365 * @rdev: RIO device which registered for inbound port-write callback
366 *
367 * Removes callback from the rio_dev structure. Returns 0 if the request
368 * has been satisfied.
369 */
370int rio_release_inb_pwrite(struct rio_dev *rdev)
371{
372 int rc = -ENOMEM;
373
374 spin_lock(&rio_global_list_lock);
375 if (rdev->pwcback) {
376 rdev->pwcback = NULL;
377 rc = 0;
378 }
379
380 spin_unlock(&rio_global_list_lock);
381 return rc;
382}
383EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
384
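A sketch of how a client driver might consume this service; the handler and the rdev pointer are hypothetical, and returning 0 tells the core the message was fully handled:

/* Hypothetical client of the port-write service registered above. */
static int my_pw_handler(struct rio_dev *rdev, union rio_pw_msg *msg,
			 int step)
{
	pr_debug("RIO: port-write from %s\n", rio_name(rdev));
	return 0;	/* consumed; skip standard switch processing */
}

	/* in the driver's probe path: */
	if (rio_request_inb_pwrite(rdev, my_pw_handler))
		pr_debug("RIO: port-write callback already registered\n");

	/* ... and on removal: */
	rio_release_inb_pwrite(rdev);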
385/**
386 * rio_mport_get_physefb - Helper function that returns register offset
387 * for Physical Layer Extended Features Block.
388 * @port: Master port; @local, @destid and @hopcount select the target device
389 */
390u32
391rio_mport_get_physefb(struct rio_mport *port, int local,
392 u16 destid, u8 hopcount)
393{
394 u32 ext_ftr_ptr;
395 u32 ftr_header;
396
397 ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0);
398
399 while (ext_ftr_ptr) {
400 if (local)
401 rio_local_read_config_32(port, ext_ftr_ptr,
402 &ftr_header);
403 else
404 rio_mport_read_config_32(port, destid, hopcount,
405 ext_ftr_ptr, &ftr_header);
406
407 ftr_header = RIO_GET_BLOCK_ID(ftr_header);
408 switch (ftr_header) {
409
410 case RIO_EFB_SER_EP_ID_V13P:
411 case RIO_EFB_SER_EP_REC_ID_V13P:
412 case RIO_EFB_SER_EP_FREE_ID_V13P:
413 case RIO_EFB_SER_EP_ID:
414 case RIO_EFB_SER_EP_REC_ID:
415 case RIO_EFB_SER_EP_FREE_ID:
416 case RIO_EFB_SER_EP_FREC_ID:
417
418 return ext_ftr_ptr;
419
420 default:
421 break;
422 }
423
424 ext_ftr_ptr = rio_mport_get_efb(port, local, destid,
425 hopcount, ext_ftr_ptr);
426 }
427
428 return ext_ftr_ptr;
429}
430
431/**
432 * rio_get_comptag - Begin or continue searching for a RIO device by component tag
433 * @comp_tag: RIO component tag to match
434 * @from: Previous RIO device found in search, or %NULL for new search
435 *
436 * Iterates through the list of known RIO devices. If a RIO device is
437 * found with a matching @comp_tag, a pointer to its device
438 * structure is returned. Otherwise, %NULL is returned. A new search
439 * is initiated by passing %NULL to the @from argument. Otherwise, if
440 * @from is not %NULL, searches continue from next device on the global
441 * list.
442 */
443static struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
444{
445 struct list_head *n;
446 struct rio_dev *rdev;
447
448 spin_lock(&rio_global_list_lock);
449 n = from ? from->global_list.next : rio_devices.next;
450
451 while (n && (n != &rio_devices)) {
452 rdev = rio_dev_g(n);
453 if (rdev->comp_tag == comp_tag)
454 goto exit;
455 n = n->next;
456 }
457 rdev = NULL;
458exit:
459 spin_unlock(&rio_global_list_lock);
460 return rdev;
461}
462
463/**
464 * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
465 * @rdev: Pointer to RIO device control structure
466 * @pnum: Switch port number to set LOCKOUT bit
467 * @lock: Operation : set (=1) or clear (=0)
468 */
469int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
470{
471 u8 hopcount = 0xff;
472 u16 destid = rdev->destid;
473 u32 regval;
474
475 if (rdev->rswitch) {
476 destid = rdev->rswitch->destid;
477 hopcount = rdev->rswitch->hopcount;
478 }
479
480 rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
481 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
482 &regval);
483 if (lock)
484 regval |= RIO_PORT_N_CTL_LOCKOUT;
485 else
486 regval &= ~RIO_PORT_N_CTL_LOCKOUT;
487
488 rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
489 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
490 regval);
491 return 0;
492}
493
494/**
495 * rio_inb_pwrite_handler - process inbound port-write message
496 * @pw_msg: pointer to inbound port-write message
497 *
498 * Processes an inbound port-write message. Returns 0 if the request
499 * has been satisfied.
500 */
501int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
502{
503 struct rio_dev *rdev;
504 struct rio_mport *mport;
505 u8 hopcount;
506 u16 destid;
507 u32 err_status;
508 int rc, portnum;
509
510 rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
511 if (rdev == NULL) {
512		/* Something bad here (probably enumeration error) */
513 pr_err("RIO: %s No matching device for CTag 0x%08x\n",
514 __func__, pw_msg->em.comptag);
515 return -EIO;
516 }
517
518 pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev));
519
520#ifdef DEBUG_PW
521 {
522 u32 i;
523 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
524 pr_debug("0x%02x: %08x %08x %08x %08x",
525 i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
526 pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
527 i += 4;
528 }
529 pr_debug("\n");
530 }
531#endif
532
533 /* Call an external service function (if such is registered
534 * for this device). This may be the service for endpoints that send
535	 * device-specific port-write messages. End-point messages are expected
536	 * to be handled completely by the EP-specific device driver.
537	 * For switches, rc==0 signals that no standard processing is required.
538 */
539 if (rdev->pwcback != NULL) {
540 rc = rdev->pwcback(rdev, pw_msg, 0);
541 if (rc == 0)
542 return 0;
543 }
544
545 /* For End-point devices processing stops here */
546 if (!(rdev->pef & RIO_PEF_SWITCH))
547 return 0;
548
549 if (rdev->phys_efptr == 0) {
550 pr_err("RIO_PW: Bad switch initialization for %s\n",
551 rio_name(rdev));
552 return 0;
553 }
554
555 mport = rdev->net->hport;
556 destid = rdev->rswitch->destid;
557 hopcount = rdev->rswitch->hopcount;
558
559 /*
560 * Process the port-write notification from switch
561 */
562
563 portnum = pw_msg->em.is_port & 0xFF;
564
565 if (rdev->rswitch->em_handle)
566 rdev->rswitch->em_handle(rdev, portnum);
567
568 rio_mport_read_config_32(mport, destid, hopcount,
569 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
570 &err_status);
571 pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
572
573 if (pw_msg->em.errdetect) {
574 pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
575 portnum, pw_msg->em.errdetect);
576 /* Clear EM Port N Error Detect CSR */
577 rio_mport_write_config_32(mport, destid, hopcount,
578 rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
579 }
580
581 if (pw_msg->em.ltlerrdet) {
582 pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
583 pw_msg->em.ltlerrdet);
584 /* Clear EM L/T Layer Error Detect CSR */
585 rio_mport_write_config_32(mport, destid, hopcount,
586 rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
587 }
588
589 /* Clear Port Errors */
590 rio_mport_write_config_32(mport, destid, hopcount,
591 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
592 err_status & RIO_PORT_N_ERR_STS_CLR_MASK);
593
594 if (rdev->rswitch->port_ok & (1 << portnum)) {
595 if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) {
596 rdev->rswitch->port_ok &= ~(1 << portnum);
597 rio_set_port_lockout(rdev, portnum, 1);
598
599 rio_mport_write_config_32(mport, destid, hopcount,
600 rdev->phys_efptr +
601 RIO_PORT_N_ACK_STS_CSR(portnum),
602 RIO_PORT_N_ACK_CLEAR);
603
604 /* Schedule Extraction Service */
605 pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
606 rio_name(rdev), portnum);
607 }
608 } else {
609 if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
610 rdev->rswitch->port_ok |= (1 << portnum);
611 rio_set_port_lockout(rdev, portnum, 0);
612
613 /* Schedule Insertion Service */
614 pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
615 rio_name(rdev), portnum);
616 }
617 }
618
619 /* Clear Port-Write Pending bit */
620 rio_mport_write_config_32(mport, destid, hopcount,
621 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
622 RIO_PORT_N_ERR_STS_PW_PEND);
623
624 return 0;
625}
626EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler);
627
628/**
629 * rio_mport_get_efb - get pointer to next extended features block
630 * @port: Master port to issue transaction
631 * @local: Indicate a local master port or remote device access
632 * @destid: Destination ID of the device
633 * @hopcount: Number of switch hops to the device
634 * @from: Offset of current Extended Feature block header (if 0 starts
635 * from ExtFeaturePtr)
636 */
637u32
638rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
639 u8 hopcount, u32 from)
640{
641 u32 reg_val;
642
643 if (from == 0) {
644 if (local)
645 rio_local_read_config_32(port, RIO_ASM_INFO_CAR,
646 &reg_val);
647 else
648 rio_mport_read_config_32(port, destid, hopcount,
649 RIO_ASM_INFO_CAR, &reg_val);
650 return reg_val & RIO_EXT_FTR_PTR_MASK;
651 } else {
652 if (local)
653 rio_local_read_config_32(port, from, &reg_val);
654 else
655 rio_mport_read_config_32(port, destid, hopcount,
656 from, &reg_val);
657 return RIO_GET_BLOCK_ID(reg_val);
658 }
659}
660
661/**
336 * rio_mport_get_feature - query for devices' extended features 662 * rio_mport_get_feature - query for devices' extended features
337 * @port: Master port to issue transaction 663 * @port: Master port to issue transaction
338 * @local: Indicate a local master port or remote device access 664 * @local: Indicate a local master port or remote device access
@@ -451,6 +777,111 @@ struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
451 return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from); 777 return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
452} 778}
453 779
780/**
781 * rio_std_route_add_entry - Add switch route table entry using standard
782 * registers defined in RIO specification rev.1.3
783 * @mport: Master port to issue transaction
784 * @destid: Destination ID of the device
785 * @hopcount: Number of switch hops to the device
786 * @table: routing table ID (global or port-specific)
787 * @route_destid: destID entry in the RT
788 * @route_port: destination port for specified destID
789 */
790int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
791 u16 table, u16 route_destid, u8 route_port)
792{
793 if (table == RIO_GLOBAL_TABLE) {
794 rio_mport_write_config_32(mport, destid, hopcount,
795 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
796 (u32)route_destid);
797 rio_mport_write_config_32(mport, destid, hopcount,
798 RIO_STD_RTE_CONF_PORT_SEL_CSR,
799 (u32)route_port);
800 }
801
802 udelay(10);
803 return 0;
804}
805
806/**
807 * rio_std_route_get_entry - Read switch route table entry (port number)
808 * assosiated with specified destID using standard registers defined in RIO
809 * specification rev.1.3
810 * @mport: Master port to issue transaction
811 * @destid: Destination ID of the device
812 * @hopcount: Number of switch hops to the device
813 * @table: routing table ID (global or port-specific)
814 * @route_destid: destID entry in the RT
815 * @route_port: returned destination port for specified destID
816 */
817int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
818 u16 table, u16 route_destid, u8 *route_port)
819{
820 u32 result;
821
822 if (table == RIO_GLOBAL_TABLE) {
823 rio_mport_write_config_32(mport, destid, hopcount,
824 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
825 rio_mport_read_config_32(mport, destid, hopcount,
826 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
827
828 *route_port = (u8)result;
829 }
830
831 return 0;
832}
833
834/**
835 * rio_std_route_clr_table - Clear switch route table using standard registers
836 * defined in RIO specification rev.1.3.
837 * @mport: Master port to issue transaction
838 * @local: Indicate a local master port or remote device access
839 * @destid: Destination ID of the device
840 * @hopcount: Number of switch hops to the device
841 * @table: routing table ID (global or port-specific)
842 */
843int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
844 u16 table)
845{
846 u32 max_destid = 0xff;
847 u32 i, pef, id_inc = 1, ext_cfg = 0;
848 u32 port_sel = RIO_INVALID_ROUTE;
849
850 if (table == RIO_GLOBAL_TABLE) {
851 rio_mport_read_config_32(mport, destid, hopcount,
852 RIO_PEF_CAR, &pef);
853
854 if (mport->sys_size) {
855 rio_mport_read_config_32(mport, destid, hopcount,
856 RIO_SWITCH_RT_LIMIT,
857 &max_destid);
858 max_destid &= RIO_RT_MAX_DESTID;
859 }
860
861 if (pef & RIO_PEF_EXT_RT) {
862 ext_cfg = 0x80000000;
863 id_inc = 4;
864 port_sel = (RIO_INVALID_ROUTE << 24) |
865 (RIO_INVALID_ROUTE << 16) |
866 (RIO_INVALID_ROUTE << 8) |
867 RIO_INVALID_ROUTE;
868 }
869
870 for (i = 0; i <= max_destid;) {
871 rio_mport_write_config_32(mport, destid, hopcount,
872 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
873 ext_cfg | i);
874 rio_mport_write_config_32(mport, destid, hopcount,
875 RIO_STD_RTE_CONF_PORT_SEL_CSR,
876 port_sel);
877 i += id_inc;
878 }
879 }
880
881 udelay(10);
882 return 0;
883}
884
454static void rio_fixup_device(struct rio_dev *dev) 885static void rio_fixup_device(struct rio_dev *dev)
455{ 886{
456} 887}
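Together, the rev.1.3 standard-register helpers above let generic code program any compliant switch. A minimal sketch (mport, destid and hopcount assumed in scope; the destID 5 to port 2 mapping is invented):

	u8 rport;

	/* Route destID 5 out of switch port 2, then read the entry back. */
	rio_std_route_add_entry(mport, destid, hopcount,
				RIO_GLOBAL_TABLE, 5, 2);
	rio_std_route_get_entry(mport, destid, hopcount,
				RIO_GLOBAL_TABLE, 5, &rport);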
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 7786d02581f2..f27b7a9c47d2 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -18,38 +18,50 @@
18 18
19extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid, 19extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
20 u8 hopcount, int ftr); 20 u8 hopcount, int ftr);
21extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
22 u16 destid, u8 hopcount);
23extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
24 u8 hopcount, u32 from);
21extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); 25extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
22extern int rio_enum_mport(struct rio_mport *mport); 26extern int rio_enum_mport(struct rio_mport *mport);
23extern int rio_disc_mport(struct rio_mport *mport); 27extern int rio_disc_mport(struct rio_mport *mport);
28extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid,
29 u8 hopcount, u16 table, u16 route_destid,
30 u8 route_port);
31extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid,
32 u8 hopcount, u16 table, u16 route_destid,
33 u8 *route_port);
34extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid,
35 u8 hopcount, u16 table);
36extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
24 37
25/* Structures internal to the RIO core code */ 38/* Structures internal to the RIO core code */
26extern struct device_attribute rio_dev_attrs[]; 39extern struct device_attribute rio_dev_attrs[];
27extern spinlock_t rio_global_list_lock; 40extern spinlock_t rio_global_list_lock;
28 41
29extern struct rio_route_ops __start_rio_route_ops[]; 42extern struct rio_switch_ops __start_rio_switch_ops[];
30extern struct rio_route_ops __end_rio_route_ops[]; 43extern struct rio_switch_ops __end_rio_switch_ops[];
31 44
32/* Helpers internal to the RIO core code */ 45/* Helpers internal to the RIO core code */
33#define DECLARE_RIO_ROUTE_SECTION(section, vid, did, add_hook, get_hook) \ 46#define DECLARE_RIO_SWITCH_SECTION(section, name, vid, did, init_hook) \
34 static struct rio_route_ops __rio_route_ops __used \ 47 static const struct rio_switch_ops __rio_switch_##name __used \
35 __section(section)= { vid, did, add_hook, get_hook }; 48 __section(section) = { vid, did, init_hook };
36 49
37/** 50/**
38 * DECLARE_RIO_ROUTE_OPS - Registers switch routing operations 51 * DECLARE_RIO_SWITCH_INIT - Registers switch initialization routine
39 * @vid: RIO vendor ID 52 * @vid: RIO vendor ID
40 * @did: RIO device ID 53 * @did: RIO device ID
41 * @add_hook: Callback that adds a route entry 54 * @init_hook: Callback that performs switch-specific initialization
42 * @get_hook: Callback that gets a route entry
43 * 55 *
44 * Manipulating switch route tables in RIO is switch specific. This 56 * Manipulating switch route tables and error management in RIO
45 * registers a switch by vendor and device ID with two callbacks for 57 * is switch specific. This registers a switch by vendor and device ID with
46 * modifying and retrieving route entries in a switch. A &struct 58 * initialization callback for setting up switch operations and (if required)
47 * rio_route_ops is initialized with the ops and placed into a 59 * hardware initialization. A &struct rio_switch_ops is initialized with
48 * RIO-specific kernel section. 60 * pointer to the init routine and placed into a RIO-specific kernel section.
49 */ 61 */
50#define DECLARE_RIO_ROUTE_OPS(vid, did, add_hook, get_hook) \ 62#define DECLARE_RIO_SWITCH_INIT(vid, did, init_hook) \
51 DECLARE_RIO_ROUTE_SECTION(.rio_route_ops, \ 63 DECLARE_RIO_SWITCH_SECTION(.rio_switch_ops, vid##did, \
52 vid, did, add_hook, get_hook) 64 vid, did, init_hook)
53 65
54#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16)) 66#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
55#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16)) 67#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
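With the reworked macro, a new switch driver reduces to one init hook plus a registration line. A hypothetical example (the ACME vendor/device IDs do not exist in rio_ids.h; the standard rev.1.3 routines declared above serve as the ops):

/* Hypothetical switch driver using the new registration macro. */
static int acme_switch_init(struct rio_dev *rdev, int do_enum)
{
	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
	rdev->rswitch->add_entry = rio_std_route_add_entry;
	rdev->rswitch->get_entry = rio_std_route_get_entry;
	rdev->rswitch->clr_table = rio_std_route_clr_table;
	return 0;
}

DECLARE_RIO_SWITCH_INIT(RIO_VID_ACME, RIO_DID_ACME16, acme_switch_init);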
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
new file mode 100644
index 000000000000..2b4e9b2b6631
--- /dev/null
+++ b/drivers/rapidio/switches/Kconfig
@@ -0,0 +1,28 @@
1#
2# RapidIO switches configuration
3#
4config RAPIDIO_TSI57X
5 bool "IDT Tsi57x SRIO switches support"
6 depends on RAPIDIO
7 ---help---
8 Includes support for IDT Tsi57x family of serial RapidIO switches.
9
10config RAPIDIO_CPS_XX
11 bool "IDT CPS-xx SRIO switches support"
12 depends on RAPIDIO
13 ---help---
14 Includes support for IDT CPS-16/12/10/8 serial RapidIO switches.
15
16config RAPIDIO_TSI568
17 bool "Tsi568 SRIO switch support"
18 depends on RAPIDIO
19 default n
20 ---help---
21 Includes support for IDT Tsi568 serial RapidIO switch.
22
23config RAPIDIO_TSI500
24 bool "Tsi500 Parallel RapidIO switch support"
25 depends on RAPIDIO
26 default n
27 ---help---
28 Includes support for IDT Tsi500 parallel RapidIO switch.
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index b924f8301761..fe4adc3e8d5f 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -2,4 +2,11 @@
2# Makefile for RIO switches 2# Makefile for RIO switches
3# 3#
4 4
5obj-$(CONFIG_RAPIDIO) += tsi500.o 5obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o
6obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
7obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
8obj-$(CONFIG_RAPIDIO_TSI500) += tsi500.o
9
10ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
11EXTRA_CFLAGS += -DDEBUG
12endif
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
new file mode 100644
index 000000000000..2c790c144f89
--- /dev/null
+++ b/drivers/rapidio/switches/idtcps.c
@@ -0,0 +1,137 @@
1/*
2 * IDT CPS RapidIO switches support
3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/rio.h>
14#include <linux/rio_drv.h>
15#include <linux/rio_ids.h>
16#include "../rio.h"
17
18#define CPS_DEFAULT_ROUTE 0xde
19#define CPS_NO_ROUTE 0xdf
20
21#define IDTCPS_RIO_DOMAIN 0xf20020
22
23static int
24idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
25 u16 table, u16 route_destid, u8 route_port)
26{
27 u32 result;
28
29 if (table == RIO_GLOBAL_TABLE) {
30 rio_mport_write_config_32(mport, destid, hopcount,
31 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
32
33 rio_mport_read_config_32(mport, destid, hopcount,
34 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
35
36 result = (0xffffff00 & result) | (u32)route_port;
37 rio_mport_write_config_32(mport, destid, hopcount,
38 RIO_STD_RTE_CONF_PORT_SEL_CSR, result);
39 }
40
41 return 0;
42}
43
44static int
45idtcps_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
46 u16 table, u16 route_destid, u8 *route_port)
47{
48 u32 result;
49
50 if (table == RIO_GLOBAL_TABLE) {
51 rio_mport_write_config_32(mport, destid, hopcount,
52 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
53
54 rio_mport_read_config_32(mport, destid, hopcount,
55 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
56
57 if (CPS_DEFAULT_ROUTE == (u8)result ||
58 CPS_NO_ROUTE == (u8)result)
59 *route_port = RIO_INVALID_ROUTE;
60 else
61 *route_port = (u8)result;
62 }
63
64 return 0;
65}
66
67static int
68idtcps_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
69 u16 table)
70{
71 u32 i;
72
73 if (table == RIO_GLOBAL_TABLE) {
74 for (i = 0x80000000; i <= 0x800000ff;) {
75 rio_mport_write_config_32(mport, destid, hopcount,
76 RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
77 rio_mport_write_config_32(mport, destid, hopcount,
78 RIO_STD_RTE_CONF_PORT_SEL_CSR,
79 (CPS_DEFAULT_ROUTE << 24) |
80 (CPS_DEFAULT_ROUTE << 16) |
81 (CPS_DEFAULT_ROUTE << 8) | CPS_DEFAULT_ROUTE);
82 i += 4;
83 }
84 }
85
86 return 0;
87}
88
89static int
90idtcps_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
91 u8 sw_domain)
92{
93 /*
94 * Switch domain configuration operates only at global level
95 */
96 rio_mport_write_config_32(mport, destid, hopcount,
97 IDTCPS_RIO_DOMAIN, (u32)sw_domain);
98 return 0;
99}
100
101static int
102idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
103 u8 *sw_domain)
104{
105 u32 regval;
106
107 /*
108 * Switch domain configuration operates only at global level
109 */
110 rio_mport_read_config_32(mport, destid, hopcount,
111 IDTCPS_RIO_DOMAIN, &regval);
112
113 *sw_domain = (u8)(regval & 0xff);
114
115 return 0;
116}
117
118static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
119{
120 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
121 rdev->rswitch->add_entry = idtcps_route_add_entry;
122 rdev->rswitch->get_entry = idtcps_route_get_entry;
123 rdev->rswitch->clr_table = idtcps_route_clr_table;
124 rdev->rswitch->set_domain = idtcps_set_domain;
125 rdev->rswitch->get_domain = idtcps_get_domain;
126 rdev->rswitch->em_init = NULL;
127 rdev->rswitch->em_handle = NULL;
128
129 return 0;
130}
131
132DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS6Q, idtcps_switch_init);
133DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS8, idtcps_switch_init);
134DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS10Q, idtcps_switch_init);
135DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS12, idtcps_switch_init);
136DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS16, idtcps_switch_init);
137DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDT70K200, idtcps_switch_init);
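idtcps programs routes through the RapidIO standard routing CSR pair: a maintenance write of the destination ID into RIO_STD_RTE_CONF_DESTID_SEL_CSR selects the table entry, and the following access to RIO_STD_RTE_CONF_PORT_SEL_CSR reads or writes it. Judging from clr_table filling all four byte lanes with CPS_DEFAULT_ROUTE, the CPS port-select word packs four 8-bit entries, which is why route_add_entry read-modify-writes only the low byte. A minimal sketch of the read side, using only the accessors already shown (illustrative helper, not part of the patch):

	static int rio_std_route_peek(struct rio_mport *mport, u16 destid,
				      u8 hopcount, u16 route_destid, u8 *port)
	{
		u32 result;

		/* Select the table entry, then read the packed port word */
		rio_mport_write_config_32(mport, destid, hopcount,
					  RIO_STD_RTE_CONF_DESTID_SEL_CSR,
					  route_destid);
		rio_mport_read_config_32(mport, destid, hopcount,
					 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
		*port = (u8)result;	/* low byte is the egress port */
		return 0;
	}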
diff --git a/drivers/rapidio/switches/tsi500.c b/drivers/rapidio/switches/tsi500.c
index c77c23bd9840..914eddd5aa42 100644
--- a/drivers/rapidio/switches/tsi500.c
+++ b/drivers/rapidio/switches/tsi500.c
@@ -1,6 +1,10 @@
1/* 1/*
2 * RapidIO Tsi500 switch support 2 * RapidIO Tsi500 switch support
3 * 3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * - Modified switch operations initialization.
7 *
4 * Copyright 2005 MontaVista Software, Inc. 8 * Copyright 2005 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org> 9 * Matt Porter <mporter@kernel.crashing.org>
6 * 10 *
@@ -57,4 +61,18 @@ tsi500_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 tab
57 return ret; 61 return ret;
58} 62}
59 63
60DECLARE_RIO_ROUTE_OPS(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_route_add_entry, tsi500_route_get_entry); 64static int tsi500_switch_init(struct rio_dev *rdev, int do_enum)
65{
66 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
67 rdev->rswitch->add_entry = tsi500_route_add_entry;
68 rdev->rswitch->get_entry = tsi500_route_get_entry;
69 rdev->rswitch->clr_table = NULL;
70 rdev->rswitch->set_domain = NULL;
71 rdev->rswitch->get_domain = NULL;
72 rdev->rswitch->em_init = NULL;
73 rdev->rswitch->em_handle = NULL;
74
75 return 0;
76}
77
78DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_switch_init);
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
new file mode 100644
index 000000000000..f7fd7898606e
--- /dev/null
+++ b/drivers/rapidio/switches/tsi568.c
@@ -0,0 +1,146 @@
1/*
2 * RapidIO Tsi568 switch support
3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * - Added EM support
7 * - Modified switch operations initialization.
8 *
9 * Copyright 2005 MontaVista Software, Inc.
10 * Matt Porter <mporter@kernel.crashing.org>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/rio.h>
19#include <linux/rio_drv.h>
20#include <linux/rio_ids.h>
21#include <linux/delay.h>
22#include "../rio.h"
23
24/* Global (broadcast) route registers */
25#define SPBC_ROUTE_CFG_DESTID 0x10070
26#define SPBC_ROUTE_CFG_PORT 0x10074
27
28/* Per port route registers */
29#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
30#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
31
32#define TSI568_SP_MODE_BC 0x10004
33#define TSI568_SP_MODE_PW_DIS 0x08000000
34
35static int
36tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
37 u16 table, u16 route_destid, u8 route_port)
38{
39 if (table == RIO_GLOBAL_TABLE) {
40 rio_mport_write_config_32(mport, destid, hopcount,
41 SPBC_ROUTE_CFG_DESTID, route_destid);
42 rio_mport_write_config_32(mport, destid, hopcount,
43 SPBC_ROUTE_CFG_PORT, route_port);
44 } else {
45 rio_mport_write_config_32(mport, destid, hopcount,
46 SPP_ROUTE_CFG_DESTID(table),
47 route_destid);
48 rio_mport_write_config_32(mport, destid, hopcount,
49 SPP_ROUTE_CFG_PORT(table), route_port);
50 }
51
52 udelay(10);
53
54 return 0;
55}
56
57static int
58tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
59 u16 table, u16 route_destid, u8 *route_port)
60{
61 int ret = 0;
62 u32 result;
63
64 if (table == RIO_GLOBAL_TABLE) {
65 rio_mport_write_config_32(mport, destid, hopcount,
66 SPBC_ROUTE_CFG_DESTID, route_destid);
67 rio_mport_read_config_32(mport, destid, hopcount,
68 SPBC_ROUTE_CFG_PORT, &result);
69 } else {
70 rio_mport_write_config_32(mport, destid, hopcount,
71 SPP_ROUTE_CFG_DESTID(table),
72 route_destid);
73 rio_mport_read_config_32(mport, destid, hopcount,
74 SPP_ROUTE_CFG_PORT(table), &result);
75 }
76
77 *route_port = result;
78 if (*route_port > 15)
79 ret = -1;
80
81 return ret;
82}
83
84static int
85tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
86 u16 table)
87{
88 u32 route_idx;
89 u32 lut_size;
90
91 lut_size = (mport->sys_size) ? 0x1ff : 0xff;
92
93 if (table == RIO_GLOBAL_TABLE) {
94 rio_mport_write_config_32(mport, destid, hopcount,
95 SPBC_ROUTE_CFG_DESTID, 0x80000000);
96 for (route_idx = 0; route_idx <= lut_size; route_idx++)
97 rio_mport_write_config_32(mport, destid, hopcount,
98 SPBC_ROUTE_CFG_PORT,
99 RIO_INVALID_ROUTE);
100 } else {
101 rio_mport_write_config_32(mport, destid, hopcount,
102 SPP_ROUTE_CFG_DESTID(table),
103 0x80000000);
104 for (route_idx = 0; route_idx <= lut_size; route_idx++)
105 rio_mport_write_config_32(mport, destid, hopcount,
106 SPP_ROUTE_CFG_PORT(table),
107 RIO_INVALID_ROUTE);
108 }
109
110 return 0;
111}
112
113static int
114tsi568_em_init(struct rio_dev *rdev)
115{
116 struct rio_mport *mport = rdev->net->hport;
117 u16 destid = rdev->rswitch->destid;
118 u8 hopcount = rdev->rswitch->hopcount;
119 u32 regval;
120
121 pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
122
123 /* Make sure that Port-Writes are disabled (for all ports) */
124 rio_mport_read_config_32(mport, destid, hopcount,
125 TSI568_SP_MODE_BC, &regval);
126 rio_mport_write_config_32(mport, destid, hopcount,
127 TSI568_SP_MODE_BC, regval | TSI568_SP_MODE_PW_DIS);
128
129 return 0;
130}
131
132static int tsi568_switch_init(struct rio_dev *rdev, int do_enum)
133{
134 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
135 rdev->rswitch->add_entry = tsi568_route_add_entry;
136 rdev->rswitch->get_entry = tsi568_route_get_entry;
137 rdev->rswitch->clr_table = tsi568_route_clr_table;
138 rdev->rswitch->set_domain = NULL;
139 rdev->rswitch->get_domain = NULL;
140 rdev->rswitch->em_init = tsi568_em_init;
141 rdev->rswitch->em_handle = NULL;
142
143 return 0;
144}
145
146DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI568, tsi568_switch_init);
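Tsi568 registers no em_handle, so its em_init goes the opposite way from Tsi57x: it sets TSI568_SP_MODE_PW_DIS in the broadcast SP_MODE register, suppressing port-write events from every port with one read-modify-write. The same RMW pattern recurs throughout these drivers; as a sketch (hypothetical helper, not in the patch):

	static void rio_maint_set_bits(struct rio_mport *mport, u16 destid,
				       u8 hopcount, u32 offset, u32 bits)
	{
		u32 regval;

		rio_mport_read_config_32(mport, destid, hopcount, offset, &regval);
		rio_mport_write_config_32(mport, destid, hopcount, offset,
					  regval | bits);
	}

With it, tsi568_em_init would reduce to rio_maint_set_bits(mport, destid, hopcount, TSI568_SP_MODE_BC, TSI568_SP_MODE_PW_DIS).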
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
new file mode 100644
index 000000000000..d34df722d95f
--- /dev/null
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -0,0 +1,315 @@
1/*
2 * RapidIO Tsi57x switch family support
3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * - Added EM support
7 * - Modified switch operations initialization.
8 *
9 * Copyright 2005 MontaVista Software, Inc.
10 * Matt Porter <mporter@kernel.crashing.org>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/rio.h>
19#include <linux/rio_drv.h>
20#include <linux/rio_ids.h>
21#include <linux/delay.h>
22#include "../rio.h"
23
24/* Global (broadcast) route registers */
25#define SPBC_ROUTE_CFG_DESTID 0x10070
26#define SPBC_ROUTE_CFG_PORT 0x10074
27
28/* Per port route registers */
29#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
30#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
31
32#define TSI578_SP_MODE(n) (0x11004 + n*0x100)
33#define TSI578_SP_MODE_GLBL 0x10004
34#define TSI578_SP_MODE_PW_DIS 0x08000000
35#define TSI578_SP_MODE_LUT_512 0x01000000
36
37#define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100)
38#define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100)
39#define TSI578_SP_CS_TX(n) (0x13014 + n*0x100)
40#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100)
41
42#define TSI578_GLBL_ROUTE_BASE 0x10078
43
44static int
45tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
46 u16 table, u16 route_destid, u8 route_port)
47{
48 if (table == RIO_GLOBAL_TABLE) {
49 rio_mport_write_config_32(mport, destid, hopcount,
50 SPBC_ROUTE_CFG_DESTID, route_destid);
51 rio_mport_write_config_32(mport, destid, hopcount,
52 SPBC_ROUTE_CFG_PORT, route_port);
53 } else {
54 rio_mport_write_config_32(mport, destid, hopcount,
55 SPP_ROUTE_CFG_DESTID(table), route_destid);
56 rio_mport_write_config_32(mport, destid, hopcount,
57 SPP_ROUTE_CFG_PORT(table), route_port);
58 }
59
60 udelay(10);
61
62 return 0;
63}
64
65static int
66tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
67 u16 table, u16 route_destid, u8 *route_port)
68{
69 int ret = 0;
70 u32 result;
71
72 if (table == RIO_GLOBAL_TABLE) {
73 /* Use local RT of the ingress port to avoid a possible
74 race condition */
75 rio_mport_read_config_32(mport, destid, hopcount,
76 RIO_SWP_INFO_CAR, &result);
77 table = (result & RIO_SWP_INFO_PORT_NUM_MASK);
78 }
79
80 rio_mport_write_config_32(mport, destid, hopcount,
81 SPP_ROUTE_CFG_DESTID(table), route_destid);
82 rio_mport_read_config_32(mport, destid, hopcount,
83 SPP_ROUTE_CFG_PORT(table), &result);
84
85 *route_port = (u8)result;
86 if (*route_port > 15)
87 ret = -1;
88
89 return ret;
90}
91
92static int
93tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
94 u16 table)
95{
96 u32 route_idx;
97 u32 lut_size;
98
99 lut_size = (mport->sys_size) ? 0x1ff : 0xff;
100
101 if (table == RIO_GLOBAL_TABLE) {
102 rio_mport_write_config_32(mport, destid, hopcount,
103 SPBC_ROUTE_CFG_DESTID, 0x80000000);
104 for (route_idx = 0; route_idx <= lut_size; route_idx++)
105 rio_mport_write_config_32(mport, destid, hopcount,
106 SPBC_ROUTE_CFG_PORT,
107 RIO_INVALID_ROUTE);
108 } else {
109 rio_mport_write_config_32(mport, destid, hopcount,
110 SPP_ROUTE_CFG_DESTID(table), 0x80000000);
111 for (route_idx = 0; route_idx <= lut_size; route_idx++)
112 rio_mport_write_config_32(mport, destid, hopcount,
113 SPP_ROUTE_CFG_PORT(table), RIO_INVALID_ROUTE);
114 }
115
116 return 0;
117}
118
119static int
120tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
121 u8 sw_domain)
122{
123 u32 regval;
124
125 /*
126 * Switch domain configuration operates only at global level
127 */
128
129 /* Turn off flat (LUT_512) mode */
130 rio_mport_read_config_32(mport, destid, hopcount,
131 TSI578_SP_MODE_GLBL, &regval);
132 rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL,
133 regval & ~TSI578_SP_MODE_LUT_512);
134 /* Set switch domain base */
135 rio_mport_write_config_32(mport, destid, hopcount,
136 TSI578_GLBL_ROUTE_BASE,
137 (u32)(sw_domain << 24));
138 return 0;
139}
140
141static int
142tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
143 u8 *sw_domain)
144{
145 u32 regval;
146
147 /*
148 * Switch domain configuration operates only at global level
149 */
150 rio_mport_read_config_32(mport, destid, hopcount,
151 TSI578_GLBL_ROUTE_BASE, &regval);
152
153 *sw_domain = (u8)(regval >> 24);
154
155 return 0;
156}
157
158static int
159tsi57x_em_init(struct rio_dev *rdev)
160{
161 struct rio_mport *mport = rdev->net->hport;
162 u16 destid = rdev->rswitch->destid;
163 u8 hopcount = rdev->rswitch->hopcount;
164 u32 regval;
165 int portnum;
166
167 pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
168
169 for (portnum = 0; portnum < 16; portnum++) {
170 /* Make sure that Port-Writes are enabled (for all ports) */
171 rio_mport_read_config_32(mport, destid, hopcount,
172 TSI578_SP_MODE(portnum), &regval);
173 rio_mport_write_config_32(mport, destid, hopcount,
174 TSI578_SP_MODE(portnum),
175 regval & ~TSI578_SP_MODE_PW_DIS);
176
177 /* Clear all pending interrupts */
178 rio_mport_read_config_32(mport, destid, hopcount,
179 rdev->phys_efptr +
180 RIO_PORT_N_ERR_STS_CSR(portnum),
181 &regval);
182 rio_mport_write_config_32(mport, destid, hopcount,
183 rdev->phys_efptr +
184 RIO_PORT_N_ERR_STS_CSR(portnum),
185 regval & 0x07120214);
186
187 rio_mport_read_config_32(mport, destid, hopcount,
188 TSI578_SP_INT_STATUS(portnum), &regval);
189 rio_mport_write_config_32(mport, destid, hopcount,
190 TSI578_SP_INT_STATUS(portnum),
191 regval & 0x000700bd);
192
193 /* Enable all interrupts to allow ports to send a port-write */
194 rio_mport_read_config_32(mport, destid, hopcount,
195 TSI578_SP_CTL_INDEP(portnum), &regval);
196 rio_mport_write_config_32(mport, destid, hopcount,
197 TSI578_SP_CTL_INDEP(portnum),
198 regval | 0x000b0000);
199
200 /* Skip next (odd) port if the current port is in x4 mode */
201 rio_mport_read_config_32(mport, destid, hopcount,
202 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
203 &regval);
204 if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
205 portnum++;
206 }
207
208 return 0;
209}
210
211static int
212tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
213{
214 struct rio_mport *mport = rdev->net->hport;
215 u16 destid = rdev->rswitch->destid;
216 u8 hopcount = rdev->rswitch->hopcount;
217 u32 intstat, err_status;
218 int sendcount, checkcount;
219 u8 route_port;
220 u32 regval;
221
222 rio_mport_read_config_32(mport, destid, hopcount,
223 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
224 &err_status);
225
226 if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) &&
227 (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
228 RIO_PORT_N_ERR_STS_PW_INP_ES))) {
229 /* Remove any queued packets by locking/unlocking port */
230 rio_mport_read_config_32(mport, destid, hopcount,
231 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
232 &regval);
233 if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
234 rio_mport_write_config_32(mport, destid, hopcount,
235 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
236 regval | RIO_PORT_N_CTL_LOCKOUT);
237 udelay(50);
238 rio_mport_write_config_32(mport, destid, hopcount,
239 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
240 regval);
241 }
242
243 /* Read from link maintenance response register to clear
244 * valid bit
245 */
246 rio_mport_read_config_32(mport, destid, hopcount,
247 rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
248 &regval);
249
250 /* Send a Packet-Not-Accepted/Link-Request-Input-Status control
251 * symbol to recover from IES/OES
252 */
253 sendcount = 3;
254 while (sendcount) {
255 rio_mport_write_config_32(mport, destid, hopcount,
256 TSI578_SP_CS_TX(portnum), 0x40fc8000);
257 checkcount = 3;
258 while (checkcount--) {
259 udelay(50);
260 rio_mport_read_config_32(
261 mport, destid, hopcount,
262 rdev->phys_efptr +
263 RIO_PORT_N_MNT_RSP_CSR(portnum),
264 &regval);
265 if (regval & RIO_PORT_N_MNT_RSP_RVAL)
266 goto exit_es;
267 }
268
269 sendcount--;
270 }
271 }
272
273exit_es:
274 /* Clear implementation specific error status bits */
275 rio_mport_read_config_32(mport, destid, hopcount,
276 TSI578_SP_INT_STATUS(portnum), &intstat);
277 pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n",
278 destid, hopcount, portnum, intstat);
279
280 if (intstat & 0x10000) {
281 rio_mport_read_config_32(mport, destid, hopcount,
282 TSI578_SP_LUT_PEINF(portnum), &regval);
283 regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);
284 route_port = rdev->rswitch->route_table[regval];
285 pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",
286 rio_name(rdev), portnum, regval);
287 tsi57x_route_add_entry(mport, destid, hopcount,
288 RIO_GLOBAL_TABLE, regval, route_port);
289 }
290
291 rio_mport_write_config_32(mport, destid, hopcount,
292 TSI578_SP_INT_STATUS(portnum),
293 intstat & 0x000700bd);
294
295 return 0;
296}
297
298static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum)
299{
300 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
301 rdev->rswitch->add_entry = tsi57x_route_add_entry;
302 rdev->rswitch->get_entry = tsi57x_route_get_entry;
303 rdev->rswitch->clr_table = tsi57x_route_clr_table;
304 rdev->rswitch->set_domain = tsi57x_set_domain;
305 rdev->rswitch->get_domain = tsi57x_get_domain;
306 rdev->rswitch->em_init = tsi57x_em_init;
307 rdev->rswitch->em_handle = tsi57x_em_handler;
308
309 return 0;
310}
311
312DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI572, tsi57x_switch_init);
313DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI574, tsi57x_switch_init);
314DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI577, tsi57x_switch_init);
315DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI578, tsi57x_switch_init);
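The error-stopped recovery in tsi57x_em_handler is a bounded retry/poll loop: up to three link-request control symbols are pushed through SP_CS_TX, and after each one the maintenance-response CSR is polled up to three times at 50 us intervals for the valid bit, so the busy-wait is capped at roughly 3 * 3 * 50 us = 450 us (plus the 50 us lockout pulse). The polling half, factored out as a sketch (hypothetical helper, not in the patch):

	static int tsi57x_poll_mnt_rsp(struct rio_mport *mport, u16 destid,
				       u8 hopcount, u32 rsp_csr, int polls)
	{
		u32 regval;

		while (polls--) {
			udelay(50);
			rio_mport_read_config_32(mport, destid, hopcount,
						 rsp_csr, &regval);
			if (regval & RIO_PORT_N_MNT_RSP_RVAL)
				return 0;	/* response valid */
		}
		return -ETIME;			/* caller may retry or give up */
	}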
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 5fb83e2ced25..7d149a8d8d9b 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -23,9 +23,9 @@ struct pm8607_regulator_info {
23 struct regulator_dev *regulator; 23 struct regulator_dev *regulator;
24 struct i2c_client *i2c; 24 struct i2c_client *i2c;
25 25
26 int min_uV; 26 unsigned int *vol_table;
27 int max_uV; 27 unsigned int *vol_suspend;
28 int step_uV; 28
29 int vol_reg; 29 int vol_reg;
30 int vol_shift; 30 int vol_shift;
31 int vol_nbits; 31 int vol_nbits;
@@ -36,83 +36,189 @@ struct pm8607_regulator_info {
36 int slope_double; 36 int slope_double;
37}; 37};
38 38
39static inline int check_range(struct pm8607_regulator_info *info, 39static const unsigned int BUCK1_table[] = {
40 int min_uV, int max_uV) 40 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000,
41{ 41 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000,
42 if (max_uV < info->min_uV || min_uV > info->max_uV || min_uV > max_uV) 42 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000,
43 return -EINVAL; 43 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000,
44 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
45 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
46 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
47 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
48};
44 49
45 return 0; 50static const unsigned int BUCK1_suspend_table[] = {
46} 51 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
52 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
53 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
54 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
55 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000,
56 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
57 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000,
58 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000,
59};
60
61static const unsigned int BUCK2_table[] = {
62 0, 50000, 100000, 150000, 200000, 250000, 300000, 350000,
63 400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000,
64 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
65 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000,
66 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000,
67 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, 2350000,
68 2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000,
69 2800000, 2850000, 2900000, 2950000, 3000000, 3000000, 3000000, 3000000,
70};
71
72static const unsigned int BUCK2_suspend_table[] = {
73 0, 50000, 100000, 150000, 200000, 250000, 300000, 350000,
74 400000, 450000, 500000, 550000, 600000, 650000, 700000, 750000,
75 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
76 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000,
77 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 1950000,
78 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000, 2350000,
79 2400000, 2450000, 2500000, 2550000, 2600000, 2650000, 2700000, 2750000,
80 2800000, 2850000, 2900000, 2950000, 3000000, 3000000, 3000000, 3000000,
81};
82
83static const unsigned int BUCK3_table[] = {
84 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
85 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
86 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
87 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
88 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000,
89 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
90 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000,
91 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000,
92};
93
94static const unsigned int BUCK3_suspend_table[] = {
95 0, 25000, 50000, 75000, 100000, 125000, 150000, 175000,
96 200000, 225000, 250000, 275000, 300000, 325000, 350000, 375000,
97 400000, 425000, 450000, 475000, 500000, 525000, 550000, 575000,
98 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000,
99 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000,
100 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
101 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000,
102 1400000, 1425000, 1450000, 1475000, 1500000, 1500000, 1500000, 1500000,
103};
104
105static const unsigned int LDO1_table[] = {
106 1800000, 1200000, 2800000, 0,
107};
108
109static const unsigned int LDO1_suspend_table[] = {
110 1800000, 1200000, 0, 0,
111};
112
113static const unsigned int LDO2_table[] = {
114 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
115};
116
117static const unsigned int LDO2_suspend_table[] = {
118 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
119};
120
121static const unsigned int LDO3_table[] = {
122 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
123};
124
125static const unsigned int LDO3_suspend_table[] = {
126 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
127};
128
129static const unsigned int LDO4_table[] = {
130 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2900000, 3300000,
131};
132
133static const unsigned int LDO4_suspend_table[] = {
134 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2900000, 2900000,
135};
136
137static const unsigned int LDO5_table[] = {
138 2900000, 3000000, 3100000, 3300000,
139};
140
141static const unsigned int LDO5_suspend_table[] = {
142 2900000, 0, 0, 0,
143};
144
145static const unsigned int LDO6_table[] = {
146 1800000, 1850000, 2600000, 2650000, 2700000, 2750000, 2800000, 3300000,
147};
148
149static const unsigned int LDO6_suspend_table[] = {
150 1800000, 1850000, 2600000, 2650000, 2700000, 2750000, 2800000, 2900000,
151};
152
153static const unsigned int LDO7_table[] = {
154 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
155};
156
157static const unsigned int LDO7_suspend_table[] = {
158 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
159};
160
161static const unsigned int LDO8_table[] = {
162 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
163};
164
165static const unsigned int LDO8_suspend_table[] = {
166 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
167};
168
169static const unsigned int LDO9_table[] = {
170 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
171};
172
173static const unsigned int LDO9_suspend_table[] = {
174 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
175};
176
177static const unsigned int LDO10_table[] = {
178 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 3300000,
179 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
180};
181
182static const unsigned int LDO10_suspend_table[] = {
183 1800000, 1850000, 1900000, 2700000, 2750000, 2800000, 2850000, 2900000,
184 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
185};
186
187static const unsigned int LDO12_table[] = {
188 1800000, 1900000, 2700000, 2800000, 2900000, 3000000, 3100000, 3300000,
189 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
190};
191
192static const unsigned int LDO12_suspend_table[] = {
193 1800000, 1900000, 2700000, 2800000, 2900000, 2900000, 2900000, 2900000,
194 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000, 1200000,
195};
196
197static const unsigned int LDO13_table[] = {
198 1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0, 0,
199};
200
201static const unsigned int LDO13_suspend_table[] = {
202 0,
203};
204
205static const unsigned int LDO14_table[] = {
206 1800000, 1850000, 2700000, 2750000, 2800000, 2850000, 2900000, 3300000,
207};
208
209static const unsigned int LDO14_suspend_table[] = {
210 1800000, 1850000, 2700000, 2750000, 2800000, 2850000, 2900000, 2900000,
211};
47 212
48static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index) 213static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
49{ 214{
50 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); 215 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
51 int ret = -EINVAL; 216 int ret = -EINVAL;
52 217
53 switch (info->desc.id) { 218 if (info->vol_table && (index < (2 << info->vol_nbits))) {
54 case PM8607_ID_BUCK1: 219 ret = info->vol_table[index];
55 ret = (index < 0x1d) ? (index * 25000 + 800000) :
56 ((index < 0x20) ? 1500000 :
57 ((index < 0x40) ? ((index - 0x20) * 25000) :
58 -EINVAL));
59 break;
60 case PM8607_ID_BUCK3:
61 ret = (index < 0x3d) ? (index * 25000) :
62 ((index < 0x40) ? 1500000 : -EINVAL);
63 if (ret < 0)
64 break;
65 if (info->slope_double) 220 if (info->slope_double)
66 ret <<= 1; 221 ret <<= 1;
67 break;
68 case PM8607_ID_LDO1:
69 ret = (index == 0) ? 1800000 :
70 ((index == 1) ? 1200000 :
71 ((index == 2) ? 2800000 : -EINVAL));
72 break;
73 case PM8607_ID_LDO5:
74 ret = (index == 0) ? 2900000 :
75 ((index == 1) ? 3000000 :
76 ((index == 2) ? 3100000 : 3300000));
77 break;
78 case PM8607_ID_LDO7:
79 case PM8607_ID_LDO8:
80 ret = (index < 3) ? (index * 50000 + 1800000) :
81 ((index < 8) ? (index * 50000 + 2550000) :
82 -EINVAL);
83 break;
84 case PM8607_ID_LDO12:
85 ret = (index < 2) ? (index * 100000 + 1800000) :
86 ((index < 7) ? (index * 100000 + 2500000) :
87 ((index == 7) ? 3300000 : 1200000));
88 break;
89 case PM8607_ID_LDO2:
90 case PM8607_ID_LDO3:
91 case PM8607_ID_LDO9:
92 ret = (index < 3) ? (index * 50000 + 1800000) :
93 ((index < 7) ? (index * 50000 + 2550000) :
94 3300000);
95 break;
96 case PM8607_ID_LDO4:
97 ret = (index < 3) ? (index * 50000 + 1800000) :
98 ((index < 6) ? (index * 50000 + 2550000) :
99 ((index == 6) ? 2900000 : 3300000));
100 break;
101 case PM8607_ID_LDO6:
102 ret = (index < 2) ? (index * 50000 + 1800000) :
103 ((index < 7) ? (index * 50000 + 2500000) :
104 3300000);
105 break;
106 case PM8607_ID_LDO10:
107 ret = (index < 3) ? (index * 50000 + 1800000) :
108 ((index < 7) ? (index * 50000 + 2550000) :
109 ((index == 7) ? 3300000 : 1200000));
110 break;
111 case PM8607_ID_LDO14:
112 ret = (index < 2) ? (index * 50000 + 1800000) :
113 ((index < 7) ? (index * 50000 + 2600000) :
114 3300000);
115 break;
116 } 222 }
117 return ret; 223 return ret;
118} 224}
@@ -120,174 +226,26 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
120static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) 226static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
121{ 227{
122 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); 228 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
123 int val = -ENOENT; 229 int i, ret = -ENOENT;
124 int ret;
125 230
126 switch (info->desc.id) { 231 if (info->slope_double) {
127 case PM8607_ID_BUCK1: 232 min_uV = min_uV >> 1;
128 if (min_uV >= 800000) /* 800mV ~ 1500mV / 25mV */ 233 max_uV = max_uV >> 1;
129 val = (min_uV - 775001) / 25000;
130 else { /* 25mV ~ 775mV / 25mV */
131 val = (min_uV + 249999) / 25000;
132 val += 32;
133 }
134 break;
135 case PM8607_ID_BUCK3:
136 if (info->slope_double)
137 min_uV = min_uV >> 1;
138 val = (min_uV + 249999) / 25000; /* 0mV ~ 1500mV / 25mV */
139
140 break;
141 case PM8607_ID_LDO1:
142 if (min_uV > 1800000)
143 val = 2;
144 else if (min_uV > 1200000)
145 val = 0;
146 else
147 val = 1;
148 break;
149 case PM8607_ID_LDO5:
150 if (min_uV > 3100000)
151 val = 3;
152 else /* 2900mV ~ 3100mV / 100mV */
153 val = (min_uV - 2800001) / 100000;
154 break;
155 case PM8607_ID_LDO7:
156 case PM8607_ID_LDO8:
157 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
158 if (min_uV <= 1800000)
159 val = 0; /* 1800mv */
160 else if (min_uV <= 1900000)
161 val = (min_uV - 1750001) / 50000;
162 else
163 val = 3; /* 2700mV */
164 } else { /* 2700mV ~ 2900mV / 50mV */
165 if (min_uV <= 2900000) {
166 val = (min_uV - 2650001) / 50000;
167 val += 3;
168 } else
169 val = -EINVAL;
170 }
171 break;
172 case PM8607_ID_LDO10:
173 if (min_uV > 2850000)
174 val = 7;
175 else if (min_uV <= 1200000)
176 val = 8;
177 else if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
178 val = (min_uV - 1750001) / 50000;
179 else { /* 2700mV ~ 2850mV / 50mV */
180 val = (min_uV - 2650001) / 50000;
181 val += 3;
182 }
183 break;
184 case PM8607_ID_LDO12:
185 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 100mV */
186 if (min_uV <= 1200000)
187 val = 8; /* 1200mV */
188 else if (min_uV <= 1800000)
189 val = 0; /* 1800mV */
190 else if (min_uV <= 1900000)
191 val = (min_uV - 1700001) / 100000;
192 else
193 val = 2; /* 2700mV */
194 } else { /* 2700mV ~ 3100mV / 100mV */
195 if (min_uV <= 3100000) {
196 val = (min_uV - 2600001) / 100000;
197 val += 2;
198 } else if (min_uV <= 3300000)
199 val = 7;
200 else
201 val = -EINVAL;
202 }
203 break;
204 case PM8607_ID_LDO2:
205 case PM8607_ID_LDO3:
206 case PM8607_ID_LDO9:
207 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
208 if (min_uV <= 1800000)
209 val = 0;
210 else if (min_uV <= 1900000)
211 val = (min_uV - 1750001) / 50000;
212 else
213 val = 3; /* 2700mV */
214 } else { /* 2700mV ~ 2850mV / 50mV */
215 if (min_uV <= 2850000) {
216 val = (min_uV - 2650001) / 50000;
217 val += 3;
218 } else if (min_uV <= 3300000)
219 val = 7;
220 else
221 val = -EINVAL;
222 }
223 break;
224 case PM8607_ID_LDO4:
225 if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
226 if (min_uV <= 1800000)
227 val = 0;
228 else if (min_uV <= 1900000)
229 val = (min_uV - 1750001) / 50000;
230 else
231 val = 3; /* 2700mV */
232 } else { /* 2700mV ~ 2800mV / 50mV */
233 if (min_uV <= 2850000) {
234 val = (min_uV - 2650001) / 50000;
235 val += 3;
236 } else if (min_uV <= 2900000)
237 val = 6;
238 else if (min_uV <= 3300000)
239 val = 7;
240 else
241 val = -EINVAL;
242 }
243 break;
244 case PM8607_ID_LDO6:
245 if (min_uV < 2600000) { /* 1800mV ~ 1850mV / 50mV */
246 if (min_uV <= 1800000)
247 val = 0;
248 else if (min_uV <= 1850000)
249 val = (min_uV - 1750001) / 50000;
250 else
251 val = 2; /* 2600mV */
252 } else { /* 2600mV ~ 2800mV / 50mV */
253 if (min_uV <= 2800000) {
254 val = (min_uV - 2550001) / 50000;
255 val += 2;
256 } else if (min_uV <= 3300000)
257 val = 7;
258 else
259 val = -EINVAL;
260 }
261 break;
262 case PM8607_ID_LDO14:
263 if (min_uV < 2700000) { /* 1800mV ~ 1850mV / 50mV */
264 if (min_uV <= 1800000)
265 val = 0;
266 else if (min_uV <= 1850000)
267 val = (min_uV - 1750001) / 50000;
268 else
269 val = 2; /* 2700mV */
270 } else { /* 2700mV ~ 2900mV / 50mV */
271 if (min_uV <= 2900000) {
272 val = (min_uV - 2650001) / 50000;
273 val += 2;
274 } else if (min_uV <= 3300000)
275 val = 7;
276 else
277 val = -EINVAL;
278 }
279 break;
280 } 234 }
281 if (val >= 0) { 235 if (info->vol_table) {
282 ret = pm8607_list_voltage(rdev, val); 236 for (i = 0; i < (2 << info->vol_nbits); i++) {
283 if (ret > max_uV) { 237 if (!info->vol_table[i])
284 pr_err("exceed voltage range (%d %d) uV", 238 break;
285 min_uV, max_uV); 239 if ((min_uV <= info->vol_table[i])
286 return -EINVAL; 240 && (max_uV >= info->vol_table[i])) {
241 ret = i;
242 break;
243 }
287 } 244 }
288 } else 245 }
289 pr_err("invalid voltage range (%d %d) uV", min_uV, max_uV); 246 if (ret < 0)
290 return val; 247 pr_err("invalid voltage range (%d %d) uV\n", min_uV, max_uV);
248 return ret;
291} 249}
292 250
293static int pm8607_set_voltage(struct regulator_dev *rdev, 251static int pm8607_set_voltage(struct regulator_dev *rdev,
@@ -297,7 +255,7 @@ static int pm8607_set_voltage(struct regulator_dev *rdev,
297 uint8_t val, mask; 255 uint8_t val, mask;
298 int ret; 256 int ret;
299 257
300 if (check_range(info, min_uV, max_uV)) { 258 if (min_uV > max_uV) {
301 pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV); 259 pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
302 return -EINVAL; 260 return -EINVAL;
303 } 261 }
@@ -375,18 +333,15 @@ static struct regulator_ops pm8607_regulator_ops = {
375 .is_enabled = pm8607_is_enabled, 333 .is_enabled = pm8607_is_enabled,
376}; 334};
377 335
378#define PM8607_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \ 336#define PM8607_DVC(vreg, nbits, ureg, ubit, ereg, ebit) \
379{ \ 337{ \
380 .desc = { \ 338 .desc = { \
381 .name = "BUCK" #_id, \ 339 .name = #vreg, \
382 .ops = &pm8607_regulator_ops, \ 340 .ops = &pm8607_regulator_ops, \
383 .type = REGULATOR_VOLTAGE, \ 341 .type = REGULATOR_VOLTAGE, \
384 .id = PM8607_ID_BUCK##_id, \ 342 .id = PM8607_ID_##vreg, \
385 .owner = THIS_MODULE, \ 343 .owner = THIS_MODULE, \
386 }, \ 344 }, \
387 .min_uV = (min) * 1000, \
388 .max_uV = (max) * 1000, \
389 .step_uV = (step) * 1000, \
390 .vol_reg = PM8607_##vreg, \ 345 .vol_reg = PM8607_##vreg, \
391 .vol_shift = (0), \ 346 .vol_shift = (0), \
392 .vol_nbits = (nbits), \ 347 .vol_nbits = (nbits), \
@@ -395,9 +350,11 @@ static struct regulator_ops pm8607_regulator_ops = {
395 .enable_reg = PM8607_##ereg, \ 350 .enable_reg = PM8607_##ereg, \
396 .enable_bit = (ebit), \ 351 .enable_bit = (ebit), \
397 .slope_double = (0), \ 352 .slope_double = (0), \
353 .vol_table = (unsigned int *)&vreg##_table, \
354 .vol_suspend = (unsigned int *)&vreg##_suspend_table, \
398} 355}
399 356
400#define PM8607_LDO(_id, min, max, step, vreg, shift, nbits, ereg, ebit) \ 357#define PM8607_LDO(_id, vreg, shift, nbits, ereg, ebit) \
401{ \ 358{ \
402 .desc = { \ 359 .desc = { \
403 .name = "LDO" #_id, \ 360 .name = "LDO" #_id, \
@@ -406,33 +363,34 @@ static struct regulator_ops pm8607_regulator_ops = {
406 .id = PM8607_ID_LDO##_id, \ 363 .id = PM8607_ID_LDO##_id, \
407 .owner = THIS_MODULE, \ 364 .owner = THIS_MODULE, \
408 }, \ 365 }, \
409 .min_uV = (min) * 1000, \
410 .max_uV = (max) * 1000, \
411 .step_uV = (step) * 1000, \
412 .vol_reg = PM8607_##vreg, \ 366 .vol_reg = PM8607_##vreg, \
413 .vol_shift = (shift), \ 367 .vol_shift = (shift), \
414 .vol_nbits = (nbits), \ 368 .vol_nbits = (nbits), \
415 .enable_reg = PM8607_##ereg, \ 369 .enable_reg = PM8607_##ereg, \
416 .enable_bit = (ebit), \ 370 .enable_bit = (ebit), \
417 .slope_double = (0), \ 371 .slope_double = (0), \
372 .vol_table = (unsigned int *)&LDO##_id##_table, \
373 .vol_suspend = (unsigned int *)&LDO##_id##_suspend_table, \
418} 374}
419 375
420static struct pm8607_regulator_info pm8607_regulator_info[] = { 376static struct pm8607_regulator_info pm8607_regulator_info[] = {
421 PM8607_DVC(1, 0, 1500, 25, BUCK1, 6, GO, 0, SUPPLIES_EN11, 0), 377 PM8607_DVC(BUCK1, 6, GO, 0, SUPPLIES_EN11, 0),
422 PM8607_DVC(3, 0, 1500, 25, BUCK3, 6, GO, 2, SUPPLIES_EN11, 2), 378 PM8607_DVC(BUCK2, 6, GO, 1, SUPPLIES_EN11, 1),
423 379 PM8607_DVC(BUCK3, 6, GO, 2, SUPPLIES_EN11, 2),
424 PM8607_LDO(1 , 1200, 2800, 0, LDO1 , 0, 2, SUPPLIES_EN11, 3), 380
425 PM8607_LDO(2 , 1800, 3300, 0, LDO2 , 0, 3, SUPPLIES_EN11, 4), 381 PM8607_LDO( 1, LDO1, 0, 2, SUPPLIES_EN11, 3),
426 PM8607_LDO(3 , 1800, 3300, 0, LDO3 , 0, 3, SUPPLIES_EN11, 5), 382 PM8607_LDO( 2, LDO2, 0, 3, SUPPLIES_EN11, 4),
427 PM8607_LDO(4 , 1800, 3300, 0, LDO4 , 0, 3, SUPPLIES_EN11, 6), 383 PM8607_LDO( 3, LDO3, 0, 3, SUPPLIES_EN11, 5),
428 PM8607_LDO(5 , 2900, 3300, 0, LDO5 , 0, 2, SUPPLIES_EN11, 7), 384 PM8607_LDO( 4, LDO4, 0, 3, SUPPLIES_EN11, 6),
429 PM8607_LDO(6 , 1800, 3300, 0, LDO6 , 0, 3, SUPPLIES_EN12, 0), 385 PM8607_LDO( 5, LDO5, 0, 2, SUPPLIES_EN11, 7),
430 PM8607_LDO(7 , 1800, 2900, 0, LDO7 , 0, 3, SUPPLIES_EN12, 1), 386 PM8607_LDO( 6, LDO6, 0, 3, SUPPLIES_EN12, 0),
431 PM8607_LDO(8 , 1800, 2900, 0, LDO8 , 0, 3, SUPPLIES_EN12, 2), 387 PM8607_LDO( 7, LDO7, 0, 3, SUPPLIES_EN12, 1),
432 PM8607_LDO(9 , 1800, 3300, 0, LDO9 , 0, 3, SUPPLIES_EN12, 3), 388 PM8607_LDO( 8, LDO8, 0, 3, SUPPLIES_EN12, 2),
433 PM8607_LDO(10, 1200, 3300, 0, LDO10, 0, 4, SUPPLIES_EN11, 4), 389 PM8607_LDO( 9, LDO9, 0, 3, SUPPLIES_EN12, 3),
434 PM8607_LDO(12, 1200, 3300, 0, LDO12, 0, 4, SUPPLIES_EN11, 5), 390 PM8607_LDO(10, LDO10, 0, 3, SUPPLIES_EN12, 4),
435 PM8607_LDO(14, 1800, 3300, 0, LDO14, 0, 3, SUPPLIES_EN11, 6), 391 PM8607_LDO(12, LDO12, 0, 4, SUPPLIES_EN12, 5),
392 PM8607_LDO(13, VIBRATOR_SET, 1, 3, VIBRATOR_SET, 0),
393 PM8607_LDO(14, LDO14, 0, 4, SUPPLIES_EN12, 6),
436}; 394};
437 395
438static inline struct pm8607_regulator_info *find_regulator_info(int id) 396static inline struct pm8607_regulator_info *find_regulator_info(int id)
@@ -484,60 +442,29 @@ static int __devexit pm8607_regulator_remove(struct platform_device *pdev)
484{ 442{
485 struct pm8607_regulator_info *info = platform_get_drvdata(pdev); 443 struct pm8607_regulator_info *info = platform_get_drvdata(pdev);
486 444
445 platform_set_drvdata(pdev, NULL);
487 regulator_unregister(info->regulator); 446 regulator_unregister(info->regulator);
488 return 0; 447 return 0;
489} 448}
490 449
491#define PM8607_REGULATOR_DRIVER(_name) \ 450static struct platform_driver pm8607_regulator_driver = {
492{ \ 451 .driver = {
493 .driver = { \ 452 .name = "88pm860x-regulator",
494 .name = "88pm8607-" #_name, \ 453 .owner = THIS_MODULE,
495 .owner = THIS_MODULE, \ 454 },
496 }, \ 455 .probe = pm8607_regulator_probe,
497 .probe = pm8607_regulator_probe, \ 456 .remove = __devexit_p(pm8607_regulator_remove),
498 .remove = __devexit_p(pm8607_regulator_remove), \
499}
500
501static struct platform_driver pm8607_regulator_driver[] = {
502 PM8607_REGULATOR_DRIVER(buck1),
503 PM8607_REGULATOR_DRIVER(buck2),
504 PM8607_REGULATOR_DRIVER(buck3),
505 PM8607_REGULATOR_DRIVER(ldo1),
506 PM8607_REGULATOR_DRIVER(ldo2),
507 PM8607_REGULATOR_DRIVER(ldo3),
508 PM8607_REGULATOR_DRIVER(ldo4),
509 PM8607_REGULATOR_DRIVER(ldo5),
510 PM8607_REGULATOR_DRIVER(ldo6),
511 PM8607_REGULATOR_DRIVER(ldo7),
512 PM8607_REGULATOR_DRIVER(ldo8),
513 PM8607_REGULATOR_DRIVER(ldo9),
514 PM8607_REGULATOR_DRIVER(ldo10),
515 PM8607_REGULATOR_DRIVER(ldo12),
516 PM8607_REGULATOR_DRIVER(ldo14),
517}; 457};
518 458
519static int __init pm8607_regulator_init(void) 459static int __init pm8607_regulator_init(void)
520{ 460{
521 int i, count, ret; 461 return platform_driver_register(&pm8607_regulator_driver);
522
523 count = ARRAY_SIZE(pm8607_regulator_driver);
524 for (i = 0; i < count; i++) {
525 ret = platform_driver_register(&pm8607_regulator_driver[i]);
526 if (ret != 0)
527 pr_err("Failed to register regulator driver: %d\n",
528 ret);
529 }
530 return 0;
531} 462}
532subsys_initcall(pm8607_regulator_init); 463subsys_initcall(pm8607_regulator_init);
533 464
534static void __exit pm8607_regulator_exit(void) 465static void __exit pm8607_regulator_exit(void)
535{ 466{
536 int i, count; 467 platform_driver_unregister(&pm8607_regulator_driver);
537
538 count = ARRAY_SIZE(pm8607_regulator_driver);
539 for (i = 0; i < count; i++)
540 platform_driver_unregister(&pm8607_regulator_driver[i]);
541} 468}
542module_exit(pm8607_regulator_exit); 469module_exit(pm8607_regulator_exit);
543 470
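The 88pm8607 rewrite replaces per-regulator voltage arithmetic with plain table lookups: pm8607_list_voltage() indexes vol_table (doubling the value when slope_double is set), and choose_voltage() halves the requested window when slope_double is set, then linearly scans for the first entry inside [min_uV, max_uV], stopping early at a zero entry (the zero-termination convention used by tables such as LDO13's). A host-side worked example of the range scan only (standalone sketch, not kernel code):

	#include <stdio.h>

	/* First entries of BUCK2_table from the patch above */
	static const unsigned int table[] = {
		0, 50000, 100000, 150000, 200000, 250000,
	};

	int main(void)
	{
		unsigned int min_uV = 120000, max_uV = 200000;
		unsigned int i;

		for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			if (min_uV <= table[i] && max_uV >= table[i]) {
				/* Prints: vsel=3 -> 150000 uV */
				printf("vsel=%u -> %u uV\n", i, table[i]);
				break;
			}
		return 0;
	}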
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 7de950959ed2..1afd008ca957 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -492,18 +492,21 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
492 .id = AB3100_LDO_A, 492 .id = AB3100_LDO_A,
493 .ops = &regulator_ops_fixed, 493 .ops = &regulator_ops_fixed,
494 .type = REGULATOR_VOLTAGE, 494 .type = REGULATOR_VOLTAGE,
495 .owner = THIS_MODULE,
495 }, 496 },
496 { 497 {
497 .name = "LDO_C", 498 .name = "LDO_C",
498 .id = AB3100_LDO_C, 499 .id = AB3100_LDO_C,
499 .ops = &regulator_ops_fixed, 500 .ops = &regulator_ops_fixed,
500 .type = REGULATOR_VOLTAGE, 501 .type = REGULATOR_VOLTAGE,
502 .owner = THIS_MODULE,
501 }, 503 },
502 { 504 {
503 .name = "LDO_D", 505 .name = "LDO_D",
504 .id = AB3100_LDO_D, 506 .id = AB3100_LDO_D,
505 .ops = &regulator_ops_fixed, 507 .ops = &regulator_ops_fixed,
506 .type = REGULATOR_VOLTAGE, 508 .type = REGULATOR_VOLTAGE,
509 .owner = THIS_MODULE,
507 }, 510 },
508 { 511 {
509 .name = "LDO_E", 512 .name = "LDO_E",
@@ -511,6 +514,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
511 .ops = &regulator_ops_variable_sleepable, 514 .ops = &regulator_ops_variable_sleepable,
512 .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages), 515 .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages),
513 .type = REGULATOR_VOLTAGE, 516 .type = REGULATOR_VOLTAGE,
517 .owner = THIS_MODULE,
514 }, 518 },
515 { 519 {
516 .name = "LDO_F", 520 .name = "LDO_F",
@@ -518,6 +522,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
518 .ops = &regulator_ops_variable, 522 .ops = &regulator_ops_variable,
519 .n_voltages = ARRAY_SIZE(ldo_f_typ_voltages), 523 .n_voltages = ARRAY_SIZE(ldo_f_typ_voltages),
520 .type = REGULATOR_VOLTAGE, 524 .type = REGULATOR_VOLTAGE,
525 .owner = THIS_MODULE,
521 }, 526 },
522 { 527 {
523 .name = "LDO_G", 528 .name = "LDO_G",
@@ -525,6 +530,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
525 .ops = &regulator_ops_variable, 530 .ops = &regulator_ops_variable,
526 .n_voltages = ARRAY_SIZE(ldo_g_typ_voltages), 531 .n_voltages = ARRAY_SIZE(ldo_g_typ_voltages),
527 .type = REGULATOR_VOLTAGE, 532 .type = REGULATOR_VOLTAGE,
533 .owner = THIS_MODULE,
528 }, 534 },
529 { 535 {
530 .name = "LDO_H", 536 .name = "LDO_H",
@@ -532,6 +538,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
532 .ops = &regulator_ops_variable, 538 .ops = &regulator_ops_variable,
533 .n_voltages = ARRAY_SIZE(ldo_h_typ_voltages), 539 .n_voltages = ARRAY_SIZE(ldo_h_typ_voltages),
534 .type = REGULATOR_VOLTAGE, 540 .type = REGULATOR_VOLTAGE,
541 .owner = THIS_MODULE,
535 }, 542 },
536 { 543 {
537 .name = "LDO_K", 544 .name = "LDO_K",
@@ -539,12 +546,14 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
539 .ops = &regulator_ops_variable, 546 .ops = &regulator_ops_variable,
540 .n_voltages = ARRAY_SIZE(ldo_k_typ_voltages), 547 .n_voltages = ARRAY_SIZE(ldo_k_typ_voltages),
541 .type = REGULATOR_VOLTAGE, 548 .type = REGULATOR_VOLTAGE,
549 .owner = THIS_MODULE,
542 }, 550 },
543 { 551 {
544 .name = "LDO_EXT", 552 .name = "LDO_EXT",
545 .id = AB3100_LDO_EXT, 553 .id = AB3100_LDO_EXT,
546 .ops = &regulator_ops_external, 554 .ops = &regulator_ops_external,
547 .type = REGULATOR_VOLTAGE, 555 .type = REGULATOR_VOLTAGE,
556 .owner = THIS_MODULE,
548 }, 557 },
549 { 558 {
550 .name = "BUCK", 559 .name = "BUCK",
@@ -552,6 +561,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
552 .ops = &regulator_ops_variable_sleepable, 561 .ops = &regulator_ops_variable_sleepable,
553 .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages), 562 .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages),
554 .type = REGULATOR_VOLTAGE, 563 .type = REGULATOR_VOLTAGE,
564 .owner = THIS_MODULE,
555 }, 565 },
556}; 566};
557 567
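The only change to ab3100 (and to bq24022 just below) is filling in .owner on every regulator_desc. The core uses that field for module pinning: regulator_get() calls try_module_get(rdev->owner) before handing out a consumer handle, and try_module_get(NULL) always succeeds, so a descriptor without an owner lets its provider module be unloaded while consumers still hold live pointers. Assumed shape of the core-side check (sketch):

	/* inside regulator_get(), sketch of the assumed core behavior */
	if (!try_module_get(rdev->owner))
		goto out;	/* provider module is going away */

With .owner = THIS_MODULE, the reference keeps the provider loaded until regulator_put() drops it.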
diff --git a/drivers/regulator/bq24022.c b/drivers/regulator/bq24022.c
index d08cd9b66c6d..068d488a4f71 100644
--- a/drivers/regulator/bq24022.c
+++ b/drivers/regulator/bq24022.c
@@ -78,6 +78,7 @@ static struct regulator_desc bq24022_desc = {
78 .name = "bq24022", 78 .name = "bq24022",
79 .ops = &bq24022_ops, 79 .ops = &bq24022_ops,
80 .type = REGULATOR_CURRENT, 80 .type = REGULATOR_CURRENT,
81 .owner = THIS_MODULE,
81}; 82};
82 83
83static int __init bq24022_probe(struct platform_device *pdev) 84static int __init bq24022_probe(struct platform_device *pdev)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 51cf2bb37438..2248087b9be2 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -944,8 +944,13 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
944 has_dev = 0; 944 has_dev = 0;
945 945
946 list_for_each_entry(node, &regulator_map_list, list) { 946 list_for_each_entry(node, &regulator_map_list, list) {
947 if (consumer_dev_name != node->dev_name) 947 if (node->dev_name && consumer_dev_name) {
948 if (strcmp(node->dev_name, consumer_dev_name) != 0)
949 continue;
950 } else if (node->dev_name || consumer_dev_name) {
948 continue; 951 continue;
952 }
953
949 if (strcmp(node->supply, supply) != 0) 954 if (strcmp(node->supply, supply) != 0)
950 continue; 955 continue;
951 956
@@ -976,29 +981,6 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
976 return 0; 981 return 0;
977} 982}
978 983
979static void unset_consumer_device_supply(struct regulator_dev *rdev,
980 const char *consumer_dev_name, struct device *consumer_dev)
981{
982 struct regulator_map *node, *n;
983
984 if (consumer_dev && !consumer_dev_name)
985 consumer_dev_name = dev_name(consumer_dev);
986
987 list_for_each_entry_safe(node, n, &regulator_map_list, list) {
988 if (rdev != node->regulator)
989 continue;
990
991 if (consumer_dev_name && node->dev_name &&
992 strcmp(consumer_dev_name, node->dev_name))
993 continue;
994
995 list_del(&node->list);
996 kfree(node->dev_name);
997 kfree(node);
998 return;
999 }
1000}
1001
1002static void unset_regulator_supplies(struct regulator_dev *rdev) 984static void unset_regulator_supplies(struct regulator_dev *rdev)
1003{ 985{
1004 struct regulator_map *node, *n; 986 struct regulator_map *node, *n;
@@ -1008,7 +990,6 @@ static void unset_regulator_supplies(struct regulator_dev *rdev)
1008 list_del(&node->list); 990 list_del(&node->list);
1009 kfree(node->dev_name); 991 kfree(node->dev_name);
1010 kfree(node); 992 kfree(node);
1011 return;
1012 } 993 }
1013 } 994 }
1014} 995}
@@ -1764,6 +1745,7 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode)
1764{ 1745{
1765 struct regulator_dev *rdev = regulator->rdev; 1746 struct regulator_dev *rdev = regulator->rdev;
1766 int ret; 1747 int ret;
1748 int regulator_curr_mode;
1767 1749
1768 mutex_lock(&rdev->mutex); 1750 mutex_lock(&rdev->mutex);
1769 1751
@@ -1773,6 +1755,15 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode)
1773 goto out; 1755 goto out;
1774 } 1756 }
1775 1757
1758 /* return if the same mode is requested */
1759 if (rdev->desc->ops->get_mode) {
1760 regulator_curr_mode = rdev->desc->ops->get_mode(rdev);
1761 if (regulator_curr_mode == mode) {
1762 ret = 0;
1763 goto out;
1764 }
1765 }
1766
1776 /* constraints check */ 1767 /* constraints check */
1777 ret = regulator_check_mode(rdev, mode); 1768 ret = regulator_check_mode(rdev, mode);
1778 if (ret < 0) 1769 if (ret < 0)
@@ -2328,7 +2319,37 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
2328 goto scrub; 2319 goto scrub;
2329 2320
2330 /* set supply regulator if it exists */ 2321 /* set supply regulator if it exists */
2322 if (init_data->supply_regulator && init_data->supply_regulator_dev) {
2323 dev_err(dev,
2324 "Supply regulator specified by both name and dev\n");
2325 goto scrub;
2326 }
2327
2328 if (init_data->supply_regulator) {
2329 struct regulator_dev *r;
2330 int found = 0;
2331
2332 list_for_each_entry(r, &regulator_list, list) {
2333 if (strcmp(rdev_get_name(r),
2334 init_data->supply_regulator) == 0) {
2335 found = 1;
2336 break;
2337 }
2338 }
2339
2340 if (!found) {
2341 dev_err(dev, "Failed to find supply %s\n",
2342 init_data->supply_regulator);
2343 goto scrub;
2344 }
2345
2346 ret = set_supply(rdev, r);
2347 if (ret < 0)
2348 goto scrub;
2349 }
2350
2331 if (init_data->supply_regulator_dev) { 2351 if (init_data->supply_regulator_dev) {
2352 dev_warn(dev, "Uses supply_regulator_dev instead of supply_regulator\n");
2332 ret = set_supply(rdev, 2353 ret = set_supply(rdev,
2333 dev_get_drvdata(init_data->supply_regulator_dev)); 2354 dev_get_drvdata(init_data->supply_regulator_dev));
2334 if (ret < 0) 2355 if (ret < 0)
@@ -2341,13 +2362,8 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
2341 init_data->consumer_supplies[i].dev, 2362 init_data->consumer_supplies[i].dev,
2342 init_data->consumer_supplies[i].dev_name, 2363 init_data->consumer_supplies[i].dev_name,
2343 init_data->consumer_supplies[i].supply); 2364 init_data->consumer_supplies[i].supply);
2344 if (ret < 0) { 2365 if (ret < 0)
2345 for (--i; i >= 0; i--) 2366 goto unset_supplies;
2346 unset_consumer_device_supply(rdev,
2347 init_data->consumer_supplies[i].dev_name,
2348 init_data->consumer_supplies[i].dev);
2349 goto scrub;
2350 }
2351 } 2367 }
2352 2368
2353 list_add(&rdev->list, &regulator_list); 2369 list_add(&rdev->list, &regulator_list);
@@ -2355,6 +2371,9 @@ out:
2355 mutex_unlock(&regulator_list_mutex); 2371 mutex_unlock(&regulator_list_mutex);
2356 return rdev; 2372 return rdev;
2357 2373
2374unset_supplies:
2375 unset_regulator_supplies(rdev);
2376
2358scrub: 2377scrub:
2359 device_unregister(&rdev->dev); 2378 device_unregister(&rdev->dev);
2360 /* device core frees rdev */ 2379 /* device core frees rdev */
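Two behavioral points in this core.c change are worth spelling out. First, supply regulators can now be referenced by name: regulator_register() walks regulator_list comparing rdev_get_name() against init_data->supply_regulator, so boards no longer need a struct device pointer for the parent. Second, consumer matching in set_consumer_device_supply() is now NULL-safe: two device names match when both are set and equal, or when both are absent (a mapping keyed on the supply name alone). The rule, restated as a standalone sketch:

	#include <stdbool.h>
	#include <string.h>

	static bool dev_name_match(const char *a, const char *b)
	{
		if (a && b)
			return strcmp(a, b) == 0;	/* both named: compare */
		return !a && !b;			/* else match only if both NULL */
	}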
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index ad036dd8da13..4597d508a229 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -440,8 +440,8 @@ static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev,
440 dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", 440 dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
441 __func__, id, min_uV, max_uV); 441 __func__, id, min_uV, max_uV);
442 442
443 if (min_uV > mc13783_regulators[id].voltages[0] && 443 if (min_uV >= mc13783_regulators[id].voltages[0] &&
444 max_uV < mc13783_regulators[id].voltages[0]) 444 max_uV <= mc13783_regulators[id].voltages[0])
445 return 0; 445 return 0;
446 else 446 else
447 return -EINVAL; 447 return -EINVAL;
@@ -649,6 +649,6 @@ static void __exit mc13783_regulator_exit(void)
649module_exit(mc13783_regulator_exit); 649module_exit(mc13783_regulator_exit);
650 650
651MODULE_LICENSE("GPL v2"); 651MODULE_LICENSE("GPL v2");
652MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de"); 652MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
653MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC"); 653MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC");
654MODULE_ALIAS("platform:mc13783-regulator"); 654MODULE_ALIAS("platform:mc13783-regulator");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 9729d760fb4d..7e5892efc437 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -49,6 +49,7 @@ struct twlreg_info {
49 49
50 /* chip constraints on regulator behavior */ 50 /* chip constraints on regulator behavior */
51 u16 min_mV; 51 u16 min_mV;
52 u16 max_mV;
52 53
53 /* used by regulator core */ 54 /* used by regulator core */
54 struct regulator_desc desc; 55 struct regulator_desc desc;
@@ -318,31 +319,8 @@ static const u16 VIO_VSEL_table[] = {
318static const u16 VINTANA2_VSEL_table[] = { 319static const u16 VINTANA2_VSEL_table[] = {
319 2500, 2750, 320 2500, 2750,
320}; 321};
321static const u16 VAUX1_6030_VSEL_table[] = {
322 1000, 1300, 1800, 2500,
323 2800, 2900, 3000, 3000,
324};
325static const u16 VAUX2_6030_VSEL_table[] = {
326 1200, 1800, 2500, 2750,
327 2800, 2800, 2800, 2800,
328};
329static const u16 VAUX3_6030_VSEL_table[] = {
330 1000, 1200, 1300, 1800,
331 2500, 2800, 3000, 3000,
332};
333static const u16 VMMC_VSEL_table[] = {
334 1200, 1800, 2800, 2900,
335 3000, 3000, 3000, 3000,
336};
337static const u16 VPP_VSEL_table[] = {
338 1800, 1900, 2000, 2100,
339 2200, 2300, 2400, 2500,
340};
341static const u16 VUSIM_VSEL_table[] = {
342 1200, 1800, 2500, 2900,
343};
344 322
345static int twlldo_list_voltage(struct regulator_dev *rdev, unsigned index) 323static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
346{ 324{
347 struct twlreg_info *info = rdev_get_drvdata(rdev); 325 struct twlreg_info *info = rdev_get_drvdata(rdev);
348 int mV = info->table[index]; 326 int mV = info->table[index];
@@ -351,7 +329,7 @@ static int twlldo_list_voltage(struct regulator_dev *rdev, unsigned index)
351} 329}
352 330
353static int 331static int
354twlldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) 332twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
355{ 333{
356 struct twlreg_info *info = rdev_get_drvdata(rdev); 334 struct twlreg_info *info = rdev_get_drvdata(rdev);
357 int vsel; 335 int vsel;
@@ -375,7 +353,7 @@ twlldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
375 return -EDOM; 353 return -EDOM;
376} 354}
377 355
378static int twlldo_get_voltage(struct regulator_dev *rdev) 356static int twl4030ldo_get_voltage(struct regulator_dev *rdev)
379{ 357{
380 struct twlreg_info *info = rdev_get_drvdata(rdev); 358 struct twlreg_info *info = rdev_get_drvdata(rdev);
381 int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, 359 int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
@@ -388,11 +366,67 @@ static int twlldo_get_voltage(struct regulator_dev *rdev)
388 return LDO_MV(info->table[vsel]) * 1000; 366 return LDO_MV(info->table[vsel]) * 1000;
389} 367}
390 368
391static struct regulator_ops twlldo_ops = { 369static struct regulator_ops twl4030ldo_ops = {
392 .list_voltage = twlldo_list_voltage, 370 .list_voltage = twl4030ldo_list_voltage,
393 371
394 .set_voltage = twlldo_set_voltage, 372 .set_voltage = twl4030ldo_set_voltage,
395 .get_voltage = twlldo_get_voltage, 373 .get_voltage = twl4030ldo_get_voltage,
374
375 .enable = twlreg_enable,
376 .disable = twlreg_disable,
377 .is_enabled = twlreg_is_enabled,
378
379 .set_mode = twlreg_set_mode,
380
381 .get_status = twlreg_get_status,
382};
383
384static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
385{
386 struct twlreg_info *info = rdev_get_drvdata(rdev);
387
388 return ((info->min_mV + (index * 100)) * 1000);
389}
390
391static int
392twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
393{
394 struct twlreg_info *info = rdev_get_drvdata(rdev);
395 int vsel;
396
397 if ((min_uV/1000 < info->min_mV) || (max_uV/1000 > info->max_mV))
398 return -EDOM;
399
400 /*
 401 * Calculate vsel from the requested voltage:
 402 * mV = 1000 mV + 100 mV * (vsel - 1)
403 */
404 vsel = (min_uV/1000 - 1000)/100 + 1;
405 return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel);
406
407}
408
409static int twl6030ldo_get_voltage(struct regulator_dev *rdev)
410{
411 struct twlreg_info *info = rdev_get_drvdata(rdev);
412 int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
413 VREG_VOLTAGE);
414
415 if (vsel < 0)
416 return vsel;
417
418 /*
 419 * Convert the stored vsel back to a voltage:
 420 * mV = 1000 mV + 100 mV * (vsel - 1)
421 */
422 return (1000 + (100 * (vsel - 1))) * 1000;
423}
424
425static struct regulator_ops twl6030ldo_ops = {
426 .list_voltage = twl6030ldo_list_voltage,
427
428 .set_voltage = twl6030ldo_set_voltage,
429 .get_voltage = twl6030ldo_get_voltage,
396 430
397 .enable = twlreg_enable, 431 .enable = twlreg_enable,
398 .disable = twlreg_disable, 432 .disable = twlreg_disable,
@@ -438,24 +472,16 @@ static struct regulator_ops twlfixed_ops = {
438 472
439/*----------------------------------------------------------------------*/ 473/*----------------------------------------------------------------------*/
440 474
441#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \
442 TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
443 remap_conf, TWL4030)
444#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ 475#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
445 remap_conf) \ 476 remap_conf) \
446 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ 477 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
447 remap_conf, TWL4030) 478 remap_conf, TWL4030)
448#define TWL6030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
449 remap_conf) \
450 TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
451 remap_conf, TWL6030)
452#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ 479#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
453 remap_conf) \ 480 remap_conf) \
454 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ 481 TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
455 remap_conf, TWL6030) 482 remap_conf, TWL6030)
456 483
457#define TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf, \ 484#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \
458 family) { \
459 .base = offset, \ 485 .base = offset, \
460 .id = num, \ 486 .id = num, \
461 .table_len = ARRAY_SIZE(label##_VSEL_table), \ 487 .table_len = ARRAY_SIZE(label##_VSEL_table), \
@@ -464,14 +490,32 @@ static struct regulator_ops twlfixed_ops = {
464 .remap = remap_conf, \ 490 .remap = remap_conf, \
465 .desc = { \ 491 .desc = { \
466 .name = #label, \ 492 .name = #label, \
467 .id = family##_REG_##label, \ 493 .id = TWL4030_REG_##label, \
468 .n_voltages = ARRAY_SIZE(label##_VSEL_table), \ 494 .n_voltages = ARRAY_SIZE(label##_VSEL_table), \
469 .ops = &twlldo_ops, \ 495 .ops = &twl4030ldo_ops, \
470 .type = REGULATOR_VOLTAGE, \ 496 .type = REGULATOR_VOLTAGE, \
471 .owner = THIS_MODULE, \ 497 .owner = THIS_MODULE, \
472 }, \ 498 }, \
473 } 499 }
474 500
501#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num, \
502 remap_conf) { \
503 .base = offset, \
504 .id = num, \
505 .min_mV = min_mVolts, \
506 .max_mV = max_mVolts, \
507 .remap = remap_conf, \
508 .desc = { \
509 .name = #label, \
510 .id = TWL6030_REG_##label, \
511 .n_voltages = (max_mVolts - min_mVolts)/100, \
512 .ops = &twl6030ldo_ops, \
513 .type = REGULATOR_VOLTAGE, \
514 .owner = THIS_MODULE, \
515 }, \
516 }
517
518
475#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \ 519#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
476 family) { \ 520 family) { \
477 .base = offset, \ 521 .base = offset, \
@@ -519,12 +563,12 @@ static struct twlreg_info twl_regs[] = {
519 /* 6030 REG with base as PMC Slave Misc : 0x0030 */ 563 /* 6030 REG with base as PMC Slave Misc : 0x0030 */
520 /* Turnon-delay and remap configuration values for 6030 are not 564 /* Turnon-delay and remap configuration values for 6030 are not
521 verified since the specification is not public */ 565 verified since the specification is not public */
522 TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x21), 566 TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1, 0x21),
523 TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x21), 567 TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2, 0x21),
524 TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x21), 568 TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3, 0x21),
525 TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x21), 569 TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4, 0x21),
526 TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x21), 570 TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5, 0x21),
527 TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x21), 571 TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7, 0x21),
528 TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21), 572 TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21),
529 TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21), 573 TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21),
530 TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21), 574 TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21),
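
A note on the twl6030 arithmetic above: the selector mapping is plain integer
math, mV = 1000 + 100 * (vsel - 1), so it can be sanity-checked entirely in
user space. A minimal sketch mirroring twl6030ldo_get_voltage() and
twl6030ldo_set_voltage(); the helper names are illustrative, not part of the
driver:

#include <assert.h>
#include <stdio.h>

/* mV = 1000 + 100 * (vsel - 1), as in twl6030ldo_get_voltage() */
static int vsel_to_uV(int vsel)
{
	return (1000 + 100 * (vsel - 1)) * 1000;
}

/* Inverse mapping, as in twl6030ldo_set_voltage() */
static int uV_to_vsel(int min_uV)
{
	return (min_uV / 1000 - 1000) / 100 + 1;
}

int main(void)
{
	int vsel;

	/* Round-trip every selector in the 1000..3300 mV range */
	for (vsel = 1; vsel <= 24; vsel++)
		assert(uV_to_vsel(vsel_to_uV(vsel)) == vsel);

	printf("1.8 V -> vsel %d\n", uV_to_vsel(1800000));	/* prints 9 */
	return 0;
}

Harnesses like this make off-by-one questions easy to check: 1000..3300 mV in
100 mV steps is 24 values, while the TWL6030_ADJUSTABLE_LDO macro above
computes n_voltages as (3300 - 1000)/100 = 23, so the top step is not
reachable through list_voltage().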
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 50ac047cd136..10ba12c8c5e0 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -611,6 +611,13 @@ config RTC_DRV_AB3100
611 Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC 611 Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC
612 support. This chip contains a battery- and capacitor-backed RTC. 612 support. This chip contains a battery- and capacitor-backed RTC.
613 613
614config RTC_DRV_AB8500
615 tristate "ST-Ericsson AB8500 RTC"
616 depends on AB8500_CORE
617 help
618 Select this to enable the ST-Ericsson AB8500 power management IC RTC
619 support. This chip contains a battery- and capacitor-backed RTC.
620
614config RTC_DRV_NUC900 621config RTC_DRV_NUC900
615 tristate "NUC910/NUC920 RTC driver" 622 tristate "NUC910/NUC920 RTC driver"
616 depends on RTC_CLASS && ARCH_W90X900 623 depends on RTC_CLASS && ARCH_W90X900
@@ -640,7 +647,7 @@ config RTC_DRV_OMAP
640 647
641config RTC_DRV_S3C 648config RTC_DRV_S3C
642 tristate "Samsung S3C series SoC RTC" 649 tristate "Samsung S3C series SoC RTC"
643 depends on ARCH_S3C2410 650 depends on ARCH_S3C2410 || ARCH_S3C64XX
644 help 651 help
645 RTC (Realtime Clock) driver for the clock inbuilt into the 652 RTC (Realtime Clock) driver for the clock inbuilt into the
646 Samsung S3C24XX series of SoCs. This can provide periodic 653 Samsung S3C24XX series of SoCs. This can provide periodic
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 245311a1348f..5adbba7cf89c 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -18,6 +18,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
18# Keep the list ordered. 18# Keep the list ordered.
19 19
20obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o 20obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
21obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
21obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o 22obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
22obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o 23obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
23obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o 24obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
new file mode 100644
index 000000000000..2fda03125e55
--- /dev/null
+++ b/drivers/rtc/rtc-ab8500.c
@@ -0,0 +1,363 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License terms: GNU General Public License (GPL) version 2
5 * Author: Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>
6 *
7 * RTC clock driver for the RTC part of the AB8500 Power management chip.
8 * Based on RTC clock driver for the AB3100 Analog Baseband Chip by
9 * Linus Walleij <linus.walleij@stericsson.com>
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/platform_device.h>
16#include <linux/rtc.h>
17#include <linux/mfd/ab8500.h>
18#include <linux/delay.h>
19
20#define AB8500_RTC_SOFF_STAT_REG 0x0F00
21#define AB8500_RTC_CC_CONF_REG 0x0F01
22#define AB8500_RTC_READ_REQ_REG 0x0F02
23#define AB8500_RTC_WATCH_TSECMID_REG 0x0F03
24#define AB8500_RTC_WATCH_TSECHI_REG 0x0F04
25#define AB8500_RTC_WATCH_TMIN_LOW_REG 0x0F05
26#define AB8500_RTC_WATCH_TMIN_MID_REG 0x0F06
27#define AB8500_RTC_WATCH_TMIN_HI_REG 0x0F07
28#define AB8500_RTC_ALRM_MIN_LOW_REG 0x0F08
29#define AB8500_RTC_ALRM_MIN_MID_REG 0x0F09
30#define AB8500_RTC_ALRM_MIN_HI_REG 0x0F0A
31#define AB8500_RTC_STAT_REG 0x0F0B
32#define AB8500_RTC_BKUP_CHG_REG 0x0F0C
33#define AB8500_RTC_FORCE_BKUP_REG 0x0F0D
34#define AB8500_RTC_CALIB_REG 0x0F0E
35#define AB8500_RTC_SWITCH_STAT_REG 0x0F0F
36#define AB8500_REV_REG 0x1080
37
38/* RtcReadRequest bits */
39#define RTC_READ_REQUEST 0x01
40#define RTC_WRITE_REQUEST 0x02
41
42/* RtcCtrl bits */
43#define RTC_ALARM_ENA 0x04
44#define RTC_STATUS_DATA 0x01
45
46#define COUNTS_PER_SEC (0xF000 / 60)
47#define AB8500_RTC_EPOCH 2000
48
49static const unsigned long ab8500_rtc_time_regs[] = {
50 AB8500_RTC_WATCH_TMIN_HI_REG, AB8500_RTC_WATCH_TMIN_MID_REG,
51 AB8500_RTC_WATCH_TMIN_LOW_REG, AB8500_RTC_WATCH_TSECHI_REG,
52 AB8500_RTC_WATCH_TSECMID_REG
53};
54
55static const unsigned long ab8500_rtc_alarm_regs[] = {
56 AB8500_RTC_ALRM_MIN_HI_REG, AB8500_RTC_ALRM_MIN_MID_REG,
57 AB8500_RTC_ALRM_MIN_LOW_REG
58};
59
 60/* Calculate the seconds from 1970 to 01-01 00:00:00 of the given year */
61static unsigned long get_elapsed_seconds(int year)
62{
63 unsigned long secs;
64 struct rtc_time tm = {
65 .tm_year = year - 1900,
66 .tm_mday = 1,
67 };
68
69 /*
70 * This function calculates secs from 1970 and not from
71 * 1900, even if we supply the offset from year 1900.
72 */
73 rtc_tm_to_time(&tm, &secs);
74 return secs;
75}
76
77static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
78{
79 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
80 unsigned long timeout = jiffies + HZ;
81 int retval, i;
82 unsigned long mins, secs;
83 unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
84
85 /* Request a data read */
86 retval = ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG,
87 RTC_READ_REQUEST);
88 if (retval < 0)
89 return retval;
90
91 /* Early AB8500 chips will not clear the rtc read request bit */
92 if (ab8500->revision == 0) {
93 msleep(1);
94 } else {
 95 /* Poll until the AB8500 latches the time and clears the read request */
96 while (time_before(jiffies, timeout)) {
97 retval = ab8500_read(ab8500, AB8500_RTC_READ_REQ_REG);
98 if (retval < 0)
99 return retval;
100
101 if (!(retval & RTC_READ_REQUEST))
102 break;
103
104 msleep(1);
105 }
106 }
107
108 /* Read the Watchtime registers */
109 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
110 retval = ab8500_read(ab8500, ab8500_rtc_time_regs[i]);
111 if (retval < 0)
112 return retval;
113 buf[i] = retval;
114 }
115
116 mins = (buf[0] << 16) | (buf[1] << 8) | buf[2];
117
118 secs = (buf[3] << 8) | buf[4];
119 secs = secs / COUNTS_PER_SEC;
120 secs = secs + (mins * 60);
121
122 /* Add back the initially subtracted number of seconds */
123 secs += get_elapsed_seconds(AB8500_RTC_EPOCH);
124
125 rtc_time_to_tm(secs, tm);
126 return rtc_valid_tm(tm);
127}
128
129static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
130{
131 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
132 int retval, i;
133 unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
134 unsigned long no_secs, no_mins, secs = 0;
135
136 if (tm->tm_year < (AB8500_RTC_EPOCH - 1900)) {
137 dev_dbg(dev, "year should be equal to or greater than %d\n",
138 AB8500_RTC_EPOCH);
139 return -EINVAL;
140 }
141
142 /* Get the number of seconds since 1970 */
143 rtc_tm_to_time(tm, &secs);
144
145 /*
146 * Convert it to the number of seconds since 01-01-2000 00:00:00, since
147 * we only have a small counter in the RTC.
148 */
149 secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
150
151 no_mins = secs / 60;
152
153 no_secs = secs % 60;
154 /* Make the seconds count as per the RTC resolution */
155 no_secs = no_secs * COUNTS_PER_SEC;
156
157 buf[4] = no_secs & 0xFF;
158 buf[3] = (no_secs >> 8) & 0xFF;
159
160 buf[2] = no_mins & 0xFF;
161 buf[1] = (no_mins >> 8) & 0xFF;
162 buf[0] = (no_mins >> 16) & 0xFF;
163
164 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
165 retval = ab8500_write(ab8500, ab8500_rtc_time_regs[i], buf[i]);
166 if (retval < 0)
167 return retval;
168 }
169
170 /* Request a data write */
171 return ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG, RTC_WRITE_REQUEST);
172}
173
174static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
175{
176 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
177 int retval, i;
178 int rtc_ctrl;
179 unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
180 unsigned long secs, mins;
181
182 /* Check if the alarm is enabled or not */
183 rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
184 if (rtc_ctrl < 0)
185 return rtc_ctrl;
186
187 if (rtc_ctrl & RTC_ALARM_ENA)
188 alarm->enabled = 1;
189 else
190 alarm->enabled = 0;
191
192 alarm->pending = 0;
193
194 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
195 retval = ab8500_read(ab8500, ab8500_rtc_alarm_regs[i]);
196 if (retval < 0)
197 return retval;
198 buf[i] = retval;
199 }
200
201 mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]);
202 secs = mins * 60;
203
204 /* Add back the initially subtracted number of seconds */
205 secs += get_elapsed_seconds(AB8500_RTC_EPOCH);
206
207 rtc_time_to_tm(secs, &alarm->time);
208
209 return rtc_valid_tm(&alarm->time);
210}
211
212static int ab8500_rtc_irq_enable(struct device *dev, unsigned int enabled)
213{
214 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
215
216 return ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_ALARM_ENA,
217 enabled ? RTC_ALARM_ENA : 0);
218}
219
220static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
221{
222 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
223 int retval, i;
224 unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
225 unsigned long mins, secs = 0;
226
227 if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) {
228 dev_dbg(dev, "year should be equal to or greater than %d\n",
229 AB8500_RTC_EPOCH);
230 return -EINVAL;
231 }
232
233 /* Get the number of seconds since 1970 */
234 rtc_tm_to_time(&alarm->time, &secs);
235
236 /*
237 * Convert it to the number of seconds since 01-01-2000 00:00:00, since
238 * we only have a small counter in the RTC.
239 */
240 secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
241
242 mins = secs / 60;
243
244 buf[2] = mins & 0xFF;
245 buf[1] = (mins >> 8) & 0xFF;
246 buf[0] = (mins >> 16) & 0xFF;
247
248 /* Set the alarm time */
249 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
250 retval = ab8500_write(ab8500, ab8500_rtc_alarm_regs[i], buf[i]);
251 if (retval < 0)
252 return retval;
253 }
254
255 return ab8500_rtc_irq_enable(dev, alarm->enabled);
256}
257
258static irqreturn_t rtc_alarm_handler(int irq, void *data)
259{
260 struct rtc_device *rtc = data;
261 unsigned long events = RTC_IRQF | RTC_AF;
262
263 dev_dbg(&rtc->dev, "%s\n", __func__);
264 rtc_update_irq(rtc, 1, events);
265
266 return IRQ_HANDLED;
267}
268
269static const struct rtc_class_ops ab8500_rtc_ops = {
270 .read_time = ab8500_rtc_read_time,
271 .set_time = ab8500_rtc_set_time,
272 .read_alarm = ab8500_rtc_read_alarm,
273 .set_alarm = ab8500_rtc_set_alarm,
274 .alarm_irq_enable = ab8500_rtc_irq_enable,
275};
276
277static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
278{
279 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
280 int err;
281 struct rtc_device *rtc;
282 int rtc_ctrl;
283 int irq;
284
285 irq = platform_get_irq_byname(pdev, "ALARM");
286 if (irq < 0)
287 return irq;
288
289 /* For RTC supply test */
290 err = ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_STATUS_DATA,
291 RTC_STATUS_DATA);
292 if (err < 0)
293 return err;
294
295 /* Wait for reset by the PorRtc */
296 msleep(1);
297
298 rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
299 if (rtc_ctrl < 0)
300 return rtc_ctrl;
301
302 /* Check if the RTC Supply fails */
303 if (!(rtc_ctrl & RTC_STATUS_DATA)) {
304 dev_err(&pdev->dev, "RTC supply failure\n");
305 return -ENODEV;
306 }
307
308 rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops,
309 THIS_MODULE);
310 if (IS_ERR(rtc)) {
311 dev_err(&pdev->dev, "Registration failed\n");
312 err = PTR_ERR(rtc);
313 return err;
314 }
315
316 err = request_threaded_irq(irq, NULL, rtc_alarm_handler, 0,
317 "ab8500-rtc", rtc);
318 if (err < 0) {
319 rtc_device_unregister(rtc);
320 return err;
321 }
322
323 platform_set_drvdata(pdev, rtc);
324
325 return 0;
326}
327
328static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
329{
330 struct rtc_device *rtc = platform_get_drvdata(pdev);
331 int irq = platform_get_irq_byname(pdev, "ALARM");
332
333 free_irq(irq, rtc);
334 rtc_device_unregister(rtc);
335 platform_set_drvdata(pdev, NULL);
336
337 return 0;
338}
339
340static struct platform_driver ab8500_rtc_driver = {
341 .driver = {
342 .name = "ab8500-rtc",
343 .owner = THIS_MODULE,
344 },
345 .probe = ab8500_rtc_probe,
346 .remove = __devexit_p(ab8500_rtc_remove),
347};
348
349static int __init ab8500_rtc_init(void)
350{
351 return platform_driver_register(&ab8500_rtc_driver);
352}
353
354static void __exit ab8500_rtc_exit(void)
355{
356 platform_driver_unregister(&ab8500_rtc_driver);
357}
358
359module_init(ab8500_rtc_init);
360module_exit(ab8500_rtc_exit);
361MODULE_AUTHOR("Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>");
362MODULE_DESCRIPTION("AB8500 RTC Driver");
363MODULE_LICENSE("GPL v2");
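
The register layout above encodes time as a 24-bit minute counter plus a
16-bit sub-minute counter ticking at COUNTS_PER_SEC = 0xF000/60 = 1024 Hz.
The packing done by ab8500_rtc_set_time() and the unpacking in
ab8500_rtc_read_time() can be exercised in user space; a minimal sketch with
the buffer layout copied from the driver:

#include <assert.h>
#include <stdio.h>

#define COUNTS_PER_SEC (0xF000 / 60)	/* 1024 ticks per second */

/* Pack seconds-since-epoch into the five watchtime register bytes */
static void pack_time(unsigned long secs, unsigned char buf[5])
{
	unsigned long mins = secs / 60;
	unsigned long ticks = (secs % 60) * COUNTS_PER_SEC;

	buf[4] = ticks & 0xFF;		/* TSECMID */
	buf[3] = (ticks >> 8) & 0xFF;	/* TSECHI; 59 * 1024 = 60416 fits */
	buf[2] = mins & 0xFF;		/* TMIN_LOW */
	buf[1] = (mins >> 8) & 0xFF;	/* TMIN_MID */
	buf[0] = (mins >> 16) & 0xFF;	/* TMIN_HI */
}

/* Reverse of pack_time(), mirroring ab8500_rtc_read_time() */
static unsigned long unpack_time(const unsigned char buf[5])
{
	unsigned long mins = (buf[0] << 16) | (buf[1] << 8) | buf[2];
	unsigned long ticks = (buf[3] << 8) | buf[4];

	return mins * 60 + ticks / COUNTS_PER_SEC;
}

int main(void)
{
	unsigned char buf[5];
	unsigned long t = 12345678;	/* arbitrary seconds since 2000 */

	pack_time(t, buf);
	assert(unpack_time(buf) == t);	/* round-trips at 1 s resolution */
	printf("ok\n");
	return 0;
}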
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 96e8e70fbf1e..11b8ea29d2b7 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -719,6 +719,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
719 } 719 }
720 } 720 }
721 721
722 cmos_rtc.dev = dev;
723 dev_set_drvdata(dev, &cmos_rtc);
724
722 cmos_rtc.rtc = rtc_device_register(driver_name, dev, 725 cmos_rtc.rtc = rtc_device_register(driver_name, dev,
723 &cmos_rtc_ops, THIS_MODULE); 726 &cmos_rtc_ops, THIS_MODULE);
724 if (IS_ERR(cmos_rtc.rtc)) { 727 if (IS_ERR(cmos_rtc.rtc)) {
@@ -726,8 +729,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
726 goto cleanup0; 729 goto cleanup0;
727 } 730 }
728 731
729 cmos_rtc.dev = dev;
730 dev_set_drvdata(dev, &cmos_rtc);
731 rename_region(ports, dev_name(&cmos_rtc.rtc->dev)); 732 rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
732 733
733 spin_lock_irq(&rtc_lock); 734 spin_lock_irq(&rtc_lock);
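
The two moved lines in cmos_do_probe() matter more than they look:
rtc_device_register() can invoke the rtc_class_ops before it returns, and
those callbacks fetch the driver state with dev_get_drvdata(), so setting
drvdata after registration leaves a window where a callback sees NULL. A toy
user-space sketch of the ordering hazard, with invented names:

#include <assert.h>
#include <stddef.h>

struct device { void *drvdata; };
struct state { int ready; };

/* Stand-in for rtc_device_register(): may call back before returning */
static void register_rtc(struct device *dev, void (*cb)(struct device *))
{
	cb(dev);	/* e.g. an initial read_time during registration */
}

static void read_time(struct device *dev)
{
	struct state *s = dev->drvdata;

	assert(s != NULL);	/* oopses if drvdata is set after registering */
	s->ready = 1;
}

int main(void)
{
	struct device dev = { .drvdata = NULL };
	struct state st = { 0 };

	dev.drvdata = &st;	/* must happen first, as in the patch */
	register_rtc(&dev, read_time);
	return 0;
}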
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 532acf9b05d8..359d1e04626c 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -16,7 +16,6 @@
16#include <linux/rtc.h> 16#include <linux/rtc.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/bcd.h> 18#include <linux/bcd.h>
19#include <asm/rtc.h>
20 19
21#define DRV_NAME "rtc-ds1302" 20#define DRV_NAME "rtc-ds1302"
22#define DRV_VERSION "0.1.1" 21#define DRV_VERSION "0.1.1"
@@ -34,14 +33,55 @@
34#define RTC_ADDR_MIN 0x01 /* Address of minute register */ 33#define RTC_ADDR_MIN 0x01 /* Address of minute register */
35#define RTC_ADDR_SEC 0x00 /* Address of second register */ 34#define RTC_ADDR_SEC 0x00 /* Address of second register */
36 35
36#ifdef CONFIG_SH_SECUREEDGE5410
37#include <asm/rtc.h>
38#include <mach/snapgear.h>
39
37#define RTC_RESET 0x1000 40#define RTC_RESET 0x1000
38#define RTC_IODATA 0x0800 41#define RTC_IODATA 0x0800
39#define RTC_SCLK 0x0400 42#define RTC_SCLK 0x0400
40 43
41#ifdef CONFIG_SH_SECUREEDGE5410
42#include <mach/snapgear.h>
43#define set_dp(x) SECUREEDGE_WRITE_IOPORT(x, 0x1c00) 44#define set_dp(x) SECUREEDGE_WRITE_IOPORT(x, 0x1c00)
44#define get_dp() SECUREEDGE_READ_IOPORT() 45#define get_dp() SECUREEDGE_READ_IOPORT()
46#define ds1302_set_tx()
47#define ds1302_set_rx()
48
49static inline int ds1302_hw_init(void)
50{
51 return 0;
52}
53
54static inline void ds1302_reset(void)
55{
56 set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
57}
58
59static inline void ds1302_clock(void)
60{
61 set_dp(get_dp() | RTC_SCLK); /* clock high */
62 set_dp(get_dp() & ~RTC_SCLK); /* clock low */
63}
64
65static inline void ds1302_start(void)
66{
67 set_dp(get_dp() | RTC_RESET);
68}
69
70static inline void ds1302_stop(void)
71{
72 set_dp(get_dp() & ~RTC_RESET);
73}
74
75static inline void ds1302_txbit(int bit)
76{
77 set_dp((get_dp() & ~RTC_IODATA) | (bit ? RTC_IODATA : 0));
78}
79
80static inline int ds1302_rxbit(void)
81{
82 return !!(get_dp() & RTC_IODATA);
83}
84
45#else 85#else
46#error "Add support for your platform" 86#error "Add support for your platform"
47#endif 87#endif
@@ -50,11 +90,11 @@ static void ds1302_sendbits(unsigned int val)
50{ 90{
51 int i; 91 int i;
52 92
93 ds1302_set_tx();
94
53 for (i = 8; (i); i--, val >>= 1) { 95 for (i = 8; (i); i--, val >>= 1) {
54 set_dp((get_dp() & ~RTC_IODATA) | ((val & 0x1) ? 96 ds1302_txbit(val & 0x1);
55 RTC_IODATA : 0)); 97 ds1302_clock();
56 set_dp(get_dp() | RTC_SCLK); /* clock high */
57 set_dp(get_dp() & ~RTC_SCLK); /* clock low */
58 } 98 }
59} 99}
60 100
@@ -63,10 +103,11 @@ static unsigned int ds1302_recvbits(void)
63 unsigned int val; 103 unsigned int val;
64 int i; 104 int i;
65 105
106 ds1302_set_rx();
107
66 for (i = 0, val = 0; (i < 8); i++) { 108 for (i = 0, val = 0; (i < 8); i++) {
67 val |= (((get_dp() & RTC_IODATA) ? 1 : 0) << i); 109 val |= (ds1302_rxbit() << i);
68 set_dp(get_dp() | RTC_SCLK); /* clock high */ 110 ds1302_clock();
69 set_dp(get_dp() & ~RTC_SCLK); /* clock low */
70 } 111 }
71 112
72 return val; 113 return val;
@@ -76,23 +117,24 @@ static unsigned int ds1302_readbyte(unsigned int addr)
76{ 117{
77 unsigned int val; 118 unsigned int val;
78 119
79 set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK)); 120 ds1302_reset();
80 121
81 set_dp(get_dp() | RTC_RESET); 122 ds1302_start();
82 ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_READ); 123 ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_READ);
83 val = ds1302_recvbits(); 124 val = ds1302_recvbits();
84 set_dp(get_dp() & ~RTC_RESET); 125 ds1302_stop();
85 126
86 return val; 127 return val;
87} 128}
88 129
89static void ds1302_writebyte(unsigned int addr, unsigned int val) 130static void ds1302_writebyte(unsigned int addr, unsigned int val)
90{ 131{
91 set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK)); 132 ds1302_reset();
92 set_dp(get_dp() | RTC_RESET); 133
134 ds1302_start();
93 ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_WRITE); 135 ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_WRITE);
94 ds1302_sendbits(val); 136 ds1302_sendbits(val);
95 set_dp(get_dp() & ~RTC_RESET); 137 ds1302_stop();
96} 138}
97 139
98static int ds1302_rtc_read_time(struct device *dev, struct rtc_time *tm) 140static int ds1302_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -167,13 +209,20 @@ static int __init ds1302_rtc_probe(struct platform_device *pdev)
167{ 209{
168 struct rtc_device *rtc; 210 struct rtc_device *rtc;
169 211
212 if (ds1302_hw_init()) {
 213 dev_err(&pdev->dev, "Failed to init communication channel\n");
214 return -EINVAL;
215 }
216
170 /* Reset */ 217 /* Reset */
171 set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK)); 218 ds1302_reset();
172 219
173 /* Write a magic value to the DS1302 RAM, and see if it sticks. */ 220 /* Write a magic value to the DS1302 RAM, and see if it sticks. */
174 ds1302_writebyte(RTC_ADDR_RAM0, 0x42); 221 ds1302_writebyte(RTC_ADDR_RAM0, 0x42);
175 if (ds1302_readbyte(RTC_ADDR_RAM0) != 0x42) 222 if (ds1302_readbyte(RTC_ADDR_RAM0) != 0x42) {
 223 dev_err(&pdev->dev, "Failed to probe\n");
176 return -ENODEV; 224 return -ENODEV;
225 }
177 226
178 rtc = rtc_device_register("ds1302", &pdev->dev, 227 rtc = rtc_device_register("ds1302", &pdev->dev,
179 &ds1302_rtc_ops, THIS_MODULE); 228 &ds1302_rtc_ops, THIS_MODULE);
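
With the bus accessors factored out, porting past the "#error Add support for
your platform" branch means supplying the eight hooks. The SECUREEDGE5410
block leaves ds1302_set_tx()/ds1302_set_rx() empty because its port logic
needs no direction change; a board with a single bidirectional data line has
to turn the pin around. A hypothetical sketch for a GPIO-wired board (the pin
numbers and wiring are invented for illustration, and error unwinding is
omitted for brevity):

#include <linux/gpio.h>

#define DS1302_GPIO_RST		10	/* hypothetical pin numbers */
#define DS1302_GPIO_SCLK	11
#define DS1302_GPIO_IO		12

static inline int ds1302_hw_init(void)
{
	if (gpio_request(DS1302_GPIO_RST, "ds1302-rst") ||
	    gpio_request(DS1302_GPIO_SCLK, "ds1302-sclk") ||
	    gpio_request(DS1302_GPIO_IO, "ds1302-io"))
		return -EBUSY;
	gpio_direction_output(DS1302_GPIO_RST, 0);
	gpio_direction_output(DS1302_GPIO_SCLK, 0);
	return 0;
}

/* Turn the shared data line around for transmit/receive */
static inline void ds1302_set_tx(void)
{
	gpio_direction_output(DS1302_GPIO_IO, 0);
}

static inline void ds1302_set_rx(void)
{
	gpio_direction_input(DS1302_GPIO_IO);
}

static inline void ds1302_reset(void)
{
	gpio_set_value(DS1302_GPIO_RST, 0);
	gpio_set_value(DS1302_GPIO_SCLK, 0);
}

static inline void ds1302_start(void)
{
	gpio_set_value(DS1302_GPIO_RST, 1);
}

static inline void ds1302_stop(void)
{
	gpio_set_value(DS1302_GPIO_RST, 0);
}

static inline void ds1302_clock(void)
{
	gpio_set_value(DS1302_GPIO_SCLK, 1);	/* clock high */
	gpio_set_value(DS1302_GPIO_SCLK, 0);	/* clock low */
}

static inline void ds1302_txbit(int bit)
{
	gpio_set_value(DS1302_GPIO_IO, !!bit);
}

static inline int ds1302_rxbit(void)
{
	return !!gpio_get_value(DS1302_GPIO_IO);
}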
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 054e05294af8..468200c38ecb 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -462,39 +462,16 @@ isl1208_sysfs_store_usr(struct device *dev,
462static DEVICE_ATTR(usr, S_IRUGO | S_IWUSR, isl1208_sysfs_show_usr, 462static DEVICE_ATTR(usr, S_IRUGO | S_IWUSR, isl1208_sysfs_show_usr,
463 isl1208_sysfs_store_usr); 463 isl1208_sysfs_store_usr);
464 464
465static int 465static struct attribute *isl1208_rtc_attrs[] = {
466isl1208_sysfs_register(struct device *dev) 466 &dev_attr_atrim.attr,
467{ 467 &dev_attr_dtrim.attr,
468 int err; 468 &dev_attr_usr.attr,
469 469 NULL
470 err = device_create_file(dev, &dev_attr_atrim); 470};
471 if (err)
472 return err;
473
474 err = device_create_file(dev, &dev_attr_dtrim);
475 if (err) {
476 device_remove_file(dev, &dev_attr_atrim);
477 return err;
478 }
479
480 err = device_create_file(dev, &dev_attr_usr);
481 if (err) {
482 device_remove_file(dev, &dev_attr_atrim);
483 device_remove_file(dev, &dev_attr_dtrim);
484 }
485
486 return 0;
487}
488
489static int
490isl1208_sysfs_unregister(struct device *dev)
491{
492 device_remove_file(dev, &dev_attr_dtrim);
493 device_remove_file(dev, &dev_attr_atrim);
494 device_remove_file(dev, &dev_attr_usr);
495 471
496 return 0; 472static const struct attribute_group isl1208_rtc_sysfs_files = {
497} 473 .attrs = isl1208_rtc_attrs,
474};
498 475
499static int 476static int
500isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) 477isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
@@ -529,7 +506,7 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
529 dev_warn(&client->dev, "rtc power failure detected, " 506 dev_warn(&client->dev, "rtc power failure detected, "
530 "please set clock.\n"); 507 "please set clock.\n");
531 508
532 rc = isl1208_sysfs_register(&client->dev); 509 rc = sysfs_create_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
533 if (rc) 510 if (rc)
534 goto exit_unregister; 511 goto exit_unregister;
535 512
@@ -546,7 +523,7 @@ isl1208_remove(struct i2c_client *client)
546{ 523{
547 struct rtc_device *rtc = i2c_get_clientdata(client); 524 struct rtc_device *rtc = i2c_get_clientdata(client);
548 525
549 isl1208_sysfs_unregister(&client->dev); 526 sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
550 rtc_device_unregister(rtc); 527 rtc_device_unregister(rtc);
551 528
552 return 0; 529 return 0;
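
The isl1208 change is the standard replacement of chained
device_create_file() calls with a single attribute group, which is both
shorter and atomic: sysfs_create_group() either creates every file or none,
so the partial-failure cleanup above disappears. The general shape of the
pattern, for a hypothetical driver whose show/store handlers are assumed to
exist:

#include <linux/device.h>
#include <linux/sysfs.h>

static DEVICE_ATTR(bar, S_IRUGO | S_IWUSR, foo_show_bar, foo_store_bar);
static DEVICE_ATTR(baz, S_IRUGO, foo_show_baz, NULL);

static struct attribute *foo_attrs[] = {
	&dev_attr_bar.attr,
	&dev_attr_baz.attr,
	NULL			/* the array must be NULL-terminated */
};

static const struct attribute_group foo_attr_group = {
	.attrs = foo_attrs,
};

static int foo_add_sysfs(struct device *dev)
{
	/* all-or-nothing: no per-file error unwinding needed */
	return sysfs_create_group(&dev->kobj, &foo_attr_group);
}

static void foo_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &foo_attr_group);
}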
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 60fe266f0f49..6dc4e6241418 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -595,10 +595,6 @@ static void wdt_disable(void)
595static ssize_t wdt_write(struct file *file, const char __user *buf, 595static ssize_t wdt_write(struct file *file, const char __user *buf,
596 size_t count, loff_t *ppos) 596 size_t count, loff_t *ppos)
597{ 597{
598 /* Can't seek (pwrite) on this device
599 if (ppos != &file->f_pos)
600 return -ESPIPE;
601 */
602 if (count) { 598 if (count) {
603 wdt_ping(); 599 wdt_ping();
604 return 1; 600 return 1;
@@ -623,7 +619,7 @@ static ssize_t wdt_read(struct file *file, char __user *buf,
623 * according to their available features. We only actually usefully support 619 * according to their available features. We only actually usefully support
624 * querying capabilities and current status. 620 * querying capabilities and current status.
625 */ 621 */
626static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 622static int wdt_ioctl(struct file *file, unsigned int cmd,
627 unsigned long arg) 623 unsigned long arg)
628{ 624{
629 int new_margin, rv; 625 int new_margin, rv;
@@ -676,6 +672,18 @@ static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
676 return -ENOTTY; 672 return -ENOTTY;
677} 673}
678 674
675static long wdt_unlocked_ioctl(struct file *file, unsigned int cmd,
676 unsigned long arg)
677{
678 int ret;
679
680 lock_kernel();
681 ret = wdt_ioctl(file, cmd, arg);
682 unlock_kernel();
683
684 return ret;
685}
686
679/** 687/**
680 * wdt_open: 688 * wdt_open:
681 * @inode: inode of device 689 * @inode: inode of device
@@ -695,7 +703,7 @@ static int wdt_open(struct inode *inode, struct file *file)
695 */ 703 */
696 wdt_is_open = 1; 704 wdt_is_open = 1;
697 unlock_kernel(); 705 unlock_kernel();
698 return 0; 706 return nonseekable_open(inode, file);
699 } 707 }
700 return -ENODEV; 708 return -ENODEV;
701} 709}
@@ -736,7 +744,7 @@ static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
736static const struct file_operations wdt_fops = { 744static const struct file_operations wdt_fops = {
737 .owner = THIS_MODULE, 745 .owner = THIS_MODULE,
738 .read = wdt_read, 746 .read = wdt_read,
739 .ioctl = wdt_ioctl, 747 .unlocked_ioctl = wdt_unlocked_ioctl,
740 .write = wdt_write, 748 .write = wdt_write,
741 .open = wdt_open, 749 .open = wdt_open,
742 .release = wdt_release, 750 .release = wdt_release,
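
The m41t80 conversion keeps the old semantics exactly by taking the BKL
inside the new unlocked_ioctl wrapper. Where an ioctl handler only touches
its own driver state, a driver-private mutex gives the same exclusion without
the big kernel lock; a sketch of that variant (the mutex name is invented):

#include <linux/fs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(wdt_ioctl_mutex);	/* serialises this driver only */

static long wdt_unlocked_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	long ret;

	mutex_lock(&wdt_ioctl_mutex);
	ret = wdt_ioctl(file, cmd, arg);	/* the old handler, unchanged */
	mutex_unlock(&wdt_ioctl_mutex);

	return ret;
}

The lock_kernel() version in the patch preserves the historical behaviour
bit for bit; the mutex form is the usual next step once it is clear nothing
else depends on the BKL.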
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index d71fe61db1d6..25ec921db07c 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -379,7 +379,6 @@ static struct rtc_class_ops mxc_rtc_ops = {
379 379
380static int __init mxc_rtc_probe(struct platform_device *pdev) 380static int __init mxc_rtc_probe(struct platform_device *pdev)
381{ 381{
382 struct clk *clk;
383 struct resource *res; 382 struct resource *res;
384 struct rtc_device *rtc; 383 struct rtc_device *rtc;
385 struct rtc_plat_data *pdata = NULL; 384 struct rtc_plat_data *pdata = NULL;
@@ -402,14 +401,15 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
402 pdata->ioaddr = devm_ioremap(&pdev->dev, res->start, 401 pdata->ioaddr = devm_ioremap(&pdev->dev, res->start,
403 resource_size(res)); 402 resource_size(res));
404 403
405 clk = clk_get(&pdev->dev, "ckil"); 404 pdata->clk = clk_get(&pdev->dev, "rtc");
406 if (IS_ERR(clk)) { 405 if (IS_ERR(pdata->clk)) {
407 ret = PTR_ERR(clk); 406 dev_err(&pdev->dev, "unable to get clock!\n");
407 ret = PTR_ERR(pdata->clk);
408 goto exit_free_pdata; 408 goto exit_free_pdata;
409 } 409 }
410 410
411 rate = clk_get_rate(clk); 411 clk_enable(pdata->clk);
412 clk_put(clk); 412 rate = clk_get_rate(pdata->clk);
413 413
414 if (rate == 32768) 414 if (rate == 32768)
415 reg = RTC_INPUT_CLK_32768HZ; 415 reg = RTC_INPUT_CLK_32768HZ;
@@ -420,7 +420,7 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
420 else { 420 else {
421 dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate); 421 dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate);
422 ret = -EINVAL; 422 ret = -EINVAL;
423 goto exit_free_pdata; 423 goto exit_put_clk;
424 } 424 }
425 425
426 reg |= RTC_ENABLE_BIT; 426 reg |= RTC_ENABLE_BIT;
@@ -428,18 +428,9 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
428 if (((readw(pdata->ioaddr + RTC_RTCCTL)) & RTC_ENABLE_BIT) == 0) { 428 if (((readw(pdata->ioaddr + RTC_RTCCTL)) & RTC_ENABLE_BIT) == 0) {
429 dev_err(&pdev->dev, "hardware module can't be enabled!\n"); 429 dev_err(&pdev->dev, "hardware module can't be enabled!\n");
430 ret = -EIO; 430 ret = -EIO;
431 goto exit_free_pdata; 431 goto exit_put_clk;
432 }
433
434 pdata->clk = clk_get(&pdev->dev, "rtc");
435 if (IS_ERR(pdata->clk)) {
436 dev_err(&pdev->dev, "unable to get clock!\n");
437 ret = PTR_ERR(pdata->clk);
438 goto exit_free_pdata;
439 } 432 }
440 433
441 clk_enable(pdata->clk);
442
443 rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, 434 rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
444 THIS_MODULE); 435 THIS_MODULE);
445 if (IS_ERR(rtc)) { 436 if (IS_ERR(rtc)) {
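
The net effect of the mxc hunks is that the one remaining clk_get() happens
early and the clock stays enabled, so every later failure has to release it;
hence the new exit_put_clk label. The surrounding shape is the usual
goto-based unwind ladder, sketched here with illustrative names:

	/* inside probe(); error paths unwind in reverse acquisition order */
	pdata->clk = clk_get(&pdev->dev, "rtc");
	if (IS_ERR(pdata->clk)) {
		ret = PTR_ERR(pdata->clk);
		goto exit_free_pdata;
	}
	clk_enable(pdata->clk);

	if (bad_rate) {			/* placeholder for the rate checks */
		ret = -EINVAL;
		goto exit_put_clk;	/* undoes clk_enable() + clk_get() */
	}

	return 0;

exit_put_clk:
	clk_disable(pdata->clk);
	clk_put(pdata->clk);
exit_free_pdata:
	kfree(pdata);
	return ret;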
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 4969b6059c89..e5972b2c17b7 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -29,6 +29,11 @@
29#include <asm/irq.h> 29#include <asm/irq.h>
30#include <plat/regs-rtc.h> 30#include <plat/regs-rtc.h>
31 31
32enum s3c_cpu_type {
33 TYPE_S3C2410,
34 TYPE_S3C64XX,
35};
36
32/* I have yet to find an S3C implementation with more than one 37/* I have yet to find an S3C implementation with more than one
33 * of these rtc blocks in */ 38 * of these rtc blocks in */
34 39
@@ -37,6 +42,7 @@ static struct resource *s3c_rtc_mem;
37static void __iomem *s3c_rtc_base; 42static void __iomem *s3c_rtc_base;
38static int s3c_rtc_alarmno = NO_IRQ; 43static int s3c_rtc_alarmno = NO_IRQ;
39static int s3c_rtc_tickno = NO_IRQ; 44static int s3c_rtc_tickno = NO_IRQ;
45static enum s3c_cpu_type s3c_rtc_cpu_type;
40 46
41static DEFINE_SPINLOCK(s3c_rtc_pie_lock); 47static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
42 48
@@ -80,12 +86,25 @@ static int s3c_rtc_setpie(struct device *dev, int enabled)
80 pr_debug("%s: pie=%d\n", __func__, enabled); 86 pr_debug("%s: pie=%d\n", __func__, enabled);
81 87
82 spin_lock_irq(&s3c_rtc_pie_lock); 88 spin_lock_irq(&s3c_rtc_pie_lock);
83 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
84 89
85 if (enabled) 90 if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
86 tmp |= S3C2410_TICNT_ENABLE; 91 tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
92 tmp &= ~S3C64XX_RTCCON_TICEN;
93
94 if (enabled)
95 tmp |= S3C64XX_RTCCON_TICEN;
96
97 writeb(tmp, s3c_rtc_base + S3C2410_RTCCON);
98 } else {
99 tmp = readb(s3c_rtc_base + S3C2410_TICNT);
100 tmp &= ~S3C2410_TICNT_ENABLE;
101
102 if (enabled)
103 tmp |= S3C2410_TICNT_ENABLE;
104
105 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
106 }
87 107
88 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
89 spin_unlock_irq(&s3c_rtc_pie_lock); 108 spin_unlock_irq(&s3c_rtc_pie_lock);
90 109
91 return 0; 110 return 0;
@@ -93,15 +112,21 @@ static int s3c_rtc_setpie(struct device *dev, int enabled)
93 112
94static int s3c_rtc_setfreq(struct device *dev, int freq) 113static int s3c_rtc_setfreq(struct device *dev, int freq)
95{ 114{
96 unsigned int tmp; 115 struct platform_device *pdev = to_platform_device(dev);
116 struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
117 unsigned int tmp = 0;
97 118
98 if (!is_power_of_2(freq)) 119 if (!is_power_of_2(freq))
99 return -EINVAL; 120 return -EINVAL;
100 121
101 spin_lock_irq(&s3c_rtc_pie_lock); 122 spin_lock_irq(&s3c_rtc_pie_lock);
102 123
103 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE; 124 if (s3c_rtc_cpu_type == TYPE_S3C2410) {
104 tmp |= (128 / freq)-1; 125 tmp = readb(s3c_rtc_base + S3C2410_TICNT);
126 tmp &= S3C2410_TICNT_ENABLE;
127 }
128
129 tmp |= (rtc_dev->max_user_freq / freq)-1;
105 130
106 writeb(tmp, s3c_rtc_base + S3C2410_TICNT); 131 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
107 spin_unlock_irq(&s3c_rtc_pie_lock); 132 spin_unlock_irq(&s3c_rtc_pie_lock);
@@ -283,10 +308,17 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
283 308
284static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) 309static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
285{ 310{
286 unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT); 311 unsigned int ticnt;
287 312
288 seq_printf(seq, "periodic_IRQ\t: %s\n", 313 if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
289 (ticnt & S3C2410_TICNT_ENABLE) ? "yes" : "no" ); 314 ticnt = readb(s3c_rtc_base + S3C2410_RTCCON);
315 ticnt &= S3C64XX_RTCCON_TICEN;
316 } else {
317 ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
318 ticnt &= S3C2410_TICNT_ENABLE;
319 }
320
321 seq_printf(seq, "periodic_IRQ\t: %s\n", ticnt ? "yes" : "no");
290 return 0; 322 return 0;
291} 323}
292 324
@@ -353,10 +385,16 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
353 385
354 if (!en) { 386 if (!en) {
355 tmp = readb(base + S3C2410_RTCCON); 387 tmp = readb(base + S3C2410_RTCCON);
356 writeb(tmp & ~S3C2410_RTCCON_RTCEN, base + S3C2410_RTCCON); 388 if (s3c_rtc_cpu_type == TYPE_S3C64XX)
357 389 tmp &= ~S3C64XX_RTCCON_TICEN;
358 tmp = readb(base + S3C2410_TICNT); 390 tmp &= ~S3C2410_RTCCON_RTCEN;
359 writeb(tmp & ~S3C2410_TICNT_ENABLE, base + S3C2410_TICNT); 391 writeb(tmp, base + S3C2410_RTCCON);
392
393 if (s3c_rtc_cpu_type == TYPE_S3C2410) {
394 tmp = readb(base + S3C2410_TICNT);
395 tmp &= ~S3C2410_TICNT_ENABLE;
396 writeb(tmp, base + S3C2410_TICNT);
397 }
360 } else { 398 } else {
361 /* re-enable the device, and check it is ok */ 399 /* re-enable the device, and check it is ok */
362 400
@@ -472,7 +510,12 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
472 goto err_nortc; 510 goto err_nortc;
473 } 511 }
474 512
475 rtc->max_user_freq = 128; 513 if (s3c_rtc_cpu_type == TYPE_S3C64XX)
514 rtc->max_user_freq = 32768;
515 else
516 rtc->max_user_freq = 128;
517
518 s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;
476 519
477 platform_set_drvdata(pdev, rtc); 520 platform_set_drvdata(pdev, rtc);
478 return 0; 521 return 0;
@@ -492,20 +535,30 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
492 535
493/* RTC Power management control */ 536/* RTC Power management control */
494 537
495static int ticnt_save; 538static int ticnt_save, ticnt_en_save;
496 539
497static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) 540static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
498{ 541{
499 /* save TICNT for anyone using periodic interrupts */ 542 /* save TICNT for anyone using periodic interrupts */
500 ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT); 543 ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
544 if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
545 ticnt_en_save = readb(s3c_rtc_base + S3C2410_RTCCON);
546 ticnt_en_save &= S3C64XX_RTCCON_TICEN;
547 }
501 s3c_rtc_enable(pdev, 0); 548 s3c_rtc_enable(pdev, 0);
502 return 0; 549 return 0;
503} 550}
504 551
505static int s3c_rtc_resume(struct platform_device *pdev) 552static int s3c_rtc_resume(struct platform_device *pdev)
506{ 553{
554 unsigned int tmp;
555
507 s3c_rtc_enable(pdev, 1); 556 s3c_rtc_enable(pdev, 1);
508 writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT); 557 writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
558 if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
559 tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
560 writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
561 }
509 return 0; 562 return 0;
510} 563}
511#else 564#else
@@ -513,13 +566,27 @@ static int s3c_rtc_resume(struct platform_device *pdev)
513#define s3c_rtc_resume NULL 566#define s3c_rtc_resume NULL
514#endif 567#endif
515 568
516static struct platform_driver s3c2410_rtc_driver = { 569static struct platform_device_id s3c_rtc_driver_ids[] = {
570 {
571 .name = "s3c2410-rtc",
572 .driver_data = TYPE_S3C2410,
573 }, {
574 .name = "s3c64xx-rtc",
575 .driver_data = TYPE_S3C64XX,
576 },
577 { }
578};
579
580MODULE_DEVICE_TABLE(platform, s3c_rtc_driver_ids);
581
582static struct platform_driver s3c_rtc_driver = {
517 .probe = s3c_rtc_probe, 583 .probe = s3c_rtc_probe,
518 .remove = __devexit_p(s3c_rtc_remove), 584 .remove = __devexit_p(s3c_rtc_remove),
519 .suspend = s3c_rtc_suspend, 585 .suspend = s3c_rtc_suspend,
520 .resume = s3c_rtc_resume, 586 .resume = s3c_rtc_resume,
587 .id_table = s3c_rtc_driver_ids,
521 .driver = { 588 .driver = {
522 .name = "s3c2410-rtc", 589 .name = "s3c-rtc",
523 .owner = THIS_MODULE, 590 .owner = THIS_MODULE,
524 }, 591 },
525}; 592};
@@ -529,12 +596,12 @@ static char __initdata banner[] = "S3C24XX RTC, (c) 2004,2006 Simtec Electronics
529static int __init s3c_rtc_init(void) 596static int __init s3c_rtc_init(void)
530{ 597{
531 printk(banner); 598 printk(banner);
532 return platform_driver_register(&s3c2410_rtc_driver); 599 return platform_driver_register(&s3c_rtc_driver);
533} 600}
534 601
535static void __exit s3c_rtc_exit(void) 602static void __exit s3c_rtc_exit(void)
536{ 603{
537 platform_driver_unregister(&s3c2410_rtc_driver); 604 platform_driver_unregister(&s3c_rtc_driver);
538} 605}
539 606
540module_init(s3c_rtc_init); 607module_init(s3c_rtc_init);
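
With the id_table in place, one platform driver binds to both device names
and recovers the variant from driver_data in probe(). A generic sketch of the
pattern; note that the lookup has to happen before anything tests the type
(in the probe hunk above, max_user_freq is chosen from s3c_rtc_cpu_type one
line before that variable is assigned, so the first probe still sees the
zero-initialised default):

#include <linux/platform_device.h>

enum foo_cpu_type { TYPE_A, TYPE_B };

static int foo_probe(struct platform_device *pdev)
{
	enum foo_cpu_type type;

	/* read the variant first, before anything depends on it */
	type = platform_get_device_id(pdev)->driver_data;

	if (type == TYPE_B)
		dev_info(&pdev->dev, "B-variant setup\n");

	return 0;
}

static struct platform_device_id foo_ids[] = {
	{ .name = "foo-a", .driver_data = TYPE_A },
	{ .name = "foo-b", .driver_data = TYPE_B },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(platform, foo_ids);

static struct platform_driver foo_driver = {
	.probe    = foo_probe,
	.id_table = foo_ids,
	.driver   = {
		.name  = "foo",	/* fallback match if no id-table entry hits */
		.owner = THIS_MODULE,
	},
};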
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index b16cfe57a484..82931dc65c0b 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -449,17 +449,17 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
449 goto err; 449 goto err;
450 } 450 }
451 451
452 ret = wm831x_request_irq(wm831x, per_irq, wm831x_per_irq, 452 ret = request_threaded_irq(per_irq, NULL, wm831x_per_irq,
453 IRQF_TRIGGER_RISING, "wm831x_rtc_per", 453 IRQF_TRIGGER_RISING, "RTC period",
454 wm831x_rtc); 454 wm831x_rtc);
455 if (ret != 0) { 455 if (ret != 0) {
456 dev_err(&pdev->dev, "Failed to request periodic IRQ %d: %d\n", 456 dev_err(&pdev->dev, "Failed to request periodic IRQ %d: %d\n",
457 per_irq, ret); 457 per_irq, ret);
458 } 458 }
459 459
460 ret = wm831x_request_irq(wm831x, alm_irq, wm831x_alm_irq, 460 ret = request_threaded_irq(alm_irq, NULL, wm831x_alm_irq,
461 IRQF_TRIGGER_RISING, "wm831x_rtc_alm", 461 IRQF_TRIGGER_RISING, "RTC alarm",
462 wm831x_rtc); 462 wm831x_rtc);
463 if (ret != 0) { 463 if (ret != 0) {
464 dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n", 464 dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
465 alm_irq, ret); 465 alm_irq, ret);
@@ -478,8 +478,8 @@ static int __devexit wm831x_rtc_remove(struct platform_device *pdev)
478 int per_irq = platform_get_irq_byname(pdev, "PER"); 478 int per_irq = platform_get_irq_byname(pdev, "PER");
479 int alm_irq = platform_get_irq_byname(pdev, "ALM"); 479 int alm_irq = platform_get_irq_byname(pdev, "ALM");
480 480
481 wm831x_free_irq(wm831x_rtc->wm831x, alm_irq, wm831x_rtc); 481 free_irq(alm_irq, wm831x_rtc);
482 wm831x_free_irq(wm831x_rtc->wm831x, per_irq, wm831x_rtc); 482 free_irq(per_irq, wm831x_rtc);
483 rtc_device_unregister(wm831x_rtc->rtc); 483 rtc_device_unregister(wm831x_rtc->rtc);
484 kfree(wm831x_rtc); 484 kfree(wm831x_rtc);
485 485
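
Switching from the wm831x-specific IRQ helpers to request_threaded_irq()
works because the PMIC core now exposes its interrupts as ordinary Linux IRQ
numbers. A NULL primary handler asks genirq to supply a default one and run
the given function in a kernel thread, where it may sleep (useful when
acknowledging the interrupt needs I2C or SPI traffic). The generic shape,
with invented names:

#include <linux/interrupt.h>

/* thread context: sleeping, e.g. for bus access, is allowed here */
static irqreturn_t foo_alarm_thread(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_request_alarm_irq(int irq, void *ctx)
{
	/* NULL hard handler + threaded bottom half */
	return request_threaded_irq(irq, NULL, foo_alarm_thread,
				    IRQF_TRIGGER_RISING, "foo alarm", ctx);
}

As in the remove hunk above, the matching teardown is a plain
free_irq(irq, ctx).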
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index b4951eb0358e..103fdf6b0b89 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -565,9 +565,9 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp)
565 int devidx = 0; 565 int devidx = 0;
566 566
567 while ((op = bbc_i2c_getdev(bp, devidx++)) != NULL) { 567 while ((op = bbc_i2c_getdev(bp, devidx++)) != NULL) {
568 if (!strcmp(op->node->name, "temperature")) 568 if (!strcmp(op->dev.of_node->name, "temperature"))
569 attach_one_temp(bp, op, temp_index++); 569 attach_one_temp(bp, op, temp_index++);
570 if (!strcmp(op->node->name, "fan-control")) 570 if (!strcmp(op->dev.of_node->name, "fan-control"))
571 attach_one_fan(bp, op, fan_index++); 571 attach_one_fan(bp, op, fan_index++);
572 } 572 }
573 if (temp_index != 0 && fan_index != 0) { 573 if (temp_index != 0 && fan_index != 0) {
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 7e30e5f6e032..8bfdd63a1fcb 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -97,7 +97,7 @@ struct bbc_i2c_client *bbc_i2c_attach(struct bbc_i2c_bus *bp, struct of_device *
97 client->bp = bp; 97 client->bp = bp;
98 client->op = op; 98 client->op = op;
99 99
100 reg = of_get_property(op->node, "reg", NULL); 100 reg = of_get_property(op->dev.of_node, "reg", NULL);
101 if (!reg) { 101 if (!reg) {
102 kfree(client); 102 kfree(client);
103 return NULL; 103 return NULL;
@@ -327,7 +327,7 @@ static struct bbc_i2c_bus * __init attach_one_i2c(struct of_device *op, int inde
327 spin_lock_init(&bp->lock); 327 spin_lock_init(&bp->lock);
328 328
329 entry = 0; 329 entry = 0;
330 for (dp = op->node->child; 330 for (dp = op->dev.of_node->child;
331 dp && entry < 8; 331 dp && entry < 8;
332 dp = dp->sibling, entry++) { 332 dp = dp->sibling, entry++) {
333 struct of_device *child_op; 333 struct of_device *child_op;
@@ -414,8 +414,11 @@ static const struct of_device_id bbc_i2c_match[] = {
414MODULE_DEVICE_TABLE(of, bbc_i2c_match); 414MODULE_DEVICE_TABLE(of, bbc_i2c_match);
415 415
416static struct of_platform_driver bbc_i2c_driver = { 416static struct of_platform_driver bbc_i2c_driver = {
417 .name = "bbc_i2c", 417 .driver = {
418 .match_table = bbc_i2c_match, 418 .name = "bbc_i2c",
419 .owner = THIS_MODULE,
420 .of_match_table = bbc_i2c_match,
421 },
419 .probe = bbc_i2c_probe, 422 .probe = bbc_i2c_probe,
420 .remove = __devexit_p(bbc_i2c_remove), 423 .remove = __devexit_p(bbc_i2c_remove),
421}; 424};
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 3e59189f4137..7baf1b644039 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -216,7 +216,7 @@ static int __devinit d7s_probe(struct of_device *op,
216 writeb(regs, p->regs); 216 writeb(regs, p->regs);
217 217
218 printk(KERN_INFO PFX "7-Segment Display%s at [%s:0x%llx] %s\n", 218 printk(KERN_INFO PFX "7-Segment Display%s at [%s:0x%llx] %s\n",
219 op->node->full_name, 219 op->dev.of_node->full_name,
220 (regs & D7S_FLIP) ? " (FLIPPED)" : "", 220 (regs & D7S_FLIP) ? " (FLIPPED)" : "",
221 op->resource[0].start, 221 op->resource[0].start,
222 sol_compat ? "in sol_compat mode" : ""); 222 sol_compat ? "in sol_compat mode" : "");
@@ -266,8 +266,11 @@ static const struct of_device_id d7s_match[] = {
266MODULE_DEVICE_TABLE(of, d7s_match); 266MODULE_DEVICE_TABLE(of, d7s_match);
267 267
268static struct of_platform_driver d7s_driver = { 268static struct of_platform_driver d7s_driver = {
269 .name = DRIVER_NAME, 269 .driver = {
270 .match_table = d7s_match, 270 .name = DRIVER_NAME,
271 .owner = THIS_MODULE,
272 .of_match_table = d7s_match,
273 },
271 .probe = d7s_probe, 274 .probe = d7s_probe,
272 .remove = __devexit_p(d7s_remove), 275 .remove = __devexit_p(d7s_remove),
273}; 276};
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index c6e2eff19409..c8166ecf5276 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1043,7 +1043,7 @@ static int __devinit envctrl_probe(struct of_device *op,
1043 return -ENOMEM; 1043 return -ENOMEM;
1044 1044
1045 index = 0; 1045 index = 0;
1046 dp = op->node->child; 1046 dp = op->dev.of_node->child;
1047 while (dp) { 1047 while (dp) {
1048 if (!strcmp(dp->name, "gpio")) { 1048 if (!strcmp(dp->name, "gpio")) {
1049 i2c_childlist[index].i2ctype = I2C_GPIO; 1049 i2c_childlist[index].i2ctype = I2C_GPIO;
@@ -1131,8 +1131,11 @@ static const struct of_device_id envctrl_match[] = {
1131MODULE_DEVICE_TABLE(of, envctrl_match); 1131MODULE_DEVICE_TABLE(of, envctrl_match);
1132 1132
1133static struct of_platform_driver envctrl_driver = { 1133static struct of_platform_driver envctrl_driver = {
1134 .name = DRIVER_NAME, 1134 .driver = {
1135 .match_table = envctrl_match, 1135 .name = DRIVER_NAME,
1136 .owner = THIS_MODULE,
1137 .of_match_table = envctrl_match,
1138 },
1136 .probe = envctrl_probe, 1139 .probe = envctrl_probe,
1137 .remove = __devexit_p(envctrl_remove), 1140 .remove = __devexit_p(envctrl_remove),
1138}; 1141};
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index d3b62eb0fba7..368d66294d83 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -162,7 +162,7 @@ static struct miscdevice flash_dev = { FLASH_MINOR, "flash", &flash_fops };
162static int __devinit flash_probe(struct of_device *op, 162static int __devinit flash_probe(struct of_device *op,
163 const struct of_device_id *match) 163 const struct of_device_id *match)
164{ 164{
165 struct device_node *dp = op->node; 165 struct device_node *dp = op->dev.of_node;
166 struct device_node *parent; 166 struct device_node *parent;
167 167
168 parent = dp->parent; 168 parent = dp->parent;
@@ -184,7 +184,7 @@ static int __devinit flash_probe(struct of_device *op,
184 flash.busy = 0; 184 flash.busy = 0;
185 185
186 printk(KERN_INFO "%s: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n", 186 printk(KERN_INFO "%s: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n",
187 op->node->full_name, 187 op->dev.of_node->full_name,
188 flash.read_base, flash.read_size, 188 flash.read_base, flash.read_size,
189 flash.write_base, flash.write_size); 189 flash.write_base, flash.write_size);
190 190
@@ -207,8 +207,11 @@ static const struct of_device_id flash_match[] = {
207MODULE_DEVICE_TABLE(of, flash_match); 207MODULE_DEVICE_TABLE(of, flash_match);
208 208
209static struct of_platform_driver flash_driver = { 209static struct of_platform_driver flash_driver = {
210 .name = "flash", 210 .driver = {
211 .match_table = flash_match, 211 .name = "flash",
212 .owner = THIS_MODULE,
213 .of_match_table = flash_match,
214 },
212 .probe = flash_probe, 215 .probe = flash_probe,
213 .remove = __devexit_p(flash_remove), 216 .remove = __devexit_p(flash_remove),
214}; 217};
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index fc2f676e984d..d53e62ab09da 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -298,9 +298,9 @@ static int opromgetbootargs(void __user *argp, struct openpromio *op, int bufsiz
298/* 298/*
299 * SunOS and Solaris /dev/openprom ioctl calls. 299 * SunOS and Solaris /dev/openprom ioctl calls.
300 */ 300 */
301static int openprom_sunos_ioctl(struct inode * inode, struct file * file, 301static long openprom_sunos_ioctl(struct file * file,
302 unsigned int cmd, unsigned long arg, 302 unsigned int cmd, unsigned long arg,
303 struct device_node *dp) 303 struct device_node *dp)
304{ 304{
305 DATA *data = file->private_data; 305 DATA *data = file->private_data;
306 struct openpromio *opp = NULL; 306 struct openpromio *opp = NULL;
@@ -316,6 +316,8 @@ static int openprom_sunos_ioctl(struct inode * inode, struct file * file,
316 if (bufsize < 0) 316 if (bufsize < 0)
317 return bufsize; 317 return bufsize;
318 318
319 lock_kernel();
320
319 switch (cmd) { 321 switch (cmd) {
320 case OPROMGETOPT: 322 case OPROMGETOPT:
321 case OPROMGETPROP: 323 case OPROMGETPROP:
@@ -365,6 +367,8 @@ static int openprom_sunos_ioctl(struct inode * inode, struct file * file,
365 } 367 }
366 368
367 kfree(opp); 369 kfree(opp);
370 unlock_kernel();
371
368 return error; 372 return error;
369} 373}
370 374
@@ -547,13 +551,14 @@ static int opiocgetnext(unsigned int cmd, void __user *argp)
547 return 0; 551 return 0;
548} 552}
549 553
550static int openprom_bsd_ioctl(struct inode * inode, struct file * file, 554static int openprom_bsd_ioctl(struct file * file,
551 unsigned int cmd, unsigned long arg) 555 unsigned int cmd, unsigned long arg)
552{ 556{
553 DATA *data = (DATA *) file->private_data; 557 DATA *data = (DATA *) file->private_data;
554 void __user *argp = (void __user *)arg; 558 void __user *argp = (void __user *)arg;
555 int err; 559 int err;
556 560
561 lock_kernel();
557 switch (cmd) { 562 switch (cmd) {
558 case OPIOCGET: 563 case OPIOCGET:
559 err = opiocget(argp, data); 564 err = opiocget(argp, data);
@@ -570,10 +575,10 @@ static int openprom_bsd_ioctl(struct inode * inode, struct file * file,
570 case OPIOCGETOPTNODE: 575 case OPIOCGETOPTNODE:
571 BUILD_BUG_ON(sizeof(phandle) != sizeof(int)); 576 BUILD_BUG_ON(sizeof(phandle) != sizeof(int));
572 577
578 err = 0;
573 if (copy_to_user(argp, &options_node->phandle, sizeof(phandle))) 579 if (copy_to_user(argp, &options_node->phandle, sizeof(phandle)))
574 return -EFAULT; 580 err = -EFAULT;
575 581 break;
576 return 0;
577 582
578 case OPIOCGETNEXT: 583 case OPIOCGETNEXT:
579 case OPIOCGETCHILD: 584 case OPIOCGETCHILD:
@@ -581,9 +586,10 @@ static int openprom_bsd_ioctl(struct inode * inode, struct file * file,
581 break; 586 break;
582 587
583 default: 588 default:
584 return -EINVAL; 589 err = -EINVAL;
585 590 break;
586 }; 591 };
592 unlock_kernel();
587 593
588 return err; 594 return err;
589} 595}
@@ -592,8 +598,8 @@ static int openprom_bsd_ioctl(struct inode * inode, struct file * file,
592/* 598/*
593 * Handoff control to the correct ioctl handler. 599 * Handoff control to the correct ioctl handler.
594 */ 600 */
595static int openprom_ioctl(struct inode * inode, struct file * file, 601static long openprom_ioctl(struct file * file,
596 unsigned int cmd, unsigned long arg) 602 unsigned int cmd, unsigned long arg)
597{ 603{
598 DATA *data = (DATA *) file->private_data; 604 DATA *data = (DATA *) file->private_data;
599 605
@@ -602,14 +608,14 @@ static int openprom_ioctl(struct inode * inode, struct file * file,
602 case OPROMNXTOPT: 608 case OPROMNXTOPT:
603 if ((file->f_mode & FMODE_READ) == 0) 609 if ((file->f_mode & FMODE_READ) == 0)
604 return -EPERM; 610 return -EPERM;
605 return openprom_sunos_ioctl(inode, file, cmd, arg, 611 return openprom_sunos_ioctl(file, cmd, arg,
606 options_node); 612 options_node);
607 613
608 case OPROMSETOPT: 614 case OPROMSETOPT:
609 case OPROMSETOPT2: 615 case OPROMSETOPT2:
610 if ((file->f_mode & FMODE_WRITE) == 0) 616 if ((file->f_mode & FMODE_WRITE) == 0)
611 return -EPERM; 617 return -EPERM;
612 return openprom_sunos_ioctl(inode, file, cmd, arg, 618 return openprom_sunos_ioctl(file, cmd, arg,
613 options_node); 619 options_node);
614 620
615 case OPROMNEXT: 621 case OPROMNEXT:
@@ -618,7 +624,7 @@ static int openprom_ioctl(struct inode * inode, struct file * file,
618 case OPROMNXTPROP: 624 case OPROMNXTPROP:
619 if ((file->f_mode & FMODE_READ) == 0) 625 if ((file->f_mode & FMODE_READ) == 0)
620 return -EPERM; 626 return -EPERM;
621 return openprom_sunos_ioctl(inode, file, cmd, arg, 627 return openprom_sunos_ioctl(file, cmd, arg,
622 data->current_node); 628 data->current_node);
623 629
624 case OPROMU2P: 630 case OPROMU2P:
@@ -630,7 +636,7 @@ static int openprom_ioctl(struct inode * inode, struct file * file,
630 case OPROMPATH2NODE: 636 case OPROMPATH2NODE:
631 if ((file->f_mode & FMODE_READ) == 0) 637 if ((file->f_mode & FMODE_READ) == 0)
632 return -EPERM; 638 return -EPERM;
633 return openprom_sunos_ioctl(inode, file, cmd, arg, NULL); 639 return openprom_sunos_ioctl(file, cmd, arg, NULL);
634 640
635 case OPIOCGET: 641 case OPIOCGET:
636 case OPIOCNEXTPROP: 642 case OPIOCNEXTPROP:
@@ -639,12 +645,12 @@ static int openprom_ioctl(struct inode * inode, struct file * file,
639 case OPIOCGETCHILD: 645 case OPIOCGETCHILD:
640 if ((file->f_mode & FMODE_READ) == 0) 646 if ((file->f_mode & FMODE_READ) == 0)
641 return -EBADF; 647 return -EBADF;
642 return openprom_bsd_ioctl(inode,file,cmd,arg); 648 return openprom_bsd_ioctl(file,cmd,arg);
643 649
644 case OPIOCSET: 650 case OPIOCSET:
645 if ((file->f_mode & FMODE_WRITE) == 0) 651 if ((file->f_mode & FMODE_WRITE) == 0)
646 return -EBADF; 652 return -EBADF;
647 return openprom_bsd_ioctl(inode,file,cmd,arg); 653 return openprom_bsd_ioctl(file,cmd,arg);
648 654
649 default: 655 default:
650 return -EINVAL; 656 return -EINVAL;
@@ -676,7 +682,7 @@ static long openprom_compat_ioctl(struct file *file, unsigned int cmd,
676 case OPROMSETCUR: 682 case OPROMSETCUR:
677 case OPROMPCI2NODE: 683 case OPROMPCI2NODE:
678 case OPROMPATH2NODE: 684 case OPROMPATH2NODE:
679 rval = openprom_ioctl(file->f_path.dentry->d_inode, file, cmd, arg); 685 rval = openprom_ioctl(file, cmd, arg);
680 break; 686 break;
681 } 687 }
682 688
@@ -709,7 +715,7 @@ static int openprom_release(struct inode * inode, struct file * file)
709static const struct file_operations openprom_fops = { 715static const struct file_operations openprom_fops = {
710 .owner = THIS_MODULE, 716 .owner = THIS_MODULE,
711 .llseek = no_llseek, 717 .llseek = no_llseek,
712 .ioctl = openprom_ioctl, 718 .unlocked_ioctl = openprom_ioctl,
713 .compat_ioctl = openprom_compat_ioctl, 719 .compat_ioctl = openprom_compat_ioctl,
714 .open = openprom_open, 720 .open = openprom_open,
715 .release = openprom_release, 721 .release = openprom_release,
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 2c56fd56ec63..5f253665a1da 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -382,7 +382,7 @@ static int __devinit uctrl_probe(struct of_device *op,
382 382
383 sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK, &p->regs->uctrl_intr); 383 sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK, &p->regs->uctrl_intr);
384 printk(KERN_INFO "%s: uctrl regs[0x%p] (irq %d)\n", 384 printk(KERN_INFO "%s: uctrl regs[0x%p] (irq %d)\n",
385 op->node->full_name, p->regs, p->irq); 385 op->dev.of_node->full_name, p->regs, p->irq);
386 uctrl_get_event_status(p); 386 uctrl_get_event_status(p);
387 uctrl_get_external_status(p); 387 uctrl_get_external_status(p);
388 388
@@ -425,8 +425,11 @@ static const struct of_device_id uctrl_match[] = {
425MODULE_DEVICE_TABLE(of, uctrl_match); 425MODULE_DEVICE_TABLE(of, uctrl_match);
426 426
427static struct of_platform_driver uctrl_driver = { 427static struct of_platform_driver uctrl_driver = {
428 .name = "uctrl", 428 .driver = {
429 .match_table = uctrl_match, 429 .name = "uctrl",
430 .owner = THIS_MODULE,
431 .of_match_table = uctrl_match,
432 },
430 .probe = uctrl_probe, 433 .probe = uctrl_probe,
431 .remove = __devexit_p(uctrl_remove), 434 .remove = __devexit_p(uctrl_remove),
432}; 435};
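
The uctrl hunk is the matching of_platform_driver conversion: the name, owner and match table migrate into the embedded struct device_driver, and the device node is now reached as op->dev.of_node instead of op->node. A sketch of the new layout, with bar_* as placeholder names:

	static struct of_platform_driver bar_driver = {
		.driver = {
			.name		= "bar",
			.owner		= THIS_MODULE,
			.of_match_table	= bar_match,	/* was: .match_table */
		},
		.probe	= bar_probe,
		.remove	= __devexit_p(bar_remove),
	};
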
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 1bb774becf25..e20b7bdd4c78 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -125,7 +125,7 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
125static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id); 125static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
126static char *twa_aen_severity_lookup(unsigned char severity_code); 126static char *twa_aen_severity_lookup(unsigned char severity_code);
127static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id); 127static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
128static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); 128static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
129static int twa_chrdev_open(struct inode *inode, struct file *file); 129static int twa_chrdev_open(struct inode *inode, struct file *file);
130static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host); 130static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
131static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id); 131static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
@@ -220,7 +220,7 @@ static struct device_attribute *twa_host_attrs[] = {
220/* File operations struct for character device */ 220/* File operations struct for character device */
221static const struct file_operations twa_fops = { 221static const struct file_operations twa_fops = {
222 .owner = THIS_MODULE, 222 .owner = THIS_MODULE,
223 .ioctl = twa_chrdev_ioctl, 223 .unlocked_ioctl = twa_chrdev_ioctl,
224 .open = twa_chrdev_open, 224 .open = twa_chrdev_open,
225 .release = NULL 225 .release = NULL
226}; 226};
@@ -637,8 +637,9 @@ out:
637} /* End twa_check_srl() */ 637} /* End twa_check_srl() */
638 638
639/* This function handles ioctl for the character device */ 639/* This function handles ioctl for the character device */
640static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 640static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
641{ 641{
642 struct inode *inode = file->f_path.dentry->d_inode;
642 long timeout; 643 long timeout;
643 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; 644 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
644 dma_addr_t dma_handle; 645 dma_addr_t dma_handle;
@@ -657,6 +658,8 @@ static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
657 int retval = TW_IOCTL_ERROR_OS_EFAULT; 658 int retval = TW_IOCTL_ERROR_OS_EFAULT;
658 void __user *argp = (void __user *)arg; 659 void __user *argp = (void __user *)arg;
659 660
661 lock_kernel();
662
660 /* Only let one of these through at a time */ 663 /* Only let one of these through at a time */
661 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { 664 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
662 retval = TW_IOCTL_ERROR_OS_EINTR; 665 retval = TW_IOCTL_ERROR_OS_EINTR;
@@ -876,6 +879,7 @@ out3:
876out2: 879out2:
877 mutex_unlock(&tw_dev->ioctl_lock); 880 mutex_unlock(&tw_dev->ioctl_lock);
878out: 881out:
882 unlock_kernel();
879 return retval; 883 return retval;
880} /* End twa_chrdev_ioctl() */ 884} /* End twa_chrdev_ioctl() */
881 885
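
Since the unlocked entry point no longer receives the inode, a handler that needs the minor number, as twa_chrdev_ioctl does to pick its device extension, recovers it from the file itself. A hedged sketch of that recovery (baz_* names and the lookup helper are illustrative, not real API):

	static long baz_chrdev_ioctl(struct file *file, unsigned int cmd,
				     unsigned long arg)
	{
		/* The inode is still reachable through the file's dentry. */
		struct inode *inode = file->f_path.dentry->d_inode;
		struct baz_dev *dev = baz_device_list[iminor(inode)];
		long ret;

		lock_kernel();		/* preserve old ->ioctl exclusion */
		ret = baz_do_ioctl(dev, cmd, arg);	/* hypothetical helper */
		unlock_kernel();
		return ret;
	}
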
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index d38000db9237..f481e734aad4 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -750,19 +750,22 @@ static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
750 750
751/* This function handles ioctl for the character device 751/* This function handles ioctl for the character device
752 This interface is used by smartmontools open source software */ 752 This interface is used by smartmontools open source software */
753static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 753static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
754{ 754{
755 long timeout; 755 long timeout;
756 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; 756 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
757 dma_addr_t dma_handle; 757 dma_addr_t dma_handle;
758 int request_id = 0; 758 int request_id = 0;
759 TW_Ioctl_Driver_Command driver_command; 759 TW_Ioctl_Driver_Command driver_command;
760 struct inode *inode = file->f_dentry->d_inode;
760 TW_Ioctl_Buf_Apache *tw_ioctl; 761 TW_Ioctl_Buf_Apache *tw_ioctl;
761 TW_Command_Full *full_command_packet; 762 TW_Command_Full *full_command_packet;
762 TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)]; 763 TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
763 int retval = -EFAULT; 764 int retval = -EFAULT;
764 void __user *argp = (void __user *)arg; 765 void __user *argp = (void __user *)arg;
765 766
767 lock_kernel();
768
766 /* Only let one of these through at a time */ 769 /* Only let one of these through at a time */
767 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { 770 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
768 retval = -EINTR; 771 retval = -EINTR;
@@ -858,6 +861,7 @@ out3:
858out2: 861out2:
859 mutex_unlock(&tw_dev->ioctl_lock); 862 mutex_unlock(&tw_dev->ioctl_lock);
860out: 863out:
864 unlock_kernel();
861 return retval; 865 return retval;
862} /* End twl_chrdev_ioctl() */ 866} /* End twl_chrdev_ioctl() */
863 867
@@ -884,7 +888,7 @@ out:
884/* File operations struct for character device */ 888/* File operations struct for character device */
885static const struct file_operations twl_fops = { 889static const struct file_operations twl_fops = {
886 .owner = THIS_MODULE, 890 .owner = THIS_MODULE,
887 .ioctl = twl_chrdev_ioctl, 891 .unlocked_ioctl = twl_chrdev_ioctl,
888 .open = twl_chrdev_open, 892 .open = twl_chrdev_open,
889 .release = NULL 893 .release = NULL
890}; 894};
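
One small inconsistency worth noting: 3w-sas reaches the inode through file->f_dentry, while 3w-9xxx above uses file->f_path.dentry. In kernels of this vintage f_dentry was a compatibility macro for f_path.dentry, so the two spellings name the same dentry:

	/* equivalent in this era; f_dentry is the legacy alias */
	struct inode *a = file->f_dentry->d_inode;
	struct inode *b = file->f_path.dentry->d_inode;
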
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index d119a614bf7d..30d735ad35b5 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -881,7 +881,7 @@ static int tw_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
881} /* End tw_allocate_memory() */ 881} /* End tw_allocate_memory() */
882 882
883/* This function handles ioctl for the character device */ 883/* This function handles ioctl for the character device */
884static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) 884static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
885{ 885{
886 int request_id; 886 int request_id;
887 dma_addr_t dma_handle; 887 dma_addr_t dma_handle;
@@ -889,6 +889,7 @@ static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
889 unsigned long flags; 889 unsigned long flags;
890 unsigned int data_buffer_length = 0; 890 unsigned int data_buffer_length = 0;
891 unsigned long data_buffer_length_adjusted = 0; 891 unsigned long data_buffer_length_adjusted = 0;
892 struct inode *inode = file->f_dentry->d_inode;
892 unsigned long *cpu_addr; 893 unsigned long *cpu_addr;
893 long timeout; 894 long timeout;
894 TW_New_Ioctl *tw_ioctl; 895 TW_New_Ioctl *tw_ioctl;
@@ -899,9 +900,12 @@ static int tw_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int
899 900
900 dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl()\n"); 901 dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl()\n");
901 902
903 lock_kernel();
902 /* Only let one of these through at a time */ 904 /* Only let one of these through at a time */
903 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) 905 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
906 unlock_kernel();
904 return -EINTR; 907 return -EINTR;
908 }
905 909
906 /* First copy down the buffer length */ 910 /* First copy down the buffer length */
907 if (copy_from_user(&data_buffer_length, argp, sizeof(unsigned int))) 911 if (copy_from_user(&data_buffer_length, argp, sizeof(unsigned int)))
@@ -1030,6 +1034,7 @@ out2:
1030 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, cpu_addr, dma_handle); 1034 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, cpu_addr, dma_handle);
1031out: 1035out:
1032 mutex_unlock(&tw_dev->ioctl_lock); 1036 mutex_unlock(&tw_dev->ioctl_lock);
1037 unlock_kernel();
1033 return retval; 1038 return retval;
1034} /* End tw_chrdev_ioctl() */ 1039} /* End tw_chrdev_ioctl() */
1035 1040
@@ -1052,7 +1057,7 @@ static int tw_chrdev_open(struct inode *inode, struct file *file)
1052/* File operations struct for character device */ 1057/* File operations struct for character device */
1053static const struct file_operations tw_fops = { 1058static const struct file_operations tw_fops = {
1054 .owner = THIS_MODULE, 1059 .owner = THIS_MODULE,
1055 .ioctl = tw_chrdev_ioctl, 1060 .unlocked_ioctl = tw_chrdev_ioctl,
1056 .open = tw_chrdev_open, 1061 .open = tw_chrdev_open,
1057 .release = NULL 1062 .release = NULL
1058}; 1063};
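
In 3w-xxxx the BKL is taken before the interruptible mutex, which is why the early -EINTR return grows braces: any path that leaves the function after lock_kernel() must drop the BKL first. The shape to preserve (names assumed):

	lock_kernel();
	if (mutex_lock_interruptible(&dev->ioctl_lock)) {
		unlock_kernel();	/* don't leak the BKL on -EINTR */
		return -EINTR;
	}
	/* ... body ... */
	mutex_unlock(&dev->ioctl_lock);
	unlock_kernel();
	return retval;
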
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 308541ff85cf..1bb5d3f0e260 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -1,34 +1,31 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/mm.h>
3#include <linux/slab.h>
4#include <linux/blkdev.h>
5#include <linux/init.h> 2#include <linux/init.h>
6#include <linux/interrupt.h> 3#include <linux/interrupt.h>
4#include <linux/mm.h>
5#include <linux/slab.h>
6#include <linux/spinlock.h>
7#include <linux/zorro.h>
7 8
8#include <asm/setup.h>
9#include <asm/page.h> 9#include <asm/page.h>
10#include <asm/pgtable.h> 10#include <asm/pgtable.h>
11#include <asm/amigaints.h> 11#include <asm/amigaints.h>
12#include <asm/amigahw.h> 12#include <asm/amigahw.h>
13#include <linux/zorro.h>
14#include <asm/irq.h>
15#include <linux/spinlock.h>
16 13
17#include "scsi.h" 14#include "scsi.h"
18#include <scsi/scsi_host.h>
19#include "wd33c93.h" 15#include "wd33c93.h"
20#include "a2091.h" 16#include "a2091.h"
21 17
22#include <linux/stat.h>
23
24 18
25static int a2091_release(struct Scsi_Host *instance); 19struct a2091_hostdata {
20 struct WD33C93_hostdata wh;
21 struct a2091_scsiregs *regs;
22};
26 23
27static irqreturn_t a2091_intr(int irq, void *data) 24static irqreturn_t a2091_intr(int irq, void *data)
28{ 25{
29 struct Scsi_Host *instance = data; 26 struct Scsi_Host *instance = data;
30 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); 27 struct a2091_hostdata *hdata = shost_priv(instance);
31 unsigned int status = regs->ISTR; 28 unsigned int status = hdata->regs->ISTR;
32 unsigned long flags; 29 unsigned long flags;
33 30
34 if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS)) 31 if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS))
@@ -43,38 +40,39 @@ static irqreturn_t a2091_intr(int irq, void *data)
43static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 40static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
44{ 41{
45 struct Scsi_Host *instance = cmd->device->host; 42 struct Scsi_Host *instance = cmd->device->host;
46 struct WD33C93_hostdata *hdata = shost_priv(instance); 43 struct a2091_hostdata *hdata = shost_priv(instance);
47 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); 44 struct WD33C93_hostdata *wh = &hdata->wh;
45 struct a2091_scsiregs *regs = hdata->regs;
48 unsigned short cntr = CNTR_PDMD | CNTR_INTEN; 46 unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
49 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 47 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
50 48
51 /* don't allow DMA if the physical address is bad */ 49 /* don't allow DMA if the physical address is bad */
52 if (addr & A2091_XFER_MASK) { 50 if (addr & A2091_XFER_MASK) {
53 hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; 51 wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
54 hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len, 52 wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
55 GFP_KERNEL); 53 GFP_KERNEL);
56 54
57 /* can't allocate memory; use PIO */ 55 /* can't allocate memory; use PIO */
58 if (!hdata->dma_bounce_buffer) { 56 if (!wh->dma_bounce_buffer) {
59 hdata->dma_bounce_len = 0; 57 wh->dma_bounce_len = 0;
60 return 1; 58 return 1;
61 } 59 }
62 60
63 /* get the physical address of the bounce buffer */ 61 /* get the physical address of the bounce buffer */
64 addr = virt_to_bus(hdata->dma_bounce_buffer); 62 addr = virt_to_bus(wh->dma_bounce_buffer);
65 63
66 /* the bounce buffer may not be in the first 16M of physmem */ 64 /* the bounce buffer may not be in the first 16M of physmem */
67 if (addr & A2091_XFER_MASK) { 65 if (addr & A2091_XFER_MASK) {
68 /* we could use chipmem... maybe later */ 66 /* we could use chipmem... maybe later */
69 kfree(hdata->dma_bounce_buffer); 67 kfree(wh->dma_bounce_buffer);
70 hdata->dma_bounce_buffer = NULL; 68 wh->dma_bounce_buffer = NULL;
71 hdata->dma_bounce_len = 0; 69 wh->dma_bounce_len = 0;
72 return 1; 70 return 1;
73 } 71 }
74 72
75 if (!dir_in) { 73 if (!dir_in) {
76 /* copy to bounce buffer for a write */ 74 /* copy to bounce buffer for a write */
77 memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, 75 memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
78 cmd->SCp.this_residual); 76 cmd->SCp.this_residual);
79 } 77 }
80 } 78 }
@@ -84,7 +82,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
84 cntr |= CNTR_DDIR; 82 cntr |= CNTR_DDIR;
85 83
86 /* remember direction */ 84 /* remember direction */
87 hdata->dma_dir = dir_in; 85 wh->dma_dir = dir_in;
88 86
89 regs->CNTR = cntr; 87 regs->CNTR = cntr;
90 88
@@ -108,20 +106,21 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
108static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, 106static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
109 int status) 107 int status)
110{ 108{
111 struct WD33C93_hostdata *hdata = shost_priv(instance); 109 struct a2091_hostdata *hdata = shost_priv(instance);
112 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); 110 struct WD33C93_hostdata *wh = &hdata->wh;
111 struct a2091_scsiregs *regs = hdata->regs;
113 112
114 /* disable SCSI interrupts */ 113 /* disable SCSI interrupts */
115 unsigned short cntr = CNTR_PDMD; 114 unsigned short cntr = CNTR_PDMD;
116 115
117 if (!hdata->dma_dir) 116 if (!wh->dma_dir)
118 cntr |= CNTR_DDIR; 117 cntr |= CNTR_DDIR;
119 118
120 /* disable SCSI interrupts */ 119 /* disable SCSI interrupts */
121 regs->CNTR = cntr; 120 regs->CNTR = cntr;
122 121
123 /* flush if we were reading */ 122 /* flush if we were reading */
124 if (hdata->dma_dir) { 123 if (wh->dma_dir) {
125 regs->FLUSH = 1; 124 regs->FLUSH = 1;
126 while (!(regs->ISTR & ISTR_FE_FLG)) 125 while (!(regs->ISTR & ISTR_FE_FLG))
127 ; 126 ;
@@ -137,95 +136,37 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
137 regs->CNTR = CNTR_PDMD | CNTR_INTEN; 136 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
138 137
139 /* copy from a bounce buffer, if necessary */ 138 /* copy from a bounce buffer, if necessary */
140 if (status && hdata->dma_bounce_buffer) { 139 if (status && wh->dma_bounce_buffer) {
141 if (hdata->dma_dir) 140 if (wh->dma_dir)
142 memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer, 141 memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
143 SCpnt->SCp.this_residual); 142 SCpnt->SCp.this_residual);
144 kfree(hdata->dma_bounce_buffer); 143 kfree(wh->dma_bounce_buffer);
145 hdata->dma_bounce_buffer = NULL; 144 wh->dma_bounce_buffer = NULL;
146 hdata->dma_bounce_len = 0; 145 wh->dma_bounce_len = 0;
147 }
148}
149
150static int __init a2091_detect(struct scsi_host_template *tpnt)
151{
152 static unsigned char called = 0;
153 struct Scsi_Host *instance;
154 unsigned long address;
155 struct zorro_dev *z = NULL;
156 wd33c93_regs wdregs;
157 a2091_scsiregs *regs;
158 struct WD33C93_hostdata *hdata;
159 int num_a2091 = 0;
160
161 if (!MACH_IS_AMIGA || called)
162 return 0;
163 called = 1;
164
165 tpnt->proc_name = "A2091";
166 tpnt->proc_info = &wd33c93_proc_info;
167
168 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
169 if (z->id != ZORRO_PROD_CBM_A590_A2091_1 &&
170 z->id != ZORRO_PROD_CBM_A590_A2091_2)
171 continue;
172 address = z->resource.start;
173 if (!request_mem_region(address, 256, "wd33c93"))
174 continue;
175
176 instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
177 if (instance == NULL)
178 goto release;
179 instance->base = ZTWO_VADDR(address);
180 instance->irq = IRQ_AMIGA_PORTS;
181 instance->unique_id = z->slotaddr;
182 regs = (a2091_scsiregs *)(instance->base);
183 regs->DAWR = DAWR_A2091;
184 wdregs.SASR = &regs->SASR;
185 wdregs.SCMD = &regs->SCMD;
186 hdata = shost_priv(instance);
187 hdata->no_sync = 0xff;
188 hdata->fast = 0;
189 hdata->dma_mode = CTRL_DMA;
190 wd33c93_init(instance, wdregs, dma_setup, dma_stop,
191 WD33C93_FS_8_10);
192 if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
193 "A2091 SCSI", instance))
194 goto unregister;
195 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
196 num_a2091++;
197 continue;
198
199unregister:
200 scsi_unregister(instance);
201release:
202 release_mem_region(address, 256);
203 } 146 }
204
205 return num_a2091;
206} 147}
207 148
208static int a2091_bus_reset(struct scsi_cmnd *cmd) 149static int a2091_bus_reset(struct scsi_cmnd *cmd)
209{ 150{
151 struct Scsi_Host *instance = cmd->device->host;
152
210 /* FIXME perform bus-specific reset */ 153 /* FIXME perform bus-specific reset */
211 154
212 /* FIXME 2: kill this function, and let midlayer fall back 155 /* FIXME 2: kill this function, and let midlayer fall back
213 to the same action, calling wd33c93_host_reset() */ 156 to the same action, calling wd33c93_host_reset() */
214 157
215 spin_lock_irq(cmd->device->host->host_lock); 158 spin_lock_irq(instance->host_lock);
216 wd33c93_host_reset(cmd); 159 wd33c93_host_reset(cmd);
217 spin_unlock_irq(cmd->device->host->host_lock); 160 spin_unlock_irq(instance->host_lock);
218 161
219 return SUCCESS; 162 return SUCCESS;
220} 163}
221 164
222#define HOSTS_C 165static struct scsi_host_template a2091_scsi_template = {
223 166 .module = THIS_MODULE,
224static struct scsi_host_template driver_template = {
225 .proc_name = "A2901",
226 .name = "Commodore A2091/A590 SCSI", 167 .name = "Commodore A2091/A590 SCSI",
227 .detect = a2091_detect, 168 .proc_info = wd33c93_proc_info,
228 .release = a2091_release, 169 .proc_name = "A2901",
229 .queuecommand = wd33c93_queuecommand, 170 .queuecommand = wd33c93_queuecommand,
230 .eh_abort_handler = wd33c93_abort, 171 .eh_abort_handler = wd33c93_abort,
231 .eh_bus_reset_handler = a2091_bus_reset, 172 .eh_bus_reset_handler = a2091_bus_reset,
@@ -237,19 +178,103 @@ static struct scsi_host_template driver_template = {
237 .use_clustering = DISABLE_CLUSTERING 178 .use_clustering = DISABLE_CLUSTERING
238}; 179};
239 180
181static int __devinit a2091_probe(struct zorro_dev *z,
182 const struct zorro_device_id *ent)
183{
184 struct Scsi_Host *instance;
185 int error;
186 struct a2091_scsiregs *regs;
187 wd33c93_regs wdregs;
188 struct a2091_hostdata *hdata;
240 189
241#include "scsi_module.c" 190 if (!request_mem_region(z->resource.start, 256, "wd33c93"))
191 return -EBUSY;
242 192
243static int a2091_release(struct Scsi_Host *instance) 193 instance = scsi_host_alloc(&a2091_scsi_template,
194 sizeof(struct a2091_hostdata));
195 if (!instance) {
196 error = -ENOMEM;
197 goto fail_alloc;
198 }
199
200 instance->irq = IRQ_AMIGA_PORTS;
201 instance->unique_id = z->slotaddr;
202
203 regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start);
204 regs->DAWR = DAWR_A2091;
205
206 wdregs.SASR = &regs->SASR;
207 wdregs.SCMD = &regs->SCMD;
208
209 hdata = shost_priv(instance);
210 hdata->wh.no_sync = 0xff;
211 hdata->wh.fast = 0;
212 hdata->wh.dma_mode = CTRL_DMA;
213 hdata->regs = regs;
214
215 wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10);
216 error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
217 "A2091 SCSI", instance);
218 if (error)
219 goto fail_irq;
220
221 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
222
223 error = scsi_add_host(instance, NULL);
224 if (error)
225 goto fail_host;
226
227 zorro_set_drvdata(z, instance);
228
229 scsi_scan_host(instance);
230 return 0;
231
232fail_host:
233 free_irq(IRQ_AMIGA_PORTS, instance);
234fail_irq:
235 scsi_host_put(instance);
236fail_alloc:
237 release_mem_region(z->resource.start, 256);
238 return error;
239}
240
241static void __devexit a2091_remove(struct zorro_dev *z)
244{ 242{
245#ifdef MODULE 243 struct Scsi_Host *instance = zorro_get_drvdata(z);
246 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); 244 struct a2091_hostdata *hdata = shost_priv(instance);
247 245
248 regs->CNTR = 0; 246 hdata->regs->CNTR = 0;
249 release_mem_region(ZTWO_PADDR(instance->base), 256); 247 scsi_remove_host(instance);
250 free_irq(IRQ_AMIGA_PORTS, instance); 248 free_irq(IRQ_AMIGA_PORTS, instance);
251#endif 249 scsi_host_put(instance);
252 return 1; 250 release_mem_region(z->resource.start, 256);
251}
252
253static struct zorro_device_id a2091_zorro_tbl[] __devinitdata = {
254 { ZORRO_PROD_CBM_A590_A2091_1 },
255 { ZORRO_PROD_CBM_A590_A2091_2 },
256 { 0 }
257};
258MODULE_DEVICE_TABLE(zorro, a2091_zorro_tbl);
259
260static struct zorro_driver a2091_driver = {
261 .name = "a2091",
262 .id_table = a2091_zorro_tbl,
263 .probe = a2091_probe,
264 .remove = __devexit_p(a2091_remove),
265};
266
267static int __init a2091_init(void)
268{
269 return zorro_register_driver(&a2091_driver);
270}
271module_init(a2091_init);
272
273static void __exit a2091_exit(void)
274{
275 zorro_unregister_driver(&a2091_driver);
253} 276}
277module_exit(a2091_exit);
254 278
279MODULE_DESCRIPTION("Commodore A2091/A590 SCSI");
255MODULE_LICENSE("GPL"); 280MODULE_LICENSE("GPL");
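
The a2091 rewrite swaps the old detect/release scan for a proper zorro_driver and, in the process, moves board state out of globals into per-host private data: a wrapper struct embeds the generic wd33c93 state and adds the board registers. Because the wd33c93 core casts shost_priv() straight to its own type, the embedded state has to stay the first member. A minimal sketch of the idiom (quux_* names are placeholders):

	struct quux_hostdata {
		struct WD33C93_hostdata wh;	/* must stay first: the core
						 * casts shost_priv() to it */
		struct quux_scsiregs *regs;
	};

	static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
	{
		struct Scsi_Host *instance = cmd->device->host;
		struct quux_hostdata *hdata = shost_priv(instance);
		struct WD33C93_hostdata *wh = &hdata->wh;
		struct quux_scsiregs *regs = hdata->regs;

		/* track bounce-buffer state in wh, program the board via regs */
		return 0;
	}
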
diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h
index 1c3daa1fd754..794b8e65c711 100644
--- a/drivers/scsi/a2091.h
+++ b/drivers/scsi/a2091.h
@@ -25,7 +25,7 @@
25 */ 25 */
26#define A2091_XFER_MASK (0xff000001) 26#define A2091_XFER_MASK (0xff000001)
27 27
28typedef struct { 28struct a2091_scsiregs {
29 unsigned char pad1[64]; 29 unsigned char pad1[64];
30 volatile unsigned short ISTR; 30 volatile unsigned short ISTR;
31 volatile unsigned short CNTR; 31 volatile unsigned short CNTR;
@@ -44,7 +44,7 @@ typedef struct {
44 volatile unsigned short CINT; 44 volatile unsigned short CINT;
45 unsigned char pad7[2]; 45 unsigned char pad7[2];
46 volatile unsigned short FLUSH; 46 volatile unsigned short FLUSH;
47} a2091_scsiregs; 47};
48 48
49#define DAWR_A2091 (3) 49#define DAWR_A2091 (3)
50 50
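
The header side is the usual de-typedef: kernel style discourages typedefs for plain aggregates, so the anonymous typedef becomes a tagged struct and every user writes the struct keyword out. Abbreviated:

	/* before */ typedef struct { volatile unsigned short ISTR; /* ... */ } a2091_scsiregs;
	/* after  */ struct a2091_scsiregs { volatile unsigned short ISTR; /* ... */ };
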
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index bc6eb69f5fd0..d9468027fb61 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -1,53 +1,52 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/mm.h> 2#include <linux/mm.h>
3#include <linux/slab.h>
4#include <linux/blkdev.h>
5#include <linux/ioport.h> 3#include <linux/ioport.h>
6#include <linux/init.h> 4#include <linux/init.h>
5#include <linux/slab.h>
7#include <linux/spinlock.h> 6#include <linux/spinlock.h>
8#include <linux/interrupt.h> 7#include <linux/interrupt.h>
8#include <linux/platform_device.h>
9 9
10#include <asm/setup.h>
11#include <asm/page.h> 10#include <asm/page.h>
12#include <asm/pgtable.h> 11#include <asm/pgtable.h>
13#include <asm/amigaints.h> 12#include <asm/amigaints.h>
14#include <asm/amigahw.h> 13#include <asm/amigahw.h>
15#include <asm/irq.h>
16 14
17#include "scsi.h" 15#include "scsi.h"
18#include <scsi/scsi_host.h>
19#include "wd33c93.h" 16#include "wd33c93.h"
20#include "a3000.h" 17#include "a3000.h"
21 18
22#include <linux/stat.h>
23
24 19
25#define DMA(ptr) ((a3000_scsiregs *)((ptr)->base)) 20struct a3000_hostdata {
26 21 struct WD33C93_hostdata wh;
27static struct Scsi_Host *a3000_host = NULL; 22 struct a3000_scsiregs *regs;
28 23};
29static int a3000_release(struct Scsi_Host *instance);
30 24
31static irqreturn_t a3000_intr(int irq, void *dummy) 25static irqreturn_t a3000_intr(int irq, void *data)
32{ 26{
27 struct Scsi_Host *instance = data;
28 struct a3000_hostdata *hdata = shost_priv(instance);
29 unsigned int status = hdata->regs->ISTR;
33 unsigned long flags; 30 unsigned long flags;
34 unsigned int status = DMA(a3000_host)->ISTR;
35 31
36 if (!(status & ISTR_INT_P)) 32 if (!(status & ISTR_INT_P))
37 return IRQ_NONE; 33 return IRQ_NONE;
38 if (status & ISTR_INTS) { 34 if (status & ISTR_INTS) {
39 spin_lock_irqsave(a3000_host->host_lock, flags); 35 spin_lock_irqsave(instance->host_lock, flags);
40 wd33c93_intr(a3000_host); 36 wd33c93_intr(instance);
41 spin_unlock_irqrestore(a3000_host->host_lock, flags); 37 spin_unlock_irqrestore(instance->host_lock, flags);
42 return IRQ_HANDLED; 38 return IRQ_HANDLED;
43 } 39 }
44 printk("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status); 40 pr_warning("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status);
45 return IRQ_NONE; 41 return IRQ_NONE;
46} 42}
47 43
48static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 44static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
49{ 45{
50 struct WD33C93_hostdata *hdata = shost_priv(a3000_host); 46 struct Scsi_Host *instance = cmd->device->host;
47 struct a3000_hostdata *hdata = shost_priv(instance);
48 struct WD33C93_hostdata *wh = &hdata->wh;
49 struct a3000_scsiregs *regs = hdata->regs;
51 unsigned short cntr = CNTR_PDMD | CNTR_INTEN; 50 unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
52 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 51 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
53 52
@@ -58,23 +57,23 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
58 * buffer 57 * buffer
59 */ 58 */
60 if (addr & A3000_XFER_MASK) { 59 if (addr & A3000_XFER_MASK) {
61 hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; 60 wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
62 hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len, 61 wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
63 GFP_KERNEL); 62 GFP_KERNEL);
64 63
65 /* can't allocate memory; use PIO */ 64 /* can't allocate memory; use PIO */
66 if (!hdata->dma_bounce_buffer) { 65 if (!wh->dma_bounce_buffer) {
67 hdata->dma_bounce_len = 0; 66 wh->dma_bounce_len = 0;
68 return 1; 67 return 1;
69 } 68 }
70 69
71 if (!dir_in) { 70 if (!dir_in) {
72 /* copy to bounce buffer for a write */ 71 /* copy to bounce buffer for a write */
73 memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, 72 memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
74 cmd->SCp.this_residual); 73 cmd->SCp.this_residual);
75 } 74 }
76 75
77 addr = virt_to_bus(hdata->dma_bounce_buffer); 76 addr = virt_to_bus(wh->dma_bounce_buffer);
78 } 77 }
79 78
80 /* setup dma direction */ 79 /* setup dma direction */
@@ -82,12 +81,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
82 cntr |= CNTR_DDIR; 81 cntr |= CNTR_DDIR;
83 82
84 /* remember direction */ 83 /* remember direction */
85 hdata->dma_dir = dir_in; 84 wh->dma_dir = dir_in;
86 85
87 DMA(a3000_host)->CNTR = cntr; 86 regs->CNTR = cntr;
88 87
89 /* setup DMA *physical* address */ 88 /* setup DMA *physical* address */
90 DMA(a3000_host)->ACR = addr; 89 regs->ACR = addr;
91 90
92 if (dir_in) { 91 if (dir_in) {
93 /* invalidate any cache */ 92 /* invalidate any cache */
@@ -99,7 +98,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
99 98
100 /* start DMA */ 99 /* start DMA */
101 mb(); /* make sure setup is completed */ 100 mb(); /* make sure setup is completed */
102 DMA(a3000_host)->ST_DMA = 1; 101 regs->ST_DMA = 1;
103 mb(); /* make sure DMA has started before next IO */ 102 mb(); /* make sure DMA has started before next IO */
104 103
105 /* return success */ 104 /* return success */
@@ -109,22 +108,24 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
109static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, 108static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
110 int status) 109 int status)
111{ 110{
112 struct WD33C93_hostdata *hdata = shost_priv(instance); 111 struct a3000_hostdata *hdata = shost_priv(instance);
112 struct WD33C93_hostdata *wh = &hdata->wh;
113 struct a3000_scsiregs *regs = hdata->regs;
113 114
114 /* disable SCSI interrupts */ 115 /* disable SCSI interrupts */
115 unsigned short cntr = CNTR_PDMD; 116 unsigned short cntr = CNTR_PDMD;
116 117
117 if (!hdata->dma_dir) 118 if (!wh->dma_dir)
118 cntr |= CNTR_DDIR; 119 cntr |= CNTR_DDIR;
119 120
120 DMA(instance)->CNTR = cntr; 121 regs->CNTR = cntr;
121 mb(); /* make sure CNTR is updated before next IO */ 122 mb(); /* make sure CNTR is updated before next IO */
122 123
123 /* flush if we were reading */ 124 /* flush if we were reading */
124 if (hdata->dma_dir) { 125 if (wh->dma_dir) {
125 DMA(instance)->FLUSH = 1; 126 regs->FLUSH = 1;
126 mb(); /* don't allow prefetch */ 127 mb(); /* don't allow prefetch */
127 while (!(DMA(instance)->ISTR & ISTR_FE_FLG)) 128 while (!(regs->ISTR & ISTR_FE_FLG))
128 barrier(); 129 barrier();
129 mb(); /* no IO until FLUSH is done */ 130 mb(); /* no IO until FLUSH is done */
130 } 131 }
@@ -133,96 +134,54 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
133 /* I think that this CINT is only necessary if you are 134 /* I think that this CINT is only necessary if you are
134 * using the terminal count features. HM 7 Mar 1994 135 * using the terminal count features. HM 7 Mar 1994
135 */ 136 */
136 DMA(instance)->CINT = 1; 137 regs->CINT = 1;
137 138
138 /* stop DMA */ 139 /* stop DMA */
139 DMA(instance)->SP_DMA = 1; 140 regs->SP_DMA = 1;
140 mb(); /* make sure DMA is stopped before next IO */ 141 mb(); /* make sure DMA is stopped before next IO */
141 142
142 /* restore the CONTROL bits (minus the direction flag) */ 143 /* restore the CONTROL bits (minus the direction flag) */
143 DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN; 144 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
144 mb(); /* make sure CNTR is updated before next IO */ 145 mb(); /* make sure CNTR is updated before next IO */
145 146
146 /* copy from a bounce buffer, if necessary */ 147 /* copy from a bounce buffer, if necessary */
147 if (status && hdata->dma_bounce_buffer) { 148 if (status && wh->dma_bounce_buffer) {
148 if (SCpnt) { 149 if (SCpnt) {
149 if (hdata->dma_dir && SCpnt) 150 if (wh->dma_dir && SCpnt)
150 memcpy(SCpnt->SCp.ptr, 151 memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
151 hdata->dma_bounce_buffer,
152 SCpnt->SCp.this_residual); 152 SCpnt->SCp.this_residual);
153 kfree(hdata->dma_bounce_buffer); 153 kfree(wh->dma_bounce_buffer);
154 hdata->dma_bounce_buffer = NULL; 154 wh->dma_bounce_buffer = NULL;
155 hdata->dma_bounce_len = 0; 155 wh->dma_bounce_len = 0;
156 } else { 156 } else {
157 kfree(hdata->dma_bounce_buffer); 157 kfree(wh->dma_bounce_buffer);
158 hdata->dma_bounce_buffer = NULL; 158 wh->dma_bounce_buffer = NULL;
159 hdata->dma_bounce_len = 0; 159 wh->dma_bounce_len = 0;
160 } 160 }
161 } 161 }
162} 162}
163 163
164static int __init a3000_detect(struct scsi_host_template *tpnt)
165{
166 wd33c93_regs regs;
167 struct WD33C93_hostdata *hdata;
168
169 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(A3000_SCSI))
170 return 0;
171 if (!request_mem_region(0xDD0000, 256, "wd33c93"))
172 return 0;
173
174 tpnt->proc_name = "A3000";
175 tpnt->proc_info = &wd33c93_proc_info;
176
177 a3000_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
178 if (a3000_host == NULL)
179 goto fail_register;
180
181 a3000_host->base = ZTWO_VADDR(0xDD0000);
182 a3000_host->irq = IRQ_AMIGA_PORTS;
183 DMA(a3000_host)->DAWR = DAWR_A3000;
184 regs.SASR = &(DMA(a3000_host)->SASR);
185 regs.SCMD = &(DMA(a3000_host)->SCMD);
186 hdata = shost_priv(a3000_host);
187 hdata->no_sync = 0xff;
188 hdata->fast = 0;
189 hdata->dma_mode = CTRL_DMA;
190 wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15);
191 if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI",
192 a3000_intr))
193 goto fail_irq;
194 DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN;
195
196 return 1;
197
198fail_irq:
199 scsi_unregister(a3000_host);
200fail_register:
201 release_mem_region(0xDD0000, 256);
202 return 0;
203}
204
205static int a3000_bus_reset(struct scsi_cmnd *cmd) 164static int a3000_bus_reset(struct scsi_cmnd *cmd)
206{ 165{
166 struct Scsi_Host *instance = cmd->device->host;
167
207 /* FIXME perform bus-specific reset */ 168 /* FIXME perform bus-specific reset */
208 169
209 /* FIXME 2: kill this entire function, which should 170 /* FIXME 2: kill this entire function, which should
210 cause mid-layer to call wd33c93_host_reset anyway? */ 171 cause mid-layer to call wd33c93_host_reset anyway? */
211 172
212 spin_lock_irq(cmd->device->host->host_lock); 173 spin_lock_irq(instance->host_lock);
213 wd33c93_host_reset(cmd); 174 wd33c93_host_reset(cmd);
214 spin_unlock_irq(cmd->device->host->host_lock); 175 spin_unlock_irq(instance->host_lock);
215 176
216 return SUCCESS; 177 return SUCCESS;
217} 178}
218 179
219#define HOSTS_C 180static struct scsi_host_template amiga_a3000_scsi_template = {
220 181 .module = THIS_MODULE,
221static struct scsi_host_template driver_template = {
222 .proc_name = "A3000",
223 .name = "Amiga 3000 built-in SCSI", 182 .name = "Amiga 3000 built-in SCSI",
224 .detect = a3000_detect, 183 .proc_info = wd33c93_proc_info,
225 .release = a3000_release, 184 .proc_name = "A3000",
226 .queuecommand = wd33c93_queuecommand, 185 .queuecommand = wd33c93_queuecommand,
227 .eh_abort_handler = wd33c93_abort, 186 .eh_abort_handler = wd33c93_abort,
228 .eh_bus_reset_handler = a3000_bus_reset, 187 .eh_bus_reset_handler = a3000_bus_reset,
@@ -234,15 +193,104 @@ static struct scsi_host_template driver_template = {
234 .use_clustering = ENABLE_CLUSTERING 193 .use_clustering = ENABLE_CLUSTERING
235}; 194};
236 195
196static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
197{
198 struct resource *res;
199 struct Scsi_Host *instance;
200 int error;
201 struct a3000_scsiregs *regs;
202 wd33c93_regs wdregs;
203 struct a3000_hostdata *hdata;
204
205 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
206 if (!res)
207 return -ENODEV;
208
209 if (!request_mem_region(res->start, resource_size(res), "wd33c93"))
210 return -EBUSY;
211
212 instance = scsi_host_alloc(&amiga_a3000_scsi_template,
213 sizeof(struct a3000_hostdata));
214 if (!instance) {
215 error = -ENOMEM;
216 goto fail_alloc;
217 }
218
219 instance->irq = IRQ_AMIGA_PORTS;
237 220
238#include "scsi_module.c" 221 regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start);
222 regs->DAWR = DAWR_A3000;
223
224 wdregs.SASR = &regs->SASR;
225 wdregs.SCMD = &regs->SCMD;
226
227 hdata = shost_priv(instance);
228 hdata->wh.no_sync = 0xff;
229 hdata->wh.fast = 0;
230 hdata->wh.dma_mode = CTRL_DMA;
231 hdata->regs = regs;
232
233 wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_12_15);
234 error = request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED,
235 "A3000 SCSI", instance);
236 if (error)
237 goto fail_irq;
238
239 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
240
241 error = scsi_add_host(instance, NULL);
242 if (error)
243 goto fail_host;
244
245 platform_set_drvdata(pdev, instance);
246
247 scsi_scan_host(instance);
248 return 0;
249
250fail_host:
251 free_irq(IRQ_AMIGA_PORTS, instance);
252fail_irq:
253 scsi_host_put(instance);
254fail_alloc:
255 release_mem_region(res->start, resource_size(res));
256 return error;
257}
258
259static int __exit amiga_a3000_scsi_remove(struct platform_device *pdev)
260{
261 struct Scsi_Host *instance = platform_get_drvdata(pdev);
262 struct a3000_hostdata *hdata = shost_priv(instance);
263 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
264
265 hdata->regs->CNTR = 0;
266 scsi_remove_host(instance);
267 free_irq(IRQ_AMIGA_PORTS, instance);
268 scsi_host_put(instance);
269 release_mem_region(res->start, resource_size(res));
270 return 0;
271}
272
273static struct platform_driver amiga_a3000_scsi_driver = {
274 .remove = __exit_p(amiga_a3000_scsi_remove),
275 .driver = {
276 .name = "amiga-a3000-scsi",
277 .owner = THIS_MODULE,
278 },
279};
280
281static int __init amiga_a3000_scsi_init(void)
282{
283 return platform_driver_probe(&amiga_a3000_scsi_driver,
284 amiga_a3000_scsi_probe);
285}
286module_init(amiga_a3000_scsi_init);
239 287
240static int a3000_release(struct Scsi_Host *instance) 288static void __exit amiga_a3000_scsi_exit(void)
241{ 289{
242 DMA(instance)->CNTR = 0; 290 platform_driver_unregister(&amiga_a3000_scsi_driver);
243 release_mem_region(0xDD0000, 256);
244 free_irq(IRQ_AMIGA_PORTS, a3000_intr);
245 return 1;
246} 291}
292module_exit(amiga_a3000_scsi_exit);
247 293
294MODULE_DESCRIPTION("Amiga 3000 built-in SCSI");
248MODULE_LICENSE("GPL"); 295MODULE_LICENSE("GPL");
296MODULE_ALIAS("platform:amiga-a3000-scsi");
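
Since the A3000 host can never hotplug, the conversion uses platform_driver_probe(): the probe function stays in __init memory and is never stored in the driver structure, and the remove hook is wrapped in __exit_p() so it is compiled out when the driver is built in. A skeleton of the pattern (my_* names are placeholders):

	static int __init my_probe(struct platform_device *pdev)
	{
		/* request resources, scsi_host_alloc(), request_irq(),
		 * scsi_add_host(), scsi_scan_host() ... */
		return 0;
	}

	static int __exit my_remove(struct platform_device *pdev)
	{
		/* tear down in reverse order of probe */
		return 0;
	}

	static struct platform_driver my_driver = {
		.remove	= __exit_p(my_remove),
		.driver	= {
			.name	= "my-scsi",
			.owner	= THIS_MODULE,
		},
		/* no .probe here: it is passed to platform_driver_probe() */
	};

	static int __init my_init(void)
	{
		return platform_driver_probe(&my_driver, my_probe);
	}
	module_init(my_init);
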
diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h
index 684813ee378c..49db4a335aab 100644
--- a/drivers/scsi/a3000.h
+++ b/drivers/scsi/a3000.h
@@ -25,7 +25,7 @@
25 */ 25 */
26#define A3000_XFER_MASK (0x00000003) 26#define A3000_XFER_MASK (0x00000003)
27 27
28typedef struct { 28struct a3000_scsiregs {
29 unsigned char pad1[2]; 29 unsigned char pad1[2];
30 volatile unsigned short DAWR; 30 volatile unsigned short DAWR;
31 volatile unsigned int WTC; 31 volatile unsigned int WTC;
@@ -46,7 +46,7 @@ typedef struct {
46 volatile unsigned char SASR; 46 volatile unsigned char SASR;
47 unsigned char pad9; 47 unsigned char pad9;
48 volatile unsigned char SCMD; 48 volatile unsigned char SCMD;
49} a3000_scsiregs; 49};
50 50
51#define DAWR_A3000 (3) 51#define DAWR_A3000 (3)
52 52
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index 11ae6be8aeaf..23c76f41883c 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -20,10 +20,6 @@
20 20
21#include "53c700.h" 21#include "53c700.h"
22 22
23MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / Kars de Jong <jongk@linux-m68k.org>");
24MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver");
25MODULE_LICENSE("GPL");
26
27 23
28static struct scsi_host_template a4000t_scsi_driver_template = { 24static struct scsi_host_template a4000t_scsi_driver_template = {
29 .name = "A4000T builtin SCSI", 25 .name = "A4000T builtin SCSI",
@@ -32,30 +28,35 @@ static struct scsi_host_template a4000t_scsi_driver_template = {
32 .module = THIS_MODULE, 28 .module = THIS_MODULE,
33}; 29};
34 30
35static struct platform_device *a4000t_scsi_device;
36 31
37#define A4000T_SCSI_ADDR 0xdd0040 32#define A4000T_SCSI_OFFSET 0x40
38 33
39static int __devinit a4000t_probe(struct platform_device *dev) 34static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev)
40{ 35{
41 struct Scsi_Host *host; 36 struct resource *res;
37 phys_addr_t scsi_addr;
42 struct NCR_700_Host_Parameters *hostdata; 38 struct NCR_700_Host_Parameters *hostdata;
39 struct Scsi_Host *host;
43 40
44 if (!(MACH_IS_AMIGA && AMIGAHW_PRESENT(A4000_SCSI))) 41 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
45 goto out; 42 if (!res)
43 return -ENODEV;
46 44
47 if (!request_mem_region(A4000T_SCSI_ADDR, 0x1000, 45 if (!request_mem_region(res->start, resource_size(res),
48 "A4000T builtin SCSI")) 46 "A4000T builtin SCSI"))
49 goto out; 47 return -EBUSY;
50 48
51 hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); 49 hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters),
50 GFP_KERNEL);
52 if (!hostdata) { 51 if (!hostdata) {
53 printk(KERN_ERR "a4000t-scsi: Failed to allocate host data\n"); 52 dev_err(&pdev->dev, "Failed to allocate host data\n");
54 goto out_release; 53 goto out_release;
55 } 54 }
56 55
56 scsi_addr = res->start + A4000T_SCSI_OFFSET;
57
57 /* Fill in the required pieces of hostdata */ 58 /* Fill in the required pieces of hostdata */
58 hostdata->base = (void __iomem *)ZTWO_VADDR(A4000T_SCSI_ADDR); 59 hostdata->base = (void __iomem *)ZTWO_VADDR(scsi_addr);
59 hostdata->clock = 50; 60 hostdata->clock = 50;
60 hostdata->chip710 = 1; 61 hostdata->chip710 = 1;
61 hostdata->dmode_extra = DMODE_FC2; 62 hostdata->dmode_extra = DMODE_FC2;
@@ -63,26 +64,25 @@ static int __devinit a4000t_probe(struct platform_device *dev)
63 64
64 /* and register the chip */ 65 /* and register the chip */
65 host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata, 66 host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata,
66 &dev->dev); 67 &pdev->dev);
67 if (!host) { 68 if (!host) {
68 printk(KERN_ERR "a4000t-scsi: No host detected; " 69 dev_err(&pdev->dev,
69 "board configuration problem?\n"); 70 "No host detected; board configuration problem?\n");
70 goto out_free; 71 goto out_free;
71 } 72 }
72 73
73 host->this_id = 7; 74 host->this_id = 7;
74 host->base = A4000T_SCSI_ADDR; 75 host->base = scsi_addr;
75 host->irq = IRQ_AMIGA_PORTS; 76 host->irq = IRQ_AMIGA_PORTS;
76 77
77 if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi", 78 if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi",
78 host)) { 79 host)) {
79 printk(KERN_ERR "a4000t-scsi: request_irq failed\n"); 80 dev_err(&pdev->dev, "request_irq failed\n");
80 goto out_put_host; 81 goto out_put_host;
81 } 82 }
82 83
83 platform_set_drvdata(dev, host); 84 platform_set_drvdata(pdev, host);
84 scsi_scan_host(host); 85 scsi_scan_host(host);
85
86 return 0; 86 return 0;
87 87
88 out_put_host: 88 out_put_host:
@@ -90,58 +90,49 @@ static int __devinit a4000t_probe(struct platform_device *dev)
90 out_free: 90 out_free:
91 kfree(hostdata); 91 kfree(hostdata);
92 out_release: 92 out_release:
93 release_mem_region(A4000T_SCSI_ADDR, 0x1000); 93 release_mem_region(res->start, resource_size(res));
94 out:
95 return -ENODEV; 94 return -ENODEV;
96} 95}
97 96
98static __devexit int a4000t_device_remove(struct platform_device *dev) 97static int __exit amiga_a4000t_scsi_remove(struct platform_device *pdev)
99{ 98{
100 struct Scsi_Host *host = platform_get_drvdata(dev); 99 struct Scsi_Host *host = platform_get_drvdata(pdev);
101 struct NCR_700_Host_Parameters *hostdata = shost_priv(host); 100 struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
101 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
102 102
103 scsi_remove_host(host); 103 scsi_remove_host(host);
104
105 NCR_700_release(host); 104 NCR_700_release(host);
106 kfree(hostdata); 105 kfree(hostdata);
107 free_irq(host->irq, host); 106 free_irq(host->irq, host);
108 release_mem_region(A4000T_SCSI_ADDR, 0x1000); 107 release_mem_region(res->start, resource_size(res));
109
110 return 0; 108 return 0;
111} 109}
112 110
113static struct platform_driver a4000t_scsi_driver = { 111static struct platform_driver amiga_a4000t_scsi_driver = {
114 .driver = { 112 .remove = __exit_p(amiga_a4000t_scsi_remove),
115 .name = "a4000t-scsi", 113 .driver = {
116 .owner = THIS_MODULE, 114 .name = "amiga-a4000t-scsi",
115 .owner = THIS_MODULE,
117 }, 116 },
118 .probe = a4000t_probe,
119 .remove = __devexit_p(a4000t_device_remove),
120}; 117};
121 118
122static int __init a4000t_scsi_init(void) 119static int __init amiga_a4000t_scsi_init(void)
123{ 120{
124 int err; 121 return platform_driver_probe(&amiga_a4000t_scsi_driver,
125 122 amiga_a4000t_scsi_probe);
126 err = platform_driver_register(&a4000t_scsi_driver);
127 if (err)
128 return err;
129
130 a4000t_scsi_device = platform_device_register_simple("a4000t-scsi",
131 -1, NULL, 0);
132 if (IS_ERR(a4000t_scsi_device)) {
133 platform_driver_unregister(&a4000t_scsi_driver);
134 return PTR_ERR(a4000t_scsi_device);
135 }
136
137 return err;
138} 123}
139 124
140static void __exit a4000t_scsi_exit(void) 125module_init(amiga_a4000t_scsi_init);
126
127static void __exit amiga_a4000t_scsi_exit(void)
141{ 128{
142 platform_device_unregister(a4000t_scsi_device); 129 platform_driver_unregister(&amiga_a4000t_scsi_driver);
143 platform_driver_unregister(&a4000t_scsi_driver);
144} 130}
145 131
146module_init(a4000t_scsi_init); 132module_exit(amiga_a4000t_scsi_exit);
147module_exit(a4000t_scsi_exit); 133
134MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / "
135 "Kars de Jong <jongk@linux-m68k.org>");
136MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver");
137MODULE_LICENSE("GPL");
138MODULE_ALIAS("platform:amiga-a4000t-scsi");
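
a4000t makes the same platform-device move and also stops hard-coding the 0xdd0040 register address: the base now comes from the platform resource, and only the chip's offset within the board remains a driver constant. The resource handling reduces to the standard sequence (MY_OFFSET is a placeholder):

	struct resource *res;
	phys_addr_t scsi_addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	if (!request_mem_region(res->start, resource_size(res), "my-scsi"))
		return -EBUSY;
	scsi_addr = res->start + MY_OFFSET;	/* chip offset within board */
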
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e9373a2d14fa..33898b61fdb5 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -705,12 +705,17 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
705 * Bugs: Needs to handle hot plugging 705 * Bugs: Needs to handle hot plugging
706 */ 706 */
707 707
708static int aac_cfg_ioctl(struct inode *inode, struct file *file, 708static long aac_cfg_ioctl(struct file *file,
709 unsigned int cmd, unsigned long arg) 709 unsigned int cmd, unsigned long arg)
710{ 710{
711 int ret;
711 if (!capable(CAP_SYS_RAWIO)) 712 if (!capable(CAP_SYS_RAWIO))
712 return -EPERM; 713 return -EPERM;
713 return aac_do_ioctl(file->private_data, cmd, (void __user *)arg); 714 lock_kernel();
715 ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
716 unlock_kernel();
717
718 return ret;
714} 719}
715 720
716#ifdef CONFIG_COMPAT 721#ifdef CONFIG_COMPAT
@@ -1029,7 +1034,7 @@ ssize_t aac_get_serial_number(struct device *device, char *buf)
1029 1034
1030static const struct file_operations aac_cfg_fops = { 1035static const struct file_operations aac_cfg_fops = {
1031 .owner = THIS_MODULE, 1036 .owner = THIS_MODULE,
1032 .ioctl = aac_cfg_ioctl, 1037 .unlocked_ioctl = aac_cfg_ioctl,
1033#ifdef CONFIG_COMPAT 1038#ifdef CONFIG_COMPAT
1034 .compat_ioctl = aac_compat_cfg_ioctl, 1039 .compat_ioctl = aac_compat_cfg_ioctl,
1035#endif 1040#endif
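
aacraid is the lightest form of the pushdown: the body already took only the file, so the new long-returning entry point just brackets it with the BKL. Keeping the capable() test ahead of lock_kernel() means unprivileged callers are rejected without ever taking the lock. A sketch (do_cfg_ioctl stands in for the driver's helper):

	static long cfg_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
	{
		long ret;

		if (!capable(CAP_SYS_RAWIO))	/* cheap check, no BKL held */
			return -EPERM;
		lock_kernel();
		ret = do_cfg_ioctl(file->private_data, cmd, arg);
		unlock_kernel();
		return ret;
	}
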
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 0435d044c9da..b0c576f84b28 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -114,12 +114,13 @@ static int hba_count = 0;
114 114
115static struct class *adpt_sysfs_class; 115static struct class *adpt_sysfs_class;
116 116
117static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
117#ifdef CONFIG_COMPAT 118#ifdef CONFIG_COMPAT
118static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long); 119static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
119#endif 120#endif
120 121
121static const struct file_operations adpt_fops = { 122static const struct file_operations adpt_fops = {
122 .ioctl = adpt_ioctl, 123 .unlocked_ioctl = adpt_unlocked_ioctl,
123 .open = adpt_open, 124 .open = adpt_open,
124 .release = adpt_close, 125 .release = adpt_close,
125#ifdef CONFIG_COMPAT 126#ifdef CONFIG_COMPAT
@@ -2069,8 +2070,7 @@ static int adpt_system_info(void __user *buffer)
2069 return 0; 2070 return 0;
2070} 2071}
2071 2072
2072static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, 2073static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
2073 ulong arg)
2074{ 2074{
2075 int minor; 2075 int minor;
2076 int error = 0; 2076 int error = 0;
@@ -2153,6 +2153,20 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
2153 return error; 2153 return error;
2154} 2154}
2155 2155
2156static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2157{
2158 struct inode *inode;
2159 long ret;
2160
2161 inode = file->f_dentry->d_inode;
2162
2163 lock_kernel();
2164 ret = adpt_ioctl(inode, file, cmd, arg);
2165 unlock_kernel();
2166
2167 return ret;
2168}
2169
2156#ifdef CONFIG_COMPAT 2170#ifdef CONFIG_COMPAT
2157static long compat_adpt_ioctl(struct file *file, 2171static long compat_adpt_ioctl(struct file *file,
2158 unsigned int cmd, unsigned long arg) 2172 unsigned int cmd, unsigned long arg)
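
dpt_i2o takes the least invasive route: the old inode-taking adpt_ioctl() is left intact and a thin unlocked wrapper is layered on top, which also forces the new forward declaration above the fops table. The wrapper shape, with old_ioctl standing in for the legacy handler:

	static long my_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
	{
		struct inode *inode = file->f_dentry->d_inode;
		long ret;

		lock_kernel();
		ret = old_ioctl(inode, file, cmd, arg);	/* int widens to long */
		unlock_kernel();
		return ret;
	}
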
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 9276121db1ef..44a07593de56 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -688,7 +688,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
688 } 688 }
689 689
690 if (!lport->vport) 690 if (!lport->vport)
691 fc_host_max_npiv_vports(lport->host) = USHORT_MAX; 691 fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
692 692
693 snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE, 693 snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
694 "%s v%s over %s", FCOE_NAME, FCOE_VERSION, 694 "%s v%s over %s", FCOE_NAME, FCOE_VERSION,
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index a765fe7a55c3..f672d6213eea 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -180,8 +180,8 @@ static const char *gdth_ctr_name(gdth_ha_str *ha);
180 180
181static int gdth_open(struct inode *inode, struct file *filep); 181static int gdth_open(struct inode *inode, struct file *filep);
182static int gdth_close(struct inode *inode, struct file *filep); 182static int gdth_close(struct inode *inode, struct file *filep);
183static int gdth_ioctl(struct inode *inode, struct file *filep, 183static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd,
184 unsigned int cmd, unsigned long arg); 184 unsigned long arg);
185 185
186static void gdth_flush(gdth_ha_str *ha); 186static void gdth_flush(gdth_ha_str *ha);
187static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); 187static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *));
@@ -369,7 +369,7 @@ MODULE_LICENSE("GPL");
369 369
370/* ioctl interface */ 370/* ioctl interface */
371static const struct file_operations gdth_fops = { 371static const struct file_operations gdth_fops = {
372 .ioctl = gdth_ioctl, 372 .unlocked_ioctl = gdth_unlocked_ioctl,
373 .open = gdth_open, 373 .open = gdth_open,
374 .release = gdth_close, 374 .release = gdth_close,
375}; 375};
@@ -4462,8 +4462,7 @@ free_fail:
4462 return rc; 4462 return rc;
4463} 4463}
4464 4464
4465static int gdth_ioctl(struct inode *inode, struct file *filep, 4465static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
4466 unsigned int cmd, unsigned long arg)
4467{ 4466{
4468 gdth_ha_str *ha; 4467 gdth_ha_str *ha;
4469 Scsi_Cmnd *scp; 4468 Scsi_Cmnd *scp;
@@ -4611,6 +4610,17 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
4611 return 0; 4610 return 0;
4612} 4611}
4613 4612
4613static long gdth_unlocked_ioctl(struct file *file, unsigned int cmd,
4614 unsigned long arg)
4615{
4616 int ret;
4617
4618 lock_kernel();
4619 ret = gdth_ioctl(file, cmd, arg);
4620 unlock_kernel();
4621
4622 return ret;
4623}
4614 4624
4615/* flush routine */ 4625/* flush routine */
4616static void gdth_flush(gdth_ha_str *ha) 4626static void gdth_flush(gdth_ha_str *ha)
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 18b7102bb80e..2ce26eb7a1ec 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -1,36 +1,35 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/mm.h>
3#include <linux/slab.h>
4#include <linux/blkdev.h>
5#include <linux/init.h> 2#include <linux/init.h>
6#include <linux/interrupt.h> 3#include <linux/interrupt.h>
4#include <linux/mm.h>
5#include <linux/slab.h>
6#include <linux/spinlock.h>
7#include <linux/zorro.h>
7 8
8#include <asm/setup.h>
9#include <asm/page.h> 9#include <asm/page.h>
10#include <asm/pgtable.h> 10#include <asm/pgtable.h>
11#include <asm/amigaints.h> 11#include <asm/amigaints.h>
12#include <asm/amigahw.h> 12#include <asm/amigahw.h>
13#include <linux/zorro.h>
14#include <asm/irq.h>
15#include <linux/spinlock.h>
16 13
17#include "scsi.h" 14#include "scsi.h"
18#include <scsi/scsi_host.h>
19#include "wd33c93.h" 15#include "wd33c93.h"
20#include "gvp11.h" 16#include "gvp11.h"
21 17
22#include <linux/stat.h>
23 18
19#define CHECK_WD33C93
24 20
25#define DMA(ptr) ((gvp11_scsiregs *)((ptr)->base)) 21struct gvp11_hostdata {
22 struct WD33C93_hostdata wh;
23 struct gvp11_scsiregs *regs;
24};
26 25
27static irqreturn_t gvp11_intr(int irq, void *_instance) 26static irqreturn_t gvp11_intr(int irq, void *data)
28{ 27{
28 struct Scsi_Host *instance = data;
29 struct gvp11_hostdata *hdata = shost_priv(instance);
30 unsigned int status = hdata->regs->CNTR;
29 unsigned long flags; 31 unsigned long flags;
30 unsigned int status;
31 struct Scsi_Host *instance = (struct Scsi_Host *)_instance;
32 32
33 status = DMA(instance)->CNTR;
34 if (!(status & GVP11_DMAC_INT_PENDING)) 33 if (!(status & GVP11_DMAC_INT_PENDING))
35 return IRQ_NONE; 34 return IRQ_NONE;
36 35
@@ -50,64 +49,66 @@ void gvp11_setup(char *str, int *ints)
50static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 49static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
51{ 50{
52 struct Scsi_Host *instance = cmd->device->host; 51 struct Scsi_Host *instance = cmd->device->host;
53 struct WD33C93_hostdata *hdata = shost_priv(instance); 52 struct gvp11_hostdata *hdata = shost_priv(instance);
53 struct WD33C93_hostdata *wh = &hdata->wh;
54 struct gvp11_scsiregs *regs = hdata->regs;
54 unsigned short cntr = GVP11_DMAC_INT_ENABLE; 55 unsigned short cntr = GVP11_DMAC_INT_ENABLE;
55 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 56 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
56 int bank_mask; 57 int bank_mask;
57 static int scsi_alloc_out_of_range = 0; 58 static int scsi_alloc_out_of_range = 0;
58 59
59 /* use bounce buffer if the physical address is bad */ 60 /* use bounce buffer if the physical address is bad */
60 if (addr & hdata->dma_xfer_mask) { 61 if (addr & wh->dma_xfer_mask) {
61 hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; 62 wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
62 63
63 if (!scsi_alloc_out_of_range) { 64 if (!scsi_alloc_out_of_range) {
64 hdata->dma_bounce_buffer = 65 wh->dma_bounce_buffer =
65 kmalloc(hdata->dma_bounce_len, GFP_KERNEL); 66 kmalloc(wh->dma_bounce_len, GFP_KERNEL);
66 hdata->dma_buffer_pool = BUF_SCSI_ALLOCED; 67 wh->dma_buffer_pool = BUF_SCSI_ALLOCED;
67 } 68 }
68 69
69 if (scsi_alloc_out_of_range || 70 if (scsi_alloc_out_of_range ||
70 !hdata->dma_bounce_buffer) { 71 !wh->dma_bounce_buffer) {
71 hdata->dma_bounce_buffer = 72 wh->dma_bounce_buffer =
72 amiga_chip_alloc(hdata->dma_bounce_len, 73 amiga_chip_alloc(wh->dma_bounce_len,
73 "GVP II SCSI Bounce Buffer"); 74 "GVP II SCSI Bounce Buffer");
74 75
75 if (!hdata->dma_bounce_buffer) { 76 if (!wh->dma_bounce_buffer) {
76 hdata->dma_bounce_len = 0; 77 wh->dma_bounce_len = 0;
77 return 1; 78 return 1;
78 } 79 }
79 80
80 hdata->dma_buffer_pool = BUF_CHIP_ALLOCED; 81 wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
81 } 82 }
82 83
83 /* check if the address of the bounce buffer is OK */ 84 /* check if the address of the bounce buffer is OK */
84 addr = virt_to_bus(hdata->dma_bounce_buffer); 85 addr = virt_to_bus(wh->dma_bounce_buffer);
85 86
86 if (addr & hdata->dma_xfer_mask) { 87 if (addr & wh->dma_xfer_mask) {
87 /* fall back to Chip RAM if address out of range */ 88 /* fall back to Chip RAM if address out of range */
88 if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) { 89 if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
89 kfree(hdata->dma_bounce_buffer); 90 kfree(wh->dma_bounce_buffer);
90 scsi_alloc_out_of_range = 1; 91 scsi_alloc_out_of_range = 1;
91 } else { 92 } else {
92 amiga_chip_free(hdata->dma_bounce_buffer); 93 amiga_chip_free(wh->dma_bounce_buffer);
93 } 94 }
94 95
95 hdata->dma_bounce_buffer = 96 wh->dma_bounce_buffer =
96 amiga_chip_alloc(hdata->dma_bounce_len, 97 amiga_chip_alloc(wh->dma_bounce_len,
97 "GVP II SCSI Bounce Buffer"); 98 "GVP II SCSI Bounce Buffer");
98 99
99 if (!hdata->dma_bounce_buffer) { 100 if (!wh->dma_bounce_buffer) {
100 hdata->dma_bounce_len = 0; 101 wh->dma_bounce_len = 0;
101 return 1; 102 return 1;
102 } 103 }
103 104
104 addr = virt_to_bus(hdata->dma_bounce_buffer); 105 addr = virt_to_bus(wh->dma_bounce_buffer);
105 hdata->dma_buffer_pool = BUF_CHIP_ALLOCED; 106 wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
106 } 107 }
107 108
108 if (!dir_in) { 109 if (!dir_in) {
109 /* copy to bounce buffer for a write */ 110 /* copy to bounce buffer for a write */
110 memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, 111 memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
111 cmd->SCp.this_residual); 112 cmd->SCp.this_residual);
112 } 113 }
113 } 114 }
@@ -116,11 +117,11 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
116 if (!dir_in) 117 if (!dir_in)
117 cntr |= GVP11_DMAC_DIR_WRITE; 118 cntr |= GVP11_DMAC_DIR_WRITE;
118 119
119 hdata->dma_dir = dir_in; 120 wh->dma_dir = dir_in;
120 DMA(cmd->device->host)->CNTR = cntr; 121 regs->CNTR = cntr;
121 122
122 /* setup DMA *physical* address */ 123 /* setup DMA *physical* address */
123 DMA(cmd->device->host)->ACR = addr; 124 regs->ACR = addr;
124 125
125 if (dir_in) { 126 if (dir_in) {
126 /* invalidate any cache */ 127 /* invalidate any cache */
@@ -130,12 +131,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
130 cache_push(addr, cmd->SCp.this_residual); 131 cache_push(addr, cmd->SCp.this_residual);
131 } 132 }
132 133
133 bank_mask = (~hdata->dma_xfer_mask >> 18) & 0x01c0; 134 bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
134 if (bank_mask) 135 if (bank_mask)
135 DMA(cmd->device->host)->BANK = bank_mask & (addr >> 18); 136 regs->BANK = bank_mask & (addr >> 18);
136 137
137 /* start DMA */ 138 /* start DMA */
138 DMA(cmd->device->host)->ST_DMA = 1; 139 regs->ST_DMA = 1;
139 140
140 /* return success */ 141 /* return success */
141 return 0; 142 return 0;
@@ -144,236 +145,53 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
144static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, 145static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
145 int status) 146 int status)
146{ 147{
147 struct WD33C93_hostdata *hdata = shost_priv(instance); 148 struct gvp11_hostdata *hdata = shost_priv(instance);
149 struct WD33C93_hostdata *wh = &hdata->wh;
150 struct gvp11_scsiregs *regs = hdata->regs;
148 151
149 /* stop DMA */ 152 /* stop DMA */
150 DMA(instance)->SP_DMA = 1; 153 regs->SP_DMA = 1;
151 /* remove write bit from CONTROL bits */ 154 /* remove write bit from CONTROL bits */
152 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE; 155 regs->CNTR = GVP11_DMAC_INT_ENABLE;
153 156
154 /* copy from a bounce buffer, if necessary */ 157 /* copy from a bounce buffer, if necessary */
155 if (status && hdata->dma_bounce_buffer) { 158 if (status && wh->dma_bounce_buffer) {
156 if (hdata->dma_dir && SCpnt) 159 if (wh->dma_dir && SCpnt)
157 memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer, 160 memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
158 SCpnt->SCp.this_residual); 161 SCpnt->SCp.this_residual);
159 162
160 if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) 163 if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
161 kfree(hdata->dma_bounce_buffer); 164 kfree(wh->dma_bounce_buffer);
162 else
163 amiga_chip_free(hdata->dma_bounce_buffer);
164
165 hdata->dma_bounce_buffer = NULL;
166 hdata->dma_bounce_len = 0;
167 }
168}
169
170#define CHECK_WD33C93
171
172int __init gvp11_detect(struct scsi_host_template *tpnt)
173{
174 static unsigned char called = 0;
175 struct Scsi_Host *instance;
176 unsigned long address;
177 unsigned int epc;
178 struct zorro_dev *z = NULL;
179 unsigned int default_dma_xfer_mask;
180 struct WD33C93_hostdata *hdata;
181 wd33c93_regs regs;
182 int num_gvp11 = 0;
183#ifdef CHECK_WD33C93
184 volatile unsigned char *sasr_3393, *scmd_3393;
185 unsigned char save_sasr;
186 unsigned char q, qq;
187#endif
188
189 if (!MACH_IS_AMIGA || called)
190 return 0;
191 called = 1;
192
193 tpnt->proc_name = "GVP11";
194 tpnt->proc_info = &wd33c93_proc_info;
195
196 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
197 /*
198 * This should (hopefully) be the correct way to identify
199 * all the different GVP SCSI controllers (except for the
200 * SERIES I though).
201 */
202
203 if (z->id == ZORRO_PROD_GVP_COMBO_030_R3_SCSI ||
204 z->id == ZORRO_PROD_GVP_SERIES_II)
205 default_dma_xfer_mask = ~0x00ffffff;
206 else if (z->id == ZORRO_PROD_GVP_GFORCE_030_SCSI ||
207 z->id == ZORRO_PROD_GVP_A530_SCSI ||
208 z->id == ZORRO_PROD_GVP_COMBO_030_R4_SCSI)
209 default_dma_xfer_mask = ~0x01ffffff;
210 else if (z->id == ZORRO_PROD_GVP_A1291 ||
211 z->id == ZORRO_PROD_GVP_GFORCE_040_SCSI_1)
212 default_dma_xfer_mask = ~0x07ffffff;
213 else 165 else
214 continue; 166 amiga_chip_free(wh->dma_bounce_buffer);
215
216 /*
217 * Rumors state that some GVP ram boards use the same product
218 * code as the SCSI controllers. Therefore if the board-size
 219 * is not 64KB, we assume it is a ram board and bail out.
220 */
221 if (z->resource.end - z->resource.start != 0xffff)
222 continue;
223 167
224 address = z->resource.start; 168 wh->dma_bounce_buffer = NULL;
225 if (!request_mem_region(address, 256, "wd33c93")) 169 wh->dma_bounce_len = 0;
226 continue;
227
228#ifdef CHECK_WD33C93
229
230 /*
231 * These darn GVP boards are a problem - it can be tough to tell
232 * whether or not they include a SCSI controller. This is the
233 * ultimate Yet-Another-GVP-Detection-Hack in that it actually
234 * probes for a WD33c93 chip: If we find one, it's extremely
235 * likely that this card supports SCSI, regardless of Product_
236 * Code, Board_Size, etc.
237 */
238
239 /* Get pointers to the presumed register locations and save contents */
240
241 sasr_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SASR);
242 scmd_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SCMD);
243 save_sasr = *sasr_3393;
244
245 /* First test the AuxStatus Reg */
246
247 q = *sasr_3393; /* read it */
248 if (q & 0x08) /* bit 3 should always be clear */
249 goto release;
250 *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
251 if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
252 *sasr_3393 = save_sasr; /* Oops - restore this byte */
253 goto release;
254 }
255 if (*sasr_3393 != q) { /* should still read the same */
256 *sasr_3393 = save_sasr; /* Oops - restore this byte */
257 goto release;
258 }
259 if (*scmd_3393 != q) /* and so should the image at 0x1f */
260 goto release;
261
262 /*
263 * Ok, we probably have a wd33c93, but let's check a few other places
264 * for good measure. Make sure that this works for both 'A and 'B
265 * chip versions.
266 */
267
268 *sasr_3393 = WD_SCSI_STATUS;
269 q = *scmd_3393;
270 *sasr_3393 = WD_SCSI_STATUS;
271 *scmd_3393 = ~q;
272 *sasr_3393 = WD_SCSI_STATUS;
273 qq = *scmd_3393;
274 *sasr_3393 = WD_SCSI_STATUS;
275 *scmd_3393 = q;
276 if (qq != q) /* should be read only */
277 goto release;
278 *sasr_3393 = 0x1e; /* this register is unimplemented */
279 q = *scmd_3393;
280 *sasr_3393 = 0x1e;
281 *scmd_3393 = ~q;
282 *sasr_3393 = 0x1e;
283 qq = *scmd_3393;
284 *sasr_3393 = 0x1e;
285 *scmd_3393 = q;
286 if (qq != q || qq != 0xff) /* should be read only, all 1's */
287 goto release;
288 *sasr_3393 = WD_TIMEOUT_PERIOD;
289 q = *scmd_3393;
290 *sasr_3393 = WD_TIMEOUT_PERIOD;
291 *scmd_3393 = ~q;
292 *sasr_3393 = WD_TIMEOUT_PERIOD;
293 qq = *scmd_3393;
294 *sasr_3393 = WD_TIMEOUT_PERIOD;
295 *scmd_3393 = q;
296 if (qq != (~q & 0xff)) /* should be read/write */
297 goto release;
298#endif
299
300 instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
301 if (instance == NULL)
302 goto release;
303 instance->base = ZTWO_VADDR(address);
304 instance->irq = IRQ_AMIGA_PORTS;
305 instance->unique_id = z->slotaddr;
306
307 hdata = shost_priv(instance);
308 if (gvp11_xfer_mask)
309 hdata->dma_xfer_mask = gvp11_xfer_mask;
310 else
311 hdata->dma_xfer_mask = default_dma_xfer_mask;
312
313 DMA(instance)->secret2 = 1;
314 DMA(instance)->secret1 = 0;
315 DMA(instance)->secret3 = 15;
316 while (DMA(instance)->CNTR & GVP11_DMAC_BUSY)
317 ;
318 DMA(instance)->CNTR = 0;
319
320 DMA(instance)->BANK = 0;
321
322 epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
323
324 /*
325 * Check for 14MHz SCSI clock
326 */
327 regs.SASR = &(DMA(instance)->SASR);
328 regs.SCMD = &(DMA(instance)->SCMD);
329 hdata->no_sync = 0xff;
330 hdata->fast = 0;
331 hdata->dma_mode = CTRL_DMA;
332 wd33c93_init(instance, regs, dma_setup, dma_stop,
333 (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
334 : WD33C93_FS_12_15);
335
336 if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED,
337 "GVP11 SCSI", instance))
338 goto unregister;
339 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
340 num_gvp11++;
341 continue;
342
343unregister:
344 scsi_unregister(instance);
345release:
346 release_mem_region(address, 256);
347 } 170 }
348
349 return num_gvp11;
350} 171}
351 172
352static int gvp11_bus_reset(struct scsi_cmnd *cmd) 173static int gvp11_bus_reset(struct scsi_cmnd *cmd)
353{ 174{
175 struct Scsi_Host *instance = cmd->device->host;
176
354 /* FIXME perform bus-specific reset */ 177 /* FIXME perform bus-specific reset */
355 178
356 /* FIXME 2: shouldn't we no-op this function (return 179 /* FIXME 2: shouldn't we no-op this function (return
357 FAILED), and fall back to host reset function, 180 FAILED), and fall back to host reset function,
358 wd33c93_host_reset ? */ 181 wd33c93_host_reset ? */
359 182
360 spin_lock_irq(cmd->device->host->host_lock); 183 spin_lock_irq(instance->host_lock);
361 wd33c93_host_reset(cmd); 184 wd33c93_host_reset(cmd);
362 spin_unlock_irq(cmd->device->host->host_lock); 185 spin_unlock_irq(instance->host_lock);
363 186
364 return SUCCESS; 187 return SUCCESS;
365} 188}
366 189
367 190static struct scsi_host_template gvp11_scsi_template = {
368#define HOSTS_C 191 .module = THIS_MODULE,
369
370#include "gvp11.h"
371
372static struct scsi_host_template driver_template = {
373 .proc_name = "GVP11",
374 .name = "GVP Series II SCSI", 192 .name = "GVP Series II SCSI",
375 .detect = gvp11_detect, 193 .proc_info = wd33c93_proc_info,
376 .release = gvp11_release, 194 .proc_name = "GVP11",
377 .queuecommand = wd33c93_queuecommand, 195 .queuecommand = wd33c93_queuecommand,
378 .eh_abort_handler = wd33c93_abort, 196 .eh_abort_handler = wd33c93_abort,
379 .eh_bus_reset_handler = gvp11_bus_reset, 197 .eh_bus_reset_handler = gvp11_bus_reset,
@@ -385,17 +203,230 @@ static struct scsi_host_template driver_template = {
385 .use_clustering = DISABLE_CLUSTERING 203 .use_clustering = DISABLE_CLUSTERING
386}; 204};
387 205
206static int __devinit check_wd33c93(struct gvp11_scsiregs *regs)
207{
208#ifdef CHECK_WD33C93
209 volatile unsigned char *sasr_3393, *scmd_3393;
210 unsigned char save_sasr;
211 unsigned char q, qq;
388 212
389#include "scsi_module.c" 213 /*
214 * These darn GVP boards are a problem - it can be tough to tell
215 * whether or not they include a SCSI controller. This is the
216 * ultimate Yet-Another-GVP-Detection-Hack in that it actually
217 * probes for a WD33c93 chip: If we find one, it's extremely
218 * likely that this card supports SCSI, regardless of Product_
219 * Code, Board_Size, etc.
220 */
221
222 /* Get pointers to the presumed register locations and save contents */
223
224 sasr_3393 = &regs->SASR;
225 scmd_3393 = &regs->SCMD;
226 save_sasr = *sasr_3393;
227
228 /* First test the AuxStatus Reg */
229
230 q = *sasr_3393; /* read it */
231 if (q & 0x08) /* bit 3 should always be clear */
232 return -ENODEV;
233 *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
234 if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
235 *sasr_3393 = save_sasr; /* Oops - restore this byte */
236 return -ENODEV;
237 }
238 if (*sasr_3393 != q) { /* should still read the same */
239 *sasr_3393 = save_sasr; /* Oops - restore this byte */
240 return -ENODEV;
241 }
242 if (*scmd_3393 != q) /* and so should the image at 0x1f */
243 return -ENODEV;
244
245 /*
246 * Ok, we probably have a wd33c93, but let's check a few other places
247 * for good measure. Make sure that this works for both 'A and 'B
248 * chip versions.
249 */
250
251 *sasr_3393 = WD_SCSI_STATUS;
252 q = *scmd_3393;
253 *sasr_3393 = WD_SCSI_STATUS;
254 *scmd_3393 = ~q;
255 *sasr_3393 = WD_SCSI_STATUS;
256 qq = *scmd_3393;
257 *sasr_3393 = WD_SCSI_STATUS;
258 *scmd_3393 = q;
259 if (qq != q) /* should be read only */
260 return -ENODEV;
261 *sasr_3393 = 0x1e; /* this register is unimplemented */
262 q = *scmd_3393;
263 *sasr_3393 = 0x1e;
264 *scmd_3393 = ~q;
265 *sasr_3393 = 0x1e;
266 qq = *scmd_3393;
267 *sasr_3393 = 0x1e;
268 *scmd_3393 = q;
269 if (qq != q || qq != 0xff) /* should be read only, all 1's */
270 return -ENODEV;
271 *sasr_3393 = WD_TIMEOUT_PERIOD;
272 q = *scmd_3393;
273 *sasr_3393 = WD_TIMEOUT_PERIOD;
274 *scmd_3393 = ~q;
275 *sasr_3393 = WD_TIMEOUT_PERIOD;
276 qq = *scmd_3393;
277 *sasr_3393 = WD_TIMEOUT_PERIOD;
278 *scmd_3393 = q;
279 if (qq != (~q & 0xff)) /* should be read/write */
280 return -ENODEV;
281#endif /* CHECK_WD33C93 */
390 282
391int gvp11_release(struct Scsi_Host *instance) 283 return 0;
284}
285
286static int __devinit gvp11_probe(struct zorro_dev *z,
287 const struct zorro_device_id *ent)
392{ 288{
393#ifdef MODULE 289 struct Scsi_Host *instance;
394 DMA(instance)->CNTR = 0; 290 unsigned long address;
395 release_mem_region(ZTWO_PADDR(instance->base), 256); 291 int error;
292 unsigned int epc;
293 unsigned int default_dma_xfer_mask;
294 struct gvp11_hostdata *hdata;
295 struct gvp11_scsiregs *regs;
296 wd33c93_regs wdregs;
297
298 default_dma_xfer_mask = ent->driver_data;
299
300 /*
301 * Rumors state that some GVP ram boards use the same product
302 * code as the SCSI controllers. Therefore if the board-size
 303 * is not 64KB, we assume it is a ram board and bail out.
304 */
305 if (zorro_resource_len(z) != 0x10000)
306 return -ENODEV;
307
308 address = z->resource.start;
309 if (!request_mem_region(address, 256, "wd33c93"))
310 return -EBUSY;
311
312 regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address));
313
314 error = check_wd33c93(regs);
315 if (error)
316 goto fail_check_or_alloc;
317
318 instance = scsi_host_alloc(&gvp11_scsi_template,
319 sizeof(struct gvp11_hostdata));
320 if (!instance) {
321 error = -ENOMEM;
322 goto fail_check_or_alloc;
323 }
324
325 instance->irq = IRQ_AMIGA_PORTS;
326 instance->unique_id = z->slotaddr;
327
328 regs->secret2 = 1;
329 regs->secret1 = 0;
330 regs->secret3 = 15;
331 while (regs->CNTR & GVP11_DMAC_BUSY)
332 ;
333 regs->CNTR = 0;
334 regs->BANK = 0;
335
336 wdregs.SASR = &regs->SASR;
337 wdregs.SCMD = &regs->SCMD;
338
339 hdata = shost_priv(instance);
340 if (gvp11_xfer_mask)
341 hdata->wh.dma_xfer_mask = gvp11_xfer_mask;
342 else
343 hdata->wh.dma_xfer_mask = default_dma_xfer_mask;
344
345 hdata->wh.no_sync = 0xff;
346 hdata->wh.fast = 0;
347 hdata->wh.dma_mode = CTRL_DMA;
348 hdata->regs = regs;
349
350 /*
351 * Check for 14MHz SCSI clock
352 */
353 epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
354 wd33c93_init(instance, wdregs, dma_setup, dma_stop,
355 (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
356 : WD33C93_FS_12_15);
357
358 error = request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED,
359 "GVP11 SCSI", instance);
360 if (error)
361 goto fail_irq;
362
363 regs->CNTR = GVP11_DMAC_INT_ENABLE;
364
365 error = scsi_add_host(instance, NULL);
366 if (error)
367 goto fail_host;
368
369 zorro_set_drvdata(z, instance);
370 scsi_scan_host(instance);
371 return 0;
372
373fail_host:
396 free_irq(IRQ_AMIGA_PORTS, instance); 374 free_irq(IRQ_AMIGA_PORTS, instance);
397#endif 375fail_irq:
398 return 1; 376 scsi_host_put(instance);
377fail_check_or_alloc:
378 release_mem_region(address, 256);
379 return error;
380}
381
382static void __devexit gvp11_remove(struct zorro_dev *z)
383{
384 struct Scsi_Host *instance = zorro_get_drvdata(z);
385 struct gvp11_hostdata *hdata = shost_priv(instance);
386
387 hdata->regs->CNTR = 0;
388 scsi_remove_host(instance);
389 free_irq(IRQ_AMIGA_PORTS, instance);
390 scsi_host_put(instance);
391 release_mem_region(z->resource.start, 256);
392}
393
394 /*
395 * This should (hopefully) be the correct way to identify
396 * all the different GVP SCSI controllers (except for the
397 * SERIES I though).
398 */
399
400static struct zorro_device_id gvp11_zorro_tbl[] __devinitdata = {
401 { ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff },
402 { ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff },
403 { ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff },
404 { ZORRO_PROD_GVP_A530_SCSI, ~0x01ffffff },
405 { ZORRO_PROD_GVP_COMBO_030_R4_SCSI, ~0x01ffffff },
406 { ZORRO_PROD_GVP_A1291, ~0x07ffffff },
407 { ZORRO_PROD_GVP_GFORCE_040_SCSI_1, ~0x07ffffff },
408 { 0 }
409};
410MODULE_DEVICE_TABLE(zorro, gvp11_zorro_tbl);
411
412static struct zorro_driver gvp11_driver = {
413 .name = "gvp11",
414 .id_table = gvp11_zorro_tbl,
415 .probe = gvp11_probe,
416 .remove = __devexit_p(gvp11_remove),
417};
418
419static int __init gvp11_init(void)
420{
421 return zorro_register_driver(&gvp11_driver);
422}
423module_init(gvp11_init);
424
425static void __exit gvp11_exit(void)
426{
427 zorro_unregister_driver(&gvp11_driver);
399} 428}
429module_exit(gvp11_exit);
400 430
431MODULE_DESCRIPTION("GVP Series II SCSI");
401MODULE_LICENSE("GPL"); 432MODULE_LICENSE("GPL");
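
The gvp11 hunks above retire the legacy detect()/release() entry points (driven by scsi_module.c) in favour of a Zorro bus driver built on scsi_host_alloc()/scsi_add_host()/scsi_scan_host(). A minimal sketch of that shape, with hypothetical foo_* names standing in for the driver-specific pieces:

#include <linux/zorro.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template foo_template;	/* filled in elsewhere */
struct foo_hostdata { int placeholder; };	/* per-host private state */

static int __devinit foo_probe(struct zorro_dev *z,
			       const struct zorro_device_id *ent)
{
	struct Scsi_Host *shost;
	int error;

	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hostdata));
	if (!shost)
		return -ENOMEM;

	/* ... map registers, request_irq(), initialise the chip ... */

	error = scsi_add_host(shost, NULL);	/* the patch passes NULL */
	if (error) {
		scsi_host_put(shost);
		return error;
	}
	zorro_set_drvdata(z, shost);
	scsi_scan_host(shost);
	return 0;
}

static void __devexit foo_remove(struct zorro_dev *z)
{
	struct Scsi_Host *shost = zorro_get_drvdata(z);

	scsi_remove_host(shost);
	/* ... quiesce hardware, free_irq() ... */
	scsi_host_put(shost);
}

Errors unwind in reverse: anything acquired before scsi_add_host() fails must be released before scsi_host_put(), exactly as the fail_host/fail_irq/fail_check_or_alloc labels in the patch do.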
diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h
index e2efdf9601ef..852913cde5dd 100644
--- a/drivers/scsi/gvp11.h
+++ b/drivers/scsi/gvp11.h
@@ -11,9 +11,6 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14int gvp11_detect(struct scsi_host_template *);
15int gvp11_release(struct Scsi_Host *);
16
17#ifndef CMD_PER_LUN 14#ifndef CMD_PER_LUN
18#define CMD_PER_LUN 2 15#define CMD_PER_LUN 2
19#endif 16#endif
@@ -22,15 +19,13 @@ int gvp11_release(struct Scsi_Host *);
22#define CAN_QUEUE 16 19#define CAN_QUEUE 16
23#endif 20#endif
24 21
25#ifndef HOSTS_C
26
27/* 22/*
28 * if the transfer address ANDed with this results in a non-zero 23 * if the transfer address ANDed with this results in a non-zero
29 * result, then we can't use DMA. 24 * result, then we can't use DMA.
30 */ 25 */
31#define GVP11_XFER_MASK (0xff000001) 26#define GVP11_XFER_MASK (0xff000001)
32 27
33typedef struct { 28struct gvp11_scsiregs {
34 unsigned char pad1[64]; 29 unsigned char pad1[64];
35 volatile unsigned short CNTR; 30 volatile unsigned short CNTR;
36 unsigned char pad2[31]; 31 unsigned char pad2[31];
@@ -46,7 +41,7 @@ typedef struct {
46 volatile unsigned short SP_DMA; 41 volatile unsigned short SP_DMA;
47 volatile unsigned short secret2; /* store 1 here */ 42 volatile unsigned short secret2; /* store 1 here */
48 volatile unsigned short secret3; /* store 15 here */ 43 volatile unsigned short secret3; /* store 15 here */
49} gvp11_scsiregs; 44};
50 45
51/* bits in CNTR */ 46/* bits in CNTR */
52#define GVP11_DMAC_BUSY (1<<0) 47#define GVP11_DMAC_BUSY (1<<0)
@@ -54,6 +49,4 @@ typedef struct {
54#define GVP11_DMAC_INT_ENABLE (1<<3) 49#define GVP11_DMAC_INT_ENABLE (1<<3)
55#define GVP11_DMAC_DIR_WRITE (1<<4) 50#define GVP11_DMAC_DIR_WRITE (1<<4)
56 51
57#endif /* else def HOSTS_C */
58
59#endif /* GVP11_H */ 52#endif /* GVP11_H */
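
The gvp11.c hunks reach the wd33c93 state through hdata->wh and the board registers through hdata->regs, via a struct gvp11_hostdata that this header diff does not show. Inferred from that usage, the wrapper presumably looks like:

struct gvp11_hostdata {
	struct WD33C93_hostdata wh;	/* generic wd33c93 core state */
	struct gvp11_scsiregs *regs;	/* board DMA/register block */
};

Embedding the generic state first lets shost_priv() keep handing out a single allocation that both the wd33c93 core and the board glue can use.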
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 3eb2b7b3d8b0..fef49521cbc3 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1157,7 +1157,7 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1157static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) 1157static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1158{ 1158{
1159 struct ibmvfc_npiv_login *login_info = &vhost->login_info; 1159 struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1160 struct device_node *of_node = vhost->dev->archdata.of_node; 1160 struct device_node *of_node = vhost->dev->of_node;
1161 const char *location; 1161 const char *location;
1162 1162
1163 memset(login_info, 0, sizeof(*login_info)); 1163 memset(login_info, 0, sizeof(*login_info));
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 88bad0e81bdd..aad35cc41e49 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -932,7 +932,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
932 struct viosrp_capabilities *req; 932 struct viosrp_capabilities *req;
933 struct srp_event_struct *evt_struct; 933 struct srp_event_struct *evt_struct;
934 unsigned long flags; 934 unsigned long flags;
935 struct device_node *of_node = hostdata->dev->archdata.of_node; 935 struct device_node *of_node = hostdata->dev->of_node;
936 const char *location; 936 const char *location;
937 937
938 evt_struct = get_event_struct(&hostdata->pool); 938 evt_struct = get_event_struct(&hostdata->pool);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 4bf7edca9e69..0b6e3228610a 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -91,12 +91,15 @@ static struct proc_dir_entry *mega_proc_dir_entry;
91/* For controller re-ordering */ 91/* For controller re-ordering */
92static struct mega_hbas mega_hbas[MAX_CONTROLLERS]; 92static struct mega_hbas mega_hbas[MAX_CONTROLLERS];
93 93
94static long
95megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
96
94/* 97/*
95 * The File Operations structure for the serial/ioctl interface of the driver 98 * The File Operations structure for the serial/ioctl interface of the driver
96 */ 99 */
97static const struct file_operations megadev_fops = { 100static const struct file_operations megadev_fops = {
98 .owner = THIS_MODULE, 101 .owner = THIS_MODULE,
99 .ioctl = megadev_ioctl, 102 .unlocked_ioctl = megadev_unlocked_ioctl,
100 .open = megadev_open, 103 .open = megadev_open,
101}; 104};
102 105
@@ -3302,8 +3305,7 @@ megadev_open (struct inode *inode, struct file *filep)
3302 * controller. 3305 * controller.
3303 */ 3306 */
3304static int 3307static int
3305megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, 3308megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3306 unsigned long arg)
3307{ 3309{
3308 adapter_t *adapter; 3310 adapter_t *adapter;
3309 nitioctl_t uioc; 3311 nitioctl_t uioc;
@@ -3694,6 +3696,18 @@ freemem_and_return:
3694 return 0; 3696 return 0;
3695} 3697}
3696 3698
3699static long
3700megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3701{
3702 int ret;
3703
3704 lock_kernel();
3705 ret = megadev_ioctl(filep, cmd, arg);
3706 unlock_kernel();
3707
3708 return ret;
3709}
3710
3697/** 3711/**
3698 * mega_m_to_n() 3712 * mega_m_to_n()
3699 * @arg - user address 3713 * @arg - user address
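
This megaraid change follows the same BKL push-down recipe applied to megaraid_mm, osst and sg below: the old fops->ioctl handler, which the VFS used to call with the Big Kernel Lock held, is wrapped in an unlocked_ioctl that takes and drops the lock itself. A generic sketch, assuming foo_ioctl() is the pre-existing handler minus its unused struct inode argument:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

static int foo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	/* original handler body, unchanged */
	return 0;
}

static long foo_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	long ret;

	lock_kernel();		/* preserve the old locking semantics */
	ret = foo_ioctl(filp, cmd, arg);
	unlock_kernel();

	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_unlocked_ioctl,
};

Behaviour is unchanged for now; the wrapper just makes each driver's BKL dependency explicit so it can later be replaced with a private mutex.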
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index d310f49d077e..2b4a048cadf1 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -1013,8 +1013,7 @@ static void mega_8_to_40ld (mraid_inquiry *inquiry,
1013 mega_inquiry3 *enquiry3, mega_product_info *); 1013 mega_inquiry3 *enquiry3, mega_product_info *);
1014 1014
1015static int megadev_open (struct inode *, struct file *); 1015static int megadev_open (struct inode *, struct file *);
1016static int megadev_ioctl (struct inode *, struct file *, unsigned int, 1016static int megadev_ioctl (struct file *, unsigned int, unsigned long);
1017 unsigned long);
1018static int mega_m_to_n(void __user *, nitioctl_t *); 1017static int mega_m_to_n(void __user *, nitioctl_t *);
1019static int mega_n_to_m(void __user *, megacmd_t *); 1018static int mega_n_to_m(void __user *, megacmd_t *);
1020 1019
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 36e0b7d05c1d..41f82f76d884 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -22,7 +22,7 @@
22 22
23// Entry points for char node driver 23// Entry points for char node driver
24static int mraid_mm_open(struct inode *, struct file *); 24static int mraid_mm_open(struct inode *, struct file *);
25static int mraid_mm_ioctl(struct inode *, struct file *, uint, unsigned long); 25static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);
26 26
27 27
28// routines to convert to and from the old the format 28// routines to convert to and from the old the format
@@ -70,7 +70,7 @@ static wait_queue_head_t wait_q;
70 70
71static const struct file_operations lsi_fops = { 71static const struct file_operations lsi_fops = {
72 .open = mraid_mm_open, 72 .open = mraid_mm_open,
73 .ioctl = mraid_mm_ioctl, 73 .unlocked_ioctl = mraid_mm_unlocked_ioctl,
74#ifdef CONFIG_COMPAT 74#ifdef CONFIG_COMPAT
75 .compat_ioctl = mraid_mm_compat_ioctl, 75 .compat_ioctl = mraid_mm_compat_ioctl,
76#endif 76#endif
@@ -110,8 +110,7 @@ mraid_mm_open(struct inode *inode, struct file *filep)
110 * @arg : user ioctl packet 110 * @arg : user ioctl packet
111 */ 111 */
112static int 112static int
113mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, 113mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
114 unsigned long arg)
115{ 114{
116 uioc_t *kioc; 115 uioc_t *kioc;
117 char signature[EXT_IOCTL_SIGN_SZ] = {0}; 116 char signature[EXT_IOCTL_SIGN_SZ] = {0};
@@ -218,6 +217,19 @@ mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
218 return rval; 217 return rval;
219} 218}
220 219
220static long
221mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
222 unsigned long arg)
223{
224 int err;
225
 226 /* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
227 lock_kernel();
228 err = mraid_mm_ioctl(filep, cmd, arg);
229 unlock_kernel();
230
231 return err;
232}
221 233
222/** 234/**
223 * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet 235 * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet
@@ -1225,7 +1237,7 @@ mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
1225{ 1237{
1226 int err; 1238 int err;
1227 1239
1228 err = mraid_mm_ioctl(NULL, filep, cmd, arg); 1240 err = mraid_mm_ioctl(filep, cmd, arg);
1229 1241
1230 return err; 1242 return err;
1231} 1243}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index b830d61684dd..0ec1ed389c20 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3757,7 +3757,7 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
3757 if (ioc->config_cmds.status & MPT2_CMD_PENDING) { 3757 if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
3758 ioc->config_cmds.status |= MPT2_CMD_RESET; 3758 ioc->config_cmds.status |= MPT2_CMD_RESET;
3759 mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid); 3759 mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
3760 ioc->config_cmds.smid = USHORT_MAX; 3760 ioc->config_cmds.smid = USHRT_MAX;
3761 complete(&ioc->config_cmds.done); 3761 complete(&ioc->config_cmds.done);
3762 } 3762 }
3763 break; 3763 break;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index e762dd3e2fcb..c65442982d7b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -258,7 +258,7 @@ mpt2sas_config_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
258#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 258#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
259 _config_display_some_debug(ioc, smid, "config_done", mpi_reply); 259 _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
260#endif 260#endif
261 ioc->config_cmds.smid = USHORT_MAX; 261 ioc->config_cmds.smid = USHRT_MAX;
262 complete(&ioc->config_cmds.done); 262 complete(&ioc->config_cmds.done);
263 return 1; 263 return 1;
264} 264}
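
Both mpt2sas hunks track the kernel-wide rename of USHORT_MAX to USHRT_MAX in linux/kernel.h, matching the C standard's spelling; the value is unchanged and apparently serves here as a "no outstanding smid" sentinel:

#include <linux/kernel.h>	/* USHRT_MAX (formerly USHORT_MAX) == 65535 */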
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index 716d1785cda7..c29d0dbb9660 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -16,12 +16,12 @@
16#include <linux/stat.h> 16#include <linux/stat.h>
17 17
18 18
19static struct Scsi_Host *mvme147_host = NULL; 19static irqreturn_t mvme147_intr(int irq, void *data)
20
21static irqreturn_t mvme147_intr(int irq, void *dummy)
22{ 20{
21 struct Scsi_Host *instance = data;
22
23 if (irq == MVME147_IRQ_SCSI_PORT) 23 if (irq == MVME147_IRQ_SCSI_PORT)
24 wd33c93_intr(mvme147_host); 24 wd33c93_intr(instance);
25 else 25 else
26 m147_pcc->dma_intr = 0x89; /* Ack and enable ints */ 26 m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
27 return IRQ_HANDLED; 27 return IRQ_HANDLED;
@@ -29,7 +29,8 @@ static irqreturn_t mvme147_intr(int irq, void *dummy)
29 29
30static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 30static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
31{ 31{
32 struct WD33C93_hostdata *hdata = shost_priv(mvme147_host); 32 struct Scsi_Host *instance = cmd->device->host;
33 struct WD33C93_hostdata *hdata = shost_priv(instance);
33 unsigned char flags = 0x01; 34 unsigned char flags = 0x01;
34 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 35 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
35 36
@@ -66,6 +67,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
66int mvme147_detect(struct scsi_host_template *tpnt) 67int mvme147_detect(struct scsi_host_template *tpnt)
67{ 68{
68 static unsigned char called = 0; 69 static unsigned char called = 0;
70 struct Scsi_Host *instance;
69 wd33c93_regs regs; 71 wd33c93_regs regs;
70 struct WD33C93_hostdata *hdata; 72 struct WD33C93_hostdata *hdata;
71 73
@@ -76,25 +78,25 @@ int mvme147_detect(struct scsi_host_template *tpnt)
76 tpnt->proc_name = "MVME147"; 78 tpnt->proc_name = "MVME147";
77 tpnt->proc_info = &wd33c93_proc_info; 79 tpnt->proc_info = &wd33c93_proc_info;
78 80
79 mvme147_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); 81 instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
80 if (!mvme147_host) 82 if (!instance)
81 goto err_out; 83 goto err_out;
82 84
83 mvme147_host->base = 0xfffe4000; 85 instance->base = 0xfffe4000;
84 mvme147_host->irq = MVME147_IRQ_SCSI_PORT; 86 instance->irq = MVME147_IRQ_SCSI_PORT;
85 regs.SASR = (volatile unsigned char *)0xfffe4000; 87 regs.SASR = (volatile unsigned char *)0xfffe4000;
86 regs.SCMD = (volatile unsigned char *)0xfffe4001; 88 regs.SCMD = (volatile unsigned char *)0xfffe4001;
87 hdata = shost_priv(mvme147_host); 89 hdata = shost_priv(instance);
88 hdata->no_sync = 0xff; 90 hdata->no_sync = 0xff;
89 hdata->fast = 0; 91 hdata->fast = 0;
90 hdata->dma_mode = CTRL_DMA; 92 hdata->dma_mode = CTRL_DMA;
91 wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10); 93 wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
92 94
93 if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, 95 if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0,
94 "MVME147 SCSI PORT", mvme147_intr)) 96 "MVME147 SCSI PORT", instance))
95 goto err_unregister; 97 goto err_unregister;
96 if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0, 98 if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0,
97 "MVME147 SCSI DMA", mvme147_intr)) 99 "MVME147 SCSI DMA", instance))
98 goto err_free_irq; 100 goto err_free_irq;
99#if 0 /* Disabled; causes problems booting */ 101#if 0 /* Disabled; causes problems booting */
100 m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */ 102 m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */
@@ -113,7 +115,7 @@ int mvme147_detect(struct scsi_host_template *tpnt)
113err_free_irq: 115err_free_irq:
114 free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr); 116 free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
115err_unregister: 117err_unregister:
116 scsi_unregister(mvme147_host); 118 scsi_unregister(instance);
117err_out: 119err_out:
118 return 0; 120 return 0;
119} 121}
@@ -132,9 +134,6 @@ static int mvme147_bus_reset(struct scsi_cmnd *cmd)
132 return SUCCESS; 134 return SUCCESS;
133} 135}
134 136
135#define HOSTS_C
136
137#include "mvme147.h"
138 137
139static struct scsi_host_template driver_template = { 138static struct scsi_host_template driver_template = {
140 .proc_name = "MVME147", 139 .proc_name = "MVME147",
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index b219118f8bd6..d64b7178fa08 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -3587,7 +3587,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
3587 if (i == (-ENOSPC)) { 3587 if (i == (-ENOSPC)) {
3588 transfer = STp->buffer->writing; /* FIXME -- check this logic */ 3588 transfer = STp->buffer->writing; /* FIXME -- check this logic */
3589 if (transfer <= do_count) { 3589 if (transfer <= do_count) {
3590 filp->f_pos += do_count - transfer; 3590 *ppos += do_count - transfer;
3591 count -= do_count - transfer; 3591 count -= do_count - transfer;
3592 if (STps->drv_block >= 0) { 3592 if (STps->drv_block >= 0) {
3593 STps->drv_block += (do_count - transfer) / STp->block_size; 3593 STps->drv_block += (do_count - transfer) / STp->block_size;
@@ -3625,7 +3625,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
3625 goto out; 3625 goto out;
3626 } 3626 }
3627 3627
3628 filp->f_pos += do_count; 3628 *ppos += do_count;
3629 b_point += do_count; 3629 b_point += do_count;
3630 count -= do_count; 3630 count -= do_count;
3631 if (STps->drv_block >= 0) { 3631 if (STps->drv_block >= 0) {
@@ -3647,7 +3647,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
3647 if (STps->drv_block >= 0) { 3647 if (STps->drv_block >= 0) {
3648 STps->drv_block += blks; 3648 STps->drv_block += blks;
3649 } 3649 }
3650 filp->f_pos += count; 3650 *ppos += count;
3651 count = 0; 3651 count = 0;
3652 } 3652 }
3653 3653
@@ -3823,7 +3823,7 @@ static ssize_t osst_read(struct file * filp, char __user * buf, size_t count, lo
3823 } 3823 }
3824 STp->logical_blk_num += transfer / STp->block_size; 3824 STp->logical_blk_num += transfer / STp->block_size;
3825 STps->drv_block += transfer / STp->block_size; 3825 STps->drv_block += transfer / STp->block_size;
3826 filp->f_pos += transfer; 3826 *ppos += transfer;
3827 buf += transfer; 3827 buf += transfer;
3828 total += transfer; 3828 total += transfer;
3829 } 3829 }
@@ -4932,7 +4932,7 @@ static int os_scsi_tape_close(struct inode * inode, struct file * filp)
4932 4932
4933 4933
4934/* The ioctl command */ 4934/* The ioctl command */
4935static int osst_ioctl(struct inode * inode,struct file * file, 4935static long osst_ioctl(struct file * file,
4936 unsigned int cmd_in, unsigned long arg) 4936 unsigned int cmd_in, unsigned long arg)
4937{ 4937{
4938 int i, cmd_nr, cmd_type, blk, retval = 0; 4938 int i, cmd_nr, cmd_type, blk, retval = 0;
@@ -4943,8 +4943,11 @@ static int osst_ioctl(struct inode * inode,struct file * file,
4943 char * name = tape_name(STp); 4943 char * name = tape_name(STp);
4944 void __user * p = (void __user *)arg; 4944 void __user * p = (void __user *)arg;
4945 4945
4946 if (mutex_lock_interruptible(&STp->lock)) 4946 lock_kernel();
4947 if (mutex_lock_interruptible(&STp->lock)) {
4948 unlock_kernel();
4947 return -ERESTARTSYS; 4949 return -ERESTARTSYS;
4950 }
4948 4951
4949#if DEBUG 4952#if DEBUG
4950 if (debugging && !STp->in_use) { 4953 if (debugging && !STp->in_use) {
@@ -5256,12 +5259,15 @@ static int osst_ioctl(struct inode * inode,struct file * file,
5256 5259
5257 mutex_unlock(&STp->lock); 5260 mutex_unlock(&STp->lock);
5258 5261
5259 return scsi_ioctl(STp->device, cmd_in, p); 5262 retval = scsi_ioctl(STp->device, cmd_in, p);
5263 unlock_kernel();
5264 return retval;
5260 5265
5261out: 5266out:
5262 if (SRpnt) osst_release_request(SRpnt); 5267 if (SRpnt) osst_release_request(SRpnt);
5263 5268
5264 mutex_unlock(&STp->lock); 5269 mutex_unlock(&STp->lock);
5270 unlock_kernel();
5265 5271
5266 return retval; 5272 return retval;
5267} 5273}
@@ -5613,13 +5619,14 @@ static const struct file_operations osst_fops = {
5613 .owner = THIS_MODULE, 5619 .owner = THIS_MODULE,
5614 .read = osst_read, 5620 .read = osst_read,
5615 .write = osst_write, 5621 .write = osst_write,
5616 .ioctl = osst_ioctl, 5622 .unlocked_ioctl = osst_ioctl,
5617#ifdef CONFIG_COMPAT 5623#ifdef CONFIG_COMPAT
5618 .compat_ioctl = osst_compat_ioctl, 5624 .compat_ioctl = osst_compat_ioctl,
5619#endif 5625#endif
5620 .open = os_scsi_tape_open, 5626 .open = os_scsi_tape_open,
5621 .flush = os_scsi_tape_flush, 5627 .flush = os_scsi_tape_flush,
5622 .release = os_scsi_tape_close, 5628 .release = os_scsi_tape_close,
5629 .llseek = noop_llseek,
5623}; 5630};
5624 5631
5625static int osst_supports(struct scsi_device * SDp) 5632static int osst_supports(struct scsi_device * SDp)
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index aa406497eebc..ca5c15c779cf 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -755,7 +755,7 @@ static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
755 struct of_device *op = qpti->op; 755 struct of_device *op = qpti->op;
756 struct device_node *dp; 756 struct device_node *dp;
757 757
758 dp = op->node; 758 dp = op->dev.of_node;
759 759
760 qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1); 760 qpti->scsi_id = of_getintprop_default(dp, "initiator-id", -1);
761 if (qpti->scsi_id == -1) 761 if (qpti->scsi_id == -1)
@@ -776,8 +776,8 @@ static void qpti_get_bursts(struct qlogicpti *qpti)
776 struct of_device *op = qpti->op; 776 struct of_device *op = qpti->op;
777 u8 bursts, bmask; 777 u8 bursts, bmask;
778 778
779 bursts = of_getintprop_default(op->node, "burst-sizes", 0xff); 779 bursts = of_getintprop_default(op->dev.of_node, "burst-sizes", 0xff);
780 bmask = of_getintprop_default(op->node->parent, "burst-sizes", 0xff); 780 bmask = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0xff);
781 if (bmask != 0xff) 781 if (bmask != 0xff)
782 bursts &= bmask; 782 bursts &= bmask;
783 if (bursts == 0xff || 783 if (bursts == 0xff ||
@@ -1293,7 +1293,7 @@ static struct scsi_host_template qpti_template = {
1293static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_device_id *match) 1293static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_device_id *match)
1294{ 1294{
1295 struct scsi_host_template *tpnt = match->data; 1295 struct scsi_host_template *tpnt = match->data;
1296 struct device_node *dp = op->node; 1296 struct device_node *dp = op->dev.of_node;
1297 struct Scsi_Host *host; 1297 struct Scsi_Host *host;
1298 struct qlogicpti *qpti; 1298 struct qlogicpti *qpti;
1299 static int nqptis; 1299 static int nqptis;
@@ -1315,7 +1315,7 @@ static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_devic
1315 qpti->qhost = host; 1315 qpti->qhost = host;
1316 qpti->op = op; 1316 qpti->op = op;
1317 qpti->qpti_id = nqptis; 1317 qpti->qpti_id = nqptis;
1318 strcpy(qpti->prom_name, op->node->name); 1318 strcpy(qpti->prom_name, op->dev.of_node->name);
1319 qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp"); 1319 qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp");
1320 1320
1321 if (qpti_map_regs(qpti) < 0) 1321 if (qpti_map_regs(qpti) < 0)
@@ -1456,8 +1456,11 @@ static const struct of_device_id qpti_match[] = {
1456MODULE_DEVICE_TABLE(of, qpti_match); 1456MODULE_DEVICE_TABLE(of, qpti_match);
1457 1457
1458static struct of_platform_driver qpti_sbus_driver = { 1458static struct of_platform_driver qpti_sbus_driver = {
1459 .name = "qpti", 1459 .driver = {
1460 .match_table = qpti_match, 1460 .name = "qpti",
1461 .owner = THIS_MODULE,
1462 .of_match_table = qpti_match,
1463 },
1461 .probe = qpti_sbus_probe, 1464 .probe = qpti_sbus_probe,
1462 .remove = __devexit_p(qpti_sbus_remove), 1465 .remove = __devexit_p(qpti_sbus_remove),
1463}; 1466};
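
This is the first of several conversions in this patch (sun_esp, apbuart, cpm_uart, mpc52xx_uart, of_serial) that move an of_platform_driver's name and match table from the top level into its embedded struct device_driver. The converted shape, with generic names and an illustrative compatible string:

#include <linux/module.h>
#include <linux/of_platform.h>

static int __devinit foo_probe(struct of_device *op,
			       const struct of_device_id *match);
static int __devexit foo_remove(struct of_device *op);

static struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo", },
	{},
};

static struct of_platform_driver foo_driver = {
	.driver = {
		.name		= "foo",
		.owner		= THIS_MODULE,
		.of_match_table	= foo_match,
	},
	.probe	= foo_probe,
	.remove	= __devexit_p(foo_remove),
};

Keeping the match table inside .driver lets the generic driver core, rather than of_platform-specific code, perform the matching.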
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index a77468cd5a33..1c027a97d8b9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1221,7 +1221,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
1221} 1221}
1222 1222
1223/** 1223/**
1224 * scsilun_to_int: convert a scsi_lun to an int 1224 * scsilun_to_int - convert a scsi_lun to an int
1225 * @scsilun: struct scsi_lun to be converted. 1225 * @scsilun: struct scsi_lun to be converted.
1226 * 1226 *
1227 * Description: 1227 * Description:
@@ -1253,7 +1253,7 @@ int scsilun_to_int(struct scsi_lun *scsilun)
1253EXPORT_SYMBOL(scsilun_to_int); 1253EXPORT_SYMBOL(scsilun_to_int);
1254 1254
1255/** 1255/**
1256 * int_to_scsilun: reverts an int into a scsi_lun 1256 * int_to_scsilun - reverts an int into a scsi_lun
1257 * @lun: integer to be reverted 1257 * @lun: integer to be reverted
1258 * @scsilun: struct scsi_lun to be set. 1258 * @scsilun: struct scsi_lun to be set.
1259 * 1259 *
@@ -1877,12 +1877,9 @@ void scsi_forget_host(struct Scsi_Host *shost)
1877 spin_unlock_irqrestore(shost->host_lock, flags); 1877 spin_unlock_irqrestore(shost->host_lock, flags);
1878} 1878}
1879 1879
1880/* 1880/**
1881 * Function: scsi_get_host_dev() 1881 * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself
1882 * 1882 * @shost: Host that needs a scsi_device
1883 * Purpose: Create a scsi_device that points to the host adapter itself.
1884 *
1885 * Arguments: SHpnt - Host that needs a scsi_device
1886 * 1883 *
1887 * Lock status: None assumed. 1884 * Lock status: None assumed.
1888 * 1885 *
@@ -1895,7 +1892,7 @@ void scsi_forget_host(struct Scsi_Host *shost)
1895 * 1892 *
1896 * Note - this device is not accessible from any high-level 1893 * Note - this device is not accessible from any high-level
1897 * drivers (including generics), which is probably not 1894 * drivers (including generics), which is probably not
1898 * optimal. We can add hooks later to attach 1895 * optimal. We can add hooks later to attach.
1899 */ 1896 */
1900struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) 1897struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
1901{ 1898{
@@ -1921,18 +1918,13 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
1921} 1918}
1922EXPORT_SYMBOL(scsi_get_host_dev); 1919EXPORT_SYMBOL(scsi_get_host_dev);
1923 1920
1924/* 1921/**
1925 * Function: scsi_free_host_dev() 1922 * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself
1926 * 1923 * @sdev: Host device to be freed
1927 * Purpose: Free a scsi_device that points to the host adapter itself.
1928 *
1929 * Arguments: SHpnt - Host that needs a scsi_device
1930 * 1924 *
1931 * Lock status: None assumed. 1925 * Lock status: None assumed.
1932 * 1926 *
1933 * Returns: Nothing 1927 * Returns: Nothing
1934 *
1935 * Notes:
1936 */ 1928 */
1937void scsi_free_host_dev(struct scsi_device *sdev) 1929void scsi_free_host_dev(struct scsi_device *sdev)
1938{ 1930{
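
The scsi_scan.c comment hunks convert free-form "Function:/Purpose:/Arguments:" blocks into kernel-doc, which scripts/kernel-doc can actually parse. The canonical shape, for a hypothetical function:

/**
 * foo_frob - one-line summary, with a dash after the name
 * @arg:	what this parameter means
 *
 * Free-form description follows the blank comment line. The
 * "name - summary" header and the @param lines are the parts the
 * kernel-doc tooling extracts; the old free-form blocks were
 * invisible to it.
 */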
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index dee1c96288d4..ef752b248c4d 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -758,8 +758,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
758} 758}
759 759
760static int 760static int
761sg_ioctl(struct inode *inode, struct file *filp, 761sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
762 unsigned int cmd_in, unsigned long arg)
763{ 762{
764 void __user *p = (void __user *)arg; 763 void __user *p = (void __user *)arg;
765 int __user *ip = p; 764 int __user *ip = p;
@@ -1078,6 +1077,18 @@ sg_ioctl(struct inode *inode, struct file *filp,
1078 } 1077 }
1079} 1078}
1080 1079
1080static long
1081sg_unlocked_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1082{
1083 int ret;
1084
1085 lock_kernel();
1086 ret = sg_ioctl(filp, cmd_in, arg);
1087 unlock_kernel();
1088
1089 return ret;
1090}
1091
1081#ifdef CONFIG_COMPAT 1092#ifdef CONFIG_COMPAT
1082static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) 1093static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1083{ 1094{
@@ -1322,7 +1333,7 @@ static const struct file_operations sg_fops = {
1322 .read = sg_read, 1333 .read = sg_read,
1323 .write = sg_write, 1334 .write = sg_write,
1324 .poll = sg_poll, 1335 .poll = sg_poll,
1325 .ioctl = sg_ioctl, 1336 .unlocked_ioctl = sg_unlocked_ioctl,
1326#ifdef CONFIG_COMPAT 1337#ifdef CONFIG_COMPAT
1327 .compat_ioctl = sg_compat_ioctl, 1338 .compat_ioctl = sg_compat_ioctl,
1328#endif 1339#endif
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 3ea1a713ef25..24211d0efa6d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3962,6 +3962,7 @@ static const struct file_operations st_fops =
3962 .open = st_open, 3962 .open = st_open,
3963 .flush = st_flush, 3963 .flush = st_flush,
3964 .release = st_release, 3964 .release = st_release,
3965 .llseek = noop_llseek,
3965}; 3966};
3966 3967
3967static int st_probe(struct device *dev) 3968static int st_probe(struct device *dev)
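
The st.c (and osst.c) one-liner makes the tape character device's indifference to the file position explicit. A sketch of the idiom, assuming a driver whose read/write paths ignore f_pos:

#include <linux/fs.h>
#include <linux/module.h>

static ssize_t foo_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t foo_write(struct file *, const char __user *, size_t, loff_t *);

static const struct file_operations foo_fops = {
	.owner	= THIS_MODULE,
	.read	= foo_read,
	.write	= foo_write,
	/* succeed without moving f_pos, instead of inheriting a
	 * default seek behaviour the driver never depended on */
	.llseek	= noop_llseek,
};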
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index fc23d273fb1a..386dd9d602b6 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -125,7 +125,7 @@ static void __devinit esp_get_scsi_id(struct esp *esp, struct of_device *espdma)
125 struct of_device *op = esp->dev; 125 struct of_device *op = esp->dev;
126 struct device_node *dp; 126 struct device_node *dp;
127 127
128 dp = op->node; 128 dp = op->dev.of_node;
129 esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff); 129 esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
130 if (esp->scsi_id != 0xff) 130 if (esp->scsi_id != 0xff)
131 goto done; 131 goto done;
@@ -134,7 +134,7 @@ static void __devinit esp_get_scsi_id(struct esp *esp, struct of_device *espdma)
134 if (esp->scsi_id != 0xff) 134 if (esp->scsi_id != 0xff)
135 goto done; 135 goto done;
136 136
137 esp->scsi_id = of_getintprop_default(espdma->node, 137 esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
138 "scsi-initiator-id", 7); 138 "scsi-initiator-id", 7);
139 139
140done: 140done:
@@ -147,7 +147,7 @@ static void __devinit esp_get_differential(struct esp *esp)
147 struct of_device *op = esp->dev; 147 struct of_device *op = esp->dev;
148 struct device_node *dp; 148 struct device_node *dp;
149 149
150 dp = op->node; 150 dp = op->dev.of_node;
151 if (of_find_property(dp, "differential", NULL)) 151 if (of_find_property(dp, "differential", NULL))
152 esp->flags |= ESP_FLAG_DIFFERENTIAL; 152 esp->flags |= ESP_FLAG_DIFFERENTIAL;
153 else 153 else
@@ -160,7 +160,7 @@ static void __devinit esp_get_clock_params(struct esp *esp)
160 struct device_node *bus_dp, *dp; 160 struct device_node *bus_dp, *dp;
161 int fmhz; 161 int fmhz;
162 162
163 dp = op->node; 163 dp = op->dev.of_node;
164 bus_dp = dp->parent; 164 bus_dp = dp->parent;
165 165
166 fmhz = of_getintprop_default(dp, "clock-frequency", 0); 166 fmhz = of_getintprop_default(dp, "clock-frequency", 0);
@@ -172,12 +172,12 @@ static void __devinit esp_get_clock_params(struct esp *esp)
172 172
173static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of) 173static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of)
174{ 174{
175 struct device_node *dma_dp = dma_of->node; 175 struct device_node *dma_dp = dma_of->dev.of_node;
176 struct of_device *op = esp->dev; 176 struct of_device *op = esp->dev;
177 struct device_node *dp; 177 struct device_node *dp;
178 u8 bursts, val; 178 u8 bursts, val;
179 179
180 dp = op->node; 180 dp = op->dev.of_node;
181 bursts = of_getintprop_default(dp, "burst-sizes", 0xff); 181 bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
182 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff); 182 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
183 if (val != 0xff) 183 if (val != 0xff)
@@ -565,7 +565,7 @@ fail:
565static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device_id *match) 565static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device_id *match)
566{ 566{
567 struct device_node *dma_node = NULL; 567 struct device_node *dma_node = NULL;
568 struct device_node *dp = op->node; 568 struct device_node *dp = op->dev.of_node;
569 struct of_device *dma_of = NULL; 569 struct of_device *dma_of = NULL;
570 int hme = 0; 570 int hme = 0;
571 571
@@ -574,7 +574,7 @@ static int __devinit esp_sbus_probe(struct of_device *op, const struct of_device
574 !strcmp(dp->parent->name, "dma"))) 574 !strcmp(dp->parent->name, "dma")))
575 dma_node = dp->parent; 575 dma_node = dp->parent;
576 else if (!strcmp(dp->name, "SUNW,fas")) { 576 else if (!strcmp(dp->name, "SUNW,fas")) {
577 dma_node = op->node; 577 dma_node = op->dev.of_node;
578 hme = 1; 578 hme = 1;
579 } 579 }
580 if (dma_node) 580 if (dma_node)
@@ -633,8 +633,11 @@ static const struct of_device_id esp_match[] = {
633MODULE_DEVICE_TABLE(of, esp_match); 633MODULE_DEVICE_TABLE(of, esp_match);
634 634
635static struct of_platform_driver esp_sbus_driver = { 635static struct of_platform_driver esp_sbus_driver = {
636 .name = "esp", 636 .driver = {
637 .match_table = esp_match, 637 .name = "esp",
638 .owner = THIS_MODULE,
639 .of_match_table = esp_match,
640 },
638 .probe = esp_sbus_probe, 641 .probe = esp_sbus_probe,
639 .remove = __devexit_p(esp_sbus_remove), 642 .remove = __devexit_p(esp_sbus_remove),
640}; 643};
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 78ed24bb6a35..30463862603b 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -1437,7 +1437,7 @@ int m68328_console_setup(struct console *cp, char *arg)
1437 for (i = 0; i < ARRAY_SIZE(baud_table); i++) 1437 for (i = 0; i < ARRAY_SIZE(baud_table); i++)
1438 if (baud_table[i] == n) 1438 if (baud_table[i] == n)
1439 break; 1439 break;
1440 if (i < BAUD_TABLE_SIZE) { 1440 if (i < ARRAY_SIZE(baud_table)) {
1441 m68328_console_baud = n; 1441 m68328_console_baud = n;
1442 m68328_console_cbaud = 0; 1442 m68328_console_cbaud = 0;
1443 if (i > 15) { 1443 if (i > 15) {
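
The 68328serial fix replaces the BAUD_TABLE_SIZE macro in the bounds check with the same ARRAY_SIZE(baud_table) expression the loop already uses, so the search bound and the validity check can no longer drift apart. The general idiom, with illustrative table contents:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

static const int baud_table[] = { 9600, 19200, 38400, 57600, 115200 };

/* return the index of 'rate' in baud_table[], or -1 if unsupported */
static int foo_baud_index(int rate)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(baud_table); i++)
		if (baud_table[i] == rate)
			return i;

	return -1;	/* loop and check share one bound */
}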
diff --git a/drivers/serial/apbuart.c b/drivers/serial/apbuart.c
index fe91319b5f65..0099b8692b60 100644
--- a/drivers/serial/apbuart.c
+++ b/drivers/serial/apbuart.c
@@ -559,7 +559,7 @@ static int __devinit apbuart_probe(struct of_device *op,
559 559
560 i = 0; 560 i = 0;
561 for (i = 0; i < grlib_apbuart_port_nr; i++) { 561 for (i = 0; i < grlib_apbuart_port_nr; i++) {
562 if (op->node == grlib_apbuart_nodes[i]) 562 if (op->dev.of_node == grlib_apbuart_nodes[i])
563 break; 563 break;
564 } 564 }
565 565
@@ -584,12 +584,12 @@ static struct of_device_id __initdata apbuart_match[] = {
584}; 584};
585 585
586static struct of_platform_driver grlib_apbuart_of_driver = { 586static struct of_platform_driver grlib_apbuart_of_driver = {
587 .match_table = apbuart_match,
588 .probe = apbuart_probe, 587 .probe = apbuart_probe,
589 .driver = { 588 .driver = {
590 .owner = THIS_MODULE, 589 .owner = THIS_MODULE,
591 .name = "grlib-apbuart", 590 .name = "grlib-apbuart",
592 }, 591 .of_match_table = apbuart_match,
592 },
593}; 593};
594 594
595 595
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index 300cea768d74..9eb62a256e9a 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -1342,7 +1342,7 @@ static int __devinit cpm_uart_probe(struct of_device *ofdev,
1342 /* initialize the device pointer for the port */ 1342 /* initialize the device pointer for the port */
1343 pinfo->port.dev = &ofdev->dev; 1343 pinfo->port.dev = &ofdev->dev;
1344 1344
1345 ret = cpm_uart_init_port(ofdev->node, pinfo); 1345 ret = cpm_uart_init_port(ofdev->dev.of_node, pinfo);
1346 if (ret) 1346 if (ret)
1347 return ret; 1347 return ret;
1348 1348
@@ -1372,8 +1372,11 @@ static struct of_device_id cpm_uart_match[] = {
1372}; 1372};
1373 1373
1374static struct of_platform_driver cpm_uart_driver = { 1374static struct of_platform_driver cpm_uart_driver = {
1375 .name = "cpm_uart", 1375 .driver = {
1376 .match_table = cpm_uart_match, 1376 .name = "cpm_uart",
1377 .owner = THIS_MODULE,
1378 .of_match_table = cpm_uart_match,
1379 },
1377 .probe = cpm_uart_probe, 1380 .probe = cpm_uart_probe,
1378 .remove = cpm_uart_remove, 1381 .remove = cpm_uart_remove,
1379 }; 1382 };
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 02469c31bf0b..84a35f699016 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -397,34 +397,10 @@ static unsigned long mpc512x_getuartclk(void *p)
397 return mpc5xxx_get_bus_frequency(p); 397 return mpc5xxx_get_bus_frequency(p);
398} 398}
399 399
400#define DEFAULT_FIFO_SIZE 16
401
402static unsigned int __init get_fifo_size(struct device_node *np,
403 char *fifo_name)
404{
405 const unsigned int *fp;
406
407 fp = of_get_property(np, fifo_name, NULL);
408 if (fp)
409 return *fp;
410
411 pr_warning("no %s property in %s node, defaulting to %d\n",
412 fifo_name, np->full_name, DEFAULT_FIFO_SIZE);
413
414 return DEFAULT_FIFO_SIZE;
415}
416
417#define FIFOC(_base) ((struct mpc512x_psc_fifo __iomem *) \
418 ((u32)(_base) + sizeof(struct mpc52xx_psc)))
419
420/* Init PSC FIFO Controller */ 400/* Init PSC FIFO Controller */
421static int __init mpc512x_psc_fifoc_init(void) 401static int __init mpc512x_psc_fifoc_init(void)
422{ 402{
423 struct device_node *np; 403 struct device_node *np;
424 void __iomem *psc;
425 unsigned int tx_fifo_size;
426 unsigned int rx_fifo_size;
427 int fifobase = 0; /* current fifo address in 32 bit words */
428 404
429 np = of_find_compatible_node(NULL, NULL, 405 np = of_find_compatible_node(NULL, NULL,
430 "fsl,mpc5121-psc-fifo"); 406 "fsl,mpc5121-psc-fifo");
@@ -447,51 +423,6 @@ static int __init mpc512x_psc_fifoc_init(void)
447 return -ENODEV; 423 return -ENODEV;
448 } 424 }
449 425
450 for_each_compatible_node(np, NULL, "fsl,mpc5121-psc-uart") {
451 tx_fifo_size = get_fifo_size(np, "fsl,tx-fifo-size");
452 rx_fifo_size = get_fifo_size(np, "fsl,rx-fifo-size");
453
454 /* size in register is in 4 byte units */
455 tx_fifo_size /= 4;
456 rx_fifo_size /= 4;
457 if (!tx_fifo_size)
458 tx_fifo_size = 1;
459 if (!rx_fifo_size)
460 rx_fifo_size = 1;
461
462 psc = of_iomap(np, 0);
463 if (!psc) {
464 pr_err("%s: Can't map %s device\n",
465 __func__, np->full_name);
466 continue;
467 }
468
469 /* FIFO space is 4KiB, check if requested size is available */
470 if ((fifobase + tx_fifo_size + rx_fifo_size) > 0x1000) {
471 pr_err("%s: no fifo space available for %s\n",
472 __func__, np->full_name);
473 iounmap(psc);
474 /*
475 * chances are that another device requests less
476 * fifo space, so we continue.
477 */
478 continue;
479 }
480 /* set tx and rx fifo size registers */
481 out_be32(&FIFOC(psc)->txsz, (fifobase << 16) | tx_fifo_size);
482 fifobase += tx_fifo_size;
483 out_be32(&FIFOC(psc)->rxsz, (fifobase << 16) | rx_fifo_size);
484 fifobase += rx_fifo_size;
485
486 /* reset and enable the slices */
487 out_be32(&FIFOC(psc)->txcmd, 0x80);
488 out_be32(&FIFOC(psc)->txcmd, 0x01);
489 out_be32(&FIFOC(psc)->rxcmd, 0x80);
490 out_be32(&FIFOC(psc)->rxcmd, 0x01);
491
492 iounmap(psc);
493 }
494
495 return 0; 426 return 0;
496} 427}
497 428
@@ -1295,14 +1226,14 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
1295 1226
1296 /* Check validity & presence */ 1227 /* Check validity & presence */
1297 for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++) 1228 for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++)
1298 if (mpc52xx_uart_nodes[idx] == op->node) 1229 if (mpc52xx_uart_nodes[idx] == op->dev.of_node)
1299 break; 1230 break;
1300 if (idx >= MPC52xx_PSC_MAXNUM) 1231 if (idx >= MPC52xx_PSC_MAXNUM)
1301 return -EINVAL; 1232 return -EINVAL;
1302 pr_debug("Found %s assigned to ttyPSC%x\n", 1233 pr_debug("Found %s assigned to ttyPSC%x\n",
1303 mpc52xx_uart_nodes[idx]->full_name, idx); 1234 mpc52xx_uart_nodes[idx]->full_name, idx);
1304 1235
1305 uartclk = psc_ops->getuartclk(op->node); 1236 uartclk = psc_ops->getuartclk(op->dev.of_node);
1306 if (uartclk == 0) { 1237 if (uartclk == 0) {
1307 dev_dbg(&op->dev, "Could not find uart clock frequency!\n"); 1238 dev_dbg(&op->dev, "Could not find uart clock frequency!\n");
1308 return -EINVAL; 1239 return -EINVAL;
@@ -1322,7 +1253,7 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
1322 port->dev = &op->dev; 1253 port->dev = &op->dev;
1323 1254
1324 /* Search for IRQ and mapbase */ 1255 /* Search for IRQ and mapbase */
1325 ret = of_address_to_resource(op->node, 0, &res); 1256 ret = of_address_to_resource(op->dev.of_node, 0, &res);
1326 if (ret) 1257 if (ret)
1327 return ret; 1258 return ret;
1328 1259
@@ -1332,7 +1263,7 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
1332 return -EINVAL; 1263 return -EINVAL;
1333 } 1264 }
1334 1265
1335 psc_ops->get_irq(port, op->node); 1266 psc_ops->get_irq(port, op->dev.of_node);
1336 if (port->irq == NO_IRQ) { 1267 if (port->irq == NO_IRQ) {
1337 dev_dbg(&op->dev, "Could not get irq\n"); 1268 dev_dbg(&op->dev, "Could not get irq\n");
1338 return -EINVAL; 1269 return -EINVAL;
@@ -1431,15 +1362,16 @@ mpc52xx_uart_of_enumerate(void)
1431MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match); 1362MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match);
1432 1363
1433static struct of_platform_driver mpc52xx_uart_of_driver = { 1364static struct of_platform_driver mpc52xx_uart_of_driver = {
1434 .match_table = mpc52xx_uart_of_match,
1435 .probe = mpc52xx_uart_of_probe, 1365 .probe = mpc52xx_uart_of_probe,
1436 .remove = mpc52xx_uart_of_remove, 1366 .remove = mpc52xx_uart_of_remove,
1437#ifdef CONFIG_PM 1367#ifdef CONFIG_PM
1438 .suspend = mpc52xx_uart_of_suspend, 1368 .suspend = mpc52xx_uart_of_suspend,
1439 .resume = mpc52xx_uart_of_resume, 1369 .resume = mpc52xx_uart_of_resume,
1440#endif 1370#endif
1441 .driver = { 1371 .driver = {
1442 .name = "mpc52xx-psc-uart", 1372 .name = "mpc52xx-psc-uart",
1373 .owner = THIS_MODULE,
1374 .of_match_table = mpc52xx_uart_of_match,
1443 }, 1375 },
1444}; 1376};
1445 1377
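This conversion pattern repeats through the rest of the patch: the legacy top-level .name/.match_table/.owner fields of struct of_platform_driver move into the embedded struct device_driver as .name, .owner, and .of_match_table, and lookups of the firmware node switch from op->node to op->dev.of_node. A hedged before/after sketch with a hypothetical "foo" driver:

        /* Before the conversion (hypothetical driver, illustration only): */
        static struct of_platform_driver foo_driver = {
                .name           = "foo",
                .match_table    = foo_match,
                .owner          = THIS_MODULE,
                .probe          = foo_probe,
        };

        /* After: name, owner and match table live in the generic device_driver: */
        static struct of_platform_driver foo_driver = {
                .probe  = foo_probe,
                .driver = {
                        .name           = "foo",
                        .owner          = THIS_MODULE,
                        .of_match_table = foo_match,
                },
        };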
diff --git a/drivers/serial/nwpserial.c b/drivers/serial/nwpserial.c
index e1ab8ec0a4a6..3c02fa96f282 100644
--- a/drivers/serial/nwpserial.c
+++ b/drivers/serial/nwpserial.c
@@ -344,7 +344,7 @@ int nwpserial_register_port(struct uart_port *port)
344 344
345 mutex_lock(&nwpserial_mutex); 345 mutex_lock(&nwpserial_mutex);
346 346
347 dn = to_of_device(port->dev)->node; 347 dn = to_of_device(port->dev)->dev.of_node;
348 if (dn == NULL) 348 if (dn == NULL)
349 goto out; 349 goto out;
350 350
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 4abfebdb0fcc..a48d9080f552 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -31,7 +31,7 @@ static int __devinit of_platform_serial_setup(struct of_device *ofdev,
31 int type, struct uart_port *port) 31 int type, struct uart_port *port)
32{ 32{
33 struct resource resource; 33 struct resource resource;
34 struct device_node *np = ofdev->node; 34 struct device_node *np = ofdev->dev.of_node;
35 const unsigned int *clk, *spd; 35 const unsigned int *clk, *spd;
36 const u32 *prop; 36 const u32 *prop;
37 int ret, prop_size; 37 int ret, prop_size;
@@ -88,7 +88,7 @@ static int __devinit of_platform_serial_probe(struct of_device *ofdev,
88 int port_type; 88 int port_type;
89 int ret; 89 int ret;
90 90
91 if (of_find_property(ofdev->node, "used-by-rtas", NULL)) 91 if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL))
92 return -EBUSY; 92 return -EBUSY;
93 93
94 info = kmalloc(sizeof(*info), GFP_KERNEL); 94 info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -175,11 +175,13 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = {
175}; 175};
176 176
177static struct of_platform_driver of_platform_serial_driver = { 177static struct of_platform_driver of_platform_serial_driver = {
178 .owner = THIS_MODULE, 178 .driver = {
179 .name = "of_serial", 179 .name = "of_serial",
180 .owner = THIS_MODULE,
181 .of_match_table = of_platform_serial_table,
182 },
180 .probe = of_platform_serial_probe, 183 .probe = of_platform_serial_probe,
181 .remove = of_platform_serial_remove, 184 .remove = of_platform_serial_remove,
182 .match_table = of_platform_serial_table,
183}; 185};
184 186
185static int __init of_platform_serial_init(void) 187static int __init of_platform_serial_init(void)
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c
index 700e10833bf9..cabbdc7ba583 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/serial/pmac_zilog.c
@@ -1611,7 +1611,7 @@ static int pmz_attach(struct macio_dev *mdev, const struct of_device_id *match)
1611 /* Iterate the pmz_ports array to find a matching entry 1611 /* Iterate the pmz_ports array to find a matching entry
1612 */ 1612 */
1613 for (i = 0; i < MAX_ZS_PORTS; i++) 1613 for (i = 0; i < MAX_ZS_PORTS; i++)
1614 if (pmz_ports[i].node == mdev->ofdev.node) { 1614 if (pmz_ports[i].node == mdev->ofdev.dev.of_node) {
1615 struct uart_pmac_port *uap = &pmz_ports[i]; 1615 struct uart_pmac_port *uap = &pmz_ports[i];
1616 1616
1617 uap->dev = mdev; 1617 uap->dev = mdev;
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 4f73fb756745..5f90fcd7d107 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -1004,8 +1004,9 @@ static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
1004 s->chan_rx = NULL; 1004 s->chan_rx = NULL;
1005 s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL; 1005 s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
1006 dma_release_channel(chan); 1006 dma_release_channel(chan);
1007 dma_free_coherent(port->dev, s->buf_len_rx * 2, 1007 if (sg_dma_address(&s->sg_rx[0]))
1008 sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0])); 1008 dma_free_coherent(port->dev, s->buf_len_rx * 2,
1009 sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
1009 if (enable_pio) 1010 if (enable_pio)
1010 sci_start_rx(port); 1011 sci_start_rx(port);
1011} 1012}
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index d14cca7fb88d..890f91742962 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -565,7 +565,7 @@ static int __devinit hv_probe(struct of_device *op, const struct of_device_id *m
565 if (err) 565 if (err)
566 goto out_free_con_read_page; 566 goto out_free_con_read_page;
567 567
568 sunserial_console_match(&sunhv_console, op->node, 568 sunserial_console_match(&sunhv_console, op->dev.of_node,
569 &sunhv_reg, port->line, false); 569 &sunhv_reg, port->line, false);
570 570
571 err = uart_add_one_port(&sunhv_reg, port); 571 err = uart_add_one_port(&sunhv_reg, port);
@@ -630,8 +630,11 @@ static const struct of_device_id hv_match[] = {
630MODULE_DEVICE_TABLE(of, hv_match); 630MODULE_DEVICE_TABLE(of, hv_match);
631 631
632static struct of_platform_driver hv_driver = { 632static struct of_platform_driver hv_driver = {
633 .name = "hv", 633 .driver = {
634 .match_table = hv_match, 634 .name = "hv",
635 .owner = THIS_MODULE,
636 .of_match_table = hv_match,
637 },
635 .probe = hv_probe, 638 .probe = hv_probe,
636 .remove = __devexit_p(hv_remove), 639 .remove = __devexit_p(hv_remove),
637}; 640};
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index d2e0321049e2..5e81bc6b48b0 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -883,7 +883,7 @@ static int sunsab_console_setup(struct console *con, char *options)
883 printk("Console: ttyS%d (SAB82532)\n", 883 printk("Console: ttyS%d (SAB82532)\n",
884 (sunsab_reg.minor - 64) + con->index); 884 (sunsab_reg.minor - 64) + con->index);
885 885
886 sunserial_console_termios(con, to_of_device(up->port.dev)->node); 886 sunserial_console_termios(con, to_of_device(up->port.dev)->dev.of_node);
887 887
888 switch (con->cflag & CBAUD) { 888 switch (con->cflag & CBAUD) {
889 case B150: baud = 150; break; 889 case B150: baud = 150; break;
@@ -1026,11 +1026,11 @@ static int __devinit sab_probe(struct of_device *op, const struct of_device_id *
1026 if (err) 1026 if (err)
1027 goto out1; 1027 goto out1;
1028 1028
1029 sunserial_console_match(SUNSAB_CONSOLE(), op->node, 1029 sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node,
1030 &sunsab_reg, up[0].port.line, 1030 &sunsab_reg, up[0].port.line,
1031 false); 1031 false);
1032 1032
1033 sunserial_console_match(SUNSAB_CONSOLE(), op->node, 1033 sunserial_console_match(SUNSAB_CONSOLE(), op->dev.of_node,
1034 &sunsab_reg, up[1].port.line, 1034 &sunsab_reg, up[1].port.line,
1035 false); 1035 false);
1036 1036
@@ -1093,8 +1093,11 @@ static const struct of_device_id sab_match[] = {
1093MODULE_DEVICE_TABLE(of, sab_match); 1093MODULE_DEVICE_TABLE(of, sab_match);
1094 1094
1095static struct of_platform_driver sab_driver = { 1095static struct of_platform_driver sab_driver = {
1096 .name = "sab", 1096 .driver = {
1097 .match_table = sab_match, 1097 .name = "sab",
1098 .owner = THIS_MODULE,
1099 .of_match_table = sab_match,
1100 },
1098 .probe = sab_probe, 1101 .probe = sab_probe,
1099 .remove = __devexit_p(sab_remove), 1102 .remove = __devexit_p(sab_remove),
1100}; 1103};
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 01f7731e59b8..234459c2f012 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1200,7 +1200,7 @@ static int __devinit sunsu_kbd_ms_init(struct uart_sunsu_port *up)
1200 return -ENODEV; 1200 return -ENODEV;
1201 1201
1202 printk("%s: %s port at %llx, irq %u\n", 1202 printk("%s: %s port at %llx, irq %u\n",
1203 to_of_device(up->port.dev)->node->full_name, 1203 to_of_device(up->port.dev)->dev.of_node->full_name,
1204 (up->su_type == SU_PORT_KBD) ? "Keyboard" : "Mouse", 1204 (up->su_type == SU_PORT_KBD) ? "Keyboard" : "Mouse",
1205 (unsigned long long) up->port.mapbase, 1205 (unsigned long long) up->port.mapbase,
1206 up->port.irq); 1206 up->port.irq);
@@ -1352,7 +1352,7 @@ static int __init sunsu_console_setup(struct console *co, char *options)
1352 spin_lock_init(&port->lock); 1352 spin_lock_init(&port->lock);
1353 1353
1354 /* Get firmware console settings. */ 1354 /* Get firmware console settings. */
1355 sunserial_console_termios(co, to_of_device(port->dev)->node); 1355 sunserial_console_termios(co, to_of_device(port->dev)->dev.of_node);
1356 1356
1357 memset(&termios, 0, sizeof(struct ktermios)); 1357 memset(&termios, 0, sizeof(struct ktermios));
1358 termios.c_cflag = co->cflag; 1358 termios.c_cflag = co->cflag;
@@ -1409,7 +1409,7 @@ static enum su_type __devinit su_get_type(struct device_node *dp)
1409static int __devinit su_probe(struct of_device *op, const struct of_device_id *match) 1409static int __devinit su_probe(struct of_device *op, const struct of_device_id *match)
1410{ 1410{
1411 static int inst; 1411 static int inst;
1412 struct device_node *dp = op->node; 1412 struct device_node *dp = op->dev.of_node;
1413 struct uart_sunsu_port *up; 1413 struct uart_sunsu_port *up;
1414 struct resource *rp; 1414 struct resource *rp;
1415 enum su_type type; 1415 enum su_type type;
@@ -1539,8 +1539,11 @@ static const struct of_device_id su_match[] = {
1539MODULE_DEVICE_TABLE(of, su_match); 1539MODULE_DEVICE_TABLE(of, su_match);
1540 1540
1541static struct of_platform_driver su_driver = { 1541static struct of_platform_driver su_driver = {
1542 .name = "su", 1542 .driver = {
1543 .match_table = su_match, 1543 .name = "su",
1544 .owner = THIS_MODULE,
1545 .of_match_table = su_match,
1546 },
1544 .probe = su_probe, 1547 .probe = su_probe,
1545 .remove = __devexit_p(su_remove), 1548 .remove = __devexit_p(su_remove),
1546}; 1549};
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 978b3cee02d7..f9a24f4ebb34 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -1230,7 +1230,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
1230 (sunzilog_reg.minor - 64) + con->index, con->index); 1230 (sunzilog_reg.minor - 64) + con->index, con->index);
1231 1231
1232 /* Get firmware console settings. */ 1232 /* Get firmware console settings. */
1233 sunserial_console_termios(con, to_of_device(up->port.dev)->node); 1233 sunserial_console_termios(con, to_of_device(up->port.dev)->dev.of_node);
1234 1234
1235 /* Firmware console speed is limited to 150-->38400 baud so 1235 /* Firmware console speed is limited to 150-->38400 baud so
1236 * this hackish cflag thing is OK. 1236 * this hackish cflag thing is OK.
@@ -1408,7 +1408,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
1408 int keyboard_mouse = 0; 1408 int keyboard_mouse = 0;
1409 int err; 1409 int err;
1410 1410
1411 if (of_find_property(op->node, "keyboard", NULL)) 1411 if (of_find_property(op->dev.of_node, "keyboard", NULL))
1412 keyboard_mouse = 1; 1412 keyboard_mouse = 1;
1413 1413
1414 /* uarts must come before keyboards/mice */ 1414 /* uarts must come before keyboards/mice */
@@ -1465,7 +1465,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
1465 sunzilog_init_hw(&up[1]); 1465 sunzilog_init_hw(&up[1]);
1466 1466
1467 if (!keyboard_mouse) { 1467 if (!keyboard_mouse) {
1468 if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node, 1468 if (sunserial_console_match(SUNZILOG_CONSOLE(), op->dev.of_node,
1469 &sunzilog_reg, up[0].port.line, 1469 &sunzilog_reg, up[0].port.line,
1470 false)) 1470 false))
1471 up->flags |= SUNZILOG_FLAG_IS_CONS; 1471 up->flags |= SUNZILOG_FLAG_IS_CONS;
@@ -1475,7 +1475,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
1475 rp, sizeof(struct zilog_layout)); 1475 rp, sizeof(struct zilog_layout));
1476 return err; 1476 return err;
1477 } 1477 }
1478 if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node, 1478 if (sunserial_console_match(SUNZILOG_CONSOLE(), op->dev.of_node,
1479 &sunzilog_reg, up[1].port.line, 1479 &sunzilog_reg, up[1].port.line,
1480 false)) 1480 false))
1481 up->flags |= SUNZILOG_FLAG_IS_CONS; 1481 up->flags |= SUNZILOG_FLAG_IS_CONS;
@@ -1541,8 +1541,11 @@ static const struct of_device_id zs_match[] = {
1541MODULE_DEVICE_TABLE(of, zs_match); 1541MODULE_DEVICE_TABLE(of, zs_match);
1542 1542
1543static struct of_platform_driver zs_driver = { 1543static struct of_platform_driver zs_driver = {
1544 .name = "zs", 1544 .driver = {
1545 .match_table = zs_match, 1545 .name = "zs",
1546 .owner = THIS_MODULE,
1547 .of_match_table = zs_match,
1548 },
1546 .probe = zs_probe, 1549 .probe = zs_probe,
1547 .remove = __devexit_p(zs_remove), 1550 .remove = __devexit_p(zs_remove),
1548}; 1551};
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index e6639a95d276..8acccd564378 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -591,15 +591,15 @@ ulite_of_probe(struct of_device *op, const struct of_device_id *match)
591 591
592 dev_dbg(&op->dev, "%s(%p, %p)\n", __func__, op, match); 592 dev_dbg(&op->dev, "%s(%p, %p)\n", __func__, op, match);
593 593
594 rc = of_address_to_resource(op->node, 0, &res); 594 rc = of_address_to_resource(op->dev.of_node, 0, &res);
595 if (rc) { 595 if (rc) {
596 dev_err(&op->dev, "invalid address\n"); 596 dev_err(&op->dev, "invalid address\n");
597 return rc; 597 return rc;
598 } 598 }
599 599
600 irq = irq_of_parse_and_map(op->node, 0); 600 irq = irq_of_parse_and_map(op->dev.of_node, 0);
601 601
602 id = of_get_property(op->node, "port-number", NULL); 602 id = of_get_property(op->dev.of_node, "port-number", NULL);
603 603
604 return ulite_assign(&op->dev, id ? *id : -1, res.start, irq); 604 return ulite_assign(&op->dev, id ? *id : -1, res.start, irq);
605} 605}
@@ -610,13 +610,12 @@ static int __devexit ulite_of_remove(struct of_device *op)
610} 610}
611 611
612static struct of_platform_driver ulite_of_driver = { 612static struct of_platform_driver ulite_of_driver = {
613 .owner = THIS_MODULE,
614 .name = "uartlite",
615 .match_table = ulite_of_match,
616 .probe = ulite_of_probe, 613 .probe = ulite_of_probe,
617 .remove = __devexit_p(ulite_of_remove), 614 .remove = __devexit_p(ulite_of_remove),
618 .driver = { 615 .driver = {
619 .name = "uartlite", 616 .name = "uartlite",
617 .owner = THIS_MODULE,
618 .of_match_table = ulite_of_match,
620 }, 619 },
621}; 620};
622 621
diff --git a/drivers/serial/ucc_uart.c b/drivers/serial/ucc_uart.c
index 074904912f64..907b06f5c447 100644
--- a/drivers/serial/ucc_uart.c
+++ b/drivers/serial/ucc_uart.c
@@ -1197,7 +1197,7 @@ static void uart_firmware_cont(const struct firmware *fw, void *context)
1197static int ucc_uart_probe(struct of_device *ofdev, 1197static int ucc_uart_probe(struct of_device *ofdev,
1198 const struct of_device_id *match) 1198 const struct of_device_id *match)
1199{ 1199{
1200 struct device_node *np = ofdev->node; 1200 struct device_node *np = ofdev->dev.of_node;
1201 const unsigned int *iprop; /* Integer OF properties */ 1201 const unsigned int *iprop; /* Integer OF properties */
1202 const char *sprop; /* String OF properties */ 1202 const char *sprop; /* String OF properties */
1203 struct uart_qe_port *qe_port = NULL; 1203 struct uart_qe_port *qe_port = NULL;
@@ -1486,9 +1486,11 @@ static struct of_device_id ucc_uart_match[] = {
1486MODULE_DEVICE_TABLE(of, ucc_uart_match); 1486MODULE_DEVICE_TABLE(of, ucc_uart_match);
1487 1487
1488static struct of_platform_driver ucc_uart_of_driver = { 1488static struct of_platform_driver ucc_uart_of_driver = {
1489 .owner = THIS_MODULE, 1489 .driver = {
1490 .name = "ucc_uart", 1490 .name = "ucc_uart",
1491 .match_table = ucc_uart_match, 1491 .owner = THIS_MODULE,
1492 .of_match_table = ucc_uart_match,
1493 },
1492 .probe = ucc_uart_probe, 1494 .probe = ucc_uart_probe,
1493 .remove = ucc_uart_remove, 1495 .remove = ucc_uart_remove,
1494}; 1496};
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f950b6316949..91c2f4f3af10 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -117,6 +117,16 @@ config SPI_DAVINCI
117 help 117 help
118 SPI master controller for DaVinci and DA8xx SPI modules. 118 SPI master controller for DaVinci and DA8xx SPI modules.
119 119
120config SPI_EP93XX
121 tristate "Cirrus Logic EP93xx SPI controller"
122 depends on ARCH_EP93XX
123 help
124 This enables using the Cirrus EP93xx SPI controller in master
125 mode.
126
127 To compile this driver as a module, choose M here. The module will be
128 called ep93xx_spi.
129
120config SPI_GPIO 130config SPI_GPIO
121 tristate "GPIO-based bitbanging SPI Master" 131 tristate "GPIO-based bitbanging SPI Master"
122 depends on GENERIC_GPIO 132 depends on GENERIC_GPIO
@@ -165,6 +175,13 @@ config SPI_MPC52xx_PSC
165 This enables using the Freescale MPC52xx Programmable Serial 175 This enables using the Freescale MPC52xx Programmable Serial
166 Controller in master SPI mode. 176 Controller in master SPI mode.
167 177
178config SPI_MPC512x_PSC
179 tristate "Freescale MPC512x PSC SPI controller"
180 depends on SPI_MASTER && PPC_MPC512x
181 help
182 This enables using the Freescale MPC5121 Programmable Serial
183 Controller in SPI master mode.
184
168config SPI_MPC8xxx 185config SPI_MPC8xxx
169 tristate "Freescale MPC8xxx SPI controller" 186 tristate "Freescale MPC8xxx SPI controller"
170 depends on FSL_SOC 187 depends on FSL_SOC
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index d7d0f89b797b..e9cbd18217a0 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o
21obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o 21obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o
22obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o 22obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o
23obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o 23obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o
24obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o
24obj-$(CONFIG_SPI_GPIO) += spi_gpio.o 25obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
25obj-$(CONFIG_SPI_IMX) += spi_imx.o 26obj-$(CONFIG_SPI_IMX) += spi_imx.o
26obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o 27obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
@@ -30,6 +31,7 @@ obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
30obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o 31obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o
31obj-$(CONFIG_SPI_ORION) += orion_spi.o 32obj-$(CONFIG_SPI_ORION) += orion_spi.o
32obj-$(CONFIG_SPI_PL022) += amba-pl022.o 33obj-$(CONFIG_SPI_PL022) += amba-pl022.o
34obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o
33obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o 35obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
34obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o 36obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
35obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o 37obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index e9aeee16d922..f0a1418ce660 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -102,13 +102,21 @@
102/* 102/*
103 * SSP Control Register 0 - SSP_CR0 103 * SSP Control Register 0 - SSP_CR0
104 */ 104 */
105#define SSP_CR0_MASK_DSS (0x1FUL << 0) 105#define SSP_CR0_MASK_DSS (0x0FUL << 0)
106#define SSP_CR0_MASK_HALFDUP (0x1UL << 5) 106#define SSP_CR0_MASK_FRF (0x3UL << 4)
107#define SSP_CR0_MASK_SPO (0x1UL << 6) 107#define SSP_CR0_MASK_SPO (0x1UL << 6)
108#define SSP_CR0_MASK_SPH (0x1UL << 7) 108#define SSP_CR0_MASK_SPH (0x1UL << 7)
109#define SSP_CR0_MASK_SCR (0xFFUL << 8) 109#define SSP_CR0_MASK_SCR (0xFFUL << 8)
110#define SSP_CR0_MASK_CSS (0x1FUL << 16) 110
111#define SSP_CR0_MASK_FRF (0x3UL << 21) 111/*
112 * The ST version of this block moves some bits
113 * in SSP_CR0 and extends it to 32 bits
114 */
115#define SSP_CR0_MASK_DSS_ST (0x1FUL << 0)
116#define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5)
117#define SSP_CR0_MASK_CSS_ST (0x1FUL << 16)
118#define SSP_CR0_MASK_FRF_ST (0x3UL << 21)
119
112 120
113/* 121/*
114 * SSP Control Register 1 - SSP_CR1 122
@@ -117,16 +125,18 @@
117#define SSP_CR1_MASK_SSE (0x1UL << 1) 125#define SSP_CR1_MASK_SSE (0x1UL << 1)
118#define SSP_CR1_MASK_MS (0x1UL << 2) 126#define SSP_CR1_MASK_MS (0x1UL << 2)
119#define SSP_CR1_MASK_SOD (0x1UL << 3) 127#define SSP_CR1_MASK_SOD (0x1UL << 3)
120#define SSP_CR1_MASK_RENDN (0x1UL << 4)
121#define SSP_CR1_MASK_TENDN (0x1UL << 5)
122#define SSP_CR1_MASK_MWAIT (0x1UL << 6)
123#define SSP_CR1_MASK_RXIFLSEL (0x7UL << 7)
124#define SSP_CR1_MASK_TXIFLSEL (0x7UL << 10)
125 128
126/* 129/*
127 * SSP Data Register - SSP_DR 130 * The ST version of this block adds some bits
131 * in SSP_CR1
128 */ 132 */
129#define SSP_DR_MASK_DATA 0xFFFFFFFF 133#define SSP_CR1_MASK_RENDN_ST (0x1UL << 4)
134#define SSP_CR1_MASK_TENDN_ST (0x1UL << 5)
135#define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6)
136#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7)
137#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10)
138/* This one is only in the PL023 variant */
139#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13)
130 140
131/* 141/*
132 * SSP Status Register - SSP_SR 142 * SSP Status Register - SSP_SR
@@ -134,7 +144,7 @@
134#define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */ 144#define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */
135#define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */ 145#define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
136#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */ 146#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */
137#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ 147#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
138#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */ 148#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */
139 149
140/* 150/*
@@ -227,7 +237,7 @@
227/* 237/*
228 * SSP Test Data Register - SSP_TDR 238 * SSP Test Data Register - SSP_TDR
229 */ 239 */
230#define TDR_MASK_TESTDATA (0xFFFFFFFF) 240#define TDR_MASK_TESTDATA (0xFFFFFFFF)
231 241
232/* 242/*
233 * Message State 243 * Message State
@@ -235,33 +245,33 @@
235 * hold a single state value, that's why all this 245 * hold a single state value, that's why all this
236 * (void *) casting is done here. 246 * (void *) casting is done here.
237 */ 247 */
238#define STATE_START ((void *) 0) 248#define STATE_START ((void *) 0)
239#define STATE_RUNNING ((void *) 1) 249#define STATE_RUNNING ((void *) 1)
240#define STATE_DONE ((void *) 2) 250#define STATE_DONE ((void *) 2)
241#define STATE_ERROR ((void *) -1) 251#define STATE_ERROR ((void *) -1)
242 252
243/* 253/*
244 * Queue State 254 * Queue State
245 */ 255 */
246#define QUEUE_RUNNING (0) 256#define QUEUE_RUNNING (0)
247#define QUEUE_STOPPED (1) 257#define QUEUE_STOPPED (1)
248/* 258/*
249 * SSP State - Whether Enabled or Disabled 259 * SSP State - Whether Enabled or Disabled
250 */ 260 */
251#define SSP_DISABLED (0) 261#define SSP_DISABLED (0)
252#define SSP_ENABLED (1) 262#define SSP_ENABLED (1)
253 263
254/* 264/*
255 * SSP DMA State - Whether DMA Enabled or Disabled 265 * SSP DMA State - Whether DMA Enabled or Disabled
256 */ 266 */
257#define SSP_DMA_DISABLED (0) 267#define SSP_DMA_DISABLED (0)
258#define SSP_DMA_ENABLED (1) 268#define SSP_DMA_ENABLED (1)
259 269
260/* 270/*
261 * SSP Clock Defaults 271 * SSP Clock Defaults
262 */ 272 */
263#define NMDK_SSP_DEFAULT_CLKRATE 0x2 273#define SSP_DEFAULT_CLKRATE 0x2
264#define NMDK_SSP_DEFAULT_PRESCALE 0x40 274#define SSP_DEFAULT_PRESCALE 0x40
265 275
266/* 276/*
267 * SSP Clock Parameter ranges 277 * SSP Clock Parameter ranges
@@ -307,16 +317,22 @@ enum ssp_writing {
307 * @fifodepth: depth of FIFOs (both) 317 * @fifodepth: depth of FIFOs (both)
308 * @max_bpw: maximum number of bits per word 318 * @max_bpw: maximum number of bits per word
309 * @unidir: supports unidirectional transfers 319
320 * @extended_cr: 32 bit wide control register 0 with extra
321 * features and extra features in CR1 as found in the ST variants
322 * @pl023: supports a subset of the ST extensions called "PL023"
310 */ 323 */
311struct vendor_data { 324struct vendor_data {
312 int fifodepth; 325 int fifodepth;
313 int max_bpw; 326 int max_bpw;
314 bool unidir; 327 bool unidir;
328 bool extended_cr;
329 bool pl023;
315}; 330};
316 331
317/** 332/**
318 * struct pl022 - This is the private SSP driver data structure 333 * struct pl022 - This is the private SSP driver data structure
319 * @adev: AMBA device model hookup 334 * @adev: AMBA device model hookup
335 * @vendor: Vendor data for the IP block
320 * @phybase: The physical memory where the SSP device resides 336 * @phybase: The physical memory where the SSP device resides
321 * @virtbase: The virtual memory where the SSP is mapped 337 * @virtbase: The virtual memory where the SSP is mapped
322 * @master: SPI framework hookup 338 * @master: SPI framework hookup
@@ -369,7 +385,8 @@ struct pl022 {
369 385
370/** 386/**
371 * struct chip_data - To maintain runtime state of SSP for each client chip 387 * struct chip_data - To maintain runtime state of SSP for each client chip
372 * @cr0: Value of control register CR0 of SSP 388 * @cr0: Value of control register CR0 of SSP - on later ST variants this
389 * register is 32 bits wide rather than just 16
373 * @cr1: Value of control register CR1 of SSP 390 * @cr1: Value of control register CR1 of SSP
374 * @dmacr: Value of DMA control Register of SSP 391 * @dmacr: Value of DMA control Register of SSP
375 * @cpsr: Value of Clock prescale register 392 * @cpsr: Value of Clock prescale register
@@ -384,7 +401,7 @@ struct pl022 {
384 * This would be set according to the current message that would be served 401 * This would be set according to the current message that would be served
385 */ 402 */
386struct chip_data { 403struct chip_data {
387 u16 cr0; 404 u32 cr0;
388 u16 cr1; 405 u16 cr1;
389 u16 dmacr; 406 u16 dmacr;
390 u16 cpsr; 407 u16 cpsr;
@@ -517,7 +534,10 @@ static void restore_state(struct pl022 *pl022)
517{ 534{
518 struct chip_data *chip = pl022->cur_chip; 535 struct chip_data *chip = pl022->cur_chip;
519 536
520 writew(chip->cr0, SSP_CR0(pl022->virtbase)); 537 if (pl022->vendor->extended_cr)
538 writel(chip->cr0, SSP_CR0(pl022->virtbase));
539 else
540 writew(chip->cr0, SSP_CR0(pl022->virtbase));
521 writew(chip->cr1, SSP_CR1(pl022->virtbase)); 541 writew(chip->cr1, SSP_CR1(pl022->virtbase));
522 writew(chip->dmacr, SSP_DMACR(pl022->virtbase)); 542 writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
523 writew(chip->cpsr, SSP_CPSR(pl022->virtbase)); 543 writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
@@ -525,38 +545,70 @@ static void restore_state(struct pl022 *pl022)
525 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); 545 writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
526} 546}
527 547
528/**
529 * load_ssp_default_config - Load default configuration for SSP
530 * @pl022: SSP driver private data structure
531 */
532
533/* 548/*
534 * Default SSP Register Values 549 * Default SSP Register Values
535 */ 550 */
536#define DEFAULT_SSP_REG_CR0 ( \ 551#define DEFAULT_SSP_REG_CR0 ( \
537 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ 552 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
538 GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \ 553 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \
539 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ 554 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
540 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ 555 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
541 GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ 556 GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
542 GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \ 557)
543 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \ 558
559/* ST versions have slightly different bit layout */
560#define DEFAULT_SSP_REG_CR0_ST ( \
561 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
562 GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \
563 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
564 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
565 GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
566 GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \
567 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \
568)
569
570/* The PL023 version is slightly different again */
571#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \
572 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
573 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
574 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
575 GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
544) 576)
545 577
546#define DEFAULT_SSP_REG_CR1 ( \ 578#define DEFAULT_SSP_REG_CR1 ( \
547 GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \ 579 GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
548 GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ 580 GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
549 GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ 581 GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
582 GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \
583)
584
585/* ST versions extend this register to use all 16 bits */
586#define DEFAULT_SSP_REG_CR1_ST ( \
587 DEFAULT_SSP_REG_CR1 | \
588 GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
589 GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
590 GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\
591 GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
592 GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \
593)
594
595/*
596 * The PL023 variant has further differences: no loopback mode, no microwire
597 * support, and a new clock feedback delay setting.
598 */
599#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
600 GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
601 GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
550 GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \ 602 GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
551 GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN, 4) | \ 603 GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
552 GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN, 5) | \ 604 GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
553 GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT, 6) |\ 605 GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
554 GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL, 7) | \ 606 GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
555 GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL, 10) \ 607 GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
556) 608)
557 609
558#define DEFAULT_SSP_REG_CPSR ( \ 610#define DEFAULT_SSP_REG_CPSR ( \
559 GEN_MASK_BITS(NMDK_SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \ 611 GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
560) 612)
561 613
562#define DEFAULT_SSP_REG_DMACR (\ 614#define DEFAULT_SSP_REG_DMACR (\
@@ -564,11 +616,22 @@ static void restore_state(struct pl022 *pl022)
564 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \ 616 GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
565) 617)
566 618
567 619/**
620 * load_ssp_default_config - Load default configuration for SSP
621 * @pl022: SSP driver private data structure
622 */
568static void load_ssp_default_config(struct pl022 *pl022) 623static void load_ssp_default_config(struct pl022 *pl022)
569{ 624{
570 writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase)); 625 if (pl022->vendor->pl023) {
571 writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase)); 626 writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
627 writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
628 } else if (pl022->vendor->extended_cr) {
629 writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
630 writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
631 } else {
632 writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
633 writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
634 }
572 writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase)); 635 writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
573 writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase)); 636 writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
574 writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); 637 writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
@@ -1008,7 +1071,7 @@ static void do_polling_transfer(void *data)
1008 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), 1071 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
1009 SSP_CR1(pl022->virtbase)); 1072 SSP_CR1(pl022->virtbase));
1010 1073
1011 dev_dbg(&pl022->adev->dev, "POLLING TRANSFER ONGOING ... \n"); 1074 dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
1012 /* FIXME: insert a timeout so we don't hang here indefinitely */ 1075 /* FIXME: insert a timeout so we don't hang here indefinitely */
1013 while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) 1076 while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
1014 readwriter(pl022); 1077 readwriter(pl022);
@@ -1148,7 +1211,6 @@ static int stop_queue(struct pl022 *pl022)
1148 * A wait_queue on the pl022->busy could be used, but then the common 1211 * A wait_queue on the pl022->busy could be used, but then the common
1149 * execution path (pump_messages) would be required to call wake_up or 1212 * execution path (pump_messages) would be required to call wake_up or
1150 * friends on every SPI message. Do this instead */ 1213 * friends on every SPI message. Do this instead */
1151 pl022->run = QUEUE_STOPPED;
1152 while (!list_empty(&pl022->queue) && pl022->busy && limit--) { 1214 while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
1153 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1215 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1154 msleep(10); 1216 msleep(10);
@@ -1157,6 +1219,7 @@ static int stop_queue(struct pl022 *pl022)
1157 1219
1158 if (!list_empty(&pl022->queue) || pl022->busy) 1220 if (!list_empty(&pl022->queue) || pl022->busy)
1159 status = -EBUSY; 1221 status = -EBUSY;
1222 else pl022->run = QUEUE_STOPPED;
1160 1223
1161 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1224 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1162 1225
@@ -1280,11 +1343,21 @@ static int verify_controller_parameters(struct pl022 *pl022,
1280 "Wait State is configured incorrectly\n"); 1343 "Wait State is configured incorrectly\n");
1281 return -EINVAL; 1344 return -EINVAL;
1282 } 1345 }
1283 if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) 1346 /* Half duplex is only available in the ST Micro version */
1284 && (chip_info->duplex != 1347 if (pl022->vendor->extended_cr) {
1285 SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { 1348 if ((chip_info->duplex !=
1286 dev_err(chip_info->dev, 1349 SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
1287 "DUPLEX is configured incorrectly\n"); 1350 && (chip_info->duplex !=
1351 SSP_MICROWIRE_CHANNEL_HALF_DUPLEX))
1352 dev_err(chip_info->dev,
1353 "Microwire duplex mode is configured incorrectly\n");
1354 return -EINVAL;
1355 } else {
1356 if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
1357 dev_err(chip_info->dev,
1358 "Microwire half duplex mode requested,"
1359 " but this is only available in the"
1360 " ST version of PL022\n");
1288 return -EINVAL; 1361 return -EINVAL;
1289 } 1362 }
1290 } 1363 }
@@ -1581,22 +1654,49 @@ static int pl022_setup(struct spi_device *spi)
1581 1654
1582 chip->cpsr = chip_info->clk_freq.cpsdvsr; 1655 chip->cpsr = chip_info->clk_freq.cpsdvsr;
1583 1656
1584 SSP_WRITE_BITS(chip->cr0, chip_info->data_size, SSP_CR0_MASK_DSS, 0); 1657 /* Special setup for the ST micro extended control registers */
1585 SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP, 5); 1658 if (pl022->vendor->extended_cr) {
1659 if (pl022->vendor->pl023) {
1660 /* These bits are only in the PL023 */
1661 SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
1662 SSP_CR1_MASK_FBCLKDEL_ST, 13);
1663 } else {
1664 /* These bits are in the PL022 but not PL023 */
1665 SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
1666 SSP_CR0_MASK_HALFDUP_ST, 5);
1667 SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
1668 SSP_CR0_MASK_CSS_ST, 16);
1669 SSP_WRITE_BITS(chip->cr0, chip_info->iface,
1670 SSP_CR0_MASK_FRF_ST, 21);
1671 SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
1672 SSP_CR1_MASK_MWAIT_ST, 6);
1673 }
1674 SSP_WRITE_BITS(chip->cr0, chip_info->data_size,
1675 SSP_CR0_MASK_DSS_ST, 0);
1676 SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx,
1677 SSP_CR1_MASK_RENDN_ST, 4);
1678 SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx,
1679 SSP_CR1_MASK_TENDN_ST, 5);
1680 SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
1681 SSP_CR1_MASK_RXIFLSEL_ST, 7);
1682 SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
1683 SSP_CR1_MASK_TXIFLSEL_ST, 10);
1684 } else {
1685 SSP_WRITE_BITS(chip->cr0, chip_info->data_size,
1686 SSP_CR0_MASK_DSS, 0);
1687 SSP_WRITE_BITS(chip->cr0, chip_info->iface,
1688 SSP_CR0_MASK_FRF, 4);
1689 }
1690 /* Stuff that is common for all versions */
1586 SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6); 1691 SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6);
1587 SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7); 1692 SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7);
1588 SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8); 1693 SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8);
1589 SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS, 16); 1694 /* Loopback is available on all versions except PL023 */
1590 SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 21); 1695 if (!pl022->vendor->pl023)
1591 SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0); 1696 SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0);
1592 SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); 1697 SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
1593 SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); 1698 SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
1594 SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); 1699 SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3);
1595 SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, SSP_CR1_MASK_RENDN, 4);
1596 SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, SSP_CR1_MASK_TENDN, 5);
1597 SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT, 6);
1598 SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL, 7);
1599 SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL, 10);
1600 1700
1601 /* Save controller_state */ 1701 /* Save controller_state */
1602 spi_set_ctldata(spi, chip); 1702 spi_set_ctldata(spi, chip);
@@ -1809,6 +1909,8 @@ static struct vendor_data vendor_arm = {
1809 .fifodepth = 8, 1909 .fifodepth = 8,
1810 .max_bpw = 16, 1910 .max_bpw = 16,
1811 .unidir = false, 1911 .unidir = false,
1912 .extended_cr = false,
1913 .pl023 = false,
1812}; 1914};
1813 1915
1814 1916
@@ -1816,6 +1918,16 @@ static struct vendor_data vendor_st = {
1816 .fifodepth = 32, 1918 .fifodepth = 32,
1817 .max_bpw = 32, 1919 .max_bpw = 32,
1818 .unidir = false, 1920 .unidir = false,
1921 .extended_cr = true,
1922 .pl023 = false,
1923};
1924
1925static struct vendor_data vendor_st_pl023 = {
1926 .fifodepth = 32,
1927 .max_bpw = 32,
1928 .unidir = false,
1929 .extended_cr = true,
1930 .pl023 = true,
1819}; 1931};
1820 1932
1821static struct amba_id pl022_ids[] = { 1933static struct amba_id pl022_ids[] = {
@@ -1837,6 +1949,18 @@ static struct amba_id pl022_ids[] = {
1837 .mask = 0xffffffff, 1949 .mask = 0xffffffff,
1838 .data = &vendor_st, 1950 .data = &vendor_st,
1839 }, 1951 },
1952 {
1953 /*
1954 * ST-Ericsson derivative "PL023" (this is not
1955 * an official ARM number), this is a PL022 SSP block
1956 * stripped to SPI mode only, it has 32bit wide
1957 * and 32 locations deep TX/RX FIFO but no extended
1958 * CR0/CR1 register
1959 */
1960 .id = 0x00080023,
1961 .mask = 0xffffffff,
1962 .data = &vendor_st_pl023,
1963 },
1840 { 0, 0 }, 1964 { 0, 0 },
1841}; 1965};
1842 1966
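The DEFAULT_SSP_REG_* values above are all assembled with GEN_MASK_BITS, which shifts a field value into position and clips it against the field's mask; a reasonable reading of the shift/mask pairs is that it expands to (((x) << (sb)) & (mask)). A hedged worked example, additionally assuming SSP_DATA_BITS_12 encodes 12-bit words as 0x0B:

        /* DSS field of the ARM-variant default CR0 (illustration only):  */
        /* GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0)           */
        /*      = (0x0B << 0) & 0x0F = 0x0B, i.e. a 12-bit word length.   */

On the ST variants the same field widens to the 5-bit SSP_CR0_MASK_DSS_ST, which is why chip->cr0 grows from u16 to u32 and restore_state() now uses writel() for CR0 whenever vendor->extended_cr is set.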
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
index 95afb6b77395..b85090caf7cf 100644
--- a/drivers/spi/davinci_spi.c
+++ b/drivers/spi/davinci_spi.c
@@ -301,7 +301,7 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
301 struct davinci_spi *davinci_spi; 301 struct davinci_spi *davinci_spi;
302 struct davinci_spi_platform_data *pdata; 302 struct davinci_spi_platform_data *pdata;
303 u8 bits_per_word = 0; 303 u8 bits_per_word = 0;
304 u32 hz = 0, prescale; 304 u32 hz = 0, prescale = 0, clkspeed;
305 305
306 davinci_spi = spi_master_get_devdata(spi->master); 306 davinci_spi = spi_master_get_devdata(spi->master);
307 pdata = davinci_spi->pdata; 307 pdata = davinci_spi->pdata;
@@ -338,10 +338,16 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
338 set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f, 338 set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
339 spi->chip_select); 339 spi->chip_select);
340 340
341 prescale = ((clk_get_rate(davinci_spi->clk) / hz) - 1) & 0xff; 341 clkspeed = clk_get_rate(davinci_spi->clk);
342 if (hz > clkspeed / 2)
343 prescale = 1 << 8;
344 if (hz < clkspeed / 256)
345 prescale = 255 << 8;
346 if (!prescale)
347 prescale = ((clkspeed / hz - 1) << 8) & 0x0000ff00;
342 348
343 clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select); 349 clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select);
344 set_fmt_bits(davinci_spi->base, prescale << 8, spi->chip_select); 350 set_fmt_bits(davinci_spi->base, prescale, spi->chip_select);
345 351
346 return 0; 352 return 0;
347} 353}
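The new DaVinci prescale logic clamps the requested rate to what the 8-bit prescaler can express before computing the divider. A worked example with a hypothetical 24 MHz module clock:

        /* requested 1 MHz:  24000000 / 1000000 - 1 = 23, so prescale = 23 << 8   */
        /* requested 20 MHz: above clkspeed / 2, clamped to prescale = 1 << 8     */
        /* requested 50 kHz: below clkspeed / 256, clamped to prescale = 255 << 8 */

Since the value is now computed already shifted into bits 8..15 of the format register, set_fmt_bits() receives prescale directly rather than prescale << 8 as before.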
diff --git a/drivers/spi/ep93xx_spi.c b/drivers/spi/ep93xx_spi.c
new file mode 100644
index 000000000000..0ba35df9a6df
--- /dev/null
+++ b/drivers/spi/ep93xx_spi.c
@@ -0,0 +1,938 @@
1/*
2 * Driver for Cirrus Logic EP93xx SPI controller.
3 *
4 * Copyright (c) 2010 Mika Westerberg
5 *
6 * Explicit FIFO handling code was inspired by amba-pl022 driver.
7 *
8 * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
9 *
10 * For more information about the SPI controller, see the documentation on the
11 * Cirrus Logic web site:
12 * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18
19#include <linux/io.h>
20#include <linux/clk.h>
21#include <linux/err.h>
22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/bitops.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/workqueue.h>
28#include <linux/sched.h>
29#include <linux/spi/spi.h>
30
31#include <mach/ep93xx_spi.h>
32
33#define SSPCR0 0x0000
34#define SSPCR0_MODE_SHIFT 6
35#define SSPCR0_SCR_SHIFT 8
36
37#define SSPCR1 0x0004
38#define SSPCR1_RIE BIT(0)
39#define SSPCR1_TIE BIT(1)
40#define SSPCR1_RORIE BIT(2)
41#define SSPCR1_LBM BIT(3)
42#define SSPCR1_SSE BIT(4)
43#define SSPCR1_MS BIT(5)
44#define SSPCR1_SOD BIT(6)
45
46#define SSPDR 0x0008
47
48#define SSPSR 0x000c
49#define SSPSR_TFE BIT(0)
50#define SSPSR_TNF BIT(1)
51#define SSPSR_RNE BIT(2)
52#define SSPSR_RFF BIT(3)
53#define SSPSR_BSY BIT(4)
54#define SSPCPSR 0x0010
55
56#define SSPIIR 0x0014
57#define SSPIIR_RIS BIT(0)
58#define SSPIIR_TIS BIT(1)
59#define SSPIIR_RORIS BIT(2)
60#define SSPICR SSPIIR
61
62/* timeout in milliseconds */
63#define SPI_TIMEOUT 5
64/* maximum depth of RX/TX FIFO */
65#define SPI_FIFO_SIZE 8
66
67/**
68 * struct ep93xx_spi - EP93xx SPI controller structure
69 * @lock: spinlock that protects concurrent accesses to fields @running,
70 * @current_msg and @msg_queue
71 * @pdev: pointer to platform device
72 * @clk: clock for the controller
73 * @regs_base: pointer to ioremap()'d registers
74 * @irq: IRQ number used by the driver
75 * @min_rate: minimum clock rate (in Hz) supported by the controller
76 * @max_rate: maximum clock rate (in Hz) supported by the controller
77 * @running: is the queue running
78 * @wq: workqueue used by the driver
79 * @msg_work: work that is queued for the driver
80 * @wait: wait here until given transfer is completed
81 * @msg_queue: queue for the messages
82 * @current_msg: message that is currently processed (or %NULL if none)
83 * @tx: current byte in transfer to transmit
84 * @rx: current byte in transfer to receive
85 * @fifo_level: how full the FIFO is (%0..%SPI_FIFO_SIZE - %1). Receiving one
86 * frame decreases this level and sending one frame increases it.
87 *
88 * This structure holds EP93xx SPI controller specific information. When
89 * @running is %true, the driver accepts transfer requests from protocol drivers.
90 * @current_msg is used to hold pointer to the message that is currently
91 * processed. If @current_msg is %NULL, it means that no processing is going
92 * on.
93 *
94 * Most of the fields are only written once and they can be accessed without
95 * taking the @lock. Fields that are accessed concurrently are: @current_msg,
96 * @running, and @msg_queue.
97 */
98struct ep93xx_spi {
99 spinlock_t lock;
100 const struct platform_device *pdev;
101 struct clk *clk;
102 void __iomem *regs_base;
103 int irq;
104 unsigned long min_rate;
105 unsigned long max_rate;
106 bool running;
107 struct workqueue_struct *wq;
108 struct work_struct msg_work;
109 struct completion wait;
110 struct list_head msg_queue;
111 struct spi_message *current_msg;
112 size_t tx;
113 size_t rx;
114 size_t fifo_level;
115};
116
117/**
118 * struct ep93xx_spi_chip - SPI device hardware settings
119 * @spi: back pointer to the SPI device
120 * @rate: max rate in hz this chip supports
121 * @div_cpsr: cpsr (pre-scaler) divider
122 * @div_scr: scr divider
123 * @dss: bits per word (4 - 16 bits)
124 * @ops: private chip operations
125 *
126 * This structure is used to store hardware register specific settings for each
127 * SPI device. Settings are written to hardware by function
128 * ep93xx_spi_chip_setup().
129 */
130struct ep93xx_spi_chip {
131 const struct spi_device *spi;
132 unsigned long rate;
133 u8 div_cpsr;
134 u8 div_scr;
135 u8 dss;
136 struct ep93xx_spi_chip_ops *ops;
137};
138
139/* converts bits per word to CR0.DSS value */
140#define bits_per_word_to_dss(bpw) ((bpw) - 1)
141
142static inline void
143ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
144{
145 __raw_writeb(value, espi->regs_base + reg);
146}
147
148static inline u8
149ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
150{
151 return __raw_readb(spi->regs_base + reg);
152}
153
154static inline void
155ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
156{
157 __raw_writew(value, espi->regs_base + reg);
158}
159
160static inline u16
161ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
162{
163 return __raw_readw(spi->regs_base + reg);
164}
165
166static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
167{
168 u8 regval;
169 int err;
170
171 err = clk_enable(espi->clk);
172 if (err)
173 return err;
174
175 regval = ep93xx_spi_read_u8(espi, SSPCR1);
176 regval |= SSPCR1_SSE;
177 ep93xx_spi_write_u8(espi, SSPCR1, regval);
178
179 return 0;
180}
181
182static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
183{
184 u8 regval;
185
186 regval = ep93xx_spi_read_u8(espi, SSPCR1);
187 regval &= ~SSPCR1_SSE;
188 ep93xx_spi_write_u8(espi, SSPCR1, regval);
189
190 clk_disable(espi->clk);
191}
192
193static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
194{
195 u8 regval;
196
197 regval = ep93xx_spi_read_u8(espi, SSPCR1);
198 regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
199 ep93xx_spi_write_u8(espi, SSPCR1, regval);
200}
201
202static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
203{
204 u8 regval;
205
206 regval = ep93xx_spi_read_u8(espi, SSPCR1);
207 regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
208 ep93xx_spi_write_u8(espi, SSPCR1, regval);
209}
210
211/**
212 * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
213 * @espi: ep93xx SPI controller struct
214 * @chip: divisors are calculated for this chip
215 * @rate: desired SPI output clock rate
216 *
217 * Function calculates cpsr (clock pre-scaler) and scr divisors based on
218 * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If,
219 * for some reason, divisors cannot be calculated nothing is stored and
220 * %-EINVAL is returned.
221 */
222static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
223 struct ep93xx_spi_chip *chip,
224 unsigned long rate)
225{
226 unsigned long spi_clk_rate = clk_get_rate(espi->clk);
227 int cpsr, scr;
228
229 /*
230 * Make sure that max value is between values supported by the
231 * controller. Note that minimum value is already checked in
232 * ep93xx_spi_transfer().
233 */
234 rate = clamp(rate, espi->min_rate, espi->max_rate);
235
236 /*
237 * Calculate divisors so that we can get the speed according to the
238 * following formula:
239 * rate = spi_clock_rate / (cpsr * (1 + scr))
240 *
241 * cpsr must be an even number starting from 2; scr can be any number
242 * between 0 and 255.
243 */
244 for (cpsr = 2; cpsr <= 254; cpsr += 2) {
245 for (scr = 0; scr <= 255; scr++) {
246 if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
247 chip->div_scr = (u8)scr;
248 chip->div_cpsr = (u8)cpsr;
249 return 0;
250 }
251 }
252 }
253
254 return -EINVAL;
255}
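/*
 * Worked example (hypothetical rates, illustration only): with
 * spi_clk_rate = 10 MHz and a requested rate of 1 MHz, the outer loop
 * starts at cpsr = 2 and the inner scan stops at scr = 4, because
 * 10 MHz / (2 * (4 + 1)) = 1 MHz exactly.
 */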
256
257static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
258{
259 struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
260 int value = (spi->mode & SPI_CS_HIGH) ? control : !control;
261
262 if (chip->ops && chip->ops->cs_control)
263 chip->ops->cs_control(spi, value);
264}
265
266/**
267 * ep93xx_spi_setup() - setup an SPI device
268 * @spi: SPI device to setup
269 *
270 * This function sets up SPI device mode, speed etc. Can be called multiple
271 * times for a single device. Returns %0 in case of success, negative error in
272 * case of failure. When this function returns success, the device is
273 * deselected.
274 */
275static int ep93xx_spi_setup(struct spi_device *spi)
276{
277 struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
278 struct ep93xx_spi_chip *chip;
279
280 if (spi->bits_per_word < 4 || spi->bits_per_word > 16) {
281 dev_err(&espi->pdev->dev, "invalid bits per word %d\n",
282 spi->bits_per_word);
283 return -EINVAL;
284 }
285
286 chip = spi_get_ctldata(spi);
287 if (!chip) {
288 dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
289 spi->modalias);
290
291 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
292 if (!chip)
293 return -ENOMEM;
294
295 chip->spi = spi;
296 chip->ops = spi->controller_data;
297
298 if (chip->ops && chip->ops->setup) {
299 int ret = chip->ops->setup(spi);
300 if (ret) {
301 kfree(chip);
302 return ret;
303 }
304 }
305
306 spi_set_ctldata(spi, chip);
307 }
308
309 if (spi->max_speed_hz != chip->rate) {
310 int err;
311
312 err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
313 if (err != 0) {
314 spi_set_ctldata(spi, NULL);
315 kfree(chip);
316 return err;
317 }
318 chip->rate = spi->max_speed_hz;
319 }
320
321 chip->dss = bits_per_word_to_dss(spi->bits_per_word);
322
323 ep93xx_spi_cs_control(spi, false);
324 return 0;
325}
326
327/**
328 * ep93xx_spi_transfer() - queue message to be transferred
329 * @spi: target SPI device
330 * @msg: message to be transferred
331 *
332 * This function is called by SPI device drivers when they are going to transfer
333 * a new message. It simply puts the message in the queue and schedules the
334 * workqueue to perform the actual transfer later on.
335 *
336 * Returns %0 on success and negative error in case of failure.
337 */
338static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
339{
340 struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
341 struct spi_transfer *t;
342 unsigned long flags;
343
344 if (!msg || !msg->complete)
345 return -EINVAL;
346
347 /* first validate each transfer */
348 list_for_each_entry(t, &msg->transfers, transfer_list) {
349 if (t->bits_per_word) {
350 if (t->bits_per_word < 4 || t->bits_per_word > 16)
351 return -EINVAL;
352 }
353 if (t->speed_hz && t->speed_hz < espi->min_rate)
354 return -EINVAL;
355 }
356
357 /*
358 * Now that we own the message, let's initialize it so that it is
359 * suitable for us. We use @msg->status to signal whether there was
360 * error in transfer and @msg->state is used to hold pointer to the
361 * current transfer (or %NULL if no active current transfer).
362 */
363 msg->state = NULL;
364 msg->status = 0;
365 msg->actual_length = 0;
366
367 spin_lock_irqsave(&espi->lock, flags);
368 if (!espi->running) {
369 spin_unlock_irqrestore(&espi->lock, flags);
370 return -ESHUTDOWN;
371 }
372 list_add_tail(&msg->queue, &espi->msg_queue);
373 queue_work(espi->wq, &espi->msg_work);
374 spin_unlock_irqrestore(&espi->lock, flags);
375
376 return 0;
377}
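/*
 * Caller-side sketch (illustration only, not part of this driver): a
 * protocol driver reaches ep93xx_spi_transfer() through the generic SPI
 * core, roughly like so:
 *
 *	struct spi_transfer t = { .tx_buf = buf, .len = sizeof(buf) };
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	err = spi_async(spi, &m);	-- queued, later served by the workqueue
 */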
378
379/**
380 * ep93xx_spi_cleanup() - cleans up master controller specific state
381 * @spi: SPI device to cleanup
382 *
383 * This function releases master controller specific state for given @spi
384 * device.
385 */
386static void ep93xx_spi_cleanup(struct spi_device *spi)
387{
388 struct ep93xx_spi_chip *chip;
389
390 chip = spi_get_ctldata(spi);
391 if (chip) {
392 if (chip->ops && chip->ops->cleanup)
393 chip->ops->cleanup(spi);
394 spi_set_ctldata(spi, NULL);
395 kfree(chip);
396 }
397}
398
399/**
400 * ep93xx_spi_chip_setup() - configures hardware according to given @chip
401 * @espi: ep93xx SPI controller struct
402 * @chip: chip specific settings
403 *
404 * This function sets up the actual hardware registers with settings given in
405 * @chip. Note that no validation is done so make sure that callers validate
406 * settings before calling this.
407 */
408static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
409 const struct ep93xx_spi_chip *chip)
410{
411 u16 cr0;
412
413 cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
414 cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
415 cr0 |= chip->dss;
416
417 dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
418 chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
419 dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);
420
421 ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
422 ep93xx_spi_write_u16(espi, SSPCR0, cr0);
423}
424
425static inline int bits_per_word(const struct ep93xx_spi *espi)
426{
427 struct spi_message *msg = espi->current_msg;
428 struct spi_transfer *t = msg->state;
429
430 return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word;
431}
432
433static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
434{
435 if (bits_per_word(espi) > 8) {
436 u16 tx_val = 0;
437
438 if (t->tx_buf)
439 tx_val = ((u16 *)t->tx_buf)[espi->tx];
440 ep93xx_spi_write_u16(espi, SSPDR, tx_val);
441 espi->tx += sizeof(tx_val);
442 } else {
443 u8 tx_val = 0;
444
445 if (t->tx_buf)
446 tx_val = ((u8 *)t->tx_buf)[espi->tx];
447 ep93xx_spi_write_u8(espi, SSPDR, tx_val);
448 espi->tx += sizeof(tx_val);
449 }
450}
451
452static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
453{
454 if (bits_per_word(espi) > 8) {
455 u16 rx_val;
456
457 rx_val = ep93xx_spi_read_u16(espi, SSPDR);
458 if (t->rx_buf)
459 ((u16 *)t->rx_buf)[espi->rx] = rx_val;
460 espi->rx += sizeof(rx_val);
461 } else {
462 u8 rx_val;
463
464 rx_val = ep93xx_spi_read_u8(espi, SSPDR);
465 if (t->rx_buf)
466 ((u8 *)t->rx_buf)[espi->rx] = rx_val;
467 espi->rx += sizeof(rx_val);
468 }
469}
470
471/**
472 * ep93xx_spi_read_write() - perform next RX/TX transfer
473 * @espi: ep93xx SPI controller struct
474 *
475 * This function transfers the next bytes (or half-words) to/from the RX/TX
476 * FIFOs. If called several times, the whole transfer will be completed.
477 * Returns %-EINPROGRESS when the current transfer is not yet complete, otherwise %0.
478 *
479 * When this function is finished, RX FIFO should be empty and TX FIFO should be
480 * full.
481 */
482static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
483{
484 struct spi_message *msg = espi->current_msg;
485 struct spi_transfer *t = msg->state;
486
487 /* read as long as RX FIFO has frames in it */
488 while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
489 ep93xx_do_read(espi, t);
490 espi->fifo_level--;
491 }
492
493 /* write as long as TX FIFO has room */
494 while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
495 ep93xx_do_write(espi, t);
496 espi->fifo_level++;
497 }
498
499 if (espi->rx == t->len) {
500 msg->actual_length += t->len;
501 return 0;
502 }
503
504 return -EINPROGRESS;
505}
506
507/**
508 * ep93xx_spi_process_transfer() - processes one SPI transfer
509 * @espi: ep93xx SPI controller struct
510 * @msg: current message
511 * @t: transfer to process
512 *
513 * This function processes one SPI transfer given in @t. It waits until the
514 * transfer is complete (and may sleep) and updates @msg->status based on
515 * whether the transfer was successfully processed or not.
516 */
517static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
518 struct spi_message *msg,
519 struct spi_transfer *t)
520{
521 struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
522
523 msg->state = t;
524
525 /*
526 * Handle any transfer specific settings if needed. We use
527 * temporary chip settings here and restore original later when
528 * the transfer is finished.
529 */
530 if (t->speed_hz || t->bits_per_word) {
531 struct ep93xx_spi_chip tmp_chip = *chip;
532
533 if (t->speed_hz) {
534 int err;
535
536 err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
537 t->speed_hz);
538 if (err) {
539 dev_err(&espi->pdev->dev,
540 "failed to adjust speed\n");
541 msg->status = err;
542 return;
543 }
544 }
545
546 if (t->bits_per_word)
547 tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);
548
549 /*
550 * Set up temporary new hw settings for this transfer.
551 */
552 ep93xx_spi_chip_setup(espi, &tmp_chip);
553 }
554
555 espi->rx = 0;
556 espi->tx = 0;
557
558 /*
559 * Now everything is set up for the current transfer. We prime the TX
560 * FIFO, enable interrupts, and wait for the transfer to complete.
561 */
562 if (ep93xx_spi_read_write(espi)) {
563 ep93xx_spi_enable_interrupts(espi);
564 wait_for_completion(&espi->wait);
565 }
566
567 /*
568 * In case of error during transmit, we bail out from processing
569 * the message.
570 */
571 if (msg->status)
572 return;
573
574 /*
575 * After this transfer is finished, perform any possible
576 * post-transfer actions requested by the protocol driver.
577 */
578 if (t->delay_usecs) {
579 set_current_state(TASK_UNINTERRUPTIBLE);
580 schedule_timeout(usecs_to_jiffies(t->delay_usecs));
581 }
582 if (t->cs_change) {
583 if (!list_is_last(&t->transfer_list, &msg->transfers)) {
584 /*
585 * In case the protocol driver asks us to drop the
586 * chipselect briefly, we let the scheduler handle
587 * any "delay" here.
588 */
589 ep93xx_spi_cs_control(msg->spi, false);
590 cond_resched();
591 ep93xx_spi_cs_control(msg->spi, true);
592 }
593 }
594
595 if (t->speed_hz || t->bits_per_word)
596 ep93xx_spi_chip_setup(espi, chip);
597}
598
599/**
600 * ep93xx_spi_process_message() - process one SPI message
601 * @espi: ep93xx SPI controller struct
602 * @msg: message to process
603 *
604 * This function processes a single SPI message. We go through all transfers in
605 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
606 * asserted during the whole message (unless per transfer cs_change is set).
607 *
608 * @msg->status contains %0 in case of success or a negative error code in
609 * case of failure.
610 */
611static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
612 struct spi_message *msg)
613{
614 unsigned long timeout;
615 struct spi_transfer *t;
616 int err;
617
618 /*
619 * Enable the SPI controller and its clock.
620 */
621 err = ep93xx_spi_enable(espi);
622 if (err) {
623 dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
624 msg->status = err;
625 return;
626 }
627
628 /*
629 * Just to be sure: flush any data from RX FIFO.
630 */
631 timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
632 while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
633 if (time_after(jiffies, timeout)) {
634 dev_warn(&espi->pdev->dev,
635 "timeout while flushing RX FIFO\n");
636 msg->status = -ETIMEDOUT;
637 return;
638 }
639 ep93xx_spi_read_u16(espi, SSPDR);
640 }
641
642 /*
643 * We explicitly handle the FIFO level. This way we don't have to check TX
644 * FIFO status using the %SSPSR_TNF bit, which may cause RX FIFO overruns.
645 */
646 espi->fifo_level = 0;
647
648 /*
649 * Update SPI controller registers according to spi device and assert
650 * the chipselect.
651 */
652 ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
653 ep93xx_spi_cs_control(msg->spi, true);
654
655 list_for_each_entry(t, &msg->transfers, transfer_list) {
656 ep93xx_spi_process_transfer(espi, msg, t);
657 if (msg->status)
658 break;
659 }
660
661 /*
662 * Now the whole message is transferred (or failed for some reason). We
663 * deselect the device and disable the SPI controller.
664 */
665 ep93xx_spi_cs_control(msg->spi, false);
666 ep93xx_spi_disable(espi);
667}
668
669#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))
670
671/**
672 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
673 * @work: work struct
674 *
675 * Workqueue worker function. This function is called when there are new
676 * SPI messages to be processed. Message is taken out from the queue and then
677 * passed to ep93xx_spi_process_message().
678 *
679 * After the message is transferred, the protocol driver is notified by calling
680 * @msg->complete(). In case of error, @msg->status is set to a negative error
681 * number; otherwise it contains zero (and @msg->actual_length is updated).
682 */
683static void ep93xx_spi_work(struct work_struct *work)
684{
685 struct ep93xx_spi *espi = work_to_espi(work);
686 struct spi_message *msg;
687
688 spin_lock_irq(&espi->lock);
689 if (!espi->running || espi->current_msg ||
690 list_empty(&espi->msg_queue)) {
691 spin_unlock_irq(&espi->lock);
692 return;
693 }
694 msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
695 list_del_init(&msg->queue);
696 espi->current_msg = msg;
697 spin_unlock_irq(&espi->lock);
698
699 ep93xx_spi_process_message(espi, msg);
700
701 /*
702 * Update the current message and re-schedule ourselves if there are
703 * more messages in the queue.
704 */
705 spin_lock_irq(&espi->lock);
706 espi->current_msg = NULL;
707 if (espi->running && !list_empty(&espi->msg_queue))
708 queue_work(espi->wq, &espi->msg_work);
709 spin_unlock_irq(&espi->lock);
710
711 /* notify the protocol driver that we are done with this message */
712 msg->complete(msg->context);
713}
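From a protocol driver's point of view, this queue/worker pair is driven through the generic SPI core calls; a usage sketch follows (the device, buffers, and helper names are assumed for illustration):

static void example_msg_complete(void *context)
{
	complete(context);	/* runs from ep93xx_spi_work() above */
}

static int example_xfer(struct spi_device *spi, const void *tx, void *rx,
			size_t len)
{
	struct spi_transfer xfer = { .tx_buf = tx, .rx_buf = rx, .len = len };
	struct spi_message msg;
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	msg.complete = example_msg_complete;
	msg.context = &done;

	ret = spi_async(spi, &msg);	/* lands in this driver's transfer() */
	if (ret)
		return ret;

	wait_for_completion(&done);
	return msg.status;
}

(The SPI core's spi_sync() wraps essentially this pattern.)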
714
715static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
716{
717 struct ep93xx_spi *espi = dev_id;
718 u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);
719
720 /*
721 * If we got ROR (receive overrun) interrupt we know that something is
722 * wrong. Just abort the message.
723 */
724 if (unlikely(irq_status & SSPIIR_RORIS)) {
725 /* clear the overrun interrupt */
726 ep93xx_spi_write_u8(espi, SSPICR, 0);
727 dev_warn(&espi->pdev->dev,
728 "receive overrun, aborting the message\n");
729 espi->current_msg->status = -EIO;
730 } else {
731 /*
732 * Interrupt is either RX (RIS) or TX (TIS). In both cases we
733 * simply execute the next data transfer.
734 */
735 if (ep93xx_spi_read_write(espi)) {
736 /*
737 * In the normal case, there is still some processing left
738 * for the current transfer. Let's wait for the next
739 * interrupt then.
740 */
741 return IRQ_HANDLED;
742 }
743 }
744
745 /*
746 * Current transfer is finished, either with error or with success. In
747 * any case we disable interrupts and notify the worker to handle
748 * any post-processing of the message.
749 */
750 ep93xx_spi_disable_interrupts(espi);
751 complete(&espi->wait);
752 return IRQ_HANDLED;
753}
754
755static int __init ep93xx_spi_probe(struct platform_device *pdev)
756{
757 struct spi_master *master;
758 struct ep93xx_spi_info *info;
759 struct ep93xx_spi *espi;
760 struct resource *res;
761 int error;
762
763 info = pdev->dev.platform_data;
764
765 master = spi_alloc_master(&pdev->dev, sizeof(*espi));
766 if (!master) {
767 dev_err(&pdev->dev, "failed to allocate spi master\n");
768 return -ENOMEM;
769 }
770
771 master->setup = ep93xx_spi_setup;
772 master->transfer = ep93xx_spi_transfer;
773 master->cleanup = ep93xx_spi_cleanup;
774 master->bus_num = pdev->id;
775 master->num_chipselect = info->num_chipselect;
776 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
777
778 platform_set_drvdata(pdev, master);
779
780 espi = spi_master_get_devdata(master);
781
782 espi->clk = clk_get(&pdev->dev, NULL);
783 if (IS_ERR(espi->clk)) {
784 dev_err(&pdev->dev, "unable to get spi clock\n");
785 error = PTR_ERR(espi->clk);
786 goto fail_release_master;
787 }
788
789 spin_lock_init(&espi->lock);
790 init_completion(&espi->wait);
791
792 /*
793 * Calculate maximum and minimum supported clock rates
794 * for the controller.
795 */
796 espi->max_rate = clk_get_rate(espi->clk) / 2;
797 espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
798 espi->pdev = pdev;
799
800 espi->irq = platform_get_irq(pdev, 0);
801 if (espi->irq < 0) {
802 error = -EBUSY;
803 dev_err(&pdev->dev, "failed to get irq resources\n");
804 goto fail_put_clock;
805 }
806
807 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
808 if (!res) {
809 dev_err(&pdev->dev, "unable to get iomem resource\n");
810 error = -ENODEV;
811 goto fail_put_clock;
812 }
813
814 res = request_mem_region(res->start, resource_size(res), pdev->name);
815 if (!res) {
816 dev_err(&pdev->dev, "unable to request iomem resources\n");
817 error = -EBUSY;
818 goto fail_put_clock;
819 }
820
821 espi->regs_base = ioremap(res->start, resource_size(res));
822 if (!espi->regs_base) {
823 dev_err(&pdev->dev, "failed to map resources\n");
824 error = -ENODEV;
825 goto fail_free_mem;
826 }
827
828 error = request_irq(espi->irq, ep93xx_spi_interrupt, 0,
829 "ep93xx-spi", espi);
830 if (error) {
831 dev_err(&pdev->dev, "failed to request irq\n");
832 goto fail_unmap_regs;
833 }
834
835 espi->wq = create_singlethread_workqueue("ep93xx_spid");
836 if (!espi->wq) {
837 dev_err(&pdev->dev, "unable to create workqueue\n");
838 error = -ENOMEM; goto fail_free_irq;
839 }
840 INIT_WORK(&espi->msg_work, ep93xx_spi_work);
841 INIT_LIST_HEAD(&espi->msg_queue);
842 espi->running = true;
843
844 /* make sure that the hardware is disabled */
845 ep93xx_spi_write_u8(espi, SSPCR1, 0);
846
847 error = spi_register_master(master);
848 if (error) {
849 dev_err(&pdev->dev, "failed to register SPI master\n");
850 goto fail_free_queue;
851 }
852
853 dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
854 (unsigned long)res->start, espi->irq);
855
856 return 0;
857
858fail_free_queue:
859 destroy_workqueue(espi->wq);
860fail_free_irq:
861 free_irq(espi->irq, espi);
862fail_unmap_regs:
863 iounmap(espi->regs_base);
864fail_free_mem:
865 release_mem_region(res->start, resource_size(res));
866fail_put_clock:
867 clk_put(espi->clk);
868fail_release_master:
869 spi_master_put(master);
870 platform_set_drvdata(pdev, NULL);
871
872 return error;
873}
874
875static int __exit ep93xx_spi_remove(struct platform_device *pdev)
876{
877 struct spi_master *master = platform_get_drvdata(pdev);
878 struct ep93xx_spi *espi = spi_master_get_devdata(master);
879 struct resource *res;
880
881 spin_lock_irq(&espi->lock);
882 espi->running = false;
883 spin_unlock_irq(&espi->lock);
884
885 destroy_workqueue(espi->wq);
886
887 /*
888 * Complete remaining messages with %-ESHUTDOWN status.
889 */
890 spin_lock_irq(&espi->lock);
891 while (!list_empty(&espi->msg_queue)) {
892 struct spi_message *msg;
893
894 msg = list_first_entry(&espi->msg_queue,
895 struct spi_message, queue);
896 list_del_init(&msg->queue);
897 msg->status = -ESHUTDOWN;
898 spin_unlock_irq(&espi->lock);
899 msg->complete(msg->context);
900 spin_lock_irq(&espi->lock);
901 }
902 spin_unlock_irq(&espi->lock);
903
904 free_irq(espi->irq, espi);
905 iounmap(espi->regs_base);
906 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
907 release_mem_region(res->start, resource_size(res));
908 clk_put(espi->clk);
909 platform_set_drvdata(pdev, NULL);
910
911 spi_unregister_master(master);
912 return 0;
913}
914
915static struct platform_driver ep93xx_spi_driver = {
916 .driver = {
917 .name = "ep93xx-spi",
918 .owner = THIS_MODULE,
919 },
920 .remove = __exit_p(ep93xx_spi_remove),
921};
922
923static int __init ep93xx_spi_init(void)
924{
925 return platform_driver_probe(&ep93xx_spi_driver, ep93xx_spi_probe);
926}
927module_init(ep93xx_spi_init);
928
929static void __exit ep93xx_spi_exit(void)
930{
931 platform_driver_unregister(&ep93xx_spi_driver);
932}
933module_exit(ep93xx_spi_exit);
934
935MODULE_DESCRIPTION("EP93xx SPI Controller driver");
936MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
937MODULE_LICENSE("GPL");
938MODULE_ALIAS("platform:ep93xx-spi");
diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/mpc512x_psc_spi.c
new file mode 100644
index 000000000000..28a126d2742b
--- /dev/null
+++ b/drivers/spi/mpc512x_psc_spi.c
@@ -0,0 +1,576 @@
1/*
2 * MPC512x PSC in SPI mode driver.
3 *
4 * Copyright (C) 2007,2008 Freescale Semiconductor Inc.
5 * Original port from 52xx driver:
6 * Hongjun Chen <hong-jun.chen@freescale.com>
7 *
8 * Fork of mpc52xx_psc_spi.c:
9 * Copyright (C) 2006 TOPTICA Photonics AG., Dragos Carp
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/errno.h>
21#include <linux/interrupt.h>
22#include <linux/of_platform.h>
23#include <linux/workqueue.h>
24#include <linux/completion.h>
25#include <linux/io.h>
26#include <linux/delay.h>
27#include <linux/clk.h>
28#include <linux/spi/spi.h>
29#include <linux/fsl_devices.h>
30#include <asm/mpc52xx_psc.h>
31
32struct mpc512x_psc_spi {
33 void (*cs_control)(struct spi_device *spi, bool on);
34 u32 sysclk;
35
36 /* driver internal data */
37 struct mpc52xx_psc __iomem *psc;
38 struct mpc512x_psc_fifo __iomem *fifo;
39 unsigned int irq;
40 u8 bits_per_word;
41 u8 busy;
42 u32 mclk;
43 u8 eofbyte;
44
45 struct workqueue_struct *workqueue;
46 struct work_struct work;
47
48 struct list_head queue;
49 spinlock_t lock; /* Message queue lock */
50
51 struct completion done;
52};
53
54/* controller state */
55struct mpc512x_psc_spi_cs {
56 int bits_per_word;
57 int speed_hz;
58};
59
60/* set clock freq, clock ramp, bits per word;
61 * if t is NULL then reset the values to the defaults
62 */
63static int mpc512x_psc_spi_transfer_setup(struct spi_device *spi,
64 struct spi_transfer *t)
65{
66 struct mpc512x_psc_spi_cs *cs = spi->controller_state;
67
68 cs->speed_hz = (t && t->speed_hz)
69 ? t->speed_hz : spi->max_speed_hz;
70 cs->bits_per_word = (t && t->bits_per_word)
71 ? t->bits_per_word : spi->bits_per_word;
72 cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8;
73 return 0;
74}
75
76static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
77{
78 struct mpc512x_psc_spi_cs *cs = spi->controller_state;
79 struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
80 struct mpc52xx_psc __iomem *psc = mps->psc;
81 u32 sicr;
82 u32 ccr;
83 u16 bclkdiv;
84
85 sicr = in_be32(&psc->sicr);
86
87 /* Set clock phase and polarity */
88 if (spi->mode & SPI_CPHA)
89 sicr |= 0x00001000;
90 else
91 sicr &= ~0x00001000;
92
93 if (spi->mode & SPI_CPOL)
94 sicr |= 0x00002000;
95 else
96 sicr &= ~0x00002000;
97
98 if (spi->mode & SPI_LSB_FIRST)
99 sicr |= 0x10000000;
100 else
101 sicr &= ~0x10000000;
102 out_be32(&psc->sicr, sicr);
103
104 ccr = in_be32(&psc->ccr);
105 ccr &= 0xFF000000;
106 if (cs->speed_hz)
107 bclkdiv = (mps->mclk / cs->speed_hz) - 1;
108 else
109 bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */
110
111 ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
112 out_be32(&psc->ccr, ccr);
113 mps->bits_per_word = cs->bits_per_word;
114
115 if (mps->cs_control)
116 mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
117}
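As a worked illustration of the CCR packing above (clock values assumed, not taken from a datasheet): with mclk = 160 MHz and cs->speed_hz = 1 MHz, bclkdiv = 160000000 / 1000000 - 1 = 159 = 0x009f, so CCR bits [23:16] receive the low byte 0x9f and bits [15:8] the high byte 0x00, giving a bit clock of mclk / (bclkdiv + 1) = 1 MHz.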
118
119static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi)
120{
121 struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
122
123 if (mps->cs_control)
124 mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
125
126}
127
128/* extract and scale size field in txsz or rxsz */
129#define MPC512x_PSC_FIFO_SZ(sz) (((sz) & 0x7ff) << 2)
130
131#define EOFBYTE 1
132
133static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
134 struct spi_transfer *t)
135{
136 struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
137 struct mpc52xx_psc __iomem *psc = mps->psc;
138 struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
139 size_t len = t->len;
140 u8 *tx_buf = (u8 *)t->tx_buf;
141 u8 *rx_buf = (u8 *)t->rx_buf;
142
143 if (!tx_buf && !rx_buf && t->len)
144 return -EINVAL;
145
146 /* Zero MR2 */
147 in_8(&psc->mode);
148 out_8(&psc->mode, 0x0);
149
150 while (len) {
151 int count;
152 int i;
153 u8 data;
154 size_t fifosz;
155 int rxcount;
156
157 /*
158 * The number of bytes that can be sent at a time
159 * depends on the fifo size.
160 */
161 fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->txsz));
162 count = min(fifosz, len);
163
164 for (i = count; i > 0; i--) {
165 data = tx_buf ? *tx_buf++ : 0;
166 if (len == EOFBYTE)
167 setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF);
168 out_8(&fifo->txdata_8, data);
169 len--;
170 }
171
172 INIT_COMPLETION(mps->done);
173
174 /* interrupt on tx fifo empty */
175 out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
176 out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY);
177
178 /* enable transmitter/receiver */
179 out_8(&psc->command,
180 MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
181
182 wait_for_completion(&mps->done);
183
184 mdelay(1);
185
186 /* rx fifo should have count bytes in it */
187 rxcount = in_be32(&fifo->rxcnt);
188 if (rxcount != count)
189 mdelay(1);
190
191 rxcount = in_be32(&fifo->rxcnt);
192 if (rxcount != count) {
193 dev_warn(&spi->dev, "expected %d bytes in rx fifo "
194 "but got %d\n", count, rxcount);
195 }
196
197 rxcount = min(rxcount, count);
198 for (i = rxcount; i > 0; i--) {
199 data = in_8(&fifo->rxdata_8);
200 if (rx_buf)
201 *rx_buf++ = data;
202 }
203 while (in_be32(&fifo->rxcnt)) {
204 in_8(&fifo->rxdata_8);
205 }
206
207 out_8(&psc->command,
208 MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
209 }
210 /* disable transmitter/receiver and fifo interrupts */
211 out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
212 out_be32(&fifo->tximr, 0);
213 return 0;
214}
215
216static void mpc512x_psc_spi_work(struct work_struct *work)
217{
218 struct mpc512x_psc_spi *mps = container_of(work,
219 struct mpc512x_psc_spi,
220 work);
221
222 spin_lock_irq(&mps->lock);
223 mps->busy = 1;
224 while (!list_empty(&mps->queue)) {
225 struct spi_message *m;
226 struct spi_device *spi;
227 struct spi_transfer *t = NULL;
228 unsigned cs_change;
229 int status;
230
231 m = container_of(mps->queue.next, struct spi_message, queue);
232 list_del_init(&m->queue);
233 spin_unlock_irq(&mps->lock);
234
235 spi = m->spi;
236 cs_change = 1;
237 status = 0;
238 list_for_each_entry(t, &m->transfers, transfer_list) {
239 if (t->bits_per_word || t->speed_hz) {
240 status = mpc512x_psc_spi_transfer_setup(spi, t);
241 if (status < 0)
242 break;
243 }
244
245 if (cs_change)
246 mpc512x_psc_spi_activate_cs(spi);
247 cs_change = t->cs_change;
248
249 status = mpc512x_psc_spi_transfer_rxtx(spi, t);
250 if (status)
251 break;
252 m->actual_length += t->len;
253
254 if (t->delay_usecs)
255 udelay(t->delay_usecs);
256
257 if (cs_change)
258 mpc512x_psc_spi_deactivate_cs(spi);
259 }
260
261 m->status = status;
262 m->complete(m->context);
263
264 if (status || !cs_change)
265 mpc512x_psc_spi_deactivate_cs(spi);
266
267 mpc512x_psc_spi_transfer_setup(spi, NULL);
268
269 spin_lock_irq(&mps->lock);
270 }
271 mps->busy = 0;
272 spin_unlock_irq(&mps->lock);
273}
274
275static int mpc512x_psc_spi_setup(struct spi_device *spi)
276{
277 struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
278 struct mpc512x_psc_spi_cs *cs = spi->controller_state;
279 unsigned long flags;
280
281 if (spi->bits_per_word % 8)
282 return -EINVAL;
283
284 if (!cs) {
285 cs = kzalloc(sizeof *cs, GFP_KERNEL);
286 if (!cs)
287 return -ENOMEM;
288 spi->controller_state = cs;
289 }
290
291 cs->bits_per_word = spi->bits_per_word;
292 cs->speed_hz = spi->max_speed_hz;
293
294 spin_lock_irqsave(&mps->lock, flags);
295 if (!mps->busy)
296 mpc512x_psc_spi_deactivate_cs(spi);
297 spin_unlock_irqrestore(&mps->lock, flags);
298
299 return 0;
300}
301
302static int mpc512x_psc_spi_transfer(struct spi_device *spi,
303 struct spi_message *m)
304{
305 struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
306 unsigned long flags;
307
308 m->actual_length = 0;
309 m->status = -EINPROGRESS;
310
311 spin_lock_irqsave(&mps->lock, flags);
312 list_add_tail(&m->queue, &mps->queue);
313 queue_work(mps->workqueue, &mps->work);
314 spin_unlock_irqrestore(&mps->lock, flags);
315
316 return 0;
317}
318
319static void mpc512x_psc_spi_cleanup(struct spi_device *spi)
320{
321 kfree(spi->controller_state);
322}
323
324static int mpc512x_psc_spi_port_config(struct spi_master *master,
325 struct mpc512x_psc_spi *mps)
326{
327 struct mpc52xx_psc __iomem *psc = mps->psc;
328 struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
329 struct clk *spiclk;
330 int ret = 0;
331 char name[32];
332 u32 sicr;
333 u32 ccr;
334 u16 bclkdiv;
335
336 sprintf(name, "psc%d_mclk", master->bus_num);
337 spiclk = clk_get(&master->dev, name);
338 clk_enable(spiclk);
339 mps->mclk = clk_get_rate(spiclk);
340 clk_put(spiclk);
341
342 /* Reset the PSC into a known state */
343 out_8(&psc->command, MPC52xx_PSC_RST_RX);
344 out_8(&psc->command, MPC52xx_PSC_RST_TX);
345 out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
346
347 /* Disable PSC interrupts; all useful interrupts are in the FIFO */
348 out_be16(&psc->isr_imr.imr, 0);
349
350 /* Disable fifo interrupts, will be enabled later */
351 out_be32(&fifo->tximr, 0);
352 out_be32(&fifo->rximr, 0);
353
354 /* Setup fifo slice address and size */
355 /*out_be32(&fifo->txsz, 0x0fe00004);*/
356 /*out_be32(&fifo->rxsz, 0x0ff00004);*/
357
358 sicr = 0x01000000 | /* SIM = 0001 -- 8 bit */
359 0x00800000 | /* GenClk = 1 -- internal clk */
360 0x00008000 | /* SPI = 1 */
361 0x00004000 | /* MSTR = 1 -- SPI master */
362 0x00000800; /* UseEOF = 1 -- SS low until EOF */
363
364 out_be32(&psc->sicr, sicr);
365
366 ccr = in_be32(&psc->ccr);
367 ccr &= 0xFF000000;
368 bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */
369 ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
370 out_be32(&psc->ccr, ccr);
371
372 /* Set 2ms DTL delay */
373 out_8(&psc->ctur, 0x00);
374 out_8(&psc->ctlr, 0x82);
375
376 /* we don't use the alarms */
377 out_be32(&fifo->rxalarm, 0xfff);
378 out_be32(&fifo->txalarm, 0);
379
380 /* Enable FIFO slices for Rx/Tx */
381 out_be32(&fifo->rxcmd,
382 MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA);
383 out_be32(&fifo->txcmd,
384 MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA);
385
386 mps->bits_per_word = 8;
387
388 return ret;
389}
390
391static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
392{
393 struct mpc512x_psc_spi *mps = (struct mpc512x_psc_spi *)dev_id;
394 struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
395
396 /* clear interrupt and wake up the work queue */
397 if (in_be32(&fifo->txisr) &
398 in_be32(&fifo->tximr) & MPC512x_PSC_FIFO_EMPTY) {
399 out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
400 out_be32(&fifo->tximr, 0);
401 complete(&mps->done);
402 return IRQ_HANDLED;
403 }
404 return IRQ_NONE;
405}
406
407/* bus_num is used only for the case dev->platform_data == NULL */
408static int __init mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
409 u32 size, unsigned int irq,
410 s16 bus_num)
411{
412 struct fsl_spi_platform_data *pdata = dev->platform_data;
413 struct mpc512x_psc_spi *mps;
414 struct spi_master *master;
415 int ret;
416 void *tempp;
417
418 master = spi_alloc_master(dev, sizeof *mps);
419 if (master == NULL)
420 return -ENOMEM;
421
422 dev_set_drvdata(dev, master);
423 mps = spi_master_get_devdata(master);
424 mps->irq = irq;
425
426 if (pdata == NULL) {
427 dev_err(dev, "probe called without platform data, no "
428 "cs_control function will be called\n");
429 mps->cs_control = NULL;
430 mps->sysclk = 0;
431 master->bus_num = bus_num;
432 master->num_chipselect = 255;
433 } else {
434 mps->cs_control = pdata->cs_control;
435 mps->sysclk = pdata->sysclk;
436 master->bus_num = pdata->bus_num;
437 master->num_chipselect = pdata->max_chipselect;
438 }
439
440 master->setup = mpc512x_psc_spi_setup;
441 master->transfer = mpc512x_psc_spi_transfer;
442 master->cleanup = mpc512x_psc_spi_cleanup;
443
444 tempp = ioremap(regaddr, size);
445 if (!tempp) {
446 dev_err(dev, "could not ioremap I/O port range\n");
447 ret = -EFAULT;
448 goto free_master;
449 }
450 mps->psc = tempp;
451 mps->fifo =
452 (struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc));
453
454 ret = request_irq(mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED,
455 "mpc512x-psc-spi", mps);
456 if (ret)
457 goto free_master;
458
459 ret = mpc512x_psc_spi_port_config(master, mps);
460 if (ret < 0)
461 goto free_irq;
462
463 spin_lock_init(&mps->lock);
464 init_completion(&mps->done);
465 INIT_WORK(&mps->work, mpc512x_psc_spi_work);
466 INIT_LIST_HEAD(&mps->queue);
467
468 mps->workqueue =
469 create_singlethread_workqueue(dev_name(master->dev.parent));
470 if (mps->workqueue == NULL) {
471 ret = -EBUSY;
472 goto free_irq;
473 }
474
475 ret = spi_register_master(master);
476 if (ret < 0)
477 goto unreg_master;
478
479 return ret;
480
481unreg_master:
482 destroy_workqueue(mps->workqueue);
483free_irq:
484 free_irq(mps->irq, mps);
485free_master:
486 if (mps->psc)
487 iounmap(mps->psc);
488 spi_master_put(master);
489
490 return ret;
491}
492
493static int __exit mpc512x_psc_spi_do_remove(struct device *dev)
494{
495 struct spi_master *master = dev_get_drvdata(dev);
496 struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
497
498 flush_workqueue(mps->workqueue);
499 destroy_workqueue(mps->workqueue);
500 spi_unregister_master(master);
501 free_irq(mps->irq, mps);
502 if (mps->psc)
503 iounmap(mps->psc);
504
505 return 0;
506}
507
508static int __init mpc512x_psc_spi_of_probe(struct of_device *op,
509 const struct of_device_id *match)
510{
511 const u32 *regaddr_p;
512 u64 regaddr64, size64;
513 s16 id = -1;
514
515 regaddr_p = of_get_address(op->node, 0, &size64, NULL);
516 if (!regaddr_p) {
517 dev_err(&op->dev, "Invalid PSC address\n");
518 return -EINVAL;
519 }
520 regaddr64 = of_translate_address(op->node, regaddr_p);
521
522 /* get PSC id (0..11, used by port_config) */
523 if (op->dev.platform_data == NULL) {
524 const u32 *psc_nump;
525
526 psc_nump = of_get_property(op->node, "cell-index", NULL);
527 if (!psc_nump || *psc_nump > 11) {
528 dev_err(&op->dev, "mpc512x_psc_spi: Device node %s "
529 "has invalid cell-index property\n",
530 op->node->full_name);
531 return -EINVAL;
532 }
533 id = *psc_nump;
534 }
535
536 return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64,
537 irq_of_parse_and_map(op->node, 0), id);
538}
539
540static int __exit mpc512x_psc_spi_of_remove(struct of_device *op)
541{
542 return mpc512x_psc_spi_do_remove(&op->dev);
543}
544
545static struct of_device_id mpc512x_psc_spi_of_match[] = {
546 { .compatible = "fsl,mpc5121-psc-spi", },
547 {},
548};
549
550MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match);
551
552static struct of_platform_driver mpc512x_psc_spi_of_driver = {
553 .match_table = mpc512x_psc_spi_of_match,
554 .probe = mpc512x_psc_spi_of_probe,
555 .remove = __exit_p(mpc512x_psc_spi_of_remove),
556 .driver = {
557 .name = "mpc512x-psc-spi",
558 .owner = THIS_MODULE,
559 },
560};
561
562static int __init mpc512x_psc_spi_init(void)
563{
564 return of_register_platform_driver(&mpc512x_psc_spi_of_driver);
565}
566module_init(mpc512x_psc_spi_init);
567
568static void __exit mpc512x_psc_spi_exit(void)
569{
570 of_unregister_platform_driver(&mpc512x_psc_spi_of_driver);
571}
572module_exit(mpc512x_psc_spi_exit);
573
574MODULE_AUTHOR("John Rigby");
575MODULE_DESCRIPTION("MPC512x PSC SPI Driver");
576MODULE_LICENSE("GPL");
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 77d4cc88edea..7104cb739da7 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -472,18 +472,18 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
472 s16 id = -1; 472 s16 id = -1;
473 int rc; 473 int rc;
474 474
475 regaddr_p = of_get_address(op->node, 0, &size64, NULL); 475 regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL);
476 if (!regaddr_p) { 476 if (!regaddr_p) {
477 dev_err(&op->dev, "Invalid PSC address\n"); 477 dev_err(&op->dev, "Invalid PSC address\n");
478 return -EINVAL; 478 return -EINVAL;
479 } 479 }
480 regaddr64 = of_translate_address(op->node, regaddr_p); 480 regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
481 481
482 /* get PSC id (1..6, used by port_config) */ 482 /* get PSC id (1..6, used by port_config) */
483 if (op->dev.platform_data == NULL) { 483 if (op->dev.platform_data == NULL) {
484 const u32 *psc_nump; 484 const u32 *psc_nump;
485 485
486 psc_nump = of_get_property(op->node, "cell-index", NULL); 486 psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL);
487 if (!psc_nump || *psc_nump > 5) { 487 if (!psc_nump || *psc_nump > 5) {
488 dev_err(&op->dev, "Invalid cell-index property\n"); 488 dev_err(&op->dev, "Invalid cell-index property\n");
489 return -EINVAL; 489 return -EINVAL;
@@ -492,9 +492,10 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
492 } 492 }
493 493
494 rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, 494 rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
495 irq_of_parse_and_map(op->node, 0), id); 495 irq_of_parse_and_map(op->dev.of_node, 0), id);
496 if (rc == 0) 496 if (rc == 0)
497 of_register_spi_devices(dev_get_drvdata(&op->dev), op->node); 497 of_register_spi_devices(dev_get_drvdata(&op->dev),
498 op->dev.of_node);
498 499
499 return rc; 500 return rc;
500} 501}
@@ -513,14 +514,12 @@ static const struct of_device_id mpc52xx_psc_spi_of_match[] = {
513MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match); 514MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match);
514 515
515static struct of_platform_driver mpc52xx_psc_spi_of_driver = { 516static struct of_platform_driver mpc52xx_psc_spi_of_driver = {
516 .owner = THIS_MODULE,
517 .name = "mpc52xx-psc-spi",
518 .match_table = mpc52xx_psc_spi_of_match,
519 .probe = mpc52xx_psc_spi_of_probe, 517 .probe = mpc52xx_psc_spi_of_probe,
520 .remove = __exit_p(mpc52xx_psc_spi_of_remove), 518 .remove = __exit_p(mpc52xx_psc_spi_of_remove),
521 .driver = { 519 .driver = {
522 .name = "mpc52xx-psc-spi", 520 .name = "mpc52xx-psc-spi",
523 .owner = THIS_MODULE, 521 .owner = THIS_MODULE,
522 .of_match_table = mpc52xx_psc_spi_of_match,
524 }, 523 },
525}; 524};
526 525
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c
index cd68f1ce5cc3..b1a76bff775f 100644
--- a/drivers/spi/mpc52xx_spi.c
+++ b/drivers/spi/mpc52xx_spi.c
@@ -403,7 +403,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
403 403
404 /* MMIO registers */ 404 /* MMIO registers */
405 dev_dbg(&op->dev, "probing mpc5200 SPI device\n"); 405 dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
406 regs = of_iomap(op->node, 0); 406 regs = of_iomap(op->dev.of_node, 0);
407 if (!regs) 407 if (!regs)
408 return -ENODEV; 408 return -ENODEV;
409 409
@@ -445,11 +445,11 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
445 ms = spi_master_get_devdata(master); 445 ms = spi_master_get_devdata(master);
446 ms->master = master; 446 ms->master = master;
447 ms->regs = regs; 447 ms->regs = regs;
448 ms->irq0 = irq_of_parse_and_map(op->node, 0); 448 ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0);
449 ms->irq1 = irq_of_parse_and_map(op->node, 1); 449 ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1);
450 ms->state = mpc52xx_spi_fsmstate_idle; 450 ms->state = mpc52xx_spi_fsmstate_idle;
451 ms->ipb_freq = mpc5xxx_get_bus_frequency(op->node); 451 ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
452 ms->gpio_cs_count = of_gpio_count(op->node); 452 ms->gpio_cs_count = of_gpio_count(op->dev.of_node);
453 if (ms->gpio_cs_count > 0) { 453 if (ms->gpio_cs_count > 0) {
454 master->num_chipselect = ms->gpio_cs_count; 454 master->num_chipselect = ms->gpio_cs_count;
455 ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int), 455 ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int),
@@ -460,7 +460,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
460 } 460 }
461 461
462 for (i = 0; i < ms->gpio_cs_count; i++) { 462 for (i = 0; i < ms->gpio_cs_count; i++) {
463 gpio_cs = of_get_gpio(op->node, i); 463 gpio_cs = of_get_gpio(op->dev.of_node, i);
464 if (gpio_cs < 0) { 464 if (gpio_cs < 0) {
465 dev_err(&op->dev, 465 dev_err(&op->dev,
466 "could not parse the gpio field " 466 "could not parse the gpio field "
@@ -512,7 +512,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op,
512 if (rc) 512 if (rc)
513 goto err_register; 513 goto err_register;
514 514
515 of_register_spi_devices(master, op->node); 515 of_register_spi_devices(master, op->dev.of_node);
516 dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n"); 516 dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n");
517 517
518 return rc; 518 return rc;
@@ -558,9 +558,11 @@ static const struct of_device_id mpc52xx_spi_match[] __devinitconst = {
558MODULE_DEVICE_TABLE(of, mpc52xx_spi_match); 558MODULE_DEVICE_TABLE(of, mpc52xx_spi_match);
559 559
560static struct of_platform_driver mpc52xx_spi_of_driver = { 560static struct of_platform_driver mpc52xx_spi_of_driver = {
561 .owner = THIS_MODULE, 561 .driver = {
562 .name = "mpc52xx-spi", 562 .name = "mpc52xx-spi",
563 .match_table = mpc52xx_spi_match, 563 .owner = THIS_MODULE,
564 .of_match_table = mpc52xx_spi_match,
565 },
564 .probe = mpc52xx_spi_probe, 566 .probe = mpc52xx_spi_probe,
565 .remove = __exit_p(mpc52xx_spi_remove), 567 .remove = __exit_p(mpc52xx_spi_remove),
566}; 568};
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index e0de0d0eedea..b3a94ca0a75a 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -38,7 +38,7 @@
38 38
39#include <plat/dma.h> 39#include <plat/dma.h>
40#include <plat/clock.h> 40#include <plat/clock.h>
41 41#include <plat/mcspi.h>
42 42
43#define OMAP2_MCSPI_MAX_FREQ 48000000 43#define OMAP2_MCSPI_MAX_FREQ 48000000
44 44
@@ -113,7 +113,7 @@ struct omap2_mcspi_dma {
113/* use PIO for small transfers, avoiding DMA setup/teardown overhead and 113/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
114 * cache operations; better heuristics consider wordsize and bitrate. 114 * cache operations; better heuristics consider wordsize and bitrate.
115 */ 115 */
116#define DMA_MIN_BYTES 8 116#define DMA_MIN_BYTES 160
117 117
118 118
119struct omap2_mcspi { 119struct omap2_mcspi {
@@ -229,6 +229,8 @@ static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
229 229
230 l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0; 230 l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
231 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l); 231 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
232 /* Flush posted writes */
233 mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
232} 234}
233 235
234static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active) 236static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
@@ -303,11 +305,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
303 unsigned int count, c; 305 unsigned int count, c;
304 unsigned long base, tx_reg, rx_reg; 306 unsigned long base, tx_reg, rx_reg;
305 int word_len, data_type, element_count; 307 int word_len, data_type, element_count;
308 int elements;
309 u32 l;
306 u8 * rx; 310 u8 * rx;
307 const u8 * tx; 311 const u8 * tx;
308 312
309 mcspi = spi_master_get_devdata(spi->master); 313 mcspi = spi_master_get_devdata(spi->master);
310 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 314 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
315 l = mcspi_cached_chconf0(spi);
311 316
312 count = xfer->len; 317 count = xfer->len;
313 c = count; 318 c = count;
@@ -346,8 +351,12 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
346 } 351 }
347 352
348 if (rx != NULL) { 353 if (rx != NULL) {
354 elements = element_count - 1;
355 if (l & OMAP2_MCSPI_CHCONF_TURBO)
356 elements--;
357
349 omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, 358 omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
350 data_type, element_count - 1, 1, 359 data_type, elements, 1,
351 OMAP_DMA_SYNC_ELEMENT, 360 OMAP_DMA_SYNC_ELEMENT,
352 mcspi_dma->dma_rx_sync_dev, 1); 361 mcspi_dma->dma_rx_sync_dev, 1);
353 362
@@ -379,17 +388,42 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
379 wait_for_completion(&mcspi_dma->dma_rx_completion); 388 wait_for_completion(&mcspi_dma->dma_rx_completion);
380 dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE); 389 dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE);
381 omap2_mcspi_set_enable(spi, 0); 390 omap2_mcspi_set_enable(spi, 0);
391
392 if (l & OMAP2_MCSPI_CHCONF_TURBO) {
393
394 if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
395 & OMAP2_MCSPI_CHSTAT_RXS)) {
396 u32 w;
397
398 w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
399 if (word_len <= 8)
400 ((u8 *)xfer->rx_buf)[elements++] = w;
401 else if (word_len <= 16)
402 ((u16 *)xfer->rx_buf)[elements++] = w;
403 else /* word_len <= 32 */
404 ((u32 *)xfer->rx_buf)[elements++] = w;
405 } else {
406 dev_err(&spi->dev,
407 "DMA RX penultimate word empty");
408 count -= (word_len <= 8) ? 2 :
409 (word_len <= 16) ? 4 :
410 /* word_len <= 32 */ 8;
411 omap2_mcspi_set_enable(spi, 1);
412 return count;
413 }
414 }
415
382 if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) 416 if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
383 & OMAP2_MCSPI_CHSTAT_RXS)) { 417 & OMAP2_MCSPI_CHSTAT_RXS)) {
384 u32 w; 418 u32 w;
385 419
386 w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); 420 w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
387 if (word_len <= 8) 421 if (word_len <= 8)
388 ((u8 *)xfer->rx_buf)[element_count - 1] = w; 422 ((u8 *)xfer->rx_buf)[elements] = w;
389 else if (word_len <= 16) 423 else if (word_len <= 16)
390 ((u16 *)xfer->rx_buf)[element_count - 1] = w; 424 ((u16 *)xfer->rx_buf)[elements] = w;
391 else /* word_len <= 32 */ 425 else /* word_len <= 32 */
392 ((u32 *)xfer->rx_buf)[element_count - 1] = w; 426 ((u32 *)xfer->rx_buf)[elements] = w;
393 } else { 427 } else {
394 dev_err(&spi->dev, "DMA RX last word empty"); 428 dev_err(&spi->dev, "DMA RX last word empty");
395 count -= (word_len <= 8) ? 1 : 429 count -= (word_len <= 8) ? 1 :
@@ -433,7 +467,6 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
433 word_len = cs->word_len; 467 word_len = cs->word_len;
434 468
435 l = mcspi_cached_chconf0(spi); 469 l = mcspi_cached_chconf0(spi);
436 l &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
437 470
438 /* We store the pre-calculated register addresses on stack to speed 471 /* We store the pre-calculated register addresses on stack to speed
439 * up the transfer loop. */ 472 * up the transfer loop. */
@@ -468,11 +501,26 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
468 dev_err(&spi->dev, "RXS timed out\n"); 501 dev_err(&spi->dev, "RXS timed out\n");
469 goto out; 502 goto out;
470 } 503 }
471 /* prevent last RX_ONLY read from triggering 504
472 * more word i/o: switch to rx+tx 505 if (c == 1 && tx == NULL &&
473 */ 506 (l & OMAP2_MCSPI_CHCONF_TURBO)) {
474 if (c == 0 && tx == NULL) 507 omap2_mcspi_set_enable(spi, 0);
475 mcspi_write_chconf0(spi, l); 508 *rx++ = __raw_readl(rx_reg);
509#ifdef VERBOSE
510 dev_dbg(&spi->dev, "read-%d %02x\n",
511 word_len, *(rx - 1));
512#endif
513 if (mcspi_wait_for_reg_bit(chstat_reg,
514 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
515 dev_err(&spi->dev,
516 "RXS timed out\n");
517 goto out;
518 }
519 c = 0;
520 } else if (c == 0 && tx == NULL) {
521 omap2_mcspi_set_enable(spi, 0);
522 }
523
476 *rx++ = __raw_readl(rx_reg); 524 *rx++ = __raw_readl(rx_reg);
477#ifdef VERBOSE 525#ifdef VERBOSE
478 dev_dbg(&spi->dev, "read-%d %02x\n", 526 dev_dbg(&spi->dev, "read-%d %02x\n",
@@ -506,11 +554,26 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
506 dev_err(&spi->dev, "RXS timed out\n"); 554 dev_err(&spi->dev, "RXS timed out\n");
507 goto out; 555 goto out;
508 } 556 }
509 /* prevent last RX_ONLY read from triggering 557
510 * more word i/o: switch to rx+tx 558 if (c == 2 && tx == NULL &&
511 */ 559 (l & OMAP2_MCSPI_CHCONF_TURBO)) {
512 if (c == 0 && tx == NULL) 560 omap2_mcspi_set_enable(spi, 0);
513 mcspi_write_chconf0(spi, l); 561 *rx++ = __raw_readl(rx_reg);
562#ifdef VERBOSE
563 dev_dbg(&spi->dev, "read-%d %04x\n",
564 word_len, *(rx - 1));
565#endif
566 if (mcspi_wait_for_reg_bit(chstat_reg,
567 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
568 dev_err(&spi->dev,
569 "RXS timed out\n");
570 goto out;
571 }
572 c = 0;
573 } else if (c == 0 && tx == NULL) {
574 omap2_mcspi_set_enable(spi, 0);
575 }
576
514 *rx++ = __raw_readl(rx_reg); 577 *rx++ = __raw_readl(rx_reg);
515#ifdef VERBOSE 578#ifdef VERBOSE
516 dev_dbg(&spi->dev, "read-%d %04x\n", 579 dev_dbg(&spi->dev, "read-%d %04x\n",
@@ -544,11 +607,26 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
544 dev_err(&spi->dev, "RXS timed out\n"); 607 dev_err(&spi->dev, "RXS timed out\n");
545 goto out; 608 goto out;
546 } 609 }
547 /* prevent last RX_ONLY read from triggering 610
548 * more word i/o: switch to rx+tx 611 if (c == 4 && tx == NULL &&
549 */ 612 (l & OMAP2_MCSPI_CHCONF_TURBO)) {
550 if (c == 0 && tx == NULL) 613 omap2_mcspi_set_enable(spi, 0);
551 mcspi_write_chconf0(spi, l); 614 *rx++ = __raw_readl(rx_reg);
615#ifdef VERBOSE
616 dev_dbg(&spi->dev, "read-%d %08x\n",
617 word_len, *(rx - 1));
618#endif
619 if (mcspi_wait_for_reg_bit(chstat_reg,
620 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
621 dev_err(&spi->dev,
622 "RXS timed out\n");
623 goto out;
624 }
625 c = 0;
626 } else if (c == 0 && tx == NULL) {
627 omap2_mcspi_set_enable(spi, 0);
628 }
629
552 *rx++ = __raw_readl(rx_reg); 630 *rx++ = __raw_readl(rx_reg);
553#ifdef VERBOSE 631#ifdef VERBOSE
554 dev_dbg(&spi->dev, "read-%d %08x\n", 632 dev_dbg(&spi->dev, "read-%d %08x\n",
@@ -568,6 +646,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
568 dev_err(&spi->dev, "EOT timed out\n"); 646 dev_err(&spi->dev, "EOT timed out\n");
569 } 647 }
570out: 648out:
649 omap2_mcspi_set_enable(spi, 1);
571 return count - c; 650 return count - c;
572} 651}
573 652
@@ -755,7 +834,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
755 struct omap2_mcspi_cs *cs; 834 struct omap2_mcspi_cs *cs;
756 835
757 mcspi = spi_master_get_devdata(spi->master); 836 mcspi = spi_master_get_devdata(spi->master);
758 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
759 837
760 if (spi->controller_state) { 838 if (spi->controller_state) {
761 /* Unlink controller state from context save list */ 839 /* Unlink controller state from context save list */
@@ -765,13 +843,17 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
765 kfree(spi->controller_state); 843 kfree(spi->controller_state);
766 } 844 }
767 845
768 if (mcspi_dma->dma_rx_channel != -1) { 846 if (spi->chip_select < spi->master->num_chipselect) {
769 omap_free_dma(mcspi_dma->dma_rx_channel); 847 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
770 mcspi_dma->dma_rx_channel = -1; 848
771 } 849 if (mcspi_dma->dma_rx_channel != -1) {
772 if (mcspi_dma->dma_tx_channel != -1) { 850 omap_free_dma(mcspi_dma->dma_rx_channel);
773 omap_free_dma(mcspi_dma->dma_tx_channel); 851 mcspi_dma->dma_rx_channel = -1;
774 mcspi_dma->dma_tx_channel = -1; 852 }
853 if (mcspi_dma->dma_tx_channel != -1) {
854 omap_free_dma(mcspi_dma->dma_tx_channel);
855 mcspi_dma->dma_tx_channel = -1;
856 }
775 } 857 }
776} 858}
777 859
@@ -797,6 +879,7 @@ static void omap2_mcspi_work(struct work_struct *work)
797 struct spi_transfer *t = NULL; 879 struct spi_transfer *t = NULL;
798 int cs_active = 0; 880 int cs_active = 0;
799 struct omap2_mcspi_cs *cs; 881 struct omap2_mcspi_cs *cs;
882 struct omap2_mcspi_device_config *cd;
800 int par_override = 0; 883 int par_override = 0;
801 int status = 0; 884 int status = 0;
802 u32 chconf; 885 u32 chconf;
@@ -809,6 +892,7 @@ static void omap2_mcspi_work(struct work_struct *work)
809 892
810 spi = m->spi; 893 spi = m->spi;
811 cs = spi->controller_state; 894 cs = spi->controller_state;
895 cd = spi->controller_data;
812 896
813 omap2_mcspi_set_enable(spi, 1); 897 omap2_mcspi_set_enable(spi, 1);
814 list_for_each_entry(t, &m->transfers, transfer_list) { 898 list_for_each_entry(t, &m->transfers, transfer_list) {
@@ -832,10 +916,19 @@ static void omap2_mcspi_work(struct work_struct *work)
832 916
833 chconf = mcspi_cached_chconf0(spi); 917 chconf = mcspi_cached_chconf0(spi);
834 chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; 918 chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
919 chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
920
835 if (t->tx_buf == NULL) 921 if (t->tx_buf == NULL)
836 chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY; 922 chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
837 else if (t->rx_buf == NULL) 923 else if (t->rx_buf == NULL)
838 chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY; 924 chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
925
926 if (cd && cd->turbo_mode && t->tx_buf == NULL) {
927 /* Turbo mode is for more than one word */
928 if (t->len > ((cs->word_len + 7) >> 3))
929 chconf |= OMAP2_MCSPI_CHCONF_TURBO;
930 }
931
839 mcspi_write_chconf0(spi, chconf); 932 mcspi_write_chconf0(spi, chconf);
840 933
841 if (t->len) { 934 if (t->len) {
diff --git a/drivers/spi/spi_bitbang_txrx.h b/drivers/spi/spi_bitbang_txrx.h
new file mode 100644
index 000000000000..fc033bbf9180
--- /dev/null
+++ b/drivers/spi/spi_bitbang_txrx.h
@@ -0,0 +1,93 @@
1/*
2 * Mix this utility code with some glue code to get one of several types of
3 * simple SPI master driver. Two do polled word-at-a-time I/O:
4 *
5 * - GPIO/parport bitbangers. Provide chipselect() and txrx_word[](),
6 * expanding the per-word routines from the inline templates below.
7 *
8 * - Drivers for controllers resembling bare shift registers. Provide
9 * chipselect() and txrx_word[](), with custom setup()/cleanup() methods
10 * that use your controller's clock and chipselect registers.
11 *
12 * Some hardware works well with requests at spi_transfer scope:
13 *
14 * - Drivers leveraging smarter hardware, with fifos or DMA; or for half
15 * duplex (MicroWire) controllers. Provide chipselect() and txrx_bufs(),
16 * and custom setup()/cleanup() methods.
17 */
18
19/*
20 * The code that knows what GPIO pins do what should have declared four
21 * functions, ideally as inlines, before including this header:
22 *
23 * void setsck(struct spi_device *, int is_on);
24 * void setmosi(struct spi_device *, int is_on);
25 * int getmiso(struct spi_device *);
26 * void spidelay(unsigned);
27 *
28 * setsck()'s is_on parameter is a zero/nonzero boolean.
29 *
30 * setmosi()'s is_on parameter is a zero/nonzero boolean.
31 *
32 * getmiso() is required to return 0 or 1 only. Any other value is invalid
33 * and will result in improper operation.
34 *
35 * A non-inlined routine would call bitbang_txrx_*() routines. The
36 * main loop could easily compile down to a handful of instructions,
37 * especially if the delay is a NOP (to run at peak speed).
38 *
39 * Since this is software, the timings may not be exactly what your board's
40 * chips need ... there may be several reasons you'd need to tweak timings
41 * in these routines, not just to make them faster or slower to match a
42 * particular CPU clock rate.
43 */
44
45static inline u32
46bitbang_txrx_be_cpha0(struct spi_device *spi,
47 unsigned nsecs, unsigned cpol,
48 u32 word, u8 bits)
49{
50 /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
51
52 /* clock starts at inactive polarity */
53 for (word <<= (32 - bits); likely(bits); bits--) {
54
55 /* setup MSB (to slave) on trailing edge */
56 setmosi(spi, word & (1 << 31));
57 spidelay(nsecs); /* T(setup) */
58
59 setsck(spi, !cpol);
60 spidelay(nsecs);
61
62 /* sample MSB (from slave) on leading edge */
63 word <<= 1;
64 word |= getmiso(spi);
65 setsck(spi, cpol);
66 }
67 return word;
68}
69
70static inline u32
71bitbang_txrx_be_cpha1(struct spi_device *spi,
72 unsigned nsecs, unsigned cpol,
73 u32 word, u8 bits)
74{
75 /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
76
77 /* clock starts at inactive polarity */
78 for (word <<= (32 - bits); likely(bits); bits--) {
79
80 /* setup MSB (to slave) on leading edge */
81 setsck(spi, !cpol);
82 setmosi(spi, word & (1 << 31));
83 spidelay(nsecs); /* T(setup) */
84
85 setsck(spi, cpol);
86 spidelay(nsecs);
87
88 /* sample MSB (from slave) on trailing edge */
89 word <<= 1;
90 word |= getmiso(spi);
91 }
92 return word;
93}
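A minimal example of the glue that the header comment above asks for, loosely modeled on the GPIO-based users converted in the diffs below; the GPIO numbers and macro names here are assumed, not taken from any real board:

#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>

#define EXAMPLE_GPIO_SCK	1	/* assumed pin assignments */
#define EXAMPLE_GPIO_MOSI	2
#define EXAMPLE_GPIO_MISO	3

static inline void setsck(struct spi_device *spi, int is_on)
{
	gpio_set_value(EXAMPLE_GPIO_SCK, is_on);
}

static inline void setmosi(struct spi_device *spi, int is_on)
{
	gpio_set_value(EXAMPLE_GPIO_MOSI, is_on);
}

static inline int getmiso(struct spi_device *spi)
{
	return !!gpio_get_value(EXAMPLE_GPIO_MISO);	/* must be 0 or 1 */
}

#define spidelay(nsecs)	ndelay(nsecs)

#include "spi_bitbang_txrx.h"

/* e.g. bitbang_txrx_be_cpha0(spi, 125, 0, word, 8) then clocks one 8-bit word */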
diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c
index c2184866fa9c..8b5281281111 100644
--- a/drivers/spi/spi_butterfly.c
+++ b/drivers/spi/spi_butterfly.c
@@ -149,8 +149,7 @@ static void butterfly_chipselect(struct spi_device *spi, int value)
149#define spidelay(X) do{}while(0) 149#define spidelay(X) do{}while(0)
150//#define spidelay ndelay 150//#define spidelay ndelay
151 151
152#define EXPAND_BITBANG_TXRX 152#include "spi_bitbang_txrx.h"
153#include <linux/spi/spi_bitbang.h>
154 153
155static u32 154static u32
156butterfly_txrx_word_mode0(struct spi_device *spi, 155butterfly_txrx_word_mode0(struct spi_device *spi,
diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c
index 26bd03e61855..7edbd5807e0e 100644
--- a/drivers/spi/spi_gpio.c
+++ b/drivers/spi/spi_gpio.c
@@ -127,8 +127,7 @@ static inline int getmiso(const struct spi_device *spi)
127 */ 127 */
128#define spidelay(nsecs) do {} while (0) 128#define spidelay(nsecs) do {} while (0)
129 129
130#define EXPAND_BITBANG_TXRX 130#include "spi_bitbang_txrx.h"
131#include <linux/spi/spi_bitbang.h>
132 131
133/* 132/*
134 * These functions can leverage inline expansion of GPIO calls to shrink 133 * These functions can leverage inline expansion of GPIO calls to shrink
diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c
index 568c781ad91c..86fb7b5993db 100644
--- a/drivers/spi/spi_lm70llp.c
+++ b/drivers/spi/spi_lm70llp.c
@@ -174,8 +174,7 @@ static inline int getmiso(struct spi_device *s)
174} 174}
175/*--------------------------------------------------------------------*/ 175/*--------------------------------------------------------------------*/
176 176
177#define EXPAND_BITBANG_TXRX 1 177#include "spi_bitbang_txrx.h"
178#include <linux/spi/spi_bitbang.h>
179 178
180static void lm70_chipselect(struct spi_device *spi, int value) 179static void lm70_chipselect(struct spi_device *spi, int value)
181{ 180{
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index e324627d97a2..ffa111a7e9d4 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -241,7 +241,6 @@ static void mpc8xxx_spi_change_mode(struct spi_device *spi)
241 241
242 /* Turn off SPI unit prior changing mode */ 242 /* Turn off SPI unit prior changing mode */
243 mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE); 243 mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);
244 mpc8xxx_spi_write_reg(mode, cs->hw_mode);
245 244
246 /* When in CPM mode, we need to reinit tx and rx. */ 245 /* When in CPM mode, we need to reinit tx and rx. */
247 if (mspi->flags & SPI_CPM_MODE) { 246 if (mspi->flags & SPI_CPM_MODE) {
@@ -258,7 +257,7 @@ static void mpc8xxx_spi_change_mode(struct spi_device *spi)
258 } 257 }
259 } 258 }
260 } 259 }
261 260 mpc8xxx_spi_write_reg(mode, cs->hw_mode);
262 local_irq_restore(flags); 261 local_irq_restore(flags);
263} 262}
264 263
@@ -287,36 +286,12 @@ static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
287 } 286 }
288} 287}
289 288
290static 289static int
291int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) 290mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
291 struct spi_device *spi,
292 struct mpc8xxx_spi *mpc8xxx_spi,
293 int bits_per_word)
292{ 294{
293 struct mpc8xxx_spi *mpc8xxx_spi;
294 u8 bits_per_word, pm;
295 u32 hz;
296 struct spi_mpc8xxx_cs *cs = spi->controller_state;
297
298 mpc8xxx_spi = spi_master_get_devdata(spi->master);
299
300 if (t) {
301 bits_per_word = t->bits_per_word;
302 hz = t->speed_hz;
303 } else {
304 bits_per_word = 0;
305 hz = 0;
306 }
307
308 /* spi_transfer level calls that work per-word */
309 if (!bits_per_word)
310 bits_per_word = spi->bits_per_word;
311
312 /* Make sure its a bit width we support [4..16, 32] */
313 if ((bits_per_word < 4)
314 || ((bits_per_word > 16) && (bits_per_word != 32)))
315 return -EINVAL;
316
317 if (!hz)
318 hz = spi->max_speed_hz;
319
320 cs->rx_shift = 0; 295 cs->rx_shift = 0;
321 cs->tx_shift = 0; 296 cs->tx_shift = 0;
322 if (bits_per_word <= 8) { 297 if (bits_per_word <= 8) {
@@ -340,19 +315,82 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
340 return -EINVAL; 315 return -EINVAL;
341 316
342 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE && 317 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE &&
343 spi->mode & SPI_LSB_FIRST) { 318 spi->mode & SPI_LSB_FIRST) {
344 cs->tx_shift = 0; 319 cs->tx_shift = 0;
345 if (bits_per_word <= 8) 320 if (bits_per_word <= 8)
346 cs->rx_shift = 8; 321 cs->rx_shift = 8;
347 else 322 else
348 cs->rx_shift = 0; 323 cs->rx_shift = 0;
349 } 324 }
350
351 mpc8xxx_spi->rx_shift = cs->rx_shift; 325 mpc8xxx_spi->rx_shift = cs->rx_shift;
352 mpc8xxx_spi->tx_shift = cs->tx_shift; 326 mpc8xxx_spi->tx_shift = cs->tx_shift;
353 mpc8xxx_spi->get_rx = cs->get_rx; 327 mpc8xxx_spi->get_rx = cs->get_rx;
354 mpc8xxx_spi->get_tx = cs->get_tx; 328 mpc8xxx_spi->get_tx = cs->get_tx;
355 329
330 return bits_per_word;
331}
332
333static int
334mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
335 struct spi_device *spi,
336 int bits_per_word)
337{
338 /* QE uses Little Endian for words > 8
339 * so transform all words > 8 into 8 bits
340 * Unfortunately that doesn't work for LSB, so
341 * reject these for now */
342 /* Note: 32 bits word, LSB works iff
343 * tfcr/rfcr is set to CPMFCR_GBL */
344 if (spi->mode & SPI_LSB_FIRST &&
345 bits_per_word > 8)
346 return -EINVAL;
347 if (bits_per_word > 8)
348 return 8; /* pretend its 8 bits */
349 return bits_per_word;
350}
351
352static
353int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
354{
355 struct mpc8xxx_spi *mpc8xxx_spi;
356 int bits_per_word;
357 u8 pm;
358 u32 hz;
359 struct spi_mpc8xxx_cs *cs = spi->controller_state;
360
361 mpc8xxx_spi = spi_master_get_devdata(spi->master);
362
363 if (t) {
364 bits_per_word = t->bits_per_word;
365 hz = t->speed_hz;
366 } else {
367 bits_per_word = 0;
368 hz = 0;
369 }
370
371 /* spi_transfer level calls that work per-word */
372 if (!bits_per_word)
373 bits_per_word = spi->bits_per_word;
374
375 /* Make sure its a bit width we support [4..16, 32] */
376 if ((bits_per_word < 4)
377 || ((bits_per_word > 16) && (bits_per_word != 32)))
378 return -EINVAL;
379
380 if (!hz)
381 hz = spi->max_speed_hz;
382
383 if (!(mpc8xxx_spi->flags & SPI_CPM_MODE))
384 bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
385 mpc8xxx_spi,
386 bits_per_word);
387 else if (mpc8xxx_spi->flags & SPI_QE)
388 bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
389 bits_per_word);
390
391 if (bits_per_word < 0)
392 return bits_per_word;
393
356 if (bits_per_word == 32) 394 if (bits_per_word == 32)
357 bits_per_word = 0; 395 bits_per_word = 0;
358 else 396 else
@@ -438,7 +476,7 @@ static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
438 dev_err(dev, "unable to map tx dma\n"); 476 dev_err(dev, "unable to map tx dma\n");
439 return -ENOMEM; 477 return -ENOMEM;
440 } 478 }
441 } else { 479 } else if (t->tx_buf) {
442 mspi->tx_dma = t->tx_dma; 480 mspi->tx_dma = t->tx_dma;
443 } 481 }
444 482
@@ -449,7 +487,7 @@ static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
449 dev_err(dev, "unable to map rx dma\n"); 487 dev_err(dev, "unable to map rx dma\n");
450 goto err_rx_dma; 488 goto err_rx_dma;
451 } 489 }
452 } else { 490 } else if (t->rx_buf) {
453 mspi->rx_dma = t->rx_dma; 491 mspi->rx_dma = t->rx_dma;
454 } 492 }
455 493
@@ -477,7 +515,7 @@ static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
477 515
478 if (mspi->map_tx_dma) 516 if (mspi->map_tx_dma)
479 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); 517 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
480 if (mspi->map_tx_dma) 518 if (mspi->map_rx_dma)
481 dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); 519 dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
482 mspi->xfer_in_progress = NULL; 520 mspi->xfer_in_progress = NULL;
483} 521}
@@ -797,7 +835,7 @@ static void mpc8xxx_spi_free_dummy_rx(void)
797static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) 835static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
798{ 836{
799 struct device *dev = mspi->dev; 837 struct device *dev = mspi->dev;
800 struct device_node *np = dev_archdata_get_node(&dev->archdata); 838 struct device_node *np = dev->of_node;
801 const u32 *iprop; 839 const u32 *iprop;
802 int size; 840 int size;
803 unsigned long spi_base_ofs; 841 unsigned long spi_base_ofs;
@@ -851,7 +889,7 @@ static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
851static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) 889static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
852{ 890{
853 struct device *dev = mspi->dev; 891 struct device *dev = mspi->dev;
854 struct device_node *np = dev_archdata_get_node(&dev->archdata); 892 struct device_node *np = dev->of_node;
855 const u32 *iprop; 893 const u32 *iprop;
856 int size; 894 int size;
857 unsigned long pram_ofs; 895 unsigned long pram_ofs;
@@ -1123,7 +1161,7 @@ static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on)
1123 1161
1124static int of_mpc8xxx_spi_get_chipselects(struct device *dev) 1162static int of_mpc8xxx_spi_get_chipselects(struct device *dev)
1125{ 1163{
1126 struct device_node *np = dev_archdata_get_node(&dev->archdata); 1164 struct device_node *np = dev->of_node;
1127 struct fsl_spi_platform_data *pdata = dev->platform_data; 1165 struct fsl_spi_platform_data *pdata = dev->platform_data;
1128 struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); 1166 struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
1129 unsigned int ngpios; 1167 unsigned int ngpios;
@@ -1224,7 +1262,7 @@ static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev,
1224 const struct of_device_id *ofid) 1262 const struct of_device_id *ofid)
1225{ 1263{
1226 struct device *dev = &ofdev->dev; 1264 struct device *dev = &ofdev->dev;
1227 struct device_node *np = ofdev->node; 1265 struct device_node *np = ofdev->dev.of_node;
1228 struct mpc8xxx_spi_probe_info *pinfo; 1266 struct mpc8xxx_spi_probe_info *pinfo;
1229 struct fsl_spi_platform_data *pdata; 1267 struct fsl_spi_platform_data *pdata;
1230 struct spi_master *master; 1268 struct spi_master *master;
@@ -1312,8 +1350,11 @@ static const struct of_device_id of_mpc8xxx_spi_match[] = {
1312MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match); 1350MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match);
1313 1351
1314static struct of_platform_driver of_mpc8xxx_spi_driver = { 1352static struct of_platform_driver of_mpc8xxx_spi_driver = {
1315 .name = "mpc8xxx_spi", 1353 .driver = {
1316 .match_table = of_mpc8xxx_spi_match, 1354 .name = "mpc8xxx_spi",
1355 .owner = THIS_MODULE,
1356 .of_match_table = of_mpc8xxx_spi_match,
1357 },
1317 .probe = of_mpc8xxx_spi_probe, 1358 .probe = of_mpc8xxx_spi_probe,
1318 .remove = __devexit_p(of_mpc8xxx_spi_remove), 1359 .remove = __devexit_p(of_mpc8xxx_spi_remove),
1319}; 1360};
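
The mpc8xxx_spi hunks above are one instance of a tree-wide conversion that recurs throughout the rest of this diff (spi_ppc4xx, xilinx_spi, fsl_qe_udc, the EHCI/OHCI OF glue, bw2/cg3/cg6/cg14): the of_platform_driver loses its top-level .name/.match_table fields in favour of the embedded struct device_driver, and device-tree nodes are read from dev->of_node rather than ofdev->node or the old archdata accessor. A minimal sketch of the converted shape, using a hypothetical "acme,foo" driver rather than any file in this series:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/of_platform.h>

	static const struct of_device_id foo_match[] = {
		{ .compatible = "acme,foo", },
		{},
	};
	MODULE_DEVICE_TABLE(of, foo_match);

	static int __devinit foo_probe(struct of_device *ofdev,
				       const struct of_device_id *ofid)
	{
		struct device_node *np = ofdev->dev.of_node; /* was ofdev->node */

		/* ... map registers and IRQs described by np ... */
		return 0;
	}

	static struct of_platform_driver foo_driver = {
		.driver = {
			.name		= "foo",
			.owner		= THIS_MODULE,
			.of_match_table	= foo_match, /* was a top-level .match_table */
		},
		.probe	= foo_probe,
	};
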
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c
index 7cb5ff37f6e2..19c0b3b34fce 100644
--- a/drivers/spi/spi_ppc4xx.c
+++ b/drivers/spi/spi_ppc4xx.c
@@ -587,12 +587,12 @@ static const struct of_device_id spi_ppc4xx_of_match[] = {
587MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match); 587MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match);
588 588
589static struct of_platform_driver spi_ppc4xx_of_driver = { 589static struct of_platform_driver spi_ppc4xx_of_driver = {
590 .match_table = spi_ppc4xx_of_match,
591 .probe = spi_ppc4xx_of_probe, 590 .probe = spi_ppc4xx_of_probe,
592 .remove = __exit_p(spi_ppc4xx_of_remove), 591 .remove = __exit_p(spi_ppc4xx_of_remove),
593 .driver = { 592 .driver = {
594 .name = DRIVER_NAME, 593 .name = DRIVER_NAME,
595 .owner = THIS_MODULE, 594 .owner = THIS_MODULE,
595 .of_match_table = spi_ppc4xx_of_match,
596 }, 596 },
597}; 597};
598 598
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
index bbf9371cd284..8979a75dbd7b 100644
--- a/drivers/spi/spi_s3c24xx_gpio.c
+++ b/drivers/spi/spi_s3c24xx_gpio.c
@@ -58,8 +58,7 @@ static inline u32 getmiso(struct spi_device *dev)
58 58
59#define spidelay(x) ndelay(x) 59#define spidelay(x) ndelay(x)
60 60
61#define EXPAND_BITBANG_TXRX 61#include "spi_bitbang_txrx.h"
62#include <linux/spi/spi_bitbang.h>
63 62
64 63
65static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi, 64static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi,
diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c
index a65c12ffa733..a511be7961a0 100644
--- a/drivers/spi/spi_sh_sci.c
+++ b/drivers/spi/spi_sh_sci.c
@@ -78,8 +78,7 @@ static inline u32 getmiso(struct spi_device *dev)
78 78
79#define spidelay(x) ndelay(x) 79#define spidelay(x) ndelay(x)
80 80
81#define EXPAND_BITBANG_TXRX 81#include "spi_bitbang_txrx.h"
82#include <linux/spi/spi_bitbang.h>
83 82
84static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi, 83static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
85 unsigned nsecs, u32 word, u8 bits) 84 unsigned nsecs, u32 word, u8 bits)
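
Both bitbang conversions above share a contract worth spelling out: the per-driver setsck/setmosi/getmiso/spidelay macros must be defined before the private header is included, because spi_bitbang_txrx.h expands its helpers in terms of them (previously this was done by defining EXPAND_BITBANG_TXRX before including <linux/spi/spi_bitbang.h>). A hedged sketch with hypothetical GPIO accessors, assuming the 2.6.35-era five-argument helper signature:

	#include <linux/delay.h>
	#include <linux/gpio.h>
	#include <linux/spi/spi.h>

	#define setsck(dev, v)	gpio_set_value(FOO_SCK_GPIO, (v))
	#define setmosi(dev, v)	gpio_set_value(FOO_MOSI_GPIO, (v))
	#define getmiso(dev)	gpio_get_value(FOO_MISO_GPIO)
	#define spidelay(x)	ndelay(x)

	#include "spi_bitbang_txrx.h"	/* must come after the macros above */

	static u32 foo_txrx_mode0(struct spi_device *spi,
				  unsigned nsecs, u32 word, u8 bits)
	{
		/* cpol == 0: SPI_MODE_0 */
		return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
	}
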
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c
index 748d33a76d29..4654805b08d8 100644
--- a/drivers/spi/xilinx_spi_of.c
+++ b/drivers/spi/xilinx_spi_of.c
@@ -48,13 +48,13 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
48 const u32 *prop; 48 const u32 *prop;
49 int len; 49 int len;
50 50
51 rc = of_address_to_resource(ofdev->node, 0, &r_mem); 51 rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
52 if (rc) { 52 if (rc) {
53 dev_warn(&ofdev->dev, "invalid address\n"); 53 dev_warn(&ofdev->dev, "invalid address\n");
54 return rc; 54 return rc;
55 } 55 }
56 56
57 rc = of_irq_to_resource(ofdev->node, 0, &r_irq); 57 rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
58 if (rc == NO_IRQ) { 58 if (rc == NO_IRQ) {
59 dev_warn(&ofdev->dev, "no IRQ found\n"); 59 dev_warn(&ofdev->dev, "no IRQ found\n");
60 return -ENODEV; 60 return -ENODEV;
@@ -67,7 +67,7 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
67 return -ENOMEM; 67 return -ENOMEM;
68 68
69 /* number of slave select bits is required */ 69 /* number of slave select bits is required */
70 prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len); 70 prop = of_get_property(ofdev->dev.of_node, "xlnx,num-ss-bits", &len);
71 if (!prop || len < sizeof(*prop)) { 71 if (!prop || len < sizeof(*prop)) {
72 dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n"); 72 dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
73 return -EINVAL; 73 return -EINVAL;
@@ -81,7 +81,7 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
81 dev_set_drvdata(&ofdev->dev, master); 81 dev_set_drvdata(&ofdev->dev, master);
82 82
83 /* Add any subnodes on the SPI bus */ 83 /* Add any subnodes on the SPI bus */
84 of_register_spi_devices(master, ofdev->node); 84 of_register_spi_devices(master, ofdev->dev.of_node);
85 85
86 return 0; 86 return 0;
87} 87}
@@ -109,12 +109,12 @@ static const struct of_device_id xilinx_spi_of_match[] = {
109MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); 109MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
110 110
111static struct of_platform_driver xilinx_spi_of_driver = { 111static struct of_platform_driver xilinx_spi_of_driver = {
112 .match_table = xilinx_spi_of_match,
113 .probe = xilinx_spi_of_probe, 112 .probe = xilinx_spi_of_probe,
114 .remove = __exit_p(xilinx_spi_of_remove), 113 .remove = __exit_p(xilinx_spi_of_remove),
115 .driver = { 114 .driver = {
116 .name = "xilinx-xps-spi", 115 .name = "xilinx-xps-spi",
117 .owner = THIS_MODULE, 116 .owner = THIS_MODULE,
117 .of_match_table = xilinx_spi_of_match,
118 }, 118 },
119}; 119};
120 120
diff --git a/drivers/staging/go7007/saa7134-go7007.c b/drivers/staging/go7007/saa7134-go7007.c
index 49f0d31c118a..cf7c34a99459 100644
--- a/drivers/staging/go7007/saa7134-go7007.c
+++ b/drivers/staging/go7007/saa7134-go7007.c
@@ -242,13 +242,13 @@ static void saa7134_go7007_irq_ts_done(struct saa7134_dev *dev,
242 printk(KERN_DEBUG "saa7134-go7007: irq: lost %ld\n", 242 printk(KERN_DEBUG "saa7134-go7007: irq: lost %ld\n",
243 (status >> 16) & 0x0f); 243 (status >> 16) & 0x0f);
244 if (status & 0x100000) { 244 if (status & 0x100000) {
245 dma_sync_single(&dev->pci->dev, 245 dma_sync_single_for_cpu(&dev->pci->dev,
246 saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE); 246 saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE);
247 go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE); 247 go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE);
248 saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma)); 248 saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma));
249 } else { 249 } else {
250 dma_sync_single(&dev->pci->dev, 250 dma_sync_single_for_cpu(&dev->pci->dev,
251 saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE); 251 saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE);
252 go7007_parse_video_stream(go, saa->top, PAGE_SIZE); 252 go7007_parse_video_stream(go, saa->top, PAGE_SIZE);
253 saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma)); 253 saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma));
254 } 254 }
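
dma_sync_single() was a long-deprecated alias that only synced for the CPU; the replacement pair makes the buffer-ownership handoff explicit. The general pattern, as a sketch with generic dev/handle arguments rather than the saa7134 fields:

	/* Ownership of the buffer passes to the CPU ... */
	dma_sync_single_for_cpu(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... the CPU may now read the freshly DMA'd data ... */
	dma_sync_single_for_device(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... and the device may DMA into the buffer again. */
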
diff --git a/drivers/staging/rt2860/common/rtmp_init.c b/drivers/staging/rt2860/common/rtmp_init.c
index 21a95ffdfb86..a09038542f26 100644
--- a/drivers/staging/rt2860/common/rtmp_init.c
+++ b/drivers/staging/rt2860/common/rtmp_init.c
@@ -2810,17 +2810,6 @@ void UserCfgInit(struct rt_rtmp_adapter *pAd)
2810} 2810}
2811 2811
2812/* IRQL = PASSIVE_LEVEL */ 2812/* IRQL = PASSIVE_LEVEL */
2813u8 BtoH(char ch)
2814{
2815 if (ch >= '0' && ch <= '9')
2816 return (ch - '0'); /* Handle numerals */
2817 if (ch >= 'A' && ch <= 'F')
2818 return (ch - 'A' + 0xA); /* Handle capitol hex digits */
2819 if (ch >= 'a' && ch <= 'f')
2820 return (ch - 'a' + 0xA); /* Handle small hex digits */
2821 return (255);
2822}
2823
2824/* */ 2813/* */
2825/* FUNCTION: AtoH(char *, u8 *, int) */ 2814/* FUNCTION: AtoH(char *, u8 *, int) */
2826/* */ 2815/* */
@@ -2847,8 +2836,8 @@ void AtoH(char *src, u8 *dest, int destlen)
2847 destTemp = (u8 *)dest; 2836 destTemp = (u8 *)dest;
2848 2837
2849 while (destlen--) { 2838 while (destlen--) {
2850 *destTemp = BtoH(*srcptr++) << 4; /* Put 1st ascii byte in upper nibble. */ 2839 *destTemp = hex_to_bin(*srcptr++) << 4; /* Put 1st ascii byte in upper nibble. */
2851 *destTemp += BtoH(*srcptr++); /* Add 2nd ascii byte to above. */ 2840 *destTemp += hex_to_bin(*srcptr++); /* Add 2nd ascii byte to above. */
2852 destTemp++; 2841 destTemp++;
2853 } 2842 }
2854} 2843}
diff --git a/drivers/staging/rt2860/rtmp.h b/drivers/staging/rt2860/rtmp.h
index ab525ee15042..82b6e783b33f 100644
--- a/drivers/staging/rt2860/rtmp.h
+++ b/drivers/staging/rt2860/rtmp.h
@@ -2356,8 +2356,6 @@ void RTMPMoveMemory(void *pDest, void *pSrc, unsigned long Length);
2356 2356
2357void AtoH(char *src, u8 *dest, int destlen); 2357void AtoH(char *src, u8 *dest, int destlen);
2358 2358
2359u8 BtoH(char ch);
2360
2361void RTMPPatchMacBbpBug(struct rt_rtmp_adapter *pAd); 2359void RTMPPatchMacBbpBug(struct rt_rtmp_adapter *pAd);
2362 2360
2363void RTMPInitTimer(struct rt_rtmp_adapter *pAd, 2361void RTMPInitTimer(struct rt_rtmp_adapter *pAd,
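
BtoH() duplicated a helper the core now provides: hex_to_bin() in <linux/kernel.h> returns the value of a hex digit, or -1 for an invalid character (where BtoH() returned 255, which callers never checked). A small sketch of checked use; parse_hex_byte() is illustrative, not part of the driver:

	#include <linux/errno.h>
	#include <linux/kernel.h>

	static int parse_hex_byte(const char *s, u8 *out)
	{
		int hi = hex_to_bin(s[0]);
		int lo = hex_to_bin(s[1]);

		if (hi < 0 || lo < 0)
			return -EINVAL;		/* not a hex digit */
		*out = (hi << 4) | lo;
		return 0;
	}
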
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index e89304c72568..b53deee25d74 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -5879,20 +5879,13 @@ out:
5879static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp) 5879static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp)
5880{ 5880{
5881 IXJ_FILTER_CADENCE *lcp; 5881 IXJ_FILTER_CADENCE *lcp;
5882 lcp = kmalloc(sizeof(IXJ_FILTER_CADENCE), GFP_KERNEL); 5882 lcp = memdup_user(cp, sizeof(IXJ_FILTER_CADENCE));
5883 if (lcp == NULL) { 5883 if (IS_ERR(lcp)) {
5884 if(ixjdebug & 0x0001) { 5884 if(ixjdebug & 0x0001) {
5885 printk(KERN_INFO "Could not allocate memory for cadence\n"); 5885 printk(KERN_INFO "Could not allocate memory for cadence or could not copy cadence to kernel\n");
5886 } 5886 }
5887 return -ENOMEM; 5887 return PTR_ERR(lcp);
5888 } 5888 }
5889 if (copy_from_user(lcp, cp, sizeof(IXJ_FILTER_CADENCE))) {
5890 if(ixjdebug & 0x0001) {
5891 printk(KERN_INFO "Could not copy cadence to kernel\n");
5892 }
5893 kfree(lcp);
5894 return -EFAULT;
5895 }
5896 if (lcp->filter > 5) { 5889 if (lcp->filter > 5) {
5897 if(ixjdebug & 0x0001) { 5890 if(ixjdebug & 0x0001) {
5898 printk(KERN_INFO "Cadence out of range\n"); 5891 printk(KERN_INFO "Cadence out of range\n");
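
memdup_user() folds the kmalloc()+copy_from_user() pair into one call and encodes the failure in the returned pointer, which is why the error path above switches from a fixed -ENOMEM to PTR_ERR(). The generic shape of such a conversion (struct foo and user_ptr are placeholders):

	struct foo *f = memdup_user(user_ptr, sizeof(*f));

	if (IS_ERR(f))
		return PTR_ERR(f);	/* -ENOMEM or -EFAULT */

	/* ... validate and use f ... */
	kfree(f);
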
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 1e9ba4bdffef..1335456b4f93 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -127,8 +127,6 @@ MODULE_PARM_DESC(ModemOption, "default: 0x10,0x00,0x00,0x00,0x20");
127#define ENDPOINT_ISOC_DATA 0x07 127#define ENDPOINT_ISOC_DATA 0x07
128#define ENDPOINT_FIRMWARE 0x05 128#define ENDPOINT_FIRMWARE 0x05
129 129
130#define hex2int(c) ( (c >= '0') && (c <= '9') ? (c - '0') : ((c & 0xf) + 9) )
131
132struct speedtch_params { 130struct speedtch_params {
133 unsigned int altsetting; 131 unsigned int altsetting;
134 unsigned int BMaxDSL; 132 unsigned int BMaxDSL;
@@ -669,7 +667,8 @@ static int speedtch_atm_start(struct usbatm_data *usbatm, struct atm_dev *atm_de
669 memset(atm_dev->esi, 0, sizeof(atm_dev->esi)); 667 memset(atm_dev->esi, 0, sizeof(atm_dev->esi));
670 if (usb_string(usb_dev, usb_dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) == 12) { 668 if (usb_string(usb_dev, usb_dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) == 12) {
671 for (i = 0; i < 6; i++) 669 for (i = 0; i < 6; i++)
672 atm_dev->esi[i] = (hex2int(mac_str[i * 2]) * 16) + (hex2int(mac_str[i * 2 + 1])); 670 atm_dev->esi[i] = (hex_to_bin(mac_str[i * 2]) << 4) +
671 hex_to_bin(mac_str[i * 2 + 1]);
673 } 672 }
674 673
675 /* Start modem synchronisation */ 674 /* Start modem synchronisation */
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 3537d51073b2..2928523268b5 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2768,8 +2768,11 @@ static const struct of_device_id qe_udc_match[] __devinitconst = {
2768MODULE_DEVICE_TABLE(of, qe_udc_match); 2768MODULE_DEVICE_TABLE(of, qe_udc_match);
2769 2769
2770static struct of_platform_driver udc_driver = { 2770static struct of_platform_driver udc_driver = {
2771 .name = (char *)driver_name, 2771 .driver = {
2772 .match_table = qe_udc_match, 2772 .name = (char *)driver_name,
2773 .owner = THIS_MODULE,
2774 .of_match_table = qe_udc_match,
2775 },
2773 .probe = qe_udc_probe, 2776 .probe = qe_udc_probe,
2774 .remove = __devexit_p(qe_udc_remove), 2777 .remove = __devexit_p(qe_udc_remove),
2775#ifdef CONFIG_PM 2778#ifdef CONFIG_PM
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index ead59f42e69b..544ccfd7056e 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -199,8 +199,8 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
199 writel(pdata->portsc, hcd->regs + PORTSC_OFFSET); 199 writel(pdata->portsc, hcd->regs + PORTSC_OFFSET);
200 mdelay(10); 200 mdelay(10);
201 201
202 /* setup USBCONTROL. */ 202 /* setup specific usb hw */
203 ret = mxc_set_usbcontrol(pdev->id, pdata->flags); 203 ret = mxc_initialize_usb_hw(pdev->id, pdata->flags);
204 if (ret < 0) 204 if (ret < 0)
205 goto err_init; 205 goto err_init;
206 206
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 8df33b8a634c..5aec92866ab3 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -108,7 +108,7 @@ ppc44x_enable_bmt(struct device_node *dn)
108static int __devinit 108static int __devinit
109ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) 109ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
110{ 110{
111 struct device_node *dn = op->node; 111 struct device_node *dn = op->dev.of_node;
112 struct usb_hcd *hcd; 112 struct usb_hcd *hcd;
113 struct ehci_hcd *ehci = NULL; 113 struct ehci_hcd *ehci = NULL;
114 struct resource res; 114 struct resource res;
@@ -274,13 +274,12 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match);
274 274
275 275
276static struct of_platform_driver ehci_hcd_ppc_of_driver = { 276static struct of_platform_driver ehci_hcd_ppc_of_driver = {
277 .name = "ppc-of-ehci",
278 .match_table = ehci_hcd_ppc_of_match,
279 .probe = ehci_hcd_ppc_of_probe, 277 .probe = ehci_hcd_ppc_of_probe,
280 .remove = ehci_hcd_ppc_of_remove, 278 .remove = ehci_hcd_ppc_of_remove,
281 .shutdown = ehci_hcd_ppc_of_shutdown, 279 .shutdown = ehci_hcd_ppc_of_shutdown,
282 .driver = { 280 .driver = {
283 .name = "ppc-of-ehci", 281 .name = "ppc-of-ehci",
284 .owner = THIS_MODULE, 282 .owner = THIS_MODULE,
283 .of_match_table = ehci_hcd_ppc_of_match,
285 }, 284 },
286}; 285};
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index f603bb2c0a8e..013972bbde57 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -288,13 +288,12 @@ static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
288MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match); 288MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
289 289
290static struct of_platform_driver ehci_hcd_xilinx_of_driver = { 290static struct of_platform_driver ehci_hcd_xilinx_of_driver = {
291 .name = "xilinx-of-ehci",
292 .match_table = ehci_hcd_xilinx_of_match,
293 .probe = ehci_hcd_xilinx_of_probe, 291 .probe = ehci_hcd_xilinx_of_probe,
294 .remove = ehci_hcd_xilinx_of_remove, 292 .remove = ehci_hcd_xilinx_of_remove,
295 .shutdown = ehci_hcd_xilinx_of_shutdown, 293 .shutdown = ehci_hcd_xilinx_of_shutdown,
296 .driver = { 294 .driver = {
297 .name = "xilinx-of-ehci", 295 .name = "xilinx-of-ehci",
298 .owner = THIS_MODULE, 296 .owner = THIS_MODULE,
297 .of_match_table = ehci_hcd_xilinx_of_match,
299 }, 298 },
300}; 299};
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 90453379a434..c7c8392a88b9 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -565,7 +565,7 @@ static int __devinit of_fhci_probe(struct of_device *ofdev,
565 const struct of_device_id *ofid) 565 const struct of_device_id *ofid)
566{ 566{
567 struct device *dev = &ofdev->dev; 567 struct device *dev = &ofdev->dev;
568 struct device_node *node = ofdev->node; 568 struct device_node *node = dev->of_node;
569 struct usb_hcd *hcd; 569 struct usb_hcd *hcd;
570 struct fhci_hcd *fhci; 570 struct fhci_hcd *fhci;
571 struct resource usb_regs; 571 struct resource usb_regs;
@@ -670,7 +670,7 @@ static int __devinit of_fhci_probe(struct of_device *ofdev,
670 } 670 }
671 671
672 for (j = 0; j < NUM_PINS; j++) { 672 for (j = 0; j < NUM_PINS; j++) {
673 fhci->pins[j] = qe_pin_request(ofdev->node, j); 673 fhci->pins[j] = qe_pin_request(node, j);
674 if (IS_ERR(fhci->pins[j])) { 674 if (IS_ERR(fhci->pins[j])) {
675 ret = PTR_ERR(fhci->pins[j]); 675 ret = PTR_ERR(fhci->pins[j]);
676 dev_err(dev, "can't get pin %d: %d\n", j, ret); 676 dev_err(dev, "can't get pin %d: %d\n", j, ret);
@@ -813,8 +813,11 @@ static const struct of_device_id of_fhci_match[] = {
813MODULE_DEVICE_TABLE(of, of_fhci_match); 813MODULE_DEVICE_TABLE(of, of_fhci_match);
814 814
815static struct of_platform_driver of_fhci_driver = { 815static struct of_platform_driver of_fhci_driver = {
816 .name = "fsl,usb-fhci", 816 .driver = {
817 .match_table = of_fhci_match, 817 .name = "fsl,usb-fhci",
818 .owner = THIS_MODULE,
819 .of_match_table = of_fhci_match,
820 },
818 .probe = of_fhci_probe, 821 .probe = of_fhci_probe,
819 .remove = __devexit_p(of_fhci_remove), 822 .remove = __devexit_p(of_fhci_remove),
820}; 823};
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 8f0259eaa2c7..ec85d0c3cc3e 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -31,7 +31,7 @@ static int of_isp1760_probe(struct of_device *dev,
31 const struct of_device_id *match) 31 const struct of_device_id *match)
32{ 32{
33 struct usb_hcd *hcd; 33 struct usb_hcd *hcd;
34 struct device_node *dp = dev->node; 34 struct device_node *dp = dev->dev.of_node;
35 struct resource *res; 35 struct resource *res;
36 struct resource memory; 36 struct resource memory;
37 struct of_irq oirq; 37 struct of_irq oirq;
@@ -120,8 +120,11 @@ static const struct of_device_id of_isp1760_match[] = {
120MODULE_DEVICE_TABLE(of, of_isp1760_match); 120MODULE_DEVICE_TABLE(of, of_isp1760_match);
121 121
122static struct of_platform_driver isp1760_of_driver = { 122static struct of_platform_driver isp1760_of_driver = {
123 .name = "nxp-isp1760", 123 .driver = {
124 .match_table = of_isp1760_match, 124 .name = "nxp-isp1760",
125 .owner = THIS_MODULE,
126 .of_match_table = of_isp1760_match,
127 },
125 .probe = of_isp1760_probe, 128 .probe = of_isp1760_probe,
126 .remove = of_isp1760_remove, 129 .remove = of_isp1760_remove,
127}; 130};
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 103263c230cf..df165917412a 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -83,7 +83,7 @@ static const struct hc_driver ohci_ppc_of_hc_driver = {
83static int __devinit 83static int __devinit
84ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) 84ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
85{ 85{
86 struct device_node *dn = op->node; 86 struct device_node *dn = op->dev.of_node;
87 struct usb_hcd *hcd; 87 struct usb_hcd *hcd;
88 struct ohci_hcd *ohci; 88 struct ohci_hcd *ohci;
89 struct resource res; 89 struct resource res;
@@ -244,18 +244,13 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match);
244 244
245 245
246static struct of_platform_driver ohci_hcd_ppc_of_driver = { 246static struct of_platform_driver ohci_hcd_ppc_of_driver = {
247 .name = "ppc-of-ohci",
248 .match_table = ohci_hcd_ppc_of_match,
249 .probe = ohci_hcd_ppc_of_probe, 247 .probe = ohci_hcd_ppc_of_probe,
250 .remove = ohci_hcd_ppc_of_remove, 248 .remove = ohci_hcd_ppc_of_remove,
251 .shutdown = ohci_hcd_ppc_of_shutdown, 249 .shutdown = ohci_hcd_ppc_of_shutdown,
252#ifdef CONFIG_PM 250 .driver = {
253 /*.suspend = ohci_hcd_ppc_soc_drv_suspend,*/ 251 .name = "ppc-of-ohci",
254 /*.resume = ohci_hcd_ppc_soc_drv_resume,*/ 252 .owner = THIS_MODULE,
255#endif 253 .of_match_table = ohci_hcd_ppc_of_match,
256 .driver = {
257 .name = "ppc-of-ohci",
258 .owner = THIS_MODULE,
259 }, 254 },
260}; 255};
261 256
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index e7fa3644ba6a..61c76b13f0f1 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -954,8 +954,7 @@ static int mon_bin_queued(struct mon_reader_bin *rp)
954 954
955/* 955/*
956 */ 956 */
957static int mon_bin_ioctl(struct inode *inode, struct file *file, 957static int mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
958 unsigned int cmd, unsigned long arg)
959{ 958{
960 struct mon_reader_bin *rp = file->private_data; 959 struct mon_reader_bin *rp = file->private_data;
961 // struct mon_bus* mbus = rp->r.m_bus; 960 // struct mon_bus* mbus = rp->r.m_bus;
@@ -1095,6 +1094,19 @@ static int mon_bin_ioctl(struct inode *inode, struct file *file,
1095 return ret; 1094 return ret;
1096} 1095}
1097 1096
1097static long mon_bin_unlocked_ioctl(struct file *file, unsigned int cmd,
1098 unsigned long arg)
1099{
1100 int ret;
1101
1102 lock_kernel();
1103 ret = mon_bin_ioctl(file, cmd, arg);
1104 unlock_kernel();
1105
1106 return ret;
1107}
1108
1109
1098#ifdef CONFIG_COMPAT 1110#ifdef CONFIG_COMPAT
1099static long mon_bin_compat_ioctl(struct file *file, 1111static long mon_bin_compat_ioctl(struct file *file,
1100 unsigned int cmd, unsigned long arg) 1112 unsigned int cmd, unsigned long arg)
@@ -1148,14 +1160,13 @@ static long mon_bin_compat_ioctl(struct file *file,
1148 return 0; 1160 return 0;
1149 1161
1150 case MON_IOCG_STATS: 1162 case MON_IOCG_STATS:
1151 return mon_bin_ioctl(NULL, file, cmd, 1163 return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
1152 (unsigned long) compat_ptr(arg));
1153 1164
1154 case MON_IOCQ_URB_LEN: 1165 case MON_IOCQ_URB_LEN:
1155 case MON_IOCQ_RING_SIZE: 1166 case MON_IOCQ_RING_SIZE:
1156 case MON_IOCT_RING_SIZE: 1167 case MON_IOCT_RING_SIZE:
1157 case MON_IOCH_MFLUSH: 1168 case MON_IOCH_MFLUSH:
1158 return mon_bin_ioctl(NULL, file, cmd, arg); 1169 return mon_bin_ioctl(file, cmd, arg);
1159 1170
1160 default: 1171 default:
1161 ; 1172 ;
@@ -1239,7 +1250,7 @@ static const struct file_operations mon_fops_binary = {
1239 .read = mon_bin_read, 1250 .read = mon_bin_read,
1240 /* .write = mon_text_write, */ 1251 /* .write = mon_text_write, */
1241 .poll = mon_bin_poll, 1252 .poll = mon_bin_poll,
1242 .ioctl = mon_bin_ioctl, 1253 .unlocked_ioctl = mon_bin_unlocked_ioctl,
1243#ifdef CONFIG_COMPAT 1254#ifdef CONFIG_COMPAT
1244 .compat_ioctl = mon_bin_compat_ioctl, 1255 .compat_ioctl = mon_bin_compat_ioctl,
1245#endif 1256#endif
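
This is the standard BKL push-down: the VFS no longer takes the big kernel lock around .unlocked_ioctl, so a conversion that must stay behaviour-neutral wraps the old handler in lock_kernel()/unlock_kernel() explicitly (hence the <linux/smp_lock.h> include added to mon_stat.c below). The generic shape, with a hypothetical foo_ioctl() handler:

	#include <linux/fs.h>
	#include <linux/smp_lock.h>

	static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
				       unsigned long arg)
	{
		long ret;

		lock_kernel();		/* what the VFS used to take for .ioctl */
		ret = foo_ioctl(file, cmd, arg);
		unlock_kernel();

		return ret;
	}

	static const struct file_operations foo_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= foo_unlocked_ioctl,
	};
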
diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c
index 1becdc3837e6..8ec94f15a738 100644
--- a/drivers/usb/mon/mon_stat.c
+++ b/drivers/usb/mon/mon_stat.c
@@ -11,6 +11,7 @@
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/usb.h> 12#include <linux/usb.h>
13#include <linux/fs.h> 13#include <linux/fs.h>
14#include <linux/smp_lock.h>
14#include <asm/uaccess.h> 15#include <asm/uaccess.h>
15 16
16#include "usb_mon.h" 17#include "usb_mon.h"
@@ -63,6 +64,6 @@ const struct file_operations mon_fops_stat = {
63 .read = mon_stat_read, 64 .read = mon_stat_read,
64 /* .write = mon_stat_write, */ 65 /* .write = mon_stat_write, */
65 /* .poll = mon_stat_poll, */ 66 /* .poll = mon_stat_poll, */
66 /* .ioctl = mon_stat_ioctl, */ 67 /* .unlocked_ioctl = mon_stat_ioctl, */
67 .release = mon_stat_release, 68 .release = mon_stat_release,
68}; 69};
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 750effe0f98b..c6fb8e968f21 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -806,7 +806,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
806 count = indirect->len / sizeof desc; 806 count = indirect->len / sizeof desc;
807 /* Buffers are chained via a 16 bit next field, so 807 /* Buffers are chained via a 16 bit next field, so
808 * we can have at most 2^16 of these. */ 808 * we can have at most 2^16 of these. */
809 if (count > USHORT_MAX + 1) { 809 if (count > USHRT_MAX + 1) {
810 vq_err(vq, "Indirect buffer length too big: %d\n", 810 vq_err(vq, "Indirect buffer length too big: %d\n",
811 indirect->len); 811 indirect->len);
812 return -E2BIG; 812 return -E2BIG;
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index 8d406fb689c1..f3d7440f0072 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -80,7 +80,7 @@ struct arcfb_par {
80 spinlock_t lock; 80 spinlock_t lock;
81}; 81};
82 82
83static struct fb_fix_screeninfo arcfb_fix __initdata = { 83static struct fb_fix_screeninfo arcfb_fix __devinitdata = {
84 .id = "arcfb", 84 .id = "arcfb",
85 .type = FB_TYPE_PACKED_PIXELS, 85 .type = FB_TYPE_PACKED_PIXELS,
86 .visual = FB_VISUAL_MONO01, 86 .visual = FB_VISUAL_MONO01,
@@ -90,7 +90,7 @@ static struct fb_fix_screeninfo arcfb_fix __initdata = {
90 .accel = FB_ACCEL_NONE, 90 .accel = FB_ACCEL_NONE,
91}; 91};
92 92
93static struct fb_var_screeninfo arcfb_var __initdata = { 93static struct fb_var_screeninfo arcfb_var __devinitdata = {
94 .xres = 128, 94 .xres = 128,
95 .yres = 64, 95 .yres = 64,
96 .xres_virtual = 128, 96 .xres_virtual = 128,
@@ -588,7 +588,7 @@ err:
588 return retval; 588 return retval;
589} 589}
590 590
591static int arcfb_remove(struct platform_device *dev) 591static int __devexit arcfb_remove(struct platform_device *dev)
592{ 592{
593 struct fb_info *info = platform_get_drvdata(dev); 593 struct fb_info *info = platform_get_drvdata(dev);
594 594
@@ -602,7 +602,7 @@ static int arcfb_remove(struct platform_device *dev)
602 602
603static struct platform_driver arcfb_driver = { 603static struct platform_driver arcfb_driver = {
604 .probe = arcfb_probe, 604 .probe = arcfb_probe,
605 .remove = arcfb_remove, 605 .remove = __devexit_p(arcfb_remove),
606 .driver = { 606 .driver = {
607 .name = "arcfb", 607 .name = "arcfb",
608 }, 608 },
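
The arcfb annotations follow the usual hotplug rules: __devinitdata and __devexit mark data and code that can be discarded when CONFIG_HOTPLUG is off, and __devexit_p() turns the .remove reference into NULL in that configuration so the driver structure still links. A sketch, with foo_probe/foo_remove as placeholders:

	static int __devexit foo_remove(struct platform_device *pdev)
	{
		/* ... tear down ... */
		return 0;
	}

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.remove	= __devexit_p(foo_remove),	/* NULL if !CONFIG_HOTPLUG */
		.driver	= {
			.name = "foo",
		},
	};
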
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 29d72851f85b..f8d69ad36830 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -1820,10 +1820,6 @@ struct atyclk {
1820#define ATYIO_FEATW 0x41545903 /* ATY\03 */ 1820#define ATYIO_FEATW 0x41545903 /* ATY\03 */
1821#endif 1821#endif
1822 1822
1823#ifndef FBIO_WAITFORVSYNC
1824#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
1825#endif
1826
1827static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg) 1823static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
1828{ 1824{
1829 struct atyfb_par *par = (struct atyfb_par *) info->par; 1825 struct atyfb_par *par = (struct atyfb_par *) info->par;
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 23b2a8c0dbfc..b020ba7f1cf2 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -501,7 +501,9 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
501 501
502static int __devinit bfin_bf54x_probe(struct platform_device *pdev) 502static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
503{ 503{
504#ifndef NO_BL_SUPPORT
504 struct backlight_properties props; 505 struct backlight_properties props;
506#endif
505 struct bfin_bf54xfb_info *info; 507 struct bfin_bf54xfb_info *info;
506 struct fb_info *fbinfo; 508 struct fb_info *fbinfo;
507 int ret; 509 int ret;
@@ -654,7 +656,8 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
654 printk(KERN_ERR DRIVER_NAME 656 printk(KERN_ERR DRIVER_NAME
655 ": unable to register backlight.\n"); 657 ": unable to register backlight.\n");
656 ret = -EINVAL; 658 ret = -EINVAL;
657 goto out9; 659 unregister_framebuffer(fbinfo);
660 goto out8;
658 } 661 }
659 662
660 lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops); 663 lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops);
@@ -663,8 +666,6 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
663 666
664 return 0; 667 return 0;
665 668
666out9:
667 unregister_framebuffer(fbinfo);
668out8: 669out8:
669 free_irq(info->irq, info); 670 free_irq(info->irq, info);
670out7: 671out7:
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c
index 2baac7cc1425..c8e1f04941bd 100644
--- a/drivers/video/bfin-lq035q1-fb.c
+++ b/drivers/video/bfin-lq035q1-fb.c
@@ -61,47 +61,13 @@
61#define LCD_X_RES 320 /* Horizontal Resolution */ 61#define LCD_X_RES 320 /* Horizontal Resolution */
62#define LCD_Y_RES 240 /* Vertical Resolution */ 62#define LCD_Y_RES 240 /* Vertical Resolution */
63#define DMA_BUS_SIZE 16 63#define DMA_BUS_SIZE 16
64#define U_LINE 4 /* Blanking Lines */
64 65
65#define USE_RGB565_16_BIT_PPI
66
67#ifdef USE_RGB565_16_BIT_PPI
68#define LCD_BPP 16 /* Bit Per Pixel */
69#define CLOCKS_PER_PIX 1
70#define CPLD_PIPELINE_DELAY_COR 0 /* NO CPLB */
71#endif
72 66
73/* Interface 16/18-bit TFT over an 8-bit wide PPI using a small Programmable Logic Device (CPLD) 67/* Interface 16/18-bit TFT over an 8-bit wide PPI using a small Programmable Logic Device (CPLD)
74 * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165 68 * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165
75 */ 69 */
76 70
77#ifdef USE_RGB565_8_BIT_PPI
78#define LCD_BPP 16 /* Bit Per Pixel */
79#define CLOCKS_PER_PIX 2
80#define CPLD_PIPELINE_DELAY_COR 3 /* RGB565 */
81#endif
82
83#ifdef USE_RGB888_8_BIT_PPI
84#define LCD_BPP 24 /* Bit Per Pixel */
85#define CLOCKS_PER_PIX 3
86#define CPLD_PIPELINE_DELAY_COR 5 /* RGB888 */
87#endif
88
89 /*
90 * HS and VS timing parameters (all in number of PPI clk ticks)
91 */
92
93#define U_LINE 4 /* Blanking Lines */
94
95#define H_ACTPIX (LCD_X_RES * CLOCKS_PER_PIX) /* active horizontal pixel */
96#define H_PERIOD (336 * CLOCKS_PER_PIX) /* HS period */
97#define H_PULSE (2 * CLOCKS_PER_PIX) /* HS pulse width */
98#define H_START (7 * CLOCKS_PER_PIX + CPLD_PIPELINE_DELAY_COR) /* first valid pixel */
99
100#define V_LINES (LCD_Y_RES + U_LINE) /* total vertical lines */
101#define V_PULSE (2 * CLOCKS_PER_PIX) /* VS pulse width (1-5 H_PERIODs) */
102#define V_PERIOD (H_PERIOD * V_LINES) /* VS period */
103
104#define ACTIVE_VIDEO_MEM_OFFSET ((U_LINE / 2) * LCD_X_RES * (LCD_BPP / 8))
105 71
106#define BFIN_LCD_NBR_PALETTE_ENTRIES 256 72#define BFIN_LCD_NBR_PALETTE_ENTRIES 256
107 73
@@ -110,12 +76,6 @@
110#define PPI_PORT_CFG_01 0x10 76#define PPI_PORT_CFG_01 0x10
111#define PPI_POLS_1 0x8000 77#define PPI_POLS_1 0x8000
112 78
113#if (CLOCKS_PER_PIX > 1)
114#define PPI_PMODE (DLEN_8 | PACK_EN)
115#else
116#define PPI_PMODE (DLEN_16)
117#endif
118
119#define LQ035_INDEX 0x74 79#define LQ035_INDEX 0x74
120#define LQ035_DATA 0x76 80#define LQ035_DATA 0x76
121 81
@@ -139,6 +99,15 @@ struct bfin_lq035q1fb_info {
139 int irq; 99 int irq;
140 spinlock_t lock; /* lock */ 100 spinlock_t lock; /* lock */
141 u32 pseudo_pal[16]; 101 u32 pseudo_pal[16];
102
103 u32 lcd_bpp;
104 u32 h_actpix;
105 u32 h_period;
106 u32 h_pulse;
107 u32 h_start;
108 u32 v_lines;
109 u32 v_pulse;
110 u32 v_period;
142}; 111};
143 112
144static int nocursor; 113static int nocursor;
@@ -234,16 +203,69 @@ static int lq035q1_backlight(struct bfin_lq035q1fb_info *info, unsigned arg)
234 return 0; 203 return 0;
235} 204}
236 205
206static int bfin_lq035q1_calc_timing(struct bfin_lq035q1fb_info *fbi)
207{
208 unsigned long clocks_per_pix, cpld_pipeline_delay_cor;
209
210 /*
211 * Interface 16/18-bit TFT over an 8-bit wide PPI using a small
212 * Programmable Logic Device (CPLD)
213 * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165
214 */
215
216 switch (fbi->disp_info->ppi_mode) {
217 case USE_RGB565_16_BIT_PPI:
218 fbi->lcd_bpp = 16;
219 clocks_per_pix = 1;
220 cpld_pipeline_delay_cor = 0;
221 break;
222 case USE_RGB565_8_BIT_PPI:
223 fbi->lcd_bpp = 16;
224 clocks_per_pix = 2;
225 cpld_pipeline_delay_cor = 3;
226 break;
227 case USE_RGB888_8_BIT_PPI:
228 fbi->lcd_bpp = 24;
229 clocks_per_pix = 3;
230 cpld_pipeline_delay_cor = 5;
231 break;
232 default:
233 return -EINVAL;
234 }
235
236 /*
237 * HS and VS timing parameters (all in number of PPI clk ticks)
238 */
239
240 fbi->h_actpix = (LCD_X_RES * clocks_per_pix); /* active horizontal pixel */
241 fbi->h_period = (336 * clocks_per_pix); /* HS period */
242 fbi->h_pulse = (2 * clocks_per_pix); /* HS pulse width */
243 fbi->h_start = (7 * clocks_per_pix + cpld_pipeline_delay_cor); /* first valid pixel */
244
245 fbi->v_lines = (LCD_Y_RES + U_LINE); /* total vertical lines */
246 fbi->v_pulse = (2 * clocks_per_pix); /* VS pulse width (1-5 H_PERIODs) */
247 fbi->v_period = (fbi->h_period * fbi->v_lines); /* VS period */
248
249 return 0;
250}
251
237static void bfin_lq035q1_config_ppi(struct bfin_lq035q1fb_info *fbi) 252static void bfin_lq035q1_config_ppi(struct bfin_lq035q1fb_info *fbi)
238{ 253{
239 bfin_write_PPI_DELAY(H_START); 254 unsigned ppi_pmode;
240 bfin_write_PPI_COUNT(H_ACTPIX - 1); 255
241 bfin_write_PPI_FRAME(V_LINES); 256 if (fbi->disp_info->ppi_mode == USE_RGB565_16_BIT_PPI)
257 ppi_pmode = DLEN_16;
258 else
259 ppi_pmode = (DLEN_8 | PACK_EN);
260
261 bfin_write_PPI_DELAY(fbi->h_start);
262 bfin_write_PPI_COUNT(fbi->h_actpix - 1);
263 bfin_write_PPI_FRAME(fbi->v_lines);
242 264
243 bfin_write_PPI_CONTROL(PPI_TX_MODE | /* output mode , PORT_DIR */ 265 bfin_write_PPI_CONTROL(PPI_TX_MODE | /* output mode , PORT_DIR */
244 PPI_XFER_TYPE_11 | /* sync mode XFR_TYPE */ 266 PPI_XFER_TYPE_11 | /* sync mode XFR_TYPE */
245 PPI_PORT_CFG_01 | /* two frame sync PORT_CFG */ 267 PPI_PORT_CFG_01 | /* two frame sync PORT_CFG */
246 PPI_PMODE | /* 8/16 bit data length / PACK_EN? */ 268 ppi_pmode | /* 8/16 bit data length / PACK_EN? */
247 PPI_POLS_1); /* falling edge syncs POLS */ 269 PPI_POLS_1); /* falling edge syncs POLS */
248} 270}
249 271
@@ -272,19 +294,19 @@ static void bfin_lq035q1_stop_timers(void)
272 294
273} 295}
274 296
275static void bfin_lq035q1_init_timers(void) 297static void bfin_lq035q1_init_timers(struct bfin_lq035q1fb_info *fbi)
276{ 298{
277 299
278 bfin_lq035q1_stop_timers(); 300 bfin_lq035q1_stop_timers();
279 301
280 set_gptimer_period(TIMER_HSYNC_id, H_PERIOD); 302 set_gptimer_period(TIMER_HSYNC_id, fbi->h_period);
281 set_gptimer_pwidth(TIMER_HSYNC_id, H_PULSE); 303 set_gptimer_pwidth(TIMER_HSYNC_id, fbi->h_pulse);
282 set_gptimer_config(TIMER_HSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT | 304 set_gptimer_config(TIMER_HSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
283 TIMER_TIN_SEL | TIMER_CLK_SEL| 305 TIMER_TIN_SEL | TIMER_CLK_SEL|
284 TIMER_EMU_RUN); 306 TIMER_EMU_RUN);
285 307
286 set_gptimer_period(TIMER_VSYNC_id, V_PERIOD); 308 set_gptimer_period(TIMER_VSYNC_id, fbi->v_period);
287 set_gptimer_pwidth(TIMER_VSYNC_id, V_PULSE); 309 set_gptimer_pwidth(TIMER_VSYNC_id, fbi->v_pulse);
288 set_gptimer_config(TIMER_VSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT | 310 set_gptimer_config(TIMER_VSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
289 TIMER_TIN_SEL | TIMER_CLK_SEL | 311 TIMER_TIN_SEL | TIMER_CLK_SEL |
290 TIMER_EMU_RUN); 312 TIMER_EMU_RUN);
@@ -294,21 +316,21 @@ static void bfin_lq035q1_init_timers(void)
294static void bfin_lq035q1_config_dma(struct bfin_lq035q1fb_info *fbi) 316static void bfin_lq035q1_config_dma(struct bfin_lq035q1fb_info *fbi)
295{ 317{
296 318
319
297 set_dma_config(CH_PPI, 320 set_dma_config(CH_PPI,
298 set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO, 321 set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO,
299 INTR_DISABLE, DIMENSION_2D, 322 INTR_DISABLE, DIMENSION_2D,
300 DATA_SIZE_16, 323 DATA_SIZE_16,
301 DMA_NOSYNC_KEEP_DMA_BUF)); 324 DMA_NOSYNC_KEEP_DMA_BUF));
302 set_dma_x_count(CH_PPI, (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE); 325 set_dma_x_count(CH_PPI, (LCD_X_RES * fbi->lcd_bpp) / DMA_BUS_SIZE);
303 set_dma_x_modify(CH_PPI, DMA_BUS_SIZE / 8); 326 set_dma_x_modify(CH_PPI, DMA_BUS_SIZE / 8);
304 set_dma_y_count(CH_PPI, V_LINES); 327 set_dma_y_count(CH_PPI, fbi->v_lines);
305 328
306 set_dma_y_modify(CH_PPI, DMA_BUS_SIZE / 8); 329 set_dma_y_modify(CH_PPI, DMA_BUS_SIZE / 8);
307 set_dma_start_addr(CH_PPI, (unsigned long)fbi->fb_buffer); 330 set_dma_start_addr(CH_PPI, (unsigned long)fbi->fb_buffer);
308 331
309} 332}
310 333
311#if (CLOCKS_PER_PIX == 1)
312static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2, 334static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
313 P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, 335 P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
314 P_PPI0_D3, P_PPI0_D4, P_PPI0_D5, 336 P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
@@ -316,22 +338,27 @@ static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
316 P_PPI0_D9, P_PPI0_D10, P_PPI0_D11, 338 P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
317 P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, 339 P_PPI0_D12, P_PPI0_D13, P_PPI0_D14,
318 P_PPI0_D15, 0}; 340 P_PPI0_D15, 0};
319#else 341
320static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2, 342static const u16 ppi0_req_8[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
321 P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, 343 P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
322 P_PPI0_D3, P_PPI0_D4, P_PPI0_D5, 344 P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
323 P_PPI0_D6, P_PPI0_D7, 0}; 345 P_PPI0_D6, P_PPI0_D7, 0};
324#endif
325 346
326static inline void bfin_lq035q1_free_ports(void) 347static inline void bfin_lq035q1_free_ports(unsigned ppi16)
327{ 348{
328 peripheral_free_list(ppi0_req_16); 349 if (ppi16)
350 peripheral_free_list(ppi0_req_16);
351 else
352 peripheral_free_list(ppi0_req_8);
353
329 if (ANOMALY_05000400) 354 if (ANOMALY_05000400)
330 gpio_free(P_IDENT(P_PPI0_FS3)); 355 gpio_free(P_IDENT(P_PPI0_FS3));
331} 356}
332 357
333static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev) 358static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev,
359 unsigned ppi16)
334{ 360{
361 int ret;
335 /* ANOMALY_05000400 - PPI Does Not Start Properly In Specific Mode: 362 /* ANOMALY_05000400 - PPI Does Not Start Properly In Specific Mode:
336 * Drive PPI_FS3 Low 363 * Drive PPI_FS3 Low
337 */ 364 */
@@ -342,7 +369,12 @@ static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev)
342 gpio_direction_output(P_IDENT(P_PPI0_FS3), 0); 369 gpio_direction_output(P_IDENT(P_PPI0_FS3), 0);
343 } 370 }
344 371
345 if (peripheral_request_list(ppi0_req_16, DRIVER_NAME)) { 372 if (ppi16)
373 ret = peripheral_request_list(ppi0_req_16, DRIVER_NAME);
374 else
375 ret = peripheral_request_list(ppi0_req_8, DRIVER_NAME);
376
377 if (ret) {
346 dev_err(&pdev->dev, "requesting peripherals failed\n"); 378 dev_err(&pdev->dev, "requesting peripherals failed\n");
347 return -EFAULT; 379 return -EFAULT;
348 } 380 }
@@ -364,7 +396,7 @@ static int bfin_lq035q1_fb_open(struct fb_info *info, int user)
364 396
365 bfin_lq035q1_config_dma(fbi); 397 bfin_lq035q1_config_dma(fbi);
366 bfin_lq035q1_config_ppi(fbi); 398 bfin_lq035q1_config_ppi(fbi);
367 bfin_lq035q1_init_timers(); 399 bfin_lq035q1_init_timers(fbi);
368 400
369 /* start dma */ 401 /* start dma */
370 enable_dma(CH_PPI); 402 enable_dma(CH_PPI);
@@ -402,12 +434,9 @@ static int bfin_lq035q1_fb_release(struct fb_info *info, int user)
402static int bfin_lq035q1_fb_check_var(struct fb_var_screeninfo *var, 434static int bfin_lq035q1_fb_check_var(struct fb_var_screeninfo *var,
403 struct fb_info *info) 435 struct fb_info *info)
404{ 436{
405 switch (var->bits_per_pixel) { 437 struct bfin_lq035q1fb_info *fbi = info->par;
406#if (LCD_BPP == 24) 438
407 case 24:/* TRUECOLOUR, 16m */ 439 if (var->bits_per_pixel == fbi->lcd_bpp) {
408#else
409 case 16:/* DIRECTCOLOUR, 64k */
410#endif
411 var->red.offset = info->var.red.offset; 440 var->red.offset = info->var.red.offset;
412 var->green.offset = info->var.green.offset; 441 var->green.offset = info->var.green.offset;
413 var->blue.offset = info->var.blue.offset; 442 var->blue.offset = info->var.blue.offset;
@@ -420,8 +449,7 @@ static int bfin_lq035q1_fb_check_var(struct fb_var_screeninfo *var,
420 var->red.msb_right = 0; 449 var->red.msb_right = 0;
421 var->green.msb_right = 0; 450 var->green.msb_right = 0;
422 var->blue.msb_right = 0; 451 var->blue.msb_right = 0;
423 break; 452 } else {
424 default:
425 pr_debug("%s: depth not supported: %u BPP\n", __func__, 453 pr_debug("%s: depth not supported: %u BPP\n", __func__,
426 var->bits_per_pixel); 454 var->bits_per_pixel);
427 return -EINVAL; 455 return -EINVAL;
@@ -528,6 +556,7 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
528{ 556{
529 struct bfin_lq035q1fb_info *info; 557 struct bfin_lq035q1fb_info *info;
530 struct fb_info *fbinfo; 558 struct fb_info *fbinfo;
559 u32 active_video_mem_offset;
531 int ret; 560 int ret;
532 561
533 ret = request_dma(CH_PPI, DRIVER_NAME"_CH_PPI"); 562 ret = request_dma(CH_PPI, DRIVER_NAME"_CH_PPI");
@@ -550,6 +579,12 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
550 579
551 platform_set_drvdata(pdev, fbinfo); 580 platform_set_drvdata(pdev, fbinfo);
552 581
582 ret = bfin_lq035q1_calc_timing(info);
583 if (ret < 0) {
584 dev_err(&pdev->dev, "Failed PPI Mode\n");
585 goto out3;
586 }
587
553 strcpy(fbinfo->fix.id, DRIVER_NAME); 588 strcpy(fbinfo->fix.id, DRIVER_NAME);
554 589
555 fbinfo->fix.type = FB_TYPE_PACKED_PIXELS; 590 fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
@@ -571,46 +606,48 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
571 fbinfo->var.xres_virtual = LCD_X_RES; 606 fbinfo->var.xres_virtual = LCD_X_RES;
572 fbinfo->var.yres = LCD_Y_RES; 607 fbinfo->var.yres = LCD_Y_RES;
573 fbinfo->var.yres_virtual = LCD_Y_RES; 608 fbinfo->var.yres_virtual = LCD_Y_RES;
574 fbinfo->var.bits_per_pixel = LCD_BPP; 609 fbinfo->var.bits_per_pixel = info->lcd_bpp;
575 610
576 if (info->disp_info->mode & LQ035_BGR) { 611 if (info->disp_info->mode & LQ035_BGR) {
577#if (LCD_BPP == 24) 612 if (info->lcd_bpp == 24) {
578 fbinfo->var.red.offset = 0; 613 fbinfo->var.red.offset = 0;
579 fbinfo->var.green.offset = 8; 614 fbinfo->var.green.offset = 8;
580 fbinfo->var.blue.offset = 16; 615 fbinfo->var.blue.offset = 16;
581#else 616 } else {
582 fbinfo->var.red.offset = 0; 617 fbinfo->var.red.offset = 0;
583 fbinfo->var.green.offset = 5; 618 fbinfo->var.green.offset = 5;
584 fbinfo->var.blue.offset = 11; 619 fbinfo->var.blue.offset = 11;
585#endif 620 }
586 } else { 621 } else {
587#if (LCD_BPP == 24) 622 if (info->lcd_bpp == 24) {
588 fbinfo->var.red.offset = 16; 623 fbinfo->var.red.offset = 16;
589 fbinfo->var.green.offset = 8; 624 fbinfo->var.green.offset = 8;
590 fbinfo->var.blue.offset = 0; 625 fbinfo->var.blue.offset = 0;
591#else 626 } else {
592 fbinfo->var.red.offset = 11; 627 fbinfo->var.red.offset = 11;
593 fbinfo->var.green.offset = 5; 628 fbinfo->var.green.offset = 5;
594 fbinfo->var.blue.offset = 0; 629 fbinfo->var.blue.offset = 0;
595#endif 630 }
596 } 631 }
597 632
598 fbinfo->var.transp.offset = 0; 633 fbinfo->var.transp.offset = 0;
599 634
600#if (LCD_BPP == 24) 635 if (info->lcd_bpp == 24) {
601 fbinfo->var.red.length = 8; 636 fbinfo->var.red.length = 8;
602 fbinfo->var.green.length = 8; 637 fbinfo->var.green.length = 8;
603 fbinfo->var.blue.length = 8; 638 fbinfo->var.blue.length = 8;
604#else 639 } else {
605 fbinfo->var.red.length = 5; 640 fbinfo->var.red.length = 5;
606 fbinfo->var.green.length = 6; 641 fbinfo->var.green.length = 6;
607 fbinfo->var.blue.length = 5; 642 fbinfo->var.blue.length = 5;
608#endif 643 }
609 644
610 fbinfo->var.transp.length = 0; 645 fbinfo->var.transp.length = 0;
611 646
612 fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * LCD_BPP / 8 647 active_video_mem_offset = ((U_LINE / 2) * LCD_X_RES * (info->lcd_bpp / 8));
613 + ACTIVE_VIDEO_MEM_OFFSET; 648
649 fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * info->lcd_bpp / 8
650 + active_video_mem_offset;
614 651
615 fbinfo->fix.line_length = fbinfo->var.xres_virtual * 652 fbinfo->fix.line_length = fbinfo->var.xres_virtual *
616 fbinfo->var.bits_per_pixel / 8; 653 fbinfo->var.bits_per_pixel / 8;
@@ -629,8 +666,8 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
629 goto out3; 666 goto out3;
630 } 667 }
631 668
632 fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET; 669 fbinfo->screen_base = (void *)info->fb_buffer + active_video_mem_offset;
633 fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET; 670 fbinfo->fix.smem_start = (int)info->fb_buffer + active_video_mem_offset;
634 671
635 fbinfo->fbops = &bfin_lq035q1_fb_ops; 672 fbinfo->fbops = &bfin_lq035q1_fb_ops;
636 673
@@ -643,7 +680,8 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
643 goto out4; 680 goto out4;
644 } 681 }
645 682
646 ret = bfin_lq035q1_request_ports(pdev); 683 ret = bfin_lq035q1_request_ports(pdev,
684 info->disp_info->ppi_mode == USE_RGB565_16_BIT_PPI);
647 if (ret) { 685 if (ret) {
648 dev_err(&pdev->dev, "couldn't request gpio port\n"); 686 dev_err(&pdev->dev, "couldn't request gpio port\n");
649 goto out6; 687 goto out6;
@@ -693,7 +731,7 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
693 } 731 }
694 732
695 dev_info(&pdev->dev, "%dx%d %d-bit RGB FrameBuffer initialized\n", 733 dev_info(&pdev->dev, "%dx%d %d-bit RGB FrameBuffer initialized\n",
696 LCD_X_RES, LCD_Y_RES, LCD_BPP); 734 LCD_X_RES, LCD_Y_RES, info->lcd_bpp);
697 735
698 return 0; 736 return 0;
699 737
@@ -705,7 +743,8 @@ static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
705 out8: 743 out8:
706 free_irq(info->irq, info); 744 free_irq(info->irq, info);
707 out7: 745 out7:
708 bfin_lq035q1_free_ports(); 746 bfin_lq035q1_free_ports(info->disp_info->ppi_mode ==
747 USE_RGB565_16_BIT_PPI);
709 out6: 748 out6:
710 fb_dealloc_cmap(&fbinfo->cmap); 749 fb_dealloc_cmap(&fbinfo->cmap);
711 out4: 750 out4:
@@ -742,7 +781,8 @@ static int __devexit bfin_lq035q1_remove(struct platform_device *pdev)
742 781
743 fb_dealloc_cmap(&fbinfo->cmap); 782 fb_dealloc_cmap(&fbinfo->cmap);
744 783
745 bfin_lq035q1_free_ports(); 784 bfin_lq035q1_free_ports(info->disp_info->ppi_mode ==
785 USE_RGB565_16_BIT_PPI);
746 786
747 platform_set_drvdata(pdev, NULL); 787 platform_set_drvdata(pdev, NULL);
748 framebuffer_release(fbinfo); 788 framebuffer_release(fbinfo);
@@ -781,7 +821,7 @@ static int bfin_lq035q1_resume(struct device *dev)
781 821
782 bfin_lq035q1_config_dma(info); 822 bfin_lq035q1_config_dma(info);
783 bfin_lq035q1_config_ppi(info); 823 bfin_lq035q1_config_ppi(info);
784 bfin_lq035q1_init_timers(); 824 bfin_lq035q1_init_timers(info);
785 825
786 /* start dma */ 826 /* start dma */
787 enable_dma(CH_PPI); 827 enable_dma(CH_PPI);
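
With the compile-time USE_RGB565_*/USE_RGB888_* selection gone, the PPI mode becomes per-board platform data that bfin_lq035q1_calc_timing() switches on at probe time. A hedged sketch of a board file making that choice; the field names follow the driver's disp_info usage, but the struct layout shown here is assumed, not quoted:

	static struct bfin_lq035q1fb_disp_info board_lq035q1_info = {
		.mode		= LQ035_NORM | LQ035_RGB,
		.ppi_mode	= USE_RGB565_8_BIT_PPI,	/* 16bpp over the 8-bit PPI */
	};
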
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index c2ec3dcd4e91..7a50272eaab9 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -420,7 +420,9 @@ static irqreturn_t bfin_t350mcqb_irq_error(int irq, void *dev_id)
420 420
421static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) 421static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
422{ 422{
423#ifndef NO_BL_SUPPORT
423 struct backlight_properties props; 424 struct backlight_properties props;
425#endif
424 struct bfin_t350mcqbfb_info *info; 426 struct bfin_t350mcqbfb_info *info;
425 struct fb_info *fbinfo; 427 struct fb_info *fbinfo;
426 int ret; 428 int ret;
@@ -550,7 +552,8 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
550 printk(KERN_ERR DRIVER_NAME 552 printk(KERN_ERR DRIVER_NAME
551 ": unable to register backlight.\n"); 553 ": unable to register backlight.\n");
552 ret = -EINVAL; 554 ret = -EINVAL;
553 goto out9; 555 unregister_framebuffer(fbinfo);
556 goto out8;
554 } 557 }
555 558
556 lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops); 559 lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops);
@@ -559,8 +562,6 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
559 562
560 return 0; 563 return 0;
561 564
562out9:
563 unregister_framebuffer(fbinfo);
564out8: 565out8:
565 free_irq(info->irq, info); 566 free_irq(info->irq, info);
566out7: 567out7:
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 43320925c4ce..2c371c07f0da 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -376,8 +376,11 @@ static const struct of_device_id bw2_match[] = {
376MODULE_DEVICE_TABLE(of, bw2_match); 376MODULE_DEVICE_TABLE(of, bw2_match);
377 377
378static struct of_platform_driver bw2_driver = { 378static struct of_platform_driver bw2_driver = {
379 .name = "bw2", 379 .driver = {
380 .match_table = bw2_match, 380 .name = "bw2",
381 .owner = THIS_MODULE,
382 .of_match_table = bw2_match,
383 },
381 .probe = bw2_probe, 384 .probe = bw2_probe,
382 .remove = __devexit_p(bw2_remove), 385 .remove = __devexit_p(bw2_remove),
383}; 386};
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index 77a040af20a7..d12e05b6e63f 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -596,8 +596,11 @@ static const struct of_device_id cg14_match[] = {
596MODULE_DEVICE_TABLE(of, cg14_match); 596MODULE_DEVICE_TABLE(of, cg14_match);
597 597
598static struct of_platform_driver cg14_driver = { 598static struct of_platform_driver cg14_driver = {
599 .name = "cg14", 599 .driver = {
600 .match_table = cg14_match, 600 .name = "cg14",
601 .owner = THIS_MODULE,
602 .of_match_table = cg14_match,
603 },
601 .probe = cg14_probe, 604 .probe = cg14_probe,
602 .remove = __devexit_p(cg14_remove), 605 .remove = __devexit_p(cg14_remove),
603}; 606};
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index 30eedf79322c..b98f93f7f663 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -463,8 +463,11 @@ static const struct of_device_id cg3_match[] = {
463MODULE_DEVICE_TABLE(of, cg3_match); 463MODULE_DEVICE_TABLE(of, cg3_match);
464 464
465static struct of_platform_driver cg3_driver = { 465static struct of_platform_driver cg3_driver = {
466 .name = "cg3", 466 .driver = {
467 .match_table = cg3_match, 467 .name = "cg3",
468 .owner = THIS_MODULE,
469 .of_match_table = cg3_match,
470 },
468 .probe = cg3_probe, 471 .probe = cg3_probe,
469 .remove = __devexit_p(cg3_remove), 472 .remove = __devexit_p(cg3_remove),
470}; 473};
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index 6d0fcb43696e..480d761a27a8 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -740,7 +740,7 @@ static void cg6_unmap_regs(struct of_device *op, struct fb_info *info,
740static int __devinit cg6_probe(struct of_device *op, 740static int __devinit cg6_probe(struct of_device *op,
741 const struct of_device_id *match) 741 const struct of_device_id *match)
742{ 742{
743 struct device_node *dp = op->node; 743 struct device_node *dp = op->dev.of_node;
744 struct fb_info *info; 744 struct fb_info *info;
745 struct cg6_par *par; 745 struct cg6_par *par;
746 int linebytes, err; 746 int linebytes, err;
@@ -856,8 +856,11 @@ static const struct of_device_id cg6_match[] = {
856MODULE_DEVICE_TABLE(of, cg6_match); 856MODULE_DEVICE_TABLE(of, cg6_match);
857 857
858static struct of_platform_driver cg6_driver = { 858static struct of_platform_driver cg6_driver = {
859 .name = "cg6", 859 .driver = {
860 .match_table = cg6_match, 860 .name = "cg6",
861 .owner = THIS_MODULE,
862 .of_match_table = cg6_match,
863 },
861 .probe = cg6_probe, 864 .probe = cg6_probe,
862 .remove = __devexit_p(cg6_remove), 865 .remove = __devexit_p(cg6_remove),
863}; 866};
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 8d244ba0f601..cad7d45c8bac 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -36,7 +36,9 @@
36#define DRIVER_NAME "da8xx_lcdc" 36#define DRIVER_NAME "da8xx_lcdc"
37 37
38/* LCD Status Register */ 38/* LCD Status Register */
39#define LCD_END_OF_FRAME1 BIT(9)
39#define LCD_END_OF_FRAME0 BIT(8) 40#define LCD_END_OF_FRAME0 BIT(8)
41#define LCD_PL_LOAD_DONE BIT(6)
40#define LCD_FIFO_UNDERFLOW BIT(5) 42#define LCD_FIFO_UNDERFLOW BIT(5)
41#define LCD_SYNC_LOST BIT(2) 43#define LCD_SYNC_LOST BIT(2)
42 44
@@ -58,11 +60,13 @@
58#define LCD_PALETTE_LOAD_MODE(x) ((x) << 20) 60#define LCD_PALETTE_LOAD_MODE(x) ((x) << 20)
59#define PALETTE_AND_DATA 0x00 61#define PALETTE_AND_DATA 0x00
60#define PALETTE_ONLY 0x01 62#define PALETTE_ONLY 0x01
63#define DATA_ONLY 0x02
61 64
62#define LCD_MONO_8BIT_MODE BIT(9) 65#define LCD_MONO_8BIT_MODE BIT(9)
63#define LCD_RASTER_ORDER BIT(8) 66#define LCD_RASTER_ORDER BIT(8)
64#define LCD_TFT_MODE BIT(7) 67#define LCD_TFT_MODE BIT(7)
65#define LCD_UNDERFLOW_INT_ENA BIT(6) 68#define LCD_UNDERFLOW_INT_ENA BIT(6)
69#define LCD_PL_ENABLE BIT(4)
66#define LCD_MONOCHROME_MODE BIT(1) 70#define LCD_MONOCHROME_MODE BIT(1)
67#define LCD_RASTER_ENABLE BIT(0) 71#define LCD_RASTER_ENABLE BIT(0)
68#define LCD_TFT_ALT_ENABLE BIT(23) 72#define LCD_TFT_ALT_ENABLE BIT(23)
@@ -87,6 +91,10 @@
87#define LCD_DMA_CTRL_REG 0x40 91#define LCD_DMA_CTRL_REG 0x40
88#define LCD_DMA_FRM_BUF_BASE_ADDR_0_REG 0x44 92#define LCD_DMA_FRM_BUF_BASE_ADDR_0_REG 0x44
89#define LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG 0x48 93#define LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG 0x48
94#define LCD_DMA_FRM_BUF_BASE_ADDR_1_REG 0x4C
95#define LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG 0x50
96
97#define LCD_NUM_BUFFERS 2
90 98
91#define WSI_TIMEOUT 50 99#define WSI_TIMEOUT 50
92#define PALETTE_SIZE 256 100#define PALETTE_SIZE 256
@@ -111,13 +119,20 @@ static inline void lcdc_write(unsigned int val, unsigned int addr)
111struct da8xx_fb_par { 119struct da8xx_fb_par {
112 resource_size_t p_palette_base; 120 resource_size_t p_palette_base;
113 unsigned char *v_palette_base; 121 unsigned char *v_palette_base;
122 dma_addr_t vram_phys;
123 unsigned long vram_size;
124 void *vram_virt;
125 unsigned int dma_start;
126 unsigned int dma_end;
114 struct clk *lcdc_clk; 127 struct clk *lcdc_clk;
115 int irq; 128 int irq;
116 unsigned short pseudo_palette[16]; 129 unsigned short pseudo_palette[16];
117 unsigned int databuf_sz;
118 unsigned int palette_sz; 130 unsigned int palette_sz;
119 unsigned int pxl_clk; 131 unsigned int pxl_clk;
120 int blank; 132 int blank;
133 wait_queue_head_t vsync_wait;
134 int vsync_flag;
135 int vsync_timeout;
121#ifdef CONFIG_CPU_FREQ 136#ifdef CONFIG_CPU_FREQ
122 struct notifier_block freq_transition; 137 struct notifier_block freq_transition;
123#endif 138#endif
@@ -148,9 +163,9 @@ static struct fb_fix_screeninfo da8xx_fb_fix __devinitdata = {
148 .type = FB_TYPE_PACKED_PIXELS, 163 .type = FB_TYPE_PACKED_PIXELS,
149 .type_aux = 0, 164 .type_aux = 0,
150 .visual = FB_VISUAL_PSEUDOCOLOR, 165 .visual = FB_VISUAL_PSEUDOCOLOR,
151 .xpanstep = 1, 166 .xpanstep = 0,
152 .ypanstep = 1, 167 .ypanstep = 1,
153 .ywrapstep = 1, 168 .ywrapstep = 0,
154 .accel = FB_ACCEL_NONE 169 .accel = FB_ACCEL_NONE
155}; 170};
156 171
@@ -221,22 +236,48 @@ static inline void lcd_disable_raster(void)
221 236
222static void lcd_blit(int load_mode, struct da8xx_fb_par *par) 237static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
223{ 238{
224 u32 tmp = par->p_palette_base + par->databuf_sz - 4; 239 u32 start;
225 u32 reg; 240 u32 end;
241 u32 reg_ras;
242 u32 reg_dma;
243
244 /* init reg to clear PLM (loading mode) fields */
245 reg_ras = lcdc_read(LCD_RASTER_CTRL_REG);
246 reg_ras &= ~(3 << 20);
247
248 reg_dma = lcdc_read(LCD_DMA_CTRL_REG);
249
250 if (load_mode == LOAD_DATA) {
251 start = par->dma_start;
252 end = par->dma_end;
253
254 reg_ras |= LCD_PALETTE_LOAD_MODE(DATA_ONLY);
255 reg_dma |= LCD_END_OF_FRAME_INT_ENA;
256 reg_dma |= LCD_DUAL_FRAME_BUFFER_ENABLE;
257
258 lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
259 lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
260 lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
261 lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
262 } else if (load_mode == LOAD_PALETTE) {
263 start = par->p_palette_base;
264 end = start + par->palette_sz - 1;
265
266 reg_ras |= LCD_PALETTE_LOAD_MODE(PALETTE_ONLY);
267 reg_ras |= LCD_PL_ENABLE;
268
269 lcdc_write(start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
270 lcdc_write(end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
271 }
226 272
227 /* Update the databuf in the hw. */ 273 lcdc_write(reg_dma, LCD_DMA_CTRL_REG);
228 lcdc_write(par->p_palette_base, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); 274 lcdc_write(reg_ras, LCD_RASTER_CTRL_REG);
229 lcdc_write(tmp, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
230 275
231 /* Start the DMA. */ 276 /*
232 reg = lcdc_read(LCD_RASTER_CTRL_REG); 277 * The Raster enable bit must be set after all other control fields are
233 reg &= ~(3 << 20); 278 * set.
234 if (load_mode == LOAD_DATA) 279 */
235 reg |= LCD_PALETTE_LOAD_MODE(PALETTE_AND_DATA); 280 lcd_enable_raster();
236 else if (load_mode == LOAD_PALETTE)
237 reg |= LCD_PALETTE_LOAD_MODE(PALETTE_ONLY);
238
239 lcdc_write(reg, LCD_RASTER_CTRL_REG);
240} 281}
241 282
242/* Configure the Burst Size of DMA */ 283/* Configure the Burst Size of DMA */
@@ -368,12 +409,8 @@ static int lcd_cfg_display(const struct lcd_ctrl_config *cfg)
368static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height, 409static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
369 u32 bpp, u32 raster_order) 410 u32 bpp, u32 raster_order)
370{ 411{
371 u32 bpl, reg; 412 u32 reg;
372 413
373 /* Disable Dual Frame Buffer. */
374 reg = lcdc_read(LCD_DMA_CTRL_REG);
375 lcdc_write(reg & ~LCD_DUAL_FRAME_BUFFER_ENABLE,
376 LCD_DMA_CTRL_REG);
377 /* Set the Panel Width */ 414 /* Set the Panel Width */
378 /* Pixels per line = (PPL + 1)*16 */ 415 /* Pixels per line = (PPL + 1)*16 */
379 /*0x3F in bits 4..9 gives max horizontal resolution = 1024 pixels*/ 416
@@ -410,9 +447,6 @@ static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
410 return -EINVAL; 447 return -EINVAL;
411 } 448 }
412 449
413 bpl = width * bpp / 8;
414 par->databuf_sz = height * bpl + par->palette_sz;
415
416 return 0; 450 return 0;
417} 451}
418 452
@@ -421,8 +455,9 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
421 struct fb_info *info) 455 struct fb_info *info)
422{ 456{
423 struct da8xx_fb_par *par = info->par; 457 struct da8xx_fb_par *par = info->par;
424 unsigned short *palette = (unsigned short *)par->v_palette_base; 458 unsigned short *palette = (unsigned short *) par->v_palette_base;
425 u_short pal; 459 u_short pal;
460 int update_hw = 0;
426 461
427 if (regno > 255) 462 if (regno > 255)
428 return 1; 463 return 1;
@@ -439,8 +474,10 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
439 pal |= (green & 0x00f0); 474 pal |= (green & 0x00f0);
440 pal |= (blue & 0x000f); 475 pal |= (blue & 0x000f);
441 476
442 palette[regno] = pal; 477 if (palette[regno] != pal) {
443 478 update_hw = 1;
479 palette[regno] = pal;
480 }
444 } else if ((info->var.bits_per_pixel == 16) && regno < 16) { 481 } else if ((info->var.bits_per_pixel == 16) && regno < 16) {
445 red >>= (16 - info->var.red.length); 482 red >>= (16 - info->var.red.length);
446 red <<= info->var.red.offset; 483 red <<= info->var.red.offset;
@@ -453,9 +490,16 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
453 490
454 par->pseudo_palette[regno] = red | green | blue; 491 par->pseudo_palette[regno] = red | green | blue;
455 492
456 palette[0] = 0x4000; 493 if (palette[0] != 0x4000) {
494 update_hw = 1;
495 palette[0] = 0x4000;
496 }
457 } 497 }
458 498
499 /* Update the palette in the h/w as needed. */
500 if (update_hw)
501 lcd_blit(LOAD_PALETTE, par);
502
459 return 0; 503 return 0;
460} 504}
461 505
@@ -541,15 +585,54 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
541 585
542static irqreturn_t lcdc_irq_handler(int irq, void *arg) 586static irqreturn_t lcdc_irq_handler(int irq, void *arg)
543{ 587{
588 struct da8xx_fb_par *par = arg;
544 u32 stat = lcdc_read(LCD_STAT_REG); 589 u32 stat = lcdc_read(LCD_STAT_REG);
590 u32 reg_ras;
545 591
546 if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) { 592 if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
547 lcd_disable_raster(); 593 lcd_disable_raster();
548 lcdc_write(stat, LCD_STAT_REG); 594 lcdc_write(stat, LCD_STAT_REG);
549 lcd_enable_raster(); 595 lcd_enable_raster();
550 } else 596 } else if (stat & LCD_PL_LOAD_DONE) {
597 /*
598 * The raster must be disabled before changing the state of any
599 * control bit, and also before clearing the PL loading interrupt
600 * via the following write to the status register; clearing it
601 * afterwards results in multiple PL done interrupts.
602 */
603 lcd_disable_raster();
604
551 lcdc_write(stat, LCD_STAT_REG); 605 lcdc_write(stat, LCD_STAT_REG);
552 606
607 /* Disable PL completion interrupt */
608 reg_ras = lcdc_read(LCD_RASTER_CTRL_REG);
609 reg_ras &= ~LCD_PL_ENABLE;
610 lcdc_write(reg_ras, LCD_RASTER_CTRL_REG);
611
612 /* Setup and start data loading mode */
613 lcd_blit(LOAD_DATA, par);
614 } else {
615 lcdc_write(stat, LCD_STAT_REG);
616
617 if (stat & LCD_END_OF_FRAME0) {
618 lcdc_write(par->dma_start,
619 LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
620 lcdc_write(par->dma_end,
621 LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
622 par->vsync_flag = 1;
623 wake_up_interruptible(&par->vsync_wait);
624 }
625
626 if (stat & LCD_END_OF_FRAME1) {
627 lcdc_write(par->dma_start,
628 LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
629 lcdc_write(par->dma_end,
630 LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
631 par->vsync_flag = 1;
632 wake_up_interruptible(&par->vsync_wait);
633 }
634 }
635
553 return IRQ_HANDLED; 636 return IRQ_HANDLED;
554} 637}
555 638
@@ -654,9 +737,10 @@ static int __devexit fb_remove(struct platform_device *dev)
654 737
655 unregister_framebuffer(info); 738 unregister_framebuffer(info);
656 fb_dealloc_cmap(&info->cmap); 739 fb_dealloc_cmap(&info->cmap);
657 dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE, 740 dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base,
658 info->screen_base - PAGE_SIZE, 741 par->p_palette_base);
659 info->fix.smem_start); 742 dma_free_coherent(NULL, par->vram_size, par->vram_virt,
743 par->vram_phys);
660 free_irq(par->irq, par); 744 free_irq(par->irq, par);
661 clk_disable(par->lcdc_clk); 745 clk_disable(par->lcdc_clk);
662 clk_put(par->lcdc_clk); 746 clk_put(par->lcdc_clk);
@@ -668,6 +752,39 @@ static int __devexit fb_remove(struct platform_device *dev)
668 return 0; 752 return 0;
669} 753}
670 754
755/*
756 * Function to wait for vertical sync, which for this LCD peripheral
757 * translates into waiting for the current raster frame to complete.
758 */
759static int fb_wait_for_vsync(struct fb_info *info)
760{
761 struct da8xx_fb_par *par = info->par;
762 int ret;
763
764 /*
765 * Set the flag to 0 and wait for the ISR to set it to 1. It would seem
766 * there is a race condition here where the ISR could have occurred just
767 * before or just after this set. But since we are only coarsely waiting
768 * for a frame to complete, that's OK. i.e. if the frame completed
769 * just before this code executed then we have to wait another full
770 * frame time but there is no way to avoid such a situation. On the
771 * other hand if the frame completed just after then we don't need
772 * to wait long at all. Either way we are guaranteed to return to the
773 * user immediately after a frame completion which is all that is
774 * required.
775 */
776 par->vsync_flag = 0;
777 ret = wait_event_interruptible_timeout(par->vsync_wait,
778 par->vsync_flag != 0,
779 par->vsync_timeout);
780 if (ret < 0)
781 return ret;
782 if (ret == 0)
783 return -ETIMEDOUT;
784
785 return 0;
786}
787
671static int fb_ioctl(struct fb_info *info, unsigned int cmd, 788static int fb_ioctl(struct fb_info *info, unsigned int cmd,
672 unsigned long arg) 789 unsigned long arg)
673{ 790{
@@ -697,6 +814,8 @@ static int fb_ioctl(struct fb_info *info, unsigned int cmd,
697 sync_arg.pulse_width, 814 sync_arg.pulse_width,
698 sync_arg.front_porch); 815 sync_arg.front_porch);
699 break; 816 break;
817 case FBIO_WAITFORVSYNC:
818 return fb_wait_for_vsync(info);
700 default: 819 default:
701 return -EINVAL; 820 return -EINVAL;
702 } 821 }
@@ -732,10 +851,47 @@ static int cfb_blank(int blank, struct fb_info *info)
732 return ret; 851 return ret;
733} 852}
734 853
854/*
855 * Set new x,y offsets in the virtual display for the visible area and switch
856 * to the new mode.
857 */
858static int da8xx_pan_display(struct fb_var_screeninfo *var,
859 struct fb_info *fbi)
860{
861 int ret = 0;
862 struct fb_var_screeninfo new_var;
863 struct da8xx_fb_par *par = fbi->par;
864 struct fb_fix_screeninfo *fix = &fbi->fix;
865 unsigned int end;
866 unsigned int start;
867
868 if (var->xoffset != fbi->var.xoffset ||
869 var->yoffset != fbi->var.yoffset) {
870 memcpy(&new_var, &fbi->var, sizeof(new_var));
871 new_var.xoffset = var->xoffset;
872 new_var.yoffset = var->yoffset;
873 if (fb_check_var(&new_var, fbi))
874 ret = -EINVAL;
875 else {
876 memcpy(&fbi->var, &new_var, sizeof(new_var));
877
878 start = fix->smem_start +
879 new_var.yoffset * fix->line_length +
880 new_var.xoffset * var->bits_per_pixel / 8;
881 end = start + var->yres * fix->line_length - 1;
882 par->dma_start = start;
883 par->dma_end = end;
884 }
885 }
886
887 return ret;
888}
889
735static struct fb_ops da8xx_fb_ops = { 890static struct fb_ops da8xx_fb_ops = {
736 .owner = THIS_MODULE, 891 .owner = THIS_MODULE,
737 .fb_check_var = fb_check_var, 892 .fb_check_var = fb_check_var,
738 .fb_setcolreg = fb_setcolreg, 893 .fb_setcolreg = fb_setcolreg,
894 .fb_pan_display = da8xx_pan_display,
739 .fb_ioctl = fb_ioctl, 895 .fb_ioctl = fb_ioctl,
740 .fb_fillrect = cfb_fillrect, 896 .fb_fillrect = cfb_fillrect,
741 .fb_copyarea = cfb_copyarea, 897 .fb_copyarea = cfb_copyarea,
@@ -829,40 +985,53 @@ static int __init fb_probe(struct platform_device *device)
829 } 985 }
830 986
831 /* allocate frame buffer */ 987 /* allocate frame buffer */
832 da8xx_fb_info->screen_base = dma_alloc_coherent(NULL, 988 par->vram_size = lcdc_info->width * lcdc_info->height * lcd_cfg->bpp;
833 par->databuf_sz + PAGE_SIZE, 989 par->vram_size = PAGE_ALIGN(par->vram_size/8);
834 (resource_size_t *) 990 par->vram_size = par->vram_size * LCD_NUM_BUFFERS;
835 &da8xx_fb_info->fix.smem_start, 991
836 GFP_KERNEL | GFP_DMA); 992 par->vram_virt = dma_alloc_coherent(NULL,
837 993 par->vram_size,
838 if (!da8xx_fb_info->screen_base) { 994 (resource_size_t *) &par->vram_phys,
995 GFP_KERNEL | GFP_DMA);
996 if (!par->vram_virt) {
839 dev_err(&device->dev, 997 dev_err(&device->dev,
840 "GLCD: kmalloc for frame buffer failed\n"); 998 "GLCD: kmalloc for frame buffer failed\n");
841 ret = -EINVAL; 999 ret = -EINVAL;
842 goto err_release_fb; 1000 goto err_release_fb;
843 } 1001 }
844 1002
845 /* move palette base pointer by (PAGE_SIZE - palette_sz) bytes */ 1003 da8xx_fb_info->screen_base = (char __iomem *) par->vram_virt;
846 par->v_palette_base = da8xx_fb_info->screen_base + 1004 da8xx_fb_fix.smem_start = par->vram_phys;
847 (PAGE_SIZE - par->palette_sz); 1005 da8xx_fb_fix.smem_len = par->vram_size;
848 par->p_palette_base = da8xx_fb_info->fix.smem_start + 1006 da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8;
849 (PAGE_SIZE - par->palette_sz); 1007
850 1008 par->dma_start = par->vram_phys;
851 /* the rest of the frame buffer is pixel data */ 1009 par->dma_end = par->dma_start + lcdc_info->height *
852 da8xx_fb_info->screen_base = par->v_palette_base + par->palette_sz; 1010 da8xx_fb_fix.line_length - 1;
853 da8xx_fb_fix.smem_start = par->p_palette_base + par->palette_sz; 1011
854 da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz; 1012 /* allocate palette buffer */
855 da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8; 1013 par->v_palette_base = dma_alloc_coherent(NULL,
1014 PALETTE_SIZE,
1015 (resource_size_t *)
1016 &par->p_palette_base,
1017 GFP_KERNEL | GFP_DMA);
1018 if (!par->v_palette_base) {
1019 dev_err(&device->dev,
1020 "GLCD: kmalloc for palette buffer failed\n");
1021 ret = -EINVAL;
1022 goto err_release_fb_mem;
1023 }
1024 memset(par->v_palette_base, 0, PALETTE_SIZE);
856 1025
857 par->irq = platform_get_irq(device, 0); 1026 par->irq = platform_get_irq(device, 0);
858 if (par->irq < 0) { 1027 if (par->irq < 0) {
859 ret = -ENOENT; 1028 ret = -ENOENT;
860 goto err_release_fb_mem; 1029 goto err_release_pl_mem;
861 } 1030 }
862 1031
863 ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par); 1032 ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par);
864 if (ret) 1033 if (ret)
865 goto err_release_fb_mem; 1034 goto err_release_pl_mem;
866 1035
867 /* Initialize par */ 1036 /* Initialize par */
868 da8xx_fb_info->var.bits_per_pixel = lcd_cfg->bpp; 1037 da8xx_fb_info->var.bits_per_pixel = lcd_cfg->bpp;
@@ -870,8 +1039,8 @@ static int __init fb_probe(struct platform_device *device)
870 da8xx_fb_var.xres = lcdc_info->width; 1039 da8xx_fb_var.xres = lcdc_info->width;
871 da8xx_fb_var.xres_virtual = lcdc_info->width; 1040 da8xx_fb_var.xres_virtual = lcdc_info->width;
872 1041
873 da8xx_fb_var.yres = lcdc_info->height; 1042 da8xx_fb_var.yres = lcdc_info->height;
874 da8xx_fb_var.yres_virtual = lcdc_info->height; 1043 da8xx_fb_var.yres_virtual = lcdc_info->height * LCD_NUM_BUFFERS;
875 1044
876 da8xx_fb_var.grayscale = 1045 da8xx_fb_var.grayscale =
877 lcd_cfg->p_disp_panel->panel_shade == MONOCHROME ? 1 : 0; 1046 lcd_cfg->p_disp_panel->panel_shade == MONOCHROME ? 1 : 0;
@@ -892,18 +1061,18 @@ static int __init fb_probe(struct platform_device *device)
892 ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0); 1061 ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0);
893 if (ret) 1062 if (ret)
894 goto err_free_irq; 1063 goto err_free_irq;
895
896 /* First palette_sz byte of the frame buffer is the palette */
897 da8xx_fb_info->cmap.len = par->palette_sz; 1064 da8xx_fb_info->cmap.len = par->palette_sz;
898 1065
899 /* Flush the buffer to the screen. */
900 lcd_blit(LOAD_DATA, par);
901
902 /* initialize var_screeninfo */ 1066 /* initialize var_screeninfo */
903 da8xx_fb_var.activate = FB_ACTIVATE_FORCE; 1067 da8xx_fb_var.activate = FB_ACTIVATE_FORCE;
904 fb_set_var(da8xx_fb_info, &da8xx_fb_var); 1068 fb_set_var(da8xx_fb_info, &da8xx_fb_var);
905 1069
906 dev_set_drvdata(&device->dev, da8xx_fb_info); 1070 dev_set_drvdata(&device->dev, da8xx_fb_info);
1071
1072 /* initialize the vsync wait queue */
1073 init_waitqueue_head(&par->vsync_wait);
1074 par->vsync_timeout = HZ / 5;
1075
907 /* Register the Frame Buffer */ 1076 /* Register the Frame Buffer */
908 if (register_framebuffer(da8xx_fb_info) < 0) { 1077 if (register_framebuffer(da8xx_fb_info) < 0) {
909 dev_err(&device->dev, 1078 dev_err(&device->dev,
@@ -919,10 +1088,6 @@ static int __init fb_probe(struct platform_device *device)
919 goto err_cpu_freq; 1088 goto err_cpu_freq;
920 } 1089 }
921#endif 1090#endif
922
923 /* enable raster engine */
924 lcd_enable_raster();
925
926 return 0; 1091 return 0;
927 1092
928#ifdef CONFIG_CPU_FREQ 1093#ifdef CONFIG_CPU_FREQ
@@ -936,10 +1101,12 @@ err_dealloc_cmap:
936err_free_irq: 1101err_free_irq:
937 free_irq(par->irq, par); 1102 free_irq(par->irq, par);
938 1103
1104err_release_pl_mem:
1105 dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base,
1106 par->p_palette_base);
1107
939err_release_fb_mem: 1108err_release_fb_mem:
940 dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE, 1109 dma_free_coherent(NULL, par->vram_size, par->vram_virt, par->vram_phys);
941 da8xx_fb_info->screen_base - PAGE_SIZE,
942 da8xx_fb_info->fix.smem_start);
943 1110
944err_release_fb: 1111err_release_fb:
945 framebuffer_release(da8xx_fb_info); 1112 framebuffer_release(da8xx_fb_info);
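With da8xx_pan_display() wired into fb_ops and FBIO_WAITFORVSYNC handled in fb_ioctl(), userspace gets tear-free double buffering across the two DMA buffers (LCD_NUM_BUFFERS). A minimal userspace sketch, with error handling mostly elided and assuming /dev/fb0 is the da8xx device and FBIO_WAITFORVSYNC is now exported by linux/fb.h (the intelfb hunk below removes its private fallback definition for the same reason):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/fb.h>

	int main(void)
	{
		struct fb_var_screeninfo var;
		unsigned int crtc = 0;
		int fd = open("/dev/fb0", O_RDWR);	/* device path assumed */

		if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
			return 1;

		var.yoffset = var.yres;			/* show the second buffer;
							   yres_virtual is 2 * yres here */
		ioctl(fd, FBIOPAN_DISPLAY, &var);	/* calls da8xx_pan_display() */
		ioctl(fd, FBIO_WAITFORVSYNC, &crtc);	/* returns after the frame completes */
		return 0;
	}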
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 6113c47e095a..1105a591dcc1 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -155,25 +155,41 @@ static void fb_deferred_io_work(struct work_struct *work)
155{ 155{
156 struct fb_info *info = container_of(work, struct fb_info, 156 struct fb_info *info = container_of(work, struct fb_info,
157 deferred_work.work); 157 deferred_work.work);
158 struct list_head *node, *next;
159 struct page *cur;
160 struct fb_deferred_io *fbdefio = info->fbdefio; 158 struct fb_deferred_io *fbdefio = info->fbdefio;
159 struct page *page, *tmp_page;
160 struct list_head *node, *tmp_node;
161 struct list_head non_dirty;
162
163 INIT_LIST_HEAD(&non_dirty);
161 164
162 /* here we mkclean the pages, then do all deferred IO */ 165 /* here we mkclean the pages, then do all deferred IO */
163 mutex_lock(&fbdefio->lock); 166 mutex_lock(&fbdefio->lock);
164 list_for_each_entry(cur, &fbdefio->pagelist, lru) { 167 list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
165 lock_page(cur); 168 lock_page(page);
166 page_mkclean(cur); 169 /*
167 unlock_page(cur); 170 * The workqueue callback can be triggered after a
171 * ->page_mkwrite() call but before the PTE has been marked
172 * dirty. In this case page_mkclean() won't "rearm" the page.
173 *
174 * To avoid this, remove those "non-dirty" pages from the
175 * pagelist before calling the driver's callback, then add
176 * them back to get processed on the next work iteration.
177 * At that time, their PTEs will hopefully be dirty for real.
178 */
179 if (!page_mkclean(page))
180 list_move_tail(&page->lru, &non_dirty);
181 unlock_page(page);
168 } 182 }
169 183
170 /* driver's callback with pagelist */ 184 /* driver's callback with pagelist */
171 fbdefio->deferred_io(info, &fbdefio->pagelist); 185 fbdefio->deferred_io(info, &fbdefio->pagelist);
172 186
173 /* clear the list */ 187 /* clear the list... */
174 list_for_each_safe(node, next, &fbdefio->pagelist) { 188 list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
175 list_del(node); 189 list_del(node);
176 } 190 }
191 /* ... and add back the "non-dirty" pages to the list */
192 list_splice_tail(&non_dirty, &fbdefio->pagelist);
177 mutex_unlock(&fbdefio->lock); 193 mutex_unlock(&fbdefio->lock);
178} 194}
179 195
@@ -202,6 +218,7 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
202void fb_deferred_io_cleanup(struct fb_info *info) 218void fb_deferred_io_cleanup(struct fb_info *info)
203{ 219{
204 struct fb_deferred_io *fbdefio = info->fbdefio; 220 struct fb_deferred_io *fbdefio = info->fbdefio;
221 struct list_head *node, *tmp_node;
205 struct page *page; 222 struct page *page;
206 int i; 223 int i;
207 224
@@ -209,6 +226,13 @@ void fb_deferred_io_cleanup(struct fb_info *info)
209 cancel_delayed_work(&info->deferred_work); 226 cancel_delayed_work(&info->deferred_work);
210 flush_scheduled_work(); 227 flush_scheduled_work();
211 228
229 /* the list may still have some non-dirty pages at this point */
230 mutex_lock(&fbdefio->lock);
231 list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
232 list_del(node);
233 }
234 mutex_unlock(&fbdefio->lock);
235
212 /* clear out the mapping that we setup */ 236 /* clear out the mapping that we setup */
213 for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) { 237 for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
214 page = fb_deferred_io_page(info, i); 238 page = fb_deferred_io_page(info, i);
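For context on what this reshuffling protects: a driver opts into deferred I/O by pointing info->fbdefio at a struct fb_deferred_io before registering, and its .deferred_io callback then receives exactly the pagelist being filtered here. A hedged sketch, with the example_* names hypothetical:

	#include <linux/fb.h>

	/* receives only pages that page_mkclean() reported as genuinely dirty */
	static void example_dio(struct fb_info *info, struct list_head *pagelist)
	{
		struct page *page;

		/* push each dirty page's region out to the device */
		list_for_each_entry(page, pagelist, lru)
			example_flush(info, page->index << PAGE_SHIFT, PAGE_SIZE);
	}

	static struct fb_deferred_io example_defio = {
		.delay		= HZ / 20,	/* batch writes for 50 ms */
		.deferred_io	= example_dio,
	};

	/* in probe, before register_framebuffer():
	 *	info->fbdefio = &example_defio;
	 *	fb_deferred_io_init(info);
	 */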
diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c
index a42fabab69df..95c0227f47fc 100644
--- a/drivers/video/ffb.c
+++ b/drivers/video/ffb.c
@@ -896,7 +896,7 @@ static void ffb_init_fix(struct fb_info *info)
896static int __devinit ffb_probe(struct of_device *op, 896static int __devinit ffb_probe(struct of_device *op,
897 const struct of_device_id *match) 897 const struct of_device_id *match)
898{ 898{
899 struct device_node *dp = op->node; 899 struct device_node *dp = op->dev.of_node;
900 struct ffb_fbc __iomem *fbc; 900 struct ffb_fbc __iomem *fbc;
901 struct ffb_dac __iomem *dac; 901 struct ffb_dac __iomem *dac;
902 struct fb_info *info; 902 struct fb_info *info;
@@ -1053,8 +1053,11 @@ static const struct of_device_id ffb_match[] = {
1053MODULE_DEVICE_TABLE(of, ffb_match); 1053MODULE_DEVICE_TABLE(of, ffb_match);
1054 1054
1055static struct of_platform_driver ffb_driver = { 1055static struct of_platform_driver ffb_driver = {
1056 .name = "ffb", 1056 .driver = {
1057 .match_table = ffb_match, 1057 .name = "ffb",
1058 .owner = THIS_MODULE,
1059 .of_match_table = ffb_match,
1060 },
1058 .probe = ffb_probe, 1061 .probe = ffb_probe,
1059 .remove = __devexit_p(ffb_remove), 1062 .remove = __devexit_p(ffb_remove),
1060}; 1063};
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 994358a4f302..27455ce298b7 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -1421,7 +1421,7 @@ static ssize_t show_monitor(struct device *device,
1421static int __devinit fsl_diu_probe(struct of_device *ofdev, 1421static int __devinit fsl_diu_probe(struct of_device *ofdev,
1422 const struct of_device_id *match) 1422 const struct of_device_id *match)
1423{ 1423{
1424 struct device_node *np = ofdev->node; 1424 struct device_node *np = ofdev->dev.of_node;
1425 struct mfb_info *mfbi; 1425 struct mfb_info *mfbi;
1426 phys_addr_t dummy_ad_addr; 1426 phys_addr_t dummy_ad_addr;
1427 int ret, i, error = 0; 1427 int ret, i, error = 0;
@@ -1647,9 +1647,11 @@ static struct of_device_id fsl_diu_match[] = {
1647MODULE_DEVICE_TABLE(of, fsl_diu_match); 1647MODULE_DEVICE_TABLE(of, fsl_diu_match);
1648 1648
1649static struct of_platform_driver fsl_diu_driver = { 1649static struct of_platform_driver fsl_diu_driver = {
1650 .owner = THIS_MODULE, 1650 .driver = {
1651 .name = "fsl_diu", 1651 .name = "fsl_diu",
1652 .match_table = fsl_diu_match, 1652 .owner = THIS_MODULE,
1653 .of_match_table = fsl_diu_match,
1654 },
1653 .probe = fsl_diu_probe, 1655 .probe = fsl_diu_probe,
1654 .remove = fsl_diu_remove, 1656 .remove = fsl_diu_remove,
1655 .suspend = fsl_diu_suspend, 1657 .suspend = fsl_diu_suspend,
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index 8bbf251f83d9..af8f0f2cc782 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -106,7 +106,7 @@ static DEFINE_SPINLOCK(hga_reg_lock);
106 106
107/* Framebuffer driver structures */ 107/* Framebuffer driver structures */
108 108
109static struct fb_var_screeninfo __initdata hga_default_var = { 109static struct fb_var_screeninfo hga_default_var __devinitdata = {
110 .xres = 720, 110 .xres = 720,
111 .yres = 348, 111 .yres = 348,
112 .xres_virtual = 720, 112 .xres_virtual = 720,
@@ -120,7 +120,7 @@ static struct fb_var_screeninfo __initdata hga_default_var = {
120 .width = -1, 120 .width = -1,
121}; 121};
122 122
123static struct fb_fix_screeninfo __initdata hga_fix = { 123static struct fb_fix_screeninfo hga_fix __devinitdata = {
124 .id = "HGA", 124 .id = "HGA",
125 .type = FB_TYPE_PACKED_PIXELS, /* (not sure) */ 125 .type = FB_TYPE_PACKED_PIXELS, /* (not sure) */
126 .visual = FB_VISUAL_MONO10, 126 .visual = FB_VISUAL_MONO10,
@@ -276,7 +276,7 @@ static void hga_blank(int blank_mode)
276 spin_unlock_irqrestore(&hga_reg_lock, flags); 276 spin_unlock_irqrestore(&hga_reg_lock, flags);
277} 277}
278 278
279static int __init hga_card_detect(void) 279static int __devinit hga_card_detect(void)
280{ 280{
281 int count = 0; 281 int count = 0;
282 void __iomem *p, *q; 282 void __iomem *p, *q;
@@ -596,7 +596,7 @@ static int __devinit hgafb_probe(struct platform_device *pdev)
596 return 0; 596 return 0;
597} 597}
598 598
599static int hgafb_remove(struct platform_device *pdev) 599static int __devexit hgafb_remove(struct platform_device *pdev)
600{ 600{
601 struct fb_info *info = platform_get_drvdata(pdev); 601 struct fb_info *info = platform_get_drvdata(pdev);
602 602
@@ -621,7 +621,7 @@ static int hgafb_remove(struct platform_device *pdev)
621 621
622static struct platform_driver hgafb_driver = { 622static struct platform_driver hgafb_driver = {
623 .probe = hgafb_probe, 623 .probe = hgafb_probe,
624 .remove = hgafb_remove, 624 .remove = __devexit_p(hgafb_remove),
625 .driver = { 625 .driver = {
626 .name = "hgafb", 626 .name = "hgafb",
627 }, 627 },
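The annotation changes here and in the drivers below all follow one rule: code reachable from a hotplug probe or remove path must not live in sections that are discarded after boot. A condensed sketch of the convention (semantics per linux/init.h; the foo_* names are hypothetical):

	static int __devinit foo_probe(struct platform_device *pdev);	/* kept when CONFIG_HOTPLUG */
	static int __devexit foo_remove(struct platform_device *pdev);	/* discarded when !CONFIG_HOTPLUG */

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		/*
		 * __devexit_p() evaluates to foo_remove when the section is
		 * kept and to NULL when it is discarded, so the driver never
		 * holds a pointer into freed init memory.
		 */
		.remove	= __devexit_p(foo_remove),
	};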
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index 393f3f3d3dfe..cfb8d6451014 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -30,14 +30,14 @@
30 30
31#define WIDTH 640 31#define WIDTH 640
32 32
33static struct fb_var_screeninfo hitfb_var __initdata = { 33static struct fb_var_screeninfo hitfb_var __devinitdata = {
34 .activate = FB_ACTIVATE_NOW, 34 .activate = FB_ACTIVATE_NOW,
35 .height = -1, 35 .height = -1,
36 .width = -1, 36 .width = -1,
37 .vmode = FB_VMODE_NONINTERLACED, 37 .vmode = FB_VMODE_NONINTERLACED,
38}; 38};
39 39
40static struct fb_fix_screeninfo hitfb_fix __initdata = { 40static struct fb_fix_screeninfo hitfb_fix __devinitdata = {
41 .id = "Hitachi HD64461", 41 .id = "Hitachi HD64461",
42 .type = FB_TYPE_PACKED_PIXELS, 42 .type = FB_TYPE_PACKED_PIXELS,
43 .accel = FB_ACCEL_NONE, 43 .accel = FB_ACCEL_NONE,
@@ -417,7 +417,7 @@ err_fb:
417 return ret; 417 return ret;
418} 418}
419 419
420static int __exit hitfb_remove(struct platform_device *dev) 420static int __devexit hitfb_remove(struct platform_device *dev)
421{ 421{
422 struct fb_info *info = platform_get_drvdata(dev); 422 struct fb_info *info = platform_get_drvdata(dev);
423 423
@@ -462,7 +462,7 @@ static const struct dev_pm_ops hitfb_dev_pm_ops = {
462 462
463static struct platform_driver hitfb_driver = { 463static struct platform_driver hitfb_driver = {
464 .probe = hitfb_probe, 464 .probe = hitfb_probe,
465 .remove = __exit_p(hitfb_remove), 465 .remove = __devexit_p(hitfb_remove),
466 .driver = { 466 .driver = {
467 .name = "hitfb", 467 .name = "hitfb",
468 .owner = THIS_MODULE, 468 .owner = THIS_MODULE,
diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h
index 40984551c927..6b51175629c7 100644
--- a/drivers/video/intelfb/intelfb.h
+++ b/drivers/video/intelfb/intelfb.h
@@ -371,10 +371,6 @@ struct intelfb_info {
371 ((dinfo)->chipset == INTEL_965G) || \ 371 ((dinfo)->chipset == INTEL_965G) || \
372 ((dinfo)->chipset == INTEL_965GM)) 372 ((dinfo)->chipset == INTEL_965GM))
373 373
374#ifndef FBIO_WAITFORVSYNC
375#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
376#endif
377
378/*** function prototypes ***/ 374/*** function prototypes ***/
379 375
380extern int intelfb_var_to_depth(const struct fb_var_screeninfo *var); 376extern int intelfb_var_to_depth(const struct fb_var_screeninfo *var);
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index 1db55f128490..3d7895316eaf 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -663,8 +663,11 @@ static const struct of_device_id leo_match[] = {
663MODULE_DEVICE_TABLE(of, leo_match); 663MODULE_DEVICE_TABLE(of, leo_match);
664 664
665static struct of_platform_driver leo_driver = { 665static struct of_platform_driver leo_driver = {
666 .name = "leo", 666 .driver = {
667 .match_table = leo_match, 667 .name = "leo",
668 .owner = THIS_MODULE,
669 .of_match_table = leo_match,
670 },
668 .probe = leo_probe, 671 .probe = leo_probe,
669 .remove = __devexit_p(leo_remove), 672 .remove = __devexit_p(leo_remove),
670}; 673};
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index 8280a58a0e55..0540de4f5cb4 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -718,9 +718,11 @@ static struct of_device_id __devinitdata of_platform_mb862xx_tbl[] = {
718}; 718};
719 719
720static struct of_platform_driver of_platform_mb862xxfb_driver = { 720static struct of_platform_driver of_platform_mb862xxfb_driver = {
721 .owner = THIS_MODULE, 721 .driver = {
722 .name = DRV_NAME, 722 .name = DRV_NAME,
723 .match_table = of_platform_mb862xx_tbl, 723 .owner = THIS_MODULE,
724 .of_match_table = of_platform_mb862xx_tbl,
725 },
724 .probe = of_platform_mb862xx_probe, 726 .probe = of_platform_mb862xx_probe,
725 .remove = __devexit_p(of_platform_mb862xx_remove), 727 .remove = __devexit_p(of_platform_mb862xx_remove),
726}; 728};
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 6bf0d460a738..d4cde79ea15e 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -667,7 +667,7 @@ release_irq:
667release_regs: 667release_regs:
668 iounmap(fbi->io); 668 iounmap(fbi->io);
669release_mem_region: 669release_mem_region:
670 release_mem_region((unsigned long)fbi->mem, size); 670 release_mem_region(res->start, size);
671free_fb: 671free_fb:
672 framebuffer_release(fbinfo); 672 framebuffer_release(fbinfo);
673 return ret; 673 return ret;
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index 81440f2b9091..c85dd408a9b8 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -353,8 +353,11 @@ static const struct of_device_id p9100_match[] = {
353MODULE_DEVICE_TABLE(of, p9100_match); 353MODULE_DEVICE_TABLE(of, p9100_match);
354 354
355static struct of_platform_driver p9100_driver = { 355static struct of_platform_driver p9100_driver = {
356 .name = "p9100", 356 .driver = {
357 .match_table = p9100_match, 357 .name = "p9100",
358 .owner = THIS_MODULE,
359 .of_match_table = p9100_match,
360 },
358 .probe = p9100_probe, 361 .probe = p9100_probe,
359 .remove = __devexit_p(p9100_remove), 362 .remove = __devexit_p(p9100_remove),
360}; 363};
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index 8a204e7a5b5b..72a1f4c04732 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -536,7 +536,7 @@ static int __init platinumfb_setup(char *options)
536static int __devinit platinumfb_probe(struct of_device* odev, 536static int __devinit platinumfb_probe(struct of_device* odev,
537 const struct of_device_id *match) 537 const struct of_device_id *match)
538{ 538{
539 struct device_node *dp = odev->node; 539 struct device_node *dp = odev->dev.of_node;
540 struct fb_info *info; 540 struct fb_info *info;
541 struct fb_info_platinum *pinfo; 541 struct fb_info_platinum *pinfo;
542 volatile __u8 *fbuffer; 542 volatile __u8 *fbuffer;
@@ -679,8 +679,11 @@ static struct of_device_id platinumfb_match[] =
679 679
680static struct of_platform_driver platinum_driver = 680static struct of_platform_driver platinum_driver =
681{ 681{
682 .name = "platinumfb", 682 .driver = {
683 .match_table = platinumfb_match, 683 .name = "platinumfb",
684 .owner = THIS_MODULE,
685 .of_match_table = platinumfb_match,
686 },
684 .probe = platinumfb_probe, 687 .probe = platinumfb_probe,
685 .remove = platinumfb_remove, 688 .remove = platinumfb_remove,
686}; 689};
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 2b094dec4a56..46b430978bcc 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -631,7 +631,7 @@ static struct fb_ops s3c2410fb_ops = {
631 * cache. Once this area is remapped, all virtual memory 631 * cache. Once this area is remapped, all virtual memory
632 * access to the video memory should occur at the new region. 632 * access to the video memory should occur at the new region.
633 */ 633 */
634static int __init s3c2410fb_map_video_memory(struct fb_info *info) 634static int __devinit s3c2410fb_map_video_memory(struct fb_info *info)
635{ 635{
636 struct s3c2410fb_info *fbi = info->par; 636 struct s3c2410fb_info *fbi = info->par;
637 dma_addr_t map_dma; 637 dma_addr_t map_dma;
@@ -814,7 +814,7 @@ static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
814 814
815static char driver_name[] = "s3c2410fb"; 815static char driver_name[] = "s3c2410fb";
816 816
817static int __init s3c24xxfb_probe(struct platform_device *pdev, 817static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
818 enum s3c_drv_type drv_type) 818 enum s3c_drv_type drv_type)
819{ 819{
820 struct s3c2410fb_info *info; 820 struct s3c2410fb_info *info;
@@ -1018,7 +1018,7 @@ static int __devinit s3c2412fb_probe(struct platform_device *pdev)
1018/* 1018/*
1019 * Cleanup 1019 * Cleanup
1020 */ 1020 */
1021static int s3c2410fb_remove(struct platform_device *pdev) 1021static int __devexit s3c2410fb_remove(struct platform_device *pdev)
1022{ 1022{
1023 struct fb_info *fbinfo = platform_get_drvdata(pdev); 1023 struct fb_info *fbinfo = platform_get_drvdata(pdev);
1024 struct s3c2410fb_info *info = fbinfo->par; 1024 struct s3c2410fb_info *info = fbinfo->par;
@@ -1096,7 +1096,7 @@ static int s3c2410fb_resume(struct platform_device *dev)
1096 1096
1097static struct platform_driver s3c2410fb_driver = { 1097static struct platform_driver s3c2410fb_driver = {
1098 .probe = s3c2410fb_probe, 1098 .probe = s3c2410fb_probe,
1099 .remove = s3c2410fb_remove, 1099 .remove = __devexit_p(s3c2410fb_remove),
1100 .suspend = s3c2410fb_suspend, 1100 .suspend = s3c2410fb_suspend,
1101 .resume = s3c2410fb_resume, 1101 .resume = s3c2410fb_resume,
1102 .driver = { 1102 .driver = {
@@ -1107,7 +1107,7 @@ static struct platform_driver s3c2410fb_driver = {
1107 1107
1108static struct platform_driver s3c2412fb_driver = { 1108static struct platform_driver s3c2412fb_driver = {
1109 .probe = s3c2412fb_probe, 1109 .probe = s3c2412fb_probe,
1110 .remove = s3c2410fb_remove, 1110 .remove = __devexit_p(s3c2410fb_remove),
1111 .suspend = s3c2410fb_suspend, 1111 .suspend = s3c2410fb_suspend,
1112 .resume = s3c2410fb_resume, 1112 .resume = s3c2410fb_resume,
1113 .driver = { 1113 .driver = {
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index d4471b4c0374..dce8c97b4333 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -71,7 +71,8 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
71 "S3 Trio64UV+", "S3 Trio64V2/DX", "S3 Trio64V2/GX", 71 "S3 Trio64UV+", "S3 Trio64V2/DX", "S3 Trio64V2/GX",
72 "S3 Plato/PX", "S3 Aurora64VP", "S3 Virge", 72 "S3 Plato/PX", "S3 Aurora64VP", "S3 Virge",
73 "S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX", 73 "S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX",
74 "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P"}; 74 "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P",
75 "S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X"};
75 76
76#define CHIP_UNKNOWN 0x00 77#define CHIP_UNKNOWN 0x00
77#define CHIP_732_TRIO32 0x01 78#define CHIP_732_TRIO32 0x01
@@ -89,10 +90,14 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
89#define CHIP_356_VIRGE_GX2 0x0D 90#define CHIP_356_VIRGE_GX2 0x0D
90#define CHIP_357_VIRGE_GX2P 0x0E 91#define CHIP_357_VIRGE_GX2P 0x0E
91#define CHIP_359_VIRGE_GX2P 0x0F 92#define CHIP_359_VIRGE_GX2P 0x0F
93#define CHIP_360_TRIO3D_1X 0x10
94#define CHIP_362_TRIO3D_2X 0x11
95#define CHIP_368_TRIO3D_2X 0x12
92 96
93#define CHIP_XXX_TRIO 0x80 97#define CHIP_XXX_TRIO 0x80
94#define CHIP_XXX_TRIO64V2_DXGX 0x81 98#define CHIP_XXX_TRIO64V2_DXGX 0x81
95#define CHIP_XXX_VIRGE_DXGX 0x82 99#define CHIP_XXX_VIRGE_DXGX 0x82
100#define CHIP_36X_TRIO3D_1X_2X 0x83
96 101
97#define CHIP_UNDECIDED_FLAG 0x80 102#define CHIP_UNDECIDED_FLAG 0x80
98#define CHIP_MASK 0xFF 103#define CHIP_MASK 0xFF
@@ -324,6 +329,7 @@ static void s3fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
324 329
325static void s3_set_pixclock(struct fb_info *info, u32 pixclock) 330static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
326{ 331{
332 struct s3fb_info *par = info->par;
327 u16 m, n, r; 333 u16 m, n, r;
328 u8 regval; 334 u8 regval;
329 int rv; 335 int rv;
@@ -339,7 +345,13 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
339 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD); 345 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
340 346
341 /* Set S3 clock registers */ 347 /* Set S3 clock registers */
342 vga_wseq(NULL, 0x12, ((n - 2) | (r << 5))); 348 if (par->chip == CHIP_360_TRIO3D_1X ||
349 par->chip == CHIP_362_TRIO3D_2X ||
350 par->chip == CHIP_368_TRIO3D_2X) {
351 vga_wseq(NULL, 0x12, (n - 2) | ((r & 3) << 6)); /* n and two bits of r */
352 vga_wseq(NULL, 0x29, r >> 2); /* remaining highest bit of r */
353 } else
354 vga_wseq(NULL, 0x12, (n - 2) | (r << 5));
343 vga_wseq(NULL, 0x13, m - 2); 355 vga_wseq(NULL, 0x13, m - 2);
344 356
345 udelay(1000); 357 udelay(1000);
@@ -456,7 +468,7 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
456static int s3fb_set_par(struct fb_info *info) 468static int s3fb_set_par(struct fb_info *info)
457{ 469{
458 struct s3fb_info *par = info->par; 470 struct s3fb_info *par = info->par;
459 u32 value, mode, hmul, offset_value, screen_size, multiplex; 471 u32 value, mode, hmul, offset_value, screen_size, multiplex, dbytes;
460 u32 bpp = info->var.bits_per_pixel; 472 u32 bpp = info->var.bits_per_pixel;
461 473
462 if (bpp != 0) { 474 if (bpp != 0) {
@@ -518,7 +530,7 @@ static int s3fb_set_par(struct fb_info *info)
518 svga_wcrt_mask(0x33, 0x00, 0x08); /* no DDR ? */ 530 svga_wcrt_mask(0x33, 0x00, 0x08); /* no DDR ? */
519 svga_wcrt_mask(0x43, 0x00, 0x01); /* no DDR ? */ 531 svga_wcrt_mask(0x43, 0x00, 0x01); /* no DDR ? */
520 532
521 svga_wcrt_mask(0x5D, 0x00, 0x28); // Clear strange HSlen bits 533 svga_wcrt_mask(0x5D, 0x00, 0x28); /* Clear strange HSlen bits */
522 534
523/* svga_wcrt_mask(0x58, 0x03, 0x03); */ 535/* svga_wcrt_mask(0x58, 0x03, 0x03); */
524 536
@@ -530,10 +542,14 @@ static int s3fb_set_par(struct fb_info *info)
530 pr_debug("fb%d: offset register : %d\n", info->node, offset_value); 542 pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
531 svga_wcrt_multi(s3_offset_regs, offset_value); 543 svga_wcrt_multi(s3_offset_regs, offset_value);
532 544
533 vga_wcrt(NULL, 0x54, 0x18); /* M parameter */ 545 if (par->chip != CHIP_360_TRIO3D_1X &&
534 vga_wcrt(NULL, 0x60, 0xff); /* N parameter */ 546 par->chip != CHIP_362_TRIO3D_2X &&
535 vga_wcrt(NULL, 0x61, 0xff); /* L parameter */ 547 par->chip != CHIP_368_TRIO3D_2X) {
536 vga_wcrt(NULL, 0x62, 0xff); /* L parameter */ 548 vga_wcrt(NULL, 0x54, 0x18); /* M parameter */
549 vga_wcrt(NULL, 0x60, 0xff); /* N parameter */
550 vga_wcrt(NULL, 0x61, 0xff); /* L parameter */
551 vga_wcrt(NULL, 0x62, 0xff); /* L parameter */
552 }
537 553
538 vga_wcrt(NULL, 0x3A, 0x35); 554 vga_wcrt(NULL, 0x3A, 0x35);
539 svga_wattr(0x33, 0x00); 555 svga_wattr(0x33, 0x00);
@@ -570,6 +586,16 @@ static int s3fb_set_par(struct fb_info *info)
570 vga_wcrt(NULL, 0x66, 0x90); 586 vga_wcrt(NULL, 0x66, 0x90);
571 } 587 }
572 588
589 if (par->chip == CHIP_360_TRIO3D_1X ||
590 par->chip == CHIP_362_TRIO3D_2X ||
591 par->chip == CHIP_368_TRIO3D_2X) {
592 dbytes = info->var.xres * ((bpp+7)/8);
593 vga_wcrt(NULL, 0x91, (dbytes + 7) / 8);
594 vga_wcrt(NULL, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80);
595
596 vga_wcrt(NULL, 0x66, 0x81);
597 }
598
573 svga_wcrt_mask(0x31, 0x00, 0x40); 599 svga_wcrt_mask(0x31, 0x00, 0x40);
574 multiplex = 0; 600 multiplex = 0;
575 hmul = 1; 601 hmul = 1;
@@ -615,11 +641,13 @@ static int s3fb_set_par(struct fb_info *info)
615 break; 641 break;
616 case 3: 642 case 3:
617 pr_debug("fb%d: 8 bit pseudocolor\n", info->node); 643 pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
618 if (info->var.pixclock > 20000) { 644 svga_wcrt_mask(0x50, 0x00, 0x30);
619 svga_wcrt_mask(0x50, 0x00, 0x30); 645 if (info->var.pixclock > 20000 ||
646 par->chip == CHIP_360_TRIO3D_1X ||
647 par->chip == CHIP_362_TRIO3D_2X ||
648 par->chip == CHIP_368_TRIO3D_2X)
620 svga_wcrt_mask(0x67, 0x00, 0xF0); 649 svga_wcrt_mask(0x67, 0x00, 0xF0);
621 } else { 650 else {
622 svga_wcrt_mask(0x50, 0x00, 0x30);
623 svga_wcrt_mask(0x67, 0x10, 0xF0); 651 svga_wcrt_mask(0x67, 0x10, 0xF0);
624 multiplex = 1; 652 multiplex = 1;
625 } 653 }
@@ -634,7 +662,10 @@ static int s3fb_set_par(struct fb_info *info)
634 } else { 662 } else {
635 svga_wcrt_mask(0x50, 0x10, 0x30); 663 svga_wcrt_mask(0x50, 0x10, 0x30);
636 svga_wcrt_mask(0x67, 0x30, 0xF0); 664 svga_wcrt_mask(0x67, 0x30, 0xF0);
637 hmul = 2; 665 if (par->chip != CHIP_360_TRIO3D_1X &&
666 par->chip != CHIP_362_TRIO3D_2X &&
667 par->chip != CHIP_368_TRIO3D_2X)
668 hmul = 2;
638 } 669 }
639 break; 670 break;
640 case 5: 671 case 5:
@@ -647,7 +678,10 @@ static int s3fb_set_par(struct fb_info *info)
647 } else { 678 } else {
648 svga_wcrt_mask(0x50, 0x10, 0x30); 679 svga_wcrt_mask(0x50, 0x10, 0x30);
649 svga_wcrt_mask(0x67, 0x50, 0xF0); 680 svga_wcrt_mask(0x67, 0x50, 0xF0);
650 hmul = 2; 681 if (par->chip != CHIP_360_TRIO3D_1X &&
682 par->chip != CHIP_362_TRIO3D_2X &&
683 par->chip != CHIP_368_TRIO3D_2X)
684 hmul = 2;
651 } 685 }
652 break; 686 break;
653 case 6: 687 case 6:
@@ -866,6 +900,17 @@ static int __devinit s3_identification(int chip)
866 return CHIP_385_VIRGE_GX; 900 return CHIP_385_VIRGE_GX;
867 } 901 }
868 902
903 if (chip == CHIP_36X_TRIO3D_1X_2X) {
904 switch (vga_rcrt(NULL, 0x2f)) {
905 case 0x00:
906 return CHIP_360_TRIO3D_1X;
907 case 0x01:
908 return CHIP_362_TRIO3D_2X;
909 case 0x02:
910 return CHIP_368_TRIO3D_2X;
911 }
912 }
913
869 return CHIP_UNKNOWN; 914 return CHIP_UNKNOWN;
870} 915}
871 916
@@ -930,17 +975,32 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
930 vga_wcrt(NULL, 0x38, 0x48); 975 vga_wcrt(NULL, 0x38, 0x48);
931 vga_wcrt(NULL, 0x39, 0xA5); 976 vga_wcrt(NULL, 0x39, 0xA5);
932 977
933 /* Find how much physical memory there is on the card */ 978 /* Identify chip type */
934 /* 0x36 register is accessible even if other registers are locked */
935 regval = vga_rcrt(NULL, 0x36);
936 info->screen_size = s3_memsizes[regval >> 5] << 10;
937 info->fix.smem_len = info->screen_size;
938
939 par->chip = id->driver_data & CHIP_MASK; 979 par->chip = id->driver_data & CHIP_MASK;
940 par->rev = vga_rcrt(NULL, 0x2f); 980 par->rev = vga_rcrt(NULL, 0x2f);
941 if (par->chip & CHIP_UNDECIDED_FLAG) 981 if (par->chip & CHIP_UNDECIDED_FLAG)
942 par->chip = s3_identification(par->chip); 982 par->chip = s3_identification(par->chip);
943 983
984 /* Find how much physical memory there is on the card */
985 /* 0x36 register is accessible even if other registers are locked */
986 regval = vga_rcrt(NULL, 0x36);
987 if (par->chip == CHIP_360_TRIO3D_1X ||
988 par->chip == CHIP_362_TRIO3D_2X ||
989 par->chip == CHIP_368_TRIO3D_2X) {
990 switch ((regval & 0xE0) >> 5) {
991 case 0: /* 8MB -- only 4MB usable for display */
992 case 1: /* 4MB with 32-bit bus */
993 case 2: /* 4MB */
994 info->screen_size = 4 << 20;
995 break;
996 case 6: /* 2MB */
997 info->screen_size = 2 << 20;
998 break;
999 }
1000 } else
1001 info->screen_size = s3_memsizes[regval >> 5] << 10;
1002 info->fix.smem_len = info->screen_size;
1003
944 /* Find MCLK frequency */ 1004 /* Find MCLK frequency */
945 regval = vga_rseq(NULL, 0x10); 1005 regval = vga_rseq(NULL, 0x10);
946 par->mclk_freq = ((vga_rseq(NULL, 0x11) + 2) * 14318) / ((regval & 0x1F) + 2); 1006 par->mclk_freq = ((vga_rseq(NULL, 0x11) + 2) * 14318) / ((regval & 0x1F) + 2);
@@ -1131,6 +1191,7 @@ static struct pci_device_id s3_devices[] __devinitdata = {
1131 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A10), .driver_data = CHIP_356_VIRGE_GX2}, 1191 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A10), .driver_data = CHIP_356_VIRGE_GX2},
1132 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A11), .driver_data = CHIP_357_VIRGE_GX2P}, 1192 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A11), .driver_data = CHIP_357_VIRGE_GX2P},
1133 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P}, 1193 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P},
1194 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A13), .driver_data = CHIP_36X_TRIO3D_1X_2X},
1134 1195
1135 {0, 0, 0, 0, 0, 0, 0} 1196 {0, 0, 0, 0, 0, 0, 0}
1136}; 1197};
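The Trio3D additions scattered through s3_set_pixclock() amount to one difference in PLL register packing: the post-divider r no longer fits entirely in SR12 alongside n, so its top bit spills into SR29. Condensing the two clock hunks above into one hedged sketch (m, n and r as produced by svga_compute_pll(); the trio3d flag stands in for the three-way chip check):

	if (trio3d) {
		vga_wseq(NULL, 0x12, (n - 2) | ((r & 3) << 6));	/* n plus low 2 bits of r */
		vga_wseq(NULL, 0x29, r >> 2);			/* remaining high bit of r */
	} else {
		vga_wseq(NULL, 0x12, (n - 2) | (r << 5));	/* n plus all 3 bits of r */
	}
	vga_wseq(NULL, 0x13, m - 2);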
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index 7a3a5e28eca1..53455f295510 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -47,7 +47,7 @@ static int ywrap = 0;
47 47
48static int flatpanel_id = -1; 48static int flatpanel_id = -1;
49 49
50static struct fb_fix_screeninfo sgivwfb_fix __initdata = { 50static struct fb_fix_screeninfo sgivwfb_fix __devinitdata = {
51 .id = "SGI Vis WS FB", 51 .id = "SGI Vis WS FB",
52 .type = FB_TYPE_PACKED_PIXELS, 52 .type = FB_TYPE_PACKED_PIXELS,
53 .visual = FB_VISUAL_PSEUDOCOLOR, 53 .visual = FB_VISUAL_PSEUDOCOLOR,
@@ -57,7 +57,7 @@ static struct fb_fix_screeninfo sgivwfb_fix __initdata = {
57 .line_length = 640, 57 .line_length = 640,
58}; 58};
59 59
60static struct fb_var_screeninfo sgivwfb_var __initdata = { 60static struct fb_var_screeninfo sgivwfb_var __devinitdata = {
61 /* 640x480, 8 bpp */ 61 /* 640x480, 8 bpp */
62 .xres = 640, 62 .xres = 640,
63 .yres = 480, 63 .yres = 480,
@@ -79,7 +79,7 @@ static struct fb_var_screeninfo sgivwfb_var __initdata = {
79 .vmode = FB_VMODE_NONINTERLACED 79 .vmode = FB_VMODE_NONINTERLACED
80}; 80};
81 81
82static struct fb_var_screeninfo sgivwfb_var1600sw __initdata = { 82static struct fb_var_screeninfo sgivwfb_var1600sw __devinitdata = {
83 /* 1600x1024, 8 bpp */ 83 /* 1600x1024, 8 bpp */
84 .xres = 1600, 84 .xres = 1600,
85 .yres = 1024, 85 .yres = 1024,
@@ -825,7 +825,7 @@ fail_ioremap_regs:
825 return -ENXIO; 825 return -ENXIO;
826} 826}
827 827
828static int sgivwfb_remove(struct platform_device *dev) 828static int __devexit sgivwfb_remove(struct platform_device *dev)
829{ 829{
830 struct fb_info *info = platform_get_drvdata(dev); 830 struct fb_info *info = platform_get_drvdata(dev);
831 831
@@ -845,7 +845,7 @@ static int sgivwfb_remove(struct platform_device *dev)
845 845
846static struct platform_driver sgivwfb_driver = { 846static struct platform_driver sgivwfb_driver = {
847 .probe = sgivwfb_probe, 847 .probe = sgivwfb_probe,
848 .remove = sgivwfb_remove, 848 .remove = __devexit_p(sgivwfb_remove),
849 .driver = { 849 .driver = {
850 .name = "sgivwfb", 850 .name = "sgivwfb",
851 }, 851 },
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index a531a0f7cdf2..559bf1727a2b 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1845,7 +1845,7 @@ sisfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info)
1845 1845
1846 memset(fix, 0, sizeof(struct fb_fix_screeninfo)); 1846 memset(fix, 0, sizeof(struct fb_fix_screeninfo));
1847 1847
1848 strcpy(fix->id, ivideo->myid); 1848 strlcpy(fix->id, ivideo->myid, sizeof(fix->id));
1849 1849
1850 mutex_lock(&info->mm_lock); 1850 mutex_lock(&info->mm_lock);
1851 fix->smem_start = ivideo->video_base + ivideo->video_offset; 1851 fix->smem_start = ivideo->video_base + ivideo->video_offset;
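fb_fix_screeninfo.id is a fixed 16-byte array, so the unbounded strcpy() could overrun it if ivideo->myid were ever longer; strlcpy() bounds the copy and guarantees NUL termination. The general shape of the fix:

	char id[16];

	/* copies at most sizeof(id) - 1 bytes and always NUL-terminates */
	strlcpy(id, longer_source_string, sizeof(id));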
diff --git a/drivers/video/sunxvr1000.c b/drivers/video/sunxvr1000.c
index 23e69e834a18..489b44e8db81 100644
--- a/drivers/video/sunxvr1000.c
+++ b/drivers/video/sunxvr1000.c
@@ -114,7 +114,7 @@ static int __devinit gfb_set_fbinfo(struct gfb_info *gp)
114static int __devinit gfb_probe(struct of_device *op, 114static int __devinit gfb_probe(struct of_device *op,
115 const struct of_device_id *match) 115 const struct of_device_id *match)
116{ 116{
117 struct device_node *dp = op->node; 117 struct device_node *dp = op->dev.of_node;
118 struct fb_info *info; 118 struct fb_info *info;
119 struct gfb_info *gp; 119 struct gfb_info *gp;
120 int err; 120 int err;
@@ -199,10 +199,13 @@ static const struct of_device_id gfb_match[] = {
199MODULE_DEVICE_TABLE(of, ffb_match); 199MODULE_DEVICE_TABLE(of, ffb_match);
200 200
201static struct of_platform_driver gfb_driver = { 201static struct of_platform_driver gfb_driver = {
202 .name = "gfb",
203 .match_table = gfb_match,
204 .probe = gfb_probe, 202 .probe = gfb_probe,
205 .remove = __devexit_p(gfb_remove), 203 .remove = __devexit_p(gfb_remove),
204 .driver = {
205 .name = "gfb",
206 .owner = THIS_MODULE,
207 .of_match_table = gfb_match,
208 },
206}; 209};
207 210
208static int __init gfb_init(void) 211static int __init gfb_init(void)
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index c0c2b18fcdcf..ef7a7bd8b503 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -512,8 +512,11 @@ static const struct of_device_id tcx_match[] = {
512MODULE_DEVICE_TABLE(of, tcx_match); 512MODULE_DEVICE_TABLE(of, tcx_match);
513 513
514static struct of_platform_driver tcx_driver = { 514static struct of_platform_driver tcx_driver = {
515 .name = "tcx", 515 .driver = {
516 .match_table = tcx_match, 516 .name = "tcx",
517 .owner = THIS_MODULE,
518 .of_match_table = tcx_match,
519 },
517 .probe = tcx_probe, 520 .probe = tcx_probe,
518 .remove = __devexit_p(tcx_remove), 521 .remove = __devexit_p(tcx_remove),
519}; 522};
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index 9b5532b4de35..bc67251f1a2f 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -78,7 +78,7 @@ static void rvfree(void *mem, unsigned long size)
78 vfree(mem); 78 vfree(mem);
79} 79}
80 80
81static struct fb_var_screeninfo vfb_default __initdata = { 81static struct fb_var_screeninfo vfb_default __devinitdata = {
82 .xres = 640, 82 .xres = 640,
83 .yres = 480, 83 .yres = 480,
84 .xres_virtual = 640, 84 .xres_virtual = 640,
@@ -100,7 +100,7 @@ static struct fb_var_screeninfo vfb_default __initdata = {
100 .vmode = FB_VMODE_NONINTERLACED, 100 .vmode = FB_VMODE_NONINTERLACED,
101}; 101};
102 102
103static struct fb_fix_screeninfo vfb_fix __initdata = { 103static struct fb_fix_screeninfo vfb_fix __devinitdata = {
104 .id = "Virtual FB", 104 .id = "Virtual FB",
105 .type = FB_TYPE_PACKED_PIXELS, 105 .type = FB_TYPE_PACKED_PIXELS,
106 .visual = FB_VISUAL_PSEUDOCOLOR, 106 .visual = FB_VISUAL_PSEUDOCOLOR,
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 149c47ac7e93..28ccab44a391 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -65,7 +65,7 @@ struct vga16fb_par {
65 65
66/* --------------------------------------------------------------------- */ 66/* --------------------------------------------------------------------- */
67 67
68static struct fb_var_screeninfo vga16fb_defined __initdata = { 68static struct fb_var_screeninfo vga16fb_defined __devinitdata = {
69 .xres = 640, 69 .xres = 640,
70 .yres = 480, 70 .yres = 480,
71 .xres_virtual = 640, 71 .xres_virtual = 640,
@@ -85,7 +85,7 @@ static struct fb_var_screeninfo vga16fb_defined __initdata = {
85}; 85};
86 86
87/* name should not depend on EGA/VGA */ 87/* name should not depend on EGA/VGA */
88static struct fb_fix_screeninfo vga16fb_fix __initdata = { 88static struct fb_fix_screeninfo vga16fb_fix __devinitdata = {
89 .id = "VGA16 VGA", 89 .id = "VGA16 VGA",
90 .smem_start = VGA_FB_PHYS, 90 .smem_start = VGA_FB_PHYS,
91 .smem_len = VGA_FB_PHYS_LEN, 91 .smem_len = VGA_FB_PHYS_LEN,
@@ -1287,7 +1287,7 @@ static struct fb_ops vga16fb_ops = {
1287}; 1287};
1288 1288
1289#ifndef MODULE 1289#ifndef MODULE
1290static int vga16fb_setup(char *options) 1290static int __init vga16fb_setup(char *options)
1291{ 1291{
1292 char *this_opt; 1292 char *this_opt;
1293 1293
@@ -1393,7 +1393,7 @@ static int __devinit vga16fb_probe(struct platform_device *dev)
1393 return ret; 1393 return ret;
1394} 1394}
1395 1395
1396static int vga16fb_remove(struct platform_device *dev) 1396static int __devexit vga16fb_remove(struct platform_device *dev)
1397{ 1397{
1398 struct fb_info *info = platform_get_drvdata(dev); 1398 struct fb_info *info = platform_get_drvdata(dev);
1399 1399
@@ -1405,7 +1405,7 @@ static int vga16fb_remove(struct platform_device *dev)
1405 1405
1406static struct platform_driver vga16fb_driver = { 1406static struct platform_driver vga16fb_driver = {
1407 .probe = vga16fb_probe, 1407 .probe = vga16fb_probe,
1408 .remove = vga16fb_remove, 1408 .remove = __devexit_p(vga16fb_remove),
1409 .driver = { 1409 .driver = {
1410 .name = "vga16fb", 1410 .name = "vga16fb",
1411 }, 1411 },
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 2bc40e682f95..1082541358f0 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -578,14 +578,9 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
578 break; 578 break;
579 579
580 case VIAFB_SET_GAMMA_LUT: 580 case VIAFB_SET_GAMMA_LUT:
581 viafb_gamma_table = kmalloc(256 * sizeof(u32), GFP_KERNEL); 581 viafb_gamma_table = memdup_user(argp, 256 * sizeof(u32));
582 if (!viafb_gamma_table) 582 if (IS_ERR(viafb_gamma_table))
583 return -ENOMEM; 583 return PTR_ERR(viafb_gamma_table);
584 if (copy_from_user(viafb_gamma_table, argp,
585 256 * sizeof(u32))) {
586 kfree(viafb_gamma_table);
587 return -EFAULT;
588 }
589 viafb_set_gamma_table(viafb_bpp, viafb_gamma_table); 584 viafb_set_gamma_table(viafb_bpp, viafb_gamma_table);
590 kfree(viafb_gamma_table); 585 kfree(viafb_gamma_table);
591 break; 586 break;
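memdup_user() collapses the kmalloc()/copy_from_user()/kfree()-on-error sequence into one call. Note the error convention it imposes: failure is reported as an ERR_PTR-encoded errno rather than NULL, hence the IS_ERR()/PTR_ERR() pair above. The generic shape of the pattern:

	void *buf = memdup_user(user_ptr, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */
	/* ... use buf ... */
	kfree(buf);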
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 31b0e17ed090..e66b8b19ce5d 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -53,7 +53,7 @@ static void w100_update_enable(void);
53static void w100_update_disable(void); 53static void w100_update_disable(void);
54static void calc_hsync(struct w100fb_par *par); 54static void calc_hsync(struct w100fb_par *par);
55static void w100_init_graphic_engine(struct w100fb_par *par); 55static void w100_init_graphic_engine(struct w100fb_par *par);
56struct w100_pll_info *w100_get_xtal_table(unsigned int freq); 56struct w100_pll_info *w100_get_xtal_table(unsigned int freq) __devinit;
57 57
58/* Pseudo palette size */ 58/* Pseudo palette size */
59#define MAX_PALETTES 16 59#define MAX_PALETTES 16
@@ -782,7 +782,7 @@ out:
782} 782}
783 783
784 784
785static int w100fb_remove(struct platform_device *pdev) 785static int __devexit w100fb_remove(struct platform_device *pdev)
786{ 786{
787 struct fb_info *info = platform_get_drvdata(pdev); 787 struct fb_info *info = platform_get_drvdata(pdev);
788 struct w100fb_par *par=info->par; 788 struct w100fb_par *par=info->par;
@@ -1020,7 +1020,7 @@ static struct pll_entries {
1020 { 0 }, 1020 { 0 },
1021}; 1021};
1022 1022
1023struct w100_pll_info *w100_get_xtal_table(unsigned int freq) 1023struct w100_pll_info __devinit *w100_get_xtal_table(unsigned int freq)
1024{ 1024{
1025 struct pll_entries *pll_entry = w100_pll_tables; 1025 struct pll_entries *pll_entry = w100_pll_tables;
1026 1026
@@ -1611,7 +1611,7 @@ static void w100_vsync(void)
1611 1611
1612static struct platform_driver w100fb_driver = { 1612static struct platform_driver w100fb_driver = {
1613 .probe = w100fb_probe, 1613 .probe = w100fb_probe,
1614 .remove = w100fb_remove, 1614 .remove = __devexit_p(w100fb_remove),
1615 .suspend = w100fb_suspend, 1615 .suspend = w100fb_suspend,
1616 .resume = w100fb_resume, 1616 .resume = w100fb_resume,
1617 .driver = { 1617 .driver = {
@@ -1619,7 +1619,7 @@ static struct platform_driver w100fb_driver = {
1619 }, 1619 },
1620}; 1620};
1621 1621
1622int __devinit w100fb_init(void) 1622int __init w100fb_init(void)
1623{ 1623{
1624 return platform_driver_register(&w100fb_driver); 1624 return platform_driver_register(&w100fb_driver);
1625} 1625}
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index 3fcb83f03881..574dc54e12d4 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -423,7 +423,7 @@ xilinxfb_of_probe(struct of_device *op, const struct of_device_id *match)
 	 * To check whether the core is connected directly to DCR or PLB
 	 * interface and initialize the tft_access accordingly.
 	 */
-	p = (u32 *)of_get_property(op->node, "xlnx,dcr-splb-slave-if", NULL);
+	p = (u32 *)of_get_property(op->dev.of_node, "xlnx,dcr-splb-slave-if", NULL);
 	tft_access = p ? *p : 0;
 
 	/*
@@ -432,41 +432,41 @@ xilinxfb_of_probe(struct of_device *op, const struct of_device_id *match)
 	 */
 	if (tft_access) {
 		drvdata->flags |= PLB_ACCESS_FLAG;
-		rc = of_address_to_resource(op->node, 0, &res);
+		rc = of_address_to_resource(op->dev.of_node, 0, &res);
 		if (rc) {
 			dev_err(&op->dev, "invalid address\n");
 			goto err;
 		}
 	} else {
 		res.start = 0;
-		start = dcr_resource_start(op->node, 0);
-		drvdata->dcr_len = dcr_resource_len(op->node, 0);
-		drvdata->dcr_host = dcr_map(op->node, start, drvdata->dcr_len);
+		start = dcr_resource_start(op->dev.of_node, 0);
+		drvdata->dcr_len = dcr_resource_len(op->dev.of_node, 0);
+		drvdata->dcr_host = dcr_map(op->dev.of_node, start, drvdata->dcr_len);
 		if (!DCR_MAP_OK(drvdata->dcr_host)) {
 			dev_err(&op->dev, "invalid DCR address\n");
 			goto err;
 		}
 	}
 
-	prop = of_get_property(op->node, "phys-size", &size);
+	prop = of_get_property(op->dev.of_node, "phys-size", &size);
 	if ((prop) && (size >= sizeof(u32)*2)) {
 		pdata.screen_width_mm = prop[0];
 		pdata.screen_height_mm = prop[1];
 	}
 
-	prop = of_get_property(op->node, "resolution", &size);
+	prop = of_get_property(op->dev.of_node, "resolution", &size);
 	if ((prop) && (size >= sizeof(u32)*2)) {
 		pdata.xres = prop[0];
 		pdata.yres = prop[1];
 	}
 
-	prop = of_get_property(op->node, "virtual-resolution", &size);
+	prop = of_get_property(op->dev.of_node, "virtual-resolution", &size);
 	if ((prop) && (size >= sizeof(u32)*2)) {
 		pdata.xvirt = prop[0];
 		pdata.yvirt = prop[1];
 	}
 
-	if (of_find_property(op->node, "rotate-display", NULL))
+	if (of_find_property(op->dev.of_node, "rotate-display", NULL))
 		pdata.rotate_screen = 1;
 
 	dev_set_drvdata(&op->dev, drvdata);
@@ -492,13 +492,12 @@ static struct of_device_id xilinxfb_of_match[] __devinitdata = {
 MODULE_DEVICE_TABLE(of, xilinxfb_of_match);
 
 static struct of_platform_driver xilinxfb_of_driver = {
-	.owner = THIS_MODULE,
-	.name = DRIVER_NAME,
-	.match_table = xilinxfb_of_match,
 	.probe = xilinxfb_of_probe,
 	.remove = __devexit_p(xilinxfb_of_remove),
 	.driver = {
 		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = xilinxfb_of_match,
 	},
 };
 
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b87ba23442d2..afcfacc9bbe2 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -145,13 +145,19 @@ config KS8695_WATCHDOG
 	  Watchdog timer embedded into KS8695 processor. This will reboot your
 	  system when the timeout is reached.
 
+config HAVE_S3C2410_WATCHDOG
+	bool
+	help
+	  This will include watchdog timer support for Samsung SoCs. If
+	  you want to include watchdog support for any machine, kindly
+	  select this in the respective mach-XXXX/Kconfig file.
+
 config S3C2410_WATCHDOG
 	tristate "S3C2410 Watchdog"
-	depends on ARCH_S3C2410
+	depends on ARCH_S3C2410 || HAVE_S3C2410_WATCHDOG
 	help
-	  Watchdog timer block in the Samsung S3C2410 chips. This will
-	  reboot the system when the timer expires with the watchdog
-	  enabled.
+	  Watchdog timer block in the Samsung SoCs. This will reboot
+	  the system when the timer expires with the watchdog enabled.
 
 	  The driver is limited by the speed of the system's PCLK
 	  signal, so with reasonably fast systems (PCLK around 50-66MHz)
@@ -306,6 +312,18 @@ config MAX63XX_WATCHDOG
 	help
 	  Support for memory mapped max63{69,70,71,72,73,74} watchdog timer.
 
+config IMX2_WDT
+	tristate "IMX2+ Watchdog"
+	depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+	help
+	  This is the driver for the hardware watchdog
+	  on the Freescale IMX2 and later processors.
+	  If you have one of these processors and wish to have
+	  watchdog support enabled, say Y, otherwise say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called imx2_wdt.
+
 # AVR32 Architecture
 
 config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 5e3cb95bb0e9..72f3e2073f8e 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
 obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
 obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o
 obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o
+obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o
 
 # AVR32 Architecture
 obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 9c7ccd1e9088..9042a95fc98c 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/uaccess.h>
 #include <asm/blackfin.h>
+#include <asm/bfin_watchdog.h>
 
 #define stamp(fmt, args...) \
 	pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args)
@@ -49,24 +50,6 @@
 # define bfin_write_WDOG_STAT(x) bfin_write_WDOGA_STAT(x)
 #endif
 
-/* Bit in SWRST that indicates boot caused by watchdog */
-#define SWRST_RESET_WDOG 0x4000
-
-/* Bit in WDOG_CTL that indicates watchdog has expired (WDR0) */
-#define WDOG_EXPIRED 0x8000
-
-/* Masks for WDEV field in WDOG_CTL register */
-#define ICTL_RESET 0x0
-#define ICTL_NMI 0x2
-#define ICTL_GPI 0x4
-#define ICTL_NONE 0x6
-#define ICTL_MASK 0x6
-
-/* Masks for WDEN field in WDOG_CTL register */
-#define WDEN_MASK 0x0FF0
-#define WDEN_ENABLE 0x0000
-#define WDEN_DISABLE 0x0AD0
-
 /* some defaults */
 #define WATCHDOG_TIMEOUT 20
 
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 801ead191499..3d49671cdf5a 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -137,12 +137,12 @@ static long booke_wdt_ioctl(struct file *file,
 		if (copy_to_user((void *)arg, &ident, sizeof(ident)))
 			return -EFAULT;
 	case WDIOC_GETSTATUS:
-		return put_user(ident.options, p);
+		return put_user(0, p);
 	case WDIOC_GETBOOTSTATUS:
 		/* XXX: something is clearing TSR */
 		tmp = mfspr(SPRN_TSR) & TSR_WRS(3);
-		/* returns 1 if last reset was caused by the WDT */
-		return (tmp ? 1 : 0);
+		/* returns CARDRESET if last reset was caused by the WDT */
+		return (tmp ? WDIOF_CARDRESET : 0);
 	case WDIOC_SETOPTIONS:
 		if (get_user(tmp, p))
 			return -EINVAL;
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index ba2efce4b40e..d62b9ce8f773 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -577,7 +577,7 @@ static int __devinit cpwd_probe(struct of_device *op,
 	 * interrupt_mask register cannot be written, so no timer
 	 * interrupts can be masked within the PLD.
 	 */
-	str_prop = of_get_property(op->node, "model", NULL);
+	str_prop = of_get_property(op->dev.of_node, "model", NULL);
 	p->broken = (str_prop && !strcmp(str_prop, WD_BADMODEL));
 
 	if (!p->enabled)
@@ -677,8 +677,11 @@ static const struct of_device_id cpwd_match[] = {
 MODULE_DEVICE_TABLE(of, cpwd_match);
 
 static struct of_platform_driver cpwd_driver = {
-	.name = DRIVER_NAME,
-	.match_table = cpwd_match,
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cpwd_match,
+	},
 	.probe = cpwd_probe,
 	.remove = __devexit_p(cpwd_remove),
 };
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index d1c4e55b1db0..3f3dc093ad68 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -68,7 +68,6 @@ static spinlock_t eurwdt_lock;
 
 /*
  * You must set these - there is no sane way to probe for this board.
- * You can use eurwdt=x,y to set these now.
  */
 
 static int io = 0x3f0;
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index abdbad034a6c..ca0f4c6cf5ab 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -303,9 +303,11 @@ static const struct of_device_id gef_wdt_ids[] = {
 };
 
 static struct of_platform_driver gef_wdt_driver = {
-	.owner = THIS_MODULE,
-	.name = "gef_wdt",
-	.match_table = gef_wdt_ids,
+	.driver = {
+		.name = "gef_wdt",
+		.owner = THIS_MODULE,
+		.of_match_table = gef_wdt_ids,
+	},
 	.probe = gef_wdt_probe,
 };
 
diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c
index 5133bca5ccbe..481d1ad43464 100644
--- a/drivers/watchdog/iTCO_vendor_support.c
+++ b/drivers/watchdog/iTCO_vendor_support.c
@@ -101,13 +101,6 @@ static void supermicro_old_pre_stop(unsigned long acpibase)
 	outl(val32, SMI_EN);	/* Needed to deactivate watchdog */
 }
 
-static void supermicro_old_pre_keepalive(unsigned long acpibase)
-{
-	/* Reload TCO Timer (done in iTCO_wdt_keepalive) + */
-	/* Clear "Expire Flag" (Bit 3 of TC01_STS register) */
-	outb(0x08, TCO1_STS);
-}
-
 /*
  *	Vendor Support: 2
  *	Board: Super Micro Computer Inc. P4SBx, P4DPx
@@ -337,9 +330,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_stop);
 
 void iTCO_vendor_pre_keepalive(unsigned long acpibase, unsigned int heartbeat)
 {
-	if (vendorsupport == SUPERMICRO_OLD_BOARD)
-		supermicro_old_pre_keepalive(acpibase);
-	else if (vendorsupport == SUPERMICRO_NEW_BOARD)
+	if (vendorsupport == SUPERMICRO_NEW_BOARD)
 		supermicro_new_pre_set_heartbeat(heartbeat);
 }
 EXPORT_SYMBOL(iTCO_vendor_pre_keepalive);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 8da886035374..69de8713b8e4 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -40,7 +40,7 @@
 
 /* Module and version information */
 #define DRV_NAME	"iTCO_wdt"
-#define DRV_VERSION	"1.05"
+#define DRV_VERSION	"1.06"
 #define PFX		DRV_NAME ": "
 
 /* Includes */
@@ -391,8 +391,8 @@ static struct platform_device *iTCO_wdt_platform_device;
 #define WATCHDOG_HEARTBEAT 30	/* 30 sec default heartbeat */
 static int heartbeat = WATCHDOG_HEARTBEAT;	/* in seconds */
 module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. "
-	"(2<heartbeat<39 (TCO v1) or 613 (TCO v2), default="
+MODULE_PARM_DESC(heartbeat, "Watchdog timeout in seconds. "
+	"5..76 (TCO v1) or 3..614 (TCO v2), default="
 	__MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
 
 static int nowayout = WATCHDOG_NOWAYOUT;
@@ -523,8 +523,13 @@ static int iTCO_wdt_keepalive(void)
 	/* Reload the timer by writing to the TCO Timer Counter register */
 	if (iTCO_wdt_private.iTCO_version == 2)
 		outw(0x01, TCO_RLD);
-	else if (iTCO_wdt_private.iTCO_version == 1)
+	else if (iTCO_wdt_private.iTCO_version == 1) {
+		/* Reset the timeout status bit so that the timer
+		 * needs to count down twice again before rebooting */
+		outw(0x0008, TCO1_STS);	/* write 1 to clear bit */
+
 		outb(0x01, TCO_RLD);
+	}
 
 	spin_unlock(&iTCO_wdt_private.io_lock);
 	return 0;
@@ -537,6 +542,11 @@ static int iTCO_wdt_set_heartbeat(int t)
 	unsigned int tmrval;
 
 	tmrval = seconds_to_ticks(t);
+
+	/* For TCO v1 the timer counts down twice before rebooting */
+	if (iTCO_wdt_private.iTCO_version == 1)
+		tmrval /= 2;
+
 	/* from the specs: */
 	/* "Values of 0h-3h are ignored and should not be attempted" */
 	if (tmrval < 0x04)
@@ -593,6 +603,8 @@ static int iTCO_wdt_get_timeleft(int *time_left)
 	spin_lock(&iTCO_wdt_private.io_lock);
 	val8 = inb(TCO_RLD);
 	val8 &= 0x3f;
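+	/* On TCO v1 the timer must expire twice before the reboot fires;
+	 * if the first timeout has not been logged yet (bit 3 of TCO1_STS
+	 * clear), one full countdown period is still to come. */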
+	if (!(inw(TCO1_STS) & 0x0008))
+		val8 += (inb(TCOv1_TMR) & 0x3f);
 	spin_unlock(&iTCO_wdt_private.io_lock);
 
 	*time_left = (val8 * 6) / 10;
@@ -832,9 +844,9 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
 			TCOBASE);
 
 	/* Clear out the (probably old) status */
-	outb(8, TCO1_STS);	/* Clear the Time Out Status bit */
-	outb(2, TCO2_STS);	/* Clear SECOND_TO_STS bit */
-	outb(4, TCO2_STS);	/* Clear BOOT_STS bit */
+	outw(0x0008, TCO1_STS);	/* Clear the Time Out Status bit */
+	outw(0x0002, TCO2_STS);	/* Clear SECOND_TO_STS bit */
+	outw(0x0004, TCO2_STS);	/* Clear BOOT_STS bit */
 
 	/* Make sure the watchdog is not running */
 	iTCO_wdt_stop();
@@ -844,8 +856,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
 	if (iTCO_wdt_set_heartbeat(heartbeat)) {
 		iTCO_wdt_set_heartbeat(WATCHDOG_HEARTBEAT);
 		printk(KERN_INFO PFX
-			"heartbeat value must be 2 < heartbeat < 39 (TCO v1) "
-			"or 613 (TCO v2), using %d\n", heartbeat);
+			"timeout value out of range, using %d\n", heartbeat);
 	}
 
 	ret = misc_register(&iTCO_wdt_miscdev);
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
new file mode 100644
index 000000000000..ea25885781bb
--- /dev/null
+++ b/drivers/watchdog/imx2_wdt.c
@@ -0,0 +1,358 @@
+/*
+ * Watchdog driver for IMX2 and later processors
+ *
+ * Copyright (C) 2010 Wolfram Sang, Pengutronix e.K. <w.sang@pengutronix.de>
+ *
+ * some parts adapted from similar drivers by Darius Augulis and Vladimir
+ * Zapolskiy, additional improvements by Wim Van Sebroeck.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * NOTE: MX1 has a slightly different Watchdog than MX2 and later:
+ *
+ *			MX1:		MX2+:
+ *			----		-----
+ * Registers:		32-bit		16-bit
+ * Stoppable timer:	Yes		No
+ * Need to enable clk:	No		Yes
+ * Halt on suspend:	Manual		Can be automatic
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+#include <linux/clk.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <mach/hardware.h>
+
+#define DRIVER_NAME "imx2-wdt"
+
+#define IMX2_WDT_WCR		0x00		/* Control Register */
+#define IMX2_WDT_WCR_WT		(0xFF << 8)	/* -> Watchdog Timeout Field */
+#define IMX2_WDT_WCR_WRE	(1 << 3)	/* -> WDOG Reset Enable */
+#define IMX2_WDT_WCR_WDE	(1 << 2)	/* -> Watchdog Enable */
+
+#define IMX2_WDT_WSR		0x02		/* Service Register */
+#define IMX2_WDT_SEQ1		0x5555		/* -> service sequence 1 */
+#define IMX2_WDT_SEQ2		0xAAAA		/* -> service sequence 2 */
+
+#define IMX2_WDT_MAX_TIME	128
+#define IMX2_WDT_DEFAULT_TIME	60		/* in seconds */
+
+#define WDOG_SEC_TO_COUNT(s)	((s * 2 - 1) << 8)
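+/* The WT field counts half-seconds: a field value of n gives (n + 1) / 2
+ * seconds, hence s seconds encode as (2 * s - 1), shifted into bits 15..8. */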
+
+#define IMX2_WDT_STATUS_OPEN	0
+#define IMX2_WDT_STATUS_STARTED	1
+#define IMX2_WDT_EXPECT_CLOSE	2
+
+static struct {
+	struct clk *clk;
+	void __iomem *base;
+	unsigned timeout;
+	unsigned long status;
+	struct timer_list timer;	/* Pings the watchdog when closed */
+} imx2_wdt;
+
+static struct miscdevice imx2_wdt_miscdev;
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+
+static unsigned timeout = IMX2_WDT_DEFAULT_TIME;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
+				__MODULE_STRING(IMX2_WDT_DEFAULT_TIME) ")");
+
+static const struct watchdog_info imx2_wdt_info = {
+	.identity = "imx2+ watchdog",
+	.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+};
+
+static inline void imx2_wdt_setup(void)
+{
+	u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);
+
+	/* Strip the old watchdog Time-Out value */
+	val &= ~IMX2_WDT_WCR_WT;
+	/* Generate reset if WDOG times out */
+	val &= ~IMX2_WDT_WCR_WRE;
+	/* Keep Watchdog Disabled */
+	val &= ~IMX2_WDT_WCR_WDE;
+	/* Set the watchdog's Time-Out value */
+	val |= WDOG_SEC_TO_COUNT(imx2_wdt.timeout);
+
+	__raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);
+
+	/* enable the watchdog */
+	val |= IMX2_WDT_WCR_WDE;
+	__raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);
+}
+
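+/* Servicing (reloading) the watchdog is a fixed two-write unlock sequence:
+ * 0x5555 followed by 0xAAAA written to the Service Register. */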
+static inline void imx2_wdt_ping(void)
+{
+	__raw_writew(IMX2_WDT_SEQ1, imx2_wdt.base + IMX2_WDT_WSR);
+	__raw_writew(IMX2_WDT_SEQ2, imx2_wdt.base + IMX2_WDT_WSR);
+}
+
+static void imx2_wdt_timer_ping(unsigned long arg)
+{
+	/* ping it every imx2_wdt.timeout / 2 seconds to prevent reboot */
+	imx2_wdt_ping();
+	mod_timer(&imx2_wdt.timer, jiffies + imx2_wdt.timeout * HZ / 2);
+}
+
+static void imx2_wdt_start(void)
+{
+	if (!test_and_set_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
+		/* at our first start we enable clock and do initialisations */
+		clk_enable(imx2_wdt.clk);
+
+		imx2_wdt_setup();
+	} else	/* delete the timer that pings the watchdog after close */
+		del_timer_sync(&imx2_wdt.timer);
+
+	/* Watchdog is enabled - time to reload the timeout value */
+	imx2_wdt_ping();
+}
+
+static void imx2_wdt_stop(void)
+{
+	/* we don't need a clk_disable, it cannot be disabled once started.
+	 * We use a timer to ping the watchdog while /dev/watchdog is closed */
+	imx2_wdt_timer_ping(0);
+}
+
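+/* Note: a newly written WT value appears to be latched into the running
+ * counter only at the next service sequence, which is why callers ping
+ * the watchdog right after changing the timeout. */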
+static void imx2_wdt_set_timeout(int new_timeout)
+{
+	u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);
+
+	/* set the new timeout value in the WCR */
+	val &= ~IMX2_WDT_WCR_WT;
+	val |= WDOG_SEC_TO_COUNT(new_timeout);
+	__raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);
+}
+
+static int imx2_wdt_open(struct inode *inode, struct file *file)
+{
+	if (test_and_set_bit(IMX2_WDT_STATUS_OPEN, &imx2_wdt.status))
+		return -EBUSY;
+
+	imx2_wdt_start();
+	return nonseekable_open(inode, file);
+}
+
+static int imx2_wdt_close(struct inode *inode, struct file *file)
+{
+	if (test_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status) && !nowayout)
+		imx2_wdt_stop();
+	else {
+		dev_crit(imx2_wdt_miscdev.parent,
+			"Unexpected close: Expect reboot!\n");
+		imx2_wdt_ping();
+	}
+
+	clear_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
+	clear_bit(IMX2_WDT_STATUS_OPEN, &imx2_wdt.status);
+	return 0;
+}
+
+static long imx2_wdt_ioctl(struct file *file, unsigned int cmd,
+						unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	int new_value;
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user(argp, &imx2_wdt_info,
+			sizeof(struct watchdog_info)) ? -EFAULT : 0;
+
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, p);
+
+	case WDIOC_KEEPALIVE:
+		imx2_wdt_ping();
+		return 0;
+
+	case WDIOC_SETTIMEOUT:
+		if (get_user(new_value, p))
+			return -EFAULT;
+		if ((new_value < 1) || (new_value > IMX2_WDT_MAX_TIME))
+			return -EINVAL;
+		imx2_wdt_set_timeout(new_value);
+		imx2_wdt.timeout = new_value;
+		imx2_wdt_ping();
+
+		/* Fallthrough to return current value */
+	case WDIOC_GETTIMEOUT:
+		return put_user(imx2_wdt.timeout, p);
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+static ssize_t imx2_wdt_write(struct file *file, const char __user *data,
+						size_t len, loff_t *ppos)
+{
+	size_t i;
+	char c;
+
+	if (len == 0)	/* Can we even see this? */
+		return 0;
+
+	clear_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
+	/* scan to see whether or not we got the magic character */
+	for (i = 0; i != len; i++) {
+		if (get_user(c, data + i))
+			return -EFAULT;
+		if (c == 'V')
+			set_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
+	}
+
+	imx2_wdt_ping();
+	return len;
+}
+
+static const struct file_operations imx2_wdt_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.unlocked_ioctl = imx2_wdt_ioctl,
+	.open = imx2_wdt_open,
+	.release = imx2_wdt_close,
+	.write = imx2_wdt_write,
+};
+
+static struct miscdevice imx2_wdt_miscdev = {
+	.minor = WATCHDOG_MINOR,
+	.name = "watchdog",
+	.fops = &imx2_wdt_fops,
+};
+
+static int __init imx2_wdt_probe(struct platform_device *pdev)
+{
+	int ret;
+	int res_size;
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "can't get device resources\n");
+		return -ENODEV;
+	}
+
+	res_size = resource_size(res);
+	if (!devm_request_mem_region(&pdev->dev, res->start, res_size,
+		res->name)) {
+		dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n",
+			res_size, res->start);
+		return -ENOMEM;
+	}
+
+	imx2_wdt.base = devm_ioremap_nocache(&pdev->dev, res->start, res_size);
+	if (!imx2_wdt.base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		return -ENOMEM;
+	}
+
+	imx2_wdt.clk = clk_get_sys("imx-wdt.0", NULL);
+	if (IS_ERR(imx2_wdt.clk)) {
+		dev_err(&pdev->dev, "can't get Watchdog clock\n");
+		return PTR_ERR(imx2_wdt.clk);
+	}
+
+	imx2_wdt.timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
+	if (imx2_wdt.timeout != timeout)
+		dev_warn(&pdev->dev, "Initial timeout out of range! "
+			"Clamped from %u to %u\n", timeout, imx2_wdt.timeout);
+
+	setup_timer(&imx2_wdt.timer, imx2_wdt_timer_ping, 0);
+
+	imx2_wdt_miscdev.parent = &pdev->dev;
+	ret = misc_register(&imx2_wdt_miscdev);
+	if (ret)
+		goto fail;
+
+	dev_info(&pdev->dev,
+		"IMX2+ Watchdog Timer enabled. timeout=%ds (nowayout=%d)\n",
+						imx2_wdt.timeout, nowayout);
+	return 0;
+
+fail:
+	imx2_wdt_miscdev.parent = NULL;
+	clk_put(imx2_wdt.clk);
+	return ret;
+}
+
+static int __exit imx2_wdt_remove(struct platform_device *pdev)
+{
+	misc_deregister(&imx2_wdt_miscdev);
+
+	if (test_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
+		del_timer_sync(&imx2_wdt.timer);
+
+		dev_crit(imx2_wdt_miscdev.parent,
+			"Device removed: Expect reboot!\n");
+	} else
+		clk_put(imx2_wdt.clk);
+
+	imx2_wdt_miscdev.parent = NULL;
+	return 0;
+}
+
+static void imx2_wdt_shutdown(struct platform_device *pdev)
+{
+	if (test_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
+		/* we are running, we need to delete the timer but will give
+		 * max timeout before reboot will take place */
+		del_timer_sync(&imx2_wdt.timer);
+		imx2_wdt_set_timeout(IMX2_WDT_MAX_TIME);
+		imx2_wdt_ping();
+
+		dev_crit(imx2_wdt_miscdev.parent,
+			"Device shutdown: Expect reboot!\n");
+	}
+}
+
+static struct platform_driver imx2_wdt_driver = {
+	.probe		= imx2_wdt_probe,
+	.remove		= __exit_p(imx2_wdt_remove),
+	.shutdown	= imx2_wdt_shutdown,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init imx2_wdt_init(void)
+{
+	return platform_driver_probe(&imx2_wdt_driver, imx2_wdt_probe);
+}
+module_init(imx2_wdt_init);
+
+static void __exit imx2_wdt_exit(void)
+{
+	platform_driver_unregister(&imx2_wdt_driver);
+}
+module_exit(imx2_wdt_exit);
+
+MODULE_AUTHOR("Wolfram Sang");
+MODULE_DESCRIPTION("Watchdog driver for IMX2 and later");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 4e3941c5e293..6622335773bb 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -53,7 +53,7 @@ static int mpc8xxx_wdt_init_late(void);
 static u16 timeout = 0xffff;
 module_param(timeout, ushort, 0);
 MODULE_PARM_DESC(timeout,
-	"Watchdog timeout in ticks. (0<timeout<65536, default=65535");
+	"Watchdog timeout in ticks. (0<timeout<65536, default=65535)");
 
 static int reset = 1;
 module_param(reset, bool, 0);
@@ -273,12 +273,12 @@ static const struct of_device_id mpc8xxx_wdt_match[] = {
 MODULE_DEVICE_TABLE(of, mpc8xxx_wdt_match);
 
 static struct of_platform_driver mpc8xxx_wdt_driver = {
-	.match_table	= mpc8xxx_wdt_match,
 	.probe		= mpc8xxx_wdt_probe,
 	.remove		= __devexit_p(mpc8xxx_wdt_remove),
 	.driver		= {
 		.name	= "mpc8xxx_wdt",
 		.owner	= THIS_MODULE,
+		.of_match_table = mpc8xxx_wdt_match,
 	},
 };
 
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index d3aa2f1fe61d..3a56bc360924 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -53,7 +53,9 @@
 #define WDTO		0x11	/* Watchdog timeout register */
 #define WDCFG		0x12	/* Watchdog config register */
 
-static int io = 0x2E;		/* Address used on Portwell Boards */
+#define IO_DEFAULT	0x2E	/* Address used on Portwell Boards */
+
+static int io = IO_DEFAULT;
 
 static int timeout = DEFAULT_TIMEOUT;	/* timeout value */
 static unsigned long timer_enabled;	/* is the timer enabled? */
@@ -583,12 +585,13 @@ MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
 
 module_param(io, int, 0);
-MODULE_PARM_DESC(io, MODNAME " I/O port (default: " __MODULE_STRING(io) ").");
+MODULE_PARM_DESC(io, MODNAME " I/O port (default: "
+					__MODULE_STRING(IO_DEFAULT) ").");
 
 module_param(timeout, int, 0);
 MODULE_PARM_DESC(timeout,
 		"Watchdog timeout in minutes (default="
-				__MODULE_STRING(timeout) ").");
+				__MODULE_STRING(DEFAULT_TIMEOUT) ").");
 
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout,
diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c
index 09102f09e681..a7b5ad2a98bd 100644
--- a/drivers/watchdog/pnx833x_wdt.c
+++ b/drivers/watchdog/pnx833x_wdt.c
@@ -33,6 +33,8 @@
 #define PFX "pnx833x: "
 #define WATCHDOG_TIMEOUT 30		/* 30 sec Maximum timeout */
 #define WATCHDOG_COUNT_FREQUENCY 68000000U	/* Watchdog counts at 68MHZ. */
+#define PNX_WATCHDOG_TIMEOUT	(WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY)
+#define PNX_TIMEOUT_VALUE	2040000000U
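+/* 2040000000 = 30 * 68000000; written out as a literal so __MODULE_STRING()
+ * in the parameter description below prints the number, not the macro text. */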
 
 /** CONFIG block */
 #define PNX833X_CONFIG			(0x07000U)
@@ -47,20 +49,21 @@
 static int pnx833x_wdt_alive;
 
 /* Set default timeout in MHZ.*/
-static int pnx833x_wdt_timeout = (WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY);
+static int pnx833x_wdt_timeout = PNX_WATCHDOG_TIMEOUT;
 module_param(pnx833x_wdt_timeout, int, 0);
 MODULE_PARM_DESC(timeout, "Watchdog timeout in Mhz. (68Mhz clock), default="
-		__MODULE_STRING(pnx833x_wdt_timeout) "(30 seconds).");
+		__MODULE_STRING(PNX_TIMEOUT_VALUE) "(30 seconds).");
 
 static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, int, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
 				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static int start_enabled = 1;
+#define START_DEFAULT	1
+static int start_enabled = START_DEFAULT;
 module_param(start_enabled, int, 0);
 MODULE_PARM_DESC(start_enabled, "Watchdog is started on module insertion "
-				"(default=" __MODULE_STRING(start_enabled) ")");
+				"(default=" __MODULE_STRING(START_DEFAULT) ")");
 
 static void pnx833x_wdt_start(void)
 {
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index ea7f803f6248..5dceeddc8859 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -239,8 +239,11 @@ static const struct of_device_id riowd_match[] = {
 MODULE_DEVICE_TABLE(of, riowd_match);
 
 static struct of_platform_driver riowd_driver = {
-	.name		= DRIVER_NAME,
-	.match_table	= riowd_match,
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = riowd_match,
+	},
 	.probe		= riowd_probe,
 	.remove		= __devexit_p(riowd_remove),
 };
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index e4cebef55177..300932580ded 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -63,7 +63,7 @@ module_param(nowayout, int, 0);
 module_param(soft_noboot, int, 0);
 module_param(debug, int, 0);
 
-MODULE_PARM_DESC(tmr_margin, "Watchdog tmr_margin in seconds. default="
+MODULE_PARM_DESC(tmr_margin, "Watchdog tmr_margin in seconds. (default="
 		__MODULE_STRING(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME) ")");
 MODULE_PARM_DESC(tmr_atboot,
 		"Watchdog is started at boot time if set to 1, default="
@@ -71,8 +71,8 @@ MODULE_PARM_DESC(tmr_atboot,
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
 			__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, "
-			"0 to reboot (default depends on ONLY_TESTING)");
-MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug, (default 0)");
+			"0 to reboot (default 0)");
+MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)");
 
 static unsigned long open_lock;
 static struct device *wdt_dev;	/* platform device attached to */
@@ -426,8 +426,7 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
 	wdt_mem = request_mem_region(res->start, size, pdev->name);
 	if (wdt_mem == NULL) {
 		dev_err(dev, "failed to get memory region\n");
-		ret = -ENOENT;
-		goto err_req;
+		return -EBUSY;
 	}
 
 	wdt_base = ioremap(res->start, size);
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index a03f84e5ee1f..6fc74065abee 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -496,7 +496,7 @@ MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
 module_param(clock_division_ratio, int, 0);
 MODULE_PARM_DESC(clock_division_ratio,
 	"Clock division ratio. Valid ranges are from 0x5 (1.31ms) "
-	"to 0x7 (5.25ms). (default=" __MODULE_STRING(clock_division_ratio) ")");
+	"to 0x7 (5.25ms). (default=" __MODULE_STRING(WTCSR_CKS_4096) ")");
 
 module_param(heartbeat, int, 0);
 MODULE_PARM_DESC(heartbeat,
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index dcabe77ad141..b5045ca7e61c 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -190,6 +190,8 @@ static int __devinit twl4030_wdt_probe(struct platform_device *pdev)
 
 	twl4030_wdt_dev = pdev;
 
+	twl4030_wdt_disable(wdt);
+
 	ret = misc_register(&wdt->miscdev);
 	if (ret) {
 		dev_err(wdt->miscdev.parent,
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index bfda2e99dd89..552a4381e78f 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -91,7 +91,7 @@ MODULE_PARM_DESC(tachometer,
 static int type = 500;
 module_param(type, int, 0);
 MODULE_PARM_DESC(type,
-		"WDT501-P Card type (500 or 501 , default=500)");
+		"WDT501-P Card type (500 or 501, default=500)");
 
 /*
  *	Programming support
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 90ef70eb47d7..5c2521fc836c 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -63,7 +63,7 @@ static char expect_close;
 static DEFINE_SPINLOCK(spinlock);
 
 module_param(timeout, int, 0);
-MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (60..15300), default="
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (60..15300, default="
 				__MODULE_STRING(DEFAULT_TIMEOUT) ")");
 module_param(testmode, int, 0);
 MODULE_PARM_DESC(testmode, "Watchdog testmode (1 = no reboot), default=0");
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 8943b8ccee1a..07e857b0de13 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -185,6 +185,7 @@ static void shutdown_handler(struct xenbus_watch *watch,
 	kfree(str);
 }
 
+#ifdef CONFIG_MAGIC_SYSRQ
 static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
 			  unsigned int len)
 {
@@ -214,15 +215,16 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
 	handle_sysrq(sysrq_key, NULL);
 }
 
-static struct xenbus_watch shutdown_watch = {
-	.node = "control/shutdown",
-	.callback = shutdown_handler
-};
-
 static struct xenbus_watch sysrq_watch = {
 	.node = "control/sysrq",
 	.callback = sysrq_handler
 };
+#endif
+
+static struct xenbus_watch shutdown_watch = {
+	.node = "control/shutdown",
+	.callback = shutdown_handler
+};
 
 static int setup_shutdown_watcher(void)
 {
@@ -234,11 +236,13 @@ static int setup_shutdown_watcher(void)
 		return err;
 	}
 
+#ifdef CONFIG_MAGIC_SYSRQ
 	err = register_xenbus_watch(&sysrq_watch);
 	if (err) {
 		printk(KERN_ERR "Failed to set sysrq watcher\n");
 		return err;
 	}
+#endif
 
 	return 0;
 }